diff --git a/Documentation/ABI/testing/configfs-usb-gadget-uvc b/Documentation/ABI/testing/configfs-usb-gadget-uvc index ac5e11af79a81b0a8e8b57016103d4ea2090d579..4b1813994bd0d964badb626eaddb880f65a535a4 100644 --- a/Documentation/ABI/testing/configfs-usb-gadget-uvc +++ b/Documentation/ABI/testing/configfs-usb-gadget-uvc @@ -51,7 +51,7 @@ Date: Dec 2014 KernelVersion: 4.0 Description: Default output terminal descriptors - All attributes read only: + All attributes read only except bSourceID: ============== ============================================= iTerminal index of string descriptor diff --git a/Documentation/ABI/testing/sysfs-kernel-oops_count b/Documentation/ABI/testing/sysfs-kernel-oops_count new file mode 100644 index 0000000000000000000000000000000000000000..156cca9dbc960628c07458c9ec52d686cc551385 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-kernel-oops_count @@ -0,0 +1,6 @@ +What: /sys/kernel/oops_count +Date: November 2022 +KernelVersion: 6.2.0 +Contact: Linux Kernel Hardening List +Description: + Shows how many times the system has Oopsed since last boot. diff --git a/Documentation/ABI/testing/sysfs-kernel-warn_count b/Documentation/ABI/testing/sysfs-kernel-warn_count new file mode 100644 index 0000000000000000000000000000000000000000..90a029813717dc5d327df120815b57a7eba6e113 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-kernel-warn_count @@ -0,0 +1,6 @@ +What: /sys/kernel/warn_count +Date: November 2022 +KernelVersion: 6.2.0 +Contact: Linux Kernel Hardening List +Description: + Shows how many times the system has Warned since last boot. diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst index fd913ae64ea87a909595396b059a7b98138abd47..131f93a69d92bc5ef9487f0789916d0df694f655 100644 --- a/Documentation/admin-guide/cgroup-v1/memory.rst +++ b/Documentation/admin-guide/cgroup-v1/memory.rst @@ -82,6 +82,8 @@ Brief summary of control files. memory.swappiness set/show swappiness parameter of vmscan (See sysctl's vm.swappiness) memory.move_charge_at_immigrate set/show controls of moving charges + This knob is deprecated and shouldn't be + used. memory.oom_control set/show oom controls. memory.numa_stat show the number of memory usage per numa node @@ -740,8 +742,15 @@ NOTE2: It is recommended to set the soft limit always below the hard limit, otherwise the hard limit will take precedence. -8. Move charges at task migration -================================= +8. Move charges at task migration (DEPRECATED!) +=============================================== + +THIS IS DEPRECATED! + +It's expensive and unreliable! It's better practice to launch workload +tasks directly from inside their target cgroup. Use dedicated workload +cgroups to allow fine-grained policy adjustments without having to +move physical pages between control domains. Users can move charges associated with a task along with task migration, that is, uncharge task's pages from the old cgroup and charge them to the new cgroup. diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst index 7e061ed449aaab9ebfbb4aff73640021aa55e848..0fba3758d0da89273376b126717f8e031cf18630 100644 --- a/Documentation/admin-guide/hw-vuln/spectre.rst +++ b/Documentation/admin-guide/hw-vuln/spectre.rst @@ -479,8 +479,16 @@ Spectre variant 2 On Intel Skylake-era systems the mitigation covers most, but not all, cases. See :ref:`[3] ` for more details. - On CPUs with hardware mitigation for Spectre variant 2 (e.g. 
Enhanced - IBRS on x86), retpoline is automatically disabled at run time. + On CPUs with hardware mitigation for Spectre variant 2 (e.g. IBRS + or enhanced IBRS on x86), retpoline is automatically disabled at run time. + + Systems which support enhanced IBRS (eIBRS) enable IBRS protection once at + boot, by setting the IBRS bit, and they're automatically protected against + Spectre v2 variant attacks, including cross-thread branch target injections + on SMT systems (STIBP). In other words, eIBRS enables STIBP too. + + Legacy IBRS systems clear the IBRS bit on exit to userspace and + therefore explicitly enable STIBP for that The retpoline mitigation is turned on by default on vulnerable CPUs. It can be forced on or off by the administrator @@ -504,9 +512,12 @@ Spectre variant 2 For Spectre variant 2 mitigation, individual user programs can be compiled with return trampolines for indirect branches. This protects them from consuming poisoned entries in the branch - target buffer left by malicious software. Alternatively, the - programs can disable their indirect branch speculation via prctl() - (See :ref:`Documentation/userspace-api/spec_ctrl.rst `). + target buffer left by malicious software. + + On legacy IBRS systems, at return to userspace, implicit STIBP is disabled + because the kernel clears the IBRS bit. In this case, the userspace programs + can disable indirect branch speculation via prctl() (See + :ref:`Documentation/userspace-api/spec_ctrl.rst `). On x86, this will turn on STIBP to guard against attacks from the sibling thread when the user program is running, and use IBPB to flush the branch target buffer when switching to/from the program. diff --git a/Documentation/admin-guide/kdump/gdbmacros.txt b/Documentation/admin-guide/kdump/gdbmacros.txt index 82aecdcae8a6c182d0bebea90492b79a199510a8..030de95e3e6b2464f130ecd2f2e93f8ce804d151 100644 --- a/Documentation/admin-guide/kdump/gdbmacros.txt +++ b/Documentation/admin-guide/kdump/gdbmacros.txt @@ -312,10 +312,10 @@ define dmesg set var $prev_flags = $info->flags end - set var $id = ($id + 1) & $id_mask if ($id == $end_id) loop_break end + set var $id = ($id + 1) & $id_mask end end document dmesg diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst index a4b1ebc2e70b0829fbc5e8802c1b11d9a8190fd9..6b0c7b650deaae2712bb9705e318ee9469f781bf 100644 --- a/Documentation/admin-guide/sysctl/kernel.rst +++ b/Documentation/admin-guide/sysctl/kernel.rst @@ -663,6 +663,15 @@ This is the default behavior. an oops event is detected. +oops_limit +========== + +Number of kernel oopses after which the kernel should panic when +``panic_on_oops`` is not set. Setting this to 0 disables checking +the count. Setting this to 1 has the same effect as setting +``panic_on_oops=1``. The default value is 10000. + + osrelease, ostype & version =========================== @@ -1469,6 +1478,16 @@ entry will default to 2 instead of 0. 2 Unprivileged calls to ``bpf()`` are disabled = ============================================================= + +warn_limit +========== + +Number of kernel warnings after which the kernel should panic when +``panic_on_warn`` is not set. Setting this to 0 disables checking +the warning count. Setting this to 1 has the same effect as setting +``panic_on_warn=1``. The default value is 0. 
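As an aside on the oops_limit/warn_limit sysctls and the /sys/kernel/oops_count and /sys/kernel/warn_count files introduced above: a minimal userspace sketch (not part of this patch) that reads the new interfaces could look like the following C program. The read_counter() helper and its error handling are illustrative only; the file paths are taken directly from the hunks above.

	#include <stdio.h>

	/* Read a single numeric value from a procfs/sysfs file; -1 on error. */
	static long read_counter(const char *path)
	{
		FILE *f = fopen(path, "r");
		long val = -1;

		if (!f)
			return -1;
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
		return val;
	}

	int main(void)
	{
		/* Counters exposed by the sysfs ABI entries added above. */
		printf("oops_count: %ld\n", read_counter("/sys/kernel/oops_count"));
		printf("warn_count: %ld\n", read_counter("/sys/kernel/warn_count"));

		/* Limits documented in the sysctl hunks above (0 disables the check). */
		printf("oops_limit: %ld\n", read_counter("/proc/sys/kernel/oops_limit"));
		printf("warn_limit: %ld\n", read_counter("/proc/sys/kernel/warn_limit"));
		return 0;
	}

A monitoring agent could poll these counters and compare them against the configured limits before the kernel reaches the panic threshold itself.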
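Relatedly, the spectre.rst hunk earlier in this patch points userspace at prctl() for per-task indirect branch speculation control on legacy IBRS systems. A hedged sketch of that call follows, assuming a glibc toolchain and the PR_SPEC_* constants from <linux/prctl.h>; this is the generic spec_ctrl interface documented in Documentation/userspace-api/spec_ctrl.rst, not something added by this series.

	#include <stdio.h>
	#include <errno.h>
	#include <string.h>
	#include <sys/prctl.h>
	#include <linux/prctl.h>

	int main(void)
	{
		/* Ask the kernel to disable indirect branch speculation for this task. */
		if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
			  PR_SPEC_DISABLE, 0, 0))
			fprintf(stderr, "PR_SET_SPECULATION_CTRL: %s\n", strerror(errno));

		/* Query the current state; the return value is a PR_SPEC_* bitmask. */
		printf("indirect branch speculation state: 0x%x\n",
		       prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0));
		return 0;
	}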
+ + watchdog ======== diff --git a/Documentation/dev-tools/gdb-kernel-debugging.rst b/Documentation/dev-tools/gdb-kernel-debugging.rst index 4756f6b3a04e54d6b09254809fc5b6ce84499d42..10cdd990b63d00cd56a38c7db1853e514965d285 100644 --- a/Documentation/dev-tools/gdb-kernel-debugging.rst +++ b/Documentation/dev-tools/gdb-kernel-debugging.rst @@ -39,6 +39,10 @@ Setup this mode. In this case, you should build the kernel with CONFIG_RANDOMIZE_BASE disabled if the architecture supports KASLR. +- Build the gdb scripts (required on kernels v5.1 and above):: + + make scripts_gdb + - Enable the gdb stub of QEMU/KVM, either - at VM startup time by appending "-s" to the QEMU command line diff --git a/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml b/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml index 2e35aeaa8781da51257892c7de17804b52260bb3..89e3819c6127a384cc41aef1f08f37d3aae87f9e 100644 --- a/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml +++ b/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml @@ -61,7 +61,7 @@ patternProperties: description: phandle of the CPU DAI patternProperties: - "^codec-[0-9]+$": + "^codec(-[0-9]+)?$": type: object description: |- Codecs: diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 2b4b64797191fb96d1ab62491f7f655b07bad14c..08295f488d057e1016a7b8160cb3c5233a5fcb93 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -4031,6 +4031,18 @@ not holding a previously reported uncorrected error). :Parameters: struct kvm_s390_cmma_log (in, out) :Returns: 0 on success, a negative value on error +Errors: + + ====== ============================================================= + ENOMEM not enough memory can be allocated to complete the task + ENXIO if CMMA is not enabled + EINVAL if KVM_S390_CMMA_PEEK is not set but migration mode was not enabled + EINVAL if KVM_S390_CMMA_PEEK is not set but dirty tracking has been + disabled (and thus migration mode was automatically disabled) + EFAULT if the userspace address is invalid or if no page table is + present for the addresses (e.g. when using hugepages). + ====== ============================================================= + This ioctl is used to get the values of the CMMA bits on the s390 architecture. It is meant to be used in two scenarios: @@ -4111,12 +4123,6 @@ mask is unused. values points to the userspace buffer where the result will be stored. -This ioctl can fail with -ENOMEM if not enough memory can be allocated to -complete the task, with -ENXIO if CMMA is not enabled, with -EINVAL if -KVM_S390_CMMA_PEEK is not set but migration mode was not enabled, with --EFAULT if the userspace address is invalid or if no page table is -present for the addresses (e.g. when using hugepages). - 4.108 KVM_S390_SET_CMMA_BITS ---------------------------- diff --git a/Documentation/virt/kvm/devices/vm.rst b/Documentation/virt/kvm/devices/vm.rst index 60acc39e0e937c52d5e98c160ccf5f8fb9825264..147efec626e5218825e36d7e4035e07e4892d86b 100644 --- a/Documentation/virt/kvm/devices/vm.rst +++ b/Documentation/virt/kvm/devices/vm.rst @@ -302,6 +302,10 @@ Allows userspace to start migration mode, needed for PGSTE migration. Setting this attribute when migration mode is already active will have no effects. +Dirty tracking must be enabled on all memslots, else -EINVAL is returned. When +dirty tracking is disabled on any memslot, migration mode is automatically +stopped. 
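To illustrate the KVM_S390_GET_CMMA_BITS error list and the migration-mode/dirty-tracking requirement documented above: a rough userspace sketch, only meaningful on an s390 host that already has a KVM VM file descriptor. The get_cmma_chunk() helper, its parameters and the error messages are invented for illustration; the ioctl, struct kvm_s390_cmma_log and KVM_S390_CMMA_PEEK come from <linux/kvm.h>.

	#include <stdio.h>
	#include <errno.h>
	#include <string.h>
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Fetch one chunk of CMMA values, reporting the error cases listed in the
	 * api.rst hunk above.  vm_fd is an already-created KVM VM file descriptor. */
	static int get_cmma_chunk(int vm_fd, uint64_t start_gfn, uint8_t *buf,
				  uint32_t count)
	{
		struct kvm_s390_cmma_log log = {
			.start_gfn = start_gfn,
			.count = count,
			.flags = 0,		/* or KVM_S390_CMMA_PEEK */
			.values = (uint64_t)(uintptr_t)buf,
		};

		if (ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log) < 0) {
			switch (errno) {
			case ENXIO:
				fprintf(stderr, "CMMA is not enabled\n");
				break;
			case EINVAL:
				fprintf(stderr, "migration mode not (or no longer) enabled\n");
				break;
			case ENOMEM:
				fprintf(stderr, "kernel could not allocate memory\n");
				break;
			case EFAULT:
				fprintf(stderr, "bad userspace buffer address\n");
				break;
			default:
				fprintf(stderr, "KVM_S390_GET_CMMA_BITS: %s\n", strerror(errno));
			}
			return -1;
		}
		/* On success the kernel fills buf and updates the count/remaining
		 * fields; see the api.rst text above for the exact semantics. */
		return (int)log.count;
	}

If dirty tracking is later disabled on any memslot, migration mode is switched off automatically and subsequent calls fail with EINVAL, matching the vm.rst addition above.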
+ :Parameters: none :Returns: -ENOMEM if there is not enough free memory to start migration mode; -EINVAL if the state of the VM is invalid (e.g. no memory defined); diff --git a/MAINTAINERS b/MAINTAINERS index f6c6b403a1b7c16b29d4b00c3efb25a2408dad32..6c5efc4013ab5e237530a22e07da84b8bfe3f986 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3001,7 +3001,7 @@ F: drivers/net/ieee802154/atusb.h AUDIT SUBSYSTEM M: Paul Moore M: Eric Paris -L: linux-audit@redhat.com (moderated for non-subscribers) +L: audit@vger.kernel.org S: Supported W: https://github.com/linux-audit T: git git://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/audit.git diff --git a/Makefile b/Makefile index f68560f5b4187f7a4004e26add4e60aa19fb22fb..f9138338f86f53f0efd56df6359caa50a7101723 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 5 PATCHLEVEL = 10 -SUBLEVEL = 165 +SUBLEVEL = 174 EXTRAVERSION = NAME = Dare mighty things @@ -93,9 +93,16 @@ endif # If the user is running make -s (silent mode), suppress echoing of # commands +# make-4.0 (and later) keep single letter options in the 1st word of MAKEFLAGS. -ifneq ($(findstring s,$(filter-out --%,$(MAKEFLAGS))),) - quiet=silent_ +ifeq ($(filter 3.%,$(MAKE_VERSION)),) +silence:=$(findstring s,$(firstword -$(MAKEFLAGS))) +else +silence:=$(findstring s,$(filter-out --%,$(MAKEFLAGS))) +endif + +ifeq ($(silence),s) +quiet=silent_ endif export quiet Q KBUILD_VERBOSE diff --git a/README.OpenSource b/README.OpenSource index 5cd2c2dc118e7a9eecf68d65d641c7ec78a51fd9..400d39fc927db1683a605ca28f76d6cbd2d95cb9 100644 --- a/README.OpenSource +++ b/README.OpenSource @@ -3,7 +3,7 @@ "Name": "linux-5.10", "License": "GPL-2.0+", "License File": "COPYING", - "Version Number": "5.10.165", + "Version Number": "5.10.174", "Owner": "liuyu82@huawei.com", "Upstream URL": "https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/log/?h=linux-5.10.y", "Description": "linux kernel 5.10" diff --git a/arch/alpha/boot/tools/objstrip.c b/arch/alpha/boot/tools/objstrip.c index 08b430d25a315ff07d37e5e95e6dbda9048f8183..7cf92d172dce901057992bcbd7be1308e221e4f3 100644 --- a/arch/alpha/boot/tools/objstrip.c +++ b/arch/alpha/boot/tools/objstrip.c @@ -148,7 +148,7 @@ main (int argc, char *argv[]) #ifdef __ELF__ elf = (struct elfhdr *) buf; - if (elf->e_ident[0] == 0x7f && str_has_prefix((char *)elf->e_ident + 1, "ELF")) { + if (memcmp(&elf->e_ident[EI_MAG0], ELFMAG, SELFMAG) == 0) { if (elf->e_type != ET_EXEC) { fprintf(stderr, "%s: %s is not an ELF executable\n", prog_name, inname); diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c index 5b60c248de9eae6d762937f3eb570fbfeaeb5b65..cbefa5a7738465b4d240cd0d493a6b725058e1b7 100644 --- a/arch/alpha/kernel/module.c +++ b/arch/alpha/kernel/module.c @@ -146,10 +146,8 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab, base = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr; symtab = (Elf64_Sym *)sechdrs[symindex].sh_addr; - /* The small sections were sorted to the end of the segment. - The following should definitely cover them. 
*/ - gp = (u64)me->core_layout.base + me->core_layout.size - 0x8000; got = sechdrs[me->arch.gotsecindex].sh_addr; + gp = got + 0x8000; for (i = 0; i < n; i++) { unsigned long r_sym = ELF64_R_SYM (rela[i].r_info); diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c index 921d4b6e4d956185e6dbb7c3dd06f18f47b84bd7..751d3197ca766c4d6c7bde82cee27383f327c20b 100644 --- a/arch/alpha/kernel/traps.c +++ b/arch/alpha/kernel/traps.c @@ -192,7 +192,7 @@ die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15) local_irq_enable(); while (1); } - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } #ifndef CONFIG_MATHEMU @@ -235,7 +235,21 @@ do_entIF(unsigned long type, struct pt_regs *regs) { int signo, code; - if ((regs->ps & ~IPL_MAX) == 0) { + if (type == 3) { /* FEN fault */ + /* Irritating users can call PAL_clrfen to disable the + FPU for the process. The kernel will then trap in + do_switch_stack and undo_switch_stack when we try + to save and restore the FP registers. + + Given that GCC by default generates code that uses the + FP registers, PAL_clrfen is not useful except for DoS + attacks. So turn the bleeding FPU back on and be done + with it. */ + current_thread_info()->pcb.flags |= 1; + __reload_thread(&current_thread_info()->pcb); + return; + } + if (!user_mode(regs)) { if (type == 1) { const unsigned int *data = (const unsigned int *) regs->pc; @@ -368,20 +382,6 @@ do_entIF(unsigned long type, struct pt_regs *regs) } break; - case 3: /* FEN fault */ - /* Irritating users can call PAL_clrfen to disable the - FPU for the process. The kernel will then trap in - do_switch_stack and undo_switch_stack when we try - to save and restore the FP registers. - - Given that GCC by default generates code that uses the - FP registers, PAL_clrfen is not useful except for DoS - attacks. So turn the bleeding FPU back on and be done - with it. */ - current_thread_info()->pcb.flags |= 1; - __reload_thread(&current_thread_info()->pcb); - return; - case 5: /* illoc */ default: /* unexpected instruction-fault type */ ; @@ -577,7 +577,7 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg, printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n", pc, va, opcode, reg); - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); got_exception: /* Ok, we caught the exception, but we don't want it. Is there @@ -632,7 +632,7 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg, local_irq_enable(); while (1); } - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } /* diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c index 09172f017efc0fc7122e99300b96c5353c2dd343..5d42f94887daf36f9cbf4c0fa5ff7cca1d404106 100644 --- a/arch/alpha/mm/fault.c +++ b/arch/alpha/mm/fault.c @@ -204,7 +204,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr, printk(KERN_ALERT "Unable to handle kernel paging request at " "virtual address %016lx\n", address); die_if_kernel("Oops", regs, cause, (unsigned long*)regs - 16); - do_exit(SIGKILL); + make_task_dead(SIGKILL); /* We ran out of memory, or some other thing happened to us that made us unable to handle the page fault gracefully.
*/ diff --git a/arch/arm/boot/dts/exynos3250-rinato.dts b/arch/arm/boot/dts/exynos3250-rinato.dts index f9e3b13d3aac2721c7d6b3c0f7d0db9f6cf4ac7a..bbf01f76ce3b1f66f7a20c6231fb090892535ab3 100644 --- a/arch/arm/boot/dts/exynos3250-rinato.dts +++ b/arch/arm/boot/dts/exynos3250-rinato.dts @@ -249,7 +249,7 @@ &fimd { i80-if-timings { cs-setup = <0>; wr-setup = <0>; - wr-act = <1>; + wr-active = <1>; wr-hold = <0>; }; }; diff --git a/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi b/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi index 021d9fc1b49235c75342ba8d1d949f5f77fc4f25..27a1a895266559190e4708321d9c896cf89f9aee 100644 --- a/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi +++ b/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi @@ -10,7 +10,7 @@ / { thermal-zones { cpu_thermal: cpu-thermal { - thermal-sensors = <&tmu 0>; + thermal-sensors = <&tmu>; polling-delay-passive = <0>; polling-delay = <0>; trips { diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi index a1e54449f33f011f0a9d1181464e5991ee754b5b..41f0e64b1365ecfb6a654aef5b42711e2bfe5414 100644 --- a/arch/arm/boot/dts/exynos4.dtsi +++ b/arch/arm/boot/dts/exynos4.dtsi @@ -605,7 +605,7 @@ i2c_8: i2c@138e0000 { status = "disabled"; hdmi_i2c_phy: hdmiphy@38 { - compatible = "exynos4210-hdmiphy"; + compatible = "samsung,exynos4210-hdmiphy"; reg = <0x38>; }; }; diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi index fddc661ded28f6664d4c7cb5c645fb83b7773876..448e1b153a01b4d0108c5685f846901bc9991096 100644 --- a/arch/arm/boot/dts/exynos4210.dtsi +++ b/arch/arm/boot/dts/exynos4210.dtsi @@ -382,7 +382,6 @@ &cpu_alert2 { &cpu_thermal { polling-delay-passive = <0>; polling-delay = <0>; - thermal-sensors = <&tmu 0>; }; &gic { diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi index bd2d8835dd3695b02660b60aba6d40cf207ccfc8..62051e600b325914971630dc666913a9be1294bb 100644 --- a/arch/arm/boot/dts/exynos5250.dtsi +++ b/arch/arm/boot/dts/exynos5250.dtsi @@ -1109,7 +1109,7 @@ timer { &cpu_thermal { polling-delay-passive = <0>; polling-delay = <0>; - thermal-sensors = <&tmu 0>; + thermal-sensors = <&tmu>; cooling-maps { map0 { diff --git a/arch/arm/boot/dts/exynos5410-odroidxu.dts b/arch/arm/boot/dts/exynos5410-odroidxu.dts index bd1d8499a108b658c8a0c1f9d753036946161f39..147c077e8855191aded667ff2fde9460b5eac0b3 100644 --- a/arch/arm/boot/dts/exynos5410-odroidxu.dts +++ b/arch/arm/boot/dts/exynos5410-odroidxu.dts @@ -116,7 +116,6 @@ &clock_audss { }; &cpu0_thermal { - thermal-sensors = <&tmu_cpu0 0>; polling-delay-passive = <0>; polling-delay = <0>; diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi index 83580f076a5875ea31aafc98ec87869ce02e8dc0..34886535f84772d5e8f8d74ba9d3066fdf756046 100644 --- a/arch/arm/boot/dts/exynos5420.dtsi +++ b/arch/arm/boot/dts/exynos5420.dtsi @@ -605,7 +605,7 @@ dp_phy: dp-video-phy { }; mipi_phy: mipi-video-phy { - compatible = "samsung,s5pv210-mipi-video-phy"; + compatible = "samsung,exynos5420-mipi-video-phy"; syscon = <&pmu_system_controller>; #phy-cells = <1>; }; diff --git a/arch/arm/boot/dts/exynos5422-odroidhc1.dts b/arch/arm/boot/dts/exynos5422-odroidhc1.dts index 88f5c150a30a17df075a166472572e6724cd42d4..a1871d4a0f2a9fd7ae47d96a50d5ffe45f7f9489 100644 --- a/arch/arm/boot/dts/exynos5422-odroidhc1.dts +++ b/arch/arm/boot/dts/exynos5422-odroidhc1.dts @@ -29,7 +29,7 @@ blueled { thermal-zones { cpu0_thermal: cpu0-thermal { - thermal-sensors = <&tmu_cpu0 0>; + thermal-sensors = <&tmu_cpu0>; trips { cpu0_alert0: cpu-alert-0 
{ temperature = <70000>; /* millicelsius */ @@ -84,7 +84,7 @@ map1 { }; }; cpu1_thermal: cpu1-thermal { - thermal-sensors = <&tmu_cpu1 0>; + thermal-sensors = <&tmu_cpu1>; trips { cpu1_alert0: cpu-alert-0 { temperature = <70000>; @@ -128,7 +128,7 @@ map1 { }; }; cpu2_thermal: cpu2-thermal { - thermal-sensors = <&tmu_cpu2 0>; + thermal-sensors = <&tmu_cpu2>; trips { cpu2_alert0: cpu-alert-0 { temperature = <70000>; @@ -172,7 +172,7 @@ map1 { }; }; cpu3_thermal: cpu3-thermal { - thermal-sensors = <&tmu_cpu3 0>; + thermal-sensors = <&tmu_cpu3>; trips { cpu3_alert0: cpu-alert-0 { temperature = <70000>; @@ -216,7 +216,7 @@ map1 { }; }; gpu_thermal: gpu-thermal { - thermal-sensors = <&tmu_gpu 0>; + thermal-sensors = <&tmu_gpu>; trips { gpu_alert0: gpu-alert-0 { temperature = <70000>; diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi index 5da2d81e3be274744c534c32f967a24791983e3a..099ed4384be891123fe3dbec8617c67b8ab73fa8 100644 --- a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi +++ b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi @@ -50,7 +50,7 @@ fan0: pwm-fan { thermal-zones { cpu0_thermal: cpu0-thermal { - thermal-sensors = <&tmu_cpu0 0>; + thermal-sensors = <&tmu_cpu0>; polling-delay-passive = <250>; polling-delay = <0>; trips { @@ -139,7 +139,7 @@ cpu0_cooling_map4: map4 { }; }; cpu1_thermal: cpu1-thermal { - thermal-sensors = <&tmu_cpu1 0>; + thermal-sensors = <&tmu_cpu1>; polling-delay-passive = <250>; polling-delay = <0>; trips { @@ -212,7 +212,7 @@ cpu1_cooling_map4: map4 { }; }; cpu2_thermal: cpu2-thermal { - thermal-sensors = <&tmu_cpu2 0>; + thermal-sensors = <&tmu_cpu2>; polling-delay-passive = <250>; polling-delay = <0>; trips { @@ -285,7 +285,7 @@ cpu2_cooling_map4: map4 { }; }; cpu3_thermal: cpu3-thermal { - thermal-sensors = <&tmu_cpu3 0>; + thermal-sensors = <&tmu_cpu3>; polling-delay-passive = <250>; polling-delay = <0>; trips { @@ -358,7 +358,7 @@ cpu3_cooling_map4: map4 { }; }; gpu_thermal: gpu-thermal { - thermal-sensors = <&tmu_gpu 0>; + thermal-sensors = <&tmu_gpu>; polling-delay-passive = <250>; polling-delay = <0>; trips { diff --git a/arch/arm/boot/dts/imx53-ppd.dts b/arch/arm/boot/dts/imx53-ppd.dts index 006fbd7f543221e0dd0c9bc4df6373f26decf19e..54e39db447c4db377e418642ef94625ff654b6c9 100644 --- a/arch/arm/boot/dts/imx53-ppd.dts +++ b/arch/arm/boot/dts/imx53-ppd.dts @@ -487,7 +487,7 @@ &i2c1 { scl-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>; status = "okay"; - i2c-switch@70 { + i2c-mux@70 { compatible = "nxp,pca9547"; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm/boot/dts/imx6qdl-gw560x.dtsi b/arch/arm/boot/dts/imx6qdl-gw560x.dtsi index 093a219a77ae188fa300cd81311d8b9ecc30a650..f520e337698af9afcafb176551e439df6a551c95 100644 --- a/arch/arm/boot/dts/imx6qdl-gw560x.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw560x.dtsi @@ -634,7 +634,6 @@ &ssi1 { &uart1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_uart1>; - uart-has-rtscts; rts-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6ul-pico-dwarf.dts b/arch/arm/boot/dts/imx6ul-pico-dwarf.dts index 162dc259edc8c5cb0c14b5c2b7dd9b948c861b61..5a74c7f68eb62dbf03b1af91a3b83241b5137717 100644 --- a/arch/arm/boot/dts/imx6ul-pico-dwarf.dts +++ b/arch/arm/boot/dts/imx6ul-pico-dwarf.dts @@ -32,7 +32,7 @@ sys_mclk: clock-sys-mclk { }; &i2c2 { - clock_frequency = <100000>; + clock-frequency = <100000>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c2>; status = "okay"; diff --git 
a/arch/arm/boot/dts/imx7d-pico-dwarf.dts b/arch/arm/boot/dts/imx7d-pico-dwarf.dts index 5162fe227d1ea3eedd035577970f01ce70e69f54..fdc10563f1473eed1f9cd7aaec7d7c59820db1f1 100644 --- a/arch/arm/boot/dts/imx7d-pico-dwarf.dts +++ b/arch/arm/boot/dts/imx7d-pico-dwarf.dts @@ -32,7 +32,7 @@ sys_mclk: clock-sys-mclk { }; &i2c1 { - clock_frequency = <100000>; + clock-frequency = <100000>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c1>; status = "okay"; @@ -52,7 +52,7 @@ pressure-sensor@60 { }; &i2c4 { - clock_frequency = <100000>; + clock-frequency = <100000>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c1>; status = "okay"; diff --git a/arch/arm/boot/dts/imx7d-pico-nymph.dts b/arch/arm/boot/dts/imx7d-pico-nymph.dts index 104a85254adbbd8688f5294205e08b6250972bca..5afb1674e012518add059d5936fd438977c5a7e8 100644 --- a/arch/arm/boot/dts/imx7d-pico-nymph.dts +++ b/arch/arm/boot/dts/imx7d-pico-nymph.dts @@ -43,7 +43,7 @@ sys_mclk: clock-sys-mclk { }; &i2c1 { - clock_frequency = <100000>; + clock-frequency = <100000>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c1>; status = "okay"; @@ -64,7 +64,7 @@ adc@52 { }; &i2c2 { - clock_frequency = <100000>; + clock-frequency = <100000>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c2>; status = "okay"; diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi index 9e1b0af0aa43f28198ffe7e90f9879d686a5abc9..43b39ad9ddceeded13edc3b2c7e6d51f940a1c39 100644 --- a/arch/arm/boot/dts/imx7s.dtsi +++ b/arch/arm/boot/dts/imx7s.dtsi @@ -494,7 +494,7 @@ gpr: iomuxc-gpr@30340000 { mux: mux-controller { compatible = "mmio-mux"; - #mux-control-cells = <0>; + #mux-control-cells = <1>; mux-reg-masks = <0x14 0x00000010>; }; diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi index 9051fb4a267d4f9daac2db4d034945605172bbb4..aab28161b9ae90844026d5035ded62439a8b2c7d 100644 --- a/arch/arm/boot/dts/rk3288.dtsi +++ b/arch/arm/boot/dts/rk3288.dtsi @@ -1203,6 +1203,7 @@ edp: dp@ff970000 { clock-names = "dp", "pclk"; phys = <&edp_phy>; phy-names = "dp"; + power-domains = <&power RK3288_PD_VIO>; resets = <&cru SRST_EDP>; reset-names = "dp"; rockchip,grf = <&grf>; diff --git a/arch/arm/boot/dts/sam9x60.dtsi b/arch/arm/boot/dts/sam9x60.dtsi index ec45ced3cde68eb3492e619d31d261eaaf27248f..e1e0dec8cc1f2d2da8d33ff6fb6e54d3419118c8 100644 --- a/arch/arm/boot/dts/sam9x60.dtsi +++ b/arch/arm/boot/dts/sam9x60.dtsi @@ -567,7 +567,7 @@ pmecc: ecc-engine@ffffe000 { mpddrc: mpddrc@ffffe800 { compatible = "microchip,sam9x60-ddramc", "atmel,sama5d3-ddramc"; reg = <0xffffe800 0x200>; - clocks = <&pmc PMC_TYPE_SYSTEM 2>, <&pmc PMC_TYPE_CORE PMC_MCK>; + clocks = <&pmc PMC_TYPE_SYSTEM 2>, <&pmc PMC_TYPE_PERIPHERAL 49>; clock-names = "ddrck", "mpddr"; }; diff --git a/arch/arm/boot/dts/spear320-hmi.dts b/arch/arm/boot/dts/spear320-hmi.dts index 367ba48aac3e56b1f6e6da4383f685b67c961077..5c562fb4886f4d3edd9fa2a8201f7827685aad3c 100644 --- a/arch/arm/boot/dts/spear320-hmi.dts +++ b/arch/arm/boot/dts/spear320-hmi.dts @@ -242,7 +242,7 @@ stmpe811@41 { irq-trigger = <0x1>; stmpegpio: stmpe-gpio { - compatible = "stmpe,gpio"; + compatible = "st,stmpe-gpio"; reg = <0>; gpio-controller; #gpio-cells = <2>; diff --git a/arch/arm/boot/dts/sun8i-h3-nanopi-duo2.dts b/arch/arm/boot/dts/sun8i-h3-nanopi-duo2.dts index 6b149271ef13f6dc4e1867aa6fdb962953c45a0c..8722fdf77ebc2f7dece2a9061fc7114471327818 100644 --- a/arch/arm/boot/dts/sun8i-h3-nanopi-duo2.dts +++ b/arch/arm/boot/dts/sun8i-h3-nanopi-duo2.dts @@ -57,7 +57,7 @@ reg_vdd_cpux: vdd-cpux-regulator { 
regulator-ramp-delay = <50>; /* 4ms */ enable-active-high; - enable-gpio = <&r_pio 0 8 GPIO_ACTIVE_HIGH>; /* PL8 */ + enable-gpios = <&r_pio 0 8 GPIO_ACTIVE_HIGH>; /* PL8 */ gpios = <&r_pio 0 6 GPIO_ACTIVE_HIGH>; /* PL6 */ gpios-states = <0x1>; states = <1100000 0>, <1300000 1>; diff --git a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts index 6f1e0f0d4f0aeac691de5f16325e26b7396f0906..073f5d196ca9b385726f99cb26f6d988d7bb4107 100644 --- a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts +++ b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts @@ -345,7 +345,7 @@ gpio6: io-expander@22 { }; &i2c2 { - tca9548@70 { + i2c-mux@70 { compatible = "nxp,pca9548"; pinctrl-0 = <&pinctrl_i2c_mux_reset>; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts b/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts index de79dcfd32e626cd269f637e2ec9343ca858537f..ba2001f3731580d2f5cd73e14cab5b92e6a9642b 100644 --- a/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts +++ b/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts @@ -340,7 +340,7 @@ eeprom@50 { }; &i2c2 { - tca9548@70 { + i2c-mux@70 { compatible = "nxp,pca9548"; pinctrl-0 = <&pinctrl_i2c_mux_reset>; pinctrl-names = "default"; diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index a531afad87fdb984fc8444e9a9e971c7100dff73..7878c33e188d7e95d3154d71937f913ba2d77f59 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -348,7 +348,7 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr) if (panic_on_oops) panic("Fatal exception"); if (signr) - do_exit(signr); + make_task_dead(signr); } /* diff --git a/arch/arm/mach-imx/cpu-imx25.c b/arch/arm/mach-imx/cpu-imx25.c index b2e1963f473ded80a6a64ade9b4fe685a907baae..2ee2d2813d577d57fb9ae2cd59d81cf784699186 100644 --- a/arch/arm/mach-imx/cpu-imx25.c +++ b/arch/arm/mach-imx/cpu-imx25.c @@ -23,6 +23,7 @@ static int mx25_read_cpu_rev(void) np = of_find_compatible_node(NULL, NULL, "fsl,imx25-iim"); iim_base = of_iomap(np, 0); + of_node_put(np); BUG_ON(!iim_base); rev = readl(iim_base + MXC_IIMSREV); iounmap(iim_base); diff --git a/arch/arm/mach-imx/cpu-imx27.c b/arch/arm/mach-imx/cpu-imx27.c index bf70e13bbe9ee0ad41155a407006becf8f51d9f2..1d2893908368380499119e4ab30d955c32245053 100644 --- a/arch/arm/mach-imx/cpu-imx27.c +++ b/arch/arm/mach-imx/cpu-imx27.c @@ -28,6 +28,7 @@ static int mx27_read_cpu_rev(void) np = of_find_compatible_node(NULL, NULL, "fsl,imx27-ccm"); ccm_base = of_iomap(np, 0); + of_node_put(np); BUG_ON(!ccm_base); /* * now we have access to the IO registers. 
As we need diff --git a/arch/arm/mach-imx/cpu-imx31.c b/arch/arm/mach-imx/cpu-imx31.c index b9c24b851d1abd1fd18a7e30199b048335addc85..35c544924e509593759a1f59abca62173bea1c1b 100644 --- a/arch/arm/mach-imx/cpu-imx31.c +++ b/arch/arm/mach-imx/cpu-imx31.c @@ -39,6 +39,7 @@ static int mx31_read_cpu_rev(void) np = of_find_compatible_node(NULL, NULL, "fsl,imx31-iim"); iim_base = of_iomap(np, 0); + of_node_put(np); BUG_ON(!iim_base); /* read SREV register from IIM module */ diff --git a/arch/arm/mach-imx/cpu-imx35.c b/arch/arm/mach-imx/cpu-imx35.c index 80e7d8ab9f1bb81ad4a70fb8805a45bb170ff01f..1fe75b39c2d995d2aeca9ff923f561e0ba3c8bbd 100644 --- a/arch/arm/mach-imx/cpu-imx35.c +++ b/arch/arm/mach-imx/cpu-imx35.c @@ -21,6 +21,7 @@ static int mx35_read_cpu_rev(void) np = of_find_compatible_node(NULL, NULL, "fsl,imx35-iim"); iim_base = of_iomap(np, 0); + of_node_put(np); BUG_ON(!iim_base); rev = imx_readl(iim_base + MXC_IIMSREV); diff --git a/arch/arm/mach-imx/cpu-imx5.c b/arch/arm/mach-imx/cpu-imx5.c index ad56263778f936d2e1f0f9a2e6a548b201da7d1a..a67c89bf155dd3723a7406caac6ed3bf82ad0f98 100644 --- a/arch/arm/mach-imx/cpu-imx5.c +++ b/arch/arm/mach-imx/cpu-imx5.c @@ -28,6 +28,7 @@ static u32 imx5_read_srev_reg(const char *compat) np = of_find_compatible_node(NULL, NULL, compat); iim_base = of_iomap(np, 0); + of_node_put(np); WARN_ON(!iim_base); srev = readl(iim_base + IIM_SREV) & 0xff; diff --git a/arch/arm/mach-imx/mmdc.c b/arch/arm/mach-imx/mmdc.c index af12668d0bf51c173014eaf2e69afd0f9387e9ff..b9efe9da06e0bb55c85ca478cbfc04bc4fedade7 100644 --- a/arch/arm/mach-imx/mmdc.c +++ b/arch/arm/mach-imx/mmdc.c @@ -99,6 +99,7 @@ struct mmdc_pmu { cpumask_t cpu; struct hrtimer hrtimer; unsigned int active_events; + int id; struct device *dev; struct perf_event *mmdc_events[MMDC_NUM_COUNTERS]; struct hlist_node node; @@ -433,8 +434,6 @@ static enum hrtimer_restart mmdc_pmu_timer_handler(struct hrtimer *hrtimer) static int mmdc_pmu_init(struct mmdc_pmu *pmu_mmdc, void __iomem *mmdc_base, struct device *dev) { - int mmdc_num; - *pmu_mmdc = (struct mmdc_pmu) { .pmu = (struct pmu) { .task_ctx_nr = perf_invalid_context, @@ -452,15 +451,16 @@ static int mmdc_pmu_init(struct mmdc_pmu *pmu_mmdc, .active_events = 0, }; - mmdc_num = ida_simple_get(&mmdc_ida, 0, 0, GFP_KERNEL); + pmu_mmdc->id = ida_simple_get(&mmdc_ida, 0, 0, GFP_KERNEL); - return mmdc_num; + return pmu_mmdc->id; } static int imx_mmdc_remove(struct platform_device *pdev) { struct mmdc_pmu *pmu_mmdc = platform_get_drvdata(pdev); + ida_simple_remove(&mmdc_ida, pmu_mmdc->id); cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node); perf_pmu_unregister(&pmu_mmdc->pmu); iounmap(pmu_mmdc->mmdc_base); @@ -474,7 +474,6 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b { struct mmdc_pmu *pmu_mmdc; char *name; - int mmdc_num; int ret; const struct of_device_id *of_id = of_match_device(imx_mmdc_dt_ids, &pdev->dev); @@ -497,14 +496,14 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b cpuhp_mmdc_state = ret; } - mmdc_num = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev); - pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk; - if (mmdc_num == 0) - name = "mmdc"; - else - name = devm_kasprintf(&pdev->dev, - GFP_KERNEL, "mmdc%d", mmdc_num); + ret = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev); + if (ret < 0) + goto pmu_free; + name = devm_kasprintf(&pdev->dev, + GFP_KERNEL, "mmdc%d", ret); + + pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk; pmu_mmdc->devtype_data = (struct fsl_mmdc_devtype_data 
*)of_id->data; hrtimer_init(&pmu_mmdc->hrtimer, CLOCK_MONOTONIC, @@ -525,6 +524,7 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b pmu_register_err: pr_warn("MMDC Perf PMU failed (%d), disabled\n", ret); + ida_simple_remove(&mmdc_ida, pmu_mmdc->id); cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node); hrtimer_cancel(&pmu_mmdc->hrtimer); pmu_free: diff --git a/arch/arm/mach-omap1/timer.c b/arch/arm/mach-omap1/timer.c index 97fc2096b9709104cdb4a539cd6bfdca10563543..05f016d5e9f67d0e1f3832faa3104123c42fb35b 100644 --- a/arch/arm/mach-omap1/timer.c +++ b/arch/arm/mach-omap1/timer.c @@ -165,7 +165,7 @@ static int __init omap1_dm_timer_init(void) kfree(pdata); err_free_pdev: - platform_device_unregister(pdev); + platform_device_put(pdev); return ret; } diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c index 620ba69c8f114facc8ae5a626c6f0b0204d5e2d7..5677c4a08f37650c819aba6bf86243ad6e095657 100644 --- a/arch/arm/mach-omap2/timer.c +++ b/arch/arm/mach-omap2/timer.c @@ -76,6 +76,7 @@ static void __init realtime_counter_init(void) } rate = clk_get_rate(sys_clk); + clk_put(sys_clk); if (soc_is_dra7xx()) { /* diff --git a/arch/arm/mach-s3c/s3c64xx.c b/arch/arm/mach-s3c/s3c64xx.c index 4dfb648142f2aca66e879323888e33ceb2485513..17f0065031490c828e58903d0eb213d099aa1637 100644 --- a/arch/arm/mach-s3c/s3c64xx.c +++ b/arch/arm/mach-s3c/s3c64xx.c @@ -173,7 +173,8 @@ static struct samsung_pwm_variant s3c64xx_pwm_variant = { .tclk_mask = (1 << 7) | (1 << 6) | (1 << 5), }; -void __init s3c64xx_set_timer_source(unsigned int event, unsigned int source) +void __init s3c64xx_set_timer_source(enum s3c64xx_timer_mode event, + enum s3c64xx_timer_mode source) { s3c64xx_pwm_variant.output_mask = BIT(SAMSUNG_PWM_NUM) - 1; s3c64xx_pwm_variant.output_mask &= ~(BIT(event) | BIT(source)); diff --git a/arch/arm/mach-zynq/slcr.c b/arch/arm/mach-zynq/slcr.c index 37707614885a5050828b2ae3cd8b480666f346bc..9765b3f4c2fc5f7277732b202e391b8acd4e92bb 100644 --- a/arch/arm/mach-zynq/slcr.c +++ b/arch/arm/mach-zynq/slcr.c @@ -213,6 +213,7 @@ int __init zynq_early_slcr_init(void) zynq_slcr_regmap = syscon_regmap_lookup_by_compatible("xlnx,zynq-slcr"); if (IS_ERR(zynq_slcr_regmap)) { pr_err("%s: failed to find zynq-slcr\n", __func__); + of_node_put(np); return -ENODEV; } diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index bc8779d54a64051d4648de6c60ff053d9652226d..bf1a0c618c49bb47029c2080aaa963f083c970de 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -111,7 +111,7 @@ static void die_kernel_fault(const char *msg, struct mm_struct *mm, show_pte(KERN_ALERT, mm, addr); die("Oops", regs, fsr); bust_spinlocks(0); - do_exit(SIGKILL); + make_task_dead(SIGKILL); } /* diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 959f0570173847366186fbe705c54a75791afb89..c4d5b3bacf6449c48836741d1a2cb6d51eb5c0c1 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -161,7 +161,7 @@ void __init paging_init(const struct machine_desc *mdesc) mpu_setup(); /* allocate the zero page. 
*/ - zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + zero_page = (void *)memblock_alloc(PAGE_SIZE, PAGE_SIZE); if (!zero_page) panic("%s: Failed to allocate %lu bytes align=0x%lx\n", __func__, PAGE_SIZE, PAGE_SIZE); diff --git a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi index fae48efae83e9d8dae75f41c5bf2927215f99be6..c892b252e5b0c2dc849780c1df84d50e552191c7 100644 --- a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi @@ -151,7 +151,7 @@ scpi { scpi_clocks: clocks { compatible = "arm,scpi-clocks"; - scpi_dvfs: clock-controller { + scpi_dvfs: clocks-0 { compatible = "arm,scpi-dvfs-clocks"; #clock-cells = <1>; clock-indices = <0>; @@ -160,7 +160,7 @@ scpi_dvfs: clock-controller { }; scpi_sensors: sensors { - compatible = "amlogic,meson-gxbb-scpi-sensors"; + compatible = "amlogic,meson-gxbb-scpi-sensors", "arm,scpi-sensors"; #thermal-sensor-cells = <1>; }; }; @@ -1754,7 +1754,7 @@ apb: bus@ffe00000 { sd_emmc_b: sd@5000 { compatible = "amlogic,meson-axg-mmc"; reg = <0x0 0x5000 0x0 0x800>; - interrupts = ; + interrupts = ; status = "disabled"; clocks = <&clkc CLKID_SD_EMMC_B>, <&clkc CLKID_SD_EMMC_B_CLK0>, @@ -1766,7 +1766,7 @@ sd_emmc_b: sd@5000 { sd_emmc_c: mmc@7000 { compatible = "amlogic,meson-axg-mmc"; reg = <0x0 0x7000 0x0 0x800>; - interrupts = ; + interrupts = ; status = "disabled"; clocks = <&clkc CLKID_SD_EMMC_C>, <&clkc CLKID_SD_EMMC_C_CLK0>, diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi index 075153a4d49fc597a616467f3decf8135125c7db..c0defb36592d0bfb4ce84517f1be3fe7db59c901 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi @@ -1727,7 +1727,7 @@ int_mdio: mdio@1 { #address-cells = <1>; #size-cells = <0>; - internal_ephy: ethernet_phy@8 { + internal_ephy: ethernet-phy@8 { compatible = "ethernet-phy-id0180.3301", "ethernet-phy-ieee802.3-c22"; interrupts = ; @@ -2317,7 +2317,7 @@ uart_A: serial@24000 { sd_emmc_a: sd@ffe03000 { compatible = "amlogic,meson-axg-mmc"; reg = <0x0 0xffe03000 0x0 0x800>; - interrupts = ; + interrupts = ; status = "disabled"; clocks = <&clkc CLKID_SD_EMMC_A>, <&clkc CLKID_SD_EMMC_A_CLK0>, @@ -2329,7 +2329,7 @@ sd_emmc_a: sd@ffe03000 { sd_emmc_b: sd@ffe05000 { compatible = "amlogic,meson-axg-mmc"; reg = <0x0 0xffe05000 0x0 0x800>; - interrupts = ; + interrupts = ; status = "disabled"; clocks = <&clkc CLKID_SD_EMMC_B>, <&clkc CLKID_SD_EMMC_B_CLK0>, @@ -2341,7 +2341,7 @@ sd_emmc_b: sd@ffe05000 { sd_emmc_c: mmc@ffe07000 { compatible = "amlogic,meson-axg-mmc"; reg = <0x0 0xffe07000 0x0 0x800>; - interrupts = ; + interrupts = ; status = "disabled"; clocks = <&clkc CLKID_SD_EMMC_C>, <&clkc CLKID_SD_EMMC_C_CLK0>, diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi index fb0ab27d1f642dbe5e7100f737ea64b7a69d5eb1..6eaceb717d6172f880d011856f2684b6c1f6a335 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi @@ -57,26 +57,6 @@ cpu_opp_table: opp-table { compatible = "operating-points-v2"; opp-shared; - opp-100000000 { - opp-hz = /bits/ 64 <100000000>; - opp-microvolt = <731000>; - }; - - opp-250000000 { - opp-hz = /bits/ 64 <250000000>; - opp-microvolt = <731000>; - }; - - opp-500000000 { - opp-hz = /bits/ 64 <500000000>; - opp-microvolt = <731000>; - }; - - opp-667000000 { - opp-hz = /bits/ 64 <666666666>; - opp-microvolt = <731000>; - }; - 
opp-1000000000 { opp-hz = /bits/ 64 <1000000000>; opp-microvolt = <731000>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi index c2480bab8d3374f13a90368d9671044d0da1d229..27e964bfa947a8c51a4e3b66fd1bb056509e273d 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi @@ -17,7 +17,7 @@ adc-keys { io-channel-names = "buttons"; keyup-threshold-microvolt = <1800000>; - update-button { + button-update { label = "update"; linux,code = ; press-threshold-microvolt = <1300000>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi index 47cbb0a1eb183ca0c7993f9f69337a47f7306a81..4c7131526c4d3a4a7ef623d0d2ffa95a7be873ba 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi @@ -226,7 +226,7 @@ sn: sn@14 { reg = <0x14 0x10>; }; - eth_mac: eth_mac@34 { + eth_mac: eth-mac@34 { reg = <0x34 0x10>; }; @@ -243,7 +243,7 @@ scpi { scpi_clocks: clocks { compatible = "arm,scpi-clocks"; - scpi_dvfs: scpi_clocks@0 { + scpi_dvfs: clocks-0 { compatible = "arm,scpi-dvfs-clocks"; #clock-cells = <1>; clock-indices = <0>; @@ -524,7 +524,7 @@ periphs: bus@c8834000 { #size-cells = <2>; ranges = <0x0 0x0 0x0 0xc8834000 0x0 0x2000>; - hwrng: rng { + hwrng: rng@0 { compatible = "amlogic,meson-rng"; reg = <0x0 0x0 0x0 0x4>; }; @@ -595,21 +595,21 @@ apb: apb@d0000000 { sd_emmc_a: mmc@70000 { compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc"; reg = <0x0 0x70000 0x0 0x800>; - interrupts = ; + interrupts = ; status = "disabled"; }; sd_emmc_b: mmc@72000 { compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc"; reg = <0x0 0x72000 0x0 0x800>; - interrupts = ; + interrupts = ; status = "disabled"; }; sd_emmc_c: mmc@74000 { compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc"; reg = <0x0 0x74000 0x0 0x800>; - interrupts = ; + interrupts = ; status = "disabled"; }; }; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-kii-pro.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-kii-pro.dts index e8394a8269ee15e7f6b8aff89eece13a911e236c..802faf7e4e3cbb471308c391b1342246e45ec032 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-kii-pro.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-kii-pro.dts @@ -16,7 +16,7 @@ / { leds { compatible = "gpio-leds"; - status { + led { gpios = <&gpio_ao GPIOAO_13 GPIO_ACTIVE_LOW>; default-state = "off"; color = ; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-phicomm-n1.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-phicomm-n1.dts index 9ef210f17b4aae71be8b29fb206b00a4658538d5..393d3cb33b9ee382e88eb8ac32b4d5ae54531606 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-phicomm-n1.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-phicomm-n1.dts @@ -18,7 +18,7 @@ cvbs-connector { leds { compatible = "gpio-leds"; - status { + led { label = "n1:white:status"; gpios = <&gpio_ao GPIOAO_9 GPIO_ACTIVE_HIGH>; default-state = "on"; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-sml5442tw.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-sml5442tw.dts index 0b95e9ecbef0a4081817aec78317779111621757..ca3fd6b67b9404d53f2ad9b28e8a94c717955a69 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-sml5442tw.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-sml5442tw.dts @@ -75,6 +75,5 @@ bluetooth { enable-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>; max-speed = <2000000>; clocks = <&wifi32k>; - clock-names = "lpo"; }; }; 
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi index c3ac531c4f84af6141f69ab9228a80969e1fd232..35002293505224af4f6bf81124d649322cb0e1b1 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi @@ -759,7 +759,7 @@ mux { }; }; - eth-phy-mux { + eth-phy-mux@55c { compatible = "mdio-mux-mmioreg", "mdio-mux"; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi index 5667009aae13a2856c90e825a012857dd09e6d6b..674a0ab8a53963c94f2ad156e914b12494442e24 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi @@ -70,7 +70,7 @@ sound { &ecspi2 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_espi2>; - cs-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>; + cs-gpios = <&gpio5 13 GPIO_ACTIVE_LOW>; status = "okay"; eeprom@0 { @@ -187,7 +187,7 @@ pinctrl_espi2: espi2grp { MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0x82 MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0x82 MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0x82 - MX8MM_IOMUXC_ECSPI1_SS0_GPIO5_IO9 0x41 + MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0x41 >; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h index a003e6af33533d7f00bbba92a3f410c48c585398..56271abfb7e09414485638c585ba739a66315dfc 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h +++ b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h @@ -601,7 +601,7 @@ #define MX8MM_IOMUXC_UART1_RXD_GPIO5_IO22 0x234 0x49C 0x000 0x5 0x0 #define MX8MM_IOMUXC_UART1_RXD_TPSMP_HDATA24 0x234 0x49C 0x000 0x7 0x0 #define MX8MM_IOMUXC_UART1_TXD_UART1_DCE_TX 0x238 0x4A0 0x000 0x0 0x0 -#define MX8MM_IOMUXC_UART1_TXD_UART1_DTE_RX 0x238 0x4A0 0x4F4 0x0 0x0 +#define MX8MM_IOMUXC_UART1_TXD_UART1_DTE_RX 0x238 0x4A0 0x4F4 0x0 0x1 #define MX8MM_IOMUXC_UART1_TXD_ECSPI3_MOSI 0x238 0x4A0 0x000 0x1 0x0 #define MX8MM_IOMUXC_UART1_TXD_GPIO5_IO23 0x238 0x4A0 0x000 0x5 0x0 #define MX8MM_IOMUXC_UART1_TXD_TPSMP_HDATA25 0x238 0x4A0 0x000 0x7 0x0 diff --git a/arch/arm64/boot/dts/freescale/imx8mq-thor96.dts b/arch/arm64/boot/dts/freescale/imx8mq-thor96.dts index 5d5aa6537225fe79b0c270e6e00bbc34d5f682d5..6e6182709d2201c0155c9c0ef9085355956d90ee 100644 --- a/arch/arm64/boot/dts/freescale/imx8mq-thor96.dts +++ b/arch/arm64/boot/dts/freescale/imx8mq-thor96.dts @@ -339,7 +339,7 @@ &usdhc1 { bus-width = <4>; non-removable; no-sd; - no-emmc; + no-mmc; status = "okay"; brcmf: wifi@1 { @@ -359,7 +359,7 @@ &usdhc2 { cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>; bus-width = <4>; no-sdio; - no-emmc; + no-mmc; disable-wp; status = "okay"; }; diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi index 7c6d871538a63bfff476de69fbcda458e2610116..884930a5849a29eb8ddd04b38007818a296af405 100644 --- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi @@ -428,6 +428,7 @@ uart3: serial@11005000 { pwm: pwm@11006000 { compatible = "mediatek,mt7622-pwm"; reg = <0 0x11006000 0 0x1000>; + #pwm-cells = <2>; interrupts = ; clocks = <&topckgen CLK_TOP_PWM_SEL>, <&pericfg CLK_PERI_PWM_PD>, diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi index 08a914d3a643529517b8082f28f247bddcf72752..31bc8bae8cff84f4347514a17abb3e59640f263f 100644 --- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi +++ 
b/arch/arm64/boot/dts/mediatek/mt8183.dtsi @@ -205,6 +205,15 @@ psci { method = "smc"; }; + clk13m: fixed-factor-clock-13m { + compatible = "fixed-factor-clock"; + #clock-cells = <0>; + clocks = <&clk26m>; + clock-div = <2>; + clock-mult = <1>; + clock-output-names = "clk13m"; + }; + clk26m: oscillator { compatible = "fixed-clock"; #clock-cells = <0>; @@ -355,8 +364,7 @@ systimer: timer@10017000 { "mediatek,mt6765-timer"; reg = <0 0x10017000 0 0x1000>; interrupts = ; - clocks = <&topckgen CLK_TOP_CLK13M>; - clock-names = "clk13m"; + clocks = <&clk13m>; }; gce: mailbox@10238000 { diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi index 99e2488b92dc39a603dcafd413994a8954615f2b..e191a7bc532beb6d8dcd66c93000dc5763de79db 100644 --- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi +++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi @@ -108,7 +108,7 @@ usb1_ssphy: lane@58200 { #phy-cells = <0>; clocks = <&gcc GCC_USB1_PIPE_CLK>; clock-names = "pipe0"; - clock-output-names = "gcc_usb1_pipe_clk_src"; + clock-output-names = "usb3phy_1_cc_pipe_clk"; }; }; @@ -151,7 +151,7 @@ usb0_ssphy: lane@78200 { #phy-cells = <0>; clocks = <&gcc GCC_USB0_PIPE_CLK>; clock-names = "pipe0"; - clock-output-names = "gcc_usb0_pipe_clk_src"; + clock-output-names = "usb3phy_0_cc_pipe_clk"; }; }; @@ -167,34 +167,61 @@ qusb_phy_0: phy@79000 { resets = <&gcc GCC_QUSB2_0_PHY_BCR>; }; - pcie_phy0: phy@86000 { - compatible = "qcom,ipq8074-qmp-pcie-phy"; - reg = <0x00086000 0x1000>; - #phy-cells = <0>; - clocks = <&gcc GCC_PCIE0_PIPE_CLK>; - clock-names = "pipe_clk"; - clock-output-names = "pcie20_phy0_pipe_clk"; + pcie_qmp0: phy@84000 { + compatible = "qcom,ipq8074-qmp-gen3-pcie-phy"; + reg = <0x00084000 0x1bc>; + #address-cells = <1>; + #size-cells = <1>; + ranges; + clocks = <&gcc GCC_PCIE0_AUX_CLK>, + <&gcc GCC_PCIE0_AHB_CLK>; + clock-names = "aux", "cfg_ahb"; resets = <&gcc GCC_PCIE0_PHY_BCR>, <&gcc GCC_PCIE0PHY_PHY_BCR>; reset-names = "phy", "common"; status = "disabled"; + + pcie_phy0: phy@84200 { + reg = <0x84200 0x16c>, + <0x84400 0x200>, + <0x84800 0x1f0>, + <0x84c00 0xf4>; + #phy-cells = <0>; + #clock-cells = <0>; + clocks = <&gcc GCC_PCIE0_PIPE_CLK>; + clock-names = "pipe0"; + clock-output-names = "pcie20_phy0_pipe_clk"; + }; }; - pcie_phy1: phy@8e000 { + pcie_qmp1: phy@8e000 { compatible = "qcom,ipq8074-qmp-pcie-phy"; - reg = <0x0008e000 0x1000>; - #phy-cells = <0>; - clocks = <&gcc GCC_PCIE1_PIPE_CLK>; - clock-names = "pipe_clk"; - clock-output-names = "pcie20_phy1_pipe_clk"; + reg = <0x0008e000 0x1c4>; + #address-cells = <1>; + #size-cells = <1>; + ranges; + clocks = <&gcc GCC_PCIE1_AUX_CLK>, + <&gcc GCC_PCIE1_AHB_CLK>; + clock-names = "aux", "cfg_ahb"; resets = <&gcc GCC_PCIE1_PHY_BCR>, <&gcc GCC_PCIE1PHY_PHY_BCR>; reset-names = "phy", "common"; status = "disabled"; + + pcie_phy1: phy@8e200 { + reg = <0x8e200 0x130>, + <0x8e400 0x200>, + <0x8e800 0x1f8>; + #phy-cells = <0>; + #clock-cells = <0>; + clocks = <&gcc GCC_PCIE1_PIPE_CLK>; + clock-names = "pipe0"; + clock-output-names = "pcie20_phy1_pipe_clk"; + }; }; tlmm: pinctrl@1000000 { @@ -583,9 +610,9 @@ pcie1: pci@10000000 { phy-names = "pciephy"; ranges = <0x81000000 0 0x10200000 0x10200000 - 0 0x100000 /* downstream I/O */ - 0x82000000 0 0x10300000 0x10300000 - 0 0xd00000>; /* non-prefetchable memory */ + 0 0x10000>, /* downstream I/O */ + <0x82000000 0 0x10220000 0x10220000 + 0 0xfde0000>; /* non-prefetchable memory */ interrupts = ; interrupt-names = "msi"; @@ -628,16 +655,18 @@ IRQ_TYPE_LEVEL_HIGH>, /* int_c */ }; pcie0: pci@20000000 
{ - compatible = "qcom,pcie-ipq8074"; + compatible = "qcom,pcie-ipq8074-gen3"; reg = <0x20000000 0xf1d>, <0x20000f20 0xa8>, - <0x00080000 0x2000>, + <0x20001000 0x1000>, + <0x00080000 0x4000>, <0x20100000 0x1000>; - reg-names = "dbi", "elbi", "parf", "config"; + reg-names = "dbi", "elbi", "atu", "parf", "config"; device_type = "pci"; linux,pci-domain = <0>; bus-range = <0x00 0xff>; num-lanes = <1>; + max-link-speed = <3>; #address-cells = <3>; #size-cells = <2>; @@ -645,9 +674,9 @@ pcie0: pci@20000000 { phy-names = "pciephy"; ranges = <0x81000000 0 0x20200000 0x20200000 - 0 0x100000 /* downstream I/O */ - 0x82000000 0 0x20300000 0x20300000 - 0 0xd00000>; /* non-prefetchable memory */ + 0 0x10000>, /* downstream I/O */ + <0x82000000 0 0x20220000 0x20220000 + 0 0xfde0000>; /* non-prefetchable memory */ interrupts = ; interrupt-names = "msi"; @@ -665,28 +694,30 @@ IRQ_TYPE_LEVEL_HIGH>, /* int_c */ clocks = <&gcc GCC_SYS_NOC_PCIE0_AXI_CLK>, <&gcc GCC_PCIE0_AXI_M_CLK>, <&gcc GCC_PCIE0_AXI_S_CLK>, - <&gcc GCC_PCIE0_AHB_CLK>, - <&gcc GCC_PCIE0_AUX_CLK>; - + <&gcc GCC_PCIE0_AXI_S_BRIDGE_CLK>, + <&gcc GCC_PCIE0_RCHNG_CLK>; clock-names = "iface", "axi_m", "axi_s", - "ahb", - "aux"; + "axi_bridge", + "rchng"; + resets = <&gcc GCC_PCIE0_PIPE_ARES>, <&gcc GCC_PCIE0_SLEEP_ARES>, <&gcc GCC_PCIE0_CORE_STICKY_ARES>, <&gcc GCC_PCIE0_AXI_MASTER_ARES>, <&gcc GCC_PCIE0_AXI_SLAVE_ARES>, <&gcc GCC_PCIE0_AHB_ARES>, - <&gcc GCC_PCIE0_AXI_MASTER_STICKY_ARES>; + <&gcc GCC_PCIE0_AXI_MASTER_STICKY_ARES>, + <&gcc GCC_PCIE0_AXI_SLAVE_STICKY_ARES>; reset-names = "pipe", "sleep", "sticky", "axi_m", "axi_s", "ahb", - "axi_m_sticky"; + "axi_m_sticky", + "axi_s_sticky"; status = "disabled"; }; }; diff --git a/arch/arm64/boot/dts/qcom/qcs404.dtsi b/arch/arm64/boot/dts/qcom/qcs404.dtsi index 7bddc5ebc6aa24bc5ac4020f7d4448d08014ff7d..d41f068dde1670bde0a2b69633694cf594351565 100644 --- a/arch/arm64/boot/dts/qcom/qcs404.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs404.dtsi @@ -775,7 +775,7 @@ pcie_phy: phy@7786000 { clocks = <&gcc GCC_PCIE_0_PIPE_CLK>; resets = <&gcc GCC_PCIEPHY_0_PHY_BCR>, - <&gcc 21>; + <&gcc GCC_PCIE_0_PIPE_ARES>; reset-names = "phy", "pipe"; clock-output-names = "pcie_0_pipe_clk"; @@ -1305,12 +1305,12 @@ pcie: pci@10000000 { <&gcc GCC_PCIE_0_SLV_AXI_CLK>; clock-names = "iface", "aux", "master_bus", "slave_bus"; - resets = <&gcc 18>, - <&gcc 17>, - <&gcc 15>, - <&gcc 19>, + resets = <&gcc GCC_PCIE_0_AXI_MASTER_ARES>, + <&gcc GCC_PCIE_0_AXI_SLAVE_ARES>, + <&gcc GCC_PCIE_0_AXI_MASTER_STICKY_ARES>, + <&gcc GCC_PCIE_0_CORE_STICKY_ARES>, <&gcc GCC_PCIE_0_BCR>, - <&gcc 16>; + <&gcc GCC_PCIE_0_AHB_ARES>; reset-names = "axi_m", "axi_s", "axi_m_sticky", diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi index c71f3afc1cc9f9df0a486f71435dabdcde052bbc..eb07a882d43b3d4198838a9ddbbdc04303b4a93c 100644 --- a/arch/arm64/boot/dts/qcom/sc7180.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi @@ -3066,8 +3066,8 @@ spmi_bus: spmi@c440000 { interrupts-extended = <&pdc 1 IRQ_TYPE_LEVEL_HIGH>; qcom,ee = <0>; qcom,channel = <0>; - #address-cells = <1>; - #size-cells = <1>; + #address-cells = <2>; + #size-cells = <0>; interrupt-controller; #interrupt-cells = <4>; cell-index = <0>; diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts index c6691bdc810026868a74db3ad5eb75726f2870e2..1e889ca932e4118008f237ba9c6619e5d32f7599 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts +++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts @@ -896,7 +896,7 @@ 
sdc2_card_det_n: sd-card-det-n { }; wcd_intr_default: wcd_intr_default { - pins = <54>; + pins = "gpio54"; function = "gpio"; input-enable; diff --git a/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi b/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi index 53e1d43cbecf8352890bf5f965b2859f4f2fd9c7..663adf79471bcf4707e240fd310de5654ea562b7 100644 --- a/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi +++ b/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi @@ -399,20 +399,6 @@ wm8962_endpoint: endpoint { }; }; - /* 0 - lcd_reset */ - /* 1 - lcd_pwr */ - /* 2 - lcd_select */ - /* 3 - backlight-enable */ - /* 4 - Touch_shdwn */ - /* 5 - LCD_H_pol */ - /* 6 - lcd_V_pol */ - gpio_exp1: gpio@20 { - compatible = "onnn,pca9654"; - reg = <0x20>; - gpio-controller; - #gpio-cells = <2>; - }; - touchscreen@26 { compatible = "ilitek,ili2117"; reg = <0x26>; @@ -445,6 +431,16 @@ hd3ss3220_ep: endpoint { }; }; }; + + gpio_exp1: gpio@70 { + compatible = "nxp,pca9538"; + reg = <0x70>; + gpio-controller; + #gpio-cells = <2>; + gpio-line-names = "lcd_reset", "lcd_pwr", "lcd_select", + "backlight-enable", "Touch_shdwn", + "LCD_H_pol", "lcd_V_pol"; + }; }; &lvds0 { diff --git a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts index daa9a0c601a9f21f17faf430cd4c97216496d0b2..22ab5e1d7319d6ab042a3cb26ab92c55382a8637 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts @@ -91,7 +91,6 @@ power_led: led-0 { linux,default-trigger = "heartbeat"; gpios = <&rk805 1 GPIO_ACTIVE_LOW>; default-state = "on"; - mode = <0x23>; }; user_led: led-1 { @@ -99,7 +98,6 @@ user_led: led-1 { linux,default-trigger = "mmc1"; gpios = <&rk805 0 GPIO_ACTIVE_LOW>; default-state = "off"; - mode = <0x05>; }; }; }; diff --git a/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts index e8a4143e1c2415e155eb2926c3af6c8cc6480301..909ab6661aef54f541606b89664eff6250c38e26 100644 --- a/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts +++ b/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts @@ -16,7 +16,7 @@ chosen { }; }; -&wkup_pmx0 { +&wkup_pmx2 { mcu_cpsw_pins_default: mcu-cpsw-pins-default { pinctrl-single,pins = < J721E_WKUP_IOPAD(0x0068, PIN_OUTPUT, 0) /* MCU_RGMII1_TX_CTL */ diff --git a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi index eb2a78a53512cf596d7e5cda68b6f8e6098696f6..7f252cc6eb3794571df7040268b10f4b775b99e9 100644 --- a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi @@ -56,7 +56,34 @@ chipid@43000014 { wkup_pmx0: pinctrl@4301c000 { compatible = "pinctrl-single"; /* Proxy 0 addressing */ - reg = <0x00 0x4301c000 0x00 0x178>; + reg = <0x00 0x4301c000 0x00 0x34>; + #pinctrl-cells = <1>; + pinctrl-single,register-width = <32>; + pinctrl-single,function-mask = <0xffffffff>; + }; + + wkup_pmx1: pinctrl@0x4301c038 { + compatible = "pinctrl-single"; + /* Proxy 0 addressing */ + reg = <0x00 0x4301c038 0x00 0x8>; + #pinctrl-cells = <1>; + pinctrl-single,register-width = <32>; + pinctrl-single,function-mask = <0xffffffff>; + }; + + wkup_pmx2: pinctrl@0x4301c068 { + compatible = "pinctrl-single"; + /* Proxy 0 addressing */ + reg = <0x00 0x4301c068 0x00 0xec>; + #pinctrl-cells = <1>; + pinctrl-single,register-width = <32>; + pinctrl-single,function-mask = <0xffffffff>; + }; + + wkup_pmx3: pinctrl@0x4301c174 { + compatible = 
"pinctrl-single"; + /* Proxy 0 addressing */ + reg = <0x00 0x4301c174 0x00 0x20>; #pinctrl-cells = <1>; pinctrl-single,register-width = <32>; pinctrl-single,function-mask = <0xffffffff>; diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index 16892f0d05ad635cd22ac0ac520e180b678ff648..538b6a1b198b99ab7a54ca5101615ecea70ff0b3 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h @@ -25,7 +25,7 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md); ({ \ efi_virtmap_load(); \ __efi_fpsimd_begin(); \ - spin_lock(&efi_rt_lock); \ + raw_spin_lock(&efi_rt_lock); \ }) #define arch_efi_call_virt(p, f, args...) \ @@ -37,12 +37,12 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md); #define arch_efi_call_virt_teardown() \ ({ \ - spin_unlock(&efi_rt_lock); \ + raw_spin_unlock(&efi_rt_lock); \ __efi_fpsimd_end(); \ efi_virtmap_unload(); \ }) -extern spinlock_t efi_rt_lock; +extern raw_spinlock_t efi_rt_lock; efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...); #define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT) diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index 72f432d23ec5cc9dc2dc385ac334bd6a33dbdeba..3ee3b3daca47b918ea9374c34863b1618e4a66bc 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -144,7 +144,7 @@ asmlinkage efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f) return s; } -DEFINE_SPINLOCK(efi_rt_lock); +DEFINE_RAW_SPINLOCK(efi_rt_lock); asmlinkage u64 *efi_rt_stack_top __ro_after_init; diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 2059d8f43f55f05d9bd93ca3515043f4ae0dcf44..2cdd53425509d7788923f2bd94fca34fc1b4abd2 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -144,7 +144,7 @@ void die(const char *str, struct pt_regs *regs, int err) raw_spin_unlock_irqrestore(&die_lock, flags); if (ret != NOTIFY_STOP) - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } static void arm64_show_signal(int signo, const char *str) diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 795d224f184ff2ce63d8427f5e70d4a9a89fd4ef..2be856731e8174c91607a662396ec649d5e3b6d4 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -293,7 +293,7 @@ static void die_kernel_fault(const char *msg, unsigned long addr, show_pte(addr); die("Oops", regs, esr); bust_spinlocks(0); - do_exit(SIGKILL); + make_task_dead(SIGKILL); } static void __do_kernel_fault(unsigned long addr, unsigned int esr, diff --git a/arch/csky/abiv1/alignment.c b/arch/csky/abiv1/alignment.c index cb2a0d94a144d381541d81da2618c46849b7345b..2df115d0e210535ec3f9c70c3c9361551dcade51 100644 --- a/arch/csky/abiv1/alignment.c +++ b/arch/csky/abiv1/alignment.c @@ -294,7 +294,7 @@ void csky_alignment(struct pt_regs *regs) __func__, opcode, rz, rx, imm, addr); show_regs(regs); bust_spinlocks(0); - do_exit(SIGKILL); + make_task_dead(SIGKILL); } force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)addr); diff --git a/arch/csky/kernel/traps.c b/arch/csky/kernel/traps.c index 22721468a04b28acee2f4795c165586f1d1c3315..15711efa14a4eac9ff3a10bbee3db3f34ffe3463 100644 --- a/arch/csky/kernel/traps.c +++ b/arch/csky/kernel/traps.c @@ -111,7 +111,7 @@ void die(struct pt_regs *regs, const char *str) if (panic_on_oops) panic("Fatal exception"); if (ret != NOTIFY_STOP) - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr) diff --git a/arch/h8300/kernel/traps.c 
b/arch/h8300/kernel/traps.c index 5d8b969cd8f3496d3975ef43922371ca14c5a0f5..cf23ccb50c17e6211f10dd04fed561eea86cc22e 100644 --- a/arch/h8300/kernel/traps.c +++ b/arch/h8300/kernel/traps.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -110,7 +111,7 @@ void die(const char *str, struct pt_regs *fp, unsigned long err) dump(fp); spin_unlock_irq(&die_lock); - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } static int kstack_depth_to_print = 24; diff --git a/arch/h8300/mm/fault.c b/arch/h8300/mm/fault.c index d4bc9c16f2df9b08862f7ea6590daace1cf0c9f4..b465441f490df87edab337ace91fdc949bbac4bf 100644 --- a/arch/h8300/mm/fault.c +++ b/arch/h8300/mm/fault.c @@ -51,7 +51,7 @@ asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address, printk(" at virtual address %08lx\n", address); if (!user_mode(regs)) die("Oops", regs, error_code); - do_exit(SIGKILL); + make_task_dead(SIGKILL); return 1; } diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c index 904134b37232f38ab05c1a18a127b70c1118d9a9..b334e807170993e1e719148190b5b804a29daeaf 100644 --- a/arch/hexagon/kernel/traps.c +++ b/arch/hexagon/kernel/traps.c @@ -218,7 +218,7 @@ int die(const char *str, struct pt_regs *regs, long err) panic("Fatal exception"); oops_exit(); - do_exit(err); + make_task_dead(err); return 0; } diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index cc0d4ce7a04551eb940873a1297d591a70c2f918..983a5125cd9f5e43f8b07a63b27feb525898fcb5 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -358,7 +358,7 @@ config ARCH_PROC_KCORE_TEXT depends on PROC_KCORE config IA64_MCA_RECOVERY - tristate "MCA recovery from errors other than TLB." + bool "MCA recovery from errors other than TLB." config IA64_PALINFO tristate "/proc/pal support" diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c index 2a40268c3d494c13ed92a5d1274bfb822b8bacc6..d9ee3b186249d8dfd5cb328b64be4c67f0df20a9 100644 --- a/arch/ia64/kernel/mca_drv.c +++ b/arch/ia64/kernel/mca_drv.c @@ -176,7 +176,7 @@ mca_handler_bh(unsigned long paddr, void *iip, unsigned long ipsr) spin_unlock(&mca_bh_lock); /* This process is about to be killed itself */ - do_exit(SIGKILL); + make_task_dead(SIGKILL); } /** diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c index e13cb905930fb754bdaf899ec6c950970377b733..753642366e12eba9fdb4850af7b4ab54bbfb25a0 100644 --- a/arch/ia64/kernel/traps.c +++ b/arch/ia64/kernel/traps.c @@ -85,7 +85,7 @@ die (const char *str, struct pt_regs *regs, long err) if (panic_on_oops) panic("Fatal exception"); - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); return 0; } diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index cd9766d2b6e0e054f71d855a8a1e46ea66913bc1..829198180ca6ff2cc30aa46b25612350b0d36457 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -274,7 +274,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re regs = NULL; bust_spinlocks(0); if (regs) - do_exit(SIGKILL); + make_task_dead(SIGKILL); return; out_of_memory: diff --git a/arch/m68k/68000/entry.S b/arch/m68k/68000/entry.S index 259b3661b614168ff8ab377587c66b1478222218..94abf3d8afc52588806f7e7007ddd6ee896b399a 100644 --- a/arch/m68k/68000/entry.S +++ b/arch/m68k/68000/entry.S @@ -47,6 +47,8 @@ do_trace: jbsr syscall_trace_enter RESTORE_SWITCH_STACK addql #4,%sp + addql #1,%d0 + jeq ret_from_exception movel %sp@(PT_OFF_ORIG_D0),%d1 movel #-ENOSYS,%d0 cmpl #NR_syscalls,%d1 diff --git a/arch/m68k/Kconfig.devices b/arch/m68k/Kconfig.devices index 
6a87b4a5fcac2ca8a07a9cb69a8a612a2dd774e0..e6e3efac184074c2b28b3c7349fa5a9685118202 100644 --- a/arch/m68k/Kconfig.devices +++ b/arch/m68k/Kconfig.devices @@ -19,6 +19,7 @@ config HEARTBEAT # We have a dedicated heartbeat LED. :-) config PROC_HARDWARE bool "/proc/hardware support" + depends on PROC_FS help Say Y here to support the /proc/hardware file, which gives you access to information about the machine you're running on, diff --git a/arch/m68k/coldfire/entry.S b/arch/m68k/coldfire/entry.S index d43a02795a4a445e18efea2d5eb386534ae8816e..f1d41a9328a2794cc13b9594bc4e882402a17813 100644 --- a/arch/m68k/coldfire/entry.S +++ b/arch/m68k/coldfire/entry.S @@ -92,6 +92,8 @@ ENTRY(system_call) jbsr syscall_trace_enter RESTORE_SWITCH_STACK addql #4,%sp + addql #1,%d0 + jeq ret_from_exception movel %d3,%a0 jbsr %a0@ movel %d0,%sp@(PT_OFF_D0) /* save the return value */ diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S index 9dd76fbb7c6b2752b0a8eefb847a65e2f26251b1..546bab6bfc273e2eec73787f80317bbf2d42f9c2 100644 --- a/arch/m68k/kernel/entry.S +++ b/arch/m68k/kernel/entry.S @@ -167,9 +167,12 @@ do_trace_entry: jbsr syscall_trace RESTORE_SWITCH_STACK addql #4,%sp + addql #1,%d0 | optimization for cmpil #-1,%d0 + jeq ret_from_syscall movel %sp@(PT_OFF_ORIG_D0),%d0 cmpl #NR_syscalls,%d0 jcs syscall + jra ret_from_syscall badsys: movel #-ENOSYS,%sp@(PT_OFF_D0) jra ret_from_syscall diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c index 9e1261462bcc52b5dd24bb36b409a276a2118a7c..b2a31afb998c2465ad53f4f1465bac4c3385dd6f 100644 --- a/arch/m68k/kernel/traps.c +++ b/arch/m68k/kernel/traps.c @@ -1136,7 +1136,7 @@ void die_if_kernel (char *str, struct pt_regs *fp, int nr) pr_crit("%s: %08x\n", str, nr); show_registers(fp); add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } asmlinkage void set_esp0(unsigned long ssp) diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c index ef46e77e97a5b35ad0315691af456ca90f1870fb..fcb3a0d8421c5f71b208cc7986952d11a7f345b9 100644 --- a/arch/m68k/mm/fault.c +++ b/arch/m68k/mm/fault.c @@ -48,7 +48,7 @@ int send_fault_sig(struct pt_regs *regs) pr_alert("Unable to handle kernel access"); pr_cont(" at virtual address %p\n", addr); die_if_kernel("Oops", regs, 0 /*error_code*/); - do_exit(SIGKILL); + make_task_dead(SIGKILL); } return 1; diff --git a/arch/microblaze/kernel/exceptions.c b/arch/microblaze/kernel/exceptions.c index cf99c411503e3571594a680a01bf0967d8e134b3..6d3a6a64422056e3e00f1008cf438c22f5dd2206 100644 --- a/arch/microblaze/kernel/exceptions.c +++ b/arch/microblaze/kernel/exceptions.c @@ -44,10 +44,10 @@ void die(const char *str, struct pt_regs *fp, long err) pr_warn("Oops: %s, sig: %ld\n", str, err); show_regs(fp); spin_unlock_irq(&die_lock); - /* do_exit() should take care of panic'ing from an interrupt + /* make_task_dead() should take care of panic'ing from an interrupt * context so we don't handle it here */ - do_exit(err); + make_task_dead(err); } /* for user application debugging */ diff --git a/arch/mips/include/asm/mach-rc32434/pci.h b/arch/mips/include/asm/mach-rc32434/pci.h index 9a6eefd127571fd6d69f55151264a463e1927089..3eb767c8a4eecd0561a9e3c0f2081d47fe769d09 100644 --- a/arch/mips/include/asm/mach-rc32434/pci.h +++ b/arch/mips/include/asm/mach-rc32434/pci.h @@ -374,7 +374,7 @@ struct pci_msu { PCI_CFG04_STAT_SSE | \ PCI_CFG04_STAT_PE) -#define KORINA_CNFG1 ((KORINA_STAT<<16)|KORINA_CMD) +#define KORINA_CNFG1 (KORINA_STAT | KORINA_CMD) #define KORINA_REVID 0 #define 
KORINA_CLASS_CODE 0 diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h index 25fa651c937d5b24315d519ccce9210af8cfc7e1..ebdf4d910af2f1429832115bb3e937c1d4d1f90b 100644 --- a/arch/mips/include/asm/syscall.h +++ b/arch/mips/include/asm/syscall.h @@ -38,7 +38,7 @@ static inline bool mips_syscall_is_indirect(struct task_struct *task, static inline long syscall_get_nr(struct task_struct *task, struct pt_regs *regs) { - return current_thread_info()->syscall; + return task_thread_info(task)->syscall; } static inline void mips_syscall_update_nr(struct task_struct *task, diff --git a/arch/mips/include/asm/vpe.h b/arch/mips/include/asm/vpe.h index 80e70dbd1f6417aaecf22871e8e82fefc130f8bf..012731546cf607f999b8a5dacf29dce164815be2 100644 --- a/arch/mips/include/asm/vpe.h +++ b/arch/mips/include/asm/vpe.h @@ -104,7 +104,6 @@ struct vpe_control { struct list_head tc_list; /* Thread contexts */ }; -extern unsigned long physical_memsize; extern struct vpe_control vpecontrol; extern const struct file_operations vpe_fops; diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index dbb3f1fc71ab602a7da797816584b148eaea143a..f659adb681bc32868132dbc4c89183c46fc69e5f 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -423,9 +423,11 @@ static void cps_shutdown_this_cpu(enum cpu_death death) wmb(); } } else { - pr_debug("Gating power to core %d\n", core); - /* Power down the core */ - cps_pm_enter_state(CPS_PM_POWER_GATED); + if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) { + pr_debug("Gating power to core %d\n", core); + /* Power down the core */ + cps_pm_enter_state(CPS_PM_POWER_GATED); + } } } diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index b1fe4518bd221b546253103f64bb044c55f95f0e..ebd0101f0958d59e3d776e92ea8b5ebb2910ea17 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -413,7 +413,7 @@ void __noreturn die(const char *str, struct pt_regs *regs) if (regs && kexec_should_crash(current)) crash_kexec(regs); - do_exit(sig); + make_task_dead(sig); } extern struct exception_table_entry __start___dbe_table[]; diff --git a/arch/mips/kernel/vpe-mt.c b/arch/mips/kernel/vpe-mt.c index 9fd7cd48ea1d2f61265146af00999f2d57393547..496ed8f362f62fda5d4f7f042118688dec95b7f8 100644 --- a/arch/mips/kernel/vpe-mt.c +++ b/arch/mips/kernel/vpe-mt.c @@ -92,12 +92,11 @@ int vpe_run(struct vpe *v) write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H); /* - * The sde-kit passes 'memsize' to __start in $a3, so set something - * here... Or set $a3 to zero and define DFLT_STACK_SIZE and - * DFLT_HEAP_SIZE when you compile your program + * We don't pass the memsize here, so VPE programs need to be + * compiled with DFLT_STACK_SIZE and DFLT_HEAP_SIZE defined. */ + mttgpr(7, 0); mttgpr(6, v->ntcs); - mttgpr(7, physical_memsize); /* set up VPE1 */ /* diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c index 3f568f5aae2d1121ee684b1b53fe257b17ba511f..2729a4b63e187845c02bc934aaf1c70a3e2b101e 100644 --- a/arch/mips/lantiq/prom.c +++ b/arch/mips/lantiq/prom.c @@ -22,12 +22,6 @@ DEFINE_SPINLOCK(ebu_lock); EXPORT_SYMBOL_GPL(ebu_lock); -/* - * This is needed by the VPE loader code, just set it to 0 and assume - * that the firmware hardcodes this value to something useful. 
- */ -unsigned long physical_memsize = 0L; - /* * this struct is filled by the soc specific detection code and holds * information about the specific soc type, revision and name diff --git a/arch/nds32/kernel/fpu.c b/arch/nds32/kernel/fpu.c index 9edd7ed7d7bf849e6d32b8036f6d6b5f1b0f37a5..701c09a668de468899632429e71e5a20ed284edf 100644 --- a/arch/nds32/kernel/fpu.c +++ b/arch/nds32/kernel/fpu.c @@ -223,7 +223,7 @@ inline void handle_fpu_exception(struct pt_regs *regs) } } else if (fpcsr & FPCSR_mskRIT) { if (!user_mode(regs)) - do_exit(SIGILL); + make_task_dead(SIGILL); si_signo = SIGILL; } diff --git a/arch/nds32/kernel/traps.c b/arch/nds32/kernel/traps.c index 6a9772ba73927696c05d5a866ef08696bd7c2bba..12cdd6549360cb200ad0ea0c53e0bce239d06334 100644 --- a/arch/nds32/kernel/traps.c +++ b/arch/nds32/kernel/traps.c @@ -185,7 +185,7 @@ void die(const char *str, struct pt_regs *regs, int err) bust_spinlocks(0); spin_unlock_irq(&die_lock); - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } EXPORT_SYMBOL(die); @@ -289,7 +289,7 @@ void unhandled_interruption(struct pt_regs *regs) pr_emerg("unhandled_interruption\n"); show_regs(regs); if (!user_mode(regs)) - do_exit(SIGKILL); + make_task_dead(SIGKILL); force_sig(SIGKILL); } @@ -300,7 +300,7 @@ void unhandled_exceptions(unsigned long entry, unsigned long addr, addr, type); show_regs(regs); if (!user_mode(regs)) - do_exit(SIGKILL); + make_task_dead(SIGKILL); force_sig(SIGKILL); } @@ -327,7 +327,7 @@ void do_revinsn(struct pt_regs *regs) pr_emerg("Reserved Instruction\n"); show_regs(regs); if (!user_mode(regs)) - do_exit(SIGILL); + make_task_dead(SIGILL); force_sig(SIGILL); } diff --git a/arch/nios2/kernel/traps.c b/arch/nios2/kernel/traps.c index b172da4eb1a9522784040d211f794ebef44f4419..86208178024f60283cfa50287b193aa1750048d1 100644 --- a/arch/nios2/kernel/traps.c +++ b/arch/nios2/kernel/traps.c @@ -37,10 +37,10 @@ void die(const char *str, struct pt_regs *regs, long err) show_regs(regs); spin_unlock_irq(&die_lock); /* - * do_exit() should take care of panic'ing from an interrupt + * make_task_dead() should take care of panic'ing from an interrupt * context so we don't handle it here */ - do_exit(err); + make_task_dead(err); } void _exception(int signo, struct pt_regs *regs, int code, unsigned long addr) diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c index 206e5325e61bc7f938969efb72fc6bf72e5a264f..fca5317f3ce177d27716a4c0da88e21e9a68ee54 100644 --- a/arch/openrisc/kernel/traps.c +++ b/arch/openrisc/kernel/traps.c @@ -212,7 +212,7 @@ void die(const char *str, struct pt_regs *regs, long err) __asm__ __volatile__("l.nop 1"); do {} while (1); #endif - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } /* This is normally the 'Oops' routine */ diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c index 665b70086685c3f4c7229bdeafaeebd9d106512b..7ed28ddcaba7d8a84c4f209bf4976008aba4e0e0 100644 --- a/arch/parisc/kernel/firmware.c +++ b/arch/parisc/kernel/firmware.c @@ -1230,7 +1230,7 @@ static char __attribute__((aligned(64))) iodc_dbuf[4096]; */ int pdc_iodc_print(const unsigned char *str, unsigned count) { - unsigned int i; + unsigned int i, found = 0; unsigned long flags; for (i = 0; i < count;) { @@ -1239,6 +1239,7 @@ int pdc_iodc_print(const unsigned char *str, unsigned count) iodc_dbuf[i+0] = '\r'; iodc_dbuf[i+1] = '\n'; i += 2; + found = 1; goto print; default: iodc_dbuf[i] = str[i]; @@ -1255,7 +1256,7 @@ int pdc_iodc_print(const unsigned char *str, unsigned count) __pa(iodc_retbuf), 0, __pa(iodc_dbuf), i, 0); 
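	/*
	 * 'found' records that a '\r' was inserted ahead of the '\n' above, so
	 * the count returned below reflects only the characters consumed from
	 * 'str' rather than the number of bytes written to iodc_dbuf.
	 */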
spin_unlock_irqrestore(&pdc_lock, flags); - return i; + return i - found; } #if !defined(BOOTLOADER) diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index 2127974982df9a029b9c8819a257debb30bb5f0c..df9b9f0591bfbeb2c18a7a361174f437fdc041f1 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c @@ -127,6 +127,12 @@ long arch_ptrace(struct task_struct *child, long request, unsigned long tmp; long ret = -EIO; + unsigned long user_regs_struct_size = sizeof(struct user_regs_struct); +#ifdef CONFIG_64BIT + if (is_compat_task()) + user_regs_struct_size /= 2; +#endif + switch (request) { /* Read the word at location addr in the USER area. For ptraced @@ -182,14 +188,14 @@ long arch_ptrace(struct task_struct *child, long request, return copy_regset_to_user(child, task_user_regset_view(current), REGSET_GENERAL, - 0, sizeof(struct user_regs_struct), + 0, user_regs_struct_size, datap); case PTRACE_SETREGS: /* Set all gp regs in the child. */ return copy_regset_from_user(child, task_user_regset_view(current), REGSET_GENERAL, - 0, sizeof(struct user_regs_struct), + 0, user_regs_struct_size, datap); case PTRACE_GETFPREGS: /* Get the child FPU state. */ @@ -303,6 +309,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, } } break; + case PTRACE_GETREGS: + case PTRACE_SETREGS: + case PTRACE_GETFPREGS: + case PTRACE_SETFPREGS: + return arch_ptrace(child, request, addr, data); default: ret = compat_ptrace_request(child, request, addr, data); diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index bce47e0fb692cb77cfc803d95b41939d2d8799c0..2fad7867af100a50d204e00989c1650ca9ec8c23 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -268,7 +268,7 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err) panic("Fatal exception"); oops_exit(); - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } /* gdb uses break 4,8 */ diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 612254141296142876ccb2dc985d910404871a58..a3f66ade09b32ac9c6dab237e6fbbd032b7b16fb 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -92,7 +92,7 @@ aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian ifeq ($(HAS_BIARCH),y) KBUILD_CFLAGS += -m$(BITS) -KBUILD_AFLAGS += -m$(BITS) -Wl,-a$(BITS) +KBUILD_AFLAGS += -m$(BITS) KBUILD_LDFLAGS += -m elf$(BITS)$(LDEMULATION) endif diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-2.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-2.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..437dab3fc017699d4e3981370f47fee464d6a958 --- /dev/null +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-2.dtsi @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later +/* + * QorIQ FMan v3 10g port #2 device tree stub [ controller @ offset 0x400000 ] + * + * Copyright 2022 Sean Anderson + * Copyright 2012 - 2015 Freescale Semiconductor Inc. 
+ */ + +fman@400000 { + fman0_rx_0x08: port@88000 { + cell-index = <0x8>; + compatible = "fsl,fman-v3-port-rx"; + reg = <0x88000 0x1000>; + fsl,fman-10g-port; + }; + + fman0_tx_0x28: port@a8000 { + cell-index = <0x28>; + compatible = "fsl,fman-v3-port-tx"; + reg = <0xa8000 0x1000>; + fsl,fman-10g-port; + }; + + ethernet@e0000 { + cell-index = <0>; + compatible = "fsl,fman-memac"; + reg = <0xe0000 0x1000>; + fsl,fman-ports = <&fman0_rx_0x08 &fman0_tx_0x28>; + ptp-timer = <&ptp_timer0>; + pcsphy-handle = <&pcsphy0>; + }; + + mdio@e1000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; + reg = <0xe1000 0x1000>; + fsl,erratum-a011043; /* must ignore read errors */ + + pcsphy0: ethernet-phy@0 { + reg = <0x0>; + }; + }; +}; diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-3.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-3.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..ad116b17850a8b9e6a23848c532f97c60e8fd7ec --- /dev/null +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-3.dtsi @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later +/* + * QorIQ FMan v3 10g port #3 device tree stub [ controller @ offset 0x400000 ] + * + * Copyright 2022 Sean Anderson + * Copyright 2012 - 2015 Freescale Semiconductor Inc. + */ + +fman@400000 { + fman0_rx_0x09: port@89000 { + cell-index = <0x9>; + compatible = "fsl,fman-v3-port-rx"; + reg = <0x89000 0x1000>; + fsl,fman-10g-port; + }; + + fman0_tx_0x29: port@a9000 { + cell-index = <0x29>; + compatible = "fsl,fman-v3-port-tx"; + reg = <0xa9000 0x1000>; + fsl,fman-10g-port; + }; + + ethernet@e2000 { + cell-index = <1>; + compatible = "fsl,fman-memac"; + reg = <0xe2000 0x1000>; + fsl,fman-ports = <&fman0_rx_0x09 &fman0_tx_0x29>; + ptp-timer = <&ptp_timer0>; + pcsphy-handle = <&pcsphy1>; + }; + + mdio@e3000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; + reg = <0xe3000 0x1000>; + fsl,erratum-a011043; /* must ignore read errors */ + + pcsphy1: ethernet-phy@0 { + reg = <0x0>; + }; + }; +}; diff --git a/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts b/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts index 73f8c998c64dfefa6859cd5bcda8ddafcdc51f38..d4f5f159d6f236b02c010013e61cdc1e1e915080 100644 --- a/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts +++ b/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts @@ -10,7 +10,6 @@ / { model = "fsl,T1040RDB-REV-A"; - compatible = "fsl,T1040RDB-REV-A"; }; &seville_port0 { diff --git a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi index ecbb447920bc65ffe83b3d4dfb2a600977e56487..27714dc2f04a541a739c4dc97bcf2b4fe78265d7 100644 --- a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi @@ -609,8 +609,8 @@ usb1: usb@211000 { /include/ "qoriq-bman1.dtsi" /include/ "qoriq-fman3-0.dtsi" -/include/ "qoriq-fman3-0-1g-0.dtsi" -/include/ "qoriq-fman3-0-1g-1.dtsi" +/include/ "qoriq-fman3-0-10g-2.dtsi" +/include/ "qoriq-fman3-0-10g-3.dtsi" /include/ "qoriq-fman3-0-1g-2.dtsi" /include/ "qoriq-fman3-0-1g-3.dtsi" /include/ "qoriq-fman3-0-1g-4.dtsi" @@ -659,3 +659,19 @@ L2_1: l2-cache-controller@c20000 { interrupts = <16 2 1 9>; }; }; + +&fman0_rx_0x08 { + /delete-property/ fsl,fman-10g-port; +}; + +&fman0_tx_0x28 { + /delete-property/ fsl,fman-10g-port; +}; + +&fman0_rx_0x09 { + /delete-property/ fsl,fman-10g-port; +}; + +&fman0_tx_0x29 { + /delete-property/ fsl,fman-10g-port; +}; diff --git 
a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 3eff6a4888e79aa4b0ccdb1f2add248f41cfc736..665d847ef9b5a5d04e867289180fb58056fc5f4a 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -1054,45 +1054,46 @@ void eeh_handle_normal_event(struct eeh_pe *pe) } pr_info("EEH: Recovery successful.\n"); - } else { - /* - * About 90% of all real-life EEH failures in the field - * are due to poorly seated PCI cards. Only 10% or so are - * due to actual, failed cards. - */ - pr_err("EEH: Unable to recover from failure from PHB#%x-PE#%x.\n" - "Please try reseating or replacing it\n", - pe->phb->global_number, pe->addr); + goto out; + } - eeh_slot_error_detail(pe, EEH_LOG_PERM); + /* + * About 90% of all real-life EEH failures in the field + * are due to poorly seated PCI cards. Only 10% or so are + * due to actual, failed cards. + */ + pr_err("EEH: Unable to recover from failure from PHB#%x-PE#%x.\n" + "Please try reseating or replacing it\n", + pe->phb->global_number, pe->addr); - /* Notify all devices that they're about to go down. */ - eeh_set_channel_state(pe, pci_channel_io_perm_failure); - eeh_set_irq_state(pe, false); - eeh_pe_report("error_detected(permanent failure)", pe, - eeh_report_failure, NULL); + eeh_slot_error_detail(pe, EEH_LOG_PERM); - /* Mark the PE to be removed permanently */ - eeh_pe_state_mark(pe, EEH_PE_REMOVED); + /* Notify all devices that they're about to go down. */ + eeh_set_irq_state(pe, false); + eeh_pe_report("error_detected(permanent failure)", pe, + eeh_report_failure, NULL); + eeh_set_channel_state(pe, pci_channel_io_perm_failure); - /* - * Shut down the device drivers for good. We mark - * all removed devices correctly to avoid access - * the their PCI config any more. - */ - if (pe->type & EEH_PE_VF) { - eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL); - eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED); - } else { - eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true); - eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED); + /* Mark the PE to be removed permanently */ + eeh_pe_state_mark(pe, EEH_PE_REMOVED); - pci_lock_rescan_remove(); - pci_hp_remove_devices(bus); - pci_unlock_rescan_remove(); - /* The passed PE should no longer be used */ - return; - } + /* + * Shut down the device drivers for good. We mark + * all removed devices correctly to avoid access + * the their PCI config any more. 
+ */ + if (pe->type & EEH_PE_VF) { + eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL); + eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED); + } else { + eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true); + eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED); + + pci_lock_rescan_remove(); + pci_hp_remove_devices(bus); + pci_unlock_rescan_remove(); + /* The passed PE should no longer be used */ + return; } out: @@ -1188,10 +1189,10 @@ void eeh_handle_special_event(void) /* Notify all devices to be down */ eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true); - eeh_set_channel_state(pe, pci_channel_io_perm_failure); eeh_pe_report( "error_detected(permanent failure)", pe, eeh_report_failure, NULL); + eeh_set_channel_state(pe, pci_channel_io_perm_failure); pci_lock_rescan_remove(); list_for_each_entry(hose, &hose_list, list_node) { diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 014229c40435a2e7ddbff7886595bd604559133d..c2e407a112a28cc7463d1a1c71e39b4ca6253e0b 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -52,10 +52,10 @@ struct rtas_t rtas = { EXPORT_SYMBOL(rtas); DEFINE_SPINLOCK(rtas_data_buf_lock); -EXPORT_SYMBOL(rtas_data_buf_lock); +EXPORT_SYMBOL_GPL(rtas_data_buf_lock); -char rtas_data_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned; -EXPORT_SYMBOL(rtas_data_buf); +char rtas_data_buf[RTAS_DATA_BUF_SIZE] __aligned(SZ_4K); +EXPORT_SYMBOL_GPL(rtas_data_buf); unsigned long rtas_rmo_buf; @@ -64,7 +64,7 @@ unsigned long rtas_rmo_buf; * This is done like this so rtas_flash can be a module. */ void (*rtas_flash_term_hook)(int); -EXPORT_SYMBOL(rtas_flash_term_hook); +EXPORT_SYMBOL_GPL(rtas_flash_term_hook); /* RTAS use home made raw locking instead of spin_lock_irqsave * because those can be called from within really nasty contexts @@ -312,7 +312,7 @@ void rtas_progress(char *s, unsigned short hex) spin_unlock(&progress_lock); } -EXPORT_SYMBOL(rtas_progress); /* needed by rtas_flash module */ +EXPORT_SYMBOL_GPL(rtas_progress); /* needed by rtas_flash module */ int rtas_token(const char *service) { @@ -322,7 +322,7 @@ int rtas_token(const char *service) tokp = of_get_property(rtas.dev, service, NULL); return tokp ? be32_to_cpu(*tokp) : RTAS_UNKNOWN_SERVICE; } -EXPORT_SYMBOL(rtas_token); +EXPORT_SYMBOL_GPL(rtas_token); int rtas_service_present(const char *service) { @@ -482,7 +482,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...) } return ret; } -EXPORT_SYMBOL(rtas_call); +EXPORT_SYMBOL_GPL(rtas_call); /* For RTAS_BUSY (-2), delay for 1 millisecond. For an extended busy status * code of 990n, perform the hinted delay of 10^n (last digit) milliseconds. 
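/*
 * Illustrative sketch, not part of the patch: the retry idiom that the
 * hv-24x7 and pseries hunks further down wrap around rtas_call().  A busy
 * status (-2 or 990x) simply means "call again"; rtas_busy_delay() performs
 * the hinted delay and returns nonzero while a retry is still needed.  The
 * function name and the 'param' token below are invented for the example;
 * rtas_call(), rtas_busy_delay(), rtas_data_buf and rtas_data_buf_lock are
 * the symbols already visible in this file.
 */
#include <linux/spinlock.h>
#include <asm/page.h>
#include <asm/rtas.h>

static int read_system_parameter_sketch(s32 token, int param)
{
	int status;

	do {
		spin_lock(&rtas_data_buf_lock);
		status = rtas_call(token, 3, 1, NULL, param,
				   __pa(rtas_data_buf), RTAS_DATA_BUF_SIZE);
		/* copy anything needed out of rtas_data_buf before unlocking */
		spin_unlock(&rtas_data_buf_lock);
	} while (rtas_busy_delay(status));

	return status;
}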
@@ -517,7 +517,7 @@ unsigned int rtas_busy_delay(int status) return ms; } -EXPORT_SYMBOL(rtas_busy_delay); +EXPORT_SYMBOL_GPL(rtas_busy_delay); static int rtas_error_rc(int rtas_rc) { @@ -563,7 +563,7 @@ int rtas_get_power_level(int powerdomain, int *level) return rtas_error_rc(rc); return rc; } -EXPORT_SYMBOL(rtas_get_power_level); +EXPORT_SYMBOL_GPL(rtas_get_power_level); int rtas_set_power_level(int powerdomain, int level, int *setlevel) { @@ -581,7 +581,7 @@ int rtas_set_power_level(int powerdomain, int level, int *setlevel) return rtas_error_rc(rc); return rc; } -EXPORT_SYMBOL(rtas_set_power_level); +EXPORT_SYMBOL_GPL(rtas_set_power_level); int rtas_get_sensor(int sensor, int index, int *state) { @@ -599,7 +599,7 @@ int rtas_get_sensor(int sensor, int index, int *state) return rtas_error_rc(rc); return rc; } -EXPORT_SYMBOL(rtas_get_sensor); +EXPORT_SYMBOL_GPL(rtas_get_sensor); int rtas_get_sensor_fast(int sensor, int index, int *state) { @@ -660,7 +660,7 @@ int rtas_set_indicator(int indicator, int index, int new_value) return rtas_error_rc(rc); return rc; } -EXPORT_SYMBOL(rtas_set_indicator); +EXPORT_SYMBOL_GPL(rtas_set_indicator); /* * Ignoring RTAS extended delay diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 1d20f0f77a920a226b5e508a3b5626450595b79a..ba9b54d35f570e8c1422395a4d0fa3d94ef5c2e1 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -436,7 +436,7 @@ void vtime_flush(struct task_struct *tsk) #define calc_cputime_factors() #endif -void __delay(unsigned long loops) +void __no_kcsan __delay(unsigned long loops) { unsigned long start; @@ -457,7 +457,7 @@ void __delay(unsigned long loops) } EXPORT_SYMBOL(__delay); -void udelay(unsigned long usecs) +void __no_kcsan udelay(unsigned long usecs) { __delay(tb_ticks_per_usec * usecs); } diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 069d451240fa4c961fc85fed91a3eb5777d57676..5e5a2448ae79a6281f2a0662d107dc37c1ed090f 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -245,7 +245,7 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, if (panic_on_oops) panic("Fatal exception"); - do_exit(signr); + make_task_dead(signr); } NOKPROBE_SYMBOL(oops_end); diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c index 4c2f75916a7eafec68afae624e8810e56205e14d..abbfd5cc40c93e815c42f366265ea714a0f37660 100644 --- a/arch/powerpc/mm/book3s64/radix_tlb.c +++ b/arch/powerpc/mm/book3s64/radix_tlb.c @@ -941,15 +941,12 @@ static inline void __radix__flush_tlb_range(struct mm_struct *mm, } } } else { - bool hflush = false; + bool hflush; unsigned long hstart, hend; - if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { - hstart = (start + PMD_SIZE - 1) & PMD_MASK; - hend = end & PMD_MASK; - if (hstart < hend) - hflush = true; - } + hstart = (start + PMD_SIZE - 1) & PMD_MASK; + hend = end & PMD_MASK; + hflush = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hstart < hend; if (local) { asm volatile("ptesync": : :"memory"); diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c index 6e7e820508df7c071f7ef38ecf2eba6c461a2944..1cd2351d241e83c5c008c53afd585e66b8bcd029 100644 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c @@ -79,9 +79,8 @@ static u32 phys_coresperchip; /* Physical cores per chip */ */ void read_24x7_sys_info(void) { - int call_status, len, ntypes; - - spin_lock(&rtas_data_buf_lock); + const s32 token = rtas_token("ibm,get-system-parameter"); + int call_status; /* * 
Making system parameter: chips and sockets and cores per chip @@ -91,32 +90,27 @@ void read_24x7_sys_info(void) phys_chipspersocket = 1; phys_coresperchip = 1; - call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1, - NULL, - PROCESSOR_MODULE_INFO, - __pa(rtas_data_buf), - RTAS_DATA_BUF_SIZE); + do { + spin_lock(&rtas_data_buf_lock); + call_status = rtas_call(token, 3, 1, NULL, PROCESSOR_MODULE_INFO, + __pa(rtas_data_buf), RTAS_DATA_BUF_SIZE); + if (call_status == 0) { + int ntypes = be16_to_cpup((__be16 *)&rtas_data_buf[2]); + int len = be16_to_cpup((__be16 *)&rtas_data_buf[0]); + + if (len >= 8 && ntypes != 0) { + phys_sockets = be16_to_cpup((__be16 *)&rtas_data_buf[4]); + phys_chipspersocket = be16_to_cpup((__be16 *)&rtas_data_buf[6]); + phys_coresperchip = be16_to_cpup((__be16 *)&rtas_data_buf[8]); + } + } + spin_unlock(&rtas_data_buf_lock); + } while (rtas_busy_delay(call_status)); if (call_status != 0) { pr_err("Error calling get-system-parameter %d\n", call_status); - } else { - len = be16_to_cpup((__be16 *)&rtas_data_buf[0]); - if (len < 8) - goto out; - - ntypes = be16_to_cpup((__be16 *)&rtas_data_buf[2]); - - if (!ntypes) - goto out; - - phys_sockets = be16_to_cpup((__be16 *)&rtas_data_buf[4]); - phys_chipspersocket = be16_to_cpup((__be16 *)&rtas_data_buf[6]); - phys_coresperchip = be16_to_cpup((__be16 *)&rtas_data_buf[8]); } - -out: - spin_unlock(&rtas_data_buf_lock); } /* Domains for which more than one result element are returned for each event. */ diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c index e42c2fe3dd367bdf9ad2818b01d4890006c65fd1..b773c411aa5c2733074ac69949f19c725d44e2d7 100644 --- a/arch/powerpc/perf/imc-pmu.c +++ b/arch/powerpc/perf/imc-pmu.c @@ -21,7 +21,7 @@ * Used to avoid races in counting the nest-pmu units during hotplug * register and unregister */ -static DEFINE_SPINLOCK(nest_init_lock); +static DEFINE_MUTEX(nest_init_lock); static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc); static struct imc_pmu **per_nest_pmu_arr; static cpumask_t nest_imc_cpumask; @@ -1621,7 +1621,7 @@ static void imc_common_mem_free(struct imc_pmu *pmu_ptr) static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr) { if (pmu_ptr->domain == IMC_DOMAIN_NEST) { - spin_lock(&nest_init_lock); + mutex_lock(&nest_init_lock); if (nest_pmus == 1) { cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE); kfree(nest_imc_refc); @@ -1631,7 +1631,7 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr) if (nest_pmus > 0) nest_pmus--; - spin_unlock(&nest_init_lock); + mutex_unlock(&nest_init_lock); } /* Free core_imc memory */ @@ -1788,11 +1788,11 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id * rest. To handle the cpuhotplug callback unregister, we track * the number of nest pmus in "nest_pmus". */ - spin_lock(&nest_init_lock); + mutex_lock(&nest_init_lock); if (nest_pmus == 0) { ret = init_nest_pmu_ref(); if (ret) { - spin_unlock(&nest_init_lock); + mutex_unlock(&nest_init_lock); kfree(per_nest_pmu_arr); per_nest_pmu_arr = NULL; goto err_free_mem; @@ -1800,7 +1800,7 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id /* Register for cpu hotplug notification. 
*/ ret = nest_pmu_cpumask_init(); if (ret) { - spin_unlock(&nest_init_lock); + mutex_unlock(&nest_init_lock); kfree(nest_imc_refc); kfree(per_nest_pmu_arr); per_nest_pmu_arr = NULL; @@ -1808,7 +1808,7 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id } } nest_pmus++; - spin_unlock(&nest_init_lock); + mutex_unlock(&nest_init_lock); break; case IMC_DOMAIN_CORE: ret = core_imc_pmu_cpumask_init(); diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 2b4ceb5e6ce4c95a16ad9210097241dfbbc095e9..a1e6dd47743f1776042cb1a6a4b58114f3cd9f27 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -2260,7 +2260,8 @@ static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe, int index; int64_t rc; - if (!res || !res->flags || res->start > res->end) + if (!res || !res->flags || res->start > res->end || + res->flags & IORESOURCE_UNSET) return; if (res->flags & IORESOURCE_IO) { diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 1c3ac0f6633697892445b0735ee768df44344a50..115d196560b8b90d5f4a208f4a1c20b890c61a0b 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -1433,22 +1433,22 @@ static inline void __init check_lp_set_hblkrm(unsigned int lp, void __init pseries_lpar_read_hblkrm_characteristics(void) { + const s32 token = rtas_token("ibm,get-system-parameter"); unsigned char local_buffer[SPLPAR_TLB_BIC_MAXLENGTH]; int call_status, len, idx, bpsize; if (!firmware_has_feature(FW_FEATURE_BLOCK_REMOVE)) return; - spin_lock(&rtas_data_buf_lock); - memset(rtas_data_buf, 0, RTAS_DATA_BUF_SIZE); - call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1, - NULL, - SPLPAR_TLB_BIC_TOKEN, - __pa(rtas_data_buf), - RTAS_DATA_BUF_SIZE); - memcpy(local_buffer, rtas_data_buf, SPLPAR_TLB_BIC_MAXLENGTH); - local_buffer[SPLPAR_TLB_BIC_MAXLENGTH - 1] = '\0'; - spin_unlock(&rtas_data_buf_lock); + do { + spin_lock(&rtas_data_buf_lock); + memset(rtas_data_buf, 0, RTAS_DATA_BUF_SIZE); + call_status = rtas_call(token, 3, 1, NULL, SPLPAR_TLB_BIC_TOKEN, + __pa(rtas_data_buf), RTAS_DATA_BUF_SIZE); + memcpy(local_buffer, rtas_data_buf, SPLPAR_TLB_BIC_MAXLENGTH); + local_buffer[SPLPAR_TLB_BIC_MAXLENGTH - 1] = '\0'; + spin_unlock(&rtas_data_buf_lock); + } while (rtas_busy_delay(call_status)); if (call_status != 0) { pr_warn("%s %s Error calling get-system-parameter (0x%x)\n", diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c index e278390ab28d14c53071ffc99c447c07fb487dbc..d3517e498512f38a182f0d37e3ace5a7588095fc 100644 --- a/arch/powerpc/platforms/pseries/lparcfg.c +++ b/arch/powerpc/platforms/pseries/lparcfg.c @@ -322,6 +322,7 @@ static void parse_mpp_x_data(struct seq_file *m) */ static void parse_system_parameter_string(struct seq_file *m) { + const s32 token = rtas_token("ibm,get-system-parameter"); int call_status; unsigned char *local_buffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL); @@ -331,16 +332,15 @@ static void parse_system_parameter_string(struct seq_file *m) return; } - spin_lock(&rtas_data_buf_lock); - memset(rtas_data_buf, 0, SPLPAR_MAXLENGTH); - call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1, - NULL, - SPLPAR_CHARACTERISTICS_TOKEN, - __pa(rtas_data_buf), - RTAS_DATA_BUF_SIZE); - memcpy(local_buffer, rtas_data_buf, SPLPAR_MAXLENGTH); - local_buffer[SPLPAR_MAXLENGTH - 1] = '\0'; - spin_unlock(&rtas_data_buf_lock); + do { + 
spin_lock(&rtas_data_buf_lock); + memset(rtas_data_buf, 0, SPLPAR_MAXLENGTH); + call_status = rtas_call(token, 3, 1, NULL, SPLPAR_CHARACTERISTICS_TOKEN, + __pa(rtas_data_buf), RTAS_DATA_BUF_SIZE); + memcpy(local_buffer, rtas_data_buf, SPLPAR_MAXLENGTH); + local_buffer[SPLPAR_MAXLENGTH - 1] = '\0'; + spin_unlock(&rtas_data_buf_lock); + } while (rtas_busy_delay(call_status)); if (call_status != 0) { printk(KERN_INFO diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index 1bb1bf1141cc7fb482afa72bcfc6450c36b9a116..9446282b52babe727d6d0872285828d338cab593 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -74,6 +74,9 @@ ifeq ($(CONFIG_PERF_EVENTS),y) KBUILD_CFLAGS += -fno-omit-frame-pointer endif +# Avoid generating .eh_frame sections. +KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables + KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax) KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax) diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h index 04dad3380041896e30531f81bf66d4a5e4b26cf6..bc745900c16318ede3ff56d50c69e9ab2cd1b69b 100644 --- a/arch/riscv/include/asm/ftrace.h +++ b/arch/riscv/include/asm/ftrace.h @@ -83,6 +83,6 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec); #define ftrace_init_nop ftrace_init_nop #endif -#endif +#endif /* CONFIG_DYNAMIC_FTRACE */ #endif /* _ASM_RISCV_FTRACE_H */ diff --git a/arch/riscv/include/asm/jump_label.h b/arch/riscv/include/asm/jump_label.h index 38af2ec7b9bf92decb3bd7260a9731cb970ad379..729991e8f782540b58063e8029cd1cd90e835a7d 100644 --- a/arch/riscv/include/asm/jump_label.h +++ b/arch/riscv/include/asm/jump_label.h @@ -18,6 +18,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool branch) { asm_volatile_goto( + " .align 2 \n\t" " .option push \n\t" " .option norelax \n\t" " .option norvc \n\t" @@ -39,6 +40,7 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) { asm_volatile_goto( + " .align 2 \n\t" " .option push \n\t" " .option norelax \n\t" " .option norvc \n\t" diff --git a/arch/riscv/include/asm/parse_asm.h b/arch/riscv/include/asm/parse_asm.h index f36368de839f5f81912c9d7a7752327dfb0cb327..ad254da85e6153b8709023caf103bd6bb8930d2d 100644 --- a/arch/riscv/include/asm/parse_asm.h +++ b/arch/riscv/include/asm/parse_asm.h @@ -3,6 +3,9 @@ * Copyright (C) 2020 SiFive */ +#ifndef _ASM_RISCV_INSN_H +#define _ASM_RISCV_INSN_H + #include /* The bit field of immediate value in I-type instruction */ @@ -125,7 +128,7 @@ #define FUNCT3_C_J 0xa000 #define FUNCT3_C_JAL 0x2000 #define FUNCT4_C_JR 0x8000 -#define FUNCT4_C_JALR 0xf000 +#define FUNCT4_C_JALR 0x9000 #define FUNCT12_SRET 0x10200000 @@ -217,3 +220,5 @@ static inline bool is_ ## INSN_NAME ## _insn(long insn) \ (RVC_X(x_, RVC_B_IMM_5_OPOFF, RVC_B_IMM_5_MASK) << RVC_B_IMM_5_OFF) | \ (RVC_X(x_, RVC_B_IMM_7_6_OPOFF, RVC_B_IMM_7_6_MASK) << RVC_B_IMM_7_6_OFF) | \ (RVC_IMM_SIGN(x_) << RVC_B_IMM_SIGN_OFF); }) + +#endif /* _ASM_RISCV_INSN_H */ diff --git a/arch/riscv/include/asm/patch.h b/arch/riscv/include/asm/patch.h index 9a7d7346001ee249678a1a5c28f1de09a758d2ab..98d9de07cba17739e7d37120f3a7ea5545cefc9a 100644 --- a/arch/riscv/include/asm/patch.h +++ b/arch/riscv/include/asm/patch.h @@ -9,4 +9,6 @@ int patch_text_nosync(void *addr, const void *insns, size_t len); int patch_text(void *addr, u32 insn); +extern int riscv_patch_in_stop_machine; + #endif /* _ASM_RISCV_PATCH_H */ diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c index 
765b62434f303d2c99f63b82be3799725b82e5dc..8693dfcffb022a23329ea62e736043eae31f538c 100644 --- a/arch/riscv/kernel/ftrace.c +++ b/arch/riscv/kernel/ftrace.c @@ -15,11 +15,21 @@ int ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex) { mutex_lock(&text_mutex); + + /* + * The code sequences we use for ftrace can't be patched while the + * kernel is running, so we need to use stop_machine() to modify them + * for now. This doesn't play nice with text_mutex, we use this flag + * to elide the check. + */ + riscv_patch_in_stop_machine = true; + return 0; } int ftrace_arch_code_modify_post_process(void) __releases(&text_mutex) { + riscv_patch_in_stop_machine = false; mutex_unlock(&text_mutex); return 0; } @@ -109,9 +119,9 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) { int out; - ftrace_arch_code_modify_prepare(); + mutex_lock(&text_mutex); out = ftrace_make_nop(mod, rec, MCOUNT_ADDR); - ftrace_arch_code_modify_post_process(); + mutex_unlock(&text_mutex); return out; } diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c index 1612e11f7bf6d60c9011b78c521f7a7dc206209c..c3fced410e74298198248810fb034e5b5a698223 100644 --- a/arch/riscv/kernel/patch.c +++ b/arch/riscv/kernel/patch.c @@ -11,6 +11,7 @@ #include #include #include +#include #include struct patch_insn { @@ -19,6 +20,8 @@ struct patch_insn { atomic_t cpu_count; }; +int riscv_patch_in_stop_machine = false; + #ifdef CONFIG_MMU static void *patch_map(void *addr, int fixmap) { @@ -55,8 +58,15 @@ static int patch_insn_write(void *addr, const void *insn, size_t len) * Before reaching here, it was expected to lock the text_mutex * already, so we don't need to give another lock here and could * ensure that it was safe between each cores. + * + * We're currently using stop_machine() for ftrace & kprobes, and while + * that ensures text_mutex is held before installing the mappings it + * does not ensure text_mutex is held by the calling thread. That's + * safe but triggers a lockdep failure, so just elide it for that + * specific case. */ - lockdep_assert_held(&text_mutex); + if (!riscv_patch_in_stop_machine) + lockdep_assert_held(&text_mutex); if (across_pages) patch_map(addr + len, FIX_TEXT_POKE1); @@ -117,13 +127,25 @@ NOKPROBE_SYMBOL(patch_text_cb); int patch_text(void *addr, u32 insn) { + int ret; struct patch_insn patch = { .addr = addr, .insn = insn, .cpu_count = ATOMIC_INIT(0), }; - return stop_machine_cpuslocked(patch_text_cb, - &patch, cpu_online_mask); + /* + * kprobes takes text_mutex, before calling patch_text(), but as we call + * calls stop_machine(), the lockdep assertion in patch_insn_write() + * gets confused by the context in which the lock is taken. + * Instead, ensure the lock is held before calling stop_machine(), and + * set riscv_patch_in_stop_machine to skip the check in + * patch_insn_write(). 
+ */ + lockdep_assert_held(&text_mutex); + riscv_patch_in_stop_machine = true; + ret = stop_machine_cpuslocked(patch_text_cb, &patch, cpu_online_mask); + riscv_patch_in_stop_machine = false; + return ret; } NOKPROBE_SYMBOL(patch_text); diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c index 1e53fbe5eb7834213c7f99c24d78485042eb00f8..9c34735c1e771dd83ee24c6c1b4997ebec75e4fe 100644 --- a/arch/riscv/kernel/stacktrace.c +++ b/arch/riscv/kernel/stacktrace.c @@ -96,7 +96,7 @@ void notrace walk_stackframe(struct task_struct *task, while (!kstack_end(ksp)) { if (__kernel_text_address(pc) && unlikely(fn(pc, arg))) break; - pc = (*ksp++) - 0x4; + pc = READ_ONCE_NOCHECK(*ksp++) - 0x4; } } diff --git a/arch/riscv/kernel/time.c b/arch/riscv/kernel/time.c index 8a5cf99c07762403ebd55ba6f54e03005e28b7e4..303ae47dfb4d66d8c53a2663159abbd0c369115b 100644 --- a/arch/riscv/kernel/time.c +++ b/arch/riscv/kernel/time.c @@ -5,6 +5,7 @@ */ #include +#include #include #include #include @@ -28,6 +29,8 @@ void __init time_init(void) of_clk_init(NULL); timer_probe(); + + tick_setup_hrtimer_broadcast(); } void clocksource_arch_init(struct clocksource *cs) diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index c1a13011fb8e516ca3f11f2d7f3445ed3d81d5ff..227253fde33c4a728d24a8ba3171979430b954ff 100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -31,25 +31,29 @@ void die(struct pt_regs *regs, const char *str) { static int die_counter; int ret; + long cause; + unsigned long flags; oops_enter(); - spin_lock_irq(&die_lock); + spin_lock_irqsave(&die_lock, flags); console_verbose(); bust_spinlocks(1); pr_emerg("%s [#%d]\n", str, ++die_counter); print_modules(); - show_regs(regs); + if (regs) + show_regs(regs); - ret = notify_die(DIE_OOPS, str, regs, 0, regs->cause, SIGSEGV); + cause = regs ? regs->cause : -1; + ret = notify_die(DIE_OOPS, str, regs, 0, cause, SIGSEGV); - if (regs && kexec_should_crash(current)) + if (kexec_should_crash(current)) crash_kexec(regs); bust_spinlocks(0); add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); - spin_unlock_irq(&die_lock); + spin_unlock_irqrestore(&die_lock, flags); oops_exit(); if (in_interrupt()) @@ -57,7 +61,7 @@ void die(struct pt_regs *regs, const char *str) if (panic_on_oops) panic("Fatal exception"); if (ret != NOTIFY_STOP) - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr) diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c index 89f81067e09edba0c9cacdafd41cc97ea2b0e19e..2ae1201cff886e77367ce1fcdda762c177900fc7 100644 --- a/arch/riscv/mm/cacheflush.c +++ b/arch/riscv/mm/cacheflush.c @@ -85,7 +85,9 @@ void flush_icache_pte(pte_t pte) { struct page *page = pte_page(pte); - if (!test_and_set_bit(PG_dcache_clean, &page->flags)) + if (!test_bit(PG_dcache_clean, &page->flags)) { flush_icache_all(); + set_bit(PG_dcache_clean, &page->flags); + } } #endif /* CONFIG_MMU */ diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c index 8f84bbe0ac33e256f61fa59af676872e670deebf..54b12943cc7b0aeb71746f468d2542d3f75f7130 100644 --- a/arch/riscv/mm/fault.c +++ b/arch/riscv/mm/fault.c @@ -34,7 +34,7 @@ static inline void no_context(struct pt_regs *regs, unsigned long addr) (addr < PAGE_SIZE) ? 
"NULL pointer dereference" : "paging request", addr); die(regs, "Oops"); - do_exit(SIGKILL); + make_task_dead(SIGKILL); } static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault) diff --git a/arch/s390/boot/compressed/decompressor.c b/arch/s390/boot/compressed/decompressor.c index 3061b11c4d27f49a61e13173b793e3e58b6a12d1..8eaa1712a1c8d9576cf7e74faf573dbc48513457 100644 --- a/arch/s390/boot/compressed/decompressor.c +++ b/arch/s390/boot/compressed/decompressor.c @@ -79,6 +79,6 @@ void *decompress_kernel(void) void *output = (void *)decompress_offset; __decompress(_compressed_start, _compressed_end - _compressed_start, - NULL, NULL, output, 0, NULL, error); + NULL, NULL, output, vmlinux.image_size, NULL, error); return output; } diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h index c1b82bcc017cf043989413c10025f5eb8e8e76ec..29a1badbe2f5d253799a764e0c8be9f1eb75f932 100644 --- a/arch/s390/include/asm/debug.h +++ b/arch/s390/include/asm/debug.h @@ -4,8 +4,8 @@ * * Copyright IBM Corp. 1999, 2020 */ -#ifndef DEBUG_H -#define DEBUG_H +#ifndef _ASM_S390_DEBUG_H +#define _ASM_S390_DEBUG_H #include #include @@ -425,4 +425,4 @@ int debug_unregister_view(debug_info_t *id, struct debug_view *view); #define PRINT_FATAL(x...) printk(KERN_DEBUG PRINTK_HEADER x) #endif /* DASD_DEBUG */ -#endif /* DEBUG_H */ +#endif /* _ASM_S390_DEBUG_H */ diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c index 0dc4b258b98d51c1c80c2a1eefea64c46ea7e1da..763e726025b3d26e8bf5304d977e95eff13ea34c 100644 --- a/arch/s390/kernel/dumpstack.c +++ b/arch/s390/kernel/dumpstack.c @@ -214,5 +214,5 @@ void die(struct pt_regs *regs, const char *str) if (panic_on_oops) panic("Fatal exception: panic_on_oops"); oops_exit(); - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index aae24dc75df61bfed0c062822f60645bb783f380..0f7e7a68d57bb20add0c33f81c1f4f94d3247286 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -241,6 +241,7 @@ static void pop_kprobe(struct kprobe_ctlblk *kcb) { __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); kcb->kprobe_status = kcb->prev_kprobe.status; + kcb->prev_kprobe.kp = NULL; } NOKPROBE_SYMBOL(pop_kprobe); @@ -402,12 +403,11 @@ static int post_kprobe_handler(struct pt_regs *regs) if (!p) return 0; + resume_execution(p, regs); if (kcb->kprobe_status != KPROBE_REENTER && p->post_handler) { kcb->kprobe_status = KPROBE_HIT_SSDONE; p->post_handler(p, regs, 0); } - - resume_execution(p, regs); pop_kprobe(kcb); preempt_enable_no_resched(); diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c index 86c8d5370e7f4c34b9545d656a52527a9080dabe..0102376eca3db161f47a90772aeb752db00cabc7 100644 --- a/arch/s390/kernel/nmi.c +++ b/arch/s390/kernel/nmi.c @@ -178,7 +178,7 @@ void s390_handle_mcck(void) "malfunction (code 0x%016lx).\n", mcck.mcck_code); printk(KERN_EMERG "mcck: task: %s, pid: %d.\n", current->comm, current->pid); - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } } EXPORT_SYMBOL_GPL(s390_handle_mcck); diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index b27b6c1f058d07e8175e85c640c243254e4efa84..9e900a8977bd2110f798439e1fc464c20a769e9b 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -472,7 +472,7 @@ void do_signal(struct pt_regs *regs) current->thread.system_call = test_pt_regs_flag(regs, PIF_SYSCALL) ? 
regs->int_code : 0; - if (test_thread_flag(TIF_SIGPENDING) && get_signal(&ksig)) { + if (get_signal(&ksig)) { /* Whee! Actually deliver the signal. */ if (current->thread.system_call) { regs->int_code = current->thread.system_call; diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 9505bdb0aa54499e154a2322c8503c7a2e2e391d..d7291eb0d0c070739aee2ffbcfbc7423ed726faf 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -188,5 +188,6 @@ SECTIONS DISCARDS /DISCARD/ : { *(.eh_frame) + *(.interp) } } diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index b51ab19eb9721479bb05718895e1a159fa763fef..64d1dfe6dca53fe070eee9dacabe13491a2f7e4e 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -81,8 +81,9 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id) struct esca_block *sca = vcpu->kvm->arch.sca; union esca_sigp_ctrl *sigp_ctrl = &(sca->cpu[vcpu->vcpu_id].sigp_ctrl); - union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl; + union esca_sigp_ctrl new_val = {0}, old_val; + old_val = READ_ONCE(*sigp_ctrl); new_val.scn = src_id; new_val.c = 1; old_val.c = 0; @@ -93,8 +94,9 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id) struct bsca_block *sca = vcpu->kvm->arch.sca; union bsca_sigp_ctrl *sigp_ctrl = &(sca->cpu[vcpu->vcpu_id].sigp_ctrl); - union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl; + union bsca_sigp_ctrl new_val = {0}, old_val; + old_val = READ_ONCE(*sigp_ctrl); new_val.scn = src_id; new_val.c = 1; old_val.c = 0; @@ -124,16 +126,18 @@ static void sca_clear_ext_call(struct kvm_vcpu *vcpu) struct esca_block *sca = vcpu->kvm->arch.sca; union esca_sigp_ctrl *sigp_ctrl = &(sca->cpu[vcpu->vcpu_id].sigp_ctrl); - union esca_sigp_ctrl old = *sigp_ctrl; + union esca_sigp_ctrl old; + old = READ_ONCE(*sigp_ctrl); expect = old.value; rc = cmpxchg(&sigp_ctrl->value, old.value, 0); } else { struct bsca_block *sca = vcpu->kvm->arch.sca; union bsca_sigp_ctrl *sigp_ctrl = &(sca->cpu[vcpu->vcpu_id].sigp_ctrl); - union bsca_sigp_ctrl old = *sigp_ctrl; + union bsca_sigp_ctrl old; + old = READ_ONCE(*sigp_ctrl); expect = old.value; rc = cmpxchg(&sigp_ctrl->value, old.value, 0); } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 59db85fb63e1cb0015bfb9cfc4c728a76e617b36..7ffc73ba220fbe34111fec5ae674d1b3f58ba131 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -5012,6 +5012,23 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, /* When we are protected, we should not change the memory slots */ if (kvm_s390_pv_get_handle(kvm)) return -EINVAL; + + if (!kvm->arch.migration_mode) + return 0; + + /* + * Turn off migration mode when: + * - userspace creates a new memslot with dirty logging off, + * - userspace modifies an existing memslot (MOVE or FLAGS_ONLY) and + * dirty logging is turned off. + * Migration mode expects dirty page logging being enabled to store + * its dirty bitmap. 
+ */ + if (change != KVM_MR_DELETE && + !(mem->flags & KVM_MEM_LOG_DIRTY_PAGES)) + WARN(kvm_s390_vm_stop_migration(kvm), + "Failed to stop migration mode"); + return 0; } diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c index 5060956b8e7d61181452d47d998d181ef1877f0f..1bc42ce2659908778078528a67c169c4c719f6a0 100644 --- a/arch/s390/mm/extmem.c +++ b/arch/s390/mm/extmem.c @@ -289,15 +289,17 @@ segment_overlaps_others (struct dcss_segment *seg) /* * real segment loading function, called from segment_load + * Must return either an error code < 0, or the segment type code >= 0 */ static int __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long *end) { unsigned long start_addr, end_addr, dummy; struct dcss_segment *seg; - int rc, diag_cc; + int rc, diag_cc, segtype; start_addr = end_addr = 0; + segtype = -1; seg = kmalloc(sizeof(*seg), GFP_KERNEL | GFP_DMA); if (seg == NULL) { rc = -ENOMEM; @@ -326,9 +328,9 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long seg->res_name[8] = '\0'; strlcat(seg->res_name, " (DCSS)", sizeof(seg->res_name)); seg->res->name = seg->res_name; - rc = seg->vm_segtype; - if (rc == SEG_TYPE_SC || - ((rc == SEG_TYPE_SR || rc == SEG_TYPE_ER) && !do_nonshared)) + segtype = seg->vm_segtype; + if (segtype == SEG_TYPE_SC || + ((segtype == SEG_TYPE_SR || segtype == SEG_TYPE_ER) && !do_nonshared)) seg->res->flags |= IORESOURCE_READONLY; /* Check for overlapping resources before adding the mapping. */ @@ -386,7 +388,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long out_free: kfree(seg); out: - return rc; + return rc < 0 ? rc : segtype; } /* diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index b239f2ba93b092c8485a9cb101559917696aba20..cbfff2460e58d523388ec5587bc9dbf07c111e1d 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c @@ -296,7 +296,7 @@ static void try_free_pmd_table(pud_t *pud, unsigned long start) if (end > VMALLOC_START) return; #ifdef CONFIG_KASAN - if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end) + if (start < KASAN_SHADOW_END && end > KASAN_SHADOW_START) return; #endif pmd = pmd_offset(pud, start); @@ -371,7 +371,7 @@ static void try_free_pud_table(p4d_t *p4d, unsigned long start) if (end > VMALLOC_START) return; #ifdef CONFIG_KASAN - if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end) + if (start < KASAN_SHADOW_END && end > KASAN_SHADOW_START) return; #endif @@ -425,7 +425,7 @@ static void try_free_p4d_table(pgd_t *pgd, unsigned long start) if (end > VMALLOC_START) return; #ifdef CONFIG_KASAN - if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end) + if (start < KASAN_SHADOW_END && end > KASAN_SHADOW_START) return; #endif diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c index 9c3d32b80038ab1efa19e8be97e07af576e86c0a..4efffc18c851283cdc06d9c1978931e913706b27 100644 --- a/arch/sh/kernel/traps.c +++ b/arch/sh/kernel/traps.c @@ -57,7 +57,7 @@ void die(const char *str, struct pt_regs *regs, long err) if (panic_on_oops) panic("Fatal exception"); - do_exit(SIGSEGV); + make_task_dead(SIGSEGV); } void die_if_kernel(const char *str, struct pt_regs *regs, long err) diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 530b7ec5d3ca9c6e397958a7ed6d79b0cff265c0..b5ed893420591e6681ea1f4a87a5dbe9621459eb 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -293,7 +293,7 @@ config FORCE_MAX_ZONEORDER This config option is actually maximum order plus one. 
For example, a value of 13 means that the largest free memory block is 2^12 pages. -if SPARC64 +if SPARC64 || COMPILE_TEST source "kernel/power/Kconfig" endif diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c index 247a0d9683b2bcfc184c0d1af0314cff27c5ce01..5d47f4a3422617447424f2a23d69b4a5d7b75cf6 100644 --- a/arch/sparc/kernel/traps_32.c +++ b/arch/sparc/kernel/traps_32.c @@ -86,9 +86,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs) } printk("Instruction DUMP:"); instruction_dump ((unsigned long *) regs->pc); - if(regs->psr & PSR_PS) - do_exit(SIGKILL); - do_exit(SIGSEGV); + make_task_dead((regs->psr & PSR_PS) ? SIGKILL : SIGSEGV); } void do_hw_interrupt(struct pt_regs *regs, unsigned long type) diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index a850dccd78ea19c8a9e30f9140884d3d0091b82a..814277d0e3e8fd2908b13df82cb6a1a72abb79d7 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -2564,9 +2564,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs) } if (panic_on_oops) panic("Fatal exception"); - if (regs->tstate & TSTATE_PRIV) - do_exit(SIGKILL); - do_exit(SIGSEGV); + make_task_dead((regs->tstate & TSTATE_PRIV)? SIGKILL : SIGSEGV); } EXPORT_SYMBOL(die_if_kernel); diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c index 555203e3e7b45664bdd08e98fe55fdbb5402cdc7..fc662f7cc2afbce39a89e234bc4aeda60cfc630e 100644 --- a/arch/um/drivers/vector_kern.c +++ b/arch/um/drivers/vector_kern.c @@ -771,6 +771,7 @@ static int vector_config(char *str, char **error_out) if (parsed == NULL) { *error_out = "vector_config failed to parse parameters"; + kfree(params); return -EINVAL; } diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index d64e690139950085cd575e815cdfb3d5e0af9223..2284666e8c90cecbafaa78046fd7ce64ce87092b 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1329,17 +1329,16 @@ config MICROCODE_AMD If you select this option, microcode patch loading support for AMD processors will be enabled. -config MICROCODE_OLD_INTERFACE - bool "Ancient loading interface (DEPRECATED)" +config MICROCODE_LATE_LOADING + bool "Late microcode loading (DANGEROUS)" default n depends on MICROCODE help - DO NOT USE THIS! This is the ancient /dev/cpu/microcode interface - which was used by userspace tools like iucode_tool and microcode.ctl. - It is inadequate because it runs too late to be able to properly - load microcode on a machine and it needs special tools. Instead, you - should've switched to the early loading method with the initrd or - builtin microcode by now: Documentation/x86/microcode.rst + Loading microcode late, when the system is up and executing instructions + is a tricky business and should be avoided if possible. Just the sequence + of synchronizing all cores and SMT threads is one fragile dance which does + not guarantee that cores might not softlock after the loading. Therefore, + use this at your own risk. Late loading taints the kernel too. 
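For context, late loading under the new MICROCODE_LATE_LOADING option is driven entirely from the reload attribute that the loader registers further down in this series (reload_store() behind DEVICE_ATTR_WO(reload), which only acts on the value 1). A minimal userspace sketch of how an administrator triggers it; the sysfs path is the one conventionally exposed by the x86 microcode loader, and the program needs root plus a kernel built with this option:

    #include <stdio.h>
    #include <errno.h>
    #include <string.h>

    int main(void)
    {
        /* Conventional sysfs node created by the x86 microcode loader. */
        const char *path = "/sys/devices/system/cpu/microcode/reload";
        FILE *f = fopen(path, "w");

        if (!f) {
            fprintf(stderr, "open %s: %s\n", path, strerror(errno));
            return 1;
        }

        /* reload_store() ignores anything other than "1". */
        if (fputs("1", f) == EOF || fclose(f) == EOF) {
            fprintf(stderr, "write %s: %s\n", path, strerror(errno));
            return 1;
        }

        puts("late microcode reload requested; check dmesg for the result");
        return 0;
    }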
config X86_MSR tristate "/dev/cpu/*/msr - Model-specific register support" diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c index 1f1a95f3dd0ca6828077544c26c5cb45545c31b6..c0ab0ff4af655ca7e6b604a14c8d9e45571e0ae9 100644 --- a/arch/x86/crypto/ghash-clmulni-intel_glue.c +++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c @@ -19,6 +19,7 @@ #include #include #include +#include #define GHASH_BLOCK_SIZE 16 #define GHASH_DIGEST_SIZE 16 @@ -54,15 +55,14 @@ static int ghash_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { struct ghash_ctx *ctx = crypto_shash_ctx(tfm); - be128 *x = (be128 *)key; u64 a, b; if (keylen != GHASH_BLOCK_SIZE) return -EINVAL; /* perform multiplication by 'x' in GF(2^128) */ - a = be64_to_cpu(x->a); - b = be64_to_cpu(x->b); + a = get_unaligned_be64(key); + b = get_unaligned_be64(key + 8); ctx->shash.a = (b << 1) | (a >> 63); ctx->shash.b = (a << 1) | (b >> 63); diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index 8fcd6a42b3a18f0e5f83bc022cbd6d22a6049bec..70bd81b6c612e8d31d78927de428d4dce368d55a 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -1333,14 +1333,14 @@ SYM_CODE_START(asm_exc_nmi) SYM_CODE_END(asm_exc_nmi) .pushsection .text, "ax" -SYM_CODE_START(rewind_stack_do_exit) +SYM_CODE_START(rewind_stack_and_make_dead) /* Prevent any naive code from trying to unwind to our caller. */ xorl %ebp, %ebp movl PER_CPU_VAR(cpu_current_top_of_stack), %esi leal -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp - call do_exit + call make_task_dead 1: jmp 1b -SYM_CODE_END(rewind_stack_do_exit) +SYM_CODE_END(rewind_stack_and_make_dead) .popsection diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 559c82b83475753fd2e0c5ce23e209e47ceab443..23212c53cef7f2ee35e8ff864fff7b16c1723a1d 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -1509,7 +1509,7 @@ SYM_CODE_END(ignore_sysret) #endif .pushsection .text, "ax" -SYM_CODE_START(rewind_stack_do_exit) +SYM_CODE_START(rewind_stack_and_make_dead) UNWIND_HINT_FUNC /* Prevent any naive code from trying to unwind to our caller. */ xorl %ebp, %ebp @@ -1518,6 +1518,6 @@ SYM_CODE_START(rewind_stack_do_exit) leaq -PTREGS_SIZE(%rax), %rsp UNWIND_HINT_REGS - call do_exit -SYM_CODE_END(rewind_stack_do_exit) + call make_task_dead +SYM_CODE_END(rewind_stack_and_make_dead) .popsection diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c index 39eb276d02775756880365aecc78aaebb93b0005..52eba415928a3f342fedd48d04ef22aaf83496e9 100644 --- a/arch/x86/events/amd/core.c +++ b/arch/x86/events/amd/core.c @@ -976,7 +976,7 @@ static int __init amd_core_pmu_init(void) * numbered counter following it. */ for (i = 0; i < x86_pmu.num_counters - 1; i += 2) - even_ctr_mask |= 1 << i; + even_ctr_mask |= BIT_ULL(i); pair_constraint = (struct event_constraint) __EVENT_CONSTRAINT(0, even_ctr_mask, 0, diff --git a/arch/x86/events/zhaoxin/core.c b/arch/x86/events/zhaoxin/core.c index e68827e604ad1bb82af185e2e8db6e8ce77024c4..e927346960303c2271bc9cbe3f87ead401e6282c 100644 --- a/arch/x86/events/zhaoxin/core.c +++ b/arch/x86/events/zhaoxin/core.c @@ -541,7 +541,13 @@ __init int zhaoxin_pmu_init(void) switch (boot_cpu_data.x86) { case 0x06: - if (boot_cpu_data.x86_model == 0x0f || boot_cpu_data.x86_model == 0x19) { + /* + * Support Zhaoxin CPU from ZXC series, exclude Nano series through FMS. 
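One of the hunks above (amd_core_pmu_init()) switches the counter-mask update from 1 << i to BIT_ULL(i). The point generalizes: a bare 1 has type int, so the shift is performed in 32-bit arithmetic, can never set bits 32..63 of a u64 mask, and is undefined once the shift count reaches 31. A standalone illustration, with BIT_ULL() spelled out as 1ULL << n since the kernel header is not assumed here:

    #include <stdio.h>
    #include <stdint.h>

    /* Equivalent of the kernel's BIT_ULL(): force the shift into 64-bit arithmetic. */
    #define MY_BIT_ULL(n) (1ULL << (n))

    int main(void)
    {
        uint64_t mask = 0;
        int i;

        /* Set every even bit of a 64-bit mask, as amd_core_pmu_init() does for
         * paired counters. With (1 << i) this would be undefined behaviour once
         * i reaches 31 and could never reach bits 32..62 at all. */
        for (i = 0; i < 64; i += 2)
            mask |= MY_BIT_ULL(i);

        printf("even-bit mask: 0x%016llx\n", (unsigned long long)mask);
        return 0;
    }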
+ * Nano FMS: Family=6, Model=F, Stepping=[0-A][C-D] + * ZXC FMS: Family=6, Model=F, Stepping=E-F OR Family=6, Model=0x19, Stepping=0-3 + */ + if ((boot_cpu_data.x86_model == 0x0f && boot_cpu_data.x86_stepping >= 0x0e) || + boot_cpu_data.x86_model == 0x19) { x86_pmu.max_period = x86_pmu.cntval_mask >> 1; diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h index cfdf307ddc0129019f64efebc01bf5a5159578fa..9ed8343c9b3cb3e0b80e3a6a8c90ea0da47975ac 100644 --- a/arch/x86/include/asm/debugreg.h +++ b/arch/x86/include/asm/debugreg.h @@ -39,7 +39,20 @@ static __always_inline unsigned long native_get_debugreg(int regno) asm("mov %%db6, %0" :"=r" (val)); break; case 7: - asm("mov %%db7, %0" :"=r" (val)); + /* + * Apply __FORCE_ORDER to DR7 reads to forbid re-ordering them + * with other code. + * + * This is needed because a DR7 access can cause a #VC exception + * when running under SEV-ES. Taking a #VC exception is not a + * safe thing to do just anywhere in the entry code and + * re-ordering might place the access into an unsafe location. + * + * This happened in the NMI handler, where the DR7 read was + * re-ordered to happen before the call to sev_es_ist_enter(), + * causing stack recursion. + */ + asm volatile("mov %%db7, %0" : "=r" (val) : __FORCE_ORDER); break; default: BUG(); @@ -66,7 +79,16 @@ static __always_inline void native_set_debugreg(int regno, unsigned long value) asm("mov %0, %%db6" ::"r" (value)); break; case 7: - asm("mov %0, %%db7" ::"r" (value)); + /* + * Apply __FORCE_ORDER to DR7 writes to forbid re-ordering them + * with other code. + * + * While is didn't happen with a DR7 write (see the DR7 read + * comment above which explains where it happened), add the + * __FORCE_ORDER here too to avoid similar problems in the + * future. 
+ */ + asm volatile("mov %0, %%db7" ::"r" (value), __FORCE_ORDER); break; default: BUG(); diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index f73327397b898a3b0ec7cb801bd16070324b6894..509cc0262fdc254338aa164675c070d88576ac31 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h @@ -131,7 +131,7 @@ static inline unsigned int x86_cpuid_family(void) int __init microcode_init(void); extern void __init load_ucode_bsp(void); extern void load_ucode_ap(void); -void reload_early_microcode(void); +void reload_early_microcode(unsigned int cpu); extern bool get_builtin_firmware(struct cpio_data *cd, const char *name); extern bool initrd_gone; void microcode_bsp_resume(void); @@ -139,7 +139,7 @@ void microcode_bsp_resume(void); static inline int __init microcode_init(void) { return 0; }; static inline void __init load_ucode_bsp(void) { } static inline void load_ucode_ap(void) { } -static inline void reload_early_microcode(void) { } +static inline void reload_early_microcode(unsigned int cpu) { } static inline void microcode_bsp_resume(void) { } static inline bool get_builtin_firmware(struct cpio_data *cd, const char *name) { return false; } diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h index 7063b5a43220a96a363f97e3567fabd0fbc8be70..a645b25ee442a7ccc936fb903e7e9a3ad65f2411 100644 --- a/arch/x86/include/asm/microcode_amd.h +++ b/arch/x86/include/asm/microcode_amd.h @@ -47,12 +47,12 @@ struct microcode_amd { extern void __init load_ucode_amd_bsp(unsigned int family); extern void load_ucode_amd_ap(unsigned int family); extern int __init save_microcode_in_initrd_amd(unsigned int family); -void reload_ucode_amd(void); +void reload_ucode_amd(unsigned int cpu); #else static inline void __init load_ucode_amd_bsp(unsigned int family) {} static inline void load_ucode_amd_ap(unsigned int family) {} static inline int __init save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; } -static inline void reload_ucode_amd(void) {} +static inline void reload_ucode_amd(unsigned int cpu) {} #endif #endif /* _ASM_X86_MICROCODE_AMD_H */ diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 5a8ee3b83af2a8c375374e55cd0f456560a7d9f3..f71a177b6b185a631b82c76b8907495bb4e8cb38 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -54,6 +54,10 @@ #define SPEC_CTRL_RRSBA_DIS_S_SHIFT 6 /* Disable RRSBA behavior */ #define SPEC_CTRL_RRSBA_DIS_S BIT(SPEC_CTRL_RRSBA_DIS_S_SHIFT) +/* A mask for bits which the kernel toggles when controlling mitigations */ +#define SPEC_CTRL_MITIGATIONS_MASK (SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD \ + | SPEC_CTRL_RRSBA_DIS_S) + #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */ #define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */ diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index d428d611a43a97fa4ce48671c8ac0a89b73e7b55..60514502ead676dabc1ea44d7f71875fcb3a4e6e 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -682,6 +682,7 @@ extern void load_direct_gdt(int); extern void load_fixmap_gdt(int); extern void load_percpu_segment(int); extern void cpu_init(void); +extern void cpu_init_secondary(void); extern void cpu_init_exception_handling(void); extern void cr4_init(void); @@ -838,8 +839,9 @@ bool xen_set_default_idle(void); #define xen_set_default_idle 0 #endif -void stop_this_cpu(void *dummy); -void 
microcode_check(void); +void __noreturn stop_this_cpu(void *dummy); +void microcode_check(struct cpuinfo_x86 *prev_info); +void store_cpu_caps(struct cpuinfo_x86 *info); enum l1tf_mitigations { L1TF_MITIGATION_OFF, diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h index 04c17be9b5fdab86845999cc594634558fec18e3..bc5b4d788c08dbc8f47bb8d72c7f267982e716e6 100644 --- a/arch/x86/include/asm/reboot.h +++ b/arch/x86/include/asm/reboot.h @@ -25,6 +25,8 @@ void __noreturn machine_real_restart(unsigned int type); #define MRR_BIOS 0 #define MRR_APM 1 +void cpu_emergency_disable_virtualization(void); + typedef void (*nmi_shootdown_cb)(int, struct pt_regs*); void nmi_panic_self_stop(struct pt_regs *regs); void nmi_shootdown_cpus(nmi_shootdown_cb callback); diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index 07603064df8fcf5b5f25c50fcf5655f6bb0461e5..b9ccdf5ea98baeacac4bbc1c7f767a4bdce78d49 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -51,24 +51,27 @@ DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key); * simple as possible. * Must be called with preemption disabled. */ -static void __resctrl_sched_in(void) +static inline void __resctrl_sched_in(struct task_struct *tsk) { struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state); u32 closid = state->default_closid; u32 rmid = state->default_rmid; + u32 tmp; /* * If this task has a closid/rmid assigned, use it. * Else use the closid/rmid assigned to this cpu. */ if (static_branch_likely(&rdt_alloc_enable_key)) { - if (current->closid) - closid = current->closid; + tmp = READ_ONCE(tsk->closid); + if (tmp) + closid = tmp; } if (static_branch_likely(&rdt_mon_enable_key)) { - if (current->rmid) - rmid = current->rmid; + tmp = READ_ONCE(tsk->rmid); + if (tmp) + rmid = tmp; } if (closid != state->cur_closid || rmid != state->cur_rmid) { @@ -78,17 +81,17 @@ static void __resctrl_sched_in(void) } } -static inline void resctrl_sched_in(void) +static inline void resctrl_sched_in(struct task_struct *tsk) { if (static_branch_likely(&rdt_enable_key)) - __resctrl_sched_in(); + __resctrl_sched_in(tsk); } void resctrl_cpu_detect(struct cpuinfo_x86 *c); #else -static inline void resctrl_sched_in(void) {} +static inline void resctrl_sched_in(struct task_struct *tsk) {} static inline void resctrl_cpu_detect(struct cpuinfo_x86 *c) {} #endif /* CONFIG_X86_CPU_RESCTRL */ diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h index fda3e7747c2238f2d8f1b81594026b81c631caae..8eefa3386d8cefd655e4b5c3f8e98e446d2a356e 100644 --- a/arch/x86/include/asm/virtext.h +++ b/arch/x86/include/asm/virtext.h @@ -120,7 +120,21 @@ static inline void cpu_svm_disable(void) wrmsrl(MSR_VM_HSAVE_PA, 0); rdmsrl(MSR_EFER, efer); - wrmsrl(MSR_EFER, efer & ~EFER_SVME); + if (efer & EFER_SVME) { + /* + * Force GIF=1 prior to disabling SVM to ensure INIT and NMI + * aren't blocked, e.g. if a fatal error occurred between CLGI + * and STGI. Note, STGI may #UD if SVM is disabled from NMI + * context between reading EFER and executing STGI. In that + * case, GIF must already be set, otherwise the NMI would have + * been blocked, so just eat the fault. 
+ */ + asm_volatile_goto("1: stgi\n\t" + _ASM_EXTABLE(1b, %l[fault]) + ::: "memory" : fault); +fault: + wrmsrl(MSR_EFER, efer & ~EFER_SVME); + } } /** Makes sure SVM is disabled, if it is supported on the CPU diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index 49ae4e1ac9cd8b58d8ad9eeb7d7f2483020711aa..d28d43d774a26ddffb3281af0202436fe2d70402 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c @@ -79,6 +79,21 @@ void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags, */ flags->bm_control = 0; } + if (c->x86_vendor == X86_VENDOR_AMD && c->x86 >= 0x17) { + /* + * For all AMD Zen or newer CPUs that support C3, caches + * should not be flushed by software while entering C3 + * type state. Set bm->check to 1 so that kernel doesn't + * need to execute cache flush operation. + */ + flags->bm_check = 1; + /* + * In current AMD C state implementation ARB_DIS is no longer + * used. So set bm_control to zero to indicate ARB_DIS is not + * required while entering C3 type state. + */ + flags->bm_control = 0; + } } EXPORT_SYMBOL(acpi_processor_power_init_bm_check); diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index ec3fa4dc9031847a0d10b212a91372e9cb268de8..89a9b7754476547cc2aa83e71015842e0349444a 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -932,6 +932,15 @@ void init_spectral_chicken(struct cpuinfo_x86 *c) } } #endif + /* + * Work around Erratum 1386. The XSAVES instruction malfunctions in + * certain circumstances on Zen1/2 uarch, and not all parts have had + * updated microcode at the time of writing (March 2023). + * + * Affected parts all have no supervisor XSAVE states, meaning that + * the XSAVEC instruction (which works fine) is equivalent. + */ + clear_cpu_cap(c, X86_FEATURE_XSAVES); } static void init_amd_zn(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 2f55e0f9d2805fda6dfa79bedd8cff9850f46eb8..c81b8b029b680e80dbd452a07c4b0a47ee6b0a02 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -136,9 +136,17 @@ void __init check_bugs(void) * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD * init code as it is not enumerated and depends on the family. */ - if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) + if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) { rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); + /* + * Previously running kernel (kexec), may have some controls + * turned ON. Clear them and let the mitigations setup below + * rediscover them based on configuration. + */ + x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK; + } + /* Select the proper CPU mitigations before patching alternatives: */ spectre_v1_select_mitigation(); spectre_v2_select_mitigation(); diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 56573241d0293df4589003108ed2c53f1a636c1c..e2dee60108460510f38d0e7b69f6b4ebb9537a01 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -2048,13 +2048,12 @@ void cpu_init_exception_handling(void) /* * cpu_init() initializes state that is per-CPU. Some data is already - * initialized (naturally) in the bootstrap process, such as the GDT - * and IDT. We reload them nevertheless, this function acts as a - * 'CPU state barrier', nothing should get across. + * initialized (naturally) in the bootstrap process, such as the GDT. 
We + * reload it nevertheless, this function acts as a 'CPU state barrier', + * nothing should get across. */ void cpu_init(void) { - struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw); struct task_struct *cur = current; int cpu = raw_smp_processor_id(); @@ -2067,8 +2066,6 @@ void cpu_init(void) early_cpu_to_node(cpu) != NUMA_NO_NODE) set_numa_node(early_cpu_to_node(cpu)); #endif - setup_getcpu(cpu); - pr_debug("Initializing CPU#%d\n", cpu); if (IS_ENABLED(CONFIG_X86_64) || cpu_feature_enabled(X86_FEATURE_VME) || @@ -2080,7 +2077,6 @@ void cpu_init(void) * and set up the GDT descriptor: */ switch_to_new_gdt(cpu); - load_current_idt(); if (IS_ENABLED(CONFIG_X86_64)) { loadsegment(fs, 0); @@ -2100,12 +2096,6 @@ void cpu_init(void) initialize_tlbstate_and_flush(); enter_lazy_tlb(&init_mm, cur); - /* Initialize the TSS. */ - tss_setup_ist(tss); - tss_setup_io_bitmap(tss); - set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss); - - load_TR_desc(); /* * sp0 points to the entry trampoline stack regardless of what task * is running. @@ -2127,35 +2117,64 @@ void cpu_init(void) load_fixmap_gdt(cpu); } -/* +#ifdef CONFIG_SMP +void cpu_init_secondary(void) +{ + /* + * Relies on the BP having set-up the IDT tables, which are loaded + * on this CPU in cpu_init_exception_handling(). + */ + cpu_init_exception_handling(); + cpu_init(); +} +#endif + +#ifdef CONFIG_MICROCODE_LATE_LOADING +/** + * store_cpu_caps() - Store a snapshot of CPU capabilities + * @curr_info: Pointer where to store it + * + * Returns: None + */ +void store_cpu_caps(struct cpuinfo_x86 *curr_info) +{ + /* Reload CPUID max function as it might've changed. */ + curr_info->cpuid_level = cpuid_eax(0); + + /* Copy all capability leafs and pick up the synthetic ones. */ + memcpy(&curr_info->x86_capability, &boot_cpu_data.x86_capability, + sizeof(curr_info->x86_capability)); + + /* Get the hardware CPUID leafs */ + get_cpu_cap(curr_info); +} + +/** + * microcode_check() - Check if any CPU capabilities changed after an update. + * @prev_info: CPU capabilities stored before an update. + * * The microcode loader calls this upon late microcode load to recheck features, * only when microcode has been updated. Caller holds microcode_mutex and CPU * hotplug lock. + * + * Return: None */ -void microcode_check(void) +void microcode_check(struct cpuinfo_x86 *prev_info) { - struct cpuinfo_x86 info; + struct cpuinfo_x86 curr_info; perf_check_microcode(); - /* Reload CPUID max function as it might've changed. */ - info.cpuid_level = cpuid_eax(0); - - /* - * Copy all capability leafs to pick up the synthetic ones so that - * memcmp() below doesn't fail on that. The ones coming from CPUID will - * get overwritten in get_cpu_cap(). 
- */ - memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)); + store_cpu_caps(&curr_info); - get_cpu_cap(&info); - - if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability))) + if (!memcmp(&prev_info->x86_capability, &curr_info.x86_capability, + sizeof(prev_info->x86_capability))) return; pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n"); pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n"); } +#endif /* * Invoked from core CPU hotplug code after hotplug operations diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 234a96f25248dbb32dd3c68f133500dfd0d003c6..d3bce6d380ed6a6f6848b012e4897c1cd0aaf199 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -55,7 +55,9 @@ struct cont_desc { }; static u32 ucode_new_rev; -static u8 amd_ucode_patch[PATCH_MAX_SIZE]; + +/* One blob per node. */ +static u8 amd_ucode_patch[MAX_NUMNODES][PATCH_MAX_SIZE]; /* * Microcode patch container file is prepended to the initrd in cpio @@ -429,7 +431,7 @@ apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_p patch = (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch); #else new_rev = &ucode_new_rev; - patch = &amd_ucode_patch; + patch = &amd_ucode_patch[0]; #endif desc.cpuid_1_eax = cpuid_1_eax; @@ -548,8 +550,7 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax) apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false); } -static enum ucode_state -load_microcode_amd(bool save, u8 family, const u8 *data, size_t size); +static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size); int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax) { @@ -567,19 +568,19 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax) if (!desc.mc) return -EINVAL; - ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size); + ret = load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size); if (ret > UCODE_UPDATED) return -EINVAL; return 0; } -void reload_ucode_amd(void) +void reload_ucode_amd(unsigned int cpu) { - struct microcode_amd *mc; u32 rev, dummy __always_unused; + struct microcode_amd *mc; - mc = (struct microcode_amd *)amd_ucode_patch; + mc = (struct microcode_amd *)amd_ucode_patch[cpu_to_node(cpu)]; rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); @@ -845,9 +846,10 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, return UCODE_OK; } -static enum ucode_state -load_microcode_amd(bool save, u8 family, const u8 *data, size_t size) +static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size) { + struct cpuinfo_x86 *c; + unsigned int nid, cpu; struct ucode_patch *p; enum ucode_state ret; @@ -860,22 +862,22 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size) return ret; } - p = find_patch(0); - if (!p) { - return ret; - } else { - if (boot_cpu_data.microcode >= p->patch_id) - return ret; + for_each_node(nid) { + cpu = cpumask_first(cpumask_of_node(nid)); + c = &cpu_data(cpu); - ret = UCODE_NEW; - } + p = find_patch(cpu); + if (!p) + continue; - /* save BSP's matching patch for early load */ - if (!save) - return ret; + if (c->microcode >= p->patch_id) + continue; - memset(amd_ucode_patch, 0, PATCH_MAX_SIZE); - memcpy(amd_ucode_patch, p->data, min_t(u32, p->size, PATCH_MAX_SIZE)); + ret = UCODE_NEW; + + 
memset(&amd_ucode_patch[nid], 0, PATCH_MAX_SIZE); + memcpy(&amd_ucode_patch[nid], p->data, min_t(u32, p->size, PATCH_MAX_SIZE)); + } return ret; } @@ -901,12 +903,11 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device, { char fw_name[36] = "amd-ucode/microcode_amd.bin"; struct cpuinfo_x86 *c = &cpu_data(cpu); - bool bsp = c->cpu_index == boot_cpu_data.cpu_index; enum ucode_state ret = UCODE_NFOUND; const struct firmware *fw; /* reload ucode container only on the boot cpu */ - if (!refresh_fw || !bsp) + if (!refresh_fw) return UCODE_OK; if (c->x86 >= 0x15) @@ -921,7 +922,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device, if (!verify_container(fw->data, fw->size, false)) goto fw_release; - ret = load_microcode_amd(bsp, c->x86, fw->data, fw->size); + ret = load_microcode_amd(c->x86, fw->data, fw->size); fw_release: release_firmware(fw); diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 0b1732b98e719f3da832cfe44bf93ca1617e4e89..24254d141178945f75ba7596280773abbb0ea082 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -55,7 +55,7 @@ LIST_HEAD(microcode_cache); * All non cpu-hotplug-callback call sites use: * * - microcode_mutex to synchronize with each other; - * - get/put_online_cpus() to synchronize with + * - cpus_read_lock/unlock() to synchronize with * the cpu-hotplug-callback call sites. * * We guarantee that only a single cpu is being @@ -315,7 +315,7 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa) #endif } -void reload_early_microcode(void) +void reload_early_microcode(unsigned int cpu) { int vendor, family; @@ -329,7 +329,7 @@ void reload_early_microcode(void) break; case X86_VENDOR_AMD: if (family >= 0x10) - reload_ucode_amd(); + reload_ucode_amd(cpu); break; default: break; @@ -390,101 +390,10 @@ static int apply_microcode_on_target(int cpu) return ret; } -#ifdef CONFIG_MICROCODE_OLD_INTERFACE -static int do_microcode_update(const void __user *buf, size_t size) -{ - int error = 0; - int cpu; - - for_each_online_cpu(cpu) { - struct ucode_cpu_info *uci = ucode_cpu_info + cpu; - enum ucode_state ustate; - - if (!uci->valid) - continue; - - ustate = microcode_ops->request_microcode_user(cpu, buf, size); - if (ustate == UCODE_ERROR) { - error = -1; - break; - } else if (ustate == UCODE_NEW) { - apply_microcode_on_target(cpu); - } - } - - return error; -} - -static int microcode_open(struct inode *inode, struct file *file) -{ - return capable(CAP_SYS_RAWIO) ? 
stream_open(inode, file) : -EPERM; -} - -static ssize_t microcode_write(struct file *file, const char __user *buf, - size_t len, loff_t *ppos) -{ - ssize_t ret = -EINVAL; - unsigned long nr_pages = totalram_pages(); - - if ((len >> PAGE_SHIFT) > nr_pages) { - pr_err("too much data (max %ld pages)\n", nr_pages); - return ret; - } - - get_online_cpus(); - mutex_lock(µcode_mutex); - - if (do_microcode_update(buf, len) == 0) - ret = (ssize_t)len; - - if (ret > 0) - perf_check_microcode(); - - mutex_unlock(µcode_mutex); - put_online_cpus(); - - return ret; -} - -static const struct file_operations microcode_fops = { - .owner = THIS_MODULE, - .write = microcode_write, - .open = microcode_open, - .llseek = no_llseek, -}; - -static struct miscdevice microcode_dev = { - .minor = MICROCODE_MINOR, - .name = "microcode", - .nodename = "cpu/microcode", - .fops = µcode_fops, -}; - -static int __init microcode_dev_init(void) -{ - int error; - - error = misc_register(µcode_dev); - if (error) { - pr_err("can't misc_register on minor=%d\n", MICROCODE_MINOR); - return error; - } - - return 0; -} - -static void __exit microcode_dev_exit(void) -{ - misc_deregister(µcode_dev); -} -#else -#define microcode_dev_init() 0 -#define microcode_dev_exit() do { } while (0) -#endif - /* fake device for request_firmware */ static struct platform_device *microcode_pdev; +#ifdef CONFIG_MICROCODE_LATE_LOADING /* * Late loading dance. Why the heavy-handed stomp_machine effort? * @@ -599,16 +508,27 @@ static int __reload_late(void *info) */ static int microcode_reload_late(void) { - int ret; + int old = boot_cpu_data.microcode, ret; + struct cpuinfo_x86 prev_info; atomic_set(&late_cpus_in, 0); atomic_set(&late_cpus_out, 0); - ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask); - if (ret == 0) - microcode_check(); + /* + * Take a snapshot before the microcode update in order to compare and + * check whether any bits changed after an update. 
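The reworked late-loading flow above is: snapshot the capability words with store_cpu_caps() before stop_machine_cpuslocked(), apply the update, then let microcode_check() compare a fresh read against the snapshot with memcmp() and warn if anything changed. A compact userspace sketch of that snapshot-and-compare idiom; cpuinfo_snapshot and read_current_caps() are made-up stand-ins for cpuinfo_x86 and get_cpu_cap(), not kernel interfaces:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define NCAPS 4   /* stand-in for the kernel's capability word count */

    struct cpuinfo_snapshot {
        unsigned int cpuid_level;
        uint32_t     caps[NCAPS];
    };

    /* Made-up stand-in for get_cpu_cap(): in the kernel this re-reads CPUID. */
    static void read_current_caps(struct cpuinfo_snapshot *s)
    {
        static int updated;            /* pretend the second read happens post-update */

        memset(s, 0, sizeof(*s));
        s->cpuid_level = 0x16;
        s->caps[0] = 0x00ff00ff;
        if (updated++)
            s->caps[1] |= 1u << 5;     /* a feature bit that appeared after the update */
    }

    int main(void)
    {
        struct cpuinfo_snapshot prev, curr;

        read_current_caps(&prev);      /* store_cpu_caps(&prev_info) analogue */
        /* ... the late load itself would happen here ... */
        read_current_caps(&curr);      /* microcode_check() side */

        if (memcmp(prev.caps, curr.caps, sizeof(prev.caps)))
            puts("CPU features changed after loading microcode, but might not take effect");
        else
            puts("no capability change detected");
        return 0;
    }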
+ */ + store_cpu_caps(&prev_info); - pr_info("Reload completed, microcode revision: 0x%x\n", boot_cpu_data.microcode); + ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask); + if (!ret) { + pr_info("Reload succeeded, microcode revision: 0x%x -> 0x%x\n", + old, boot_cpu_data.microcode); + microcode_check(&prev_info); + } else { + pr_info("Reload failed, current microcode revision: 0x%x\n", + boot_cpu_data.microcode); + } return ret; } @@ -629,7 +549,7 @@ static ssize_t reload_store(struct device *dev, if (val != 1) return size; - get_online_cpus(); + cpus_read_lock(); ret = check_online_cpus(); if (ret) @@ -644,7 +564,7 @@ static ssize_t reload_store(struct device *dev, mutex_unlock(µcode_mutex); put: - put_online_cpus(); + cpus_read_unlock(); if (ret == 0) ret = size; @@ -652,6 +572,9 @@ static ssize_t reload_store(struct device *dev, return ret; } +static DEVICE_ATTR_WO(reload); +#endif + static ssize_t version_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -668,7 +591,6 @@ static ssize_t pf_show(struct device *dev, return sprintf(buf, "0x%x\n", uci->cpu_sig.pf); } -static DEVICE_ATTR_WO(reload); static DEVICE_ATTR(version, 0444, version_show, NULL); static DEVICE_ATTR(processor_flags, 0444, pf_show, NULL); @@ -785,7 +707,7 @@ void microcode_bsp_resume(void) if (uci->valid && uci->mc) microcode_ops->apply_microcode(cpu); else if (!uci->mc) - reload_early_microcode(); + reload_early_microcode(cpu); } static struct syscore_ops mc_syscore_ops = { @@ -821,7 +743,9 @@ static int mc_cpu_down_prep(unsigned int cpu) } static struct attribute *cpu_root_microcode_attrs[] = { +#ifdef CONFIG_MICROCODE_LATE_LOADING &dev_attr_reload.attr, +#endif NULL }; @@ -853,14 +777,14 @@ int __init microcode_init(void) if (IS_ERR(microcode_pdev)) return PTR_ERR(microcode_pdev); - get_online_cpus(); + cpus_read_lock(); mutex_lock(µcode_mutex); error = subsys_interface_register(&mc_cpu_interface); if (!error) perf_check_microcode(); mutex_unlock(µcode_mutex); - put_online_cpus(); + cpus_read_unlock(); if (error) goto out_pdev; @@ -873,10 +797,6 @@ int __init microcode_init(void) goto out_driver; } - error = microcode_dev_init(); - if (error) - goto out_ucode_group; - register_syscore_ops(&mc_syscore_ops); cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:starting", mc_cpu_starting, NULL); @@ -887,18 +807,14 @@ int __init microcode_init(void) return 0; - out_ucode_group: - sysfs_remove_group(&cpu_subsys.dev_root->kobj, - &cpu_root_microcode_group); - out_driver: - get_online_cpus(); + cpus_read_lock(); mutex_lock(µcode_mutex); subsys_interface_unregister(&mc_cpu_interface); mutex_unlock(µcode_mutex); - put_online_cpus(); + cpus_read_unlock(); out_pdev: platform_device_unregister(microcode_pdev); diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index ff26de11b3f1524328c9c9a434d86d9cf266877c..1a943743cfe4bd24baf9a0b8e1af3389ec645376 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -311,7 +311,7 @@ static void update_cpu_closid_rmid(void *info) * executing task might have its own closid selected. Just reuse * the context switch code. */ - resctrl_sched_in(); + resctrl_sched_in(current); } /* @@ -532,7 +532,7 @@ static void _update_task_closid_rmid(void *task) * Otherwise, the MSR is updated when the task is scheduled in. 
*/ if (task == current) - resctrl_sched_in(); + resctrl_sched_in(task); } static void update_task_closid_rmid(struct task_struct *t) @@ -563,11 +563,11 @@ static int __rdtgroup_move_task(struct task_struct *tsk, */ if (rdtgrp->type == RDTCTRL_GROUP) { - tsk->closid = rdtgrp->closid; - tsk->rmid = rdtgrp->mon.rmid; + WRITE_ONCE(tsk->closid, rdtgrp->closid); + WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid); } else if (rdtgrp->type == RDTMON_GROUP) { if (rdtgrp->mon.parent->closid == tsk->closid) { - tsk->rmid = rdtgrp->mon.rmid; + WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid); } else { rdt_last_cmd_puts("Can't move task to different control group\n"); return -EINVAL; @@ -2312,8 +2312,8 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to, for_each_process_thread(p, t) { if (!from || is_closid_match(t, from) || is_rmid_match(t, from)) { - t->closid = to->closid; - t->rmid = to->mon.rmid; + WRITE_ONCE(t->closid, to->closid); + WRITE_ONCE(t->rmid, to->mon.rmid); /* * Order the closid/rmid stores above before the loads diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index b1deacbeb266934b76c7d46b556784d00ae8cb91..a932a07d002538ead40b174ffe9fabc772af1c94 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -37,7 +37,6 @@ #include #include #include -#include #include #include #include @@ -94,15 +93,6 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs) */ cpu_crash_vmclear_loaded_vmcss(); - /* Disable VMX or SVM if needed. - * - * We need to disable virtualization on all CPUs. - * Having VMX or SVM enabled on any CPU may break rebooting - * after the kdump kernel has finished its task. - */ - cpu_emergency_vmxoff(); - cpu_emergency_svm_disable(); - /* * Disable Intel PT to stop its logging */ @@ -161,12 +151,7 @@ void native_machine_crash_shutdown(struct pt_regs *regs) */ cpu_crash_vmclear_loaded_vmcss(); - /* Booting kdump kernel with VMX or SVM enabled won't work, - * because (among other limitations) we can't disable paging - * with the virt flags. - */ - cpu_emergency_vmxoff(); - cpu_emergency_svm_disable(); + cpu_emergency_disable_virtualization(); /* * Disable Intel PT to stop its logging diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 97aa900386cbbe507cdf538de42f3f226dd02bdb..b4964300153a1ae6d43bba564af7ae5b2bf8b52e 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -351,7 +351,7 @@ unsigned long oops_begin(void) } NOKPROBE_SYMBOL(oops_begin); -void __noreturn rewind_stack_do_exit(int signr); +void __noreturn rewind_stack_and_make_dead(int signr); void oops_end(unsigned long flags, struct pt_regs *regs, int signr) { @@ -386,7 +386,7 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr) * reuse the task stack and that existing poisons are invalid. 
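The resctrl changes in this series pair WRITE_ONCE() on tsk->closid/tsk->rmid on the writer side (__rdtgroup_move_task() and rdt_move_group_tasks() above) with READ_ONCE() in __resctrl_sched_in(), so a context switch on another CPU observes either the old or the new value, never a torn or twice-read one. A rough userspace sketch of the same single-copy store/load discipline, using C11 relaxed atomics instead of the kernel macros; struct task and the printed "PQR_ASSOC" line are only illustrative:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdatomic.h>

    /* Stand-in for the task fields written by one CPU and read at context
     * switch on another; relaxed atomics give the same "no tearing, no
     * compiler re-reads" guarantee that READ_ONCE()/WRITE_ONCE() provide. */
    struct task {
        _Atomic uint32_t closid;
        _Atomic uint32_t rmid;
    };

    static void move_task(struct task *t, uint32_t closid, uint32_t rmid)
    {
        atomic_store_explicit(&t->closid, closid, memory_order_relaxed);
        atomic_store_explicit(&t->rmid, rmid, memory_order_relaxed);
    }

    static void sched_in(struct task *t, uint32_t def_closid, uint32_t def_rmid)
    {
        /* Read each field exactly once and fall back to the per-CPU defaults,
         * mirroring the shape of __resctrl_sched_in(). */
        uint32_t closid = atomic_load_explicit(&t->closid, memory_order_relaxed);
        uint32_t rmid = atomic_load_explicit(&t->rmid, memory_order_relaxed);

        if (!closid)
            closid = def_closid;
        if (!rmid)
            rmid = def_rmid;

        printf("would program PQR_ASSOC with closid=%u rmid=%u\n", closid, rmid);
    }

    int main(void)
    {
        static struct task t;          /* zero-initialized: no assignment yet */

        sched_in(&t, 1, 1);            /* uses the per-CPU defaults */
        move_task(&t, 7, 42);
        sched_in(&t, 1, 1);            /* uses the task's own assignment */
        return 0;
    }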
*/ kasan_unpoison_task_stack(current); - rewind_stack_do_exit(signr); + rewind_stack_and_make_dead(signr); } NOKPROBE_SYMBOL(oops_end); diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c index 282b4ee1339f87ce315c6c766a84689b2af394c6..f325389d035168b8e76341dc0d9f6f06a38804db 100644 --- a/arch/x86/kernel/i8259.c +++ b/arch/x86/kernel/i8259.c @@ -114,6 +114,7 @@ static void make_8259A_irq(unsigned int irq) disable_irq_nosync(irq); io_apic_irqs &= ~(1<init(0); - for (i = 0; i < nr_legacy_irqs(); i++) + for (i = 0; i < nr_legacy_irqs(); i++) { irq_set_chip_and_handler(i, chip, handle_level_irq); + irq_set_status_flags(i, IRQ_LEVEL); + } } void __init init_IRQ(void) diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 3d620149200647a8e27ab4bb5c40ae26e248175d..e37e5e82481a59a1601001d0f7bf5f8d87bebedf 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -46,8 +46,8 @@ unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr) /* This function only handles jump-optimized kprobe */ if (kp && kprobe_optimized(kp)) { op = container_of(kp, struct optimized_kprobe, kp); - /* If op->list is not empty, op is under optimizing */ - if (list_empty(&op->list)) + /* If op is optimized or under unoptimizing */ + if (list_empty(&op->list) || optprobe_queued_unopt(op)) goto found; } } @@ -346,7 +346,7 @@ int arch_check_optimized_kprobe(struct optimized_kprobe *op) for (i = 1; i < op->optinsn.size; i++) { p = get_kprobe(op->kp.addr + i); - if (p && !kprobe_disabled(p)) + if (p && !kprobe_disarmed(p)) return -EEXIST; } diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 5e17c3939dd1c6f89609efe4e4a2d6858208d83e..1cba09a9f1c13ab31f385d29cd6f031d08da4f55 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -720,7 +720,7 @@ bool xen_set_default_idle(void) } #endif -void stop_this_cpu(void *dummy) +void __noreturn stop_this_cpu(void *dummy) { local_irq_disable(); /* diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 98bf8fd189025d02168436669b28f11cfad81b73..3b4c394a1a768bab1cc87e089fc7e396cbcefdfa 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -214,7 +214,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) switch_fpu_finish(next_p); /* Load the Intel cache allocation PQR MSR. */ - resctrl_sched_in(); + resctrl_sched_in(next_p); return prev_p; } diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index ad3f82a18de9df1e151e41ea3c5f184a493b9cac..1d8bc4736fb7949106ccd24e8523a9e3a2e84afa 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -629,7 +629,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) } /* Load the Intel cache allocation PQR MSR. 
*/ - resctrl_sched_in(); + resctrl_sched_in(next_p); return prev_p; } diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index df3514835b3563522fdf93a2e9e36aabb2234b69..4d8c0e2581500f1f269556aa6c5a92974777abc5 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -528,33 +528,29 @@ static inline void kb_wait(void) } } -static void vmxoff_nmi(int cpu, struct pt_regs *regs) -{ - cpu_emergency_vmxoff(); -} +static inline void nmi_shootdown_cpus_on_restart(void); -/* Use NMIs as IPIs to tell all CPUs to disable virtualization */ -static void emergency_vmx_disable_all(void) +static void emergency_reboot_disable_virtualization(void) { /* Just make sure we won't change CPUs while doing this */ local_irq_disable(); /* - * Disable VMX on all CPUs before rebooting, otherwise we risk hanging - * the machine, because the CPU blocks INIT when it's in VMX root. + * Disable virtualization on all CPUs before rebooting to avoid hanging + * the system, as VMX and SVM block INIT when running in the host. * * We can't take any locks and we may be on an inconsistent state, so - * use NMIs as IPIs to tell the other CPUs to exit VMX root and halt. + * use NMIs as IPIs to tell the other CPUs to disable VMX/SVM and halt. * - * Do the NMI shootdown even if VMX if off on _this_ CPU, as that - * doesn't prevent a different CPU from being in VMX root operation. + * Do the NMI shootdown even if virtualization is off on _this_ CPU, as + * other CPUs may have virtualization enabled. */ - if (cpu_has_vmx()) { - /* Safely force _this_ CPU out of VMX root operation. */ - __cpu_emergency_vmxoff(); + if (cpu_has_vmx() || cpu_has_svm(NULL)) { + /* Safely force _this_ CPU out of VMX/SVM operation. */ + cpu_emergency_disable_virtualization(); - /* Halt and exit VMX root operation on the other CPUs. */ - nmi_shootdown_cpus(vmxoff_nmi); + /* Disable VMX/SVM and halt on other CPUs. */ + nmi_shootdown_cpus_on_restart(); } } @@ -590,7 +586,7 @@ static void native_machine_emergency_restart(void) unsigned short mode; if (reboot_emergency) - emergency_vmx_disable_all(); + emergency_reboot_disable_virtualization(); tboot_shutdown(TB_SHUTDOWN_REBOOT); @@ -795,6 +791,17 @@ void machine_crash_shutdown(struct pt_regs *regs) /* This is the CPU performing the emergency shutdown work. */ int crashing_cpu = -1; +/* + * Disable virtualization, i.e. VMX or SVM, to ensure INIT is recognized during + * reboot. VMX blocks INIT if the CPU is post-VMXON, and SVM blocks INIT if + * GIF=0, i.e. if the crash occurred between CLGI and STGI. + */ +void cpu_emergency_disable_virtualization(void) +{ + cpu_emergency_vmxoff(); + cpu_emergency_svm_disable(); +} + #if defined(CONFIG_SMP) static nmi_shootdown_cb shootdown_callback; @@ -817,7 +824,14 @@ static int crash_nmi_callback(unsigned int val, struct pt_regs *regs) return NMI_HANDLED; local_irq_disable(); - shootdown_callback(cpu, regs); + if (shootdown_callback) + shootdown_callback(cpu, regs); + + /* + * Prepare the CPU for reboot _after_ invoking the callback so that the + * callback can safely use virtualization instructions, e.g. VMCLEAR. 
+ */ + cpu_emergency_disable_virtualization(); atomic_dec(&waiting_for_crash_ipi); /* Assume hlt works */ @@ -828,18 +842,32 @@ static int crash_nmi_callback(unsigned int val, struct pt_regs *regs) return NMI_HANDLED; } -/* - * Halt all other CPUs, calling the specified function on each of them +/** + * nmi_shootdown_cpus - Stop other CPUs via NMI + * @callback: Optional callback to be invoked from the NMI handler + * + * The NMI handler on the remote CPUs invokes @callback, if not + * NULL, first and then disables virtualization to ensure that + * INIT is recognized during reboot. * - * This function can be used to halt all other CPUs on crash - * or emergency reboot time. The function passed as parameter - * will be called inside a NMI handler on all CPUs. + * nmi_shootdown_cpus() can only be invoked once. After the first + * invocation all other CPUs are stuck in crash_nmi_callback() and + * cannot respond to a second NMI. */ void nmi_shootdown_cpus(nmi_shootdown_cb callback) { unsigned long msecs; + local_irq_disable(); + /* + * Avoid certain doom if a shootdown already occurred; re-registering + * the NMI handler will cause list corruption, modifying the callback + * will do who knows what, etc... + */ + if (WARN_ON_ONCE(crash_ipi_issued)) + return; + /* Make a note of crashing cpu. Will be used in NMI callback. */ crashing_cpu = safe_smp_processor_id(); @@ -867,7 +895,17 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback) msecs--; } - /* Leave the nmi callback set */ + /* + * Leave the nmi callback set, shootdown is a one-time thing. Clearing + * the callback could result in a NULL pointer dereference if a CPU + * (finally) responds after the timeout expires. + */ +} + +static inline void nmi_shootdown_cpus_on_restart(void) +{ + if (!crash_ipi_issued) + nmi_shootdown_cpus(NULL); } /* @@ -897,6 +935,8 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback) /* No other CPUs to shoot down */ } +static inline void nmi_shootdown_cpus_on_restart(void) { } + void run_crash_ipi_callback(struct pt_regs *regs) { } diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index eff4ce3b10da7185cb770e35b6006516e72830db..95758ae120bafa608ed69e573a68ca0c2bb577dc 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -32,7 +32,7 @@ #include #include #include -#include +#include /* * Some notes on x86 processor bugs affecting SMP operation: @@ -122,7 +122,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs) if (raw_smp_processor_id() == atomic_read(&stopping_cpu)) return NMI_HANDLED; - cpu_emergency_vmxoff(); + cpu_emergency_disable_virtualization(); stop_this_cpu(NULL); return NMI_HANDLED; @@ -134,7 +134,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs) DEFINE_IDTENTRY_SYSVEC(sysvec_reboot) { ack_APIC_irq(); - cpu_emergency_vmxoff(); + cpu_emergency_disable_virtualization(); stop_this_cpu(NULL); } diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index e8e5515fb7e9c241cb9148a405c061946809d8a2..bda89ecc7799f5117b021d4cd79b035e6861e6cb 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -227,8 +227,7 @@ static void notrace start_secondary(void *unused) load_cr3(swapper_pg_dir); __flush_tlb_all(); #endif - cpu_init_exception_handling(); - cpu_init(); + cpu_init_secondary(); rcu_cpu_starting(raw_smp_processor_id()); x86_cpuinit.early_percpu_clock_init(); smp_callin(); diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c index 
653b7f617b61bfce19ccf0e0254462c00e18c5e5..9ea65611fba0bed1d79103283a91b705de550ecd 100644 --- a/arch/x86/kernel/sysfb_efi.c +++ b/arch/x86/kernel/sysfb_efi.c @@ -264,6 +264,14 @@ static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = { "Lenovo ideapad D330-10IGM"), }, }, + { + /* Lenovo IdeaPad Duet 3 10IGL5 with 1200x1920 portrait screen */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, + "IdeaPad Duet 3 10IGL5"), + }, + }, {}, }; diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 2a39a2df6f43e5e6294469eb9ff76ed71d8246d5..3780c728345c34d970484a1e94e1d6213669afde 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -1185,9 +1185,7 @@ void __init trap_init(void) idt_setup_traps(); - /* - * Should be a barrier for any external CPU state: - */ + cpu_init_exception_handling(); cpu_init(); idt_setup_ist_traps(); diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 260727eaa6b9629d1cd7cf3df2286c28e43043ff..21189804524a77f591cdfd6e0f6912e373da6df7 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -2115,10 +2115,14 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) break; case APIC_SELF_IPI: - if (apic_x2apic_mode(apic)) - kvm_apic_send_ipi(apic, APIC_DEST_SELF | (val & APIC_VECTOR_MASK), 0); - else + /* + * Self-IPI exists only when x2APIC is enabled. Bits 7:0 hold + * the vector, everything else is reserved. + */ + if (!apic_x2apic_mode(apic) || (val & ~APIC_VECTOR_MASK)) ret = 1; + else + kvm_apic_send_ipi(apic, APIC_DEST_SELF | val, 0); break; default: ret = 1; diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index ef5a72d80f54d93632c8f8d2e763687bbfd92ee4..7b2b61309d8a401f6f5fc3288f1ef8b0701ea86e 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -3480,8 +3480,14 @@ static void svm_cancel_injection(struct kvm_vcpu *vcpu) static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu) { - if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR && - to_svm(vcpu)->vmcb->control.exit_info_1) + struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control; + + /* + * Note, the next RIP must be provided as SRCU isn't held, i.e. KVM + * can't read guest memory (dereference memslots) to decode the WRMSR. 
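The APIC_SELF_IPI hunk above tightens register-write validation: the write is only legal in x2APIC mode, and only bits 7:0 (the vector) may be set; anything else is treated as an error instead of being silently masked. The same reject-reserved-bits pattern in a standalone form; x2apic_enabled, VECTOR_MASK and the return convention are illustrative stand-ins, not the KVM API:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define VECTOR_MASK 0xffu   /* bits 7:0 of the self-IPI register hold the vector */

    /* Returns 0 if the write is accepted, 1 if it must be rejected, mirroring
     * the shape of the check added in kvm_lapic_reg_write(). */
    static int self_ipi_write(bool x2apic_enabled, uint32_t val)
    {
        if (!x2apic_enabled || (val & ~VECTOR_MASK))
            return 1;                  /* wrong mode or reserved bits set */

        printf("send self-IPI, vector 0x%02x\n", val & VECTOR_MASK);
        return 0;
    }

    int main(void)
    {
        printf("xAPIC write:          %d\n", self_ipi_write(false, 0x30)); /* 1 */
        printf("x2APIC, clean write:  %d\n", self_ipi_write(true, 0x30));  /* 0 */
        printf("x2APIC, reserved set: %d\n", self_ipi_write(true, 0x130)); /* 1 */
        return 0;
    }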
+ */ + if (control->exit_code == SVM_EXIT_MSR && control->exit_info_1 && + nrips && control->next_rip) return handle_fastpath_set_msr_irqoff(vcpu); return EXIT_FASTPATH_NONE; diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 7fe780db83c6c92cc6aa0f20c6c346f017c5c9ce..5f6f926b02a9f8eb860c9ae2e593cc35db6eb75f 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -3334,18 +3334,15 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var) { u32 ar; - if (var->unusable || !var->present) - ar = 1 << 16; - else { - ar = var->type & 15; - ar |= (var->s & 1) << 4; - ar |= (var->dpl & 3) << 5; - ar |= (var->present & 1) << 7; - ar |= (var->avl & 1) << 12; - ar |= (var->l & 1) << 13; - ar |= (var->db & 1) << 14; - ar |= (var->g & 1) << 15; - } + ar = var->type & 15; + ar |= (var->s & 1) << 4; + ar |= (var->dpl & 3) << 5; + ar |= (var->present & 1) << 7; + ar |= (var->avl & 1) << 12; + ar |= (var->l & 1) << 13; + ar |= (var->db & 1) << 14; + ar |= (var->g & 1) << 15; + ar |= (var->unusable || !var->present) << 16; return ar; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index fb5a10c70beb452091c2afcb0c15ab5989bcef87..c1351335d22f0c7a9d935531037e0eae70fc9126 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -7549,7 +7549,9 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, write_fault_to_spt, emulation_type)) return 1; - if (ctxt->have_exception) { + + if (ctxt->have_exception && + !(emulation_type & EMULTYPE_SKIP)) { /* * #UD should result in just EMULATION_FAILED, and trap-like * exception should not be encountered during decode. diff --git a/arch/x86/um/vdso/um_vdso.c b/arch/x86/um/vdso/um_vdso.c index 2112b8d146688834897dde9595af02ac89d2c327..ff0f3b4b6c45ed567173766b5d370e2c6fc392a0 100644 --- a/arch/x86/um/vdso/um_vdso.c +++ b/arch/x86/um/vdso/um_vdso.c @@ -17,8 +17,10 @@ int __vdso_clock_gettime(clockid_t clock, struct __kernel_old_timespec *ts) { long ret; - asm("syscall" : "=a" (ret) : - "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory"); + asm("syscall" + : "=a" (ret) + : "0" (__NR_clock_gettime), "D" (clock), "S" (ts) + : "rcx", "r11", "memory"); return ret; } @@ -29,8 +31,10 @@ int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) { long ret; - asm("syscall" : "=a" (ret) : - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory"); + asm("syscall" + : "=a" (ret) + : "0" (__NR_gettimeofday), "D" (tv), "S" (tz) + : "rcx", "r11", "memory"); return ret; } diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index efc3a29cde8039aac2ea4368cd2649bd053bc7b1..129f23c0ab5539f9a4b6673cb38549dfcb578fa7 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -545,5 +545,5 @@ void die(const char * str, struct pt_regs * regs, long err) if (panic_on_oops) panic("Fatal exception"); - do_exit(err); + make_task_dead(err); } diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c index e8c2f9a3bf5ab7d2b6688bd46c1bc599190abb64..6f7da44bc0667efbfc4ddc060fb57f3e460ff705 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c @@ -707,15 +707,15 @@ static void *__bfq_bic_change_cgroup(struct bfq_data *bfqd, struct bfq_io_cq *bic, struct bfq_group *bfqg) { - struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0); - struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1); + struct bfq_queue *async_bfqq = bic_to_bfqq(bic, false); + struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, true); struct bfq_entity *entity; if (async_bfqq) { entity = &async_bfqq->entity; if (entity->sched_data != 
&bfqg->sched_data) { - bic_set_bfqq(bic, NULL, 0); + bic_set_bfqq(bic, NULL, false); bfq_release_process_ref(bfqd, async_bfqq); } } @@ -750,8 +750,8 @@ static void *__bfq_bic_change_cgroup(struct bfq_data *bfqd, * request from the old cgroup. */ bfq_put_cooperator(sync_bfqq); + bic_set_bfqq(bic, NULL, true); bfq_release_process_ref(bfqd, sync_bfqq); - bic_set_bfqq(bic, NULL, 1); } } } diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index da5669e34c882ccd882f246f8670dd2408b195ca..3662629cb361baea7fc56a719fa4e88fdc080217 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -373,6 +373,12 @@ struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync) void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync) { + struct bfq_queue *old_bfqq = bic->bfqq[is_sync]; + + /* Clear bic pointer if bfqq is detached from this bic */ + if (old_bfqq && old_bfqq->bic == bic) + old_bfqq->bic = NULL; + bic->bfqq[is_sync] = bfqq; } @@ -2818,7 +2824,7 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, /* * Merge queues (that is, let bic redirect its requests to new_bfqq) */ - bic_set_bfqq(bic, new_bfqq, 1); + bic_set_bfqq(bic, new_bfqq, true); bfq_mark_bfqq_coop(new_bfqq); /* * new_bfqq now belongs to at least two bics (it is a shared queue): @@ -4985,9 +4991,8 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync) unsigned long flags; spin_lock_irqsave(&bfqd->lock, flags); - bfqq->bic = NULL; - bfq_exit_bfqq(bfqd, bfqq); bic_set_bfqq(bic, NULL, is_sync); + bfq_exit_bfqq(bfqd, bfqq); spin_unlock_irqrestore(&bfqd->lock, flags); } } @@ -5073,9 +5078,11 @@ static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio) bfqq = bic_to_bfqq(bic, false); if (bfqq) { - bfq_release_process_ref(bfqd, bfqq); - bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic); + struct bfq_queue *old_bfqq = bfqq; + + bfqq = bfq_get_queue(bfqd, bio, false, bic); bic_set_bfqq(bic, bfqq, false); + bfq_release_process_ref(bfqd, old_bfqq); } bfqq = bic_to_bfqq(bic, true); @@ -5996,7 +6003,7 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq) return bfqq; } - bic_set_bfqq(bic, NULL, 1); + bic_set_bfqq(bic, NULL, true); bfq_put_cooperator(bfqq); diff --git a/block/bio-integrity.c b/block/bio-integrity.c index 4f6f140a44e064f18d9c6b3726e3fdcb8572ed40..a4cfc97275df6ed6b5ee146be5abf4df4c2f8cfd 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c @@ -428,6 +428,7 @@ int bio_integrity_clone(struct bio *bio, struct bio *bio_src, bip->bip_vcnt = bip_src->bip_vcnt; bip->bip_iter = bip_src->bip_iter; + bip->bip_flags = bip_src->bip_flags & ~BIP_BLOCK_INTEGRITY; return 0; } diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index eec5b3000f6aa1d11c72ef587267c9be3b8602f5..b95e8e58e24d83c2dbd63e7a22b81cad8215ad5b 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -1381,6 +1381,10 @@ int blkcg_activate_policy(struct request_queue *q, list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) pol->pd_init_fn(blkg->pd[pol->plid]); + if (pol->pd_online_fn) + list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) + pol->pd_online_fn(blkg->pd[pol->plid]); + __set_bit(pol->plid, q->blkcg_pols); ret = 0; diff --git a/block/blk-core.c b/block/blk-core.c index a50205bcb755419fd161c440f2e99746029cb753..593ba10475cb3404baa3128b0778a6d370d20339 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -700,9 +700,7 @@ static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part) if (op_is_flush(bio->bi_opf) && !bio_sectors(bio)) return false; - 
- WARN_ONCE(1, - "Trying to write to read-only block-device %s (partno %d)\n", + pr_warn("Trying to write to read-only block-device %s (partno %d)\n", bio_devname(bio, b), part->partno); /* Older lvm-tools actually trigger this */ return false; diff --git a/block/blk-iocost.c b/block/blk-iocost.c index fb8f959a7f327cad713fe2726db5dd9fe853fa58..9255b642d6adbf831cd9b67458d9c257878e1faa 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -872,9 +872,14 @@ static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops, *page = *seqio = *randio = 0; - if (bps) - *page = DIV64_U64_ROUND_UP(VTIME_PER_SEC, - DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE)); + if (bps) { + u64 bps_pages = DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE); + + if (bps_pages) + *page = DIV64_U64_ROUND_UP(VTIME_PER_SEC, bps_pages); + else + *page = 1; + } if (seqiops) { v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops); diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index b1c0ac8e0da25c6c33ca57e693eea1d08ddd8435..0948eecd8987c26791b1b2a3de3b8b6f6aa8cfc2 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -45,8 +45,7 @@ void blk_mq_sched_assign_ioc(struct request *rq) } /* - * Mark a hardware queue as needing a restart. For shared queues, maintain - * a count of how many hardware queues are marked for restart. + * Mark a hardware queue as needing a restart. */ void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) { @@ -110,7 +109,7 @@ static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list) /* * Only SCSI implements .get_budget and .put_budget, and SCSI restarts * its queue by itself in its completion handler, so we don't need to - * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE. + * restart queue if .get_budget() fails to get the budget. * * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to * be run again. This is necessary to avoid starving flushes. @@ -224,7 +223,7 @@ static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx, /* * Only SCSI implements .get_budget and .put_budget, and SCSI restarts * its queue by itself in its completion handler, so we don't need to - * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE. + * restart queue if .get_budget() fails to get the budget. * * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to * be run again. This is necessary to avoid starving flushes. diff --git a/block/blk-mq.c b/block/blk-mq.c index fe6926fccca7e0be5cdf0b6ae738db8bf9ec6fd8..02ca17c575c1a276d40e482cbc535620ca09ecd3 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -464,7 +464,8 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, * allocator for this for the rare use case of a command tied to * a specific queue. 
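The blk_mq_alloc_request_hctx() check that follows is changed from "at least one of NOWAIT/RESERVED is set" to "both must be set", which is easy to misread: !(flags & (A | B)) only fires when neither bit is present, while the two separate WARN_ON_ONCE() tests fire when either bit is missing. A small truth-table demo of the two predicates; the flag values are arbitrary stand-ins for BLK_MQ_REQ_NOWAIT and BLK_MQ_REQ_RESERVED:

    #include <stdio.h>
    #include <stddef.h>

    #define REQ_NOWAIT   (1u << 0)   /* stand-in for BLK_MQ_REQ_NOWAIT */
    #define REQ_RESERVED (1u << 1)   /* stand-in for BLK_MQ_REQ_RESERVED */

    int main(void)
    {
        unsigned int cases[] = { 0, REQ_NOWAIT, REQ_RESERVED,
                                 REQ_NOWAIT | REQ_RESERVED };

        for (size_t i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
            unsigned int flags = cases[i];
            int neither = !(flags & (REQ_NOWAIT | REQ_RESERVED));              /* old check */
            int missing_one = !(flags & REQ_NOWAIT) || !(flags & REQ_RESERVED); /* new check */

            printf("flags=%u  old-warn=%d  new-warn=%d\n",
                   flags, neither, missing_one);
        }
        return 0;
    }

Only the flags==3 case passes the new, stricter test, which is exactly the combination the allocator requires.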
*/ - if (WARN_ON_ONCE(!(flags & (BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED)))) + if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)) || + WARN_ON_ONCE(!(flags & BLK_MQ_REQ_RESERVED))) return ERR_PTR(-EINVAL); if (hctx_idx >= q->nr_hw_queues) diff --git a/crypto/essiv.c b/crypto/essiv.c index d012be23d496d09f7157ee68d8541d35a89e3f7d..85bb624e32b9be09dc502012e8c1216a0e9e5d0e 100644 --- a/crypto/essiv.c +++ b/crypto/essiv.c @@ -170,7 +170,12 @@ static void essiv_aead_done(struct crypto_async_request *areq, int err) struct aead_request *req = areq->data; struct essiv_aead_request_ctx *rctx = aead_request_ctx(req); + if (err == -EINPROGRESS) + goto out; + kfree(rctx->assoc); + +out: aead_request_complete(req, err); } @@ -246,7 +251,7 @@ static int essiv_aead_crypt(struct aead_request *req, bool enc) err = enc ? crypto_aead_encrypt(subreq) : crypto_aead_decrypt(subreq); - if (rctx->assoc && err != -EINPROGRESS) + if (rctx->assoc && err != -EINPROGRESS && err != -EBUSY) kfree(rctx->assoc); return err; } diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index 9d804831c8b3f9d1dba390cd6f92aab574af7967..a4ebbb889274eb95d62a489a7e784ed8b6a7e10e 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c @@ -214,16 +214,14 @@ static void pkcs1pad_encrypt_sign_complete_cb( struct crypto_async_request *child_async_req, int err) { struct akcipher_request *req = child_async_req->data; - struct crypto_async_request async_req; if (err == -EINPROGRESS) - return; + goto out; + + err = pkcs1pad_encrypt_sign_complete(req, err); - async_req.data = req->base.data; - async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req)); - async_req.flags = child_async_req->flags; - req->base.complete(&async_req, - pkcs1pad_encrypt_sign_complete(req, err)); +out: + akcipher_request_complete(req, err); } static int pkcs1pad_encrypt(struct akcipher_request *req) @@ -332,15 +330,14 @@ static void pkcs1pad_decrypt_complete_cb( struct crypto_async_request *child_async_req, int err) { struct akcipher_request *req = child_async_req->data; - struct crypto_async_request async_req; if (err == -EINPROGRESS) - return; + goto out; + + err = pkcs1pad_decrypt_complete(req, err); - async_req.data = req->base.data; - async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req)); - async_req.flags = child_async_req->flags; - req->base.complete(&async_req, pkcs1pad_decrypt_complete(req, err)); +out: + akcipher_request_complete(req, err); } static int pkcs1pad_decrypt(struct akcipher_request *req) @@ -512,15 +509,14 @@ static void pkcs1pad_verify_complete_cb( struct crypto_async_request *child_async_req, int err) { struct akcipher_request *req = child_async_req->data; - struct crypto_async_request async_req; if (err == -EINPROGRESS) - return; + goto out; - async_req.data = req->base.data; - async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req)); - async_req.flags = child_async_req->flags; - req->base.complete(&async_req, pkcs1pad_verify_complete(req, err)); + err = pkcs1pad_verify_complete(req, err); + +out: + akcipher_request_complete(req, err); } /* diff --git a/crypto/seqiv.c b/crypto/seqiv.c index 0899d527c2845b648bc1872daf5409ca1c4a8792..b1bcfe537daf11059aeef1c8aae7b04c2b05641a 100644 --- a/crypto/seqiv.c +++ b/crypto/seqiv.c @@ -23,7 +23,7 @@ static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err) struct aead_request *subreq = aead_request_ctx(req); struct crypto_aead *geniv; - if (err == -EINPROGRESS) + if (err == -EINPROGRESS || err == -EBUSY) return; if (err) diff --git a/crypto/xts.c 
b/crypto/xts.c index ad45b009774b1172eb9e7e34c3f2a14c1f6b65c4..c6a105dba38b946ecf8e26a6ff884d21b9ba2d60 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -202,12 +202,12 @@ static void xts_encrypt_done(struct crypto_async_request *areq, int err) if (!err) { struct xts_request_ctx *rctx = skcipher_request_ctx(req); - rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + rctx->subreq.base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG; err = xts_xor_tweak_post(req, true); if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) { err = xts_cts_final(req, crypto_skcipher_encrypt); - if (err == -EINPROGRESS) + if (err == -EINPROGRESS || err == -EBUSY) return; } } @@ -222,12 +222,12 @@ static void xts_decrypt_done(struct crypto_async_request *areq, int err) if (!err) { struct xts_request_ctx *rctx = skcipher_request_ctx(req); - rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + rctx->subreq.base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG; err = xts_xor_tweak_post(req, false); if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) { err = xts_cts_final(req, crypto_skcipher_decrypt); - if (err == -EINPROGRESS) + if (err == -EINPROGRESS || err == -EBUSY) return; } } diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile index 59700433a96e51599adf6306068b03bcb3dceec7..f919811156b1f54d72467e1f16396fcbdbe3f2e8 100644 --- a/drivers/acpi/acpica/Makefile +++ b/drivers/acpi/acpica/Makefile @@ -3,7 +3,7 @@ # Makefile for ACPICA Core interpreter # -ccflags-y := -Os -D_LINUX -DBUILDING_ACPICA +ccflags-y := -D_LINUX -DBUILDING_ACPICA ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT # use acpi.o to put all files here into acpi.o modparam namespace diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c index b2ca7dfd3fc927daee6111d3918dbf9b38e096b8..0cc4de3f71d510d42871e8476d094cf23ff03653 100644 --- a/drivers/acpi/acpica/hwvalid.c +++ b/drivers/acpi/acpica/hwvalid.c @@ -23,8 +23,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width); * * The table is used to implement the Microsoft port access rules that * first appeared in Windows XP. Some ports are always illegal, and some - * ports are only illegal if the BIOS calls _OSI with a win_XP string or - * later (meaning that the BIOS itelf is post-XP.) + * ports are only illegal if the BIOS calls _OSI with nothing newer than + * the specific _OSI strings. * * This provides ACPICA with the desired port protections and * Microsoft compatibility. @@ -145,7 +145,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width) /* Port illegality may depend on the _OSI calls made by the BIOS */ - if (acpi_gbl_osi_data >= port_info->osi_dependency) { + if (port_info->osi_dependency == ACPI_ALWAYS_ILLEGAL || + acpi_gbl_osi_data == port_info->osi_dependency) { ACPI_DEBUG_PRINT((ACPI_DB_VALUES, "Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)\n", ACPI_FORMAT_UINT64(address), diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c index 90db2d85e7f5c5c0b3b12e8e5dab9243f9ae6c30..f28d811a3724d58a2a777f364552830d849c1497 100644 --- a/drivers/acpi/acpica/nsrepair.c +++ b/drivers/acpi/acpica/nsrepair.c @@ -181,8 +181,9 @@ acpi_ns_simple_repair(struct acpi_evaluate_info *info, * Try to fix if there was no return object. Warning if failed to fix. 
*/ if (!return_object) { - if (expected_btypes && (!(expected_btypes & ACPI_RTYPE_NONE))) { - if (package_index != ACPI_NOT_PACKAGE_ELEMENT) { + if (expected_btypes) { + if (!(expected_btypes & ACPI_RTYPE_NONE) && + package_index != ACPI_NOT_PACKAGE_ELEMENT) { ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname, ACPI_WARN_ALWAYS, @@ -196,14 +197,15 @@ acpi_ns_simple_repair(struct acpi_evaluate_info *info, if (ACPI_SUCCESS(status)) { return (AE_OK); /* Repair was successful */ } - } else { + } + + if (expected_btypes != ACPI_RTYPE_NONE) { ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname, ACPI_WARN_ALWAYS, "Missing expected return value")); + return (AE_AML_NO_RETURN_VALUE); } - - return (AE_AML_NO_RETURN_VALUE); } } diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index be743d177bcbf8ddc5089d6079c651550fb40179..8b43efe97da5d3a19ba080acd39cf2bd00b0275a 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -454,7 +454,7 @@ static int extract_package(struct acpi_battery *battery, u8 *ptr = (u8 *)battery + offsets[i].offset; if (element->type == ACPI_TYPE_STRING || element->type == ACPI_TYPE_BUFFER) - strncpy(ptr, element->string.pointer, 32); + strscpy(ptr, element->string.pointer, 32); else if (element->type == ACPI_TYPE_INTEGER) { strncpy(ptr, (u8 *)&element->integer.value, sizeof(u64)); diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 99e23a5df0267447f9b4eed07eacdfb724f8b04a..2306abb09f7f5e0536453e04f2f15fb3360144e3 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -3687,8 +3687,8 @@ void acpi_nfit_shutdown(void *data) mutex_lock(&acpi_desc->init_mutex); set_bit(ARS_CANCEL, &acpi_desc->scrub_flags); - cancel_delayed_work_sync(&acpi_desc->dwork); mutex_unlock(&acpi_desc->init_mutex); + cancel_delayed_work_sync(&acpi_desc->dwork); /* * Bounce the nvdimm bus lock to make sure any in-flight diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index e5dd87ddc6b346b0bf03ab1a4eda357fd202e0ab..59781e765e0e2dfe039c5030219f9f088f3a3564 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -536,10 +536,27 @@ static void wait_for_freeze(void) /* No delay is needed if we are in guest */ if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) return; + /* + * Modern (>=Nehalem) Intel systems use ACPI via intel_idle, + * not this code. Assume that any Intel systems using this + * are ancient and may need the dummy wait. This also assumes + * that the motivating chipset issue was Intel-only. + */ + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) + return; #endif - /* Dummy wait op - must do something useless after P_LVL2 read - because chipsets cannot guarantee that STPCLK# signal - gets asserted in time to freeze execution properly. */ + /* + * Dummy wait op - must do something useless after P_LVL2 read + * because chipsets cannot guarantee that STPCLK# signal gets + * asserted in time to freeze execution properly + * + * This workaround has been in place since the original ACPI + * implementation was merged, circa 2002. + * + * If a profile is pointing to this instruction, please first + * consider moving your system to a more modern idle + * mechanism. 
+ */ inl(acpi_gbl_FADT.xpm_timer_block.address); } diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index b13713199ad94aaa3a64e854c6c1a52ec401aa68..038542b3a80a7499f3d6155003f7cd7ed7365789 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -313,7 +313,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = { .ident = "Lenovo Ideapad Z570", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_NAME, "102434U"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Ideapad Z570"), }, }, { diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index d13474c6d18187c616851ca19ab63e96ecbea896..14150767be4441193ec3dda5bf18519a608fc8ff 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -3051,7 +3051,7 @@ int sata_down_spd_limit(struct ata_link *link, u32 spd_limit) */ if (spd > 1) mask &= (1 << (spd - 1)) - 1; - else + else if (link->sata_spd) return -EINVAL; /* were we already at the bottom? */ diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c index 3bb7beb127a9609aeaa5f8450b4b2de26d7910a7..c157a912d673989a641ae3f901cf1ba8b9673d77 100644 --- a/drivers/base/test/test_async_driver_probe.c +++ b/drivers/base/test/test_async_driver_probe.c @@ -146,7 +146,7 @@ static int __init test_async_probe_init(void) calltime = ktime_get(); for_each_online_cpu(cpu) { nid = cpu_to_node(cpu); - pdev = &sync_dev[sync_id]; + pdev = &async_dev[async_id]; *pdev = test_platform_device_register_node("test_async_driver", async_id, diff --git a/drivers/block/brd.c b/drivers/block/brd.c index cc49a921339f778aaf948b52f92dd797677203d8..11078e1663683ad16a69d2e08fb9303867264d20 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -80,11 +80,9 @@ static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector) } /* - * Look up and return a brd's page for a given sector. - * If one does not exist, allocate an empty page, and insert that. Then - * return it. + * Insert a new page for a given sector, if one does not already exist. 
*/ -static struct page *brd_insert_page(struct brd_device *brd, sector_t sector) +static int brd_insert_page(struct brd_device *brd, sector_t sector) { pgoff_t idx; struct page *page; @@ -92,7 +90,7 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector) page = brd_lookup_page(brd, sector); if (page) - return page; + return 0; /* * Must use NOIO because we don't want to recurse back into the @@ -101,11 +99,11 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector) gfp_flags = GFP_NOIO | __GFP_ZERO | __GFP_HIGHMEM; page = alloc_page(gfp_flags); if (!page) - return NULL; + return -ENOMEM; if (radix_tree_preload(GFP_NOIO)) { __free_page(page); - return NULL; + return -ENOMEM; } spin_lock(&brd->brd_lock); @@ -120,8 +118,7 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector) spin_unlock(&brd->brd_lock); radix_tree_preload_end(); - - return page; + return 0; } /* @@ -174,16 +171,17 @@ static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n) { unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT; size_t copy; + int ret; copy = min_t(size_t, n, PAGE_SIZE - offset); - if (!brd_insert_page(brd, sector)) - return -ENOSPC; + ret = brd_insert_page(brd, sector); + if (ret) + return ret; if (copy < n) { sector += copy >> SECTOR_SHIFT; - if (!brd_insert_page(brd, sector)) - return -ENOSPC; + ret = brd_insert_page(brd, sector); } - return 0; + return ret; } /* diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 538a0764c0f7fcfb9cbc8c00fb6123137a1b55a1..38a2461ac2a6851f9d0c898a72baf747eb686341 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1029,13 +1029,13 @@ loop_set_status_from_info(struct loop_device *lo, if (err) return err; + /* Avoid assigning overflow values */ + if (info->lo_offset > LLONG_MAX || info->lo_sizelimit > LLONG_MAX) + return -EOVERFLOW; + lo->lo_offset = info->lo_offset; lo->lo_sizelimit = info->lo_sizelimit; - /* loff_t vars have been assigned __u64 */ - if (lo->lo_offset < 0 || lo->lo_sizelimit < 0) - return -EOVERFLOW; - memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE); memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE); lo->lo_file_name[LO_NAME_SIZE-1] = 0; diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index edfe29990bee62f4e7d5267fcad45268427806af..ecdf16bb130ee0c32f7b8ad9537cce59d37f7d01 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -1833,18 +1833,7 @@ static int nbd_dev_add(int index) refcount_set(&nbd->refs, 1); INIT_LIST_HEAD(&nbd->list); disk->major = NBD_MAJOR; - - /* - * Too big index can cause duplicate creation of sysfs files/links, - * because MKDEV() expect that the max first minor is MINORMASK, or - * index << part_shift can overflow. - */ disk->first_minor = index << part_shift; - if (disk->first_minor < index || disk->first_minor > MINORMASK) { - err = -EINVAL; - goto out_free_tags; - } - disk->fops = &nbd_fops; disk->private_data = nbd; sprintf(disk->disk_name, "nbd%d", index); @@ -1938,8 +1927,19 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info) if (!netlink_capable(skb, CAP_SYS_ADMIN)) return -EPERM; - if (info->attrs[NBD_ATTR_INDEX]) + if (info->attrs[NBD_ATTR_INDEX]) { index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]); + + /* + * Too big first_minor can cause duplicate creation of + * sysfs files/links, since index << part_shift might overflow, or + * MKDEV() expect that the max bits of first_minor is 20. 
+ */ + if (index < 0 || index > MINORMASK >> part_shift) { + printk(KERN_ERR "nbd: illegal input index %d\n", index); + return -EINVAL; + } + } if (!info->attrs[NBD_ATTR_SOCKETS]) { printk(KERN_ERR "nbd: must specify at least one socket\n"); return -EINVAL; diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 340b1df365f72372ffa1827749eda9c155796313..932d4bb8e403534f2b061071101e18923d8ee32b 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -5369,8 +5369,7 @@ static void rbd_dev_release(struct device *dev) module_put(THIS_MODULE); } -static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc, - struct rbd_spec *spec) +static struct rbd_device *__rbd_dev_create(struct rbd_spec *spec) { struct rbd_device *rbd_dev; @@ -5415,9 +5414,6 @@ static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc, rbd_dev->dev.parent = &rbd_root_dev; device_initialize(&rbd_dev->dev); - rbd_dev->rbd_client = rbdc; - rbd_dev->spec = spec; - return rbd_dev; } @@ -5430,12 +5426,10 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc, { struct rbd_device *rbd_dev; - rbd_dev = __rbd_dev_create(rbdc, spec); + rbd_dev = __rbd_dev_create(spec); if (!rbd_dev) return NULL; - rbd_dev->opts = opts; - /* get an id and fill in device name */ rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0, minor_to_rbd_dev_id(1 << MINORBITS), @@ -5452,6 +5446,10 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc, /* we have a ref from do_rbd_add() */ __module_get(THIS_MODULE); + rbd_dev->rbd_client = rbdc; + rbd_dev->spec = spec; + rbd_dev->opts = opts; + dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id); return rbd_dev; @@ -6812,7 +6810,7 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth) goto out_err; } - parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec); + parent = __rbd_dev_create(rbd_dev->parent_spec); if (!parent) { ret = -ENOMEM; goto out_err; @@ -6822,8 +6820,8 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth) * Images related by parent/child relationships always share * rbd_client and spec/parent_spec, so bump their refcounts. 
*/ - __rbd_get_client(rbd_dev->rbd_client); - rbd_spec_get(rbd_dev->parent_spec); + parent->rbd_client = __rbd_get_client(rbd_dev->rbd_client); + parent->spec = rbd_spec_get(rbd_dev->parent_spec); __set_bit(RBD_DEV_FLAG_READONLY, &parent->flags); diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 3d905fda9b29a4576564766b3ac06fb8eb6d50c2..2695ece47eb0e242d5999a1832b2f1b780816ad2 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -393,6 +393,10 @@ static const struct usb_device_id blacklist_table[] = { { USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01), .driver_info = BTUSB_IGNORE }, + /* Realtek 8821CE Bluetooth devices */ + { USB_DEVICE(0x13d3, 0x3529), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + /* Realtek 8822CE Bluetooth devices */ { USB_DEVICE(0x0bda, 0xb00c), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c index f8c29b888e6b4683170b27e7bf7cb76aa85cc1e9..98cbb18f17fa3971cb4a08057eddc302a9808444 100644 --- a/drivers/bus/sunxi-rsb.c +++ b/drivers/bus/sunxi-rsb.c @@ -781,7 +781,13 @@ static int __init sunxi_rsb_init(void) return ret; } - return platform_driver_register(&sunxi_rsb_driver); + ret = platform_driver_register(&sunxi_rsb_driver); + if (ret) { + bus_unregister(&sunxi_rsb_bus); + return ret; + } + + return 0; } module_init(sunxi_rsb_init); diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index 4771397495130533a2a15c90208809d6a4905da2..0f2bac24e564dccc917970d0dede2bd6df962bea 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -92,7 +92,7 @@ #define SSIF_WATCH_WATCHDOG_TIMEOUT msecs_to_jiffies(250) enum ssif_intf_state { - SSIF_NORMAL, + SSIF_IDLE, SSIF_GETTING_FLAGS, SSIF_GETTING_EVENTS, SSIF_CLEARING_FLAGS, @@ -100,8 +100,8 @@ enum ssif_intf_state { /* FIXME - add watchdog stuff. */ }; -#define SSIF_IDLE(ssif) ((ssif)->ssif_state == SSIF_NORMAL \ - && (ssif)->curr_msg == NULL) +#define IS_SSIF_IDLE(ssif) ((ssif)->ssif_state == SSIF_IDLE \ + && (ssif)->curr_msg == NULL) /* * Indexes into stats[] in ssif_info below. @@ -348,9 +348,9 @@ static void return_hosed_msg(struct ssif_info *ssif_info, /* * Must be called with the message lock held. This will release the - * message lock. Note that the caller will check SSIF_IDLE and start a - * new operation, so there is no need to check for new messages to - * start in here. + * message lock. Note that the caller will check IS_SSIF_IDLE and + * start a new operation, so there is no need to check for new + * messages to start in here. */ static void start_clear_flags(struct ssif_info *ssif_info, unsigned long *flags) { @@ -367,7 +367,7 @@ static void start_clear_flags(struct ssif_info *ssif_info, unsigned long *flags) if (start_send(ssif_info, msg, 3) != 0) { /* Error, just go to normal state. 
*/ - ssif_info->ssif_state = SSIF_NORMAL; + ssif_info->ssif_state = SSIF_IDLE; } } @@ -382,7 +382,7 @@ static void start_flag_fetch(struct ssif_info *ssif_info, unsigned long *flags) mb[0] = (IPMI_NETFN_APP_REQUEST << 2); mb[1] = IPMI_GET_MSG_FLAGS_CMD; if (start_send(ssif_info, mb, 2) != 0) - ssif_info->ssif_state = SSIF_NORMAL; + ssif_info->ssif_state = SSIF_IDLE; } static void check_start_send(struct ssif_info *ssif_info, unsigned long *flags, @@ -393,7 +393,7 @@ static void check_start_send(struct ssif_info *ssif_info, unsigned long *flags, flags = ipmi_ssif_lock_cond(ssif_info, &oflags); ssif_info->curr_msg = NULL; - ssif_info->ssif_state = SSIF_NORMAL; + ssif_info->ssif_state = SSIF_IDLE; ipmi_ssif_unlock_cond(ssif_info, flags); ipmi_free_smi_msg(msg); } @@ -407,7 +407,7 @@ static void start_event_fetch(struct ssif_info *ssif_info, unsigned long *flags) msg = ipmi_alloc_smi_msg(); if (!msg) { - ssif_info->ssif_state = SSIF_NORMAL; + ssif_info->ssif_state = SSIF_IDLE; ipmi_ssif_unlock_cond(ssif_info, flags); return; } @@ -430,7 +430,7 @@ static void start_recv_msg_fetch(struct ssif_info *ssif_info, msg = ipmi_alloc_smi_msg(); if (!msg) { - ssif_info->ssif_state = SSIF_NORMAL; + ssif_info->ssif_state = SSIF_IDLE; ipmi_ssif_unlock_cond(ssif_info, flags); return; } @@ -448,9 +448,9 @@ static void start_recv_msg_fetch(struct ssif_info *ssif_info, /* * Must be called with the message lock held. This will release the - * message lock. Note that the caller will check SSIF_IDLE and start a - * new operation, so there is no need to check for new messages to - * start in here. + * message lock. Note that the caller will check IS_SSIF_IDLE and + * start a new operation, so there is no need to check for new + * messages to start in here. */ static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags) { @@ -466,7 +466,7 @@ static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags) /* Events available. */ start_event_fetch(ssif_info, flags); else { - ssif_info->ssif_state = SSIF_NORMAL; + ssif_info->ssif_state = SSIF_IDLE; ipmi_ssif_unlock_cond(ssif_info, flags); } } @@ -579,7 +579,7 @@ static void watch_timeout(struct timer_list *t) if (ssif_info->watch_timeout) { mod_timer(&ssif_info->watch_timer, jiffies + ssif_info->watch_timeout); - if (SSIF_IDLE(ssif_info)) { + if (IS_SSIF_IDLE(ssif_info)) { start_flag_fetch(ssif_info, flags); /* Releases lock */ return; } @@ -782,7 +782,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, } switch (ssif_info->ssif_state) { - case SSIF_NORMAL: + case SSIF_IDLE: ipmi_ssif_unlock_cond(ssif_info, flags); if (!msg) break; @@ -800,7 +800,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, * Error fetching flags, or invalid length, * just give up for now. 
*/ - ssif_info->ssif_state = SSIF_NORMAL; + ssif_info->ssif_state = SSIF_IDLE; ipmi_ssif_unlock_cond(ssif_info, flags); dev_warn(&ssif_info->client->dev, "Error getting flags: %d %d, %x\n", @@ -835,7 +835,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, "Invalid response clearing flags: %x %x\n", data[0], data[1]); } - ssif_info->ssif_state = SSIF_NORMAL; + ssif_info->ssif_state = SSIF_IDLE; ipmi_ssif_unlock_cond(ssif_info, flags); break; @@ -913,7 +913,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, } flags = ipmi_ssif_lock_cond(ssif_info, &oflags); - if (SSIF_IDLE(ssif_info) && !ssif_info->stopping) { + if (IS_SSIF_IDLE(ssif_info) && !ssif_info->stopping) { if (ssif_info->req_events) start_event_fetch(ssif_info, flags); else if (ssif_info->req_flags) @@ -1087,7 +1087,7 @@ static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags) unsigned long oflags; restart: - if (!SSIF_IDLE(ssif_info)) { + if (!IS_SSIF_IDLE(ssif_info)) { ipmi_ssif_unlock_cond(ssif_info, flags); return; } @@ -1310,7 +1310,7 @@ static void shutdown_ssif(void *send_info) dev_set_drvdata(&ssif_info->client->dev, NULL); /* make sure the driver is not looking for flags any more. */ - while (ssif_info->ssif_state != SSIF_NORMAL) + while (ssif_info->ssif_state != SSIF_IDLE) schedule_timeout(1); ssif_info->stopping = true; @@ -1882,7 +1882,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) } spin_lock_init(&ssif_info->lock); - ssif_info->ssif_state = SSIF_NORMAL; + ssif_info->ssif_state = SSIF_IDLE; timer_setup(&ssif_info->retry_timer, retry_timeout, 0); timer_setup(&ssif_info->watch_timer, watch_timeout, 0); diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index 92eda5b2f1341ba46053a67c715b6ecc67be7516..883b4a3410122b84a143e6c73d517597663f5d7d 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c @@ -503,7 +503,7 @@ static void panic_halt_ipmi_heartbeat(void) msg.cmd = IPMI_WDOG_RESET_TIMER; msg.data = NULL; msg.data_len = 0; - atomic_add(1, &panic_done_count); + atomic_add(2, &panic_done_count); rv = ipmi_request_supply_msgs(watchdog_user, (struct ipmi_addr *) &addr, 0, @@ -513,7 +513,7 @@ static void panic_halt_ipmi_heartbeat(void) &panic_halt_heartbeat_recv_msg, 1); if (rv) - atomic_sub(1, &panic_done_count); + atomic_sub(2, &panic_done_count); } static struct ipmi_smi_msg panic_halt_smi_msg = { @@ -537,12 +537,12 @@ static void panic_halt_ipmi_set_timeout(void) /* Wait for the messages to be free. 
*/ while (atomic_read(&panic_done_count) != 0) ipmi_poll_interface(watchdog_user); - atomic_add(1, &panic_done_count); + atomic_add(2, &panic_done_count); rv = __ipmi_set_timeout(&panic_halt_smi_msg, &panic_halt_recv_msg, &send_heartbeat_now); if (rv) { - atomic_sub(1, &panic_done_count); + atomic_sub(2, &panic_done_count); pr_warn("Unable to extend the watchdog timeout\n"); } else { if (send_heartbeat_now) diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c index 0913d3eb8d518e7658677d95326a698cadd093e4..cd266021d0103954b7966aa289240ed4451521a5 100644 --- a/drivers/char/tpm/eventlog/acpi.c +++ b/drivers/char/tpm/eventlog/acpi.c @@ -143,8 +143,12 @@ int tpm_read_log_acpi(struct tpm_chip *chip) ret = -EIO; virt = acpi_os_map_iomem(start, len); - if (!virt) + if (!virt) { + dev_warn(&chip->dev, "%s: Failed to map ACPI memory\n", __func__); + /* try EFI log next */ + ret = -ENODEV; goto err; + } memcpy_fromio(log->bios_event_log, virt, len); diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c index f9d5b7334341733f9de8271ee4642218243a1286..4fb4fd4b06bdade6729ef0f15f861bc0cd262930 100644 --- a/drivers/clk/clk-devres.c +++ b/drivers/clk/clk-devres.c @@ -4,42 +4,101 @@ #include #include +struct devm_clk_state { + struct clk *clk; + void (*exit)(struct clk *clk); +}; + static void devm_clk_release(struct device *dev, void *res) { - clk_put(*(struct clk **)res); + struct devm_clk_state *state = res; + + if (state->exit) + state->exit(state->clk); + + clk_put(state->clk); } -struct clk *devm_clk_get(struct device *dev, const char *id) +static struct clk *__devm_clk_get(struct device *dev, const char *id, + struct clk *(*get)(struct device *dev, const char *id), + int (*init)(struct clk *clk), + void (*exit)(struct clk *clk)) { - struct clk **ptr, *clk; + struct devm_clk_state *state; + struct clk *clk; + int ret; - ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL); - if (!ptr) + state = devres_alloc(devm_clk_release, sizeof(*state), GFP_KERNEL); + if (!state) return ERR_PTR(-ENOMEM); - clk = clk_get(dev, id); - if (!IS_ERR(clk)) { - *ptr = clk; - devres_add(dev, ptr); - } else { - devres_free(ptr); + clk = get(dev, id); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + goto err_clk_get; } + if (init) { + ret = init(clk); + if (ret) + goto err_clk_init; + } + + state->clk = clk; + state->exit = exit; + + devres_add(dev, state); + return clk; + +err_clk_init: + + clk_put(clk); +err_clk_get: + + devres_free(state); + return ERR_PTR(ret); +} + +struct clk *devm_clk_get(struct device *dev, const char *id) +{ + return __devm_clk_get(dev, id, clk_get, NULL, NULL); } EXPORT_SYMBOL(devm_clk_get); -struct clk *devm_clk_get_optional(struct device *dev, const char *id) +struct clk *devm_clk_get_prepared(struct device *dev, const char *id) { - struct clk *clk = devm_clk_get(dev, id); + return __devm_clk_get(dev, id, clk_get, clk_prepare, clk_unprepare); +} +EXPORT_SYMBOL_GPL(devm_clk_get_prepared); - if (clk == ERR_PTR(-ENOENT)) - return NULL; +struct clk *devm_clk_get_enabled(struct device *dev, const char *id) +{ + return __devm_clk_get(dev, id, clk_get, + clk_prepare_enable, clk_disable_unprepare); +} +EXPORT_SYMBOL_GPL(devm_clk_get_enabled); - return clk; +struct clk *devm_clk_get_optional(struct device *dev, const char *id) +{ + return __devm_clk_get(dev, id, clk_get_optional, NULL, NULL); } EXPORT_SYMBOL(devm_clk_get_optional); +struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id) +{ + return __devm_clk_get(dev, id, 
clk_get_optional, + clk_prepare, clk_unprepare); +} +EXPORT_SYMBOL_GPL(devm_clk_get_optional_prepared); + +struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id) +{ + return __devm_clk_get(dev, id, clk_get_optional, + clk_prepare_enable, clk_disable_unprepare); +} +EXPORT_SYMBOL_GPL(devm_clk_get_optional_enabled); + struct clk_bulk_devres { struct clk_bulk_data *clks; int num_clks; diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index b355d3d40f63afe876c642c106e14010257e5705..3575afe16a574eb0388ef07569e72c8153fe0b04 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -251,6 +251,17 @@ static bool clk_core_is_enabled(struct clk_core *core) } } + /* + * This could be called with the enable lock held, or from atomic + * context. If the parent isn't enabled already, we can't do + * anything here. We can also assume this clock isn't enabled. + */ + if ((core->flags & CLK_OPS_PARENT_ENABLE) && core->parent) + if (!clk_core_is_enabled(core->parent)) { + ret = false; + goto done; + } + ret = core->ops->is_enabled(core->hw); done: if (core->rpm_enabled) diff --git a/drivers/clk/imx/clk.c b/drivers/clk/imx/clk.c index 7cc669934253a210e300b5e9d597091e9a6b1897..d4cf0c7045ab27072be04c447ad0984ee7611490 100644 --- a/drivers/clk/imx/clk.c +++ b/drivers/clk/imx/clk.c @@ -201,9 +201,10 @@ static int __init imx_clk_disable_uart(void) clk_disable_unprepare(imx_uart_clocks[i]); clk_put(imx_uart_clocks[i]); } - kfree(imx_uart_clocks); } + kfree(imx_uart_clocks); + return 0; } late_initcall_sync(imx_clk_disable_uart); diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c index 46d314d69250584d715c5a28ce2ecf5dba043398..a7a9884799cd3c832fe2cc5e6c9630f72aa814d0 100644 --- a/drivers/clk/qcom/gcc-qcs404.c +++ b/drivers/clk/qcom/gcc-qcs404.c @@ -25,11 +25,9 @@ enum { P_CORE_BI_PLL_TEST_SE, P_DSI0_PHY_PLL_OUT_BYTECLK, P_DSI0_PHY_PLL_OUT_DSICLK, - P_GPLL0_OUT_AUX, P_GPLL0_OUT_MAIN, P_GPLL1_OUT_MAIN, P_GPLL3_OUT_MAIN, - P_GPLL4_OUT_AUX, P_GPLL4_OUT_MAIN, P_GPLL6_OUT_AUX, P_HDMI_PHY_PLL_CLK, @@ -109,28 +107,24 @@ static const char * const gcc_parent_names_4[] = { static const struct parent_map gcc_parent_map_5[] = { { P_XO, 0 }, { P_DSI0_PHY_PLL_OUT_BYTECLK, 1 }, - { P_GPLL0_OUT_AUX, 2 }, { P_CORE_BI_PLL_TEST_SE, 7 }, }; static const char * const gcc_parent_names_5[] = { "cxo", - "dsi0pll_byteclk_src", - "gpll0_out_aux", + "dsi0pllbyte", "core_bi_pll_test_se", }; static const struct parent_map gcc_parent_map_6[] = { { P_XO, 0 }, { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 }, - { P_GPLL0_OUT_AUX, 3 }, { P_CORE_BI_PLL_TEST_SE, 7 }, }; static const char * const gcc_parent_names_6[] = { "cxo", - "dsi0_phy_pll_out_byteclk", - "gpll0_out_aux", + "dsi0pllbyte", "core_bi_pll_test_se", }; @@ -139,7 +133,6 @@ static const struct parent_map gcc_parent_map_7[] = { { P_GPLL0_OUT_MAIN, 1 }, { P_GPLL3_OUT_MAIN, 2 }, { P_GPLL6_OUT_AUX, 3 }, - { P_GPLL4_OUT_AUX, 4 }, { P_CORE_BI_PLL_TEST_SE, 7 }, }; @@ -148,7 +141,6 @@ static const char * const gcc_parent_names_7[] = { "gpll0_out_main", "gpll3_out_main", "gpll6_out_aux", - "gpll4_out_aux", "core_bi_pll_test_se", }; @@ -175,7 +167,7 @@ static const struct parent_map gcc_parent_map_9[] = { static const char * const gcc_parent_names_9[] = { "cxo", "gpll0_out_main", - "dsi0_phy_pll_out_dsiclk", + "dsi0pll", "gpll6_out_aux", "core_bi_pll_test_se", }; @@ -207,14 +199,12 @@ static const char * const gcc_parent_names_11[] = { static const struct parent_map gcc_parent_map_12[] = { { P_XO, 0 }, { P_DSI0_PHY_PLL_OUT_DSICLK, 1 }, - { P_GPLL0_OUT_AUX, 2 }, { 
P_CORE_BI_PLL_TEST_SE, 7 }, }; static const char * const gcc_parent_names_12[] = { "cxo", - "dsi0pll_pclk_src", - "gpll0_out_aux", + "dsi0pll", "core_bi_pll_test_se", }; @@ -237,40 +227,34 @@ static const char * const gcc_parent_names_13[] = { static const struct parent_map gcc_parent_map_14[] = { { P_XO, 0 }, { P_GPLL0_OUT_MAIN, 1 }, - { P_GPLL4_OUT_AUX, 2 }, { P_CORE_BI_PLL_TEST_SE, 7 }, }; static const char * const gcc_parent_names_14[] = { "cxo", "gpll0_out_main", - "gpll4_out_aux", "core_bi_pll_test_se", }; static const struct parent_map gcc_parent_map_15[] = { { P_XO, 0 }, - { P_GPLL0_OUT_AUX, 2 }, { P_CORE_BI_PLL_TEST_SE, 7 }, }; static const char * const gcc_parent_names_15[] = { "cxo", - "gpll0_out_aux", "core_bi_pll_test_se", }; static const struct parent_map gcc_parent_map_16[] = { { P_XO, 0 }, { P_GPLL0_OUT_MAIN, 1 }, - { P_GPLL0_OUT_AUX, 2 }, { P_CORE_BI_PLL_TEST_SE, 7 }, }; static const char * const gcc_parent_names_16[] = { "cxo", "gpll0_out_main", - "gpll0_out_aux", "core_bi_pll_test_se", }; diff --git a/drivers/clk/qcom/gpucc-sc7180.c b/drivers/clk/qcom/gpucc-sc7180.c index 88a739b6fec3997c55ef0dcdbeb8c2fde6bce3b0..c51114e7e1e6629b5986f4857a53b398783aeeb0 100644 --- a/drivers/clk/qcom/gpucc-sc7180.c +++ b/drivers/clk/qcom/gpucc-sc7180.c @@ -21,8 +21,6 @@ #define CX_GMU_CBCR_SLEEP_SHIFT 4 #define CX_GMU_CBCR_WAKE_MASK 0xF #define CX_GMU_CBCR_WAKE_SHIFT 8 -#define CLK_DIS_WAIT_SHIFT 12 -#define CLK_DIS_WAIT_MASK (0xf << CLK_DIS_WAIT_SHIFT) enum { P_BI_TCXO, @@ -163,6 +161,7 @@ static struct clk_branch gpu_cc_cxo_clk = { static struct gdsc cx_gdsc = { .gdscr = 0x106c, .gds_hw_ctrl = 0x1540, + .clk_dis_wait_val = 8, .pd = { .name = "cx_gdsc", }, @@ -245,10 +244,6 @@ static int gpu_cc_sc7180_probe(struct platform_device *pdev) value = 0xF << CX_GMU_CBCR_WAKE_SHIFT | 0xF << CX_GMU_CBCR_SLEEP_SHIFT; regmap_update_bits(regmap, 0x1098, mask, value); - /* Configure clk_dis_wait for gpu_cx_gdsc */ - regmap_update_bits(regmap, 0x106c, CLK_DIS_WAIT_MASK, - 8 << CLK_DIS_WAIT_SHIFT); - return qcom_cc_really_probe(pdev, &gpu_cc_sc7180_desc, regmap); } diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c index 5663698b306b9df623a940076bdebf20052693ae..658c6ac700e1ecdc54d09c59572d7abf129dfcb3 100644 --- a/drivers/clk/qcom/gpucc-sdm845.c +++ b/drivers/clk/qcom/gpucc-sdm845.c @@ -22,8 +22,6 @@ #define CX_GMU_CBCR_SLEEP_SHIFT 4 #define CX_GMU_CBCR_WAKE_MASK 0xf #define CX_GMU_CBCR_WAKE_SHIFT 8 -#define CLK_DIS_WAIT_SHIFT 12 -#define CLK_DIS_WAIT_MASK (0xf << CLK_DIS_WAIT_SHIFT) enum { P_BI_TCXO, @@ -124,6 +122,7 @@ static struct clk_branch gpu_cc_cxo_clk = { static struct gdsc gpu_cx_gdsc = { .gdscr = 0x106c, .gds_hw_ctrl = 0x1540, + .clk_dis_wait_val = 0x8, .pd = { .name = "gpu_cx_gdsc", }, @@ -196,10 +195,6 @@ static int gpu_cc_sdm845_probe(struct platform_device *pdev) value = 0xf << CX_GMU_CBCR_WAKE_SHIFT | 0xf << CX_GMU_CBCR_SLEEP_SHIFT; regmap_update_bits(regmap, 0x1098, mask, value); - /* Configure clk_dis_wait for gpu_cx_gdsc */ - regmap_update_bits(regmap, 0x106c, CLK_DIS_WAIT_MASK, - 8 << CLK_DIS_WAIT_SHIFT); - return qcom_cc_really_probe(pdev, &gpu_cc_sdm845_desc, regmap); } diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c index 94db8837033771a76fa63e3cfed7604062723713..a5a68e1e7549011ea9fba4cb3196fc9eb07f2af7 100644 --- a/drivers/clk/renesas/renesas-cpg-mssr.c +++ b/drivers/clk/renesas/renesas-cpg-mssr.c @@ -914,9 +914,8 @@ static int cpg_mssr_resume_noirq(struct device *dev) } if (!i) - dev_warn(dev, "Failed to 
enable %s%u[0x%x]\n", - priv->reg_layout == CLK_REG_LAYOUT_RZ_A ? - "STB" : "SMSTP", reg, oldval & mask); + dev_warn(dev, "Failed to enable SMSTP%u[0x%x]\n", reg, + oldval & mask); } return 0; @@ -960,7 +959,6 @@ static int __init cpg_mssr_common_init(struct device *dev, goto out_err; } - cpg_mssr_priv = priv; priv->num_core_clks = info->num_total_core_clks; priv->num_mod_clks = info->num_hw_mod_clks; priv->last_dt_core_clk = info->last_dt_core_clk; @@ -990,6 +988,8 @@ static int __init cpg_mssr_common_init(struct device *dev, if (error) goto out_err; + cpg_mssr_priv = priv; + return 0; out_err: diff --git a/drivers/clk/x86/Kconfig b/drivers/clk/x86/Kconfig index 69642e15fcc1fc85be71320100dc0f7f6f897bc3..ced99e082e3dd29a85ce7bfecc5d297abedbaf49 100644 --- a/drivers/clk/x86/Kconfig +++ b/drivers/clk/x86/Kconfig @@ -1,8 +1,9 @@ # SPDX-License-Identifier: GPL-2.0-only config CLK_LGM_CGU depends on OF && HAS_IOMEM && (X86 || COMPILE_TEST) + select MFD_SYSCON select OF_EARLY_FLATTREE bool "Clock driver for Lightning Mountain(LGM) platform" help - Clock Generation Unit(CGU) driver for Intel Lightning Mountain(LGM) - network processor SoC. + Clock Generation Unit(CGU) driver for MaxLinear's x86 based + Lightning Mountain(LGM) network processor SoC. diff --git a/drivers/clk/x86/clk-cgu-pll.c b/drivers/clk/x86/clk-cgu-pll.c index 3179557b5f784f5394bc196924251538a2cf601d..409dbf55f4caea49f2913a50550b368cfbbd118b 100644 --- a/drivers/clk/x86/clk-cgu-pll.c +++ b/drivers/clk/x86/clk-cgu-pll.c @@ -1,8 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 /* + * Copyright (C) 2020-2022 MaxLinear, Inc. * Copyright (C) 2020 Intel Corporation. - * Zhu YiXin - * Rahul Tanwar + * Zhu Yixin + * Rahul Tanwar */ #include @@ -40,13 +41,10 @@ static unsigned long lgm_pll_recalc_rate(struct clk_hw *hw, unsigned long prate) { struct lgm_clk_pll *pll = to_lgm_clk_pll(hw); unsigned int div, mult, frac; - unsigned long flags; - spin_lock_irqsave(&pll->lock, flags); mult = lgm_get_clk_val(pll->membase, PLL_REF_DIV(pll->reg), 0, 12); div = lgm_get_clk_val(pll->membase, PLL_REF_DIV(pll->reg), 18, 6); frac = lgm_get_clk_val(pll->membase, pll->reg, 2, 24); - spin_unlock_irqrestore(&pll->lock, flags); if (pll->type == TYPE_LJPLL) div *= 4; @@ -57,12 +55,9 @@ static unsigned long lgm_pll_recalc_rate(struct clk_hw *hw, unsigned long prate) static int lgm_pll_is_enabled(struct clk_hw *hw) { struct lgm_clk_pll *pll = to_lgm_clk_pll(hw); - unsigned long flags; unsigned int ret; - spin_lock_irqsave(&pll->lock, flags); ret = lgm_get_clk_val(pll->membase, pll->reg, 0, 1); - spin_unlock_irqrestore(&pll->lock, flags); return ret; } @@ -70,15 +65,13 @@ static int lgm_pll_is_enabled(struct clk_hw *hw) static int lgm_pll_enable(struct clk_hw *hw) { struct lgm_clk_pll *pll = to_lgm_clk_pll(hw); - unsigned long flags; u32 val; int ret; - spin_lock_irqsave(&pll->lock, flags); lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 1); - ret = readl_poll_timeout_atomic(pll->membase + pll->reg, - val, (val & 0x1), 1, 100); - spin_unlock_irqrestore(&pll->lock, flags); + ret = regmap_read_poll_timeout_atomic(pll->membase, pll->reg, + val, (val & 0x1), 1, 100); + return ret; } @@ -86,11 +79,8 @@ static int lgm_pll_enable(struct clk_hw *hw) static void lgm_pll_disable(struct clk_hw *hw) { struct lgm_clk_pll *pll = to_lgm_clk_pll(hw); - unsigned long flags; - spin_lock_irqsave(&pll->lock, flags); lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 0); - spin_unlock_irqrestore(&pll->lock, flags); } static const struct clk_ops lgm_pll_ops = { @@ -121,7 +111,6 @@ 
lgm_clk_register_pll(struct lgm_clk_provider *ctx, return ERR_PTR(-ENOMEM); pll->membase = ctx->membase; - pll->lock = ctx->lock; pll->reg = list->reg; pll->flags = list->flags; pll->type = list->type; diff --git a/drivers/clk/x86/clk-cgu.c b/drivers/clk/x86/clk-cgu.c index 33de600e0c38e4664a0fa667978284c1fee76d8e..89b53f280aee03b7838e4656eae3571f00eaa263 100644 --- a/drivers/clk/x86/clk-cgu.c +++ b/drivers/clk/x86/clk-cgu.c @@ -1,8 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 /* + * Copyright (C) 2020-2022 MaxLinear, Inc. * Copyright (C) 2020 Intel Corporation. - * Zhu YiXin - * Rahul Tanwar + * Zhu Yixin + * Rahul Tanwar */ #include #include @@ -24,14 +25,10 @@ static struct clk_hw *lgm_clk_register_fixed(struct lgm_clk_provider *ctx, const struct lgm_clk_branch *list) { - unsigned long flags; - if (list->div_flags & CLOCK_FLAG_VAL_INIT) { - spin_lock_irqsave(&ctx->lock, flags); + if (list->div_flags & CLOCK_FLAG_VAL_INIT) lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift, list->div_width, list->div_val); - spin_unlock_irqrestore(&ctx->lock, flags); - } return clk_hw_register_fixed_rate(NULL, list->name, list->parent_data[0].name, @@ -41,33 +38,27 @@ static struct clk_hw *lgm_clk_register_fixed(struct lgm_clk_provider *ctx, static u8 lgm_clk_mux_get_parent(struct clk_hw *hw) { struct lgm_clk_mux *mux = to_lgm_clk_mux(hw); - unsigned long flags; u32 val; - spin_lock_irqsave(&mux->lock, flags); if (mux->flags & MUX_CLK_SW) val = mux->reg; else val = lgm_get_clk_val(mux->membase, mux->reg, mux->shift, mux->width); - spin_unlock_irqrestore(&mux->lock, flags); return clk_mux_val_to_index(hw, NULL, mux->flags, val); } static int lgm_clk_mux_set_parent(struct clk_hw *hw, u8 index) { struct lgm_clk_mux *mux = to_lgm_clk_mux(hw); - unsigned long flags; u32 val; val = clk_mux_index_to_val(NULL, mux->flags, index); - spin_lock_irqsave(&mux->lock, flags); if (mux->flags & MUX_CLK_SW) mux->reg = val; else lgm_set_clk_val(mux->membase, mux->reg, mux->shift, mux->width, val); - spin_unlock_irqrestore(&mux->lock, flags); return 0; } @@ -90,7 +81,7 @@ static struct clk_hw * lgm_clk_register_mux(struct lgm_clk_provider *ctx, const struct lgm_clk_branch *list) { - unsigned long flags, cflags = list->mux_flags; + unsigned long cflags = list->mux_flags; struct device *dev = ctx->dev; u8 shift = list->mux_shift; u8 width = list->mux_width; @@ -111,7 +102,6 @@ lgm_clk_register_mux(struct lgm_clk_provider *ctx, init.num_parents = list->num_parents; mux->membase = ctx->membase; - mux->lock = ctx->lock; mux->reg = reg; mux->shift = shift; mux->width = width; @@ -123,11 +113,8 @@ lgm_clk_register_mux(struct lgm_clk_provider *ctx, if (ret) return ERR_PTR(ret); - if (cflags & CLOCK_FLAG_VAL_INIT) { - spin_lock_irqsave(&mux->lock, flags); + if (cflags & CLOCK_FLAG_VAL_INIT) lgm_set_clk_val(mux->membase, reg, shift, width, list->mux_val); - spin_unlock_irqrestore(&mux->lock, flags); - } return hw; } @@ -136,13 +123,10 @@ static unsigned long lgm_clk_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct lgm_clk_divider *divider = to_lgm_clk_divider(hw); - unsigned long flags; unsigned int val; - spin_lock_irqsave(÷r->lock, flags); val = lgm_get_clk_val(divider->membase, divider->reg, divider->shift, divider->width); - spin_unlock_irqrestore(÷r->lock, flags); return divider_recalc_rate(hw, parent_rate, val, divider->table, divider->flags, divider->width); @@ -163,7 +147,6 @@ lgm_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long prate) { struct lgm_clk_divider 
*divider = to_lgm_clk_divider(hw); - unsigned long flags; int value; value = divider_get_val(rate, prate, divider->table, @@ -171,10 +154,8 @@ lgm_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, if (value < 0) return value; - spin_lock_irqsave(÷r->lock, flags); lgm_set_clk_val(divider->membase, divider->reg, divider->shift, divider->width, value); - spin_unlock_irqrestore(÷r->lock, flags); return 0; } @@ -182,12 +163,10 @@ lgm_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, static int lgm_clk_divider_enable_disable(struct clk_hw *hw, int enable) { struct lgm_clk_divider *div = to_lgm_clk_divider(hw); - unsigned long flags; - spin_lock_irqsave(&div->lock, flags); - lgm_set_clk_val(div->membase, div->reg, div->shift_gate, - div->width_gate, enable); - spin_unlock_irqrestore(&div->lock, flags); + if (div->flags != DIV_CLK_NO_MASK) + lgm_set_clk_val(div->membase, div->reg, div->shift_gate, + div->width_gate, enable); return 0; } @@ -213,7 +192,7 @@ static struct clk_hw * lgm_clk_register_divider(struct lgm_clk_provider *ctx, const struct lgm_clk_branch *list) { - unsigned long flags, cflags = list->div_flags; + unsigned long cflags = list->div_flags; struct device *dev = ctx->dev; struct lgm_clk_divider *div; struct clk_init_data init = {}; @@ -236,7 +215,6 @@ lgm_clk_register_divider(struct lgm_clk_provider *ctx, init.num_parents = 1; div->membase = ctx->membase; - div->lock = ctx->lock; div->reg = reg; div->shift = shift; div->width = width; @@ -251,11 +229,8 @@ lgm_clk_register_divider(struct lgm_clk_provider *ctx, if (ret) return ERR_PTR(ret); - if (cflags & CLOCK_FLAG_VAL_INIT) { - spin_lock_irqsave(&div->lock, flags); + if (cflags & CLOCK_FLAG_VAL_INIT) lgm_set_clk_val(div->membase, reg, shift, width, list->div_val); - spin_unlock_irqrestore(&div->lock, flags); - } return hw; } @@ -264,7 +239,6 @@ static struct clk_hw * lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx, const struct lgm_clk_branch *list) { - unsigned long flags; struct clk_hw *hw; hw = clk_hw_register_fixed_factor(ctx->dev, list->name, @@ -273,12 +247,9 @@ lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx, if (IS_ERR(hw)) return ERR_CAST(hw); - if (list->div_flags & CLOCK_FLAG_VAL_INIT) { - spin_lock_irqsave(&ctx->lock, flags); + if (list->div_flags & CLOCK_FLAG_VAL_INIT) lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift, list->div_width, list->div_val); - spin_unlock_irqrestore(&ctx->lock, flags); - } return hw; } @@ -286,13 +257,10 @@ lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx, static int lgm_clk_gate_enable(struct clk_hw *hw) { struct lgm_clk_gate *gate = to_lgm_clk_gate(hw); - unsigned long flags; unsigned int reg; - spin_lock_irqsave(&gate->lock, flags); reg = GATE_HW_REG_EN(gate->reg); lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1); - spin_unlock_irqrestore(&gate->lock, flags); return 0; } @@ -300,25 +268,19 @@ static int lgm_clk_gate_enable(struct clk_hw *hw) static void lgm_clk_gate_disable(struct clk_hw *hw) { struct lgm_clk_gate *gate = to_lgm_clk_gate(hw); - unsigned long flags; unsigned int reg; - spin_lock_irqsave(&gate->lock, flags); reg = GATE_HW_REG_DIS(gate->reg); lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1); - spin_unlock_irqrestore(&gate->lock, flags); } static int lgm_clk_gate_is_enabled(struct clk_hw *hw) { struct lgm_clk_gate *gate = to_lgm_clk_gate(hw); unsigned int reg, ret; - unsigned long flags; - spin_lock_irqsave(&gate->lock, flags); reg = GATE_HW_REG_STAT(gate->reg); ret = lgm_get_clk_val(gate->membase, 
reg, gate->shift, 1); - spin_unlock_irqrestore(&gate->lock, flags); return ret; } @@ -333,7 +295,7 @@ static struct clk_hw * lgm_clk_register_gate(struct lgm_clk_provider *ctx, const struct lgm_clk_branch *list) { - unsigned long flags, cflags = list->gate_flags; + unsigned long cflags = list->gate_flags; const char *pname = list->parent_data[0].name; struct device *dev = ctx->dev; u8 shift = list->gate_shift; @@ -354,7 +316,6 @@ lgm_clk_register_gate(struct lgm_clk_provider *ctx, init.num_parents = pname ? 1 : 0; gate->membase = ctx->membase; - gate->lock = ctx->lock; gate->reg = reg; gate->shift = shift; gate->flags = cflags; @@ -366,9 +327,7 @@ lgm_clk_register_gate(struct lgm_clk_provider *ctx, return ERR_PTR(ret); if (cflags & CLOCK_FLAG_VAL_INIT) { - spin_lock_irqsave(&gate->lock, flags); lgm_set_clk_val(gate->membase, reg, shift, 1, list->gate_val); - spin_unlock_irqrestore(&gate->lock, flags); } return hw; @@ -396,8 +355,22 @@ int lgm_clk_register_branches(struct lgm_clk_provider *ctx, hw = lgm_clk_register_fixed_factor(ctx, list); break; case CLK_TYPE_GATE: - hw = lgm_clk_register_gate(ctx, list); + if (list->gate_flags & GATE_CLK_HW) { + hw = lgm_clk_register_gate(ctx, list); + } else { + /* + * GATE_CLKs can be controlled either from + * CGU clk driver i.e. this driver or directly + * from power management driver/daemon. It is + * dependent on the power policy/profile requirements + * of the end product. To override control of gate + * clks from this driver, provide NULL for this index + * of gate clk provider. + */ + hw = NULL; + } break; + default: dev_err(ctx->dev, "invalid clk type\n"); return -EINVAL; @@ -443,24 +416,18 @@ lgm_clk_ddiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) static int lgm_clk_ddiv_enable(struct clk_hw *hw) { struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw); - unsigned long flags; - spin_lock_irqsave(&ddiv->lock, flags); lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate, ddiv->width_gate, 1); - spin_unlock_irqrestore(&ddiv->lock, flags); return 0; } static void lgm_clk_ddiv_disable(struct clk_hw *hw) { struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw); - unsigned long flags; - spin_lock_irqsave(&ddiv->lock, flags); lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate, ddiv->width_gate, 0); - spin_unlock_irqrestore(&ddiv->lock, flags); } static int @@ -497,32 +464,25 @@ lgm_clk_ddiv_set_rate(struct clk_hw *hw, unsigned long rate, { struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw); u32 div, ddiv1, ddiv2; - unsigned long flags; div = DIV_ROUND_CLOSEST_ULL((u64)prate, rate); - spin_lock_irqsave(&ddiv->lock, flags); if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) { div = DIV_ROUND_CLOSEST_ULL((u64)div, 5); div = div * 2; } - if (div <= 0) { - spin_unlock_irqrestore(&ddiv->lock, flags); + if (div <= 0) return -EINVAL; - } - if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2)) { - spin_unlock_irqrestore(&ddiv->lock, flags); + if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2)) return -EINVAL; - } lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift0, ddiv->width0, ddiv1 - 1); lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift1, ddiv->width1, ddiv2 - 1); - spin_unlock_irqrestore(&ddiv->lock, flags); return 0; } @@ -533,18 +493,15 @@ lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate, { struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw); u32 div, ddiv1, ddiv2; - unsigned long flags; u64 rate64; div = DIV_ROUND_CLOSEST_ULL((u64)*prate, rate); /* if predivide bit is enabled, modify div by factor of 2.5 */ - 
spin_lock_irqsave(&ddiv->lock, flags); if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) { div = div * 2; div = DIV_ROUND_CLOSEST_ULL((u64)div, 5); } - spin_unlock_irqrestore(&ddiv->lock, flags); if (div <= 0) return *prate; @@ -558,12 +515,10 @@ lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate, do_div(rate64, ddiv2); /* if predivide bit is enabled, modify rounded rate by factor of 2.5 */ - spin_lock_irqsave(&ddiv->lock, flags); if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) { rate64 = rate64 * 2; rate64 = DIV_ROUND_CLOSEST_ULL(rate64, 5); } - spin_unlock_irqrestore(&ddiv->lock, flags); return rate64; } @@ -600,7 +555,6 @@ int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx, init.num_parents = 1; ddiv->membase = ctx->membase; - ddiv->lock = ctx->lock; ddiv->reg = list->reg; ddiv->shift0 = list->shift0; ddiv->width0 = list->width0; diff --git a/drivers/clk/x86/clk-cgu.h b/drivers/clk/x86/clk-cgu.h index 4e22bfb2231289c1e95c47ce6701e5ab8c12200d..bcaf8aec94e5d3d166d7fd6149c4e97ca039847c 100644 --- a/drivers/clk/x86/clk-cgu.h +++ b/drivers/clk/x86/clk-cgu.h @@ -1,28 +1,28 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright(c) 2020 Intel Corporation. - * Zhu YiXin - * Rahul Tanwar + * Copyright (C) 2020-2022 MaxLinear, Inc. + * Copyright (C) 2020 Intel Corporation. + * Zhu Yixin + * Rahul Tanwar */ #ifndef __CLK_CGU_H #define __CLK_CGU_H -#include +#include struct lgm_clk_mux { struct clk_hw hw; - void __iomem *membase; + struct regmap *membase; unsigned int reg; u8 shift; u8 width; unsigned long flags; - spinlock_t lock; }; struct lgm_clk_divider { struct clk_hw hw; - void __iomem *membase; + struct regmap *membase; unsigned int reg; u8 shift; u8 width; @@ -30,12 +30,11 @@ struct lgm_clk_divider { u8 width_gate; unsigned long flags; const struct clk_div_table *table; - spinlock_t lock; }; struct lgm_clk_ddiv { struct clk_hw hw; - void __iomem *membase; + struct regmap *membase; unsigned int reg; u8 shift0; u8 width0; @@ -48,16 +47,14 @@ struct lgm_clk_ddiv { unsigned int mult; unsigned int div; unsigned long flags; - spinlock_t lock; }; struct lgm_clk_gate { struct clk_hw hw; - void __iomem *membase; + struct regmap *membase; unsigned int reg; u8 shift; unsigned long flags; - spinlock_t lock; }; enum lgm_clk_type { @@ -77,11 +74,10 @@ enum lgm_clk_type { * @clk_data: array of hw clocks and clk number. 
*/ struct lgm_clk_provider { - void __iomem *membase; + struct regmap *membase; struct device_node *np; struct device *dev; struct clk_hw_onecell_data clk_data; - spinlock_t lock; }; enum pll_type { @@ -92,11 +88,10 @@ enum pll_type { struct lgm_clk_pll { struct clk_hw hw; - void __iomem *membase; + struct regmap *membase; unsigned int reg; unsigned long flags; enum pll_type type; - spinlock_t lock; }; /** @@ -202,6 +197,8 @@ struct lgm_clk_branch { /* clock flags definition */ #define CLOCK_FLAG_VAL_INIT BIT(16) #define MUX_CLK_SW BIT(17) +#define GATE_CLK_HW BIT(18) +#define DIV_CLK_NO_MASK BIT(19) #define LGM_MUX(_id, _name, _pdata, _f, _reg, \ _shift, _width, _cf, _v) \ @@ -300,29 +297,32 @@ struct lgm_clk_branch { .div = _d, \ } -static inline void lgm_set_clk_val(void __iomem *membase, u32 reg, +static inline void lgm_set_clk_val(struct regmap *membase, u32 reg, u8 shift, u8 width, u32 set_val) { u32 mask = (GENMASK(width - 1, 0) << shift); - u32 regval; - regval = readl(membase + reg); - regval = (regval & ~mask) | ((set_val << shift) & mask); - writel(regval, membase + reg); + regmap_update_bits(membase, reg, mask, set_val << shift); } -static inline u32 lgm_get_clk_val(void __iomem *membase, u32 reg, +static inline u32 lgm_get_clk_val(struct regmap *membase, u32 reg, u8 shift, u8 width) { u32 mask = (GENMASK(width - 1, 0) << shift); u32 val; - val = readl(membase + reg); + if (regmap_read(membase, reg, &val)) { + WARN_ONCE(1, "Failed to read clk reg: 0x%x\n", reg); + return 0; + } + val = (val & mask) >> shift; return val; } + + int lgm_clk_register_branches(struct lgm_clk_provider *ctx, const struct lgm_clk_branch *list, unsigned int nr_clk); diff --git a/drivers/clk/x86/clk-lgm.c b/drivers/clk/x86/clk-lgm.c index 020f4e83a5ccb498df19993571a36012b8ef0e61..f69455dd1c9802a04b4aa2a902e0fed946494a20 100644 --- a/drivers/clk/x86/clk-lgm.c +++ b/drivers/clk/x86/clk-lgm.c @@ -1,10 +1,12 @@ // SPDX-License-Identifier: GPL-2.0 /* + * Copyright (C) 2020-2022 MaxLinear, Inc. * Copyright (C) 2020 Intel Corporation. 
- * Zhu YiXin - * Rahul Tanwar + * Zhu Yixin + * Rahul Tanwar */ #include +#include #include #include #include @@ -253,8 +255,8 @@ static const struct lgm_clk_branch lgm_branch_clks[] = { LGM_FIXED(LGM_CLK_SLIC, "slic", NULL, 0, CGU_IF_CLK1, 8, 2, CLOCK_FLAG_VAL_INIT, 8192000, 2), LGM_FIXED(LGM_CLK_DOCSIS, "v_docsis", NULL, 0, 0, 0, 0, 0, 16000000, 0), - LGM_DIV(LGM_CLK_DCL, "dcl", "v_ifclk", 0, CGU_PCMCR, - 25, 3, 0, 0, 0, 0, dcl_div), + LGM_DIV(LGM_CLK_DCL, "dcl", "v_ifclk", CLK_SET_RATE_PARENT, CGU_PCMCR, + 25, 3, 0, 0, DIV_CLK_NO_MASK, 0, dcl_div), LGM_MUX(LGM_CLK_PCM, "pcm", pcm_p, 0, CGU_C55_PCMCR, 0, 1, CLK_MUX_ROUND_CLOSEST, 0), LGM_FIXED_FACTOR(LGM_CLK_DDR_PHY, "ddr_phy", "ddr", @@ -433,13 +435,15 @@ static int lgm_cgu_probe(struct platform_device *pdev) ctx->clk_data.num = CLK_NR_CLKS; - ctx->membase = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(ctx->membase)) + ctx->membase = syscon_node_to_regmap(np); + if (IS_ERR(ctx->membase)) { + dev_err(dev, "Failed to get clk CGU iomem\n"); return PTR_ERR(ctx->membase); + } + ctx->np = np; ctx->dev = dev; - spin_lock_init(&ctx->lock); ret = lgm_clk_register_plls(ctx, lgm_pll_clks, ARRAY_SIZE(lgm_pll_clks)); diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c index 2de7fd18f66a1802e80e027564d6386f57c855f8..f0be8a43ec4965ecbdeac6011273593fee5be7b0 100644 --- a/drivers/cpufreq/armada-37xx-cpufreq.c +++ b/drivers/cpufreq/armada-37xx-cpufreq.c @@ -443,7 +443,7 @@ static int __init armada37xx_cpufreq_driver_init(void) return -ENODEV; } - clk = clk_get(cpu_dev, 0); + clk = clk_get(cpu_dev, NULL); if (IS_ERR(clk)) { dev_err(cpu_dev, "Cannot get clock for CPU0\n"); return PTR_ERR(clk); diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index a3734014db4778ca32783bfbc4f7ba68b6d35d2e..aea285651fbafc171aa45a20519ddcbff64da43a 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -130,6 +130,7 @@ static const struct of_device_id blacklist[] __initconst = { { .compatible = "nvidia,tegra30", }, { .compatible = "nvidia,tegra124", }, { .compatible = "nvidia,tegra210", }, + { .compatible = "nvidia,tegra234", }, { .compatible = "qcom,apq8096", }, { .compatible = "qcom,msm8996", }, diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 2e3690f65786d0d9509a344e0d8b3377a8a8e99b..6d05ac0c051340cd4318517adf568ec083d11b10 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c @@ -522,7 +522,6 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev, { struct skcipher_request *req; struct scatterlist *dst; - dma_addr_t addr; req = skcipher_request_cast(pd_uinfo->async_req); @@ -531,8 +530,8 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev, req->cryptlen, req->dst); } else { dst = pd_uinfo->dest_va; - addr = dma_map_page(dev->core_dev->device, sg_page(dst), - dst->offset, dst->length, DMA_FROM_DEVICE); + dma_unmap_page(dev->core_dev->device, pd->dest, dst->length, + DMA_FROM_DEVICE); } if (pd_uinfo->sa_va->sa_command_0.bf.save_iv == SA_SAVE_IV) { @@ -557,10 +556,9 @@ static void crypto4xx_ahash_done(struct crypto4xx_device *dev, struct ahash_request *ahash_req; ahash_req = ahash_request_cast(pd_uinfo->async_req); - ctx = crypto_tfm_ctx(ahash_req->base.tfm); + ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(ahash_req)); - crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo, - crypto_tfm_ctx(ahash_req->base.tfm)); + 
crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo, ctx); crypto4xx_ret_sg_desc(dev, pd_uinfo); if (pd_uinfo->state & PD_ENTRY_BUSY) diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c index b9299defb431d5cf1a98e855c66c5ca02fc174b7..e416456b2b8aa615aec80c04f7fbbf5e13712cf7 100644 --- a/drivers/crypto/ccp/ccp-dmaengine.c +++ b/drivers/crypto/ccp/ccp-dmaengine.c @@ -643,14 +643,26 @@ static void ccp_dma_release(struct ccp_device *ccp) chan = ccp->ccp_dma_chan + i; dma_chan = &chan->dma_chan; - if (dma_chan->client_count) - dma_release_channel(dma_chan); - tasklet_kill(&chan->cleanup_tasklet); list_del_rcu(&dma_chan->device_node); } } +static void ccp_dma_release_channels(struct ccp_device *ccp) +{ + struct ccp_dma_chan *chan; + struct dma_chan *dma_chan; + unsigned int i; + + for (i = 0; i < ccp->cmd_q_count; i++) { + chan = ccp->ccp_dma_chan + i; + dma_chan = &chan->dma_chan; + + if (dma_chan->client_count) + dma_release_channel(dma_chan); + } +} + int ccp_dmaengine_register(struct ccp_device *ccp) { struct ccp_dma_chan *chan; @@ -771,8 +783,9 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp) if (!dmaengine) return; - ccp_dma_release(ccp); + ccp_dma_release_channels(ccp); dma_async_device_unregister(dma_dev); + ccp_dma_release(ccp); kmem_cache_destroy(ccp->dma_desc_cache); kmem_cache_destroy(ccp->dma_cmd_cache); diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index ed39a22e1b2b978c0d8af2f028307f0366658571..856d867f46ebb7f6bba10a61e9062163d76a7219 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -23,6 +23,7 @@ #include #include +#include #include "psp-dev.h" #include "sev-dev.h" @@ -138,6 +139,17 @@ static int sev_cmd_buffer_len(int cmd) return 0; } +static void *sev_fw_alloc(unsigned long len) +{ + struct page *page; + + page = alloc_pages(GFP_KERNEL, get_order(len)); + if (!page) + return NULL; + + return page_address(page); +} + static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret) { struct psp_device *psp = psp_master; @@ -304,15 +316,14 @@ static int sev_platform_shutdown(int *error) static int sev_get_platform_state(int *state, int *error) { - struct sev_device *sev = psp_master->sev_data; + struct sev_user_data_status data; int rc; - rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, - &sev->status_cmd_buf, error); + rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, error); if (rc) return rc; - *state = sev->status_cmd_buf.state; + *state = data.state; return rc; } @@ -350,15 +361,16 @@ static int sev_ioctl_do_reset(struct sev_issue_cmd *argp, bool writable) static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp) { - struct sev_device *sev = psp_master->sev_data; - struct sev_user_data_status *data = &sev->status_cmd_buf; + struct sev_user_data_status data; int ret; - ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, data, &argp->error); + memset(&data, 0, sizeof(data)); + + ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, &argp->error); if (ret) return ret; - if (copy_to_user((void __user *)argp->data, data, sizeof(*data))) + if (copy_to_user((void __user *)argp->data, &data, sizeof(data))) ret = -EFAULT; return ret; @@ -385,7 +397,7 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable) { struct sev_device *sev = psp_master->sev_data; struct sev_user_data_pek_csr input; - struct sev_data_pek_csr *data; + struct sev_data_pek_csr data; void __user *input_address; void *blob = NULL; int ret; @@ -396,9 +408,7 @@ static int 
sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable) if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) return -EFAULT; - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) - return -ENOMEM; + memset(&data, 0, sizeof(data)); /* userspace wants to query CSR length */ if (!input.address || !input.length) @@ -406,19 +416,15 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable) /* allocate a physically contiguous buffer to store the CSR blob */ input_address = (void __user *)input.address; - if (input.length > SEV_FW_BLOB_MAX_SIZE) { - ret = -EFAULT; - goto e_free; - } + if (input.length > SEV_FW_BLOB_MAX_SIZE) + return -EFAULT; - blob = kmalloc(input.length, GFP_KERNEL); - if (!blob) { - ret = -ENOMEM; - goto e_free; - } + blob = kzalloc(input.length, GFP_KERNEL); + if (!blob) + return -ENOMEM; - data->address = __psp_pa(blob); - data->len = input.length; + data.address = __psp_pa(blob); + data.len = input.length; cmd: if (sev->state == SEV_STATE_UNINIT) { @@ -427,10 +433,10 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable) goto e_free_blob; } - ret = __sev_do_cmd_locked(SEV_CMD_PEK_CSR, data, &argp->error); + ret = __sev_do_cmd_locked(SEV_CMD_PEK_CSR, &data, &argp->error); /* If we query the CSR length, FW responded with expected data. */ - input.length = data->len; + input.length = data.len; if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) { ret = -EFAULT; @@ -444,8 +450,6 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable) e_free_blob: kfree(blob); -e_free: - kfree(data); return ret; } @@ -465,21 +469,20 @@ EXPORT_SYMBOL_GPL(psp_copy_user_blob); static int sev_get_api_version(void) { struct sev_device *sev = psp_master->sev_data; - struct sev_user_data_status *status; + struct sev_user_data_status status; int error = 0, ret; - status = &sev->status_cmd_buf; - ret = sev_platform_status(status, &error); + ret = sev_platform_status(&status, &error); if (ret) { dev_err(sev->dev, "SEV: failed to get status. 
Error: %#x\n", error); return 1; } - sev->api_major = status->api_major; - sev->api_minor = status->api_minor; - sev->build = status->build; - sev->state = status->state; + sev->api_major = status.api_major; + sev->api_minor = status.api_minor; + sev->build = status.build; + sev->state = status.state; return 0; } @@ -577,7 +580,7 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable) { struct sev_device *sev = psp_master->sev_data; struct sev_user_data_pek_cert_import input; - struct sev_data_pek_cert_import *data; + struct sev_data_pek_cert_import data; void *pek_blob, *oca_blob; int ret; @@ -587,19 +590,14 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable) if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) return -EFAULT; - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) - return -ENOMEM; - /* copy PEK certificate blobs from userspace */ pek_blob = psp_copy_user_blob(input.pek_cert_address, input.pek_cert_len); - if (IS_ERR(pek_blob)) { - ret = PTR_ERR(pek_blob); - goto e_free; - } + if (IS_ERR(pek_blob)) + return PTR_ERR(pek_blob); - data->pek_cert_address = __psp_pa(pek_blob); - data->pek_cert_len = input.pek_cert_len; + data.reserved = 0; + data.pek_cert_address = __psp_pa(pek_blob); + data.pek_cert_len = input.pek_cert_len; /* copy PEK certificate blobs from userspace */ oca_blob = psp_copy_user_blob(input.oca_cert_address, input.oca_cert_len); @@ -608,8 +606,8 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable) goto e_free_pek; } - data->oca_cert_address = __psp_pa(oca_blob); - data->oca_cert_len = input.oca_cert_len; + data.oca_cert_address = __psp_pa(oca_blob); + data.oca_cert_len = input.oca_cert_len; /* If platform is not in INIT state then transition it to INIT */ if (sev->state != SEV_STATE_INIT) { @@ -618,21 +616,19 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable) goto e_free_oca; } - ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, data, &argp->error); + ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, &data, &argp->error); e_free_oca: kfree(oca_blob); e_free_pek: kfree(pek_blob); -e_free: - kfree(data); return ret; } static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp) { struct sev_user_data_get_id2 input; - struct sev_data_get_id *data; + struct sev_data_get_id data; void __user *input_address; void *id_blob = NULL; int ret; @@ -646,28 +642,32 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp) input_address = (void __user *)input.address; - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) - return -ENOMEM; - if (input.address && input.length) { - id_blob = kmalloc(input.length, GFP_KERNEL); - if (!id_blob) { - kfree(data); + /* + * The length of the ID shouldn't be assumed by software since + * it may change in the future. The allocation size is limited + * to 1 << (PAGE_SHIFT + MAX_ORDER - 1) by the page allocator. + * If the allocation fails, simply return ENOMEM rather than + * warning in the kernel log. 
+ */ + id_blob = kzalloc(input.length, GFP_KERNEL | __GFP_NOWARN); + if (!id_blob) return -ENOMEM; - } - data->address = __psp_pa(id_blob); - data->len = input.length; + data.address = __psp_pa(id_blob); + data.len = input.length; + } else { + data.address = 0; + data.len = 0; } - ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error); + ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, &data, &argp->error); /* * Firmware will return the length of the ID value (either the minimum * required length or the actual length written), return it to the user. */ - input.length = data->len; + input.length = data.len; if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) { ret = -EFAULT; @@ -675,7 +675,7 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp) } if (id_blob) { - if (copy_to_user(input_address, id_blob, data->len)) { + if (copy_to_user(input_address, id_blob, data.len)) { ret = -EFAULT; goto e_free; } @@ -683,7 +683,6 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp) e_free: kfree(id_blob); - kfree(data); return ret; } @@ -733,7 +732,7 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable) struct sev_device *sev = psp_master->sev_data; struct sev_user_data_pdh_cert_export input; void *pdh_blob = NULL, *cert_blob = NULL; - struct sev_data_pdh_cert_export *data; + struct sev_data_pdh_cert_export data; void __user *input_cert_chain_address; void __user *input_pdh_cert_address; int ret; @@ -751,9 +750,7 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable) if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) return -EFAULT; - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) - return -ENOMEM; + memset(&data, 0, sizeof(data)); /* Userspace wants to query the certificate length. */ if (!input.pdh_cert_address || @@ -765,41 +762,35 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable) input_cert_chain_address = (void __user *)input.cert_chain_address; /* Allocate a physically contiguous buffer to store the PDH blob. */ - if (input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) { - ret = -EFAULT; - goto e_free; - } + if (input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) + return -EFAULT; /* Allocate a physically contiguous buffer to store the cert chain blob. */ - if (input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) { - ret = -EFAULT; - goto e_free; - } + if (input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) + return -EFAULT; - pdh_blob = kmalloc(input.pdh_cert_len, GFP_KERNEL); - if (!pdh_blob) { - ret = -ENOMEM; - goto e_free; - } + pdh_blob = kzalloc(input.pdh_cert_len, GFP_KERNEL); + if (!pdh_blob) + return -ENOMEM; - data->pdh_cert_address = __psp_pa(pdh_blob); - data->pdh_cert_len = input.pdh_cert_len; + data.pdh_cert_address = __psp_pa(pdh_blob); + data.pdh_cert_len = input.pdh_cert_len; - cert_blob = kmalloc(input.cert_chain_len, GFP_KERNEL); + cert_blob = kzalloc(input.cert_chain_len, GFP_KERNEL); if (!cert_blob) { ret = -ENOMEM; goto e_free_pdh; } - data->cert_chain_address = __psp_pa(cert_blob); - data->cert_chain_len = input.cert_chain_len; + data.cert_chain_address = __psp_pa(cert_blob); + data.cert_chain_len = input.cert_chain_len; cmd: - ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, data, &argp->error); + ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, &data, &argp->error); /* If we query the length, FW responded with expected data. 
*/ - input.cert_chain_len = data->cert_chain_len; - input.pdh_cert_len = data->pdh_cert_len; + input.cert_chain_len = data.cert_chain_len; + input.pdh_cert_len = data.pdh_cert_len; if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) { ret = -EFAULT; @@ -824,8 +815,6 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable) kfree(cert_blob); e_free_pdh: kfree(pdh_blob); -e_free: - kfree(data); return ret; } @@ -1063,7 +1052,6 @@ EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user); void sev_pci_init(void) { struct sev_device *sev = psp_master->sev_data; - struct page *tmr_page; int error, rc; if (!sev) @@ -1079,14 +1067,13 @@ void sev_pci_init(void) sev_get_api_version(); /* Obtain the TMR memory area for SEV-ES use */ - tmr_page = alloc_pages(GFP_KERNEL, get_order(SEV_ES_TMR_SIZE)); - if (tmr_page) { - sev_es_tmr = page_address(tmr_page); - } else { - sev_es_tmr = NULL; + sev_es_tmr = sev_fw_alloc(SEV_ES_TMR_SIZE); + if (sev_es_tmr) + /* Must flush the cache before giving it to the firmware */ + clflush_cache_range(sev_es_tmr, SEV_ES_TMR_SIZE); + else dev_warn(sev->dev, "SEV: TMR allocation failed, SEV-ES support unavailable\n"); - } /* Initialize the platform */ rc = sev_platform_init(&error); diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h index dd5c4fe82914c7341673c95b2b2800c7bf71c008..3b0cd0f854df9bfa08d8f61f4c6b0be22f600730 100644 --- a/drivers/crypto/ccp/sev-dev.h +++ b/drivers/crypto/ccp/sev-dev.h @@ -46,7 +46,6 @@ struct sev_device { unsigned int int_rcvd; wait_queue_head_t int_queue; struct sev_misc_dev *misc; - struct sev_user_data_status status_cmd_buf; struct sev_data_init init_cmd_buf; u8 api_major; diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c index 725a739800b0ad46c59d1a3ca24254bc3de3cba1..ce77826c7fb0514fa40c46fa72bae3d6d71f6c2b 100644 --- a/drivers/crypto/hisilicon/sgl.c +++ b/drivers/crypto/hisilicon/sgl.c @@ -113,9 +113,8 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev, for (j = 0; j < i; j++) { dma_free_coherent(dev, block_size, block[j].sgl, block[j].sgl_dma); - memset(block + j, 0, sizeof(*block)); } - kfree(pool); + kfree_sensitive(pool); return ERR_PTR(-ENOMEM); } EXPORT_SYMBOL_GPL(hisi_acc_create_sgl_pool); diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c index c1d379bd7af330741b1065603df4ba1d1d15d946..a02777c93c07bcbcecc3cb5a95ee8227efe52528 100644 --- a/drivers/dax/bus.c +++ b/drivers/dax/bus.c @@ -398,8 +398,8 @@ static void unregister_dev_dax(void *dev) dev_dbg(dev, "%s\n", __func__); kill_dev_dax(dev_dax); - free_dev_dax_ranges(dev_dax); device_del(dev); + free_dev_dax_ranges(dev_dax); put_device(dev); } diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c index b4368c5b6a0cc9df10cff1a2b6145e0a72bfdced..27d669f8b5f3acd8a2ef40ed7ca6ddfd02980849 100644 --- a/drivers/dax/kmem.c +++ b/drivers/dax/kmem.c @@ -114,7 +114,7 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax) if (rc) { dev_warn(dev, "mapping%d: %#llx-%#llx memory add failed\n", i, range.start, range.end); - release_resource(res); + remove_resource(res); kfree(res); data->res[i] = NULL; if (mapped) @@ -159,7 +159,7 @@ static int dev_dax_kmem_remove(struct dev_dax *dev_dax) rc = remove_memory(dev_dax->target_node, range.start, range_len(&range)); if (rc == 0) { - release_resource(data->res[i]); + remove_resource(data->res[i]); kfree(data->res[i]); data->res[i] = NULL; success++; diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 
af3ee288bc11793c6b155865336f7aff288be585..4ec7bb58c195fc1408acb9210bab5cf9b9a7f34d 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -451,7 +451,8 @@ static int dma_chan_get(struct dma_chan *chan) /* The channel is already in use, update client count */ if (chan->client_count) { __module_get(owner); - goto out; + chan->client_count++; + return 0; } if (!try_module_get(owner)) @@ -470,11 +471,11 @@ static int dma_chan_get(struct dma_chan *chan) goto err_out; } + chan->client_count++; + if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask)) balance_ref_count(chan); -out: - chan->client_count++; return 0; err_out: diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 2283dcd8bf91d142046c58cf8b1aeaa29e537bdf..6514db824473cbb1b7f5fc3c68673e24b26e09ce 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -1363,10 +1363,12 @@ static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac, sdma_config_ownership(sdmac, false, true, false); if (sdma_load_context(sdmac)) - goto err_desc_out; + goto err_bd_out; return desc; +err_bd_out: + sdma_free_bd(desc); err_desc_out: kfree(desc); err_out: diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index e76adc31ab66f39214ad89f2e1a919976bac3964..12ad4bb3c5f282ca9025ec8b226fcb4db7765441 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -3119,8 +3119,10 @@ static int xilinx_dma_probe(struct platform_device *pdev) /* Initialize the channels */ for_each_child_of_node(node, child) { err = xilinx_dma_child_probe(xdev, child); - if (err < 0) + if (err < 0) { + of_node_put(child); goto error; + } } if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c index 8220ce5b87ca06616e4102ead54802948723699c..85c229985f905a5575f4859fc218779ee02fc491 100644 --- a/drivers/edac/edac_device.c +++ b/drivers/edac/edac_device.c @@ -34,6 +34,9 @@ static DEFINE_MUTEX(device_ctls_mutex); static LIST_HEAD(edac_device_list); +/* Default workqueue processing interval on this instance, in msecs */ +#define DEFAULT_POLL_INTERVAL 1000 + #ifdef CONFIG_EDAC_DEBUG static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev) { @@ -366,7 +369,7 @@ static void edac_device_workq_function(struct work_struct *work_req) * whole one second to save timers firing all over the period * between integral seconds */ - if (edac_dev->poll_msec == 1000) + if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL) edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay)); else edac_queue_work(&edac_dev->work, edac_dev->delay); @@ -396,7 +399,7 @@ static void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev, * timers firing on sub-second basis, while they are happy * to fire together on the 1 second exactly */ - if (edac_dev->poll_msec == 1000) + if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL) edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay)); else edac_queue_work(&edac_dev->work, edac_dev->delay); @@ -430,7 +433,7 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev, edac_dev->delay = msecs_to_jiffies(msec); /* See comment in edac_device_workq_setup() above */ - if (edac_dev->poll_msec == 1000) + if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL) edac_mod_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay)); else edac_mod_work(&edac_dev->work, edac_dev->delay); @@ -472,11 +475,7 @@ int edac_device_add_device(struct edac_device_ctl_info 
*edac_dev) /* This instance is NOW RUNNING */ edac_dev->op_state = OP_RUNNING_POLL; - /* - * enable workq processing on this instance, - * default = 1000 msec - */ - edac_device_workq_setup(edac_dev, 1000); + edac_device_workq_setup(edac_dev, edac_dev->poll_msec ?: DEFAULT_POLL_INTERVAL); } else { edac_dev->op_state = OP_RUNNING_INTERRUPT; } diff --git a/drivers/edac/highbank_mc_edac.c b/drivers/edac/highbank_mc_edac.c index 61b76ec226af11dcefb8a8c91585295d37fcc610..19fba258ae108876773bd9563ff4f21038e170be 100644 --- a/drivers/edac/highbank_mc_edac.c +++ b/drivers/edac/highbank_mc_edac.c @@ -174,8 +174,10 @@ static int highbank_mc_probe(struct platform_device *pdev) drvdata = mci->pvt_info; platform_set_drvdata(pdev, mci); - if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) - return -ENOMEM; + if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) { + res = -ENOMEM; + goto free; + } r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!r) { @@ -243,6 +245,7 @@ static int highbank_mc_probe(struct platform_device *pdev) edac_mc_del_mc(&pdev->dev); err: devres_release_group(&pdev->dev, NULL); +free: edac_mc_free(mci); return res; } diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c index 97a27e42dd610db0d3cf656357bf7421e57b56ff..c45519f59dc11b5d4954a04390c321d28b57d114 100644 --- a/drivers/edac/qcom_edac.c +++ b/drivers/edac/qcom_edac.c @@ -252,7 +252,7 @@ dump_syn_reg_values(struct llcc_drv_data *drv, u32 bank, int err_type) static int dump_syn_reg(struct edac_device_ctl_info *edev_ctl, int err_type, u32 bank) { - struct llcc_drv_data *drv = edev_ctl->pvt_info; + struct llcc_drv_data *drv = edev_ctl->dev->platform_data; int ret; ret = dump_syn_reg_values(drv, bank, err_type); @@ -289,7 +289,7 @@ static irqreturn_t llcc_ecc_irq_handler(int irq, void *edev_ctl) { struct edac_device_ctl_info *edac_dev_ctl = edev_ctl; - struct llcc_drv_data *drv = edac_dev_ctl->pvt_info; + struct llcc_drv_data *drv = edac_dev_ctl->dev->platform_data; irqreturn_t irq_rc = IRQ_NONE; u32 drp_error, trp_error, i; int ret; @@ -358,7 +358,6 @@ static int qcom_llcc_edac_probe(struct platform_device *pdev) edev_ctl->dev_name = dev_name(dev); edev_ctl->ctl_name = "llcc"; edev_ctl->panic_on_ue = LLCC_ERP_PANIC_ON_UE; - edev_ctl->pvt_info = llcc_driv_data; rc = edac_device_add_device(edev_ctl); if (rc) diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index b0cc3f1e9bb00bd08f1d3030b9ebed5b181d0785..16ea847ade5fd86462a8aaf374fea0b54cc1481f 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -818,8 +818,10 @@ static int ioctl_send_response(struct client *client, union ioctl_arg *arg) r = container_of(resource, struct inbound_transaction_resource, resource); - if (is_fcp_request(r->request)) + if (is_fcp_request(r->request)) { + kfree(r->data); goto out; + } if (a->length != fw_get_response_length(r->request)) { ret = -EINVAL; diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c index 0e3eaea5d85262964f1d6c7304b6deeaed890d3e..56a1f61aa3ff2b84aa1b69f200a120ad50221d85 100644 --- a/drivers/firmware/arm_scmi/shmem.c +++ b/drivers/firmware/arm_scmi/shmem.c @@ -58,10 +58,11 @@ u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem) void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem, struct scmi_xfer *xfer) { + size_t len = ioread32(&shmem->length); + xfer->hdr.status = ioread32(shmem->msg_payload); /* Skip the length of header and status in shmem area i.e 8 bytes */ - xfer->rx.len = min_t(size_t, xfer->rx.len, - 
ioread32(&shmem->length) - 8); + xfer->rx.len = min_t(size_t, xfer->rx.len, len > 8 ? len - 8 : 0); /* Take a copy to the rx buffer.. */ memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len); @@ -70,8 +71,10 @@ void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem, void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem, size_t max_len, struct scmi_xfer *xfer) { + size_t len = ioread32(&shmem->length); + /* Skip only the length of header in shmem area i.e 4 bytes */ - xfer->rx.len = min_t(size_t, max_len, ioread32(&shmem->length) - 4); + xfer->rx.len = min_t(size_t, max_len, len > 4 ? len - 4 : 0); /* Take a copy to the rx buffer.. */ memcpy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len); diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index a2765d668856e0e8de33fc7334d8038553b900b1..332739f3eded570b0f4baa74d8fa5b343cef2a2e 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -950,6 +950,8 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) /* first try to find a slot in an existing linked list entry */ for (prsv = efi_memreserve_root->next; prsv; ) { rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB); + if (!rsv) + return -ENOMEM; index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size); if (index < rsv->size) { rsv->entry[index].base = addr; diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c index 0a9aba5f9ceff0bff6c4507382afe487378afcd1..f178b2984dfb2a1c7d8bf4d4e68299b1de8f4ae7 100644 --- a/drivers/firmware/efi/memattr.c +++ b/drivers/firmware/efi/memattr.c @@ -33,7 +33,7 @@ int __init efi_memattr_init(void) return -ENOMEM; } - if (tbl->version > 1) { + if (tbl->version > 2) { pr_warn("Unexpected EFI Memory Attributes table version %d\n", tbl->version); goto unmap; diff --git a/drivers/firmware/google/framebuffer-coreboot.c b/drivers/firmware/google/framebuffer-coreboot.c index 916f26adc5955b8c021e607001558ec4028146b1..922c079d13c8a6e6b40c31e5881fd0cd2771583f 100644 --- a/drivers/firmware/google/framebuffer-coreboot.c +++ b/drivers/firmware/google/framebuffer-coreboot.c @@ -43,9 +43,7 @@ static int framebuffer_probe(struct coreboot_device *dev) fb->green_mask_pos == formats[i].green.offset && fb->green_mask_size == formats[i].green.length && fb->blue_mask_pos == formats[i].blue.offset && - fb->blue_mask_size == formats[i].blue.length && - fb->reserved_mask_pos == formats[i].transp.offset && - fb->reserved_mask_size == formats[i].transp.length) + fb->blue_mask_size == formats[i].blue.length) pdata.format = formats[i].name; } if (!pdata.format) diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c index 9e34bbbce26e2afbf66eda52743888fb299269c7..7a8269b52723d71f34a5a5c8c25a0a4609f8a5b3 100644 --- a/drivers/fpga/stratix10-soc.c +++ b/drivers/fpga/stratix10-soc.c @@ -213,9 +213,9 @@ static int s10_ops_write_init(struct fpga_manager *mgr, /* Allocate buffers from the service layer's pool. 
*/ for (i = 0; i < NUM_SVC_BUFS; i++) { kbuf = stratix10_svc_allocate_memory(priv->chan, SVC_BUF_SIZE); - if (!kbuf) { + if (IS_ERR(kbuf)) { s10_free_buffers(mgr); - ret = -ENOMEM; + ret = PTR_ERR(kbuf); goto init_done; } diff --git a/drivers/fsi/fsi-sbefifo.c b/drivers/fsi/fsi-sbefifo.c index 84cb965bfed5c92061ee825d1cd9acf89ccfad0c..97045a8d9422456020846d74fc51b19c5c61d310 100644 --- a/drivers/fsi/fsi-sbefifo.c +++ b/drivers/fsi/fsi-sbefifo.c @@ -640,7 +640,7 @@ static void sbefifo_collect_async_ffdc(struct sbefifo *sbefifo) } ffdc_iov.iov_base = ffdc; ffdc_iov.iov_len = SBEFIFO_MAX_FFDC_SIZE; - iov_iter_kvec(&ffdc_iter, WRITE, &ffdc_iov, 1, SBEFIFO_MAX_FFDC_SIZE); + iov_iter_kvec(&ffdc_iter, READ, &ffdc_iov, 1, SBEFIFO_MAX_FFDC_SIZE); cmd[0] = cpu_to_be32(2); cmd[1] = cpu_to_be32(SBEFIFO_CMD_GET_SBE_FFDC); rc = sbefifo_do_command(sbefifo, cmd, 2, &ffdc_iter); @@ -737,7 +737,7 @@ int sbefifo_submit(struct device *dev, const __be32 *command, size_t cmd_len, rbytes = (*resp_len) * sizeof(__be32); resp_iov.iov_base = response; resp_iov.iov_len = rbytes; - iov_iter_kvec(&resp_iter, WRITE, &resp_iov, 1, rbytes); + iov_iter_kvec(&resp_iter, READ, &resp_iov, 1, rbytes); /* Perform the command */ mutex_lock(&sbefifo->lock); @@ -817,7 +817,7 @@ static ssize_t sbefifo_user_read(struct file *file, char __user *buf, /* Prepare iov iterator */ resp_iov.iov_base = buf; resp_iov.iov_len = len; - iov_iter_init(&resp_iter, WRITE, &resp_iov, 1, len); + iov_iter_init(&resp_iter, READ, &resp_iov, 1, len); /* Perform the command */ mutex_lock(&sbefifo->lock); diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c index ba6ed2a413f51c128f7d04b41f31da5f5a467cc0..0d5a9fee3c70a467f88cbc2548e5d5fd0aa5d7fa 100644 --- a/drivers/gpio/gpio-mxc.c +++ b/drivers/gpio/gpio-mxc.c @@ -231,7 +231,7 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type) writel(1 << gpio_idx, port->base + GPIO_ISR); - return 0; + return port->gc.direction_input(&port->gc, gpio_idx); } static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio) diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c index 1ae612c796eef04626c98593c8a9d96a93a9d5cb..396a687e020f5fd99857825c6989d30e7c23674b 100644 --- a/drivers/gpio/gpio-vf610.c +++ b/drivers/gpio/gpio-vf610.c @@ -304,7 +304,7 @@ static int vf610_gpio_probe(struct platform_device *pdev) gc = &port->gc; gc->of_node = np; gc->parent = dev; - gc->label = "vf610-gpio"; + gc->label = dev_name(dev); gc->ngpio = VF610_GPIO_PER_PORT; gc->base = of_alias_get_id(np, "gpio") * VF610_GPIO_PER_PORT; diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 7212b9900e0abafdf8386235789ec6f25167384d..994e6635b8347b8aecec9c8ecf91a7be1dc95410 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -382,8 +382,9 @@ static int soc15_read_register(struct amdgpu_device *adev, u32 se_num, *value = 0; for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) { en = &soc15_allowed_read_registers[i]; - if (adev->reg_offset[en->hwip][en->inst] && - reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg] + if (!adev->reg_offset[en->hwip][en->inst]) + continue; + else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset)) continue; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index fbe15f4b75fd52c87fdcd9359c992b3a0c8739f6..dbdf0e210522c3e3ab330852a45b302550d93157 100644 --- 
a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2051,12 +2051,14 @@ static int dm_resume(void *handle) drm_for_each_connector_iter(connector, &iter) { aconnector = to_amdgpu_dm_connector(connector); + if (!aconnector->dc_link) + continue; + /* * this is the case when traversing through already created * MST connectors, should be skipped */ - if (aconnector->dc_link && - aconnector->dc_link->type == dc_connection_mst_branch) + if (aconnector->dc_link->type == dc_connection_mst_branch) continue; mutex_lock(&aconnector->hpd_lock); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 99887bcfada0430b3941521ee85e5d40c39cc558..7e0a55aa2b180cd685fbfe12737bb7be86782687 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -616,6 +616,7 @@ static bool dc_construct_ctx(struct dc *dc, dc_ctx->perf_trace = dc_perf_trace_create(); if (!dc_ctx->perf_trace) { + kfree(dc_ctx); ASSERT_CRITICAL(false); return false; } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c index b3f0476899d32ff2a65da4a74e9225ce65df4f52..14e7a59a9cd13ad254265d6661d3c6f58282ed6e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c @@ -3897,14 +3897,14 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); - locals->ODMCombineEnablePerState[i][k] = false; + locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled; mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; if (mode_lib->vba.ODMCapability) { if (locals->PlaneRequiredDISPCLKWithoutODMCombine > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) { - locals->ODMCombineEnablePerState[i][k] = true; + locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1; mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; } else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) { - locals->ODMCombineEnablePerState[i][k] = true; + locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1; mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; } } @@ -3957,7 +3957,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l locals->RequiredDISPCLK[i][j] = 0.0; locals->DISPCLK_DPPCLK_Support[i][j] = true; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { - locals->ODMCombineEnablePerState[i][k] = false; + locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled; if (locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]) { locals->NoOfDPP[i][j][k] = 1; locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k] diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c index 1bcda7eba4a6fc74309f334f24889fee143c5f7d..ee1c80366bd65619937d9335c52a983da80773ee 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c @@ -3974,17 +3974,17 @@ void 
dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); - locals->ODMCombineEnablePerState[i][k] = false; + locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled; mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; if (mode_lib->vba.ODMCapability) { if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) { - locals->ODMCombineEnablePerState[i][k] = true; + locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1; mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; } else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN20_MAX_DSC_IMAGE_WIDTH)) { - locals->ODMCombineEnablePerState[i][k] = true; + locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1; mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; } else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) { - locals->ODMCombineEnablePerState[i][k] = true; + locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1; mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; } } @@ -4037,7 +4037,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode locals->RequiredDISPCLK[i][j] = 0.0; locals->DISPCLK_DPPCLK_Support[i][j] = true; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { - locals->ODMCombineEnablePerState[i][k] = false; + locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled; if (locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]) { locals->NoOfDPP[i][j][k] = 1; locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k] diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c index c09bca3350687fc9493cd951b7ac00646edea048..25693e62db805f455142beb89f8f435621ec1c67 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c @@ -3975,17 +3975,17 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); - locals->ODMCombineEnablePerState[i][k] = false; + locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled; mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; if (mode_lib->vba.ODMCapability) { if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) { - locals->ODMCombineEnablePerState[i][k] = true; + locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1; mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; } else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN21_MAX_DSC_IMAGE_WIDTH)) { - locals->ODMCombineEnablePerState[i][k] = true; + locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1; mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; } else if (locals->HActive[k] > DCN21_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) { - locals->ODMCombineEnablePerState[i][k] = true; + locals->ODMCombineEnablePerState[i][k] = 
dm_odm_combine_mode_2to1; mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; } } @@ -4038,7 +4038,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l locals->RequiredDISPCLK[i][j] = 0.0; locals->DISPCLK_DPPCLK_Support[i][j] = true; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { - locals->ODMCombineEnablePerState[i][k] = false; + locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled; if (locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]) { locals->NoOfDPP[i][j][k] = 1; locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k] @@ -5213,7 +5213,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.ODMCombineEnabled[k] = locals->ODMCombineEnablePerState[mode_lib->vba.VoltageLevel][k]; } else { - mode_lib->vba.ODMCombineEnabled[k] = false; + mode_lib->vba.ODMCombineEnabled[k] = dm_odm_combine_mode_disabled; } mode_lib->vba.DSCEnabled[k] = locals->RequiresDSC[mode_lib->vba.VoltageLevel][k]; diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c index 1dcc28a4d853793395a4696501e904445be79ec6..0c6dea9ccb7287fc7e395a99fba281af908960c9 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9611.c +++ b/drivers/gpu/drm/bridge/lontium-lt9611.c @@ -185,12 +185,14 @@ static void lt9611_mipi_video_setup(struct lt9611 *lt9611, regmap_write(lt9611->regmap, 0x8319, (u8)(hfront_porch % 256)); - regmap_write(lt9611->regmap, 0x831a, (u8)(hsync_porch / 256)); + regmap_write(lt9611->regmap, 0x831a, (u8)(hsync_porch / 256) | + ((hfront_porch / 256) << 4)); regmap_write(lt9611->regmap, 0x831b, (u8)(hsync_porch % 256)); } -static void lt9611_pcr_setup(struct lt9611 *lt9611, const struct drm_display_mode *mode) +static void lt9611_pcr_setup(struct lt9611 *lt9611, const struct drm_display_mode *mode, unsigned int postdiv) { + unsigned int pcr_m = mode->clock * 5 * postdiv / 27000; const struct reg_sequence reg_cfg[] = { { 0x830b, 0x01 }, { 0x830c, 0x10 }, @@ -205,7 +207,6 @@ static void lt9611_pcr_setup(struct lt9611 *lt9611, const struct drm_display_mod /* stage 2 */ { 0x834a, 0x40 }, - { 0x831d, 0x10 }, /* MK limit */ { 0x832d, 0x38 }, @@ -220,30 +221,28 @@ static void lt9611_pcr_setup(struct lt9611 *lt9611, const struct drm_display_mod { 0x8325, 0x00 }, { 0x832a, 0x01 }, { 0x834a, 0x10 }, - { 0x831d, 0x10 }, - { 0x8326, 0x37 }, }; + u8 pol = 0x10; - regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg)); + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + pol |= 0x2; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + pol |= 0x1; + regmap_write(lt9611->regmap, 0x831d, pol); - switch (mode->hdisplay) { - case 640: - regmap_write(lt9611->regmap, 0x8326, 0x14); - break; - case 1920: - regmap_write(lt9611->regmap, 0x8326, 0x37); - break; - case 3840: + if (mode->hdisplay == 3840) regmap_multi_reg_write(lt9611->regmap, reg_cfg2, ARRAY_SIZE(reg_cfg2)); - break; - } + else + regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg)); + + regmap_write(lt9611->regmap, 0x8326, pcr_m); /* pcr rst */ regmap_write(lt9611->regmap, 0x8011, 0x5a); regmap_write(lt9611->regmap, 0x8011, 0xfa); } -static int lt9611_pll_setup(struct lt9611 *lt9611, const struct drm_display_mode *mode) +static int lt9611_pll_setup(struct lt9611 *lt9611, const struct drm_display_mode *mode, unsigned int *postdiv) { unsigned int pclk = mode->clock; const struct reg_sequence reg_cfg[] = { @@ -261,12 +260,16 @@ static int lt9611_pll_setup(struct 
lt9611 *lt9611, const struct drm_display_mode regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg)); - if (pclk > 150000) + if (pclk > 150000) { regmap_write(lt9611->regmap, 0x812d, 0x88); - else if (pclk > 70000) + *postdiv = 1; + } else if (pclk > 70000) { regmap_write(lt9611->regmap, 0x812d, 0x99); - else + *postdiv = 2; + } else { regmap_write(lt9611->regmap, 0x812d, 0xaa); + *postdiv = 4; + } /* * first divide pclk by 2 first @@ -446,12 +449,11 @@ static void lt9611_sleep_setup(struct lt9611 *lt9611) { 0x8023, 0x01 }, { 0x8157, 0x03 }, /* set addr pin as output */ { 0x8149, 0x0b }, - { 0x8151, 0x30 }, /* disable IRQ */ + { 0x8102, 0x48 }, /* MIPI Rx power down */ { 0x8123, 0x80 }, { 0x8130, 0x00 }, - { 0x8100, 0x01 }, /* bandgap power down */ - { 0x8101, 0x00 }, /* system clk power down */ + { 0x8011, 0x0a }, }; regmap_multi_reg_write(lt9611->regmap, @@ -757,7 +759,7 @@ static const struct drm_connector_funcs lt9611_bridge_connector_funcs = { static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611, struct device_node *dsi_node) { - const struct mipi_dsi_device_info info = { "lt9611", 0, NULL }; + const struct mipi_dsi_device_info info = { "lt9611", 0, lt9611->dev->of_node}; struct mipi_dsi_device *dsi; struct mipi_dsi_host *host; int ret; @@ -881,12 +883,18 @@ static enum drm_mode_status lt9611_bridge_mode_valid(struct drm_bridge *bridge, static void lt9611_bridge_pre_enable(struct drm_bridge *bridge) { struct lt9611 *lt9611 = bridge_to_lt9611(bridge); + static const struct reg_sequence reg_cfg[] = { + { 0x8102, 0x12 }, + { 0x8123, 0x40 }, + { 0x8130, 0xea }, + { 0x8011, 0xfa }, + }; if (!lt9611->sleep) return; - lt9611_reset(lt9611); - regmap_write(lt9611->regmap, 0x80ee, 0x01); + regmap_multi_reg_write(lt9611->regmap, + reg_cfg, ARRAY_SIZE(reg_cfg)); lt9611->sleep = false; } @@ -904,14 +912,15 @@ static void lt9611_bridge_mode_set(struct drm_bridge *bridge, { struct lt9611 *lt9611 = bridge_to_lt9611(bridge); struct hdmi_avi_infoframe avi_frame; + unsigned int postdiv; int ret; lt9611_bridge_pre_enable(bridge); lt9611_mipi_input_digital(lt9611, mode); - lt9611_pll_setup(lt9611, mode); + lt9611_pll_setup(lt9611, mode, &postdiv); lt9611_mipi_video_setup(lt9611, mode); - lt9611_pcr_setup(lt9611, mode); + lt9611_pcr_setup(lt9611, mode, postdiv); ret = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, <9611->connector, diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c index 72248a565579ecfad1940b6c1caca42ad826114b..e41afcc5326b10cdd77d648711212c35f00e2b26 100644 --- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c +++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c @@ -444,7 +444,11 @@ static int __init stdpxxxx_ge_b850v3_init(void) if (ret) return ret; - return i2c_add_driver(&stdp2690_ge_b850v3_fw_driver); + ret = i2c_add_driver(&stdp2690_ge_b850v3_fw_driver); + if (ret) + i2c_del_driver(&stdp4028_ge_b850v3_fw_driver); + + return ret; } module_init(stdpxxxx_ge_b850v3_init); diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 58527f151984c84100421e149e074921d3e35d46..98b659981f1ad9aa683d9c3560a76a1406e00965 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -1010,6 +1010,7 @@ static void drm_atomic_connector_print_state(struct drm_printer *p, drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name); drm_printf(p, "\tcrtc=%s\n", state->crtc ? 
state->crtc->name : "(null)"); drm_printf(p, "\tself_refresh_aware=%d\n", state->self_refresh_aware); + drm_printf(p, "\tmax_requested_bpc=%d\n", state->max_requested_bpc); if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) if (state->writeback_job && state->writeback_job->fb) diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 0feeac52e4eb3f517711689f732bfb21ab76ee2f..b5e15933cb5f47e20cedf1fd03a78c0a2d28563f 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -3769,6 +3769,9 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms set_bit(0, &mgr->payload_mask); mgr->vcpi_mask = 0; mgr->payload_id_table_cleared = false; + + memset(&mgr->down_rep_recv, 0, sizeof(mgr->down_rep_recv)); + memset(&mgr->up_req_recv, 0, sizeof(mgr->up_req_recv)); } out_unlock: @@ -3985,7 +3988,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv; if (!drm_dp_get_one_sb_msg(mgr, false, &mstb)) - goto out; + goto out_clear_reply; /* Multi-packet message transmission, don't clear the reply */ if (!msg->have_eomt) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 4334e466b4e059788f3e4d29c8d3652b664c7cc3..39eb39e78d7a22d65590c0d31bbd9c9081cc87f0 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -5560,8 +5560,6 @@ static u8 drm_mode_hdmi_vic(const struct drm_connector *connector, static u8 drm_mode_cea_vic(const struct drm_connector *connector, const struct drm_display_mode *mode) { - u8 vic; - /* * HDMI spec says if a mode is found in HDMI 1.4b 4K modes * we should send its VIC in vendor infoframes, else send the @@ -5571,13 +5569,18 @@ static u8 drm_mode_cea_vic(const struct drm_connector *connector, if (drm_mode_hdmi_vic(connector, mode)) return 0; - vic = drm_match_cea_mode(mode); + return drm_match_cea_mode(mode); +} - /* - * HDMI 1.4 VIC range: 1 <= VIC <= 64 (CEA-861-D) but - * HDMI 2.0 VIC range: 1 <= VIC <= 107 (CEA-861-F). So we - * have to make sure we dont break HDMI 1.4 sinks. - */ +/* + * Avoid sending VICs defined in HDMI 2.0 in AVI infoframes to sinks that + * conform to HDMI 1.4. 
+ * + * HDMI 1.4 (CTA-861-D) VIC range: [1..64] + * HDMI 2.0 (CTA-861-F) VIC range: [1..107] + */ +static u8 vic_for_avi_infoframe(const struct drm_connector *connector, u8 vic) +{ if (!is_hdmi2_sink(connector) && vic > 64) return 0; @@ -5653,7 +5656,7 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, picture_aspect = HDMI_PICTURE_ASPECT_NONE; } - frame->video_code = vic; + frame->video_code = vic_for_avi_infoframe(connector, vic); frame->picture_aspect = picture_aspect; frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE; frame->scan_mode = HDMI_SCAN_MODE_UNDERSCAN; diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c index 92152c06b75b7972e63d0106f54b0e6ef1dc5d4a..8d1064061e836f4176a11fb50e7f8e9ff1bb9591 100644 --- a/drivers/gpu/drm/drm_fourcc.c +++ b/drivers/gpu/drm/drm_fourcc.c @@ -178,6 +178,10 @@ const struct drm_format_info *__drm_format_info(u32 format) { .format = DRM_FORMAT_BGRA5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, { .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 }, { .format = DRM_FORMAT_BGR565, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 }, +#ifdef __BIG_ENDIAN + { .format = DRM_FORMAT_XRGB1555 | DRM_FORMAT_BIG_ENDIAN, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 }, + { .format = DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 }, +#endif { .format = DRM_FORMAT_RGB888, .depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 }, { .format = DRM_FORMAT_BGR888, .depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 }, { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 }, diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index 2c43d54766f34ab629afacbca9adf808ec8dbf12..19fb1d93a4f07835ed39c8ec87b6fee00769edcb 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -1143,6 +1143,58 @@ int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi, } EXPORT_SYMBOL(mipi_dsi_dcs_get_display_brightness); +/** + * mipi_dsi_dcs_set_display_brightness_large() - sets the 16-bit brightness value + * of the display + * @dsi: DSI peripheral device + * @brightness: brightness value + * + * Return: 0 on success or a negative error code on failure. + */ +int mipi_dsi_dcs_set_display_brightness_large(struct mipi_dsi_device *dsi, + u16 brightness) +{ + u8 payload[2] = { brightness >> 8, brightness & 0xff }; + ssize_t err; + + err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, + payload, sizeof(payload)); + if (err < 0) + return err; + + return 0; +} +EXPORT_SYMBOL(mipi_dsi_dcs_set_display_brightness_large); + +/** + * mipi_dsi_dcs_get_display_brightness_large() - gets the current 16-bit + * brightness value of the display + * @dsi: DSI peripheral device + * @brightness: brightness value + * + * Return: 0 on success or a negative error code on failure. 
+ */ +int mipi_dsi_dcs_get_display_brightness_large(struct mipi_dsi_device *dsi, + u16 *brightness) +{ + u8 brightness_be[2]; + ssize_t err; + + err = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_DISPLAY_BRIGHTNESS, + brightness_be, sizeof(brightness_be)); + if (err <= 0) { + if (err == 0) + err = -ENODATA; + + return err; + } + + *brightness = (brightness_be[0] << 8) | brightness_be[1]; + + return 0; +} +EXPORT_SYMBOL(mipi_dsi_dcs_get_display_brightness_large); + static int mipi_dsi_drv_probe(struct device *dev) { struct mipi_dsi_driver *drv = to_mipi_dsi_driver(dev->driver); diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c index f1affc1bb6799fdce233dbc27b4dd62754465953..fad2c118112702fc477f5f27eb10d46aedfd3ab3 100644 --- a/drivers/gpu/drm/drm_mode_config.c +++ b/drivers/gpu/drm/drm_mode_config.c @@ -398,6 +398,8 @@ static void drm_mode_config_init_release(struct drm_device *dev, void *ptr) */ int drmm_mode_config_init(struct drm_device *dev) { + int ret; + mutex_init(&dev->mode_config.mutex); drm_modeset_lock_init(&dev->mode_config.connection_mutex); mutex_init(&dev->mode_config.idr_mutex); @@ -419,7 +421,11 @@ int drmm_mode_config_init(struct drm_device *dev) init_llist_head(&dev->mode_config.connector_free_list); INIT_WORK(&dev->mode_config.connector_free_work, drm_connector_free_work_fn); - drm_mode_create_standard_properties(dev); + ret = drm_mode_create_standard_properties(dev); + if (ret) { + drm_mode_config_cleanup(dev); + return ret; + } /* Just to be sure */ dev->mode_config.num_fb = 0; diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index ca0fefeaab20b22504987bcd78c1ef9176976469..8768073794fbf618d2c81116763ed3d1a25e4c6c 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -272,6 +272,18 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"), }, .driver_data = (void *)&lcd1200x1920_rightside_up, + }, { /* Lenovo Ideapad D330-10IGL (HD) */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGL"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* Lenovo IdeaPad Duet 3 10IGL5 */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Duet 3 10IGL5"), + }, + .driver_data = (void *)&lcd1200x1920_rightside_up, }, { /* Lenovo Yoga Book X90F / X91F / X91L */ .matches = { /* Non exact match to match all versions */ diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c index 9ba2fe48228f1af032330a4bf87d10ec89e0f058..44fbc0a123bf3d8fcb5f8e6a557c07aad4300f70 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c @@ -80,10 +80,10 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova, return -EINVAL; for_each_sgtable_dma_sg(sgt, sg, i) { - u32 pa = sg_dma_address(sg) - sg->offset; + phys_addr_t pa = sg_dma_address(sg) - sg->offset; size_t bytes = sg_dma_len(sg) + sg->offset; - VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes); + VERB("map[%d]: %08x %pap(%zx)", i, iova, &pa, bytes); ret = etnaviv_context_map(context, da, pa, bytes, prot); if (ret) diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c index 8eb1842f14cea704e97eb2bd881005003e433e8b..b4e74c86fae73c7c6182d1cd5de721bf308c8715 
100644 --- a/drivers/gpu/drm/i915/display/intel_quirks.c +++ b/drivers/gpu/drm/i915/display/intel_quirks.c @@ -159,6 +159,8 @@ static struct intel_quirk intel_quirks[] = { /* ECS Liva Q2 */ { 0x3185, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time }, { 0x3184, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time }, + /* HP Notebook - 14-r206nv */ + { 0x0f31, 0x103c, 0x220f, quirk_invert_brightness }, }; void intel_init_quirks(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c index ffcaee74a2493beec3d58fb2b224ca75b711118f..545fa703d97575c9730623c23b83e6bf212171ec 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c @@ -296,10 +296,6 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj, spin_unlock(&obj->vma.lock); obj->tiling_and_stride = tiling | stride; - i915_gem_object_unlock(obj); - - /* Force the fence to be reacquired for GTT access */ - i915_gem_object_release_mmap_gtt(obj); /* Try to preallocate memory required to save swizzling on put-pages */ if (i915_gem_object_needs_bit17_swizzle(obj)) { @@ -312,6 +308,11 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj, obj->bit_17 = NULL; } + i915_gem_object_unlock(obj); + + /* Force the fence to be reacquired for GTT access */ + i915_gem_object_release_mmap_gtt(obj); + return 0; } diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c index 4034a4bac7f08669927f5e8d9f1478ca46baf57a..69b2e5509d678d4b1e0e1e9ccd9e8d4c20a0414d 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring.c +++ b/drivers/gpu/drm/i915/gt/intel_ring.c @@ -49,7 +49,7 @@ int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww) if (unlikely(ret)) goto err_unpin; - if (i915_vma_is_map_and_fenceable(vma)) + if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915)) addr = (void __force *)i915_vma_pin_iomap(vma); else addr = i915_gem_object_pin_map(vma->obj, @@ -91,7 +91,7 @@ void intel_ring_unpin(struct intel_ring *ring) return; i915_vma_unset_ggtt_write(vma); - if (i915_vma_is_map_and_fenceable(vma)) + if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915)) i915_vma_unpin_iomap(vma); else i915_gem_object_unpin_map(vma->obj); diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 4a3bde7c9f21712c8f6622bb877f63971b68fde7..ae5cf2b55e159261ad0da30850ef22986af4dd87 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -1212,6 +1212,22 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) GAMT_CHKN_BIT_REG, GAMT_CHKN_DISABLE_L3_COH_PIPE); + /* + * Wa_1408615072:icl,ehl (vsunit) + * Wa_1407596294:icl,ehl (hsunit) + */ + wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, + VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS); + + /* Wa_1407352427:icl,ehl */ + wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, + PSDUNIT_CLKGATE_DIS); + + /* Wa_1406680159:icl,ehl */ + wa_write_or(wal, + SUBSLICE_UNIT_LEVEL_CLKGATE, + GWUNIT_CLKGATE_DIS); + /* Wa_1607087056:icl,ehl,jsl */ if (IS_ICELAKE(i915) || IS_EHL_REVID(i915, EHL_REVID_A0, EHL_REVID_A0)) { @@ -1816,22 +1832,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS, GEN11_ENABLE_32_PLANE_MODE); - /* - * Wa_1408615072:icl,ehl (vsunit) - * Wa_1407596294:icl,ehl (hsunit) - */ - wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, - VSUNIT_CLKGATE_DIS | 
HSUNIT_CLKGATE_DIS); - - /* Wa_1407352427:icl,ehl */ - wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, - PSDUNIT_CLKGATE_DIS); - - /* Wa_1406680159:icl,ehl */ - wa_write_or(wal, - SUBSLICE_UNIT_LEVEL_CLKGATE, - GWUNIT_CLKGATE_DIS); - /* * Wa_1408767742:icl[a2..forever],ehl[all] * Wa_1605460711:icl[a0..c0] diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index dfd5ed15a7f4a7125833d7fc4c99cbe02ab05563..e83b1c406b96a2332c68402af8bb33087b071e34 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -803,6 +803,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, mtk_crtc->planes = devm_kcalloc(dev, num_comp_planes, sizeof(struct drm_plane), GFP_KERNEL); + if (!mtk_crtc->planes) + return -ENOMEM; for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { ret = mtk_drm_crtc_init_comp_planes(drm_dev, mtk_crtc, i, diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 59c85c63b7cc900f4b3d22370cbac6e07f29031c..719c46d245dd2a9dfa9a0cce3fb04977d3371391 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -378,6 +378,7 @@ static int mtk_drm_bind(struct device *dev) err_deinit: mtk_drm_kms_deinit(drm); err_free: + private->drm = NULL; drm_dev_put(drm); return ret; } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c index 0583e557ad372b5c3dcc8e830d9c13b6d5413530..29702dd8631d488e1cb884bc29b2bff48c0f82e1 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c @@ -142,8 +142,6 @@ static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj, ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie, mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs); - if (ret) - drm_gem_vm_close(vma); return ret; } @@ -266,6 +264,6 @@ void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) return; vunmap(vaddr); - mtk_gem->kvaddr = 0; + mtk_gem->kvaddr = NULL; kfree(mtk_gem->pages); } diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 146c4d04f572dfbffb884e51e512f76aa73b4c26..a6e71b7b69b833ec40019b4ce6d06d620b23f2c9 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -704,7 +704,7 @@ static void mtk_dsi_lane_ready(struct mtk_dsi *dsi) mtk_dsi_clk_ulp_mode_leave(dsi); mtk_dsi_lane0_ulp_mode_leave(dsi); mtk_dsi_clk_hs_mode(dsi, 0); - msleep(20); + usleep_range(1000, 3000); /* The reaction time after pulling up the mipi signal for dsi_rx */ } } diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 0ca7e53db112ac35e6ed332cd3b8d014e8196762..6f84db97e20e8ca384e33165c1344e3bc242aea9 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -36,7 +36,7 @@ void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, OUT_RING(ring, upper_32_bits(shadowptr(a5xx_gpu, ring))); } - spin_lock_irqsave(&ring->lock, flags); + spin_lock_irqsave(&ring->preempt_lock, flags); /* Copy the shadow to the actual register */ ring->cur = ring->next; @@ -44,7 +44,7 @@ void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, /* Make sure to wrap wptr if we need to */ wptr = get_wptr(ring); - spin_unlock_irqrestore(&ring->lock, flags); + spin_unlock_irqrestore(&ring->preempt_lock, flags); /* Make sure everything is posted before making a decision */ mb(); @@ -144,8 +144,8 @@ static void a5xx_submit(struct msm_gpu 
*gpu, struct msm_gem_submit *submit) OUT_RING(ring, 1); /* Enable local preemption for finegrain preemption */ - OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1); - OUT_RING(ring, 0x02); + OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1); + OUT_RING(ring, 0x1); /* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */ OUT_PKT7(ring, CP_YIELD_ENABLE, 1); diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c index 7e04509c4e1f0f22958ff0a85a90203475221618..b8e71ad6f8d8a5ab47e917aa95e7519d2ae26c2c 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c @@ -45,9 +45,9 @@ static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) if (!ring) return; - spin_lock_irqsave(&ring->lock, flags); + spin_lock_irqsave(&ring->preempt_lock, flags); wptr = get_wptr(ring); - spin_unlock_irqrestore(&ring->lock, flags); + spin_unlock_irqrestore(&ring->preempt_lock, flags); gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr); } @@ -62,9 +62,9 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu) bool empty; struct msm_ringbuffer *ring = gpu->rb[i]; - spin_lock_irqsave(&ring->lock, flags); - empty = (get_wptr(ring) == ring->memptrs->rptr); - spin_unlock_irqrestore(&ring->lock, flags); + spin_lock_irqsave(&ring->preempt_lock, flags); + empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring)); + spin_unlock_irqrestore(&ring->preempt_lock, flags); if (!empty) return ring; @@ -132,9 +132,9 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu) } /* Make sure the wptr doesn't update while we're in motion */ - spin_lock_irqsave(&ring->lock, flags); + spin_lock_irqsave(&ring->preempt_lock, flags); a5xx_gpu->preempt[ring->id]->wptr = get_wptr(ring); - spin_unlock_irqrestore(&ring->lock, flags); + spin_unlock_irqrestore(&ring->preempt_lock, flags); /* Set the address of the incoming preemption record */ gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO, @@ -210,6 +210,7 @@ void a5xx_preempt_hw_init(struct msm_gpu *gpu) a5xx_gpu->preempt[i]->wptr = 0; a5xx_gpu->preempt[i]->rptr = 0; a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova; + a5xx_gpu->preempt[i]->rptr_addr = shadowptr(a5xx_gpu, gpu->rb[i]); } /* Write a 0 to signal that we aren't switching pagetables */ @@ -261,7 +262,6 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu, ptr->data = 0; ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE; - ptr->rptr_addr = shadowptr(a5xx_gpu, ring); ptr->counter = counters_iova; return 0; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index dffc133b8b1cc5fc756a4e38dae4511766890b1d..29b40acedb3899f6b576a459bfa6a068a6ecdd1b 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -65,7 +65,7 @@ static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) OUT_RING(ring, upper_32_bits(shadowptr(a6xx_gpu, ring))); } - spin_lock_irqsave(&ring->lock, flags); + spin_lock_irqsave(&ring->preempt_lock, flags); /* Copy the shadow to the actual register */ ring->cur = ring->next; @@ -73,7 +73,7 @@ static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) /* Make sure to wrap wptr if we need to */ wptr = get_wptr(ring); - spin_unlock_irqrestore(&ring->lock, flags); + spin_unlock_irqrestore(&ring->preempt_lock, flags); /* Make sure everything is posted before making a decision */ mb(); diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 
de8cc25506d61ad21a1fa54fda79f02c4872a9d9..78181e2d78a97478f2b0e2c0b6ba976e1dec9652 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -954,13 +954,13 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu) { struct msm_gpu *gpu = &adreno_gpu->base; - struct msm_drm_private *priv = gpu->dev->dev_private; + struct msm_drm_private *priv = gpu->dev ? gpu->dev->dev_private : NULL; unsigned int i; for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) release_firmware(adreno_gpu->fw[i]); - if (pm_runtime_enabled(&priv->gpu_pdev->dev)) + if (priv && pm_runtime_enabled(&priv->gpu_pdev->dev)) pm_runtime_disable(&priv->gpu_pdev->dev); msm_gpu_cleanup(&adreno_gpu->base); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index 9bc4a1cd9ac65eea8f41563ba76abf9d54963b36..5afb3c544653c953fe4ec8c23cc4961d3f3d25d2 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -682,7 +682,10 @@ static void dpu_crtc_reset(struct drm_crtc *crtc) if (crtc->state) dpu_crtc_destroy_state(crtc, crtc->state); - __drm_atomic_helper_crtc_reset(crtc, &cstate->base); + if (cstate) + __drm_atomic_helper_crtc_reset(crtc, &cstate->base); + else + __drm_atomic_helper_crtc_reset(crtc, NULL); } /** diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c index 74a13ccad34c0bb942983385ec46db5b5554241f..9483005297438cef0ca5b05f50b2a3100fc52af5 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c @@ -633,6 +633,11 @@ int dpu_rm_get_assigned_resources(struct dpu_rm *rm, blks_size, enc_id); break; } + if (!hw_blks[i]) { + DPU_ERROR("Allocated resource %d unavailable to assign to enc %d\n", + type, enc_id); + break; + } blks[num_blks++] = hw_blks[i]; } diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c index ff4f207cbdeaff1e0865bda2f3ed78e8aa2dbc3f..60e7371cd0e0debef9d6c836bb10690c2a97bfbe 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c @@ -1124,7 +1124,10 @@ static void mdp5_crtc_reset(struct drm_crtc *crtc) if (crtc->state) mdp5_crtc_destroy_state(crtc, crtc->state); - __drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base); + if (mdp5_cstate) + __drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base); + else + __drm_atomic_helper_crtc_reset(crtc, NULL); } static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = { diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 51e8318cc8ff478519d22136082cc119fde314f0..5a76aa1389173de141b9d0ba2ba9ea4666468377 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -1913,6 +1913,9 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi) /* setup workqueue */ msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0); + if (!msm_host->workqueue) + return -ENOMEM; + INIT_WORK(&msm_host->err_work, dsi_err_worker); INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker); diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index efb14043a6ec419db9d17e1f078853445c906979..bee208773beec91dfa2a03add6cd5af44f20139e 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -264,6 +264,10 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); 
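The msm_dsi_host_init hunk above and the msm_hdmi_init hunk that continues just below add the same missing check: alloc_ordered_workqueue() returns NULL on allocation failure, so the result has to be tested before any work is queued on it. A minimal kernel-style sketch of the pattern, outside of either driver (struct foo_dev, foo_init and foo_work_fn are illustrative names, not taken from the patch):

#include <linux/workqueue.h>
#include <linux/errno.h>

/* Illustrative driver state; only the error handling mirrors the hunks. */
struct foo_dev {
	struct workqueue_struct *wq;
	struct work_struct work;
};

static void foo_work_fn(struct work_struct *work)
{
	/* deferred work would go here */
}

static int foo_init(struct foo_dev *foo)
{
	/* alloc_ordered_workqueue() returns NULL, not an ERR_PTR, on failure. */
	foo->wq = alloc_ordered_workqueue("foo_wq", 0);
	if (!foo->wq)
		return -ENOMEM;

	INIT_WORK(&foo->work, foo_work_fn);
	return 0;
}

static void foo_fini(struct foo_dev *foo)
{
	cancel_work_sync(&foo->work);
	destroy_workqueue(foo->wq);
}

The HDMI variant additionally jumps to its existing fail label so resources acquired earlier in probe are released on the error path.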
hdmi->workq = alloc_ordered_workqueue("msm_hdmi", 0); + if (!hdmi->workq) { + ret = -ENOMEM; + goto fail; + } hdmi->i2c = msm_hdmi_i2c_init(hdmi); if (IS_ERR(hdmi->i2c)) { diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c index cd59a591803851655426b1670530b4ca114b4689..50a25c119f4d96eec87e76f260cf05f9ec3dede9 100644 --- a/drivers/gpu/drm/msm/msm_fence.c +++ b/drivers/gpu/drm/msm/msm_fence.c @@ -20,7 +20,7 @@ msm_fence_context_alloc(struct drm_device *dev, const char *name) return ERR_PTR(-ENOMEM); fctx->dev = dev; - strncpy(fctx->name, name, sizeof(fctx->name)); + strscpy(fctx->name, name, sizeof(fctx->name)); fctx->context = dma_fence_context_alloc(1); init_waitqueue_head(&fctx->event); spin_lock_init(&fctx->spinlock); diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index aa5c60a7132d847a1911774d28178c2865de960e..c4e5037512b9d9bba2e182ad904e3688a7410530 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -494,8 +494,8 @@ static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev, int ret = 0; uint32_t i, j; - post_deps = kmalloc_array(nr_syncobjs, sizeof(*post_deps), - GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); + post_deps = kcalloc(nr_syncobjs, sizeof(*post_deps), + GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); if (!post_deps) return ERR_PTR(-ENOMEM); @@ -510,7 +510,6 @@ static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev, } post_deps[i].point = syncobj_desc.point; - post_deps[i].chain = NULL; if (syncobj_desc.flags) { ret = -EINVAL; diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index 935bf9b1d94188cba556a0e5dfca389ba008f126..1b6958e908dca7fcfa4023d3a2353c90169ffe3c 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -46,7 +46,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, ring->memptrs_iova = memptrs_iova; INIT_LIST_HEAD(&ring->submits); - spin_lock_init(&ring->lock); + spin_lock_init(&ring->preempt_lock); snprintf(name, sizeof(name), "gpu-ring-%d", ring->id); diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h index 0987d6bf848cfc90660f753eb926f30f326d71a1..4956d1bc5d0e12c18d53dbe67c3098c1bab63035 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.h +++ b/drivers/gpu/drm/msm/msm_ringbuffer.h @@ -46,7 +46,12 @@ struct msm_ringbuffer { struct msm_rbmemptrs *memptrs; uint64_t memptrs_iova; struct msm_fence_context *fctx; - spinlock_t lock; + + /* + * preempt_lock protects preemption and serializes wptr updates against + * preemption. Can be acquired from irq context.
+ */ + spinlock_t preempt_lock; }; struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, diff --git a/drivers/gpu/drm/mxsfb/Kconfig b/drivers/gpu/drm/mxsfb/Kconfig index ee22cd25d3e3d72f42526587668c146c5a68d87c..e7201e16119a4c467bc8e877c6df1272d3997875 100644 --- a/drivers/gpu/drm/mxsfb/Kconfig +++ b/drivers/gpu/drm/mxsfb/Kconfig @@ -8,6 +8,7 @@ config DRM_MXSFB tristate "i.MX (e)LCDIF LCD controller" depends on DRM && OF depends on COMMON_CLK + depends on ARCH_MXS || ARCH_MXC || COMPILE_TEST select DRM_MXS select DRM_KMS_HELPER select DRM_KMS_CMA_HELPER diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index c2d34c91e840c0750f9537c86d5533397c0c1ecb..804ea035fa46bcbfe358cc77ae7f2a9209eac289 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -2555,14 +2555,6 @@ nv50_display_fini(struct drm_device *dev, bool runtime, bool suspend) { struct nouveau_drm *drm = nouveau_drm(dev); struct drm_encoder *encoder; - struct drm_plane *plane; - - drm_for_each_plane(plane, dev) { - struct nv50_wndw *wndw = nv50_wndw(plane); - if (plane->funcs != &nv50_wndw) - continue; - nv50_wndw_fini(wndw); - } list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) @@ -2578,7 +2570,6 @@ nv50_display_init(struct drm_device *dev, bool resume, bool runtime) { struct nv50_core *core = nv50_disp(dev)->core; struct drm_encoder *encoder; - struct drm_plane *plane; if (resume || runtime) core->func->init(core); @@ -2591,13 +2582,6 @@ nv50_display_init(struct drm_device *dev, bool resume, bool runtime) } } - drm_for_each_plane(plane, dev) { - struct nv50_wndw *wndw = nv50_wndw(plane); - if (plane->funcs != &nv50_wndw) - continue; - nv50_wndw_init(wndw); - } - return 0; } diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c index f07916ffe42cb289e8040494e37e46f70207949b..831125b4453df759756977888ad7d890ddf726eb 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c @@ -690,18 +690,6 @@ nv50_wndw_notify(struct nvif_notify *notify) return NVIF_NOTIFY_KEEP; } -void -nv50_wndw_fini(struct nv50_wndw *wndw) -{ - nvif_notify_put(&wndw->notify); -} - -void -nv50_wndw_init(struct nv50_wndw *wndw) -{ - nvif_notify_get(&wndw->notify); -} - static const u64 nv50_cursor_format_modifiers[] = { DRM_FORMAT_MOD_LINEAR, DRM_FORMAT_MOD_INVALID, diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h index 3278e28800343c37fe7a88a29df1d7bcee6cab41..77bf124319fbdc7bf82e7a3d72a2d55dbf36895d 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h @@ -38,10 +38,9 @@ struct nv50_wndw { int nv50_wndw_new_(const struct nv50_wndw_func *, struct drm_device *, enum drm_plane_type, const char *name, int index, - const u32 *format, enum nv50_disp_interlock_type, - u32 interlock_data, u32 heads, struct nv50_wndw **); -void nv50_wndw_init(struct nv50_wndw *); -void nv50_wndw_fini(struct nv50_wndw *); + const u32 *format, u32 heads, + enum nv50_disp_interlock_type, u32 interlock_data, + struct nv50_wndw **); void nv50_wndw_flush_set(struct nv50_wndw *, u32 *interlock, struct nv50_wndw_atom *); void nv50_wndw_flush_clr(struct nv50_wndw *, u32 *interlock, bool flush, diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 
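The msm_ringbuffer.h hunk just above renames the ring spinlock to preempt_lock and documents that it can be taken from IRQ context, which is why the a5xx/a6xx hunks consistently use the irqsave variants around every wptr access. A stripped-down sketch of that locking pattern (struct my_ring and the helpers are illustrative, not the driver's real code):

#include <linux/spinlock.h>
#include <linux/types.h>

/* Illustrative ring state; only the locking pattern mirrors the patch. */
struct my_ring {
	/* Serializes wptr updates against preemption; may be taken from IRQ context. */
	spinlock_t preempt_lock;
	u32 wptr;
};

static void my_ring_init(struct my_ring *ring)
{
	spin_lock_init(&ring->preempt_lock);
	ring->wptr = 0;
}

static u32 my_read_wptr(struct my_ring *ring)
{
	unsigned long flags;
	u32 wptr;

	/* irqsave variant because the lock can also be taken in IRQ context. */
	spin_lock_irqsave(&ring->preempt_lock, flags);
	wptr = ring->wptr;
	spin_unlock_irqrestore(&ring->preempt_lock, flags);

	return wptr;
}

Publishing and reading the write pointer under the same lock the preemption path takes is what keeps a preempt trigger from observing a half-updated ring.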
b57dcad8865fa8271b189fc0aa0adb9af21c7248..7633f56bc0a4f40f0038aad701b646712cbe1906 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -823,6 +823,15 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, if (ret == 0) { ret = nouveau_fence_new(chan, false, &fence); if (ret == 0) { + /* TODO: figure out a better solution here + * + * wait on the fence here explicitly as going through + * ttm_bo_move_accel_cleanup somehow doesn't seem to do it. + * + * Without this the operation can timeout and we'll fallback to a + * software copy, which might take several minutes to finish. + */ + nouveau_fence_wait(fence, false, false); ret = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, false, diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index eeccf40bae41667adb5d5f7e2156200f21840a62..1b1ddc5fe6dcc0614566d581716ad799dd01170e 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -1444,22 +1444,26 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p) { struct dsi_data *dsi = s->private; unsigned long flags; - struct dsi_irq_stats stats; + struct dsi_irq_stats *stats; + + stats = kmalloc(sizeof(*stats), GFP_KERNEL); + if (!stats) + return -ENOMEM; spin_lock_irqsave(&dsi->irq_stats_lock, flags); - stats = dsi->irq_stats; + *stats = dsi->irq_stats; memset(&dsi->irq_stats, 0, sizeof(dsi->irq_stats)); dsi->irq_stats.last_reset = jiffies; spin_unlock_irqrestore(&dsi->irq_stats_lock, flags); seq_printf(s, "period %u ms\n", - jiffies_to_msecs(jiffies - stats.last_reset)); + jiffies_to_msecs(jiffies - stats->last_reset)); - seq_printf(s, "irqs %d\n", stats.irq_count); + seq_printf(s, "irqs %d\n", stats->irq_count); #define PIS(x) \ - seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]); + seq_printf(s, "%-20s %10d\n", #x, stats->dsi_irqs[ffs(DSI_IRQ_##x)-1]); seq_printf(s, "-- DSI%d interrupts --\n", dsi->module_id + 1); PIS(VC0); @@ -1483,10 +1487,10 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p) #define PIS(x) \ seq_printf(s, "%-20s %10d %10d %10d %10d\n", #x, \ - stats.vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \ - stats.vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \ - stats.vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \ - stats.vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]); + stats->vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \ + stats->vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \ + stats->vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \ + stats->vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]); seq_printf(s, "-- VC interrupts --\n"); PIS(CS); @@ -1502,7 +1506,7 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p) #define PIS(x) \ seq_printf(s, "%-20s %10d\n", #x, \ - stats.cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]); + stats->cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]); seq_printf(s, "-- CIO interrupts --\n"); PIS(ERRSYNCESC1); @@ -1527,6 +1531,8 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p) PIS(ULPSACTIVENOT_ALL1); #undef PIS + kfree(stats); + return 0; } #endif diff --git a/drivers/gpu/drm/panfrost/Kconfig b/drivers/gpu/drm/panfrost/Kconfig index 86cdc0ce79e6598010cb63c84440d288dbadf178..77f4d32e520456287c3e95c0a423fe770319a206 100644 --- a/drivers/gpu/drm/panfrost/Kconfig +++ b/drivers/gpu/drm/panfrost/Kconfig @@ -3,7 +3,8 @@ config DRM_PANFROST tristate "Panfrost (DRM support for ARM Mali Midgard/Bifrost GPUs)" depends on DRM - depends on ARM || ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64) + depends on ARM || ARM64 || COMPILE_TEST + depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE depends on MMU 
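The dsi_dump_dsi_irqs hunk above moves the statistics snapshot from the stack to a kmalloc'ed buffer, but the shape of the function stays the same: copy and reset the shared counters while holding the spinlock, then do the slow formatting on the private copy outside of it. A condensed sketch of that approach with invented names (my_dev, my_irq_stats):

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/printk.h>

/* Illustrative stats block; large enough that a stack copy would be unwise. */
struct my_irq_stats {
	unsigned int irq_count;
	unsigned int per_source[64];
};

struct my_dev {
	spinlock_t stats_lock;
	struct my_irq_stats stats;
};

static int my_dump_stats(struct my_dev *dev)
{
	struct my_irq_stats *snap;
	unsigned long flags;

	snap = kmalloc(sizeof(*snap), GFP_KERNEL);
	if (!snap)
		return -ENOMEM;

	/* Only the copy and reset happen under the lock; printing is done outside. */
	spin_lock_irqsave(&dev->stats_lock, flags);
	*snap = dev->stats;
	memset(&dev->stats, 0, sizeof(dev->stats));
	spin_unlock_irqrestore(&dev->stats_lock, flags);

	pr_info("irqs %u\n", snap->irq_count);

	kfree(snap);
	return 0;
}

Keeping the buffer off the stack avoids a large stack frame, and keeping the formatting outside the lock keeps the IRQ-disabled window short.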
select DRM_SCHED select IOMMU_SUPPORT diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index 12aa7877a625a389a418cd3e8f3f9835595bc86e..8cca58f25c0f355212ed52264b8ac290ead3ecd0 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -2191,11 +2191,12 @@ int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx) /* * On DCE32 any encoder can drive any block so usually just use crtc id, - * but Apple thinks different at least on iMac10,1, so there use linkb, + * but Apple thinks different at least on iMac10,1 and iMac11,2, so there use linkb, * otherwise the internal eDP panel will stay dark. */ if (ASIC_IS_DCE32(rdev)) { - if (dmi_match(DMI_PRODUCT_NAME, "iMac10,1")) + if (dmi_match(DMI_PRODUCT_NAME, "iMac10,1") || + dmi_match(DMI_PRODUCT_NAME, "iMac11,2")) enc_idx = (dig->linkb) ? 1 : 0; else enc_idx = radeon_crtc->crtc_id; diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 8287410f471fb88ecf5f458b3b0baad4fd4a0c62..131f425c363a571d5b004b9599292da4f0892d3e 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1022,6 +1022,7 @@ void radeon_atombios_fini(struct radeon_device *rdev) { if (rdev->mode_info.atom_context) { kfree(rdev->mode_info.atom_context->scratch); + kfree(rdev->mode_info.atom_context->iio); } kfree(rdev->mode_info.atom_context); rdev->mode_info.atom_context = NULL; diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c index b669168ae7cb2d31bb40973c62c6fd025f41ed94..33716213a821093a81194755a9a9ec58916fd868 100644 --- a/drivers/gpu/drm/tidss/tidss_dispc.c +++ b/drivers/gpu/drm/tidss/tidss_dispc.c @@ -1855,8 +1855,8 @@ static const struct { { DRM_FORMAT_XBGR4444, 0x21, }, { DRM_FORMAT_RGBX4444, 0x22, }, - { DRM_FORMAT_ARGB1555, 0x25, }, - { DRM_FORMAT_ABGR1555, 0x26, }, + { DRM_FORMAT_XRGB1555, 0x25, }, + { DRM_FORMAT_XBGR1555, 0x26, }, { DRM_FORMAT_XRGB8888, 0x27, }, { DRM_FORMAT_XBGR8888, 0x28, }, diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c index 403af68fa4400d46acdc5ae61837ddaa2d5f84b1..7ea26e5fbcb28e008dc4473a82768a861c861503 100644 --- a/drivers/gpu/drm/tiny/ili9486.c +++ b/drivers/gpu/drm/tiny/ili9486.c @@ -43,6 +43,7 @@ static int waveshare_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par, size_t num) { struct spi_device *spi = mipi->spi; + unsigned int bpw = 8; void *data = par; u32 speed_hz; int i, ret; @@ -56,8 +57,6 @@ static int waveshare_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par, * The displays are Raspberry Pi HATs and connected to the 8-bit only * SPI controller, so 16-bit command and parameters need byte swapping * before being transferred as 8-bit on the big endian SPI bus. - * Pixel data bytes have already been swapped before this function is - * called. 
*/ buf[0] = cpu_to_be16(*cmd); gpiod_set_value_cansleep(mipi->dc, 0); @@ -71,12 +70,18 @@ static int waveshare_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par, for (i = 0; i < num; i++) buf[i] = cpu_to_be16(par[i]); num *= 2; - speed_hz = mipi_dbi_spi_cmd_max_speed(spi, num); data = buf; } + /* + * Check whether pixel data bytes needs to be swapped or not + */ + if (*cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes) + bpw = 16; + gpiod_set_value_cansleep(mipi->dc, 1); - ret = mipi_dbi_spi_transfer(spi, speed_hz, 8, data, num); + speed_hz = mipi_dbi_spi_cmd_max_speed(spi, num); + ret = mipi_dbi_spi_transfer(spi, speed_hz, bpw, data, num); free: kfree(buf); diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c index a90f2545baee0c7e6b541cc1d957c7dad6c286a4..9c8a71d7426a0fdf2e992f81d1601a3a210d95f6 100644 --- a/drivers/gpu/drm/vc4/vc4_dpi.c +++ b/drivers/gpu/drm/vc4/vc4_dpi.c @@ -148,35 +148,45 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder) } drm_connector_list_iter_end(&conn_iter); - if (connector && connector->display_info.num_bus_formats) { - u32 bus_format = connector->display_info.bus_formats[0]; - - switch (bus_format) { - case MEDIA_BUS_FMT_RGB888_1X24: - dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB, - DPI_FORMAT); - break; - case MEDIA_BUS_FMT_BGR888_1X24: - dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB, - DPI_FORMAT); - dpi_c |= VC4_SET_FIELD(DPI_ORDER_BGR, DPI_ORDER); - break; - case MEDIA_BUS_FMT_RGB666_1X24_CPADHI: - dpi_c |= VC4_SET_FIELD(DPI_FORMAT_18BIT_666_RGB_2, - DPI_FORMAT); - break; - case MEDIA_BUS_FMT_RGB666_1X18: - dpi_c |= VC4_SET_FIELD(DPI_FORMAT_18BIT_666_RGB_1, - DPI_FORMAT); - break; - case MEDIA_BUS_FMT_RGB565_1X16: - dpi_c |= VC4_SET_FIELD(DPI_FORMAT_16BIT_565_RGB_3, - DPI_FORMAT); - break; - default: - DRM_ERROR("Unknown media bus format %d\n", bus_format); - break; + if (connector) { + if (connector->display_info.num_bus_formats) { + u32 bus_format = connector->display_info.bus_formats[0]; + + switch (bus_format) { + case MEDIA_BUS_FMT_RGB888_1X24: + dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB, + DPI_FORMAT); + break; + case MEDIA_BUS_FMT_BGR888_1X24: + dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB, + DPI_FORMAT); + dpi_c |= VC4_SET_FIELD(DPI_ORDER_BGR, + DPI_ORDER); + break; + case MEDIA_BUS_FMT_RGB666_1X24_CPADHI: + dpi_c |= VC4_SET_FIELD(DPI_FORMAT_18BIT_666_RGB_2, + DPI_FORMAT); + break; + case MEDIA_BUS_FMT_RGB666_1X18: + dpi_c |= VC4_SET_FIELD(DPI_FORMAT_18BIT_666_RGB_1, + DPI_FORMAT); + break; + case MEDIA_BUS_FMT_RGB565_1X16: + dpi_c |= VC4_SET_FIELD(DPI_FORMAT_16BIT_565_RGB_1, + DPI_FORMAT); + break; + default: + DRM_ERROR("Unknown media bus format %d\n", + bus_format); + break; + } } + + if (connector->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE) + dpi_c |= DPI_PIXEL_CLK_INVERT; + + if (connector->display_info.bus_flags & DRM_BUS_FLAG_DE_LOW) + dpi_c |= DPI_OUTPUT_ENABLE_INVERT; } else { /* Default to 24bit if no connector found. 
*/ dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB, DPI_FORMAT); diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 08175c3dd374b193d86eeefa0aaffb4d338603e4..7e8620838de9cf4e554ce97f57ec6e9aec4deb3f 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -567,11 +567,12 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi, VC4_SET_FIELD(mode->crtc_vdisplay, VC5_HDMI_VERTA_VAL)); u32 vertb = (VC4_SET_FIELD(mode->htotal >> (2 - pixel_rep), VC5_HDMI_VERTB_VSPO) | - VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end, + VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end + + interlaced, VC4_HDMI_VERTB_VBP)); u32 vertb_even = (VC4_SET_FIELD(0, VC5_HDMI_VERTB_VSPO) | VC4_SET_FIELD(mode->crtc_vtotal - - mode->crtc_vsync_end - interlaced, + mode->crtc_vsync_end, VC4_HDMI_VERTB_VBP)); HDMI_WRITE(HDMI_VEC_INTERFACE_XBAR, 0x354021); @@ -1491,7 +1492,8 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi) return 0; vc4_hdmi->cec_adap = cec_allocate_adapter(&vc4_hdmi_cec_adap_ops, - vc4_hdmi, "vc4", + vc4_hdmi, + vc4_hdmi->variant->card_name, CEC_CAP_DEFAULTS | CEC_CAP_CONNECTOR_INFO, 1); ret = PTR_ERR_OR_ZERO(vc4_hdmi->cec_adap); diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c index 95fa6fc052a72f26b30d9ed8f985373f2c79eb99..f8f2fc3d15f73ddf25246032ff292b12236d9d11 100644 --- a/drivers/gpu/drm/vc4/vc4_hvs.c +++ b/drivers/gpu/drm/vc4/vc4_hvs.c @@ -677,6 +677,17 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) SCALER_DISPCTRL_DSPEISLUR(2) | SCALER_DISPCTRL_SCLEIRQ); + /* Set AXI panic mode. + * VC4 panics when < 2 lines in FIFO. + * VC5 panics when less than 1 line in the FIFO. + */ + dispctrl &= ~(SCALER_DISPCTRL_PANIC0_MASK | + SCALER_DISPCTRL_PANIC1_MASK | + SCALER_DISPCTRL_PANIC2_MASK); + dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC0); + dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC1); + dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC2); + HVS_WRITE(SCALER_DISPCTRL, dispctrl); ret = devm_request_irq(dev, platform_get_irq(pdev, 0), diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index 4df222a830493c977ef5216b741577b14e21db32..2e03c16c60bb116c967244423723eadf53a8464c 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -72,11 +72,13 @@ static const struct hvs_format { .drm = DRM_FORMAT_ARGB1555, .hvs = HVS_PIXEL_FORMAT_RGBA5551, .pixel_order = HVS_PIXEL_ORDER_ABGR, + .pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB, }, { .drm = DRM_FORMAT_XRGB1555, .hvs = HVS_PIXEL_FORMAT_RGBA5551, .pixel_order = HVS_PIXEL_ORDER_ABGR, + .pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB, }, { .drm = DRM_FORMAT_RGB888, diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h index be2c32a519b31d503d39ca722f44347dc449f35d..a324ef88ceafb944f1858e5df633399aee6dfa69 100644 --- a/drivers/gpu/drm/vc4/vc4_regs.h +++ b/drivers/gpu/drm/vc4/vc4_regs.h @@ -220,6 +220,12 @@ #define SCALER_DISPCTRL 0x00000000 /* Global register for clock gating the HVS */ # define SCALER_DISPCTRL_ENABLE BIT(31) +# define SCALER_DISPCTRL_PANIC0_MASK VC4_MASK(25, 24) +# define SCALER_DISPCTRL_PANIC0_SHIFT 24 +# define SCALER_DISPCTRL_PANIC1_MASK VC4_MASK(27, 26) +# define SCALER_DISPCTRL_PANIC1_SHIFT 26 +# define SCALER_DISPCTRL_PANIC2_MASK VC4_MASK(29, 28) +# define SCALER_DISPCTRL_PANIC2_SHIFT 28 # define SCALER_DISPCTRL_DSP3_MUX_MASK VC4_MASK(19, 18) # define SCALER_DISPCTRL_DSP3_MUX_SHIFT 18 diff --git 
a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c index 0c98978e2e55c392608d67abf42a402f10bd150b..49fa59e09187bd3beb6049debe5ff066c3c594a8 100644 --- a/drivers/gpu/drm/virtio/virtgpu_object.c +++ b/drivers/gpu/drm/virtio/virtgpu_object.c @@ -157,9 +157,11 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev, * since virtio_gpu doesn't support dma-buf import from other devices. */ shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base); - if (!shmem->pages) { + if (IS_ERR(shmem->pages)) { drm_gem_shmem_unpin(&bo->base.base); - return -EINVAL; + ret = PTR_ERR(shmem->pages); + shmem->pages = NULL; + return ret; } if (use_dma_api) { diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index cb0b6230c22cefbfbc1488fb839304ae4f7b260a..838428988f79dee3ad719ad4980e4c8573d6ea42 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c @@ -61,7 +61,8 @@ static void vkms_release(struct drm_device *dev) { struct vkms_device *vkms = container_of(dev, struct vkms_device, drm); - destroy_workqueue(vkms->output.composer_workq); + if (vkms->output.composer_workq) + destroy_workqueue(vkms->output.composer_workq); } static void vkms_atomic_commit_tail(struct drm_atomic_state *old_state) diff --git a/drivers/gpu/host1x/hw/syncpt_hw.c b/drivers/gpu/host1x/hw/syncpt_hw.c index dd39d67ccec3695684ccd2bab110ab41e0b61d68..8cf35b2eff3dbdb4b1de3f01518edcd682b7fb18 100644 --- a/drivers/gpu/host1x/hw/syncpt_hw.c +++ b/drivers/gpu/host1x/hw/syncpt_hw.c @@ -106,9 +106,6 @@ static void syncpt_assign_to_channel(struct host1x_syncpt *sp, #if HOST1X_HW >= 6 struct host1x *host = sp->host; - if (!host->hv_regs) - return; - host1x_sync_writel(host, HOST1X_SYNC_SYNCPT_CH_APP_CH(ch ? 
ch->id : 0xff), HOST1X_SYNC_SYNCPT_CH_APP(sp->id)); diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index d166ee262ce4390f0987d07e589aebe59f25bd5e..22dae3f51051789801c852223ba2f79f63f20c89 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c @@ -1168,6 +1168,7 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base) pdev = platform_device_alloc(reg->name, id++); if (!pdev) { ret = -ENOMEM; + of_node_put(of_node); goto err_register; } diff --git a/drivers/hck/vendor_hooks.c b/drivers/hck/vendor_hooks.c index 7bea8ddcaaa8bac1809c83c5775586d2fd8024d3..b562d2fe3bcd9666e666632d51074f1573381a6e 100644 --- a/drivers/hck/vendor_hooks.c +++ b/drivers/hck/vendor_hooks.c @@ -11,3 +11,5 @@ #include #include #include +#include +#include diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c index f85c6e3309a09f55eeaaeeb1564eb9bb27729e89..6865cab33cf8ab478036820db0e89b0afdc5069a 100644 --- a/drivers/hid/hid-asus.c +++ b/drivers/hid/hid-asus.c @@ -95,6 +95,7 @@ struct asus_kbd_leds { struct hid_device *hdev; struct work_struct work; unsigned int brightness; + spinlock_t lock; bool removed; }; @@ -397,24 +398,42 @@ static int asus_kbd_get_functions(struct hid_device *hdev, return ret; } +static void asus_schedule_work(struct asus_kbd_leds *led) +{ + unsigned long flags; + + spin_lock_irqsave(&led->lock, flags); + if (!led->removed) + schedule_work(&led->work); + spin_unlock_irqrestore(&led->lock, flags); +} + static void asus_kbd_backlight_set(struct led_classdev *led_cdev, enum led_brightness brightness) { struct asus_kbd_leds *led = container_of(led_cdev, struct asus_kbd_leds, cdev); - if (led->brightness == brightness) - return; + unsigned long flags; + spin_lock_irqsave(&led->lock, flags); led->brightness = brightness; - schedule_work(&led->work); + spin_unlock_irqrestore(&led->lock, flags); + + asus_schedule_work(led); } static enum led_brightness asus_kbd_backlight_get(struct led_classdev *led_cdev) { struct asus_kbd_leds *led = container_of(led_cdev, struct asus_kbd_leds, cdev); + enum led_brightness brightness; + unsigned long flags; + + spin_lock_irqsave(&led->lock, flags); + brightness = led->brightness; + spin_unlock_irqrestore(&led->lock, flags); - return led->brightness; + return brightness; } static void asus_kbd_backlight_work(struct work_struct *work) @@ -422,11 +441,11 @@ static void asus_kbd_backlight_work(struct work_struct *work) struct asus_kbd_leds *led = container_of(work, struct asus_kbd_leds, work); u8 buf[] = { FEATURE_KBD_REPORT_ID, 0xba, 0xc5, 0xc4, 0x00 }; int ret; + unsigned long flags; - if (led->removed) - return; - + spin_lock_irqsave(&led->lock, flags); buf[4] = led->brightness; + spin_unlock_irqrestore(&led->lock, flags); ret = asus_kbd_set_report(led->hdev, buf, sizeof(buf)); if (ret < 0) @@ -488,6 +507,7 @@ static int asus_kbd_register_leds(struct hid_device *hdev) drvdata->kbd_backlight->cdev.brightness_set = asus_kbd_backlight_set; drvdata->kbd_backlight->cdev.brightness_get = asus_kbd_backlight_get; INIT_WORK(&drvdata->kbd_backlight->work, asus_kbd_backlight_work); + spin_lock_init(&drvdata->kbd_backlight->lock); ret = devm_led_classdev_register(&hdev->dev, &drvdata->kbd_backlight->cdev); if (ret < 0) { @@ -1016,9 +1036,13 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id) static void asus_remove(struct hid_device *hdev) { struct asus_drvdata *drvdata = hid_get_drvdata(hdev); + unsigned long flags; if (drvdata->kbd_backlight) { + 
spin_lock_irqsave(&drvdata->kbd_backlight->lock, flags); drvdata->kbd_backlight->removed = true; + spin_unlock_irqrestore(&drvdata->kbd_backlight->lock, flags); + cancel_work_sync(&drvdata->kbd_backlight->work); } diff --git a/drivers/hid/hid-betopff.c b/drivers/hid/hid-betopff.c index 467d789f9bc2d3a0d846c661b618d1b640a63188..25ed7b9a917e4f2e1b1828caf039d691b0a07fca 100644 --- a/drivers/hid/hid-betopff.c +++ b/drivers/hid/hid-betopff.c @@ -60,7 +60,6 @@ static int betopff_init(struct hid_device *hid) struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct input_dev *dev; - int field_count = 0; int error; int i, j; @@ -86,19 +85,21 @@ static int betopff_init(struct hid_device *hid) * ----------------------------------------- * Do init them with default value. */ + if (report->maxfield < 4) { + hid_err(hid, "not enough fields in the report: %d\n", + report->maxfield); + return -ENODEV; + } for (i = 0; i < report->maxfield; i++) { + if (report->field[i]->report_count < 1) { + hid_err(hid, "no values in the field\n"); + return -ENODEV; + } for (j = 0; j < report->field[i]->report_count; j++) { report->field[i]->value[j] = 0x00; - field_count++; } } - if (field_count < 4) { - hid_err(hid, "not enough fields in the report: %d\n", - field_count); - return -ENODEV; - } - betopff = kzalloc(sizeof(*betopff), GFP_KERNEL); if (!betopff) return -ENOMEM; diff --git a/drivers/hid/hid-bigbenff.c b/drivers/hid/hid-bigbenff.c index e8b16665860d6b04609d814f89bc048ec45274c8..a02cb517b4c47a28b74df9dc25ecfd0f238a8644 100644 --- a/drivers/hid/hid-bigbenff.c +++ b/drivers/hid/hid-bigbenff.c @@ -174,6 +174,7 @@ static __u8 pid0902_rdesc_fixed[] = { struct bigben_device { struct hid_device *hid; struct hid_report *report; + spinlock_t lock; bool removed; u8 led_state; /* LED1 = 1 .. 
LED4 = 8 */ u8 right_motor_on; /* right motor off/on 0/1 */ @@ -184,18 +185,39 @@ struct bigben_device { struct work_struct worker; }; +static inline void bigben_schedule_work(struct bigben_device *bigben) +{ + unsigned long flags; + + spin_lock_irqsave(&bigben->lock, flags); + if (!bigben->removed) + schedule_work(&bigben->worker); + spin_unlock_irqrestore(&bigben->lock, flags); +} static void bigben_worker(struct work_struct *work) { struct bigben_device *bigben = container_of(work, struct bigben_device, worker); struct hid_field *report_field = bigben->report->field[0]; - - if (bigben->removed || !report_field) + bool do_work_led = false; + bool do_work_ff = false; + u8 *buf; + u32 len; + unsigned long flags; + + buf = hid_alloc_report_buf(bigben->report, GFP_KERNEL); + if (!buf) return; + len = hid_report_len(bigben->report); + + /* LED work */ + spin_lock_irqsave(&bigben->lock, flags); + if (bigben->work_led) { bigben->work_led = false; + do_work_led = true; report_field->value[0] = 0x01; /* 1 = led message */ report_field->value[1] = 0x08; /* reserved value, always 8 */ report_field->value[2] = bigben->led_state; @@ -204,11 +226,22 @@ static void bigben_worker(struct work_struct *work) report_field->value[5] = 0x00; /* padding */ report_field->value[6] = 0x00; /* padding */ report_field->value[7] = 0x00; /* padding */ - hid_hw_request(bigben->hid, bigben->report, HID_REQ_SET_REPORT); + hid_output_report(bigben->report, buf); + } + + spin_unlock_irqrestore(&bigben->lock, flags); + + if (do_work_led) { + hid_hw_raw_request(bigben->hid, bigben->report->id, buf, len, + bigben->report->type, HID_REQ_SET_REPORT); } + /* FF work */ + spin_lock_irqsave(&bigben->lock, flags); + if (bigben->work_ff) { bigben->work_ff = false; + do_work_ff = true; report_field->value[0] = 0x02; /* 2 = rumble effect message */ report_field->value[1] = 0x08; /* reserved value, always 8 */ report_field->value[2] = bigben->right_motor_on; @@ -217,8 +250,17 @@ static void bigben_worker(struct work_struct *work) report_field->value[5] = 0x00; /* padding */ report_field->value[6] = 0x00; /* padding */ report_field->value[7] = 0x00; /* padding */ - hid_hw_request(bigben->hid, bigben->report, HID_REQ_SET_REPORT); + hid_output_report(bigben->report, buf); + } + + spin_unlock_irqrestore(&bigben->lock, flags); + + if (do_work_ff) { + hid_hw_raw_request(bigben->hid, bigben->report->id, buf, len, + bigben->report->type, HID_REQ_SET_REPORT); } + + kfree(buf); } static int hid_bigben_play_effect(struct input_dev *dev, void *data, @@ -228,6 +270,7 @@ static int hid_bigben_play_effect(struct input_dev *dev, void *data, struct bigben_device *bigben = hid_get_drvdata(hid); u8 right_motor_on; u8 left_motor_force; + unsigned long flags; if (!bigben) { hid_err(hid, "no device data\n"); @@ -242,10 +285,13 @@ static int hid_bigben_play_effect(struct input_dev *dev, void *data, if (right_motor_on != bigben->right_motor_on || left_motor_force != bigben->left_motor_force) { + spin_lock_irqsave(&bigben->lock, flags); bigben->right_motor_on = right_motor_on; bigben->left_motor_force = left_motor_force; bigben->work_ff = true; - schedule_work(&bigben->worker); + spin_unlock_irqrestore(&bigben->lock, flags); + + bigben_schedule_work(bigben); } return 0; @@ -259,6 +305,7 @@ static void bigben_set_led(struct led_classdev *led, struct bigben_device *bigben = hid_get_drvdata(hid); int n; bool work; + unsigned long flags; if (!bigben) { hid_err(hid, "no device data\n"); @@ -267,6 +314,7 @@ static void bigben_set_led(struct led_classdev *led, for 
(n = 0; n < NUM_LEDS; n++) { if (led == bigben->leds[n]) { + spin_lock_irqsave(&bigben->lock, flags); if (value == LED_OFF) { work = (bigben->led_state & BIT(n)); bigben->led_state &= ~BIT(n); @@ -274,10 +322,11 @@ static void bigben_set_led(struct led_classdev *led, work = !(bigben->led_state & BIT(n)); bigben->led_state |= BIT(n); } + spin_unlock_irqrestore(&bigben->lock, flags); if (work) { bigben->work_led = true; - schedule_work(&bigben->worker); + bigben_schedule_work(bigben); } return; } @@ -307,8 +356,12 @@ static enum led_brightness bigben_get_led(struct led_classdev *led) static void bigben_remove(struct hid_device *hid) { struct bigben_device *bigben = hid_get_drvdata(hid); + unsigned long flags; + spin_lock_irqsave(&bigben->lock, flags); bigben->removed = true; + spin_unlock_irqrestore(&bigben->lock, flags); + cancel_work_sync(&bigben->worker); hid_hw_stop(hid); } @@ -318,7 +371,6 @@ static int bigben_probe(struct hid_device *hid, { struct bigben_device *bigben; struct hid_input *hidinput; - struct list_head *report_list; struct led_classdev *led; char *name; size_t name_sz; @@ -343,14 +395,12 @@ static int bigben_probe(struct hid_device *hid, return error; } - report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; - if (list_empty(report_list)) { + bigben->report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 8); + if (!bigben->report) { hid_err(hid, "no output report found\n"); error = -ENODEV; goto error_hw_stop; } - bigben->report = list_entry(report_list->next, - struct hid_report, list); if (list_empty(&hid->inputs)) { hid_err(hid, "no inputs found\n"); @@ -362,6 +412,7 @@ static int bigben_probe(struct hid_device *hid, set_bit(FF_RUMBLE, hidinput->input->ffbit); INIT_WORK(&bigben->worker, bigben_worker); + spin_lock_init(&bigben->lock); error = input_ff_create_memless(hidinput->input, NULL, hid_bigben_play_effect); @@ -402,7 +453,7 @@ static int bigben_probe(struct hid_device *hid, bigben->left_motor_force = 0; bigben->work_led = true; bigben->work_ff = true; - schedule_work(&bigben->worker); + bigben_schedule_work(bigben); hid_info(hid, "LED and force feedback support for BigBen gamepad\n"); diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index baadead947c8b358c7dda8a6e945d7b1e3f3ce1d..5f9ec1d1464a28270b649f885afe404da194e5a4 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1197,6 +1197,7 @@ int hid_open_report(struct hid_device *device) __u8 *end; __u8 *next; int ret; + int i; static int (*dispatch_type[])(struct hid_parser *parser, struct hid_item *item) = { hid_parser_main, @@ -1247,6 +1248,8 @@ int hid_open_report(struct hid_device *device) goto err; } device->collection_size = HID_DEFAULT_NUM_COLLECTIONS; + for (i = 0; i < HID_DEFAULT_NUM_COLLECTIONS; i++) + device->collection[i].parent_idx = -1; ret = -EINVAL; while ((next = fetch_item(start, end, &item)) != NULL) { diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index f4e2e6937758952ed499e6adbaf89ed147221945..1f60a381ae63eb258e8d710e8220de4ddf6cd0a7 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -933,6 +933,7 @@ static const char *keys[KEY_MAX + 1] = { [KEY_VOICECOMMAND] = "VoiceCommand", [KEY_EMOJI_PICKER] = "EmojiPicker", [KEY_DICTATE] = "Dictate", + [KEY_MICMUTE] = "MicrophoneMute", [KEY_BRIGHTNESS_MIN] = "BrightnessMin", [KEY_BRIGHTNESS_MAX] = "BrightnessMax", [KEY_BRIGHTNESS_AUTO] = "BrightnessAuto", diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 
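The hid-asus and hid-bigbenff hunks above close the same teardown race in the same way: work is only ever queued through a helper that checks a removed flag under a spinlock, the state the worker reads is updated under that lock, and the remove path sets the flag under the lock before calling cancel_work_sync(). A condensed sketch of the scheme (struct my_led and its helpers are illustrative):

#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct my_led {
	spinlock_t lock;
	bool removed;
	unsigned int brightness;
	struct work_struct work;
};

static void my_led_schedule_work(struct my_led *led)
{
	unsigned long flags;

	/* Never queue new work once the remove path has marked us removed. */
	spin_lock_irqsave(&led->lock, flags);
	if (!led->removed)
		schedule_work(&led->work);
	spin_unlock_irqrestore(&led->lock, flags);
}

static void my_led_set(struct my_led *led, unsigned int brightness)
{
	unsigned long flags;

	spin_lock_irqsave(&led->lock, flags);
	led->brightness = brightness;
	spin_unlock_irqrestore(&led->lock, flags);

	my_led_schedule_work(led);
}

static void my_led_remove(struct my_led *led)
{
	unsigned long flags;

	/* After this point my_led_schedule_work() becomes a no-op ... */
	spin_lock_irqsave(&led->lock, flags);
	led->removed = true;
	spin_unlock_irqrestore(&led->lock, flags);

	/* ... so cancel_work_sync() cannot race with a late re-schedule. */
	cancel_work_sync(&led->work);
}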
09c3f30f10d3707c3d5ee76ea2e61a9df60a4424..1d1306a6004e6829b762701c2e3e11cffb4d8650 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -257,7 +257,6 @@ #define USB_DEVICE_ID_CH_AXIS_295 0x001c #define USB_VENDOR_ID_CHERRY 0x046a -#define USB_DEVICE_ID_CHERRY_MOUSE_000C 0x000c #define USB_DEVICE_ID_CHERRY_CYMOTION 0x0023 #define USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR 0x0027 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 75a4d8d6bb0fd34217e6731c109ebbb65731a0cc..3399953256d85e62ab0b5a4522429e13b0a9074a 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -675,6 +675,14 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel break; } + if ((usage->hid & 0xf0) == 0xa0) { /* SystemControl */ + switch (usage->hid & 0xf) { + case 0x9: map_key_clear(KEY_MICMUTE); break; + default: goto ignore; + } + break; + } + if ((usage->hid & 0xf0) == 0xb0) { /* SC - Display */ switch (usage->hid & 0xf) { case 0x05: map_key_clear(KEY_SWITCHVIDEOMODE); break; diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index 66b1051620390b6ccb84ecf21394e1ad1da5e742..f5ea8e1d844528692e76beb3e0fdda2be934baa1 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -3763,6 +3763,7 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) bool connected; unsigned int connect_mask = HID_CONNECT_DEFAULT; struct hidpp_ff_private_data data; + bool will_restart = false; /* report_fixup needs drvdata to be set before we call hid_parse */ hidpp = devm_kzalloc(&hdev->dev, sizeof(*hidpp), GFP_KERNEL); @@ -3818,6 +3819,10 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) return ret; } + if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT || + hidpp->quirks & HIDPP_QUIRK_UNIFYING) + will_restart = true; + INIT_WORK(&hidpp->work, delayed_work_cb); mutex_init(&hidpp->send_mutex); init_waitqueue_head(&hidpp->wait); @@ -3832,7 +3837,7 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) * Plain USB connections need to actually call start and open * on the transport driver to allow incoming data. */ - ret = hid_hw_start(hdev, 0); + ret = hid_hw_start(hdev, will_restart ? 
0 : connect_mask); if (ret) { hid_err(hdev, "hw start failed\n"); goto hid_hw_start_fail; @@ -3869,6 +3874,7 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) hidpp->wireless_feature_index = 0; else if (ret) goto hid_hw_init_fail; + ret = 0; } if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)) { @@ -3883,19 +3889,21 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) hidpp_connect_event(hidpp); - /* Reset the HID node state */ - hid_device_io_stop(hdev); - hid_hw_close(hdev); - hid_hw_stop(hdev); + if (will_restart) { + /* Reset the HID node state */ + hid_device_io_stop(hdev); + hid_hw_close(hdev); + hid_hw_stop(hdev); - if (hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT) - connect_mask &= ~HID_CONNECT_HIDINPUT; + if (hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT) + connect_mask &= ~HID_CONNECT_HIDINPUT; - /* Now export the actual inputs and hidraw nodes to the world */ - ret = hid_hw_start(hdev, connect_mask); - if (ret) { - hid_err(hdev, "%s:hid_hw_start returned error\n", __func__); - goto hid_hw_start_fail; + /* Now export the actual inputs and hidraw nodes to the world */ + ret = hid_hw_start(hdev, connect_mask); + if (ret) { + hid_err(hdev, "%s:hid_hw_start returned error\n", __func__); + goto hid_hw_start_fail; + } } if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) { diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 1efde40e51364a1aace4c24beea14a18d47a480f..9f1fcbea19eb724bf93dc6ec0ff5e569fc6bc01c 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -54,7 +54,6 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_THROTTLE), HID_QUIRK_NOGET }, - { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_MOUSE_000C), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB), HID_QUIRK_NO_INIT_REPORTS }, diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index 42b84ebff057928c6602d2ba64b3337115488a30..eaae5de2ab6167cde4e49b3be0cede39c5b75205 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -550,66 +550,49 @@ static void coretemp_remove_core(struct platform_data *pdata, int indx) ida_free(&pdata->ida, indx - BASE_SYSFS_ATTR_NO); } -static int coretemp_probe(struct platform_device *pdev) +static int coretemp_device_add(int zoneid) { - struct device *dev = &pdev->dev; + struct platform_device *pdev; struct platform_data *pdata; + int err; /* Initialize the per-zone data structures */ - pdata = devm_kzalloc(dev, sizeof(struct platform_data), GFP_KERNEL); + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); if (!pdata) return -ENOMEM; - pdata->pkg_id = pdev->id; + pdata->pkg_id = zoneid; ida_init(&pdata->ida); - platform_set_drvdata(pdev, pdata); - pdata->hwmon_dev = devm_hwmon_device_register_with_groups(dev, DRVNAME, - pdata, NULL); - return PTR_ERR_OR_ZERO(pdata->hwmon_dev); -} - -static int coretemp_remove(struct platform_device *pdev) -{ - struct platform_data *pdata = platform_get_drvdata(pdev); - int i; + pdev = platform_device_alloc(DRVNAME, zoneid); + if (!pdev) { + 
err = -ENOMEM; + goto err_free_pdata; + } - for (i = MAX_CORE_DATA - 1; i >= 0; --i) - if (pdata->core_data[i]) - coretemp_remove_core(pdata, i); + err = platform_device_add(pdev); + if (err) + goto err_put_dev; - ida_destroy(&pdata->ida); + platform_set_drvdata(pdev, pdata); + zone_devices[zoneid] = pdev; return 0; -} -static struct platform_driver coretemp_driver = { - .driver = { - .name = DRVNAME, - }, - .probe = coretemp_probe, - .remove = coretemp_remove, -}; +err_put_dev: + platform_device_put(pdev); +err_free_pdata: + kfree(pdata); + return err; +} -static struct platform_device *coretemp_device_add(unsigned int cpu) +static void coretemp_device_remove(int zoneid) { - int err, zoneid = topology_logical_die_id(cpu); - struct platform_device *pdev; - - if (zoneid < 0) - return ERR_PTR(-ENOMEM); - - pdev = platform_device_alloc(DRVNAME, zoneid); - if (!pdev) - return ERR_PTR(-ENOMEM); - - err = platform_device_add(pdev); - if (err) { - platform_device_put(pdev); - return ERR_PTR(err); - } + struct platform_device *pdev = zone_devices[zoneid]; + struct platform_data *pdata = platform_get_drvdata(pdev); - zone_devices[zoneid] = pdev; - return pdev; + ida_destroy(&pdata->ida); + kfree(pdata); + platform_device_unregister(pdev); } static int coretemp_cpu_online(unsigned int cpu) @@ -633,7 +616,10 @@ static int coretemp_cpu_online(unsigned int cpu) if (!cpu_has(c, X86_FEATURE_DTHERM)) return -ENODEV; - if (!pdev) { + pdata = platform_get_drvdata(pdev); + if (!pdata->hwmon_dev) { + struct device *hwmon; + /* Check the microcode version of the CPU */ if (chk_ucode_version(cpu)) return -EINVAL; @@ -644,9 +630,11 @@ static int coretemp_cpu_online(unsigned int cpu) * online. So, initialize per-pkg data structures and * then bring this core online. */ - pdev = coretemp_device_add(cpu); - if (IS_ERR(pdev)) - return PTR_ERR(pdev); + hwmon = hwmon_device_register_with_groups(&pdev->dev, DRVNAME, + pdata, NULL); + if (IS_ERR(hwmon)) + return PTR_ERR(hwmon); + pdata->hwmon_dev = hwmon; /* * Check whether pkgtemp support is available. @@ -656,7 +644,6 @@ static int coretemp_cpu_online(unsigned int cpu) coretemp_add_core(pdev, cpu, 1); } - pdata = platform_get_drvdata(pdev); /* * Check whether a thread sibling is already online. If not add the * interface for this CPU core. @@ -675,18 +662,14 @@ static int coretemp_cpu_offline(unsigned int cpu) struct temp_data *tdata; int i, indx = -1, target; - /* - * Don't execute this on suspend as the device remove locks - * up the machine. - */ + /* No need to tear down any interfaces for suspend */ if (cpuhp_tasks_frozen) return 0; /* If the physical CPU device does not exist, just return */ - if (!pdev) - return 0; - pd = platform_get_drvdata(pdev); + if (!pd->hwmon_dev) + return 0; for (i = 0; i < NUM_REAL_CORES; i++) { if (pd->cpu_map[i] == topology_core_id(cpu)) { @@ -718,13 +701,14 @@ static int coretemp_cpu_offline(unsigned int cpu) } /* - * If all cores in this pkg are offline, remove the device. This - * will invoke the platform driver remove function, which cleans up - * the rest. + * If all cores in this pkg are offline, remove the interface. 
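The coretemp hunks in this area rework CPU offlining so the hwmon interface is only unregistered once the last CPU of a package has gone away, which the driver detects with cpumask_empty() on its per-package mask (the comment being edited here continues just below). A small sketch of that bookkeeping with invented names (struct my_pkg):

#include <linux/cpumask.h>
#include <linux/types.h>

/* Illustrative per-package bookkeeping; not the driver's real layout. */
struct my_pkg {
	struct cpumask cpumask;	/* CPUs of this package that are online */
};

static void my_pkg_cpu_online(struct my_pkg *pkg, unsigned int cpu)
{
	cpumask_set_cpu(cpu, &pkg->cpumask);
}

static bool my_pkg_cpu_offline(struct my_pkg *pkg, unsigned int cpu)
{
	cpumask_clear_cpu(cpu, &pkg->cpumask);

	/*
	 * Tell the caller to unregister the interface only when the last
	 * CPU of the package has gone away.
	 */
	return cpumask_empty(&pkg->cpumask);
}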
*/ + tdata = pd->core_data[PKG_SYSFS_ATTR_NO]; if (cpumask_empty(&pd->cpumask)) { - zone_devices[topology_logical_die_id(cpu)] = NULL; - platform_device_unregister(pdev); + if (tdata) + coretemp_remove_core(pd, PKG_SYSFS_ATTR_NO); + hwmon_device_unregister(pd->hwmon_dev); + pd->hwmon_dev = NULL; return 0; } @@ -732,7 +716,6 @@ static int coretemp_cpu_offline(unsigned int cpu) * Check whether this core is the target for the package * interface. We need to assign it to some other cpu. */ - tdata = pd->core_data[PKG_SYSFS_ATTR_NO]; if (tdata && tdata->cpu == cpu) { target = cpumask_first(&pd->cpumask); mutex_lock(&tdata->update_lock); @@ -751,7 +734,7 @@ static enum cpuhp_state coretemp_hp_online; static int __init coretemp_init(void) { - int err; + int i, err; /* * CPUID.06H.EAX[0] indicates whether the CPU has thermal @@ -767,20 +750,22 @@ static int __init coretemp_init(void) if (!zone_devices) return -ENOMEM; - err = platform_driver_register(&coretemp_driver); - if (err) - goto outzone; + for (i = 0; i < max_zones; i++) { + err = coretemp_device_add(i); + if (err) + goto outzone; + } err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hwmon/coretemp:online", coretemp_cpu_online, coretemp_cpu_offline); if (err < 0) - goto outdrv; + goto outzone; coretemp_hp_online = err; return 0; -outdrv: - platform_driver_unregister(&coretemp_driver); outzone: + while (i--) + coretemp_device_remove(i); kfree(zone_devices); return err; } @@ -788,8 +773,11 @@ module_init(coretemp_init) static void __exit coretemp_exit(void) { + int i; + cpuhp_remove_state(coretemp_hp_online); - platform_driver_unregister(&coretemp_driver); + for (i = 0; i < max_zones; i++) + coretemp_device_remove(i); kfree(zone_devices); } module_exit(coretemp_exit) diff --git a/drivers/hwmon/ltc2945.c b/drivers/hwmon/ltc2945.c index ba9c868a8641e5c363ec121d47d3eb6f68b569d1..65d792f184255f5c78daea0481fc0d3859b38135 100644 --- a/drivers/hwmon/ltc2945.c +++ b/drivers/hwmon/ltc2945.c @@ -248,6 +248,8 @@ static ssize_t ltc2945_value_store(struct device *dev, /* convert to register value, then clamp and write result */ regval = ltc2945_val_to_reg(dev, reg, val); + if (regval < 0) + return regval; if (is_power_reg(reg)) { regval = clamp_val(regval, 0, 0xffffff); regbuf[0] = regval >> 16; diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c index bd8f5a3aaad9cad5bab7dd4fbd6847399ccdae8f..052c897a635d5fdf43600cf7ee97be442cab54c9 100644 --- a/drivers/hwmon/mlxreg-fan.c +++ b/drivers/hwmon/mlxreg-fan.c @@ -127,6 +127,12 @@ mlxreg_fan_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, if (err) return err; + if (MLXREG_FAN_GET_FAULT(regval, tacho->mask)) { + /* FAN is broken - return zero for FAN speed. */ + *val = 0; + return 0; + } + *val = MLXREG_FAN_GET_RPM(regval, fan->divider, fan->samples); break; diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c index 9468c6c89b3f529e085ef2240ceddc2b8324c9da..682fffaab2b40fd7c32909172d4a3f33e7cd5ccd 100644 --- a/drivers/i2c/busses/i2c-designware-common.c +++ b/drivers/i2c/busses/i2c-designware-common.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "i2c-designware-core.h" @@ -347,7 +348,8 @@ u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset) * * If your hardware is free from tHD;STA issue, try this one. 
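The i2c-designware hunks that continue just below replace the open-coded "(ic_clk * t + 500000) / 1000000" rounding with DIV_ROUND_CLOSEST_ULL over a 64-bit product, so a large clock-rate-times-time product cannot overflow 32 bits before the division. A standalone sketch of the same arithmetic (the helper name and the example values are illustrative only):

#include <linux/kernel.h>
#include <linux/units.h>

/*
 * cycles = rate_khz * time_ns / 1e6 (MICRO), rounded to the closest
 * integer. Widening the product to u64 first is what the hunks below
 * add; e.g. a 100000 kHz clock and a 4000 ns period give 400 cycles.
 */
static u32 cycles_from_ns(u32 rate_khz, u32 time_ns)
{
	return DIV_ROUND_CLOSEST_ULL((u64)rate_khz * time_ns, MICRO);
}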
*/ - return (ic_clk * tSYMBOL + 500000) / 1000000 - 8 + offset; + return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * tSYMBOL, MICRO) - + 8 + offset; else /* * Conditional expression: @@ -363,8 +365,8 @@ u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset) * The reason why we need to take into account "tf" here, * is the same as described in i2c_dw_scl_lcnt(). */ - return (ic_clk * (tSYMBOL + tf) + 500000) / 1000000 - - 3 + offset; + return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * (tSYMBOL + tf), MICRO) - + 3 + offset; } u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset) @@ -380,7 +382,8 @@ u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset) * account the fall time of SCL signal (tf). Default tf value * should be 0.3 us, for safety. */ - return ((ic_clk * (tLOW + tf) + 500000) / 1000000) - 1 + offset; + return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * (tLOW + tf), MICRO) - + 1 + offset; } int i2c_dw_set_sda_hold(struct dw_i2c_dev *dev) diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index ad91c7c0faa5497b1b6379b08c1342c3dbed5fc4..474754151725269ecd4a1e4de453e95067b3afc5 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -32,12 +32,13 @@ #include #include #include +#include #include "i2c-designware-core.h" static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev) { - return clk_get_rate(dev->clk)/1000; + return clk_get_rate(dev->clk) / KILO; } #ifdef CONFIG_ACPI @@ -284,7 +285,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) if (!dev->sda_hold_time && t->sda_hold_ns) dev->sda_hold_time = - div_u64(clk_khz * t->sda_hold_ns + 500000, 1000000); + DIV_S64_ROUND_CLOSEST(clk_khz * t->sda_hold_ns, MICRO); } adap = &dev->adapter; diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c index c4b08a9244614d00bb3d5edb4049beaebd1e8646..abad24808e855b8061df5a7ce4b09f6a98dc1b79 100644 --- a/drivers/i2c/busses/i2c-mxs.c +++ b/drivers/i2c/busses/i2c-mxs.c @@ -842,8 +842,8 @@ static int mxs_i2c_probe(struct platform_device *pdev) /* Setup the DMA */ i2c->dmach = dma_request_chan(dev, "rx-tx"); if (IS_ERR(i2c->dmach)) { - dev_err(dev, "Failed to request dma\n"); - return PTR_ERR(i2c->dmach); + return dev_err_probe(dev, PTR_ERR(i2c->dmach), + "Failed to request dma\n"); } platform_set_drvdata(pdev, i2c); diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c index 02ddb237f69afdfcc766f1703e9b81297882534a..13c14eb175e94740c298727bb8779ae7e6a75b84 100644 --- a/drivers/i2c/busses/i2c-rk3x.c +++ b/drivers/i2c/busses/i2c-rk3x.c @@ -80,7 +80,7 @@ enum { #define DEFAULT_SCL_RATE (100 * 1000) /* Hz */ /** - * struct i2c_spec_values: + * struct i2c_spec_values - I2C specification values for various modes * @min_hold_start_ns: min hold time (repeated) START condition * @min_low_ns: min LOW period of the SCL clock * @min_high_ns: min HIGH period of the SCL cloc @@ -136,7 +136,7 @@ static const struct i2c_spec_values fast_mode_plus_spec = { }; /** - * struct rk3x_i2c_calced_timings: + * struct rk3x_i2c_calced_timings - calculated V1 timings * @div_low: Divider output for low * @div_high: Divider output for high * @tuning: Used to adjust setup/hold data time, @@ -159,7 +159,7 @@ enum rk3x_i2c_state { }; /** - * struct rk3x_i2c_soc_data: + * struct rk3x_i2c_soc_data - SOC-specific data * @grf_offset: offset inside the grf regmap for setting the i2c type * @calc_timings: Callback function for i2c timing information calculated */ @@ 
-239,7 +239,8 @@ static inline void rk3x_i2c_clean_ipd(struct rk3x_i2c *i2c) } /** - * Generate a START condition, which triggers a REG_INT_START interrupt. + * rk3x_i2c_start - Generate a START condition, which triggers a REG_INT_START interrupt. + * @i2c: target controller data */ static void rk3x_i2c_start(struct rk3x_i2c *i2c) { @@ -258,8 +259,8 @@ static void rk3x_i2c_start(struct rk3x_i2c *i2c) } /** - * Generate a STOP condition, which triggers a REG_INT_STOP interrupt. - * + * rk3x_i2c_stop - Generate a STOP condition, which triggers a REG_INT_STOP interrupt. + * @i2c: target controller data * @error: Error code to return in rk3x_i2c_xfer */ static void rk3x_i2c_stop(struct rk3x_i2c *i2c, int error) @@ -298,7 +299,8 @@ static void rk3x_i2c_stop(struct rk3x_i2c *i2c, int error) } /** - * Setup a read according to i2c->msg + * rk3x_i2c_prepare_read - Setup a read according to i2c->msg + * @i2c: target controller data */ static void rk3x_i2c_prepare_read(struct rk3x_i2c *i2c) { @@ -329,7 +331,8 @@ static void rk3x_i2c_prepare_read(struct rk3x_i2c *i2c) } /** - * Fill the transmit buffer with data from i2c->msg + * rk3x_i2c_fill_transmit_buf - Fill the transmit buffer with data from i2c->msg + * @i2c: target controller data */ static void rk3x_i2c_fill_transmit_buf(struct rk3x_i2c *i2c) { @@ -532,11 +535,10 @@ static irqreturn_t rk3x_i2c_irq(int irqno, void *dev_id) } /** - * Get timing values of I2C specification - * + * rk3x_i2c_get_spec - Get timing values of I2C specification * @speed: Desired SCL frequency * - * Returns: Matched i2c spec values. + * Return: Matched i2c_spec_values. */ static const struct i2c_spec_values *rk3x_i2c_get_spec(unsigned int speed) { @@ -549,13 +551,12 @@ static const struct i2c_spec_values *rk3x_i2c_get_spec(unsigned int speed) } /** - * Calculate divider values for desired SCL frequency - * + * rk3x_i2c_v0_calc_timings - Calculate divider values for desired SCL frequency * @clk_rate: I2C input clock rate * @t: Known I2C timing information * @t_calc: Caculated rk3x private timings that would be written into regs * - * Returns: 0 on success, -EINVAL if the goal SCL rate is too slow. In that case + * Return: %0 on success, -%EINVAL if the goal SCL rate is too slow. In that case * a best-effort divider value is returned in divs. If the target rate is * too high, we silently use the highest possible rate. */ @@ -710,13 +711,12 @@ static int rk3x_i2c_v0_calc_timings(unsigned long clk_rate, } /** - * Calculate timing values for desired SCL frequency - * + * rk3x_i2c_v1_calc_timings - Calculate timing values for desired SCL frequency * @clk_rate: I2C input clock rate * @t: Known I2C timing information * @t_calc: Caculated rk3x private timings that would be written into regs * - * Returns: 0 on success, -EINVAL if the goal SCL rate is too slow. In that case + * Return: %0 on success, -%EINVAL if the goal SCL rate is too slow. In that case * a best-effort divider value is returned in divs. If the target rate is * too high, we silently use the highest possible rate. * The following formulas are v1's method to calculate timings. @@ -960,14 +960,14 @@ static int rk3x_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long } /** - * Setup I2C registers for an I2C operation specified by msgs, num. - * - * Must be called with i2c->lock held. - * + * rk3x_i2c_setup - Setup I2C registers for an I2C operation specified by msgs, num. 
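The i2c-rk3x hunks in this region rewrite bare comment blocks into kernel-doc: the opening line names the function, each parameter gets an @name: line, and the result is documented under Return:. The block being edited here continues just below; for reference, a complete comment in that format looks like this (the function itself is invented for illustration):

/**
 * my_div_round_up - divide two numbers, rounding the result up
 * @num: numerator
 * @den: denominator, must not be zero
 *
 * Return: the smallest integer greater than or equal to @num / @den.
 */
static unsigned int my_div_round_up(unsigned int num, unsigned int den)
{
	return (num + den - 1) / den;
}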
+ * @i2c: target controller data * @msgs: I2C msgs to process * @num: Number of msgs * - * returns: Number of I2C msgs processed or negative in case of error + * Must be called with i2c->lock held. + * + * Return: Number of I2C msgs processed or negative in case of error */ static int rk3x_i2c_setup(struct rk3x_i2c *i2c, struct i2c_msg *msgs, int num) { diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c index f05840d17fb71fe220faf38520c7855415455c28..8d929a4f9110fa9f0436ebda39e53561edb50ee7 100644 --- a/drivers/iio/accel/hid-sensor-accel-3d.c +++ b/drivers/iio/accel/hid-sensor-accel-3d.c @@ -277,6 +277,7 @@ static int accel_3d_capture_sample(struct hid_sensor_hub_device *hsdev, hid_sensor_convert_timestamp( &accel_state->common_attributes, *(int64_t *)raw_data); + ret = 0; break; default: break; diff --git a/drivers/iio/accel/mma9551_core.c b/drivers/iio/accel/mma9551_core.c index 666e7a04a7d7aefef595a35e045f7d46031fa0ed..9bb5c2fea08cf9b285d2881f6ecd99820c422838 100644 --- a/drivers/iio/accel/mma9551_core.c +++ b/drivers/iio/accel/mma9551_core.c @@ -296,9 +296,12 @@ int mma9551_read_config_word(struct i2c_client *client, u8 app_id, ret = mma9551_transfer(client, app_id, MMA9551_CMD_READ_CONFIG, reg, NULL, 0, (u8 *)&v, 2); + if (ret < 0) + return ret; + *val = be16_to_cpu(v); - return ret; + return 0; } EXPORT_SYMBOL(mma9551_read_config_word); @@ -354,9 +357,12 @@ int mma9551_read_status_word(struct i2c_client *client, u8 app_id, ret = mma9551_transfer(client, app_id, MMA9551_CMD_READ_STATUS, reg, NULL, 0, (u8 *)&v, 2); + if (ret < 0) + return ret; + *val = be16_to_cpu(v); - return ret; + return 0; } EXPORT_SYMBOL(mma9551_read_status_word); diff --git a/drivers/iio/adc/berlin2-adc.c b/drivers/iio/adc/berlin2-adc.c index 8b04b95b7b7ae033e8e3f1be1350b4dd4f793b4a..fa2c87946e16fb07b5058cece55586efe9ca9c33 100644 --- a/drivers/iio/adc/berlin2-adc.c +++ b/drivers/iio/adc/berlin2-adc.c @@ -289,8 +289,10 @@ static int berlin2_adc_probe(struct platform_device *pdev) int ret; indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*priv)); - if (!indio_dev) + if (!indio_dev) { + of_node_put(parent_np); return -ENOMEM; + } priv = iio_priv(indio_dev); platform_set_drvdata(pdev, indio_dev); diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c index 9234f14167b7a2fb33635eb7f2802b8bbfa711fd..171d73efb2f886740cd92e09021ba2aab6743d1b 100644 --- a/drivers/iio/adc/stm32-dfsdm-adc.c +++ b/drivers/iio/adc/stm32-dfsdm-adc.c @@ -1521,6 +1521,7 @@ static const struct of_device_id stm32_dfsdm_adc_match[] = { }, {} }; +MODULE_DEVICE_TABLE(of, stm32_dfsdm_adc_match); static int stm32_dfsdm_adc_probe(struct platform_device *pdev) { diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c index 256177b15c511de57c9e93b31a9e690d09bb7780..024bdc1ef77e621ebe88b5c25ae0784a76954319 100644 --- a/drivers/iio/adc/twl6030-gpadc.c +++ b/drivers/iio/adc/twl6030-gpadc.c @@ -57,6 +57,18 @@ #define TWL6030_GPADCS BIT(1) #define TWL6030_GPADCR BIT(0) +#define USB_VBUS_CTRL_SET 0x04 +#define USB_ID_CTRL_SET 0x06 + +#define TWL6030_MISC1 0xE4 +#define VBUS_MEAS 0x01 +#define ID_MEAS 0x01 + +#define VAC_MEAS 0x04 +#define VBAT_MEAS 0x02 +#define BB_MEAS 0x01 + + /** * struct twl6030_chnl_calib - channel calibration * @gain: slope coefficient for ideal curve @@ -927,6 +939,26 @@ static int twl6030_gpadc_probe(struct platform_device *pdev) return ret; } + ret = twl_i2c_write_u8(TWL_MODULE_USB, VBUS_MEAS, USB_VBUS_CTRL_SET); + if (ret < 0) { + 
dev_err(dev, "failed to wire up inputs\n"); + return ret; + } + + ret = twl_i2c_write_u8(TWL_MODULE_USB, ID_MEAS, USB_ID_CTRL_SET); + if (ret < 0) { + dev_err(dev, "failed to wire up inputs\n"); + return ret; + } + + ret = twl_i2c_write_u8(TWL6030_MODULE_ID0, + VBAT_MEAS | BB_MEAS | VAC_MEAS, + TWL6030_MISC1); + if (ret < 0) { + dev_err(dev, "failed to wire up inputs\n"); + return ret; + } + indio_dev->name = DRIVER_NAME; indio_dev->info = &twl6030_gpadc_iio_info; indio_dev->modes = INDIO_DIRECT_MODE; diff --git a/drivers/iio/imu/fxos8700_core.c b/drivers/iio/imu/fxos8700_core.c index ab288186f36e4575ac9b68467bf4d59fab606387..04d3778fcc15308e1647810f941640f34679eece 100644 --- a/drivers/iio/imu/fxos8700_core.c +++ b/drivers/iio/imu/fxos8700_core.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -144,9 +145,8 @@ #define FXOS8700_NVM_DATA_BNK0 0xa7 /* Bit definitions for FXOS8700_CTRL_REG1 */ -#define FXOS8700_CTRL_ODR_MSK 0x38 #define FXOS8700_CTRL_ODR_MAX 0x00 -#define FXOS8700_CTRL_ODR_MIN GENMASK(4, 3) +#define FXOS8700_CTRL_ODR_MSK GENMASK(5, 3) /* Bit definitions for FXOS8700_M_CTRL_REG1 */ #define FXOS8700_HMS_MASK GENMASK(1, 0) @@ -320,7 +320,7 @@ static enum fxos8700_sensor fxos8700_to_sensor(enum iio_chan_type iio_type) switch (iio_type) { case IIO_ACCEL: return FXOS8700_ACCEL; - case IIO_ANGL_VEL: + case IIO_MAGN: return FXOS8700_MAGN; default: return -EINVAL; @@ -345,15 +345,35 @@ static int fxos8700_set_active_mode(struct fxos8700_data *data, static int fxos8700_set_scale(struct fxos8700_data *data, enum fxos8700_sensor t, int uscale) { - int i; + int i, ret, val; + bool active_mode; static const int scale_num = ARRAY_SIZE(fxos8700_accel_scale); struct device *dev = regmap_get_device(data->regmap); if (t == FXOS8700_MAGN) { - dev_err(dev, "Magnetometer scale is locked at 1200uT\n"); + dev_err(dev, "Magnetometer scale is locked at 0.001Gs\n"); return -EINVAL; } + /* + * When device is in active mode, it failed to set an ACCEL + * full-scale range(2g/4g/8g) in FXOS8700_XYZ_DATA_CFG. + * This is not align with the datasheet, but it is a fxos8700 + * chip behavier. Set the device in standby mode before setting + * an ACCEL full-scale range. + */ + ret = regmap_read(data->regmap, FXOS8700_CTRL_REG1, &val); + if (ret) + return ret; + + active_mode = val & FXOS8700_ACTIVE; + if (active_mode) { + ret = regmap_write(data->regmap, FXOS8700_CTRL_REG1, + val & ~FXOS8700_ACTIVE); + if (ret) + return ret; + } + for (i = 0; i < scale_num; i++) if (fxos8700_accel_scale[i].uscale == uscale) break; @@ -361,8 +381,12 @@ static int fxos8700_set_scale(struct fxos8700_data *data, if (i == scale_num) return -EINVAL; - return regmap_write(data->regmap, FXOS8700_XYZ_DATA_CFG, + ret = regmap_write(data->regmap, FXOS8700_XYZ_DATA_CFG, fxos8700_accel_scale[i].bits); + if (ret) + return ret; + return regmap_write(data->regmap, FXOS8700_CTRL_REG1, + active_mode); } static int fxos8700_get_scale(struct fxos8700_data *data, @@ -372,7 +396,7 @@ static int fxos8700_get_scale(struct fxos8700_data *data, static const int scale_num = ARRAY_SIZE(fxos8700_accel_scale); if (t == FXOS8700_MAGN) { - *uscale = 1200; /* Magnetometer is locked at 1200uT */ + *uscale = 1000; /* Magnetometer is locked at 0.001Gs */ return 0; } @@ -394,22 +418,61 @@ static int fxos8700_get_data(struct fxos8700_data *data, int chan_type, int axis, int *val) { u8 base, reg; + s16 tmp; int ret; - enum fxos8700_sensor type = fxos8700_to_sensor(chan_type); - base = type ? 
FXOS8700_OUT_X_MSB : FXOS8700_M_OUT_X_MSB; + /* + * The register base address varies with the channel type. + * This bug went unnoticed before because the enum-based selection + * was hard to read. Use a switch statement instead. + */ + switch (chan_type) { + case IIO_ACCEL: + base = FXOS8700_OUT_X_MSB; + break; + case IIO_MAGN: + base = FXOS8700_M_OUT_X_MSB; + break; + default: + return -EINVAL; + } /* Block read 6 bytes of device output registers to avoid data loss */ ret = regmap_bulk_read(data->regmap, base, data->buf, - FXOS8700_DATA_BUF_SIZE); + sizeof(data->buf)); if (ret) return ret; /* Convert axis to buffer index */ reg = axis - IIO_MOD_X; + /* + * Convert to native endianness. The accel data and magn data + * are signed, so a forced type conversion is needed. + */ + tmp = be16_to_cpu(data->buf[reg]); + + /* + * ACCEL output data registers contain the X-axis, Y-axis, and Z-axis + * 14-bit left-justified sample data, while MAGN output data registers + * contain the X-axis, Y-axis, and Z-axis 16-bit sample data. Apply + * a signed 2-bit right shift to the raw data read back from the ACCEL + * output data registers and leave the MAGN data unchanged. The value + * is then sign-extended to 32 bits. + */ + switch (chan_type) { + case IIO_ACCEL: + tmp = tmp >> 2; + break; + case IIO_MAGN: + /* Nothing to do */ + break; + default: + return -EINVAL; + } + /* Convert to native endianness */ - *val = sign_extend32(be16_to_cpu(data->buf[reg]), 15); + *val = sign_extend32(tmp, 15); return 0; } @@ -445,10 +508,9 @@ static int fxos8700_set_odr(struct fxos8700_data *data, enum fxos8700_sensor t, if (i >= odr_num) return -EINVAL; - return regmap_update_bits(data->regmap, - FXOS8700_CTRL_REG1, - FXOS8700_CTRL_ODR_MSK + FXOS8700_ACTIVE, - fxos8700_odr[i].bits << 3 | active_mode); + val &= ~FXOS8700_CTRL_ODR_MSK; + val |= FIELD_PREP(FXOS8700_CTRL_ODR_MSK, fxos8700_odr[i].bits) | FXOS8700_ACTIVE; + return regmap_write(data->regmap, FXOS8700_CTRL_REG1, val); } static int fxos8700_get_odr(struct fxos8700_data *data, enum fxos8700_sensor t, @@ -461,7 +523,7 @@ static int fxos8700_get_odr(struct fxos8700_data *data, enum fxos8700_sensor t, if (ret) return ret; - val &= FXOS8700_CTRL_ODR_MSK; + val = FIELD_GET(FXOS8700_CTRL_ODR_MSK, val); for (i = 0; i < odr_num; i++) if (val == fxos8700_odr[i].bits) @@ -526,7 +588,7 @@ static IIO_CONST_ATTR(in_accel_sampling_frequency_available, static IIO_CONST_ATTR(in_magn_sampling_frequency_available, "1.5625 6.25 12.5 50 100 200 400 800"); static IIO_CONST_ATTR(in_accel_scale_available, "0.000244 0.000488 0.000976"); -static IIO_CONST_ATTR(in_magn_scale_available, "0.000001200"); +static IIO_CONST_ATTR(in_magn_scale_available, "0.001000"); static struct attribute *fxos8700_attrs[] = { &iio_const_attr_in_accel_sampling_frequency_available.dev_attr.attr, @@ -592,14 +654,19 @@ static int fxos8700_chip_init(struct fxos8700_data *data, bool use_spi) if (ret) return ret; - /* Max ODR (800Hz individual or 400Hz hybrid), active mode */ - ret = regmap_write(data->regmap, FXOS8700_CTRL_REG1, - FXOS8700_CTRL_ODR_MAX | FXOS8700_ACTIVE); + /* + * Set the max full-scale range (+/-8G) for the ACCEL sensor during + * chip initialization, then activate the device. 
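+ * As with fxos8700_set_scale() above, FXOS8700_XYZ_DATA_CFG can only be + * written while the device is in standby mode, so the range is programmed + * here before FXOS8700_ACTIVE is set in FXOS8700_CTRL_REG1.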
+ */ + ret = regmap_write(data->regmap, FXOS8700_XYZ_DATA_CFG, MODE_8G); if (ret) return ret; - /* Set for max full-scale range (+/-8G) */ - return regmap_write(data->regmap, FXOS8700_XYZ_DATA_CFG, MODE_8G); + /* Max ODR (800Hz individual or 400Hz hybrid), active mode */ + return regmap_update_bits(data->regmap, FXOS8700_CTRL_REG1, + FXOS8700_CTRL_ODR_MSK | FXOS8700_ACTIVE, + FIELD_PREP(FXOS8700_CTRL_ODR_MSK, FXOS8700_CTRL_ODR_MAX) | + FXOS8700_ACTIVE); } static void fxos8700_chip_uninit(void *data) diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 5889639e90a1c246eaa75612cf4d13d2e4222bd7..5123be0ab02f547d800fd4d76675713661f4ec1a 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -2911,15 +2911,18 @@ EXPORT_SYMBOL(__rdma_block_iter_start); bool __rdma_block_iter_next(struct ib_block_iter *biter) { unsigned int block_offset; + unsigned int sg_delta; if (!biter->__sg_nents || !biter->__sg) return false; biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance; block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1); - biter->__sg_advance += BIT_ULL(biter->__pg_bit) - block_offset; + sg_delta = BIT_ULL(biter->__pg_bit) - block_offset; - if (biter->__sg_advance >= sg_dma_len(biter->__sg)) { + if (sg_dma_len(biter->__sg) - biter->__sg_advance > sg_delta) { + biter->__sg_advance += sg_delta; + } else { biter->__sg_advance = 0; biter->__sg = sg_next(biter->__sg); biter->__sg_nents--; diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 88476a1a601a4f8e6e701ba59c8a316db92da148..4b41f35668b2027ce841577ea10e768dcf2ef1e2 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -1097,7 +1097,7 @@ static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr); static void handle_temp_err(struct hfi1_devdata *dd); static void dc_shutdown(struct hfi1_devdata *dd); static void dc_start(struct hfi1_devdata *dd); -static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp, +static int qos_rmt_entries(unsigned int n_krcv_queues, unsigned int *mp, unsigned int *np); static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd); static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms); @@ -13403,7 +13403,6 @@ static int set_up_context_variables(struct hfi1_devdata *dd) int ret; unsigned ngroups; int rmt_count; - int user_rmt_reduced; u32 n_usr_ctxts; u32 send_contexts = chip_send_contexts(dd); u32 rcv_contexts = chip_rcv_contexts(dd); @@ -13462,28 +13461,34 @@ static int set_up_context_variables(struct hfi1_devdata *dd) (num_kernel_contexts + n_usr_ctxts), &node_affinity.real_cpu_mask); /* - * The RMT entries are currently allocated as shown below: - * 1. QOS (0 to 128 entries); - * 2. FECN (num_kernel_context - 1 + num_user_contexts + - * num_netdev_contexts); - * 3. netdev (num_netdev_contexts). - * It should be noted that FECN oversubscribe num_netdev_contexts - * entries of RMT because both netdev and PSM could allocate any receive - * context between dd->first_dyn_alloc_text and dd->num_rcv_contexts, - * and PSM FECN must reserve an RMT entry for each possible PSM receive - * context. + * RMT entries are allocated as follows: + * 1. QOS (0 to 128 entries) + * 2. FECN (num_kernel_context - 1 [a] + num_user_contexts + + * num_netdev_contexts [b]) + * 3. netdev (NUM_NETDEV_MAP_ENTRIES) + * + * Notes: + * [a] Kernel contexts (except control) are included in FECN if kernel + * TID_RDMA is active. 
+ * [b] Netdev and user contexts are randomly allocated from the same + * context pool, so FECN must cover all contexts in the pool. */ - rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_netdev_contexts * 2); - if (HFI1_CAP_IS_KSET(TID_RDMA)) - rmt_count += num_kernel_contexts - 1; - if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) { - user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count; - dd_dev_err(dd, - "RMT size is reducing the number of user receive contexts from %u to %d\n", - n_usr_ctxts, - user_rmt_reduced); - /* recalculate */ - n_usr_ctxts = user_rmt_reduced; + rmt_count = qos_rmt_entries(num_kernel_contexts - 1, NULL, NULL) + + (HFI1_CAP_IS_KSET(TID_RDMA) ? num_kernel_contexts - 1 + : 0) + + n_usr_ctxts + + num_netdev_contexts + + NUM_NETDEV_MAP_ENTRIES; + if (rmt_count > NUM_MAP_ENTRIES) { + int over = rmt_count - NUM_MAP_ENTRIES; + /* try to squish user contexts, minimum of 1 */ + if (over >= n_usr_ctxts) { + dd_dev_err(dd, "RMT overflow: reduce the requested number of contexts\n"); + return -EINVAL; + } + dd_dev_err(dd, "RMT overflow: reducing # user contexts from %u to %u\n", + n_usr_ctxts, n_usr_ctxts - over); + n_usr_ctxts -= over; } /* the first N are kernel contexts, the rest are user/netdev contexts */ @@ -14340,15 +14345,15 @@ static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index) } /* return the number of RSM map table entries that will be used for QOS */ -static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp, +static int qos_rmt_entries(unsigned int n_krcv_queues, unsigned int *mp, unsigned int *np) { int i; unsigned int m, n; - u8 max_by_vl = 0; + uint max_by_vl = 0; /* is QOS active at all? */ - if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS || + if (n_krcv_queues < MIN_KERNEL_KCTXTS || num_vls == 1 || krcvqsset <= 1) goto no_qos; @@ -14406,7 +14411,7 @@ static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt) if (!rmt) goto bail; - rmt_entries = qos_rmt_entries(dd, &m, &n); + rmt_entries = qos_rmt_entries(dd->n_krcv_queues - 1, &m, &n); if (rmt_entries == 0) goto bail; qpns_per_vl = 1 << m; diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index d84b1098762c10c2fb564136d2c8d50767e4338b..7828fb59312031122fa6dad262cee65e5ccf60c0 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c @@ -1359,12 +1359,15 @@ static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg, addr = arg + offsetof(struct hfi1_tid_info, tidcnt); if (copy_to_user((void __user *)addr, &tinfo.tidcnt, sizeof(tinfo.tidcnt))) - return -EFAULT; + ret = -EFAULT; addr = arg + offsetof(struct hfi1_tid_info, length); - if (copy_to_user((void __user *)addr, &tinfo.length, + if (!ret && copy_to_user((void __user *)addr, &tinfo.length, sizeof(tinfo.length))) ret = -EFAULT; + + if (ret) + hfi1_user_exp_rcv_invalid(fd, &tinfo); } return ret; diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c index b94fc7fd75a9618739e23b6cab14490d8392e0cb..0e0be6c62e3d1c76d6b3a30901272aa326dd6190 100644 --- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c +++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c @@ -65,18 +65,25 @@ static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata, static bool tid_rb_invalidate(struct mmu_interval_notifier *mni, const struct mmu_notifier_range *range, unsigned long cur_seq); +static bool tid_cover_invalidate(struct mmu_interval_notifier *mni, + const struct mmu_notifier_range *range, + unsigned long cur_seq); static int 
program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *, struct tid_group *grp, unsigned int start, u16 count, u32 *tidlist, unsigned int *tididx, unsigned int *pmapped); -static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo, - struct tid_group **grp); +static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo); +static void __clear_tid_node(struct hfi1_filedata *fd, + struct tid_rb_node *node); static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node); static const struct mmu_interval_notifier_ops tid_mn_ops = { .invalidate = tid_rb_invalidate, }; +static const struct mmu_interval_notifier_ops tid_cover_ops = { + .invalidate = tid_cover_invalidate, +}; /* * Initialize context and file private data needed for Expected @@ -195,16 +202,11 @@ static void unpin_rcv_pages(struct hfi1_filedata *fd, static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf) { int pinned; - unsigned int npages; + unsigned int npages = tidbuf->npages; unsigned long vaddr = tidbuf->vaddr; struct page **pages = NULL; struct hfi1_devdata *dd = fd->uctxt->dd; - /* Get the number of pages the user buffer spans */ - npages = num_user_pages(vaddr, tidbuf->length); - if (!npages) - return -EINVAL; - if (npages > fd->uctxt->expected_count) { dd_dev_err(dd, "Expected buffer too big\n"); return -EINVAL; @@ -231,7 +233,6 @@ static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf) return pinned; } tidbuf->pages = pages; - tidbuf->npages = npages; fd->tid_n_pinned += pinned; return pinned; } @@ -295,53 +296,66 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd, tididx = 0, mapped, mapped_pages = 0; u32 *tidlist = NULL; struct tid_user_buf *tidbuf; + unsigned long mmu_seq = 0; if (!PAGE_ALIGNED(tinfo->vaddr)) return -EINVAL; + if (tinfo->length == 0) + return -EINVAL; tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL); if (!tidbuf) return -ENOMEM; + mutex_init(&tidbuf->cover_mutex); tidbuf->vaddr = tinfo->vaddr; tidbuf->length = tinfo->length; + tidbuf->npages = num_user_pages(tidbuf->vaddr, tidbuf->length); tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets), GFP_KERNEL); if (!tidbuf->psets) { - kfree(tidbuf); - return -ENOMEM; + ret = -ENOMEM; + goto fail_release_mem; + } + + if (fd->use_mn) { + ret = mmu_interval_notifier_insert( + &tidbuf->notifier, current->mm, + tidbuf->vaddr, tidbuf->npages * PAGE_SIZE, + &tid_cover_ops); + if (ret) + goto fail_release_mem; + mmu_seq = mmu_interval_read_begin(&tidbuf->notifier); } pinned = pin_rcv_pages(fd, tidbuf); if (pinned <= 0) { - kfree(tidbuf->psets); - kfree(tidbuf); - return pinned; + ret = (pinned < 0) ? pinned : -ENOSPC; + goto fail_unpin; } /* Find sets of physically contiguous pages */ tidbuf->n_psets = find_phys_blocks(tidbuf, pinned); - /* - * We don't need to access this under a lock since tid_used is per - * process and the same process cannot be in hfi1_user_exp_rcv_clear() - * and hfi1_user_exp_rcv_setup() at the same time. - */ + /* Reserve the number of expected tids to be used. 
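+ * The full pageset_count is charged to tid_used under tid_lock up front; + * once programming completes, the count is trimmed back to the number of + * TIDs actually programmed, and the fail_unreserve path drops the whole + * reservation on error.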
*/ spin_lock(&fd->tid_lock); if (fd->tid_used + tidbuf->n_psets > fd->tid_limit) pageset_count = fd->tid_limit - fd->tid_used; else pageset_count = tidbuf->n_psets; + fd->tid_used += pageset_count; spin_unlock(&fd->tid_lock); - if (!pageset_count) - goto bail; + if (!pageset_count) { + ret = -ENOSPC; + goto fail_unreserve; + } ngroups = pageset_count / dd->rcv_entries.group_size; tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL); if (!tidlist) { ret = -ENOMEM; - goto nomem; + goto fail_unreserve; } tididx = 0; @@ -437,43 +451,78 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd, } unlock: mutex_unlock(&uctxt->exp_mutex); -nomem: hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx, mapped_pages, ret); - if (tididx) { - spin_lock(&fd->tid_lock); - fd->tid_used += tididx; - spin_unlock(&fd->tid_lock); - tinfo->tidcnt = tididx; - tinfo->length = mapped_pages * PAGE_SIZE; - - if (copy_to_user(u64_to_user_ptr(tinfo->tidlist), - tidlist, sizeof(tidlist[0]) * tididx)) { - /* - * On failure to copy to the user level, we need to undo - * everything done so far so we don't leak resources. - */ - tinfo->tidlist = (unsigned long)&tidlist; - hfi1_user_exp_rcv_clear(fd, tinfo); - tinfo->tidlist = 0; - ret = -EFAULT; - goto bail; + + /* fail if nothing was programmed, set error if none provided */ + if (tididx == 0) { + if (ret >= 0) + ret = -ENOSPC; + goto fail_unreserve; + } + + /* adjust reserved tid_used to actual count */ + spin_lock(&fd->tid_lock); + fd->tid_used -= pageset_count - tididx; + spin_unlock(&fd->tid_lock); + + /* unpin all pages not covered by a TID */ + unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages, pinned - mapped_pages, + false); + + if (fd->use_mn) { + /* check for an invalidate during setup */ + bool fail = false; + + mutex_lock(&tidbuf->cover_mutex); + fail = mmu_interval_read_retry(&tidbuf->notifier, mmu_seq); + mutex_unlock(&tidbuf->cover_mutex); + + if (fail) { + ret = -EBUSY; + goto fail_unprogram; } } - /* - * If not everything was mapped (due to insufficient RcvArray entries, - * for example), unpin all unmapped pages so we can pin them nex time. - */ - if (mapped_pages != pinned) - unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages, - (pinned - mapped_pages), false); -bail: + tinfo->tidcnt = tididx; + tinfo->length = mapped_pages * PAGE_SIZE; + + if (copy_to_user(u64_to_user_ptr(tinfo->tidlist), + tidlist, sizeof(tidlist[0]) * tididx)) { + ret = -EFAULT; + goto fail_unprogram; + } + + if (fd->use_mn) + mmu_interval_notifier_remove(&tidbuf->notifier); + kfree(tidbuf->pages); kfree(tidbuf->psets); + kfree(tidbuf); kfree(tidlist); + return 0; + +fail_unprogram: + /* unprogram, unmap, and unpin all allocated TIDs */ + tinfo->tidlist = (unsigned long)tidlist; + hfi1_user_exp_rcv_clear(fd, tinfo); + tinfo->tidlist = 0; + pinned = 0; /* nothing left to unpin */ + pageset_count = 0; /* nothing left reserved */ +fail_unreserve: + spin_lock(&fd->tid_lock); + fd->tid_used -= pageset_count; + spin_unlock(&fd->tid_lock); +fail_unpin: + if (fd->use_mn) + mmu_interval_notifier_remove(&tidbuf->notifier); + if (pinned > 0) + unpin_rcv_pages(fd, tidbuf, NULL, 0, pinned, false); +fail_release_mem: kfree(tidbuf->pages); + kfree(tidbuf->psets); kfree(tidbuf); - return ret > 0 ? 
0 : ret; + kfree(tidlist); + return ret; } int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd, @@ -494,7 +543,7 @@ int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd, mutex_lock(&uctxt->exp_mutex); for (tididx = 0; tididx < tinfo->tidcnt; tididx++) { - ret = unprogram_rcvarray(fd, tidinfo[tididx], NULL); + ret = unprogram_rcvarray(fd, tidinfo[tididx]); if (ret) { hfi1_cdbg(TID, "Failed to unprogram rcv array %d", ret); @@ -750,6 +799,7 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd, } node->fdata = fd; + mutex_init(&node->invalidate_mutex); node->phys = page_to_phys(pages[0]); node->npages = npages; node->rcventry = rcventry; @@ -765,11 +815,6 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd, &tid_mn_ops); if (ret) goto out_unmap; - /* - * FIXME: This is in the wrong order, the notifier should be - * established before the pages are pinned by pin_rcv_pages. - */ - mmu_interval_read_begin(&node->notifier); } fd->entry_to_rb[node->rcventry - uctxt->expected_base] = node; @@ -789,8 +834,7 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd, return -EFAULT; } -static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo, - struct tid_group **grp) +static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo) { struct hfi1_ctxtdata *uctxt = fd->uctxt; struct hfi1_devdata *dd = uctxt->dd; @@ -813,9 +857,6 @@ static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo, if (!node || node->rcventry != (uctxt->expected_base + rcventry)) return -EBADF; - if (grp) - *grp = node->grp; - if (fd->use_mn) mmu_interval_notifier_remove(&node->notifier); cacheless_tid_rb_remove(fd, node); @@ -823,23 +864,34 @@ static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo, return 0; } -static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node) +static void __clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node) { struct hfi1_ctxtdata *uctxt = fd->uctxt; struct hfi1_devdata *dd = uctxt->dd; + mutex_lock(&node->invalidate_mutex); + if (node->freed) + goto done; + node->freed = true; + trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry, node->npages, node->notifier.interval_tree.start, node->phys, node->dma_addr); - /* - * Make sure device has seen the write before we unpin the - * pages. 
- */ + /* Make sure device has seen the write before pages are unpinned */ hfi1_put_tid(dd, node->rcventry, PT_INVALID_FLUSH, 0, 0); unpin_rcv_pages(fd, NULL, node, 0, node->npages, true); +done: + mutex_unlock(&node->invalidate_mutex); +} + +static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node) +{ + struct hfi1_ctxtdata *uctxt = fd->uctxt; + + __clear_tid_node(fd, node); node->grp->used--; node->grp->map &= ~(1 << (node->rcventry - node->grp->base)); @@ -898,10 +950,16 @@ static bool tid_rb_invalidate(struct mmu_interval_notifier *mni, if (node->freed) return true; + /* take action only if unmapping */ + if (range->event != MMU_NOTIFY_UNMAP) + return true; + trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt, node->notifier.interval_tree.start, node->rcventry, node->npages, node->dma_addr); - node->freed = true; + + /* clear the hardware rcvarray entry */ + __clear_tid_node(fdata, node); spin_lock(&fdata->invalid_lock); if (fdata->invalid_tid_idx < uctxt->expected_count) { @@ -931,6 +989,23 @@ static bool tid_rb_invalidate(struct mmu_interval_notifier *mni, return true; } +static bool tid_cover_invalidate(struct mmu_interval_notifier *mni, + const struct mmu_notifier_range *range, + unsigned long cur_seq) +{ + struct tid_user_buf *tidbuf = + container_of(mni, struct tid_user_buf, notifier); + + /* take action only if unmapping */ + if (range->event == MMU_NOTIFY_UNMAP) { + mutex_lock(&tidbuf->cover_mutex); + mmu_interval_set_seq(mni, cur_seq); + mutex_unlock(&tidbuf->cover_mutex); + } + + return true; +} + static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata, struct tid_rb_node *tnode) { diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.h b/drivers/infiniband/hw/hfi1/user_exp_rcv.h index d45c7b6988d4d56ec284669d97bc2415361f199b..849f265f2f118b5148770c397e6d92d0fe4fb926 100644 --- a/drivers/infiniband/hw/hfi1/user_exp_rcv.h +++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.h @@ -57,6 +57,8 @@ struct tid_pageset { }; struct tid_user_buf { + struct mmu_interval_notifier notifier; + struct mutex cover_mutex; unsigned long vaddr; unsigned long length; unsigned int npages; @@ -68,6 +70,7 @@ struct tid_user_buf { struct tid_rb_node { struct mmu_interval_notifier notifier; struct hfi1_filedata *fdata; + struct mutex invalidate_mutex; /* covers hw removal */ unsigned long phys; struct tid_group *grp; u32 rcventry; diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c index 760b254ba42d6b7078eb7a9482cae3e92a26919f..48a57568cad69247838ead92ab914987871c6318 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.c +++ b/drivers/infiniband/hw/usnic/usnic_uiom.c @@ -281,8 +281,8 @@ static int usnic_uiom_map_sorted_intervals(struct list_head *intervals, size = pa_end - pa_start + PAGE_SIZE; usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x", va_start, &pa_start, size, flags); - err = iommu_map(pd->domain, va_start, pa_start, - size, flags); + err = iommu_map_atomic(pd->domain, va_start, + pa_start, size, flags); if (err) { usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n", va_start, &pa_start, size, err); @@ -298,8 +298,8 @@ static int usnic_uiom_map_sorted_intervals(struct list_head *intervals, size = pa - pa_start + PAGE_SIZE; usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n", va_start, &pa_start, size, flags); - err = iommu_map(pd->domain, va_start, pa_start, - size, flags); + err = iommu_map_atomic(pd->domain, va_start, + pa_start, size, flags); if (err) { usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx 
with err %d\n", va_start, &pa_start, size, err); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index abfab89423f41cae29a2b288f4e6163cb157426c..35322d23fc340ee27958d2555019415e03979bd6 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -2188,6 +2188,14 @@ int ipoib_intf_init(struct ib_device *hca, u8 port, const char *name, rn->attach_mcast = ipoib_mcast_attach; rn->detach_mcast = ipoib_mcast_detach; rn->hca = hca; + + rc = netif_set_real_num_tx_queues(dev, 1); + if (rc) + goto out; + + rc = netif_set_real_num_rx_queues(dev, 1); + if (rc) + goto out; } priv->rn_ops = dev->netdev_ops; diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c index 5c39e4c4bef7f75facf428561d2818596bb03e5b..1a5805260778bd8817cd630d12ad15ac5c0c385b 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c @@ -902,7 +902,7 @@ static void rtrs_clt_init_req(struct rtrs_clt_io_req *req, req->need_inv_comp = false; req->inv_errno = 0; - iov_iter_kvec(&iter, READ, vec, 1, usr_len); + iov_iter_kvec(&iter, WRITE, vec, 1, usr_len); len = _copy_from_iter(req->iu->buf, usr_len, &iter); WARN_ON(len != usr_len); diff --git a/drivers/input/misc/iqs269a.c b/drivers/input/misc/iqs269a.c index a348247d3d38fa6ea3412b164a7d766698dbb3bd..8b30c911f7899afab0d78103d9e71a600cf5b1de 100644 --- a/drivers/input/misc/iqs269a.c +++ b/drivers/input/misc/iqs269a.c @@ -9,6 +9,7 @@ * axial sliders presented by the device. */ +#include #include #include #include @@ -96,8 +97,6 @@ #define IQS269_MISC_B_TRACKING_UI_ENABLE BIT(4) #define IQS269_MISC_B_FILT_STR_SLIDER GENMASK(1, 0) -#define IQS269_CHx_SETTINGS 0x8C - #define IQS269_CHx_ENG_A_MEAS_CAP_SIZE BIT(15) #define IQS269_CHx_ENG_A_RX_GND_INACTIVE BIT(13) #define IQS269_CHx_ENG_A_LOCAL_CAP_SIZE BIT(12) @@ -146,14 +145,7 @@ #define IQS269_NUM_CH 8 #define IQS269_NUM_SL 2 -#define IQS269_ATI_POLL_SLEEP_US (iqs269->delay_mult * 10000) -#define IQS269_ATI_POLL_TIMEOUT_US (iqs269->delay_mult * 500000) -#define IQS269_ATI_STABLE_DELAY_MS (iqs269->delay_mult * 150) - -#define IQS269_PWR_MODE_POLL_SLEEP_US IQS269_ATI_POLL_SLEEP_US -#define IQS269_PWR_MODE_POLL_TIMEOUT_US IQS269_ATI_POLL_TIMEOUT_US - -#define iqs269_irq_wait() usleep_range(100, 150) +#define iqs269_irq_wait() usleep_range(200, 250) enum iqs269_local_cap_size { IQS269_LOCAL_CAP_SIZE_0, @@ -245,6 +237,18 @@ struct iqs269_ver_info { u8 padding; } __packed; +struct iqs269_ch_reg { + u8 rx_enable; + u8 tx_enable; + __be16 engine_a; + __be16 engine_b; + __be16 ati_comp; + u8 thresh[3]; + u8 hyst; + u8 assoc_select; + u8 assoc_weight; +} __packed; + struct iqs269_sys_reg { __be16 general; u8 active; @@ -266,18 +270,7 @@ struct iqs269_sys_reg { u8 timeout_swipe; u8 thresh_swipe; u8 redo_ati; -} __packed; - -struct iqs269_ch_reg { - u8 rx_enable; - u8 tx_enable; - __be16 engine_a; - __be16 engine_b; - __be16 ati_comp; - u8 thresh[3]; - u8 hyst; - u8 assoc_select; - u8 assoc_weight; + struct iqs269_ch_reg ch_reg[IQS269_NUM_CH]; } __packed; struct iqs269_flags { @@ -292,13 +285,11 @@ struct iqs269_private { struct regmap *regmap; struct mutex lock; struct iqs269_switch_desc switches[ARRAY_SIZE(iqs269_events)]; - struct iqs269_ch_reg ch_reg[IQS269_NUM_CH]; struct iqs269_sys_reg sys_reg; + struct completion ati_done; struct input_dev *keypad; struct input_dev *slider[IQS269_NUM_SL]; unsigned int keycode[ARRAY_SIZE(iqs269_events) * IQS269_NUM_CH]; - unsigned int 
suspend_mode; - unsigned int delay_mult; unsigned int ch_num; bool hall_enable; bool ati_current; @@ -307,6 +298,7 @@ struct iqs269_private { static int iqs269_ati_mode_set(struct iqs269_private *iqs269, unsigned int ch_num, unsigned int mode) { + struct iqs269_ch_reg *ch_reg = iqs269->sys_reg.ch_reg; u16 engine_a; if (ch_num >= IQS269_NUM_CH) @@ -317,12 +309,12 @@ static int iqs269_ati_mode_set(struct iqs269_private *iqs269, mutex_lock(&iqs269->lock); - engine_a = be16_to_cpu(iqs269->ch_reg[ch_num].engine_a); + engine_a = be16_to_cpu(ch_reg[ch_num].engine_a); engine_a &= ~IQS269_CHx_ENG_A_ATI_MODE_MASK; engine_a |= (mode << IQS269_CHx_ENG_A_ATI_MODE_SHIFT); - iqs269->ch_reg[ch_num].engine_a = cpu_to_be16(engine_a); + ch_reg[ch_num].engine_a = cpu_to_be16(engine_a); iqs269->ati_current = false; mutex_unlock(&iqs269->lock); @@ -333,13 +325,14 @@ static int iqs269_ati_mode_set(struct iqs269_private *iqs269, static int iqs269_ati_mode_get(struct iqs269_private *iqs269, unsigned int ch_num, unsigned int *mode) { + struct iqs269_ch_reg *ch_reg = iqs269->sys_reg.ch_reg; u16 engine_a; if (ch_num >= IQS269_NUM_CH) return -EINVAL; mutex_lock(&iqs269->lock); - engine_a = be16_to_cpu(iqs269->ch_reg[ch_num].engine_a); + engine_a = be16_to_cpu(ch_reg[ch_num].engine_a); mutex_unlock(&iqs269->lock); engine_a &= IQS269_CHx_ENG_A_ATI_MODE_MASK; @@ -351,6 +344,7 @@ static int iqs269_ati_mode_get(struct iqs269_private *iqs269, static int iqs269_ati_base_set(struct iqs269_private *iqs269, unsigned int ch_num, unsigned int base) { + struct iqs269_ch_reg *ch_reg = iqs269->sys_reg.ch_reg; u16 engine_b; if (ch_num >= IQS269_NUM_CH) @@ -379,12 +373,12 @@ static int iqs269_ati_base_set(struct iqs269_private *iqs269, mutex_lock(&iqs269->lock); - engine_b = be16_to_cpu(iqs269->ch_reg[ch_num].engine_b); + engine_b = be16_to_cpu(ch_reg[ch_num].engine_b); engine_b &= ~IQS269_CHx_ENG_B_ATI_BASE_MASK; engine_b |= base; - iqs269->ch_reg[ch_num].engine_b = cpu_to_be16(engine_b); + ch_reg[ch_num].engine_b = cpu_to_be16(engine_b); iqs269->ati_current = false; mutex_unlock(&iqs269->lock); @@ -395,13 +389,14 @@ static int iqs269_ati_base_set(struct iqs269_private *iqs269, static int iqs269_ati_base_get(struct iqs269_private *iqs269, unsigned int ch_num, unsigned int *base) { + struct iqs269_ch_reg *ch_reg = iqs269->sys_reg.ch_reg; u16 engine_b; if (ch_num >= IQS269_NUM_CH) return -EINVAL; mutex_lock(&iqs269->lock); - engine_b = be16_to_cpu(iqs269->ch_reg[ch_num].engine_b); + engine_b = be16_to_cpu(ch_reg[ch_num].engine_b); mutex_unlock(&iqs269->lock); switch (engine_b & IQS269_CHx_ENG_B_ATI_BASE_MASK) { @@ -429,6 +424,7 @@ static int iqs269_ati_base_get(struct iqs269_private *iqs269, static int iqs269_ati_target_set(struct iqs269_private *iqs269, unsigned int ch_num, unsigned int target) { + struct iqs269_ch_reg *ch_reg = iqs269->sys_reg.ch_reg; u16 engine_b; if (ch_num >= IQS269_NUM_CH) @@ -439,12 +435,12 @@ static int iqs269_ati_target_set(struct iqs269_private *iqs269, mutex_lock(&iqs269->lock); - engine_b = be16_to_cpu(iqs269->ch_reg[ch_num].engine_b); + engine_b = be16_to_cpu(ch_reg[ch_num].engine_b); engine_b &= ~IQS269_CHx_ENG_B_ATI_TARGET_MASK; engine_b |= target / 32; - iqs269->ch_reg[ch_num].engine_b = cpu_to_be16(engine_b); + ch_reg[ch_num].engine_b = cpu_to_be16(engine_b); iqs269->ati_current = false; mutex_unlock(&iqs269->lock); @@ -455,13 +451,14 @@ static int iqs269_ati_target_set(struct iqs269_private *iqs269, static int iqs269_ati_target_get(struct iqs269_private *iqs269, unsigned int ch_num, unsigned int 
*target) { + struct iqs269_ch_reg *ch_reg = iqs269->sys_reg.ch_reg; u16 engine_b; if (ch_num >= IQS269_NUM_CH) return -EINVAL; mutex_lock(&iqs269->lock); - engine_b = be16_to_cpu(iqs269->ch_reg[ch_num].engine_b); + engine_b = be16_to_cpu(ch_reg[ch_num].engine_b); mutex_unlock(&iqs269->lock); *target = (engine_b & IQS269_CHx_ENG_B_ATI_TARGET_MASK) * 32; @@ -531,13 +528,7 @@ static int iqs269_parse_chan(struct iqs269_private *iqs269, if (fwnode_property_present(ch_node, "azoteq,slider1-select")) iqs269->sys_reg.slider_select[1] |= BIT(reg); - ch_reg = &iqs269->ch_reg[reg]; - - error = regmap_raw_read(iqs269->regmap, - IQS269_CHx_SETTINGS + reg * sizeof(*ch_reg) / 2, - ch_reg, sizeof(*ch_reg)); - if (error) - return error; + ch_reg = &iqs269->sys_reg.ch_reg[reg]; error = iqs269_parse_mask(ch_node, "azoteq,rx-enable", &ch_reg->rx_enable); @@ -694,6 +685,7 @@ static int iqs269_parse_chan(struct iqs269_private *iqs269, dev_err(&client->dev, "Invalid channel %u threshold: %u\n", reg, val); + fwnode_handle_put(ev_node); return -EINVAL; } @@ -707,6 +699,7 @@ static int iqs269_parse_chan(struct iqs269_private *iqs269, dev_err(&client->dev, "Invalid channel %u hysteresis: %u\n", reg, val); + fwnode_handle_put(ev_node); return -EINVAL; } @@ -721,8 +714,16 @@ static int iqs269_parse_chan(struct iqs269_private *iqs269, } } - if (fwnode_property_read_u32(ev_node, "linux,code", &val)) + error = fwnode_property_read_u32(ev_node, "linux,code", &val); + fwnode_handle_put(ev_node); + if (error == -EINVAL) { continue; + } else if (error) { + dev_err(&client->dev, + "Failed to read channel %u code: %d\n", reg, + error); + return error; + } switch (reg) { case IQS269_CHx_HALL_ACTIVE: @@ -759,17 +760,6 @@ static int iqs269_parse_prop(struct iqs269_private *iqs269) iqs269->hall_enable = device_property_present(&client->dev, "azoteq,hall-enable"); - if (!device_property_read_u32(&client->dev, "azoteq,suspend-mode", - &val)) { - if (val > IQS269_SYS_SETTINGS_PWR_MODE_MAX) { - dev_err(&client->dev, "Invalid suspend mode: %u\n", - val); - return -EINVAL; - } - - iqs269->suspend_mode = val; - } - error = regmap_raw_read(iqs269->regmap, IQS269_SYS_SETTINGS, sys_reg, sizeof(*sys_reg)); if (error) @@ -980,13 +970,8 @@ static int iqs269_parse_prop(struct iqs269_private *iqs269) general = be16_to_cpu(sys_reg->general); - if (device_property_present(&client->dev, "azoteq,clk-div")) { + if (device_property_present(&client->dev, "azoteq,clk-div")) general |= IQS269_SYS_SETTINGS_CLK_DIV; - iqs269->delay_mult = 4; - } else { - general &= ~IQS269_SYS_SETTINGS_CLK_DIV; - iqs269->delay_mult = 1; - } /* * Configure the device to automatically switch between normal and low- @@ -997,6 +982,17 @@ static int iqs269_parse_prop(struct iqs269_private *iqs269) general &= ~IQS269_SYS_SETTINGS_DIS_AUTO; general &= ~IQS269_SYS_SETTINGS_PWR_MODE_MASK; + if (!device_property_read_u32(&client->dev, "azoteq,suspend-mode", + &val)) { + if (val > IQS269_SYS_SETTINGS_PWR_MODE_MAX) { + dev_err(&client->dev, "Invalid suspend mode: %u\n", + val); + return -EINVAL; + } + + general |= (val << IQS269_SYS_SETTINGS_PWR_MODE_SHIFT); + } + if (!device_property_read_u32(&client->dev, "azoteq,ulp-update", &val)) { if (val > IQS269_SYS_SETTINGS_ULP_UPDATE_MAX) { @@ -1032,10 +1028,7 @@ static int iqs269_parse_prop(struct iqs269_private *iqs269) static int iqs269_dev_init(struct iqs269_private *iqs269) { - struct iqs269_sys_reg *sys_reg = &iqs269->sys_reg; - struct iqs269_ch_reg *ch_reg; - unsigned int val; - int error, i; + int error; mutex_lock(&iqs269->lock); @@ 
-1045,38 +1038,17 @@ static int iqs269_dev_init(struct iqs269_private *iqs269) if (error) goto err_mutex; - for (i = 0; i < IQS269_NUM_CH; i++) { - if (!(sys_reg->active & BIT(i))) - continue; - - ch_reg = &iqs269->ch_reg[i]; - - error = regmap_raw_write(iqs269->regmap, - IQS269_CHx_SETTINGS + i * - sizeof(*ch_reg) / 2, ch_reg, - sizeof(*ch_reg)); - if (error) - goto err_mutex; - } - - /* - * The REDO-ATI and ATI channel selection fields must be written in the - * same block write, so every field between registers 0x80 through 0x8B - * (inclusive) must be written as well. - */ - error = regmap_raw_write(iqs269->regmap, IQS269_SYS_SETTINGS, sys_reg, - sizeof(*sys_reg)); + error = regmap_raw_write(iqs269->regmap, IQS269_SYS_SETTINGS, + &iqs269->sys_reg, sizeof(iqs269->sys_reg)); if (error) goto err_mutex; - error = regmap_read_poll_timeout(iqs269->regmap, IQS269_SYS_FLAGS, val, - !(val & IQS269_SYS_FLAGS_IN_ATI), - IQS269_ATI_POLL_SLEEP_US, - IQS269_ATI_POLL_TIMEOUT_US); - if (error) - goto err_mutex; + /* + * The following delay gives the device time to deassert its RDY output + * so as to prevent an interrupt from being serviced prematurely. + */ + usleep_range(2000, 2100); - msleep(IQS269_ATI_STABLE_DELAY_MS); iqs269->ati_current = true; err_mutex: @@ -1088,10 +1060,8 @@ static int iqs269_dev_init(struct iqs269_private *iqs269) static int iqs269_input_init(struct iqs269_private *iqs269) { struct i2c_client *client = iqs269->client; - struct iqs269_flags flags; unsigned int sw_code, keycode; int error, i, j; - u8 dir_mask, state; iqs269->keypad = devm_input_allocate_device(&client->dev); if (!iqs269->keypad) @@ -1104,23 +1074,7 @@ static int iqs269_input_init(struct iqs269_private *iqs269) iqs269->keypad->name = "iqs269a_keypad"; iqs269->keypad->id.bustype = BUS_I2C; - if (iqs269->hall_enable) { - error = regmap_raw_read(iqs269->regmap, IQS269_SYS_FLAGS, - &flags, sizeof(flags)); - if (error) { - dev_err(&client->dev, - "Failed to read initial status: %d\n", error); - return error; - } - } - for (i = 0; i < ARRAY_SIZE(iqs269_events); i++) { - dir_mask = flags.states[IQS269_ST_OFFS_DIR]; - if (!iqs269_events[i].dir_up) - dir_mask = ~dir_mask; - - state = flags.states[iqs269_events[i].st_offs] & dir_mask; - sw_code = iqs269->switches[i].code; for (j = 0; j < IQS269_NUM_CH; j++) { @@ -1133,13 +1087,9 @@ static int iqs269_input_init(struct iqs269_private *iqs269) switch (j) { case IQS269_CHx_HALL_ACTIVE: if (iqs269->hall_enable && - iqs269->switches[i].enabled) { + iqs269->switches[i].enabled) input_set_capability(iqs269->keypad, EV_SW, sw_code); - input_report_switch(iqs269->keypad, - sw_code, - state & BIT(j)); - } fallthrough; case IQS269_CHx_HALL_INACTIVE: @@ -1155,14 +1105,6 @@ static int iqs269_input_init(struct iqs269_private *iqs269) } } - input_sync(iqs269->keypad); - - error = input_register_device(iqs269->keypad); - if (error) { - dev_err(&client->dev, "Failed to register keypad: %d\n", error); - return error; - } - for (i = 0; i < IQS269_NUM_SL; i++) { if (!iqs269->sys_reg.slider_select[i]) continue; @@ -1222,6 +1164,9 @@ static int iqs269_report(struct iqs269_private *iqs269) return error; } + if (be16_to_cpu(flags.system) & IQS269_SYS_FLAGS_IN_ATI) + return 0; + error = regmap_raw_read(iqs269->regmap, IQS269_SLIDER_X, slider_x, sizeof(slider_x)); if (error) { @@ -1284,6 +1229,12 @@ static int iqs269_report(struct iqs269_private *iqs269) input_sync(iqs269->keypad); + /* + * The following completion signals that ATI has finished, any initial + * switch states have been reported and 
the keypad can be registered. + */ + complete_all(&iqs269->ati_done); + return 0; } @@ -1315,6 +1266,9 @@ static ssize_t counts_show(struct device *dev, if (!iqs269->ati_current || iqs269->hall_enable) return -EPERM; + if (!completion_done(&iqs269->ati_done)) + return -EBUSY; + /* * Unsolicited I2C communication prompts the device to assert its RDY * pin, so disable the interrupt line until the operation is finished @@ -1339,6 +1293,7 @@ static ssize_t hall_bin_show(struct device *dev, struct device_attribute *attr, char *buf) { struct iqs269_private *iqs269 = dev_get_drvdata(dev); + struct iqs269_ch_reg *ch_reg = iqs269->sys_reg.ch_reg; struct i2c_client *client = iqs269->client; unsigned int val; int error; @@ -1353,8 +1308,8 @@ static ssize_t hall_bin_show(struct device *dev, if (error) return error; - switch (iqs269->ch_reg[IQS269_CHx_HALL_ACTIVE].rx_enable & - iqs269->ch_reg[IQS269_CHx_HALL_INACTIVE].rx_enable) { + switch (ch_reg[IQS269_CHx_HALL_ACTIVE].rx_enable & + ch_reg[IQS269_CHx_HALL_INACTIVE].rx_enable) { case IQS269_HALL_PAD_R: val &= IQS269_CAL_DATA_A_HALL_BIN_R_MASK; val >>= IQS269_CAL_DATA_A_HALL_BIN_R_SHIFT; @@ -1434,9 +1389,10 @@ static ssize_t rx_enable_show(struct device *dev, struct device_attribute *attr, char *buf) { struct iqs269_private *iqs269 = dev_get_drvdata(dev); + struct iqs269_ch_reg *ch_reg = iqs269->sys_reg.ch_reg; return scnprintf(buf, PAGE_SIZE, "%u\n", - iqs269->ch_reg[iqs269->ch_num].rx_enable); + ch_reg[iqs269->ch_num].rx_enable); } static ssize_t rx_enable_store(struct device *dev, @@ -1444,6 +1400,7 @@ static ssize_t rx_enable_store(struct device *dev, size_t count) { struct iqs269_private *iqs269 = dev_get_drvdata(dev); + struct iqs269_ch_reg *ch_reg = iqs269->sys_reg.ch_reg; unsigned int val; int error; @@ -1456,7 +1413,7 @@ static ssize_t rx_enable_store(struct device *dev, mutex_lock(&iqs269->lock); - iqs269->ch_reg[iqs269->ch_num].rx_enable = val; + ch_reg[iqs269->ch_num].rx_enable = val; iqs269->ati_current = false; mutex_unlock(&iqs269->lock); @@ -1568,7 +1525,9 @@ static ssize_t ati_trigger_show(struct device *dev, { struct iqs269_private *iqs269 = dev_get_drvdata(dev); - return scnprintf(buf, PAGE_SIZE, "%u\n", iqs269->ati_current); + return scnprintf(buf, PAGE_SIZE, "%u\n", + iqs269->ati_current && + completion_done(&iqs269->ati_done)); } static ssize_t ati_trigger_store(struct device *dev, @@ -1588,6 +1547,7 @@ static ssize_t ati_trigger_store(struct device *dev, return count; disable_irq(client->irq); + reinit_completion(&iqs269->ati_done); error = iqs269_dev_init(iqs269); @@ -1597,6 +1557,10 @@ static ssize_t ati_trigger_store(struct device *dev, if (error) return error; + if (!wait_for_completion_timeout(&iqs269->ati_done, + msecs_to_jiffies(2000))) + return -ETIMEDOUT; + return count; } @@ -1655,6 +1619,7 @@ static int iqs269_probe(struct i2c_client *client) } mutex_init(&iqs269->lock); + init_completion(&iqs269->ati_done); error = regmap_raw_read(iqs269->regmap, IQS269_VER_INFO, &ver_info, sizeof(ver_info)); @@ -1690,6 +1655,22 @@ static int iqs269_probe(struct i2c_client *client) return error; } + if (!wait_for_completion_timeout(&iqs269->ati_done, + msecs_to_jiffies(2000))) { + dev_err(&client->dev, "Failed to complete ATI\n"); + return -ETIMEDOUT; + } + + /* + * The keypad may include one or more switches and is not registered + * until ATI is complete and the initial switch states are read. 
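+ * iqs269_report() reads the initial switch states, syncs the input device + * and then signals ati_done, so the wait above guarantees those states are + * valid by the time the keypad is registered here.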
+ */ + error = input_register_device(iqs269->keypad); + if (error) { + dev_err(&client->dev, "Failed to register keypad: %d\n", error); + return error; + } + error = devm_device_add_group(&client->dev, &iqs269_attr_group); if (error) dev_err(&client->dev, "Failed to add attributes: %d\n", error); @@ -1697,59 +1678,30 @@ static int iqs269_probe(struct i2c_client *client) return error; } +static u16 iqs269_general_get(struct iqs269_private *iqs269) +{ + u16 general = be16_to_cpu(iqs269->sys_reg.general); + + general &= ~IQS269_SYS_SETTINGS_REDO_ATI; + general &= ~IQS269_SYS_SETTINGS_ACK_RESET; + + return general | IQS269_SYS_SETTINGS_DIS_AUTO; +} + static int __maybe_unused iqs269_suspend(struct device *dev) { struct iqs269_private *iqs269 = dev_get_drvdata(dev); struct i2c_client *client = iqs269->client; - unsigned int val; int error; + u16 general = iqs269_general_get(iqs269); - if (!iqs269->suspend_mode) + if (!(general & IQS269_SYS_SETTINGS_PWR_MODE_MASK)) return 0; disable_irq(client->irq); - /* - * Automatic power mode switching must be disabled before the device is - * forced into any particular power mode. In this case, the device will - * transition into normal-power mode. - */ - error = regmap_update_bits(iqs269->regmap, IQS269_SYS_SETTINGS, - IQS269_SYS_SETTINGS_DIS_AUTO, ~0); - if (error) - goto err_irq; - - /* - * The following check ensures the device has completed its transition - * into normal-power mode before a manual mode switch is performed. - */ - error = regmap_read_poll_timeout(iqs269->regmap, IQS269_SYS_FLAGS, val, - !(val & IQS269_SYS_FLAGS_PWR_MODE_MASK), - IQS269_PWR_MODE_POLL_SLEEP_US, - IQS269_PWR_MODE_POLL_TIMEOUT_US); - if (error) - goto err_irq; - - error = regmap_update_bits(iqs269->regmap, IQS269_SYS_SETTINGS, - IQS269_SYS_SETTINGS_PWR_MODE_MASK, - iqs269->suspend_mode << - IQS269_SYS_SETTINGS_PWR_MODE_SHIFT); - if (error) - goto err_irq; + error = regmap_write(iqs269->regmap, IQS269_SYS_SETTINGS, general); - /* - * This last check ensures the device has completed its transition into - * the desired power mode to prevent any spurious interrupts from being - * triggered after iqs269_suspend has already returned. - */ - error = regmap_read_poll_timeout(iqs269->regmap, IQS269_SYS_FLAGS, val, - (val & IQS269_SYS_FLAGS_PWR_MODE_MASK) - == (iqs269->suspend_mode << - IQS269_SYS_FLAGS_PWR_MODE_SHIFT), - IQS269_PWR_MODE_POLL_SLEEP_US, - IQS269_PWR_MODE_POLL_TIMEOUT_US); - -err_irq: iqs269_irq_wait(); enable_irq(client->irq); @@ -1760,43 +1712,20 @@ static int __maybe_unused iqs269_resume(struct device *dev) { struct iqs269_private *iqs269 = dev_get_drvdata(dev); struct i2c_client *client = iqs269->client; - unsigned int val; int error; + u16 general = iqs269_general_get(iqs269); - if (!iqs269->suspend_mode) + if (!(general & IQS269_SYS_SETTINGS_PWR_MODE_MASK)) return 0; disable_irq(client->irq); - error = regmap_update_bits(iqs269->regmap, IQS269_SYS_SETTINGS, - IQS269_SYS_SETTINGS_PWR_MODE_MASK, 0); - if (error) - goto err_irq; - - /* - * This check ensures the device has returned to normal-power mode - * before automatic power mode switching is re-enabled. 
- */ - error = regmap_read_poll_timeout(iqs269->regmap, IQS269_SYS_FLAGS, val, - !(val & IQS269_SYS_FLAGS_PWR_MODE_MASK), - IQS269_PWR_MODE_POLL_SLEEP_US, - IQS269_PWR_MODE_POLL_TIMEOUT_US); - if (error) - goto err_irq; - - error = regmap_update_bits(iqs269->regmap, IQS269_SYS_SETTINGS, - IQS269_SYS_SETTINGS_DIS_AUTO, 0); - if (error) - goto err_irq; - - /* - * This step reports any events that may have been "swallowed" as a - * result of polling PWR_MODE (which automatically acknowledges any - * pending interrupts). - */ - error = iqs269_report(iqs269); + error = regmap_write(iqs269->regmap, IQS269_SYS_SETTINGS, + general & ~IQS269_SYS_SETTINGS_PWR_MODE_MASK); + if (!error) + error = regmap_write(iqs269->regmap, IQS269_SYS_SETTINGS, + general & ~IQS269_SYS_SETTINGS_DIS_AUTO); -err_irq: iqs269_irq_wait(); enable_irq(client->irq); diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index f1013b950d579047fb2e253408e7ccbae7a81137..82577095e175efd1969cdfbc40f9b2af8b6340f4 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -191,7 +191,6 @@ static const char * const smbus_pnp_ids[] = { "SYN3221", /* HP 15-ay000 */ "SYN323d", /* HP Spectre X360 13-w013dx */ "SYN3257", /* HP Envy 13-ad105ng */ - "SYN3286", /* HP Laptop 15-da3001TU */ NULL }; diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 148a7c5fd0e22be932e922c6ba641b6df361e7b2..65c0081838e3ddc326fd3ee6beb74f8393a2381c 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -67,25 +67,84 @@ static inline void i8042_write_command(int val) #include -static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { +#define SERIO_QUIRK_NOKBD BIT(0) +#define SERIO_QUIRK_NOAUX BIT(1) +#define SERIO_QUIRK_NOMUX BIT(2) +#define SERIO_QUIRK_FORCEMUX BIT(3) +#define SERIO_QUIRK_UNLOCK BIT(4) +#define SERIO_QUIRK_PROBE_DEFER BIT(5) +#define SERIO_QUIRK_RESET_ALWAYS BIT(6) +#define SERIO_QUIRK_RESET_NEVER BIT(7) +#define SERIO_QUIRK_DIECT BIT(8) +#define SERIO_QUIRK_DUMBKBD BIT(9) +#define SERIO_QUIRK_NOLOOP BIT(10) +#define SERIO_QUIRK_NOTIMEOUT BIT(11) +#define SERIO_QUIRK_KBDRESET BIT(12) +#define SERIO_QUIRK_DRITEK BIT(13) +#define SERIO_QUIRK_NOPNP BIT(14) + +/* Quirk table for different mainboards. Options similar or identical to i8042 + * module parameters. + * ORDERING IS IMPORTANT! The first match will be applied and the rest ignored. + * This allows entries to overwrite vendor-wide quirks on a per-device basis. + * Where this is irrelevant, entries are sorted case-sensitively by DMI_SYS_VENDOR + * and/or DMI_BOARD_VENDOR to make it easier to avoid duplicate entries. 
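+ * Multiple quirks can be combined for a single board by OR-ing the + * SERIO_QUIRK_* bits together in driver_data, as several entries below do.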
+ */ +static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { { - /* - * Arima-Rioworks HDAMB - - * AUX LOOP command does not raise AUX IRQ - */ .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "RIOWORKS"), - DMI_MATCH(DMI_BOARD_NAME, "HDAMB"), - DMI_MATCH(DMI_BOARD_VERSION, "Rev E"), + DMI_MATCH(DMI_SYS_VENDOR, "ALIENWARE"), + DMI_MATCH(DMI_PRODUCT_NAME, "Sentia"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* ASUS G1S */ .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc."), - DMI_MATCH(DMI_BOARD_NAME, "G1S"), - DMI_MATCH(DMI_BOARD_VERSION, "1.0"), + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X750LN"), + }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) + }, + { + /* Asus X450LCP */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_NEVER) + }, + { + /* ASUS ZenBook UX425UA */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX425UA"), + }, + .driver_data = (void *)(SERIO_QUIRK_PROBE_DEFER | SERIO_QUIRK_RESET_NEVER) + }, + { + /* ASUS ZenBook UM325UA */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"), + }, + .driver_data = (void *)(SERIO_QUIRK_PROBE_DEFER | SERIO_QUIRK_RESET_NEVER) + }, + /* + * On some Asus laptops, just running self tests cause problems. + */ + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */ + }, + .driver_data = (void *)(SERIO_QUIRK_RESET_NEVER) + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */ + }, + .driver_data = (void *)(SERIO_QUIRK_RESET_NEVER) }, { /* ASUS P65UP5 - AUX LOOP command does not raise AUX IRQ */ @@ -94,585 +153,681 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { DMI_MATCH(DMI_BOARD_NAME, "P/I-P65UP5"), DMI_MATCH(DMI_BOARD_VERSION, "REV 2.X"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { + /* ASUS G1S */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "X750LN"), + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc."), + DMI_MATCH(DMI_BOARD_NAME, "G1S"), + DMI_MATCH(DMI_BOARD_VERSION, "1.0"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Compaq"), - DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"), - DMI_MATCH(DMI_PRODUCT_VERSION, "8500"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { + /* Acer Aspire 5710 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Compaq"), - DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"), - DMI_MATCH(DMI_PRODUCT_VERSION, "DL760"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* Dell Embedded Box PC 3000 */ + /* Acer Aspire 7738 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7738"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* OQO Model 01 */ + /* Acer Aspire 5536 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "OQO"), - DMI_MATCH(DMI_PRODUCT_NAME, "ZEPTO"), - 
DMI_MATCH(DMI_PRODUCT_VERSION, "00"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5536"), + DMI_MATCH(DMI_PRODUCT_VERSION, "0100"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* ULI EV4873 - AUX LOOP does not work properly */ + /* + * Acer Aspire 5738z + * Touchpad stops working in mux mode when dis- + re-enabled + * with the touchpad enable/disable toggle hotkey + */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ULI"), - DMI_MATCH(DMI_PRODUCT_NAME, "EV4873"), - DMI_MATCH(DMI_PRODUCT_VERSION, "5a"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* Microsoft Virtual Machine */ + /* Acer Aspire One 150 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"), - DMI_MATCH(DMI_PRODUCT_VERSION, "VS2005R2"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, { - /* Medion MAM 2070 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Notebook"), - DMI_MATCH(DMI_PRODUCT_NAME, "MAM 2070"), - DMI_MATCH(DMI_PRODUCT_VERSION, "5a"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A114-31"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, { - /* Medion Akoya E7225 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Medion"), - DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"), - DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A314-31"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, { - /* Blue FB5601 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "blue"), - DMI_MATCH(DMI_PRODUCT_NAME, "FB5601"), - DMI_MATCH(DMI_PRODUCT_VERSION, "M606"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-31"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, { - /* Gigabyte M912 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), - DMI_MATCH(DMI_PRODUCT_NAME, "M912"), - DMI_MATCH(DMI_PRODUCT_VERSION, "01"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-132"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, { - /* Gigabyte M1022M netbook */ .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co.,Ltd."), - DMI_MATCH(DMI_BOARD_NAME, "M1022E"), - DMI_MATCH(DMI_BOARD_VERSION, "1.02"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-332"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, { - /* Gigabyte Spring Peak - defines wrong chassis type */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), - DMI_MATCH(DMI_PRODUCT_NAME, "Spring Peak"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-432"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, { - /* Gigabyte T1005 - defines wrong chassis type ("Other") */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), - DMI_MATCH(DMI_PRODUCT_NAME, "T1005"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate Spin B118-RN"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, + /* + * Some Wistron based laptops need us to explicitly enable the 'Dritek + * keyboard extension' to make their extra keys start generating scancodes. + * Originally, this was just confined to older laptops, but a few Acer laptops + * have turned up in 2007 that also need this again. 
+ */ { - /* Gigabyte T1005M/P - defines wrong chassis type ("Other") */ + /* Acer Aspire 5100 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), - DMI_MATCH(DMI_PRODUCT_NAME, "T1005M/P"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5100"), }, + .driver_data = (void *)(SERIO_QUIRK_DRITEK) }, { + /* Acer Aspire 5610 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"), - DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"), }, + .driver_data = (void *)(SERIO_QUIRK_DRITEK) }, { + /* Acer Aspire 5630 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"), - DMI_MATCH(DMI_PRODUCT_NAME, "C15B"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5630"), }, + .driver_data = (void *)(SERIO_QUIRK_DRITEK) }, { + /* Acer Aspire 5650 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"), - DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5650"), }, + .driver_data = (void *)(SERIO_QUIRK_DRITEK) }, - { } -}; - -/* - * Some Fujitsu notebooks are having trouble with touchpads if - * active multiplexing mode is activated. Luckily they don't have - * external PS/2 ports so we can safely disable it. - * ... apparently some Toshibas don't like MUX mode either and - * die horrible death on reboot. - */ -static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { { - /* Fujitsu Lifebook P7010/P7010D */ + /* Acer Aspire 5680 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "P7010"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5680"), }, + .driver_data = (void *)(SERIO_QUIRK_DRITEK) }, { - /* Fujitsu Lifebook P7010 */ + /* Acer Aspire 5720 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), - DMI_MATCH(DMI_PRODUCT_NAME, "0000000000"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5720"), }, + .driver_data = (void *)(SERIO_QUIRK_DRITEK) }, { - /* Fujitsu Lifebook P5020D */ + /* Acer Aspire 9110 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P Series"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 9110"), }, + .driver_data = (void *)(SERIO_QUIRK_DRITEK) }, { - /* Fujitsu Lifebook S2000 */ + /* Acer TravelMate 660 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S Series"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 660"), }, + .driver_data = (void *)(SERIO_QUIRK_DRITEK) }, { - /* Fujitsu Lifebook S6230 */ + /* Acer TravelMate 2490 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S6230"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"), }, + .driver_data = (void *)(SERIO_QUIRK_DRITEK) }, { - /* Fujitsu Lifebook T725 laptop */ + /* Acer TravelMate 4280 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T725"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4280"), }, + .driver_data = (void *)(SERIO_QUIRK_DRITEK) }, { - /* Fujitsu Lifebook U745 */ + /* Amoi M636/A737 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U745"), + 
DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "M636/A737 platform"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* Fujitsu T70H */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "FMVLT70H"), + DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"), + DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { - /* Fujitsu-Siemens Lifebook T3010 */ + /* Compal HEL80I */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), - DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T3010"), + DMI_MATCH(DMI_SYS_VENDOR, "COMPAL"), + DMI_MATCH(DMI_PRODUCT_NAME, "HEL80I"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* Fujitsu-Siemens Lifebook E4010 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), - DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E4010"), + DMI_MATCH(DMI_SYS_VENDOR, "Compaq"), + DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant"), + DMI_MATCH(DMI_PRODUCT_VERSION, "8500"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { - /* Fujitsu-Siemens Amilo Pro 2010 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), - DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pro V2010"), + DMI_MATCH(DMI_SYS_VENDOR, "Compaq"), + DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant"), + DMI_MATCH(DMI_PRODUCT_VERSION, "DL760"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { - /* Fujitsu-Siemens Amilo Pro 2030 */ + /* Advent 4211 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), - DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"), + DMI_MATCH(DMI_SYS_VENDOR, "DIXONSXP"), + DMI_MATCH(DMI_PRODUCT_NAME, "Advent 4211"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, { - /* - * No data is coming from the touchscreen unless KBC - * is in legacy mode. - */ - /* Panasonic CF-29 */ + /* Dell Embedded Box PC 3000 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Matsushita"), - DMI_MATCH(DMI_PRODUCT_NAME, "CF-29"), + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { - /* - * HP Pavilion DV4017EA - - * errors on MUX ports are reported without raising AUXDATA - * causing "spurious NAK" messages. - */ + /* Dell XPS M1530 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EA032EA#ABF)"), + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "XPS M1530"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* - * HP Pavilion ZT1000 - - * like DV4017EA does not raise AUXERR for errors on MUX ports. - */ + /* Dell Vostro 1510 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Notebook PC"), - DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook ZT1000"), + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* - * HP Pavilion DV4270ca - - * like DV4017EA does not raise AUXERR for errors on MUX ports. 
- */ + /* Dell Vostro V13 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EH476UA#ABL)"), + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_NOTIMEOUT) }, { + /* Dell Vostro 1320 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), - DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P10"), + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1320"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, { + /* Dell Vostro 1520 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), - DMI_MATCH(DMI_PRODUCT_NAME, "EQUIUM A110"), + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1520"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, { + /* Dell Vostro 1720 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), - DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE C850D"), + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1720"), + }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) + }, + { + /* Entroware Proteus */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Entroware"), + DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"), + DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS) }, + /* + * Some Fujitsu notebooks are having trouble with touchpads if + * active multiplexing mode is activated. Luckily they don't have + * external PS/2 ports so we can safely disable it. + * ... apparently some Toshibas don't like MUX mode either and + * die horrible death on reboot. + */ { + /* Fujitsu Lifebook P7010/P7010D */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ALIENWARE"), - DMI_MATCH(DMI_PRODUCT_NAME, "Sentia"), + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "P7010"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* Sharp Actius MM20 */ + /* Fujitsu Lifebook P5020D */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SHARP"), - DMI_MATCH(DMI_PRODUCT_NAME, "PC-MM20 Series"), + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P Series"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* Sony Vaio FS-115b */ + /* Fujitsu Lifebook S2000 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FS115B"), + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S Series"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* - * Sony Vaio FZ-240E - - * reset and GET ID commands issued via KBD port are - * sometimes being delivered to AUX3. - */ + /* Fujitsu Lifebook S6230 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ240E"), + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S6230"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* - * Most (all?) VAIOs do not have external PS/2 ports nor - * they implement active multiplexing properly, and - * MUX discovery usually messes up keyboard/touchpad. 
- */ + /* Fujitsu Lifebook T725 laptop */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), - DMI_MATCH(DMI_BOARD_NAME, "VAIO"), + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T725"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_NOTIMEOUT) }, { - /* Amoi M636/A737 */ + /* Fujitsu Lifebook U745 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "M636/A737 platform"), + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U745"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* Lenovo 3000 n100 */ + /* Fujitsu T70H */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_NAME, "076804U"), + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "FMVLT70H"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* Lenovo XiaoXin Air 12 */ + /* Fujitsu A544 laptop */ + /* https://bugzilla.redhat.com/show_bug.cgi?id=1111138 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_NAME, "80UN"), + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK A544"), }, + .driver_data = (void *)(SERIO_QUIRK_NOTIMEOUT) }, { + /* Fujitsu AH544 laptop */ + /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"), + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK AH544"), + }, + .driver_data = (void *)(SERIO_QUIRK_NOTIMEOUT) + }, + { + /* Fujitsu U574 laptop */ + /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"), + }, + .driver_data = (void *)(SERIO_QUIRK_NOTIMEOUT) + }, + { + /* Fujitsu UH554 laptop */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"), + }, + .driver_data = (void *)(SERIO_QUIRK_NOTIMEOUT) + }, + { + /* Fujitsu Lifebook P7010 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), + DMI_MATCH(DMI_PRODUCT_NAME, "0000000000"), + }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) + }, + { + /* Fujitsu-Siemens Lifebook T3010 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T3010"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* Acer Aspire 5710 */ + /* Fujitsu-Siemens Lifebook E4010 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"), + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E4010"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* Acer Aspire 7738 */ + /* Fujitsu-Siemens Amilo Pro 2010 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7738"), + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), + DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pro V2010"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* Gericom Bellagio */ + /* Fujitsu-Siemens Amilo Pro 2030 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Gericom"), - DMI_MATCH(DMI_PRODUCT_NAME, "N34AS6"), + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), + DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* IBM 2656 */ + /* Gigabyte M912 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "IBM"), - DMI_MATCH(DMI_PRODUCT_NAME, "2656"), + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + 
DMI_MATCH(DMI_PRODUCT_NAME, "M912"), + DMI_MATCH(DMI_PRODUCT_VERSION, "01"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { - /* Dell XPS M1530 */ + /* Gigabyte Spring Peak - defines wrong chassis type */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "XPS M1530"), + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "Spring Peak"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { - /* Compal HEL80I */ + /* Gigabyte T1005 - defines wrong chassis type ("Other") */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "COMPAL"), - DMI_MATCH(DMI_PRODUCT_NAME, "HEL80I"), + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "T1005"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { - /* Dell Vostro 1510 */ + /* Gigabyte T1005M/P - defines wrong chassis type ("Other") */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"), + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "T1005M/P"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, + /* + * Some laptops need keyboard reset before probing for the trackpad to get + * it detected, initialised & finally work. + */ { - /* Acer Aspire 5536 */ + /* Gigabyte P35 v2 - Elantech touchpad */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5536"), - DMI_MATCH(DMI_PRODUCT_VERSION, "0100"), + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "P35V2"), }, + .driver_data = (void *)(SERIO_QUIRK_KBDRESET) }, - { - /* Dell Vostro V13 */ + { + /* Aorus branded Gigabyte X3 Plus - Elantech touchpad */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"), + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "X3"), }, + .driver_data = (void *)(SERIO_QUIRK_KBDRESET) }, { - /* Newer HP Pavilion dv4 models */ + /* Gigabyte P34 - Elantech touchpad */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"), + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "P34"), }, + .driver_data = (void *)(SERIO_QUIRK_KBDRESET) }, { - /* Asus X450LCP */ + /* Gigabyte P57 - Elantech touchpad */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"), + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "P57"), }, + .driver_data = (void *)(SERIO_QUIRK_KBDRESET) }, { - /* Avatar AVIU-145A6 */ + /* Gericom Bellagio */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Intel"), - DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"), + DMI_MATCH(DMI_SYS_VENDOR, "Gericom"), + DMI_MATCH(DMI_PRODUCT_NAME, "N34AS6"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* TUXEDO BU1406 */ + /* Gigabyte M1022M netbook */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Notebook"), - DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"), + DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co.,Ltd."), + DMI_MATCH(DMI_BOARD_NAME, "M1022E"), + DMI_MATCH(DMI_BOARD_VERSION, "1.02"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { - /* Lenovo LaVie Z */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"), + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { /* - * Acer Aspire 5738z - * Touchpad stops working in mux mode when dis- + 
re-enabled - * with the touchpad enable/disable toggle hotkey + * HP Pavilion DV4017EA - + * errors on MUX ports are reported without raising AUXDATA + * causing "spurious NAK" messages. */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"), + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EA032EA#ABF)"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* Entroware Proteus */ + /* + * HP Pavilion ZT1000 - + * like DV4017EA does not raise AUXERR for errors on MUX ports. + */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Entroware"), - DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"), - DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"), + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Notebook PC"), + DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook ZT1000"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, - { } -}; - -static const struct dmi_system_id i8042_dmi_forcemux_table[] __initconst = { { /* - * Sony Vaio VGN-CS series require MUX or the touch sensor - * buttons will disturb touchpad operation + * HP Pavilion DV4270ca - + * like DV4017EA does not raise AUXERR for errors on MUX ports. */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "VGN-CS"), + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EH476UA#ABL)"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, - { } -}; - -/* - * On some Asus laptops, just running self tests cause problems. - */ -static const struct dmi_system_id i8042_dmi_noselftest_table[] = { { + /* Newer HP Pavilion dv4 models */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */ - }, - }, { - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */ + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_NOTIMEOUT) }, - { } -}; -static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { { - /* MSI Wind U-100 */ + /* IBM 2656 */ .matches = { - DMI_MATCH(DMI_BOARD_NAME, "U-100"), - DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"), + DMI_MATCH(DMI_SYS_VENDOR, "IBM"), + DMI_MATCH(DMI_PRODUCT_NAME, "2656"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* LG Electronics X110 */ + /* Avatar AVIU-145A6 */ .matches = { - DMI_MATCH(DMI_BOARD_NAME, "X110"), - DMI_MATCH(DMI_BOARD_VENDOR, "LG Electronics Inc."), + DMI_MATCH(DMI_SYS_VENDOR, "Intel"), + DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* Acer Aspire One 150 */ + /* Intel MBO Desktop D845PESV */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"), + DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"), + DMI_MATCH(DMI_BOARD_NAME, "D845PESV"), }, + .driver_data = (void *)(SERIO_QUIRK_NOPNP) }, { + /* + * Intel NUC D54250WYK - does not have i8042 controller but + * declares PS/2 devices in DSDT. 
+ */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A114-31"), + DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"), + DMI_MATCH(DMI_BOARD_NAME, "D54250WYK"), }, + .driver_data = (void *)(SERIO_QUIRK_NOPNP) }, { + /* Lenovo 3000 n100 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A314-31"), + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "076804U"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { + /* Lenovo XiaoXin Air 12 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-31"), + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "80UN"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { + /* Lenovo LaVie Z */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-132"), + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { + /* Lenovo Ideapad U455 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-332"), + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "20046"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, { + /* Lenovo ThinkPad L460 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-432"), + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L460"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, { + /* Lenovo ThinkPad Twist S230u */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate Spin B118-RN"), + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, { - /* Advent 4211 */ + /* LG Electronics X110 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "DIXONSXP"), - DMI_MATCH(DMI_PRODUCT_NAME, "Advent 4211"), + DMI_MATCH(DMI_BOARD_VENDOR, "LG Electronics Inc."), + DMI_MATCH(DMI_BOARD_NAME, "X110"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, { /* Medion Akoya Mini E1210 */ @@ -680,6 +835,7 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { DMI_MATCH(DMI_SYS_VENDOR, "MEDION"), DMI_MATCH(DMI_PRODUCT_NAME, "E1210"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, { /* Medion Akoya E1222 */ @@ -687,48 +843,62 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { DMI_MATCH(DMI_SYS_VENDOR, "MEDION"), DMI_MATCH(DMI_PRODUCT_NAME, "E122X"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, { - /* Mivvy M310 */ + /* MSI Wind U-100 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "VIOOO"), - DMI_MATCH(DMI_PRODUCT_NAME, "N10"), + DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"), + DMI_MATCH(DMI_BOARD_NAME, "U-100"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOPNP) }, { - /* Dell Vostro 1320 */ + /* + * No data is coming from the touchscreen unless KBC + * is in legacy mode. 
+ */ + /* Panasonic CF-29 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1320"), + DMI_MATCH(DMI_SYS_VENDOR, "Matsushita"), + DMI_MATCH(DMI_PRODUCT_NAME, "CF-29"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* Dell Vostro 1520 */ + /* Medion Akoya E7225 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1520"), + DMI_MATCH(DMI_SYS_VENDOR, "Medion"), + DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"), + DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { - /* Dell Vostro 1720 */ + /* Microsoft Virtual Machine */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1720"), + DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"), + DMI_MATCH(DMI_PRODUCT_VERSION, "VS2005R2"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { - /* Lenovo Ideapad U455 */ + /* Medion MAM 2070 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_NAME, "20046"), + DMI_MATCH(DMI_SYS_VENDOR, "Notebook"), + DMI_MATCH(DMI_PRODUCT_NAME, "MAM 2070"), + DMI_MATCH(DMI_PRODUCT_VERSION, "5a"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { - /* Lenovo ThinkPad L460 */ + /* TUXEDO BU1406 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L460"), + DMI_MATCH(DMI_SYS_VENDOR, "Notebook"), + DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */ @@ -736,282 +906,318 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { DMI_MATCH(DMI_SYS_VENDOR, "Notebook"), DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) + }, + { + /* OQO Model 01 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "OQO"), + DMI_MATCH(DMI_PRODUCT_NAME, "ZEPTO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "00"), + }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { - /* Lenovo ThinkPad Twist S230u */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"), + DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"), + DMI_MATCH(DMI_PRODUCT_NAME, "C15B"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { - /* Entroware Proteus */ + /* Acer Aspire 5 A515 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Entroware"), - DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"), - DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"), + DMI_MATCH(DMI_BOARD_VENDOR, "PK"), + DMI_MATCH(DMI_BOARD_NAME, "Grumpy_PK"), }, + .driver_data = (void *)(SERIO_QUIRK_NOPNP) }, - { } -}; - -#ifdef CONFIG_PNP -static const struct dmi_system_id __initconst i8042_dmi_nopnp_table[] = { { - /* Intel MBO Desktop D845PESV */ + /* ULI EV4873 - AUX LOOP does not work properly */ .matches = { - DMI_MATCH(DMI_BOARD_NAME, "D845PESV"), - DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"), + DMI_MATCH(DMI_SYS_VENDOR, "ULI"), + DMI_MATCH(DMI_PRODUCT_NAME, "EV4873"), + DMI_MATCH(DMI_PRODUCT_VERSION, "5a"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { /* - * Intel NUC D54250WYK - does not have i8042 controller but - * declares PS/2 devices in DSDT. 
+ * Arima-Rioworks HDAMB - + * AUX LOOP command does not raise AUX IRQ */ .matches = { - DMI_MATCH(DMI_BOARD_NAME, "D54250WYK"), - DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"), + DMI_MATCH(DMI_BOARD_VENDOR, "RIOWORKS"), + DMI_MATCH(DMI_BOARD_NAME, "HDAMB"), + DMI_MATCH(DMI_BOARD_VERSION, "Rev E"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, { - /* MSI Wind U-100 */ + /* Sharp Actius MM20 */ .matches = { - DMI_MATCH(DMI_BOARD_NAME, "U-100"), - DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"), + DMI_MATCH(DMI_SYS_VENDOR, "SHARP"), + DMI_MATCH(DMI_PRODUCT_NAME, "PC-MM20 Series"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* Acer Aspire 5 A515 */ + /* + * Sony Vaio FZ-240E - + * reset and GET ID commands issued via KBD port are + * sometimes being delivered to AUX3. + */ .matches = { - DMI_MATCH(DMI_BOARD_NAME, "Grumpy_PK"), - DMI_MATCH(DMI_BOARD_VENDOR, "PK"), + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ240E"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, - { } -}; - -static const struct dmi_system_id __initconst i8042_dmi_laptop_table[] = { { + /* + * Most (all?) VAIOs do not have external PS/2 ports nor + * they implement active multiplexing properly, and + * MUX discovery usually messes up keyboard/touchpad. + */ .matches = { - DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */ + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_BOARD_NAME, "VAIO"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { + /* Sony Vaio FS-115b */ .matches = { - DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /* Laptop */ + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FS115B"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { + /* + * Sony Vaio VGN-CS series require MUX or the touch sensor + * buttons will disturb touchpad operation + */ .matches = { - DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */ + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-CS"), }, + .driver_data = (void *)(SERIO_QUIRK_FORCEMUX) }, { .matches = { - DMI_MATCH(DMI_CHASSIS_TYPE, "14"), /* Sub-Notebook */ + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P10"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, - { } -}; -#endif - -static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = { { - /* Dell Vostro V13 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"), + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "EQUIUM A110"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, { - /* Newer HP Pavilion dv4 models */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"), + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE C850D"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) }, + /* + * A lot of modern Clevo barebones have touchpad and/or keyboard issues + * after suspend fixable with nomux + reset + noloop + nopnp. Luckily, + * none of them have an external PS/2 port so this can safely be set for + * all of them. These two are based on a Clevo design, but have the + * board_name changed. 
+ */ { - /* Fujitsu A544 laptop */ - /* https://bugzilla.redhat.com/show_bug.cgi?id=1111138 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK A544"), + DMI_MATCH(DMI_BOARD_VENDOR, "TUXEDO"), + DMI_MATCH(DMI_BOARD_NAME, "AURA1501"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) }, { - /* Fujitsu AH544 laptop */ - /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK AH544"), + DMI_MATCH(DMI_BOARD_VENDOR, "TUXEDO"), + DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) }, { - /* Fujitsu Lifebook T725 laptop */ + /* Mivvy M310 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T725"), + DMI_MATCH(DMI_SYS_VENDOR, "VIOOO"), + DMI_MATCH(DMI_PRODUCT_NAME, "N10"), }, + .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS) }, + /* + * Some laptops need keyboard reset before probing for the trackpad to get + * it detected, initialised & finally work. + */ { - /* Fujitsu U574 laptop */ - /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */ + /* Schenker XMG C504 - Elantech touchpad */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"), + DMI_MATCH(DMI_SYS_VENDOR, "XMG"), + DMI_MATCH(DMI_PRODUCT_NAME, "C504"), }, + .driver_data = (void *)(SERIO_QUIRK_KBDRESET) }, { - /* Fujitsu UH554 laptop */ + /* Blue FB5601 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"), + DMI_MATCH(DMI_SYS_VENDOR, "blue"), + DMI_MATCH(DMI_PRODUCT_NAME, "FB5601"), + DMI_MATCH(DMI_PRODUCT_VERSION, "M606"), }, + .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, - { } -}; - -/* - * Some Wistron based laptops need us to explicitly enable the 'Dritek - * keyboard extension' to make their extra keys start generating scancodes. - * Originally, this was just confined to older laptops, but a few Acer laptops - * have turned up in 2007 that also need this again. - */ -static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = { + /* + * A lot of modern Clevo barebones have touchpad and/or keyboard issues + * after suspend fixable with nomux + reset + noloop + nopnp. Luckily, + * none of them have an external PS/2 port so this can safely be set for + * all of them. + * Clevo barebones come with board_vendor and/or system_vendor set to + * either the very generic string "Notebook" and/or a different value + * for each individual reseller. The only somewhat universal way to + * identify them is by board_name. 
+ */ { - /* Acer Aspire 5100 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5100"), + DMI_MATCH(DMI_BOARD_NAME, "LAPQC71A"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) }, { - /* Acer Aspire 5610 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"), + DMI_MATCH(DMI_BOARD_NAME, "LAPQC71B"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) }, { - /* Acer Aspire 5630 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5630"), + DMI_MATCH(DMI_BOARD_NAME, "N140CU"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) }, { - /* Acer Aspire 5650 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5650"), + DMI_MATCH(DMI_BOARD_NAME, "N141CU"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) }, { - /* Acer Aspire 5680 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5680"), + DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) }, { - /* Acer Aspire 5720 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5720"), + DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) }, + /* + * At least one modern Clevo barebone has the touchpad connected both + * via PS/2 and i2c interface. This causes a race condition between the + * psmouse and i2c-hid driver. Since the full capability of the touchpad + * is available via the i2c interface and the device has no external + * PS/2 port, it is safe to just ignore all ps2 mouses here to avoid + * this issue. The known affected device is the + * TUXEDO InfinityBook S17 Gen6 / Clevo NS70MU which comes with one of + * the two different dmi strings below. NS50MU is not a typo! 
+ */ { - /* Acer Aspire 9110 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 9110"), + DMI_MATCH(DMI_BOARD_NAME, "NS50MU"), }, + .driver_data = (void *)(SERIO_QUIRK_NOAUX | SERIO_QUIRK_NOMUX | + SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP | + SERIO_QUIRK_NOPNP) }, { - /* Acer TravelMate 660 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 660"), + DMI_MATCH(DMI_BOARD_NAME, "NS50_70MU"), }, + .driver_data = (void *)(SERIO_QUIRK_NOAUX | SERIO_QUIRK_NOMUX | + SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP | + SERIO_QUIRK_NOPNP) }, { - /* Acer TravelMate 2490 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"), + DMI_MATCH(DMI_BOARD_NAME, "NJ50_70CU"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) }, { - /* Acer TravelMate 4280 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4280"), + DMI_MATCH(DMI_BOARD_NAME, "PB50_70DFx,DDx"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) }, - { } -}; - -/* - * Some laptops need keyboard reset before probing for the trackpad to get - * it detected, initialised & finally work. - */ -static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = { { - /* Gigabyte P35 v2 - Elantech touchpad */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), - DMI_MATCH(DMI_PRODUCT_NAME, "P35V2"), + DMI_MATCH(DMI_BOARD_NAME, "PCX0DX"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) }, - { - /* Aorus branded Gigabyte X3 Plus - Elantech touchpad */ + { .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), - DMI_MATCH(DMI_PRODUCT_NAME, "X3"), + DMI_MATCH(DMI_BOARD_NAME, "X170SM"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) }, { - /* Gigabyte P34 - Elantech touchpad */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), - DMI_MATCH(DMI_PRODUCT_NAME, "P34"), + DMI_MATCH(DMI_BOARD_NAME, "X170KM-G"), }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) }, + { } +}; + +#ifdef CONFIG_PNP +static const struct dmi_system_id i8042_dmi_laptop_table[] __initconst = { { - /* Gigabyte P57 - Elantech touchpad */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), - DMI_MATCH(DMI_PRODUCT_NAME, "P57"), + DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */ }, }, { - /* Schenker XMG C504 - Elantech touchpad */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "XMG"), - DMI_MATCH(DMI_PRODUCT_NAME, "C504"), + DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /* Laptop */ }, }, - { } -}; - -static const struct dmi_system_id i8042_dmi_probe_defer_table[] __initconst = { { - /* ASUS ZenBook UX425UA */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX425UA"), + DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */ }, }, { - /* ASUS ZenBook UM325UA */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"), + DMI_MATCH(DMI_CHASSIS_TYPE, "14"), /* Sub-Notebook */ }, }, { } }; +#endif #endif /* CONFIG_X86 */ @@ -1167,11 +1373,6 @@ static int __init i8042_pnp_init(void) bool pnp_data_busted = false; int err; -#ifdef CONFIG_X86 - if 
(dmi_check_system(i8042_dmi_nopnp_table)) - i8042_nopnp = true; -#endif - if (i8042_nopnp) { pr_info("PNP detection disabled\n"); return 0; @@ -1275,6 +1476,59 @@ static inline int i8042_pnp_init(void) { return 0; } static inline void i8042_pnp_exit(void) { } #endif /* CONFIG_PNP */ + +#ifdef CONFIG_X86 +static void __init i8042_check_quirks(void) +{ + const struct dmi_system_id *device_quirk_info; + uintptr_t quirks; + + device_quirk_info = dmi_first_match(i8042_dmi_quirk_table); + if (!device_quirk_info) + return; + + quirks = (uintptr_t)device_quirk_info->driver_data; + + if (quirks & SERIO_QUIRK_NOKBD) + i8042_nokbd = true; + if (quirks & SERIO_QUIRK_NOAUX) + i8042_noaux = true; + if (quirks & SERIO_QUIRK_NOMUX) + i8042_nomux = true; + if (quirks & SERIO_QUIRK_FORCEMUX) + i8042_nomux = false; + if (quirks & SERIO_QUIRK_UNLOCK) + i8042_unlock = true; + if (quirks & SERIO_QUIRK_PROBE_DEFER) + i8042_probe_defer = true; + /* Honor module parameter when value is not default */ + if (i8042_reset == I8042_RESET_DEFAULT) { + if (quirks & SERIO_QUIRK_RESET_ALWAYS) + i8042_reset = I8042_RESET_ALWAYS; + if (quirks & SERIO_QUIRK_RESET_NEVER) + i8042_reset = I8042_RESET_NEVER; + } + if (quirks & SERIO_QUIRK_DIECT) + i8042_direct = true; + if (quirks & SERIO_QUIRK_DUMBKBD) + i8042_dumbkbd = true; + if (quirks & SERIO_QUIRK_NOLOOP) + i8042_noloop = true; + if (quirks & SERIO_QUIRK_NOTIMEOUT) + i8042_notimeout = true; + if (quirks & SERIO_QUIRK_KBDRESET) + i8042_kbdreset = true; + if (quirks & SERIO_QUIRK_DRITEK) + i8042_dritek = true; +#ifdef CONFIG_PNP + if (quirks & SERIO_QUIRK_NOPNP) + i8042_nopnp = true; +#endif +} +#else +static inline void i8042_check_quirks(void) {} +#endif + static int __init i8042_platform_init(void) { int retval; @@ -1297,45 +1551,17 @@ static int __init i8042_platform_init(void) i8042_kbd_irq = I8042_MAP_IRQ(1); i8042_aux_irq = I8042_MAP_IRQ(12); - retval = i8042_pnp_init(); - if (retval) - return retval; - #if defined(__ia64__) - i8042_reset = I8042_RESET_ALWAYS; + i8042_reset = I8042_RESET_ALWAYS; #endif -#ifdef CONFIG_X86 - /* Honor module parameter when value is not default */ - if (i8042_reset == I8042_RESET_DEFAULT) { - if (dmi_check_system(i8042_dmi_reset_table)) - i8042_reset = I8042_RESET_ALWAYS; - - if (dmi_check_system(i8042_dmi_noselftest_table)) - i8042_reset = I8042_RESET_NEVER; - } - - if (dmi_check_system(i8042_dmi_noloop_table)) - i8042_noloop = true; - - if (dmi_check_system(i8042_dmi_nomux_table)) - i8042_nomux = true; - - if (dmi_check_system(i8042_dmi_forcemux_table)) - i8042_nomux = false; - - if (dmi_check_system(i8042_dmi_notimeout_table)) - i8042_notimeout = true; - - if (dmi_check_system(i8042_dmi_dritek_table)) - i8042_dritek = true; - - if (dmi_check_system(i8042_dmi_kbdreset_table)) - i8042_kbdreset = true; + i8042_check_quirks(); - if (dmi_check_system(i8042_dmi_probe_defer_table)) - i8042_probe_defer = true; + retval = i8042_pnp_init(); + if (retval) + return retval; +#ifdef CONFIG_X86 /* * A20 was already enabled during early kernel init. 
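For context on the hunks above: the patch folds the many per-symptom DMI tables into a single i8042_dmi_quirk_table whose entries carry a bitmask of quirks in driver_data, and i8042_check_quirks() decodes the first matching entry into the existing module knobs. The standalone sketch below (not part of the patch) shows the same table-plus-bitmask pattern in miniature; the QUIRK_* names, the strcmp() matcher and the trimmed flag sets are illustrative stand-ins for the kernel's SERIO_QUIRK_* masks and dmi_first_match(), not the driver code itself.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Illustrative flag bits, standing in for the SERIO_QUIRK_* masks. */
#define QUIRK_NOMUX        (1 << 0)
#define QUIRK_RESET_ALWAYS (1 << 1)
#define QUIRK_NOLOOP       (1 << 2)

struct quirk_entry {
        const char *board_name;   /* stand-in for the DMI match keys */
        void *driver_data;        /* bitmask carried through a void * */
};

static const struct quirk_entry quirk_table[] = {
        { "LAPQC71A", (void *)(uintptr_t)(QUIRK_NOMUX | QUIRK_RESET_ALWAYS | QUIRK_NOLOOP) },
        { "N140CU",   (void *)(uintptr_t)(QUIRK_NOMUX | QUIRK_RESET_ALWAYS) },
        { NULL, NULL }
};

/* First match wins, mirroring dmi_first_match() semantics. */
static const struct quirk_entry *first_match(const char *board)
{
        const struct quirk_entry *e;

        for (e = quirk_table; e->board_name; e++)
                if (!strcmp(e->board_name, board))
                        return e;
        return NULL;
}

int main(void)
{
        const struct quirk_entry *e = first_match("N140CU");
        uintptr_t quirks = e ? (uintptr_t)e->driver_data : 0;

        /* Decode each bit into its own knob, as i8042_check_quirks() does. */
        printf("nomux=%d reset_always=%d noloop=%d\n",
               !!(quirks & QUIRK_NOMUX),
               !!(quirks & QUIRK_RESET_ALWAYS),
               !!(quirks & QUIRK_NOLOOP));
        return 0;
}

First-match semantics also explain the table ordering above: a board-specific entry must come before a broader match (such as a chassis-type entry) that would otherwise shadow it.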
But some buggy * BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index ff97897feaf2a8bab9860a506c61051d3c1a6361..1753288cedde7e602ddd2a6f78e5ba4a42115693 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c @@ -63,19 +63,15 @@ /* this driver doesn't aim at the peak continuous sample rate */ #define SAMPLE_BITS (8 /*cmd*/ + 16 /*sample*/ + 2 /* before, after */) -struct ts_event { - /* - * For portability, we can't read 12 bit values using SPI (which - * would make the controller deliver them as native byte order u16 - * with msbs zeroed). Instead, we read them as two 8-bit values, - * *** WHICH NEED BYTESWAPPING *** and range adjustment. - */ - u16 x; - u16 y; - u16 z1, z2; - bool ignore; - u8 x_buf[3]; - u8 y_buf[3]; +struct ads7846_buf { + u8 cmd; + __be16 data; +} __packed; + +struct ads7846_buf_layout { + unsigned int offset; + unsigned int count; + unsigned int skip; }; /* @@ -84,11 +80,18 @@ struct ts_event { * systems where main memory is not DMA-coherent (most non-x86 boards). */ struct ads7846_packet { - u8 read_x, read_y, read_z1, read_z2, pwrdown; - u16 dummy; /* for the pwrdown read */ - struct ts_event tc; - /* for ads7845 with mpc5121 psc spi we use 3-byte buffers */ - u8 read_x_cmd[3], read_y_cmd[3], pwrdown_cmd[3]; + unsigned int count; + unsigned int count_skip; + unsigned int cmds; + unsigned int last_cmd_idx; + struct ads7846_buf_layout l[5]; + struct ads7846_buf *rx; + struct ads7846_buf *tx; + + struct ads7846_buf pwrdown_cmd; + + bool ignore; + u16 x, y, z1, z2; }; struct ads7846 { @@ -187,7 +190,6 @@ struct ads7846 { #define READ_Y(vref) (READ_12BIT_DFR(y, 1, vref)) #define READ_Z1(vref) (READ_12BIT_DFR(z1, 1, vref)) #define READ_Z2(vref) (READ_12BIT_DFR(z2, 1, vref)) - #define READ_X(vref) (READ_12BIT_DFR(x, 1, vref)) #define PWRDOWN (READ_12BIT_DFR(y, 0, 0)) /* LAST */ @@ -200,6 +202,21 @@ struct ads7846 { #define REF_ON (READ_12BIT_DFR(x, 1, 1)) #define REF_OFF (READ_12BIT_DFR(y, 0, 0)) +/* Order commands in the most optimal way to reduce Vref switching and + * settling time: + * Measure: X; Vref: X+, X-; IN: Y+ + * Measure: Y; Vref: Y+, Y-; IN: X+ + * Measure: Z1; Vref: Y+, X-; IN: X+ + * Measure: Z2; Vref: Y+, X-; IN: Y- + */ +enum ads7846_cmds { + ADS7846_X, + ADS7846_Y, + ADS7846_Z1, + ADS7846_Z2, + ADS7846_PWDOWN, +}; + static int get_pendown_state(struct ads7846 *ts) { if (ts->get_pendown_state) @@ -682,32 +699,109 @@ static int ads7846_no_filter(void *ads, int data_idx, int *val) return ADS7846_FILTER_OK; } -static int ads7846_get_value(struct ads7846 *ts, struct spi_message *m) +static int ads7846_get_value(struct ads7846_buf *buf) { int value; - struct spi_transfer *t = - list_entry(m->transfers.prev, struct spi_transfer, transfer_list); - if (ts->model == 7845) { - value = be16_to_cpup((__be16 *)&(((char *)t->rx_buf)[1])); - } else { - /* - * adjust: on-wire is a must-ignore bit, a BE12 value, then - * padding; built from two 8 bit values written msb-first. 
- */ - value = be16_to_cpup((__be16 *)t->rx_buf); - } + value = be16_to_cpup(&buf->data); /* enforce ADC output is 12 bits width */ return (value >> 3) & 0xfff; } -static void ads7846_update_value(struct spi_message *m, int val) +static void ads7846_set_cmd_val(struct ads7846 *ts, enum ads7846_cmds cmd_idx, + u16 val) { - struct spi_transfer *t = - list_entry(m->transfers.prev, struct spi_transfer, transfer_list); + struct ads7846_packet *packet = ts->packet; + + switch (cmd_idx) { + case ADS7846_Y: + packet->y = val; + break; + case ADS7846_X: + packet->x = val; + break; + case ADS7846_Z1: + packet->z1 = val; + break; + case ADS7846_Z2: + packet->z2 = val; + break; + default: + WARN_ON_ONCE(1); + } +} + +static u8 ads7846_get_cmd(enum ads7846_cmds cmd_idx, int vref) +{ + switch (cmd_idx) { + case ADS7846_Y: + return READ_Y(vref); + case ADS7846_X: + return READ_X(vref); + + /* 7846 specific commands */ + case ADS7846_Z1: + return READ_Z1(vref); + case ADS7846_Z2: + return READ_Z2(vref); + case ADS7846_PWDOWN: + return PWRDOWN; + default: + WARN_ON_ONCE(1); + } - *(u16 *)t->rx_buf = val; + return 0; +} + +static bool ads7846_cmd_need_settle(enum ads7846_cmds cmd_idx) +{ + switch (cmd_idx) { + case ADS7846_X: + case ADS7846_Y: + case ADS7846_Z1: + case ADS7846_Z2: + return true; + case ADS7846_PWDOWN: + return false; + default: + WARN_ON_ONCE(1); + } + + return false; +} + +static int ads7846_filter(struct ads7846 *ts) +{ + struct ads7846_packet *packet = ts->packet; + int action; + int val; + unsigned int cmd_idx, b; + + packet->ignore = false; + for (cmd_idx = packet->last_cmd_idx; cmd_idx < packet->cmds - 1; cmd_idx++) { + struct ads7846_buf_layout *l = &packet->l[cmd_idx]; + + packet->last_cmd_idx = cmd_idx; + + for (b = l->skip; b < l->count; b++) { + val = ads7846_get_value(&packet->rx[l->offset + b]); + + action = ts->filter(ts->filter_data, cmd_idx, &val); + if (action == ADS7846_FILTER_REPEAT) { + if (b == l->count - 1) + return -EAGAIN; + } else if (action == ADS7846_FILTER_OK) { + ads7846_set_cmd_val(ts, cmd_idx, val); + break; + } else { + packet->ignore = true; + return 0; + } + } + } + + return 0; } static void ads7846_read_state(struct ads7846 *ts) @@ -715,52 +809,26 @@ static void ads7846_read_state(struct ads7846 *ts) struct ads7846_packet *packet = ts->packet; struct spi_message *m; int msg_idx = 0; - int val; - int action; int error; - while (msg_idx < ts->msg_count) { + packet->last_cmd_idx = 0; + while (true) { ts->wait_for_sync(); m = &ts->msg[msg_idx]; error = spi_sync(ts->spi, m); if (error) { dev_err(&ts->spi->dev, "spi_sync --> %d\n", error); - packet->tc.ignore = true; + packet->ignore = true; return; } - /* - * Last message is power down request, no need to convert - * or filter the value. 
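As a side note on ads7846_get_value() in the hunk above: with the new 3-byte ads7846_buf, the big-endian 16-bit word following the command byte carries one leading busy bit, the 12-bit conversion result MSB-first, and trailing padding, so the sample sits in bits 14..3 and the code shifts right by three and masks to 12 bits. The small sketch below (not part of the patch) walks through that extraction with a made-up wire value; the helper name and example number are illustrative only.

#include <stdio.h>
#include <stdint.h>

/*
 * Mirror of the extraction in ads7846_get_value(): the argument is the
 * wire word after be16_to_cpu(), i.e. busy bit, 12 data bits, 3 pad bits.
 */
static unsigned int extract_sample(uint16_t word)
{
        return (word >> 3) & 0xfff;
}

int main(void)
{
        /* Example sample 0x5A3 framed as busy=0, data=0101 1010 0011, pad=000. */
        uint16_t word = 0x5A3 << 3;     /* 0x2D18 */

        printf("raw word 0x%04x -> sample 0x%03x\n", word, extract_sample(word));
        return 0;
}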
- */ - if (msg_idx < ts->msg_count - 1) { - - val = ads7846_get_value(ts, m); + error = ads7846_filter(ts); + if (error) + continue; - action = ts->filter(ts->filter_data, msg_idx, &val); - switch (action) { - case ADS7846_FILTER_REPEAT: - continue; - - case ADS7846_FILTER_IGNORE: - packet->tc.ignore = true; - msg_idx = ts->msg_count - 1; - continue; - - case ADS7846_FILTER_OK: - ads7846_update_value(m, val); - packet->tc.ignore = false; - msg_idx++; - break; - - default: - BUG(); - } - } else { - msg_idx++; - } + return; } } @@ -770,35 +838,22 @@ static void ads7846_report_state(struct ads7846 *ts) unsigned int Rt; u16 x, y, z1, z2; - /* - * ads7846_get_value() does in-place conversion (including byte swap) - * from on-the-wire format as part of debouncing to get stable - * readings. - */ + x = packet->x; + y = packet->y; if (ts->model == 7845) { - x = *(u16 *)packet->tc.x_buf; - y = *(u16 *)packet->tc.y_buf; z1 = 0; z2 = 0; } else { - x = packet->tc.x; - y = packet->tc.y; - z1 = packet->tc.z1; - z2 = packet->tc.z2; + z1 = packet->z1; + z2 = packet->z2; } /* range filtering */ if (x == MAX_12BIT) x = 0; - if (ts->model == 7843) { + if (ts->model == 7843 || ts->model == 7845) { Rt = ts->pressure_max / 2; - } else if (ts->model == 7845) { - if (get_pendown_state(ts)) - Rt = ts->pressure_max / 2; - else - Rt = 0; - dev_vdbg(&ts->spi->dev, "x/y: %d/%d, PD %d\n", x, y, Rt); } else if (likely(x && z1)) { /* compute touch pressure resistance using equation #2 */ Rt = z2; @@ -817,9 +872,9 @@ static void ads7846_report_state(struct ads7846 *ts) * the maximum. Don't report it to user space, repeat at least * once more the measurement */ - if (packet->tc.ignore || Rt > ts->pressure_max) { + if (packet->ignore || Rt > ts->pressure_max) { dev_vdbg(&ts->spi->dev, "ignored %d pressure %d\n", - packet->tc.ignore, Rt); + packet->ignore, Rt); return; } @@ -980,13 +1035,62 @@ static int ads7846_setup_pendown(struct spi_device *spi, * Set up the transfers to read touchscreen state; this assumes we * use formula #2 for pressure, not #3. */ -static void ads7846_setup_spi_msg(struct ads7846 *ts, +static int ads7846_setup_spi_msg(struct ads7846 *ts, const struct ads7846_platform_data *pdata) { struct spi_message *m = &ts->msg[0]; struct spi_transfer *x = ts->xfer; struct ads7846_packet *packet = ts->packet; int vref = pdata->keep_vref_on; + unsigned int count, offset = 0; + unsigned int cmd_idx, b; + unsigned long time; + size_t size = 0; + + /* time per bit */ + time = NSEC_PER_SEC / ts->spi->max_speed_hz; + + count = pdata->settle_delay_usecs * NSEC_PER_USEC / time; + packet->count_skip = DIV_ROUND_UP(count, 24); + + if (ts->debounce_max && ts->debounce_rep) + /* ads7846_debounce_filter() is making ts->debounce_rep + 2 + * reads. So we need to get all samples for normal case. 
*/ + packet->count = ts->debounce_rep + 2; + else + packet->count = 1; + + if (ts->model == 7846) + packet->cmds = 5; /* x, y, z1, z2, pwdown */ + else + packet->cmds = 3; /* x, y, pwdown */ + + for (cmd_idx = 0; cmd_idx < packet->cmds; cmd_idx++) { + struct ads7846_buf_layout *l = &packet->l[cmd_idx]; + unsigned int max_count; + + if (cmd_idx == packet->cmds - 1) + cmd_idx = ADS7846_PWDOWN; + + if (ads7846_cmd_need_settle(cmd_idx)) + max_count = packet->count + packet->count_skip; + else + max_count = packet->count; + + l->offset = offset; + offset += max_count; + l->count = max_count; + l->skip = packet->count_skip; + size += sizeof(*packet->tx) * max_count; + } + + packet->tx = devm_kzalloc(&ts->spi->dev, size, GFP_KERNEL); + if (!packet->tx) + return -ENOMEM; + + packet->rx = devm_kzalloc(&ts->spi->dev, size, GFP_KERNEL); + if (!packet->rx) + return -ENOMEM; if (ts->model == 7873) { /* @@ -1002,185 +1106,25 @@ static void ads7846_setup_spi_msg(struct ads7846 *ts, spi_message_init(m); m->context = ts; - if (ts->model == 7845) { - packet->read_y_cmd[0] = READ_Y(vref); - packet->read_y_cmd[1] = 0; - packet->read_y_cmd[2] = 0; - x->tx_buf = &packet->read_y_cmd[0]; - x->rx_buf = &packet->tc.y_buf[0]; - x->len = 3; - spi_message_add_tail(x, m); - } else { - /* y- still on; turn on only y+ (and ADC) */ - packet->read_y = READ_Y(vref); - x->tx_buf = &packet->read_y; - x->len = 1; - spi_message_add_tail(x, m); - - x++; - x->rx_buf = &packet->tc.y; - x->len = 2; - spi_message_add_tail(x, m); - } - - /* - * The first sample after switching drivers can be low quality; - * optionally discard it, using a second one after the signals - * have had enough time to stabilize. - */ - if (pdata->settle_delay_usecs) { - x->delay.value = pdata->settle_delay_usecs; - x->delay.unit = SPI_DELAY_UNIT_USECS; - - x++; - x->tx_buf = &packet->read_y; - x->len = 1; - spi_message_add_tail(x, m); - - x++; - x->rx_buf = &packet->tc.y; - x->len = 2; - spi_message_add_tail(x, m); - } - - ts->msg_count++; - m++; - spi_message_init(m); - m->context = ts; - - if (ts->model == 7845) { - x++; - packet->read_x_cmd[0] = READ_X(vref); - packet->read_x_cmd[1] = 0; - packet->read_x_cmd[2] = 0; - x->tx_buf = &packet->read_x_cmd[0]; - x->rx_buf = &packet->tc.x_buf[0]; - x->len = 3; - spi_message_add_tail(x, m); - } else { - /* turn y- off, x+ on, then leave in lowpower */ - x++; - packet->read_x = READ_X(vref); - x->tx_buf = &packet->read_x; - x->len = 1; - spi_message_add_tail(x, m); - - x++; - x->rx_buf = &packet->tc.x; - x->len = 2; - spi_message_add_tail(x, m); - } + for (cmd_idx = 0; cmd_idx < packet->cmds; cmd_idx++) { + struct ads7846_buf_layout *l = &packet->l[cmd_idx]; + u8 cmd; - /* ... maybe discard first sample ... */ - if (pdata->settle_delay_usecs) { - x->delay.value = pdata->settle_delay_usecs; - x->delay.unit = SPI_DELAY_UNIT_USECS; + if (cmd_idx == packet->cmds - 1) + cmd_idx = ADS7846_PWDOWN; - x++; - x->tx_buf = &packet->read_x; - x->len = 1; - spi_message_add_tail(x, m); + cmd = ads7846_get_cmd(cmd_idx, vref); - x++; - x->rx_buf = &packet->tc.x; - x->len = 2; - spi_message_add_tail(x, m); + for (b = 0; b < l->count; b++) + packet->tx[l->offset + b].cmd = cmd; } - /* turn y+ off, x- on; we'll use formula #2 */ - if (ts->model == 7846) { - ts->msg_count++; - m++; - spi_message_init(m); - m->context = ts; - - x++; - packet->read_z1 = READ_Z1(vref); - x->tx_buf = &packet->read_z1; - x->len = 1; - spi_message_add_tail(x, m); - - x++; - x->rx_buf = &packet->tc.z1; - x->len = 2; - spi_message_add_tail(x, m); - - /* ... 
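To make the arithmetic at the top of ads7846_setup_spi_msg() concrete: the board's settle_delay_usecs is converted into SPI bit times and then into whole 24-bit (3-byte) ads7846_buf transfers to throw away per command. The worked example below (not part of the patch) reuses the patch's formula with made-up clock and delay values; only the formula is taken from the code, the numbers are illustrative.

#include <stdio.h>

#define NSEC_PER_SEC   1000000000UL
#define NSEC_PER_USEC  1000UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long max_speed_hz = 2000000;     /* example: 2 MHz SPI clock */
        unsigned long settle_delay_usecs = 300;   /* example board setting */

        unsigned long time = NSEC_PER_SEC / max_speed_hz;                 /* 500 ns per bit */
        unsigned long count = settle_delay_usecs * NSEC_PER_USEC / time;  /* 600 bit times */
        unsigned long count_skip = DIV_ROUND_UP(count, 24);               /* 24 bits per buf */

        printf("skip %lu extra transfers per command to cover %lu us of settling\n",
               count_skip, settle_delay_usecs);
        return 0;
}

With these example numbers the driver would queue 25 extra throw-away reads per settling command; l->skip then makes ads7846_filter() start at the first sample taken after the signals have settled.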
maybe discard first sample ... */ - if (pdata->settle_delay_usecs) { - x->delay.value = pdata->settle_delay_usecs; - x->delay.unit = SPI_DELAY_UNIT_USECS; - - x++; - x->tx_buf = &packet->read_z1; - x->len = 1; - spi_message_add_tail(x, m); - - x++; - x->rx_buf = &packet->tc.z1; - x->len = 2; - spi_message_add_tail(x, m); - } - - ts->msg_count++; - m++; - spi_message_init(m); - m->context = ts; - - x++; - packet->read_z2 = READ_Z2(vref); - x->tx_buf = &packet->read_z2; - x->len = 1; - spi_message_add_tail(x, m); - - x++; - x->rx_buf = &packet->tc.z2; - x->len = 2; - spi_message_add_tail(x, m); - - /* ... maybe discard first sample ... */ - if (pdata->settle_delay_usecs) { - x->delay.value = pdata->settle_delay_usecs; - x->delay.unit = SPI_DELAY_UNIT_USECS; - - x++; - x->tx_buf = &packet->read_z2; - x->len = 1; - spi_message_add_tail(x, m); - - x++; - x->rx_buf = &packet->tc.z2; - x->len = 2; - spi_message_add_tail(x, m); - } - } - - /* power down */ - ts->msg_count++; - m++; - spi_message_init(m); - m->context = ts; - - if (ts->model == 7845) { - x++; - packet->pwrdown_cmd[0] = PWRDOWN; - packet->pwrdown_cmd[1] = 0; - packet->pwrdown_cmd[2] = 0; - x->tx_buf = &packet->pwrdown_cmd[0]; - x->len = 3; - } else { - x++; - packet->pwrdown = PWRDOWN; - x->tx_buf = &packet->pwrdown; - x->len = 1; - spi_message_add_tail(x, m); - - x++; - x->rx_buf = &packet->dummy; - x->len = 2; - } - - CS_CHANGE(*x); + x->tx_buf = packet->tx; + x->rx_buf = packet->rx; + x->len = size; spi_message_add_tail(x, m); + + return 0; } #ifdef CONFIG_OF @@ -1381,8 +1325,9 @@ static int ads7846_probe(struct spi_device *spi) pdata->y_min ? : 0, pdata->y_max ? : MAX_12BIT, 0, 0); - input_set_abs_params(input_dev, ABS_PRESSURE, - pdata->pressure_min, pdata->pressure_max, 0, 0); + if (ts->model != 7845) + input_set_abs_params(input_dev, ABS_PRESSURE, + pdata->pressure_min, pdata->pressure_max, 0, 0); /* * Parse common framework properties. 
Must be done here to ensure the diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index ce822347f7470ba55a798983cbb43f7692440fc0..603f625a74e5472dd143dcce4e1d51bcbff095f1 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -3124,15 +3124,26 @@ static int __init parse_ivrs_hpet(char *str) return 1; } +#define ACPIID_LEN (ACPIHID_UID_LEN + ACPIHID_HID_LEN) + static int __init parse_ivrs_acpihid(char *str) { u32 seg = 0, bus, dev, fn; char *hid, *uid, *p, *addr; - char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0}; + char acpiid[ACPIID_LEN] = {0}; int i; addr = strchr(str, '@'); if (!addr) { + addr = strchr(str, '='); + if (!addr) + goto not_found; + + ++addr; + + if (strlen(addr) > ACPIID_LEN) + goto not_found; + if (sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid) == 4 || sscanf(str, "[%x:%x:%x.%x]=%s", &seg, &bus, &dev, &fn, acpiid) == 5) { pr_warn("ivrs_acpihid%s option format deprecated; use ivrs_acpihid=%s@%04x:%02x:%02x.%d instead\n", @@ -3145,6 +3156,9 @@ static int __init parse_ivrs_acpihid(char *str) /* We have the '@', make it the terminator to get just the acpiid */ *addr++ = 0; + if (strlen(str) > ACPIID_LEN + 1) + goto not_found; + if (sscanf(str, "=%s", acpiid) != 1) goto not_found; diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c index 86fd49ae7f612d7c64d42a59fa16a7a0c670715b..80d6412e2c5463fe170615ca2f94dc6cd1015ede 100644 --- a/drivers/iommu/intel/pasid.c +++ b/drivers/iommu/intel/pasid.c @@ -24,7 +24,6 @@ /* * Intel IOMMU system wide PASID name space: */ -static DEFINE_SPINLOCK(pasid_lock); u32 intel_pasid_max_id = PASID_MAX; int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid) @@ -187,6 +186,9 @@ int intel_pasid_alloc_table(struct device *dev) attach_out: device_attach_pasid_table(info, pasid_table); + if (!ecap_coherent(info->iommu->ecap)) + clflush_cache_range(pasid_table->table, size); + return 0; } @@ -259,19 +261,29 @@ struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid) dir_index = pasid >> PASID_PDE_SHIFT; index = pasid & PASID_PTE_MASK; - spin_lock(&pasid_lock); +retry: entries = get_pasid_table_from_pde(&dir[dir_index]); if (!entries) { entries = alloc_pgtable_page(info->iommu->node); - if (!entries) { - spin_unlock(&pasid_lock); + if (!entries) return NULL; - } - WRITE_ONCE(dir[dir_index].val, - (u64)virt_to_phys(entries) | PASID_PTE_PRESENT); + /* + * The pasid directory table entry won't be freed after + * allocation. No worry about the race with free and + * clear. However, this entry might be populated by others + * while we are preparing it. Use theirs with a retry. 
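The comment above belongs to intel_pasid_get_entry(), where the removed pasid_lock is replaced by an allocate-then-publish scheme: the new directory entry is installed with cmpxchg64() (see the call just below), and a loser of the race frees its copy and retries with the winner's table. The sketch that follows (not part of the patch) shows the same publish-or-retry idiom in plain C11 atomics; the type and function names are made up for illustration, and the kernel code additionally flushes caches on non-coherent IOMMUs, which is omitted here.

#include <stdatomic.h>
#include <stdlib.h>
#include <stdio.h>

struct table { int entries[64]; };

static _Atomic(struct table *) slot;    /* stand-in for dir[dir_index].val */

static struct table *get_or_install(void)
{
        struct table *cur, *mine, *expected;

        for (;;) {
                cur = atomic_load(&slot);
                if (cur)
                        return cur;             /* someone already populated it */

                mine = calloc(1, sizeof(*mine));
                if (!mine)
                        return NULL;

                /* Publish only if the slot is still empty; otherwise we lost. */
                expected = NULL;
                if (atomic_compare_exchange_strong(&slot, &expected, mine))
                        return mine;

                free(mine);                     /* loser frees its copy and retries */
        }
}

int main(void)
{
        struct table *t = get_or_install();

        printf("table at %p\n", (void *)t);
        free(t);
        return 0;
}

The slot is written at most once and never torn, which is why the directory entry can safely be read later without taking a lock.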
+ */ + if (cmpxchg64(&dir[dir_index].val, 0ULL, + (u64)virt_to_phys(entries) | PASID_PTE_PRESENT)) { + free_pgtable_page(entries); + goto retry; + } + if (!ecap_coherent(info->iommu->ecap)) { + clflush_cache_range(entries, VTD_PAGE_SIZE); + clflush_cache_range(&dir[dir_index].val, sizeof(*dir)); + } } - spin_unlock(&pasid_lock); return &entries[index]; } diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c index ede02dc2bcd0b9cdb811a9c679aa412568a6eba6..1819bb1d27230a6d4a09ccba7f6d07c6a2e7c9a9 100644 --- a/drivers/irqchip/irq-alpine-msi.c +++ b/drivers/irqchip/irq-alpine-msi.c @@ -199,6 +199,7 @@ static int alpine_msix_init_domains(struct alpine_msix_data *priv, } gic_domain = irq_find_host(gic_node); + of_node_put(gic_node); if (!gic_domain) { pr_err("Failed to find the GIC domain\n"); return -ENXIO; diff --git a/drivers/irqchip/irq-aspeed-vic.c b/drivers/irqchip/irq-aspeed-vic.c index 6567ed782f82c95ae08c470b5f9c01ba78806c16..58717cd44f99f1545fab75a56bb4fecad43f1906 100644 --- a/drivers/irqchip/irq-aspeed-vic.c +++ b/drivers/irqchip/irq-aspeed-vic.c @@ -71,7 +71,7 @@ static void vic_init_hw(struct aspeed_vic *vic) writel(0, vic->base + AVIC_INT_SELECT); writel(0, vic->base + AVIC_INT_SELECT + 4); - /* Some interrupts have a programable high/low level trigger + /* Some interrupts have a programmable high/low level trigger * (4 GPIO direct inputs), for now we assume this was configured * by firmware. We read which ones are edge now. */ @@ -203,7 +203,7 @@ static int __init avic_of_init(struct device_node *node, } vic->base = regs; - /* Initialize soures, all masked */ + /* Initialize sources, all masked */ vic_init_hw(vic); /* Ready to receive interrupts */ diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c index c7c9e976acbb9f06e32cb8246a0bfda05073f122..1c2c5bd5a9fc1ccb24c8a305ad51eeeb6bf72fe1 100644 --- a/drivers/irqchip/irq-bcm7120-l2.c +++ b/drivers/irqchip/irq-bcm7120-l2.c @@ -273,7 +273,8 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn, flags |= IRQ_GC_BE_IO; ret = irq_alloc_domain_generic_chips(data->domain, IRQS_PER_WORD, 1, - dn->full_name, handle_level_irq, clr, 0, flags); + dn->full_name, handle_level_irq, clr, + IRQ_LEVEL, flags); if (ret) { pr_err("failed to allocate generic irq chip\n"); goto out_free_domain; @@ -309,7 +310,7 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn, if (data->can_wake) { /* This IRQ chip can wake the system, set all - * relevant child interupts in wake_enabled mask + * relevant child interrupts in wake_enabled mask */ gc->wake_enabled = 0xffffffff; gc->wake_enabled &= ~gc->unused; diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c index cdd6a42d4efa48cb404448d67cc55d4a698643a2..a4aee16db5314f94a472b0516fa5b83531a6dfc5 100644 --- a/drivers/irqchip/irq-brcmstb-l2.c +++ b/drivers/irqchip/irq-brcmstb-l2.c @@ -161,6 +161,7 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np, *init_params) { unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; + unsigned int set = 0; struct brcmstb_l2_intc_data *data; struct irq_chip_type *ct; int ret; @@ -208,9 +209,12 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np, if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) flags |= IRQ_GC_BE_IO; + if (init_params->handler == handle_level_irq) + set |= IRQ_LEVEL; + /* Allocate a single Generic IRQ chip for this node */ ret = irq_alloc_domain_generic_chips(data->domain, 32, 1, - np->full_name, 
init_params->handler, clr, 0, flags); + np->full_name, init_params->handler, clr, set, flags); if (ret) { pr_err("failed to allocate generic irq chip\n"); goto out_free_domain; diff --git a/drivers/irqchip/irq-csky-apb-intc.c b/drivers/irqchip/irq-csky-apb-intc.c index 5a2ec43b7ddd49311be11d71fd8dc998e926934e..ab91afa867557bf9c13221091ef185d66f2b12c3 100644 --- a/drivers/irqchip/irq-csky-apb-intc.c +++ b/drivers/irqchip/irq-csky-apb-intc.c @@ -176,7 +176,7 @@ gx_intc_init(struct device_node *node, struct device_node *parent) writel(0x0, reg_base + GX_INTC_NEN63_32); /* - * Initial mask reg with all unmasked, because we only use enalbe reg + * Initial mask reg with all unmasked, because we only use enable reg */ writel(0x0, reg_base + GX_INTC_NMASK31_00); writel(0x0, reg_base + GX_INTC_NMASK63_32); diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c index fbec07d634ad28855d870d97ec51e51e100e3ffe..4116b48e60aff8710ccf7e4472c6a6e79a4f0724 100644 --- a/drivers/irqchip/irq-gic-v2m.c +++ b/drivers/irqchip/irq-gic-v2m.c @@ -371,7 +371,7 @@ static int __init gicv2m_init_one(struct fwnode_handle *fwnode, * the MSI data is the absolute value within the range from * spi_start to (spi_start + num_spis). * - * Broadom NS2 GICv2m implementation has an erratum where the MSI data + * Broadcom NS2 GICv2m implementation has an erratum where the MSI data * is 'spi_number - 32' * * Reading that register fails on the Graviton implementation diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index d8cb5bcd6b10e3982c31a0cfedfaecd772cd1d2b..5ec091c64d47fcae68f40fbec1cf8385190519e0 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -1492,7 +1492,7 @@ static void its_vlpi_set_doorbell(struct irq_data *d, bool enable) * * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI * value or to 1023, depending on the enable bit. But that - * would be issueing a mapping for an /existing/ DevID+EventID + * would be issuing a mapping for an /existing/ DevID+EventID * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI * to the /same/ vPE, using this opportunity to adjust the * doorbell. Mouahahahaha. We loves it, Precious. @@ -3122,7 +3122,7 @@ static void its_cpu_init_lpis(void) /* * It's possible for CPU to receive VLPIs before it is - * sheduled as a vPE, especially for the first CPU, and the + * scheduled as a vPE, especially for the first CPU, and the * VLPI with INTID larger than 2^(IDbits+1) will be considered * as out of range and dropped by GIC. * So we initialize IDbits to known value to avoid VLPI drop. @@ -3613,7 +3613,7 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, /* * If all interrupts have been freed, start mopping the - * floor. This is conditionned on the device not being shared. + * floor. This is conditioned on the device not being shared. */ if (!its_dev->shared && bitmap_empty(its_dev->event_map.lpi_map, @@ -4187,7 +4187,7 @@ static int its_sgi_set_affinity(struct irq_data *d, { /* * There is no notion of affinity for virtual SGIs, at least - * not on the host (since they can only be targetting a vPE). + * not on the host (since they can only be targeting a vPE). * Tell the kernel we've done whatever it asked for. */ irq_data_update_effective_affinity(d, mask_val); @@ -4232,7 +4232,7 @@ static int its_sgi_get_irqchip_state(struct irq_data *d, /* * Locking galore! 
We can race against two different events: * - * - Concurent vPE affinity change: we must make sure it cannot + * - Concurrent vPE affinity change: we must make sure it cannot * happen, or we'll talk to the wrong redistributor. This is * identical to what happens with vLPIs. * diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 4c8f18f0cecf896f57b269c708fc23d9260d3e81..2805969e4f15a510fa9e3e15f5f0bc7f147975d3 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -1456,7 +1456,7 @@ static int gic_irq_domain_translate(struct irq_domain *d, /* * Make it clear that broken DTs are... broken. - * Partitionned PPIs are an unfortunate exception. + * Partitioned PPIs are an unfortunate exception. */ WARN_ON(*type == IRQ_TYPE_NONE && fwspec->param[0] != GIC_IRQ_TYPE_PARTITION); diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c index 90e1ad6e361202db1bda036b5a34b50e7cf60d72..a4eb8a2181c7f1c0b5c23bc8756c0588c34e4c6a 100644 --- a/drivers/irqchip/irq-loongson-pch-pic.c +++ b/drivers/irqchip/irq-loongson-pch-pic.c @@ -180,7 +180,7 @@ static void pch_pic_reset(struct pch_pic *priv) int i; for (i = 0; i < PIC_COUNT; i++) { - /* Write vectore ID */ + /* Write vectored ID */ writeb(priv->ht_vec_base + i, priv->base + PCH_INT_HTVEC(i)); /* Hardcode route to HT0 Lo */ writeb(1, priv->base + PCH_INT_ROUTE(i)); diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c index bc7aebcc96e9cdd1f31eecc435ca9454a40acacc..e50676ce2ec84b3b59dc6b5a1562c82fde87151c 100644 --- a/drivers/irqchip/irq-meson-gpio.c +++ b/drivers/irqchip/irq-meson-gpio.c @@ -227,7 +227,7 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl, /* * Get the hwirq number assigned to this channel through - * a pointer the channel_irq table. The added benifit of this + * a pointer the channel_irq table. The added benefit of this * method is that we can also retrieve the channel index with * it, using the table base. 
*/ diff --git a/drivers/irqchip/irq-mtk-cirq.c b/drivers/irqchip/irq-mtk-cirq.c index 69ba8ce3c17856be68c9330323f59acb00d55f78..9bca0918078e88cb242ec0d8f873dc9e25a15101 100644 --- a/drivers/irqchip/irq-mtk-cirq.c +++ b/drivers/irqchip/irq-mtk-cirq.c @@ -217,7 +217,7 @@ static void mtk_cirq_resume(void) { u32 value; - /* flush recored interrupts, will send signals to parent controller */ + /* flush recorded interrupts, will send signals to parent controller */ value = readl_relaxed(cirq_data->base + CIRQ_CONTROL); writel_relaxed(value | CIRQ_FLUSH, cirq_data->base + CIRQ_CONTROL); diff --git a/drivers/irqchip/irq-mvebu-gicp.c b/drivers/irqchip/irq-mvebu-gicp.c index 3be5c5dba1dab0a503b07dd3274d9eebbeabf5aa..5caec411059f53d4318e3d4b1523d54cd34c1efb 100644 --- a/drivers/irqchip/irq-mvebu-gicp.c +++ b/drivers/irqchip/irq-mvebu-gicp.c @@ -223,6 +223,7 @@ static int mvebu_gicp_probe(struct platform_device *pdev) } parent_domain = irq_find_host(irq_parent_dn); + of_node_put(irq_parent_dn); if (!parent_domain) { dev_err(&pdev->dev, "failed to find parent IRQ domain\n"); return -ENODEV; diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c index a671938fd97f6fd25cb2055db3ee52d44c64a3cd..d1f5740cd5755cbc4858e9ff4dbd2d09e86f3eb1 100644 --- a/drivers/irqchip/irq-mxs.c +++ b/drivers/irqchip/irq-mxs.c @@ -58,7 +58,7 @@ struct icoll_priv { static struct icoll_priv icoll_priv; static struct irq_domain *icoll_domain; -/* calculate bit offset depending on number of intterupt per register */ +/* calculate bit offset depending on number of interrupt per register */ static u32 icoll_intr_bitshift(struct irq_data *d, u32 bit) { /* @@ -68,7 +68,7 @@ static u32 icoll_intr_bitshift(struct irq_data *d, u32 bit) return bit << ((d->hwirq & 3) << 3); } -/* calculate mem offset depending on number of intterupt per register */ +/* calculate mem offset depending on number of interrupt per register */ static void __iomem *icoll_intr_reg(struct irq_data *d) { /* offset = hwirq / intr_per_reg * 0x10 */ diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c index fb78d6623556c15f065b2053275caa45b838673e..9ea94456b178ccce9fa39ae12d54f7ff50c10a60 100644 --- a/drivers/irqchip/irq-sun4i.c +++ b/drivers/irqchip/irq-sun4i.c @@ -189,7 +189,7 @@ static void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs) * 3) spurious irq * So if we immediately get a reading of 0, check the irq-pending reg * to differentiate between 2 and 3. We only do this once to avoid - * the extra check in the common case of 1 hapening after having + * the extra check in the common case of 1 happening after having * read the vector-reg once. */ hwirq = readl(irq_ic_data->irq_base + SUN4I_IRQ_VECTOR_REG) >> 2; diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c index 532d0ae172d9f8f47b9ae7f398f7087a3c8f74be..ca1f593f4d13aa2fd39db911cb230d818fa365ec 100644 --- a/drivers/irqchip/irq-ti-sci-inta.c +++ b/drivers/irqchip/irq-ti-sci-inta.c @@ -78,7 +78,7 @@ struct ti_sci_inta_vint_desc { * struct ti_sci_inta_irq_domain - Structure representing a TISCI based * Interrupt Aggregator IRQ domain. * @sci: Pointer to TISCI handle - * @vint: TISCI resource pointer representing IA inerrupts. + * @vint: TISCI resource pointer representing IA interrupts. * @global_event: TISCI resource pointer representing global events. 
* @vint_list: List of the vints active in the system * @vint_mutex: Mutex to protect vint_list diff --git a/drivers/irqchip/irq-ti-sci-intr.c b/drivers/irqchip/irq-ti-sci-intr.c index fe8fad22bcf9625b8007f744940bc882cc052063..020ddf29efb8058ae69c109dcdde0364b4f5be82 100644 --- a/drivers/irqchip/irq-ti-sci-intr.c +++ b/drivers/irqchip/irq-ti-sci-intr.c @@ -236,6 +236,7 @@ static int ti_sci_intr_irq_domain_probe(struct platform_device *pdev) } parent_domain = irq_find_host(parent_node); + of_node_put(parent_node); if (!parent_domain) { dev_err(dev, "Failed to find IRQ parent domain\n"); return -ENODEV; diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c index e46036374227297ed9aa1185724df3a735d8bbec..62f3d29f90420e56674f79b10a8f78e1ce5acecd 100644 --- a/drivers/irqchip/irq-vic.c +++ b/drivers/irqchip/irq-vic.c @@ -163,7 +163,7 @@ static struct syscore_ops vic_syscore_ops = { }; /** - * vic_pm_init - initicall to register VIC pm + * vic_pm_init - initcall to register VIC pm * * This is called via late_initcall() to register * the resources for the VICs due to the early @@ -397,7 +397,7 @@ static void __init vic_clear_interrupts(void __iomem *base) /* * The PL190 cell from ARM has been modified by ST to handle 64 interrupts. * The original cell has 32 interrupts, while the modified one has 64, - * replocating two blocks 0x00..0x1f in 0x20..0x3f. In that case + * replicating two blocks 0x00..0x1f in 0x20..0x3f. In that case * the probe function is called twice, with base set to offset 000 * and 020 within the page. We call this "second block". */ diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c index 1d3d273309bd3a561510298093f9d3b67703d62a..8cd1bfc7305722f1777502f2c21f88b4be205508 100644 --- a/drivers/irqchip/irq-xilinx-intc.c +++ b/drivers/irqchip/irq-xilinx-intc.c @@ -210,7 +210,7 @@ static int __init xilinx_intc_of_init(struct device_node *intc, /* * Disable all external interrupts until they are - * explicity requested. + * explicitly requested. */ xintc_write(irqc, IER, 0); diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c index 3570f0a588c4b0f6ceb561ab15e287fc23b5ba5a..7899607fbee8d289f503bf583e177dd821d70510 100644 --- a/drivers/irqchip/irqchip.c +++ b/drivers/irqchip/irqchip.c @@ -38,8 +38,10 @@ int platform_irqchip_probe(struct platform_device *pdev) struct device_node *par_np = of_irq_find_parent(np); of_irq_init_cb_t irq_init_cb = of_device_get_match_data(&pdev->dev); - if (!irq_init_cb) + if (!irq_init_cb) { + of_node_put(par_np); return -EINVAL; + } if (par_np == np) par_np = NULL; @@ -52,8 +54,10 @@ int platform_irqchip_probe(struct platform_device *pdev) * interrupt controller. The actual initialization callback of this * interrupt controller can check for specific domains as necessary. 
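Several of the irqchip hunks above (alpine-msi, mvebu-gicp, ti-sci-intr and the generic irqchip.c probe) share one fix: a device_node obtained from an OF lookup must be released with of_node_put() on every path, including the early error returns. A hedged sketch of the shape those fixes take, with a hypothetical wrapper function standing in for the individual drivers:

#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>

static int example_find_parent_domain(struct device_node *np,
				      struct irq_domain **domain)
{
	struct device_node *parent = of_irq_find_parent(np); /* takes a reference */
	int ret = 0;

	if (!parent)
		return -ENODEV;

	*domain = irq_find_host(parent);
	if (!*domain)
		ret = -EPROBE_DEFER;

	/* Drop the reference on every path, success or failure. */
	of_node_put(parent);
	return ret;
}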
*/ - if (par_np && !irq_find_matching_host(par_np, DOMAIN_BUS_ANY)) + if (par_np && !irq_find_matching_host(par_np, DOMAIN_BUS_ANY)) { + of_node_put(par_np); return -EPROBE_DEFER; + } return irq_init_cb(np, par_np); } diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index 4365c1cc4505ffa9b7851e92aa1273c9899de293..fcb9eee3b6097750b701cc1f3ed9b67c00bd46e1 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c @@ -236,14 +236,17 @@ struct led_classdev *of_led_get(struct device_node *np, int index) led_dev = class_find_device_by_of_node(leds_class, led_node); of_node_put(led_node); + put_device(led_dev); if (!led_dev) return ERR_PTR(-EPROBE_DEFER); led_cdev = dev_get_drvdata(led_dev); - if (!try_module_get(led_cdev->dev->parent->driver->owner)) + if (!try_module_get(led_cdev->dev->parent->driver->owner)) { + put_device(led_cdev->dev); return ERR_PTR(-ENODEV); + } return led_cdev; } @@ -256,6 +259,7 @@ EXPORT_SYMBOL_GPL(of_led_get); void led_put(struct led_classdev *led_cdev) { module_put(led_cdev->dev->parent->driver->owner); + put_device(led_cdev->dev); } EXPORT_SYMBOL_GPL(led_put); diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c index 29f48c2028b6ddf8879abc9bcb65882a43ca9d05..e90ad1b78e9363fcf33538ed5ee07141c0964921 100644 --- a/drivers/macintosh/windfarm_lm75_sensor.c +++ b/drivers/macintosh/windfarm_lm75_sensor.c @@ -34,8 +34,8 @@ #endif struct wf_lm75_sensor { - int ds1775 : 1; - int inited : 1; + unsigned int ds1775 : 1; + unsigned int inited : 1; struct i2c_client *i2c; struct wf_sensor sens; }; diff --git a/drivers/macintosh/windfarm_smu_sensors.c b/drivers/macintosh/windfarm_smu_sensors.c index c8706cfb83fd8e96845c84c2c42cc675df702823..714c1e14074edffbb802e90e29e1087c0f91c451 100644 --- a/drivers/macintosh/windfarm_smu_sensors.c +++ b/drivers/macintosh/windfarm_smu_sensors.c @@ -273,8 +273,8 @@ struct smu_cpu_power_sensor { struct list_head link; struct wf_sensor *volts; struct wf_sensor *amps; - int fake_volts : 1; - int quadratic : 1; + unsigned int fake_volts : 1; + unsigned int quadratic : 1; struct wf_sensor sens; }; #define to_smu_cpu_power(c) container_of(c, struct smu_cpu_power_sensor, sens) diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 9b2aec30980108ea5cad4f56066a7ed4d99b45e0..f98ad4366301b5681c85057e1c3edc31efd6f22f 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -1883,6 +1883,7 @@ static void process_deferred_bios(struct work_struct *ws) else commit_needed = process_bio(cache, bio) || commit_needed; + cond_resched(); } if (commit_needed) @@ -1905,6 +1906,7 @@ static void requeue_deferred_bios(struct cache *cache) while ((bio = bio_list_pop(&bios))) { bio->bi_status = BLK_STS_DM_REQUEUE; bio_endio(bio); + cond_resched(); } } @@ -1945,6 +1947,8 @@ static void check_migrations(struct work_struct *ws) r = mg_start(cache, op, NULL); if (r) break; + + cond_resched(); } } diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index a2cc9e45cbba1639ede3cf51d324a4ec4685d2f1..36a4ef51ecaa8103597d689ce68fad2f598041f0 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -301,8 +301,11 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc) */ bio_for_each_segment(bvec, bio, iter) { if (bio_iter_len(bio, iter) > corrupt_bio_byte) { - char *segment = (page_address(bio_iter_page(bio, iter)) - + bio_iter_offset(bio, iter)); + char *segment; + struct page *page = bio_iter_page(bio, iter); + if (unlikely(page == 
ZERO_PAGE(0))) + break; + segment = (page_address(page) + bio_iter_offset(bio, iter)); segment[corrupt_bio_byte] = fc->corrupt_bio_value; DMDEBUG("Corrupting data bio=%p by writing %u to byte %u " "(rw=%c bi_opf=%u bi_sector=%llu size=%u)\n", @@ -359,9 +362,11 @@ static int flakey_map(struct dm_target *ti, struct bio *bio) /* * Corrupt matching writes. */ - if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == WRITE)) { - if (all_corrupt_bio_flags_match(bio, fc)) - corrupt_bio_data(bio, fc); + if (fc->corrupt_bio_byte) { + if (fc->corrupt_bio_rw == WRITE) { + if (all_corrupt_bio_flags_match(bio, fc)) + corrupt_bio_data(bio, fc); + } goto map_bio; } @@ -387,13 +392,14 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, return DM_ENDIO_DONE; if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) { - if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) && - all_corrupt_bio_flags_match(bio, fc)) { - /* - * Corrupt successful matching READs while in down state. - */ - corrupt_bio_data(bio, fc); - + if (fc->corrupt_bio_byte) { + if ((fc->corrupt_bio_rw == READ) && + all_corrupt_bio_flags_match(bio, fc)) { + /* + * Corrupt successful matching READs while in down state. + */ + corrupt_bio_data(bio, fc); + } } else if (!test_bit(DROP_WRITES, &fc->flags) && !test_bit(ERROR_WRITES, &fc->flags)) { /* diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index f3c519e18a12f990b9fc0570b348fe6c5183d852..c890bb3e5185208e61d13a0b52826fa417f9e517 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -2217,6 +2217,7 @@ static void process_thin_deferred_bios(struct thin_c *tc) throttle_work_update(&pool->throttle); dm_pool_issue_prefetches(pool->pmd); } + cond_resched(); } blk_finish_plug(&plug); } @@ -2299,6 +2300,7 @@ static void process_thin_deferred_cells(struct thin_c *tc) else pool->process_cell(tc, cell); } + cond_resched(); } while (!list_empty(&cells)); } diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 70e2f73df52779e823f9b9c657e89fa08e157d65..c3819388b9a457d156b8bb77e98b82d1e0f07306 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -265,7 +265,6 @@ static int __init local_init(void) static void local_exit(void) { - flush_scheduled_work(); destroy_workqueue(deferred_remove_workqueue); unregister_blkdev(_major, _name); @@ -2384,6 +2383,7 @@ static void dm_wq_work(struct work_struct *work) break; submit_bio_noacct(bio); + cond_resched(); } } diff --git a/drivers/md/md.c b/drivers/md/md.c index da423bcb0b109951ac3e481c423dbc05690acd57..84726dd359a84971a3668ee525c4dfdf968d1800 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5671,6 +5671,7 @@ static int md_alloc(dev_t dev, char *name) * completely removed (mddev_delayed_delete). 
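The dm-cache, dm-thin and dm.c hunks above all add the same thing: a cond_resched() inside loops that can drain very long bio lists from a workqueue, so the worker yields instead of monopolising a CPU. A minimal, hypothetical drain loop showing where the call sits:

#include <linux/bio.h>
#include <linux/sched.h>

/* Hypothetical worker body: requeue a deferred bio list without hogging the CPU. */
static void example_drain_bios(struct bio_list *list)
{
	struct bio *bio;

	while ((bio = bio_list_pop(list))) {
		bio->bi_status = BLK_STS_DM_REQUEUE;
		bio_endio(bio);
		cond_resched();	/* long lists: give other tasks a chance to run */
	}
}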
*/ flush_workqueue(md_misc_wq); + flush_workqueue(md_rdev_misc_wq); mutex_lock(&disks_mutex); error = -EEXIST; diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c index 4771d0ef2c46f15ccce6eec2e33fceefdc6c18d2..b975636d944019163442b60e07dafdbf1b82dc14 100644 --- a/drivers/media/i2c/imx219.c +++ b/drivers/media/i2c/imx219.c @@ -89,6 +89,12 @@ #define IMX219_REG_ORIENTATION 0x0172 +/* Binning Mode */ +#define IMX219_REG_BINNING_MODE 0x0174 +#define IMX219_BINNING_NONE 0x0000 +#define IMX219_BINNING_2X2 0x0101 +#define IMX219_BINNING_2X2_ANALOG 0x0303 + /* Test Pattern Control */ #define IMX219_REG_TEST_PATTERN 0x0600 #define IMX219_TEST_PATTERN_DISABLE 0 @@ -143,25 +149,66 @@ struct imx219_mode { /* Default register values */ struct imx219_reg_list reg_list; + + /* 2x2 binning is used */ + bool binning; }; -/* - * Register sets lifted off the i2C interface from the Raspberry Pi firmware - * driver. - * 3280x2464 = mode 2, 1920x1080 = mode 1, 1640x1232 = mode 4, 640x480 = mode 7. - */ -static const struct imx219_reg mode_3280x2464_regs[] = { - {0x0100, 0x00}, +static const struct imx219_reg imx219_common_regs[] = { + {0x0100, 0x00}, /* Mode Select */ + + /* To Access Addresses 3000-5fff, send the following commands */ {0x30eb, 0x0c}, {0x30eb, 0x05}, {0x300a, 0xff}, {0x300b, 0xff}, {0x30eb, 0x05}, {0x30eb, 0x09}, - {0x0114, 0x01}, - {0x0128, 0x00}, - {0x012a, 0x18}, + + /* PLL Clock Table */ + {0x0301, 0x05}, /* VTPXCK_DIV */ + {0x0303, 0x01}, /* VTSYSCK_DIV */ + {0x0304, 0x03}, /* PREPLLCK_VT_DIV 0x03 = AUTO set */ + {0x0305, 0x03}, /* PREPLLCK_OP_DIV 0x03 = AUTO set */ + {0x0306, 0x00}, /* PLL_VT_MPY */ + {0x0307, 0x39}, + {0x030b, 0x01}, /* OP_SYS_CLK_DIV */ + {0x030c, 0x00}, /* PLL_OP_MPY */ + {0x030d, 0x72}, + + /* Undocumented registers */ + {0x455e, 0x00}, + {0x471e, 0x4b}, + {0x4767, 0x0f}, + {0x4750, 0x14}, + {0x4540, 0x00}, + {0x47b4, 0x14}, + {0x4713, 0x30}, + {0x478b, 0x10}, + {0x478f, 0x10}, + {0x4793, 0x10}, + {0x4797, 0x0e}, + {0x479b, 0x0e}, + + /* Frame Bank Register Group "A" */ + {0x0162, 0x0d}, /* Line_Length_A */ + {0x0163, 0x78}, + {0x0170, 0x01}, /* X_ODD_INC_A */ + {0x0171, 0x01}, /* Y_ODD_INC_A */ + + /* Output setup registers */ + {0x0114, 0x01}, /* CSI 2-Lane Mode */ + {0x0128, 0x00}, /* DPHY Auto Mode */ + {0x012a, 0x18}, /* EXCK_Freq */ {0x012b, 0x00}, +}; + +/* + * Register sets lifted off the i2C interface from the Raspberry Pi firmware + * driver. + * 3280x2464 = mode 2, 1920x1080 = mode 1, 1640x1232 = mode 4, 640x480 = mode 7. 
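The imx219 rework above pulls everything the four mode tables had in common (PLL setup, line length, output setup, the undocumented registers) into a single imx219_common_regs[] array and leaves only the crop and output-size registers per mode; streaming then writes the common table first, the mode table second, and programs binning separately from the new mode->binning flag. A sketch of that ordering under assumed helper names, not the driver's real code:

#include <linux/kernel.h>
#include <linux/types.h>

struct reg_val { u16 addr; u8 val; };

struct sensor_mode {
	const struct reg_val *regs;
	unsigned int num_regs;
	bool binning;
};

/* Assumed I2C helpers; the real driver uses imx219_write_reg()/imx219_write_regs(). */
int sensor_write_table(void *client, const struct reg_val *t, unsigned int n);
int sensor_set_binning(void *client, bool binning);

static const struct reg_val common_regs[] = {
	{ 0x0301, 0x05 },	/* PLL/output setup shared by all modes */
	{ 0x0162, 0x0d },	/* line length */
};

static int sensor_start_streaming(void *client, const struct sensor_mode *mode)
{
	int ret;

	/* Common registers go out once, before anything mode specific. */
	ret = sensor_write_table(client, common_regs, ARRAY_SIZE(common_regs));
	if (ret)
		return ret;

	/* Only what actually differs remains in the per-mode table. */
	ret = sensor_write_table(client, mode->regs, mode->num_regs);
	if (ret)
		return ret;

	/* Binning is derived from the mode instead of being baked into the table. */
	return sensor_set_binning(client, mode->binning);
}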
+ */ +static const struct imx219_reg mode_3280x2464_regs[] = { {0x0164, 0x00}, {0x0165, 0x00}, {0x0166, 0x0c}, @@ -174,53 +221,13 @@ static const struct imx219_reg mode_3280x2464_regs[] = { {0x016d, 0xd0}, {0x016e, 0x09}, {0x016f, 0xa0}, - {0x0170, 0x01}, - {0x0171, 0x01}, - {0x0174, 0x00}, - {0x0175, 0x00}, - {0x0301, 0x05}, - {0x0303, 0x01}, - {0x0304, 0x03}, - {0x0305, 0x03}, - {0x0306, 0x00}, - {0x0307, 0x39}, - {0x030b, 0x01}, - {0x030c, 0x00}, - {0x030d, 0x72}, {0x0624, 0x0c}, {0x0625, 0xd0}, {0x0626, 0x09}, {0x0627, 0xa0}, - {0x455e, 0x00}, - {0x471e, 0x4b}, - {0x4767, 0x0f}, - {0x4750, 0x14}, - {0x4540, 0x00}, - {0x47b4, 0x14}, - {0x4713, 0x30}, - {0x478b, 0x10}, - {0x478f, 0x10}, - {0x4793, 0x10}, - {0x4797, 0x0e}, - {0x479b, 0x0e}, - {0x0162, 0x0d}, - {0x0163, 0x78}, }; static const struct imx219_reg mode_1920_1080_regs[] = { - {0x0100, 0x00}, - {0x30eb, 0x05}, - {0x30eb, 0x0c}, - {0x300a, 0xff}, - {0x300b, 0xff}, - {0x30eb, 0x05}, - {0x30eb, 0x09}, - {0x0114, 0x01}, - {0x0128, 0x00}, - {0x012a, 0x18}, - {0x012b, 0x00}, - {0x0162, 0x0d}, - {0x0163, 0x78}, {0x0164, 0x02}, {0x0165, 0xa8}, {0x0166, 0x0a}, @@ -233,51 +240,13 @@ static const struct imx219_reg mode_1920_1080_regs[] = { {0x016d, 0x80}, {0x016e, 0x04}, {0x016f, 0x38}, - {0x0170, 0x01}, - {0x0171, 0x01}, - {0x0174, 0x00}, - {0x0175, 0x00}, - {0x0301, 0x05}, - {0x0303, 0x01}, - {0x0304, 0x03}, - {0x0305, 0x03}, - {0x0306, 0x00}, - {0x0307, 0x39}, - {0x030b, 0x01}, - {0x030c, 0x00}, - {0x030d, 0x72}, {0x0624, 0x07}, {0x0625, 0x80}, {0x0626, 0x04}, {0x0627, 0x38}, - {0x455e, 0x00}, - {0x471e, 0x4b}, - {0x4767, 0x0f}, - {0x4750, 0x14}, - {0x4540, 0x00}, - {0x47b4, 0x14}, - {0x4713, 0x30}, - {0x478b, 0x10}, - {0x478f, 0x10}, - {0x4793, 0x10}, - {0x4797, 0x0e}, - {0x479b, 0x0e}, - {0x0162, 0x0d}, - {0x0163, 0x78}, }; static const struct imx219_reg mode_1640_1232_regs[] = { - {0x0100, 0x00}, - {0x30eb, 0x0c}, - {0x30eb, 0x05}, - {0x300a, 0xff}, - {0x300b, 0xff}, - {0x30eb, 0x05}, - {0x30eb, 0x09}, - {0x0114, 0x01}, - {0x0128, 0x00}, - {0x012a, 0x18}, - {0x012b, 0x00}, {0x0164, 0x00}, {0x0165, 0x00}, {0x0166, 0x0c}, @@ -290,53 +259,13 @@ static const struct imx219_reg mode_1640_1232_regs[] = { {0x016d, 0x68}, {0x016e, 0x04}, {0x016f, 0xd0}, - {0x0170, 0x01}, - {0x0171, 0x01}, - {0x0174, 0x01}, - {0x0175, 0x01}, - {0x0301, 0x05}, - {0x0303, 0x01}, - {0x0304, 0x03}, - {0x0305, 0x03}, - {0x0306, 0x00}, - {0x0307, 0x39}, - {0x030b, 0x01}, - {0x030c, 0x00}, - {0x030d, 0x72}, {0x0624, 0x06}, {0x0625, 0x68}, {0x0626, 0x04}, {0x0627, 0xd0}, - {0x455e, 0x00}, - {0x471e, 0x4b}, - {0x4767, 0x0f}, - {0x4750, 0x14}, - {0x4540, 0x00}, - {0x47b4, 0x14}, - {0x4713, 0x30}, - {0x478b, 0x10}, - {0x478f, 0x10}, - {0x4793, 0x10}, - {0x4797, 0x0e}, - {0x479b, 0x0e}, - {0x0162, 0x0d}, - {0x0163, 0x78}, }; static const struct imx219_reg mode_640_480_regs[] = { - {0x0100, 0x00}, - {0x30eb, 0x05}, - {0x30eb, 0x0c}, - {0x300a, 0xff}, - {0x300b, 0xff}, - {0x30eb, 0x05}, - {0x30eb, 0x09}, - {0x0114, 0x01}, - {0x0128, 0x00}, - {0x012a, 0x18}, - {0x012b, 0x00}, - {0x0162, 0x0d}, - {0x0163, 0x78}, {0x0164, 0x03}, {0x0165, 0xe8}, {0x0166, 0x08}, @@ -349,35 +278,10 @@ static const struct imx219_reg mode_640_480_regs[] = { {0x016d, 0x80}, {0x016e, 0x01}, {0x016f, 0xe0}, - {0x0170, 0x01}, - {0x0171, 0x01}, - {0x0174, 0x03}, - {0x0175, 0x03}, - {0x0301, 0x05}, - {0x0303, 0x01}, - {0x0304, 0x03}, - {0x0305, 0x03}, - {0x0306, 0x00}, - {0x0307, 0x39}, - {0x030b, 0x01}, - {0x030c, 0x00}, - {0x030d, 0x72}, {0x0624, 0x06}, {0x0625, 0x68}, {0x0626, 0x04}, {0x0627, 0xd0}, - 
{0x455e, 0x00}, - {0x471e, 0x4b}, - {0x4767, 0x0f}, - {0x4750, 0x14}, - {0x4540, 0x00}, - {0x47b4, 0x14}, - {0x4713, 0x30}, - {0x478b, 0x10}, - {0x478f, 0x10}, - {0x4793, 0x10}, - {0x4797, 0x0e}, - {0x479b, 0x0e}, }; static const struct imx219_reg raw8_framefmt_regs[] = { @@ -483,6 +387,7 @@ static const struct imx219_mode supported_modes[] = { .num_of_regs = ARRAY_SIZE(mode_3280x2464_regs), .regs = mode_3280x2464_regs, }, + .binning = false, }, { /* 1080P 30fps cropped */ @@ -499,6 +404,7 @@ static const struct imx219_mode supported_modes[] = { .num_of_regs = ARRAY_SIZE(mode_1920_1080_regs), .regs = mode_1920_1080_regs, }, + .binning = false, }, { /* 2x2 binned 30fps mode */ @@ -515,6 +421,7 @@ static const struct imx219_mode supported_modes[] = { .num_of_regs = ARRAY_SIZE(mode_1640_1232_regs), .regs = mode_1640_1232_regs, }, + .binning = true, }, { /* 640x480 30fps mode */ @@ -531,6 +438,7 @@ static const struct imx219_mode supported_modes[] = { .num_of_regs = ARRAY_SIZE(mode_640_480_regs), .regs = mode_640_480_regs, }, + .binning = true, }, }; @@ -969,6 +877,35 @@ static int imx219_set_framefmt(struct imx219 *imx219) return -EINVAL; } +static int imx219_set_binning(struct imx219 *imx219) +{ + if (!imx219->mode->binning) { + return imx219_write_reg(imx219, IMX219_REG_BINNING_MODE, + IMX219_REG_VALUE_16BIT, + IMX219_BINNING_NONE); + } + + switch (imx219->fmt.code) { + case MEDIA_BUS_FMT_SRGGB8_1X8: + case MEDIA_BUS_FMT_SGRBG8_1X8: + case MEDIA_BUS_FMT_SGBRG8_1X8: + case MEDIA_BUS_FMT_SBGGR8_1X8: + return imx219_write_reg(imx219, IMX219_REG_BINNING_MODE, + IMX219_REG_VALUE_16BIT, + IMX219_BINNING_2X2_ANALOG); + + case MEDIA_BUS_FMT_SRGGB10_1X10: + case MEDIA_BUS_FMT_SGRBG10_1X10: + case MEDIA_BUS_FMT_SGBRG10_1X10: + case MEDIA_BUS_FMT_SBGGR10_1X10: + return imx219_write_reg(imx219, IMX219_REG_BINNING_MODE, + IMX219_REG_VALUE_16BIT, + IMX219_BINNING_2X2); + } + + return -EINVAL; +} + static const struct v4l2_rect * __imx219_get_pad_crop(struct imx219 *imx219, struct v4l2_subdev_pad_config *cfg, unsigned int pad, enum v4l2_subdev_format_whence which) @@ -1032,6 +969,13 @@ static int imx219_start_streaming(struct imx219 *imx219) return ret; } + /* Send all registers that are common to all modes */ + ret = imx219_write_regs(imx219, imx219_common_regs, ARRAY_SIZE(imx219_common_regs)); + if (ret) { + dev_err(&client->dev, "%s failed to send mfg header\n", __func__); + goto err_rpm_put; + } + /* Apply default values of current mode */ reg_list = &imx219->mode->reg_list; ret = imx219_write_regs(imx219, reg_list->regs, reg_list->num_of_regs); @@ -1047,6 +991,13 @@ static int imx219_start_streaming(struct imx219 *imx219) goto err_rpm_put; } + ret = imx219_set_binning(imx219); + if (ret) { + dev_err(&client->dev, "%s failed to set binning: %d\n", + __func__, ret); + goto err_rpm_put; + } + /* Apply customized values from user */ ret = __v4l2_ctrl_handler_setup(imx219->sd.ctrl_handler); if (ret) diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c index b1e2476d3c9e6819902736ffaa6c68bd7c97fb0e..79a11c0184c65ab653605d297130f31b80d5e06d 100644 --- a/drivers/media/i2c/max9286.c +++ b/drivers/media/i2c/max9286.c @@ -890,6 +890,7 @@ static int max9286_v4l2_register(struct max9286_priv *priv) err_put_node: fwnode_handle_put(ep); err_async: + v4l2_ctrl_handler_free(&priv->ctrls); max9286_v4l2_notifier_unregister(priv); return ret; diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c index bd0d45b0d43f5704e01a19c45a8866863c279258..34d74e575a431f74d6d865726d18cd22e9556eb4 
100644 --- a/drivers/media/i2c/ov2740.c +++ b/drivers/media/i2c/ov2740.c @@ -577,8 +577,10 @@ static int ov2740_init_controls(struct ov2740 *ov2740) V4L2_CID_TEST_PATTERN, ARRAY_SIZE(ov2740_test_pattern_menu) - 1, 0, 0, ov2740_test_pattern_menu); - if (ctrl_hdlr->error) + if (ctrl_hdlr->error) { + v4l2_ctrl_handler_free(ctrl_hdlr); return ctrl_hdlr->error; + } ov2740->sd.ctrl_handler = ctrl_hdlr; diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c index 8f0812e859012896702df944cea6e0a9550f5b2e..92a5f9aff9b53d68d9d14df97e750236772b5944 100644 --- a/drivers/media/i2c/ov5640.c +++ b/drivers/media/i2c/ov5640.c @@ -2748,7 +2748,7 @@ static int ov5640_init_controls(struct ov5640_dev *sensor) /* Auto/manual gain */ ctrls->auto_gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTOGAIN, 0, 1, 1, 1); - ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN, + ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_ANALOGUE_GAIN, 0, 1023, 1, 0); ctrls->saturation = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_SATURATION, diff --git a/drivers/media/i2c/ov5675.c b/drivers/media/i2c/ov5675.c index 9540ce8918f0cfff9cbca26c6f67d6f606e0d633..aa35a9546177ae017c83c2ff77e29abce4ce3790 100644 --- a/drivers/media/i2c/ov5675.c +++ b/drivers/media/i2c/ov5675.c @@ -791,8 +791,10 @@ static int ov5675_init_controls(struct ov5675 *ov5675) v4l2_ctrl_new_std(ctrl_hdlr, &ov5675_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0); - if (ctrl_hdlr->error) + if (ctrl_hdlr->error) { + v4l2_ctrl_handler_free(ctrl_hdlr); return ctrl_hdlr->error; + } ov5675->sd.ctrl_handler = ctrl_hdlr; diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c index 154776d0069eac9be150e888a9371f9b5b739a2a..e47800cb6c0f70acbc3fe3430ab43d3a9346cd6a 100644 --- a/drivers/media/i2c/ov7670.c +++ b/drivers/media/i2c/ov7670.c @@ -1824,7 +1824,7 @@ static int ov7670_parse_dt(struct device *dev, if (bus_cfg.bus_type != V4L2_MBUS_PARALLEL) { dev_err(dev, "Unsupported media bus type\n"); - return ret; + return -EINVAL; } info->mbus_config = bus_cfg.bus.parallel.flags; diff --git a/drivers/media/i2c/ov772x.c b/drivers/media/i2c/ov772x.c index 2cc6a678069a2ef1668df5224b30e08e0b3c386b..5033950a48ab66fcf6295304c88f3005c40e5b82 100644 --- a/drivers/media/i2c/ov772x.c +++ b/drivers/media/i2c/ov772x.c @@ -1397,7 +1397,7 @@ static int ov772x_probe(struct i2c_client *client) priv->subdev.ctrl_handler = &priv->hdl; if (priv->hdl.error) { ret = priv->hdl.error; - goto error_mutex_destroy; + goto error_ctrl_free; } priv->clk = clk_get(&client->dev, NULL); @@ -1446,7 +1446,6 @@ static int ov772x_probe(struct i2c_client *client) clk_put(priv->clk); error_ctrl_free: v4l2_ctrl_handler_free(&priv->hdl); -error_mutex_destroy: mutex_destroy(&priv->lock); return ret; diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c index 2fe4a0bd028449602ba9daee1a5c3cd238c9b2fe..d6838c8ebd7e859f0c818d79a0a1a1e633032f40 100644 --- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c +++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c @@ -1831,6 +1831,9 @@ static void cio2_pci_remove(struct pci_dev *pci_dev) v4l2_device_unregister(&cio2->v4l2_dev); media_device_cleanup(&cio2->media_dev); mutex_destroy(&cio2->lock); + + pm_runtime_forbid(&pci_dev->dev); + pm_runtime_get_noresume(&pci_dev->dev); } static int __maybe_unused cio2_runtime_suspend(struct device *dev) diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c index efb757d5168a637db8a5835f506f26dae7148fa8..e97c30070fc64bb52e46f53c42e9e6c5ebfbe011 100644 --- 
a/drivers/media/pci/saa7134/saa7134-core.c +++ b/drivers/media/pci/saa7134/saa7134-core.c @@ -977,7 +977,7 @@ static void saa7134_unregister_video(struct saa7134_dev *dev) } if (dev->radio_dev) { if (video_is_registered(dev->radio_dev)) - vb2_video_unregister_device(dev->radio_dev); + video_unregister_device(dev->radio_dev); else video_device_release(dev->radio_dev); dev->radio_dev = NULL; diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c index 1311b4996ecebbd32bd987c8b88f29562bb50bc4..21c16698cc2db33e8f11e03a642d2b3f9e2bb2fc 100644 --- a/drivers/media/platform/omap3isp/isp.c +++ b/drivers/media/platform/omap3isp/isp.c @@ -2297,7 +2297,16 @@ static int isp_probe(struct platform_device *pdev) /* Regulators */ isp->isp_csiphy1.vdd = devm_regulator_get(&pdev->dev, "vdd-csiphy1"); + if (IS_ERR(isp->isp_csiphy1.vdd)) { + ret = PTR_ERR(isp->isp_csiphy1.vdd); + goto error; + } + isp->isp_csiphy2.vdd = devm_regulator_get(&pdev->dev, "vdd-csiphy2"); + if (IS_ERR(isp->isp_csiphy2.vdd)) { + ret = PTR_ERR(isp->isp_csiphy2.vdd); + goto error; + } /* Clocks * diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c index 2eef245c31a179e061f3502caaacc379b627c083..93121c90d76ae27cceac1ca997e93e2b3ba47972 100644 --- a/drivers/media/platform/ti-vpe/cal.c +++ b/drivers/media/platform/ti-vpe/cal.c @@ -624,8 +624,10 @@ static struct cal_ctx *cal_ctx_create(struct cal_dev *cal, int inst) ctx->cport = inst; ret = cal_ctx_v4l2_init(ctx); - if (ret) + if (ret) { + kfree(ctx); return NULL; + } return ctx; } diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c index 22e524b69806acd81a9aa3328e0bdc23a6bb8d0b..a56c844d7f8163112600029051d69cefc1fca904 100644 --- a/drivers/media/rc/gpio-ir-recv.c +++ b/drivers/media/rc/gpio-ir-recv.c @@ -130,6 +130,23 @@ static int gpio_ir_recv_probe(struct platform_device *pdev) "gpio-ir-recv-irq", gpio_dev); } +static int gpio_ir_recv_remove(struct platform_device *pdev) +{ + struct gpio_rc_dev *gpio_dev = platform_get_drvdata(pdev); + struct device *pmdev = gpio_dev->pmdev; + + if (pmdev) { + pm_runtime_get_sync(pmdev); + cpu_latency_qos_remove_request(&gpio_dev->qos); + + pm_runtime_disable(pmdev); + pm_runtime_put_noidle(pmdev); + pm_runtime_set_suspended(pmdev); + } + + return 0; +} + #ifdef CONFIG_PM static int gpio_ir_recv_suspend(struct device *dev) { @@ -189,6 +206,7 @@ MODULE_DEVICE_TABLE(of, gpio_ir_recv_of_match); static struct platform_driver gpio_ir_recv_driver = { .probe = gpio_ir_recv_probe, + .remove = gpio_ir_recv_remove, .driver = { .name = KBUILD_MODNAME, .of_match_table = of_match_ptr(gpio_ir_recv_of_match), diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c index df4c5dcba39cd7a60205f80a6f90f10e923da3fd..5c223b5498b4b14237e969bdf519728112b3dce3 100644 --- a/drivers/media/usb/siano/smsusb.c +++ b/drivers/media/usb/siano/smsusb.c @@ -179,6 +179,8 @@ static void smsusb_stop_streaming(struct smsusb_device_t *dev) for (i = 0; i < MAX_URBS; i++) { usb_kill_urb(&dev->surbs[i].urb); + if (dev->surbs[i].wq.func) + cancel_work_sync(&dev->surbs[i].wq); if (dev->surbs[i].cb) { smscore_putbuffer(dev->coredev, dev->surbs[i].cb); diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c index f479d8971dfbb32e39c5501a3c60e3ae5e7e16bd..5e0acabed37a0ffff3c2ba71620e99005166637a 100644 --- a/drivers/media/usb/uvc/uvc_ctrl.c +++ b/drivers/media/usb/uvc/uvc_ctrl.c @@ -6,6 +6,7 @@ * Laurent Pinchart (laurent.pinchart@ideasonboard.com) */ 
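The gpio-ir-recv hunk above adds the .remove() callback the driver was missing: it has to undo, in reverse, what probe set up for runtime PM, resuming the device first and then disabling and re-marking it suspended. A hedged sketch of that teardown shape for a hypothetical platform driver:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

struct example_priv {
	struct device *pmdev;	/* device that probe enabled runtime PM on */
};

static int example_remove(struct platform_device *pdev)
{
	struct example_priv *priv = platform_get_drvdata(pdev);
	struct device *pmdev = priv->pmdev;

	if (pmdev) {
		/* Resume so the hardware is in a known state before teardown. */
		pm_runtime_get_sync(pmdev);

		/* Undo probe's runtime-PM setup, mirroring its order. */
		pm_runtime_disable(pmdev);
		pm_runtime_put_noidle(pmdev);
		pm_runtime_set_suspended(pmdev);
	}

	return 0;
}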
+#include #include #include #include @@ -1275,17 +1276,12 @@ static void uvc_ctrl_send_slave_event(struct uvc_video_chain *chain, uvc_ctrl_send_event(chain, handle, ctrl, mapping, val, changes); } -static void uvc_ctrl_status_event_work(struct work_struct *work) +void uvc_ctrl_status_event(struct uvc_video_chain *chain, + struct uvc_control *ctrl, const u8 *data) { - struct uvc_device *dev = container_of(work, struct uvc_device, - async_ctrl.work); - struct uvc_ctrl_work *w = &dev->async_ctrl; - struct uvc_video_chain *chain = w->chain; struct uvc_control_mapping *mapping; - struct uvc_control *ctrl = w->ctrl; struct uvc_fh *handle; unsigned int i; - int ret; mutex_lock(&chain->ctrl_mutex); @@ -1293,7 +1289,7 @@ static void uvc_ctrl_status_event_work(struct work_struct *work) ctrl->handle = NULL; list_for_each_entry(mapping, &ctrl->info.mappings, list) { - s32 value = __uvc_ctrl_get_value(mapping, w->data); + s32 value = __uvc_ctrl_get_value(mapping, data); /* * handle may be NULL here if the device sends auto-update @@ -1312,6 +1308,20 @@ static void uvc_ctrl_status_event_work(struct work_struct *work) } mutex_unlock(&chain->ctrl_mutex); +} + +static void uvc_ctrl_status_event_work(struct work_struct *work) +{ + struct uvc_device *dev = container_of(work, struct uvc_device, + async_ctrl.work); + struct uvc_ctrl_work *w = &dev->async_ctrl; + int ret; + + uvc_ctrl_status_event(w->chain, w->ctrl, w->data); + + /* The barrier is needed to synchronize with uvc_status_stop(). */ + if (smp_load_acquire(&dev->flush_status)) + return; /* Resubmit the URB. */ w->urb->interval = dev->int_ep->desc.bInterval; @@ -1321,8 +1331,8 @@ static void uvc_ctrl_status_event_work(struct work_struct *work) ret); } -bool uvc_ctrl_status_event(struct urb *urb, struct uvc_video_chain *chain, - struct uvc_control *ctrl, const u8 *data) +bool uvc_ctrl_status_event_async(struct urb *urb, struct uvc_video_chain *chain, + struct uvc_control *ctrl, const u8 *data) { struct uvc_device *dev = chain->dev; struct uvc_ctrl_work *w = &dev->async_ctrl; diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c index 282f3d2388cc20612d612fa39622d99f208f4095..6334f99f1854db9656ba502daa712da95ce7cdb9 100644 --- a/drivers/media/usb/uvc/uvc_driver.c +++ b/drivers/media/usb/uvc/uvc_driver.c @@ -1121,10 +1121,8 @@ static int uvc_parse_vendor_control(struct uvc_device *dev, + n; memcpy(unit->extension.bmControls, &buffer[23+p], 2*n); - if (buffer[24+p+2*n] != 0) - usb_string(udev, buffer[24+p+2*n], unit->name, - sizeof(unit->name)); - else + if (buffer[24+p+2*n] == 0 || + usb_string(udev, buffer[24+p+2*n], unit->name, sizeof(unit->name)) < 0) sprintf(unit->name, "Extension %u", buffer[3]); list_add_tail(&unit->list, &dev->entities); @@ -1249,15 +1247,15 @@ static int uvc_parse_standard_control(struct uvc_device *dev, memcpy(term->media.bmTransportModes, &buffer[10+n], p); } - if (buffer[7] != 0) - usb_string(udev, buffer[7], term->name, - sizeof(term->name)); - else if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA) - sprintf(term->name, "Camera %u", buffer[3]); - else if (UVC_ENTITY_TYPE(term) == UVC_ITT_MEDIA_TRANSPORT_INPUT) - sprintf(term->name, "Media %u", buffer[3]); - else - sprintf(term->name, "Input %u", buffer[3]); + if (buffer[7] == 0 || + usb_string(udev, buffer[7], term->name, sizeof(term->name)) < 0) { + if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA) + sprintf(term->name, "Camera %u", buffer[3]); + if (UVC_ENTITY_TYPE(term) == UVC_ITT_MEDIA_TRANSPORT_INPUT) + sprintf(term->name, "Media %u", buffer[3]); + 
else + sprintf(term->name, "Input %u", buffer[3]); + } list_add_tail(&term->list, &dev->entities); break; @@ -1289,10 +1287,8 @@ static int uvc_parse_standard_control(struct uvc_device *dev, memcpy(term->baSourceID, &buffer[7], 1); - if (buffer[8] != 0) - usb_string(udev, buffer[8], term->name, - sizeof(term->name)); - else + if (buffer[8] == 0 || + usb_string(udev, buffer[8], term->name, sizeof(term->name)) < 0) sprintf(term->name, "Output %u", buffer[3]); list_add_tail(&term->list, &dev->entities); @@ -1314,10 +1310,8 @@ static int uvc_parse_standard_control(struct uvc_device *dev, memcpy(unit->baSourceID, &buffer[5], p); - if (buffer[5+p] != 0) - usb_string(udev, buffer[5+p], unit->name, - sizeof(unit->name)); - else + if (buffer[5+p] == 0 || + usb_string(udev, buffer[5+p], unit->name, sizeof(unit->name)) < 0) sprintf(unit->name, "Selector %u", buffer[3]); list_add_tail(&unit->list, &dev->entities); @@ -1347,10 +1341,8 @@ static int uvc_parse_standard_control(struct uvc_device *dev, if (dev->uvc_version >= 0x0110) unit->processing.bmVideoStandards = buffer[9+n]; - if (buffer[8+n] != 0) - usb_string(udev, buffer[8+n], unit->name, - sizeof(unit->name)); - else + if (buffer[8+n] == 0 || + usb_string(udev, buffer[8+n], unit->name, sizeof(unit->name)) < 0) sprintf(unit->name, "Processing %u", buffer[3]); list_add_tail(&unit->list, &dev->entities); @@ -1378,10 +1370,8 @@ static int uvc_parse_standard_control(struct uvc_device *dev, unit->extension.bmControls = (u8 *)unit + sizeof(*unit); memcpy(unit->extension.bmControls, &buffer[23+p], n); - if (buffer[23+p+n] != 0) - usb_string(udev, buffer[23+p+n], unit->name, - sizeof(unit->name)); - else + if (buffer[23+p+n] == 0 || + usb_string(udev, buffer[23+p+n], unit->name, sizeof(unit->name)) < 0) sprintf(unit->name, "Extension %u", buffer[3]); list_add_tail(&unit->list, &dev->entities); @@ -2565,6 +2555,24 @@ static const struct usb_device_id uvc_ids[] = { .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax }, + /* Logitech, Webcam C910 */ + { .match_flags = USB_DEVICE_ID_MATCH_DEVICE + | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = 0x046d, + .idProduct = 0x0821, + .bInterfaceClass = USB_CLASS_VIDEO, + .bInterfaceSubClass = 1, + .bInterfaceProtocol = 0, + .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_WAKE_AUTOSUSPEND)}, + /* Logitech, Webcam B910 */ + { .match_flags = USB_DEVICE_ID_MATCH_DEVICE + | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = 0x046d, + .idProduct = 0x0823, + .bInterfaceClass = USB_CLASS_VIDEO, + .bInterfaceSubClass = 1, + .bInterfaceProtocol = 0, + .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_WAKE_AUTOSUSPEND)}, /* Logitech Quickcam Fusion */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, diff --git a/drivers/media/usb/uvc/uvc_entity.c b/drivers/media/usb/uvc/uvc_entity.c index ca3a9c2eec2711833f2112e82514b4a97c0521c2..7c9895377118c75fe991e777d38ede137828fd19 100644 --- a/drivers/media/usb/uvc/uvc_entity.c +++ b/drivers/media/usb/uvc/uvc_entity.c @@ -37,7 +37,7 @@ static int uvc_mc_create_links(struct uvc_video_chain *chain, continue; remote = uvc_entity_by_id(chain->dev, entity->baSourceID[i]); - if (remote == NULL) + if (remote == NULL || remote->num_pads == 0) return -EINVAL; source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING) diff --git a/drivers/media/usb/uvc/uvc_status.c b/drivers/media/usb/uvc/uvc_status.c index 2bdb0ff203f8e61553c7806a4295e62445f0d2aa..73725051cc163db5a0a7f800350760ba77382dd9 100644 --- a/drivers/media/usb/uvc/uvc_status.c +++ 
b/drivers/media/usb/uvc/uvc_status.c @@ -6,6 +6,7 @@ * Laurent Pinchart (laurent.pinchart@ideasonboard.com) */ +#include #include #include #include @@ -179,7 +180,8 @@ static bool uvc_event_control(struct urb *urb, switch (status->bAttribute) { case UVC_CTRL_VALUE_CHANGE: - return uvc_ctrl_status_event(urb, chain, ctrl, status->bValue); + return uvc_ctrl_status_event_async(urb, chain, ctrl, + status->bValue); case UVC_CTRL_INFO_CHANGE: case UVC_CTRL_FAILURE_CHANGE: @@ -309,5 +311,41 @@ int uvc_status_start(struct uvc_device *dev, gfp_t flags) void uvc_status_stop(struct uvc_device *dev) { + struct uvc_ctrl_work *w = &dev->async_ctrl; + + /* + * Prevent the asynchronous control handler from requeing the URB. The + * barrier is needed so the flush_status change is visible to other + * CPUs running the asynchronous handler before usb_kill_urb() is + * called below. + */ + smp_store_release(&dev->flush_status, true); + + /* + * Cancel any pending asynchronous work. If any status event was queued, + * process it synchronously. + */ + if (cancel_work_sync(&w->work)) + uvc_ctrl_status_event(w->chain, w->ctrl, w->data); + + /* Kill the urb. */ usb_kill_urb(dev->int_urb); + + /* + * The URB completion handler may have queued asynchronous work. This + * won't resubmit the URB as flush_status is set, but it needs to be + * cancelled before returning or it could then race with a future + * uvc_status_start() call. + */ + if (cancel_work_sync(&w->work)) + uvc_ctrl_status_event(w->chain, w->ctrl, w->data); + + /* + * From this point, there are no events on the queue and the status URB + * is dead. No events will be queued until uvc_status_start() is called. + * The barrier is needed to make sure that flush_status is visible to + * uvc_ctrl_status_event_work() when uvc_status_start() will be called + * again. + */ + smp_store_release(&dev->flush_status, false); } diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c index f6373d678d256361b14024406688680af7d3a91e..03dfe96bcebacbd46553a6e3c074a16e0e4e6ab4 100644 --- a/drivers/media/usb/uvc/uvc_video.c +++ b/drivers/media/usb/uvc/uvc_video.c @@ -1308,7 +1308,9 @@ static void uvc_video_decode_meta(struct uvc_streaming *stream, if (has_scr) memcpy(stream->clock.last_scr, scr, 6); - memcpy(&meta->length, mem, length); + meta->length = mem[0]; + meta->flags = mem[1]; + memcpy(meta->buf, &mem[2], length - 2); meta_buf->bytesused += length + sizeof(meta->ns) + sizeof(meta->sof); uvc_trace(UVC_TRACE_FRAME, @@ -1903,6 +1905,17 @@ static int uvc_video_start_transfer(struct uvc_streaming *stream, uvc_trace(UVC_TRACE_VIDEO, "Selecting alternate setting %u " "(%u B/frame bandwidth).\n", altsetting, best_psize); + /* + * Some devices, namely the Logitech C910 and B910, are unable + * to recover from a USB autosuspend, unless the alternate + * setting of the streaming interface is toggled. 
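The uvc_status_stop() rewrite above is a general pattern for stopping an URB whose completion handler defers to a work item that would otherwise resubmit the URB: publish a "flushing" flag, cancel the work (processing anything already queued), kill the URB, then cancel once more in case the completion ran in between. A condensed, hypothetical sketch of the two sides:

#include <linux/kernel.h>
#include <linux/usb.h>
#include <linux/workqueue.h>
#include <asm/barrier.h>

struct example_dev {
	struct urb *int_urb;
	struct work_struct event_work;
	bool flush;		/* set while stopping; worker must not resubmit */
};

static void example_event_work(struct work_struct *work)
{
	struct example_dev *dev = container_of(work, struct example_dev, event_work);

	/* ... handle the deferred status event here ... */

	/* Pairs with the smp_store_release() in example_stop(). */
	if (smp_load_acquire(&dev->flush))
		return;

	usb_submit_urb(dev->int_urb, GFP_KERNEL);
}

static void example_stop(struct example_dev *dev)
{
	/* Make the flag visible before the URB is killed. */
	smp_store_release(&dev->flush, true);

	/* Flush work queued before the flag was set. */
	cancel_work_sync(&dev->event_work);

	usb_kill_urb(dev->int_urb);

	/* The completion may have queued work meanwhile; it won't resubmit. */
	cancel_work_sync(&dev->event_work);

	smp_store_release(&dev->flush, false);
}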
+ */ + if (stream->dev->quirks & UVC_QUIRK_WAKE_AUTOSUSPEND) { + usb_set_interface(stream->dev->udev, intfnum, + altsetting); + usb_set_interface(stream->dev->udev, intfnum, 0); + } + ret = usb_set_interface(stream->dev->udev, intfnum, altsetting); if (ret < 0) return ret; diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h index c884020b287843270872f88b36e0856ff0f18f0d..c75990c0957e79555ea2f93ec8317fe4ed5099ae 100644 --- a/drivers/media/usb/uvc/uvcvideo.h +++ b/drivers/media/usb/uvc/uvcvideo.h @@ -203,6 +203,7 @@ #define UVC_QUIRK_RESTORE_CTRLS_ON_INIT 0x00000400 #define UVC_QUIRK_FORCE_Y8 0x00000800 #define UVC_QUIRK_FORCE_BPP 0x00001000 +#define UVC_QUIRK_WAKE_AUTOSUSPEND 0x00002000 /* Format flags */ #define UVC_FMT_FLAG_COMPRESSED 0x00000001 @@ -669,6 +670,7 @@ struct uvc_device { /* Status Interrupt Endpoint */ struct usb_host_endpoint *int_ep; struct urb *int_urb; + bool flush_status; u8 *status; struct input_dev *input; char input_phys[64]; @@ -838,7 +840,9 @@ int uvc_ctrl_add_mapping(struct uvc_video_chain *chain, int uvc_ctrl_init_device(struct uvc_device *dev); void uvc_ctrl_cleanup_device(struct uvc_device *dev); int uvc_ctrl_restore_values(struct uvc_device *dev); -bool uvc_ctrl_status_event(struct urb *urb, struct uvc_video_chain *chain, +bool uvc_ctrl_status_event_async(struct urb *urb, struct uvc_video_chain *chain, + struct uvc_control *ctrl, const u8 *data); +void uvc_ctrl_status_event(struct uvc_video_chain *chain, struct uvc_control *ctrl, const u8 *data); int uvc_ctrl_begin(struct uvc_video_chain *chain); diff --git a/drivers/memory/atmel-sdramc.c b/drivers/memory/atmel-sdramc.c index 9c49d00c2a966b2859e30d8699f6c1cfcd3f9caa..ea6e9e1eaf04687cd6a9cec822a72467782cbe4e 100644 --- a/drivers/memory/atmel-sdramc.c +++ b/drivers/memory/atmel-sdramc.c @@ -47,19 +47,17 @@ static int atmel_ramc_probe(struct platform_device *pdev) caps = of_device_get_match_data(&pdev->dev); if (caps->has_ddrck) { - clk = devm_clk_get(&pdev->dev, "ddrck"); + clk = devm_clk_get_enabled(&pdev->dev, "ddrck"); if (IS_ERR(clk)) return PTR_ERR(clk); - clk_prepare_enable(clk); } if (caps->has_mpddr_clk) { - clk = devm_clk_get(&pdev->dev, "mpddr"); + clk = devm_clk_get_enabled(&pdev->dev, "mpddr"); if (IS_ERR(clk)) { pr_err("AT91 RAMC: couldn't get mpddr clock\n"); return PTR_ERR(clk); } - clk_prepare_enable(clk); } return 0; diff --git a/drivers/memory/mvebu-devbus.c b/drivers/memory/mvebu-devbus.c index 8450638e86700763d52bb85ec8131c27019bb33c..efc6c08db2b70a78ae2d6eb902305153fb8cf36e 100644 --- a/drivers/memory/mvebu-devbus.c +++ b/drivers/memory/mvebu-devbus.c @@ -280,10 +280,9 @@ static int mvebu_devbus_probe(struct platform_device *pdev) if (IS_ERR(devbus->base)) return PTR_ERR(devbus->base); - clk = devm_clk_get(&pdev->dev, NULL); + clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(clk)) return PTR_ERR(clk); - clk_prepare_enable(clk); /* * Obtain clock period in picoseconds, diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c index 000cb82023e358904e9036241260e7a582eb9a28..afdc4908362556d0d2ca7cfe894b11add5d689f5 100644 --- a/drivers/mfd/arizona-core.c +++ b/drivers/mfd/arizona-core.c @@ -45,7 +45,7 @@ int arizona_clk32k_enable(struct arizona *arizona) if (arizona->clk32k_ref == 1) { switch (arizona->pdata.clk32k_src) { case ARIZONA_32KZ_MCLK1: - ret = pm_runtime_get_sync(arizona->dev); + ret = pm_runtime_resume_and_get(arizona->dev); if (ret != 0) goto err_ref; ret = clk_prepare_enable(arizona->mclk[ARIZONA_MCLK1]); diff --git 
a/drivers/mfd/pcf50633-adc.c b/drivers/mfd/pcf50633-adc.c index 5cd653e615125a2631e9370686389a6c603265f6..191b1bc6141c2fa4f2526a6b847d836f0b664664 100644 --- a/drivers/mfd/pcf50633-adc.c +++ b/drivers/mfd/pcf50633-adc.c @@ -136,6 +136,7 @@ int pcf50633_adc_async_read(struct pcf50633 *pcf, int mux, int avg, void *callback_param) { struct pcf50633_adc_request *req; + int ret; /* req is freed when the result is ready, in interrupt handler */ req = kmalloc(sizeof(*req), GFP_KERNEL); @@ -147,7 +148,11 @@ int pcf50633_adc_async_read(struct pcf50633 *pcf, int mux, int avg, req->callback = callback; req->callback_param = callback_param; - return adc_enqueue_request(pcf, req); + ret = adc_enqueue_request(pcf, req); + if (ret) + kfree(req); + + return ret; } EXPORT_SYMBOL_GPL(pcf50633_adc_async_read); diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c index 4e30fa98fe7d31ec79957f66e17123f290179d05..c4c1275581ec93f0918df49f6afbafcd4a5de16e 100644 --- a/drivers/misc/mei/bus-fixup.c +++ b/drivers/misc/mei/bus-fixup.c @@ -172,7 +172,7 @@ static int mei_fwver(struct mei_cl_device *cldev) ret = __mei_cl_send(cldev->cl, (u8 *)&req, sizeof(req), MEI_CL_IO_TX_BLOCKING); if (ret < 0) { - dev_err(&cldev->dev, "Could not send ReqFWVersion cmd\n"); + dev_err(&cldev->dev, "Could not send ReqFWVersion cmd ret = %d\n", ret); return ret; } @@ -184,7 +184,7 @@ static int mei_fwver(struct mei_cl_device *cldev) * Should be at least one version block, * error out if nothing found */ - dev_err(&cldev->dev, "Could not read FW version\n"); + dev_err(&cldev->dev, "Could not read FW version ret = %d\n", bytes_recv); return -EIO; } @@ -332,7 +332,7 @@ static int mei_nfc_if_version(struct mei_cl *cl, ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(cmd), MEI_CL_IO_TX_BLOCKING); if (ret < 0) { - dev_err(bus->dev, "Could not send IF version cmd\n"); + dev_err(bus->dev, "Could not send IF version cmd ret = %d\n", ret); return ret; } @@ -346,7 +346,7 @@ static int mei_nfc_if_version(struct mei_cl *cl, ret = 0; bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length, 0, 0); if (bytes_recv < 0 || (size_t)bytes_recv < if_version_length) { - dev_err(bus->dev, "Could not read IF version\n"); + dev_err(bus->dev, "Could not read IF version ret = %d\n", bytes_recv); ret = -EIO; goto err; } diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c index a448535c1265d6385878f21de341330d4d23e402..89dd49260080b443847368ae9854d22b4372635c 100644 --- a/drivers/mmc/core/sdio_bus.c +++ b/drivers/mmc/core/sdio_bus.c @@ -295,6 +295,12 @@ static void sdio_release_func(struct device *dev) if (!(func->card->quirks & MMC_QUIRK_NONSTD_SDIO)) sdio_free_func_cis(func); + /* + * We have now removed the link to the tuples in the + * card structure, so remove the reference. + */ + put_device(&func->card->dev); + kfree(func->info); kfree(func->tmpbuf); kfree(func); @@ -325,6 +331,12 @@ struct sdio_func *sdio_alloc_func(struct mmc_card *card) device_initialize(&func->dev); + /* + * We may link to tuples in the card structure, + * we need make sure we have a reference to it. 
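The sdio_bus/sdio_cis hunks around here move the card reference so it is taken when the function is allocated and dropped in its release callback, instead of being tied to CIS parsing: the child object may outlive the parse, so the reference has to follow the object's lifetime. A generic, hypothetical sketch of that pairing:

#include <linux/device.h>
#include <linux/slab.h>

struct child_obj {
	struct device dev;
	struct device *parent_res;	/* e.g. the card whose data we point into */
};

static void child_release(struct device *dev)
{
	struct child_obj *child = container_of(dev, struct child_obj, dev);

	/* Drop the reference taken at allocation time. */
	put_device(child->parent_res);
	kfree(child);
}

static struct child_obj *child_alloc(struct device *parent_res)
{
	struct child_obj *child = kzalloc(sizeof(*child), GFP_KERNEL);

	if (!child)
		return NULL;

	device_initialize(&child->dev);
	child->dev.release = child_release;

	/* Pin the parent for as long as this object can reference its data. */
	child->parent_res = get_device(parent_res);

	return child;
}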
+ */ + get_device(&func->card->dev); + func->dev.parent = &card->dev; func->dev.bus = &sdio_bus_type; func->dev.release = sdio_release_func; @@ -378,10 +390,9 @@ int sdio_add_func(struct sdio_func *func) */ void sdio_remove_func(struct sdio_func *func) { - if (!sdio_func_present(func)) - return; + if (sdio_func_present(func)) + device_del(&func->dev); - device_del(&func->dev); of_node_put(func->dev.of_node); put_device(&func->dev); } diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c index b23773583179df8071bce6f64772c0a6e2560159..ce524f7e11fb2170d5ca03909515ed21cf99fb02 100644 --- a/drivers/mmc/core/sdio_cis.c +++ b/drivers/mmc/core/sdio_cis.c @@ -391,12 +391,6 @@ int sdio_read_func_cis(struct sdio_func *func) if (ret) return ret; - /* - * Since we've linked to tuples in the card structure, - * we must make sure we have a reference to it. - */ - get_device(&func->card->dev); - /* * Vendor/device id is optional for function CIS, so * copy it from the card structure as needed. @@ -422,11 +416,5 @@ void sdio_free_func_cis(struct sdio_func *func) } func->tuples = NULL; - - /* - * We have now removed the link to the tuples in the - * card structure, so remove the reference. - */ - put_device(&func->card->dev); } diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c index aa3dfb9c1071b25c198db32214e3190b8106f6fd..62d00232f85ea9a230cedb5d3704bef27f787ef6 100644 --- a/drivers/mmc/host/jz4740_mmc.c +++ b/drivers/mmc/host/jz4740_mmc.c @@ -1041,6 +1041,16 @@ static int jz4740_mmc_probe(struct platform_device* pdev) mmc->ops = &jz4740_mmc_ops; if (!mmc->f_max) mmc->f_max = JZ_MMC_CLK_RATE; + + /* + * There seems to be a problem with this driver on the JZ4760 and + * JZ4760B SoCs. There, when using the maximum rate supported (50 MHz), + * the communication fails with many SD cards. + * Until this bug is sorted out, limit the maximum rate to 24 MHz. + */ + if (host->version == JZ_MMC_JZ4760 && mmc->f_max > JZ_MMC_CLK_RATE) + mmc->f_max = JZ_MMC_CLK_RATE; + mmc->f_min = mmc->f_max / 128; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 02f4fd26e76a92694ca6475c269a94aea0223889..1d814919eb6be435f109b04fc415871467101bac 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -1450,7 +1450,7 @@ static int mmc_spi_probe(struct spi_device *spi) status = mmc_add_host(mmc); if (status != 0) - goto fail_add_host; + goto fail_glue_init; /* * Index 0 is card detect @@ -1458,7 +1458,7 @@ static int mmc_spi_probe(struct spi_device *spi) */ status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000); if (status == -EPROBE_DEFER) - goto fail_add_host; + goto fail_gpiod_request; if (!status) { /* * The platform has a CD GPIO signal that may support @@ -1473,7 +1473,7 @@ static int mmc_spi_probe(struct spi_device *spi) /* Index 1 is write protect/read only */ status = mmc_gpiod_request_ro(mmc, NULL, 1, 0); if (status == -EPROBE_DEFER) - goto fail_add_host; + goto fail_gpiod_request; if (!status) has_ro = true; @@ -1487,7 +1487,7 @@ static int mmc_spi_probe(struct spi_device *spi) ? 
", cd polling" : ""); return 0; -fail_add_host: +fail_gpiod_request: mmc_remove_host(mmc); fail_glue_init: mmc_spi_dma_free(host); diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c index 2a7ca3072f35724123679542adecf9e29378023e..52eb28f3277cdb029371a34cf80d419e82cf2d5a 100644 --- a/drivers/mtd/nand/raw/sunxi_nand.c +++ b/drivers/mtd/nand/raw/sunxi_nand.c @@ -1587,7 +1587,7 @@ static int sunxi_nand_ooblayout_free(struct mtd_info *mtd, int section, if (section < ecc->steps) oobregion->length = 4; else - oobregion->offset = mtd->oobsize - oobregion->offset; + oobregion->length = mtd->oobsize - oobregion->offset; return 0; } diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c index 2c256d455c9f3d55d5d86f30da7e8fe068858bdb..342215231932144ccb4bee5459c0adfc4429c84c 100644 --- a/drivers/mtd/spi-nor/core.c +++ b/drivers/mtd/spi-nor/core.c @@ -2424,6 +2424,15 @@ void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size, erase->size_mask = (1 << erase->size_shift) - 1; } +/** + * spi_nor_mask_erase_type() - mask out a SPI NOR erase type + * @erase: pointer to a structure that describes a SPI NOR erase type + */ +void spi_nor_mask_erase_type(struct spi_nor_erase_type *erase) +{ + erase->size = 0; +} + /** * spi_nor_init_uniform_erase_map() - Initialize uniform erase map * @map: the erase map of the SPI NOR diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h index 6f62ee861231ae3ff66e860f2e4bd2ced9f93a0c..788775bb67958d8529ffa7782bc477a49b00fb9b 100644 --- a/drivers/mtd/spi-nor/core.h +++ b/drivers/mtd/spi-nor/core.h @@ -424,6 +424,7 @@ void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode, void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size, u8 opcode); +void spi_nor_mask_erase_type(struct spi_nor_erase_type *erase); struct spi_nor_erase_region * spi_nor_region_next(struct spi_nor_erase_region *region); void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map, diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c index 08de2a2b44520f4b489917816eb2bd068668dbab..9dc0528ea88426ecaed0e912b43f0b4e341843c6 100644 --- a/drivers/mtd/spi-nor/sfdp.c +++ b/drivers/mtd/spi-nor/sfdp.c @@ -852,7 +852,7 @@ spi_nor_init_non_uniform_erase_map(struct spi_nor *nor, */ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) if (!(regions_erase_type & BIT(erase[i].idx))) - spi_nor_set_erase_type(&erase[i], 0, 0xFF); + spi_nor_mask_erase_type(&erase[i]); return 0; } @@ -1063,7 +1063,7 @@ static int spi_nor_parse_4bait(struct spi_nor *nor, erase_type[i].opcode = (dwords[1] >> erase_type[i].idx * 8) & 0xFF; else - spi_nor_set_erase_type(&erase_type[i], 0u, 0xFF); + spi_nor_mask_erase_type(&erase_type[i]); } /* diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index 4153e0d15c5f913711507fbaf35e50e2ae297173..e45fdc1bf66a4aed592bdf83be7cbaedd98865e1 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -467,6 +467,7 @@ static int uif_init(struct ubi_device *ubi) err = ubi_add_volume(ubi, ubi->volumes[i]); if (err) { ubi_err(ubi, "cannot add volume %d", i); + ubi->volumes[i] = NULL; goto out_volumes; } } @@ -664,6 +665,12 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024) ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size); ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size); + if (ubi->vid_hdr_offset && ((ubi->vid_hdr_offset + UBI_VID_HDR_SIZE) > + ubi->vid_hdr_alsize)) { + ubi_err(ubi, "VID header offset %d too large.", 
ubi->vid_hdr_offset); + return -EINVAL; + } + dbg_gen("min_io_size %d", ubi->min_io_size); dbg_gen("max_write_size %d", ubi->max_write_size); dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size); diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c index 053ab52668e8bf3c1b8f39b8430e56fbdd9161a5..69592be33adfcc7480cf4a2e2c7161d8c759fbc8 100644 --- a/drivers/mtd/ubi/fastmap-wl.c +++ b/drivers/mtd/ubi/fastmap-wl.c @@ -146,13 +146,15 @@ void ubi_refill_pools(struct ubi_device *ubi) if (ubi->fm_anchor) { wl_tree_add(ubi->fm_anchor, &ubi->free); ubi->free_count++; + ubi->fm_anchor = NULL; } - /* - * All available PEBs are in ubi->free, now is the time to get - * the best anchor PEBs. - */ - ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1); + if (!ubi->fm_disabled) + /* + * All available PEBs are in ubi->free, now is the time to get + * the best anchor PEBs. + */ + ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1); for (;;) { enough = 0; diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c index 90e5f8d2fe0a9a373c4ab61552d4c8b945c1943b..c8ab065ff8a1f9fa97f731e61070d2af6bc3105a 100644 --- a/drivers/mtd/ubi/vmt.c +++ b/drivers/mtd/ubi/vmt.c @@ -591,6 +591,7 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol) if (err) { ubi_err(ubi, "cannot add character device for volume %d, error %d", vol_id, err); + vol_release(&vol->dev); return err; } @@ -601,15 +602,14 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol) vol->dev.groups = volume_dev_groups; dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id); err = device_register(&vol->dev); - if (err) - goto out_cdev; + if (err) { + cdev_del(&vol->cdev); + put_device(&vol->dev); + return err; + } self_check_volumes(ubi); return err; - -out_cdev: - cdev_del(&vol->cdev); - return err; } /** diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 820b5c1c8e8e7cfb24813628e1d40c7edaed8a93..6da09263e0b9f2800951bac0c008b9e86d779c0c 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -885,8 +885,11 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, err = do_sync_erase(ubi, e1, vol_id, lnum, 0); if (err) { - if (e2) + if (e2) { + spin_lock(&ubi->wl_lock); wl_entry_destroy(ubi, e2); + spin_unlock(&ubi->wl_lock); + } goto out_ro; } @@ -968,11 +971,11 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, spin_lock(&ubi->wl_lock); ubi->move_from = ubi->move_to = NULL; ubi->move_to_put = ubi->wl_scheduled = 0; + wl_entry_destroy(ubi, e1); + wl_entry_destroy(ubi, e2); spin_unlock(&ubi->wl_lock); ubi_free_vid_buf(vidb); - wl_entry_destroy(ubi, e1); - wl_entry_destroy(ubi, e2); out_ro: ubi_ro_mode(ubi); @@ -1121,14 +1124,18 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk) /* Re-schedule the LEB for erasure */ err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false); if (err1) { + spin_lock(&ubi->wl_lock); wl_entry_destroy(ubi, e); + spin_unlock(&ubi->wl_lock); err = err1; goto out_ro; } return err; } + spin_lock(&ubi->wl_lock); wl_entry_destroy(ubi, e); + spin_unlock(&ubi->wl_lock); if (err != -EIO) /* * If this is not %-EIO, we have no idea what to do. Scheduling @@ -1244,6 +1251,18 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum, retry: spin_lock(&ubi->wl_lock); e = ubi->lookuptbl[pnum]; + if (!e) { + /* + * This wl entry has been removed for some errors by other + * process (eg. 
wear leveling worker), corresponding process + * (except __erase_worker, which cannot concurrent with + * ubi_wl_put_peb) will set ubi ro_mode at the same time, + * just ignore this wl entry. + */ + spin_unlock(&ubi->wl_lock); + up_read(&ubi->fm_protect); + return 0; + } if (e == ubi->move_from) { /* * User is putting the physical eraseblock which was selected to diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c index f3f86ef68ae0ce9c126d2e66108e6124a7190e01..8b6cf2bf9025abca4aac49dd0392b4144148cc08 100644 --- a/drivers/net/bonding/bond_debugfs.c +++ b/drivers/net/bonding/bond_debugfs.c @@ -76,7 +76,7 @@ void bond_debug_reregister(struct bonding *bond) d = debugfs_rename(bonding_debug_root, bond->debug_dir, bonding_debug_root, bond->dev->name); - if (d) { + if (!IS_ERR(d)) { bond->debug_dir = d; } else { netdev_warn(bond->dev, "failed to reregister, so just unregister old one\n"); diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index 73c5343e609bc495bdb7e30f9eb7767b5d0ac1b6..c9ccce6c60b46df15d6a5dd5205cbc9f91f6b428 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c @@ -278,7 +278,6 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv, cf->data[2] |= CAN_ERR_PROT_STUFF; break; default: - cf->data[3] = ecc & SJA1000_ECC_SEG; break; } @@ -286,6 +285,9 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv, if (!(ecc & SJA1000_ECC_DIR)) cf->data[2] |= CAN_ERR_PROT_TX; + /* Bit stream position in CAN frame as the error was detected */ + cf->data[3] = ecc & SJA1000_ECC_SEG; + if (priv->can.state == CAN_STATE_ERROR_WARNING || priv->can.state == CAN_STATE_ERROR_PASSIVE) { cf->data[1] = (txerr > rxerr) ? diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c index d3f17f05ebef9b324250954cb4903ceb8f53c181..4e831aea79d13047023e5380fdd75e3d24ecba43 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c @@ -518,6 +518,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev, u8 cmd_no, int channel) { struct kvaser_cmd *cmd; + size_t cmd_len; int err; cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); @@ -525,6 +526,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev, return -ENOMEM; cmd->header.cmd_no = cmd_no; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); if (channel < 0) { kvaser_usb_hydra_set_cmd_dest_he (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL); @@ -541,7 +543,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev, kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); - err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); if (err) goto end; @@ -557,6 +559,7 @@ kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv, { struct kvaser_cmd *cmd; struct kvaser_usb *dev = priv->dev; + size_t cmd_len; int err; cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC); @@ -564,14 +567,14 @@ kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv, return -ENOMEM; cmd->header.cmd_no = cmd_no; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); kvaser_usb_hydra_set_cmd_dest_he (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); - err = kvaser_usb_send_cmd_async(priv, cmd, - kvaser_usb_hydra_cmd_size(cmd)); + err = 
kvaser_usb_send_cmd_async(priv, cmd, cmd_len); if (err) kfree(cmd); @@ -715,6 +718,7 @@ static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev, { struct kvaser_usb_dev_card_data *card_data = &dev->card_data; struct kvaser_cmd *cmd; + size_t cmd_len; u32 value = 0; u32 mask = 0; u16 cap_cmd_res; @@ -726,13 +730,14 @@ static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev, return -ENOMEM; cmd->header.cmd_no = CMD_GET_CAPABILITIES_REQ; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); cmd->cap_req.cap_cmd = cpu_to_le16(cap_cmd_req); kvaser_usb_hydra_set_cmd_dest_he(cmd, card_data->hydra.sysdbg_he); kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); - err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); if (err) goto end; @@ -1555,6 +1560,7 @@ static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv, struct kvaser_usb *dev = priv->dev; struct kvaser_usb_net_hydra_priv *hydra = priv->sub_priv; struct kvaser_cmd *cmd; + size_t cmd_len; int err; if (!hydra) @@ -1565,6 +1571,7 @@ static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv, return -ENOMEM; cmd->header.cmd_no = CMD_GET_BUSPARAMS_REQ; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); kvaser_usb_hydra_set_cmd_dest_he (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); kvaser_usb_hydra_set_cmd_transid @@ -1574,7 +1581,7 @@ static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv, reinit_completion(&priv->get_busparams_comp); - err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); if (err) return err; @@ -1601,6 +1608,7 @@ static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev, struct kvaser_cmd *cmd; struct kvaser_usb_net_priv *priv = netdev_priv(netdev); struct kvaser_usb *dev = priv->dev; + size_t cmd_len; int err; cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); @@ -1608,6 +1616,7 @@ static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev, return -ENOMEM; cmd->header.cmd_no = CMD_SET_BUSPARAMS_REQ; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); memcpy(&cmd->set_busparams_req.busparams_nominal, busparams, sizeof(cmd->set_busparams_req.busparams_nominal)); @@ -1616,7 +1625,7 @@ static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev, kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); - err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); kfree(cmd); @@ -1629,6 +1638,7 @@ static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev, struct kvaser_cmd *cmd; struct kvaser_usb_net_priv *priv = netdev_priv(netdev); struct kvaser_usb *dev = priv->dev; + size_t cmd_len; int err; cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); @@ -1636,6 +1646,7 @@ static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev, return -ENOMEM; cmd->header.cmd_no = CMD_SET_BUSPARAMS_FD_REQ; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); memcpy(&cmd->set_busparams_req.busparams_data, busparams, sizeof(cmd->set_busparams_req.busparams_data)); @@ -1653,7 +1664,7 @@ static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev, kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); - err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); 
kfree(cmd); @@ -1781,6 +1792,7 @@ static int kvaser_usb_hydra_get_software_info(struct kvaser_usb *dev) static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev) { struct kvaser_cmd *cmd; + size_t cmd_len; int err; u32 flags; struct kvaser_usb_dev_card_data *card_data = &dev->card_data; @@ -1790,6 +1802,7 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev) return -ENOMEM; cmd->header.cmd_no = CMD_GET_SOFTWARE_DETAILS_REQ; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); cmd->sw_detail_req.use_ext_cmd = 1; kvaser_usb_hydra_set_cmd_dest_he (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL); @@ -1797,7 +1810,7 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev) kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); - err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); if (err) goto end; @@ -1913,6 +1926,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv) { struct kvaser_usb *dev = priv->dev; struct kvaser_cmd *cmd; + size_t cmd_len; int err; if ((priv->can.ctrlmode & @@ -1928,6 +1942,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv) return -ENOMEM; cmd->header.cmd_no = CMD_SET_DRIVERMODE_REQ; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); kvaser_usb_hydra_set_cmd_dest_he (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); kvaser_usb_hydra_set_cmd_transid @@ -1937,7 +1952,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv) else cmd->set_ctrlmode.mode = KVASER_USB_HYDRA_CTRLMODE_NORMAL; - err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); kfree(cmd); return err; diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index ece4c0512ee2de95b183077a5d72743790dbde8c..f42f2f4e4b60e3e72644decd6f2262cbead7a887 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -678,10 +678,10 @@ static int ksz9477_port_fdb_del(struct dsa_switch *ds, int port, ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]); /* clear forwarding port */ - alu_table[2] &= ~BIT(port); + alu_table[1] &= ~BIT(port); /* if there is no port to forward, clear table */ - if ((alu_table[2] & ALU_V_PORT_MAP) == 0) { + if ((alu_table[1] & ALU_V_PORT_MAP) == 0) { alu_table[0] = 0; alu_table[1] = 0; alu_table[2] = 0; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index d5fd49dd25f336aedbe166a8c6bdd74ef6296204..decc1c09a031b4b61e554e08e5480c66734034ae 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -524,19 +524,28 @@ static void xgbe_disable_vxlan(struct xgbe_prv_data *pdata) netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration disabled\n"); } +static unsigned int xgbe_get_fc_queue_count(struct xgbe_prv_data *pdata) +{ + unsigned int max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; + + /* From MAC ver 30H the TFCR is per priority, instead of per queue */ + if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30) + return max_q_count; + else + return min_t(unsigned int, pdata->tx_q_count, max_q_count); +} + static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata) { - unsigned int max_q_count, q_count; unsigned int reg, reg_val; - unsigned int i; + unsigned int i, q_count; /* Clear MTL flow control */ for (i = 0; i < pdata->rx_q_count; i++) 
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0); /* Clear MAC flow control */ - max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; - q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); + q_count = xgbe_get_fc_queue_count(pdata); reg = MAC_Q0TFCR; for (i = 0; i < q_count; i++) { reg_val = XGMAC_IOREAD(pdata, reg); @@ -553,9 +562,8 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata) { struct ieee_pfc *pfc = pdata->pfc; struct ieee_ets *ets = pdata->ets; - unsigned int max_q_count, q_count; unsigned int reg, reg_val; - unsigned int i; + unsigned int i, q_count; /* Set MTL flow control */ for (i = 0; i < pdata->rx_q_count; i++) { @@ -579,8 +587,7 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata) } /* Set MAC flow control */ - max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; - q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); + q_count = xgbe_get_fc_queue_count(pdata); reg = MAC_Q0TFCR; for (i = 0; i < q_count; i++) { reg_val = XGMAC_IOREAD(pdata, reg); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 0c5c1b1556830b0b62449225a2bd9fa0d2dad45d..43fdd111235a66ab17ce66b47ce694f6f7a49937 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -496,6 +496,7 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata, reg |= XGBE_KR_TRAINING_ENABLE; reg |= XGBE_KR_TRAINING_START; XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); + pdata->kr_start_time = jiffies; netif_dbg(pdata, link, pdata->netdev, "KR training initiated\n"); @@ -632,6 +633,8 @@ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata) xgbe_switch_mode(pdata); + pdata->an_result = XGBE_AN_READY; + xgbe_an_restart(pdata); return XGBE_AN_INCOMPAT_LINK; @@ -1275,9 +1278,30 @@ static bool xgbe_phy_aneg_done(struct xgbe_prv_data *pdata) static void xgbe_check_link_timeout(struct xgbe_prv_data *pdata) { unsigned long link_timeout; + unsigned long kr_time; + int wait; link_timeout = pdata->link_check + (XGBE_LINK_TIMEOUT * HZ); if (time_after(jiffies, link_timeout)) { + if ((xgbe_cur_mode(pdata) == XGBE_MODE_KR) && + pdata->phy.autoneg == AUTONEG_ENABLE) { + /* AN restart should not happen while KR training is in progress. + * The while loop ensures no AN restart during KR training, + * waits up to 500ms and AN restart is triggered only if KR + * training is failed. 
+ */ + wait = XGBE_KR_TRAINING_WAIT_ITER; + while (wait--) { + kr_time = pdata->kr_start_time + + msecs_to_jiffies(XGBE_AN_MS_TIMEOUT); + if (time_after(jiffies, kr_time)) + break; + /* AN restart is not required, if AN result is COMPLETE */ + if (pdata->an_result == XGBE_AN_COMPLETE) + return; + usleep_range(10000, 11000); + } + } netif_dbg(pdata, link, pdata->netdev, "AN link timeout\n"); xgbe_phy_config_aneg(pdata); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 3305979a9f7c1fc43dba9efd9cec54d3c1286d07..e0b8f3c4cc0b2117c51ddf02388270f898093745 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -289,6 +289,7 @@ /* Auto-negotiation */ #define XGBE_AN_MS_TIMEOUT 500 #define XGBE_LINK_TIMEOUT 5 +#define XGBE_KR_TRAINING_WAIT_ITER 50 #define XGBE_SGMII_AN_LINK_STATUS BIT(1) #define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3)) @@ -1253,6 +1254,7 @@ struct xgbe_prv_data { unsigned int parallel_detect; unsigned int fec_ability; unsigned long an_start; + unsigned long kr_start_time; enum xgbe_an_mode an_mode; /* I2C support */ diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c index 26746197515fcf5ecb6d6404b489d989bc156f7d..022aebb68f4621d290d70b233f933f450f140192 100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c @@ -228,12 +228,12 @@ static int bgmac_probe(struct bcma_device *core) bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST; bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1; bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY; - if (ci->pkg == BCMA_PKG_ID_BCM47188 || - ci->pkg == BCMA_PKG_ID_BCM47186) { + if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) || + (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) { bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII; bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED; } - if (ci->pkg == BCMA_PKG_ID_BCM5358) + if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358) bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_EPHYRMII; break; case BCMA_CHIP_ID_BCM53573: diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 9960127f612eaf54f79ca06c8f6ac2901b2871ea..bb999e67d7736a02c346d48ff82222876b284bf1 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -890,13 +890,13 @@ static void bgmac_chip_reset_idm_config(struct bgmac *bgmac) if (iost & BGMAC_BCMA_IOST_ATTACHED) { flags = BGMAC_BCMA_IOCTL_SW_CLKEN; - if (!bgmac->has_robosw) + if (bgmac->in_init || !bgmac->has_robosw) flags |= BGMAC_BCMA_IOCTL_SW_RESET; } bgmac_clk_enable(bgmac, flags); } - if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw) + if (iost & BGMAC_BCMA_IOST_ATTACHED && (bgmac->in_init || !bgmac->has_robosw)) bgmac_idm_write(bgmac, BCMA_IOCTL, bgmac_idm_read(bgmac, BCMA_IOCTL) & ~BGMAC_BCMA_IOCTL_SW_RESET); @@ -1490,6 +1490,8 @@ int bgmac_enet_probe(struct bgmac *bgmac) struct net_device *net_dev = bgmac->net_dev; int err; + bgmac->in_init = true; + bgmac_chip_intrs_off(bgmac); net_dev->irq = bgmac->irq; @@ -1542,6 +1544,8 @@ int bgmac_enet_probe(struct bgmac *bgmac) /* Omit FCS from max MTU size */ net_dev->max_mtu = BGMAC_RX_MAX_FRAME_SIZE - ETH_FCS_LEN; + bgmac->in_init = false; + err = register_netdev(bgmac->net_dev); if (err) { dev_err(bgmac->dev, "Cannot register net device\n"); diff --git a/drivers/net/ethernet/broadcom/bgmac.h 
b/drivers/net/ethernet/broadcom/bgmac.h index 351c598a3ec6d110008002c19c12b50dbf494ab1..d1200b27af1eda79f84d4d722ce71c4238a2037f 100644 --- a/drivers/net/ethernet/broadcom/bgmac.h +++ b/drivers/net/ethernet/broadcom/bgmac.h @@ -512,6 +512,8 @@ struct bgmac { int irq; u32 int_mask; + bool in_init; + /* Current MAC state */ int mac_speed; int mac_duplex; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 92f54e333395834ef13a0e82790f35df53133682..6928c0b578abb4878902d0e913507511c801b639 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2854,7 +2854,7 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) static void bnxt_free_tpa_info(struct bnxt *bp) { - int i; + int i, j; for (i = 0; i < bp->rx_nr_rings; i++) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; @@ -2862,8 +2862,10 @@ static void bnxt_free_tpa_info(struct bnxt *bp) kfree(rxr->rx_tpa_idx_map); rxr->rx_tpa_idx_map = NULL; if (rxr->rx_tpa) { - kfree(rxr->rx_tpa[0].agg_arr); - rxr->rx_tpa[0].agg_arr = NULL; + for (j = 0; j < bp->max_tpa; j++) { + kfree(rxr->rx_tpa[j].agg_arr); + rxr->rx_tpa[j].agg_arr = NULL; + } } kfree(rxr->rx_tpa); rxr->rx_tpa = NULL; @@ -2872,14 +2874,13 @@ static void bnxt_free_tpa_info(struct bnxt *bp) static int bnxt_alloc_tpa_info(struct bnxt *bp) { - int i, j, total_aggs = 0; + int i, j; bp->max_tpa = MAX_TPA; if (bp->flags & BNXT_FLAG_CHIP_P5) { if (!bp->max_tpa_v2) return 0; bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5); - total_aggs = bp->max_tpa * MAX_SKB_FRAGS; } for (i = 0; i < bp->rx_nr_rings; i++) { @@ -2893,12 +2894,12 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp) if (!(bp->flags & BNXT_FLAG_CHIP_P5)) continue; - agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL); - rxr->rx_tpa[0].agg_arr = agg; - if (!agg) - return -ENOMEM; - for (j = 1; j < bp->max_tpa; j++) - rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS; + for (j = 0; j < bp->max_tpa; j++) { + agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL); + if (!agg) + return -ENOMEM; + rxr->rx_tpa[j].agg_arr = agg; + } rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map), GFP_KERNEL); if (!rxr->rx_tpa_idx_map) @@ -8761,10 +8762,14 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init) netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc); return rc; } - if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) { + if (tcs && (bp->tx_nr_rings_per_tc * tcs != + bp->tx_nr_rings - bp->tx_nr_rings_xdp)) { netdev_err(bp->dev, "tx ring reservation failure\n"); netdev_reset_tc(bp->dev); - bp->tx_nr_rings_per_tc = bp->tx_nr_rings; + if (bp->tx_nr_rings_xdp) + bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp; + else + bp->tx_nr_rings_per_tc = bp->tx_nr_rings; return -ENOMEM; } return 0; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index e0a6a2e62d23bb1edf3570687e3acd336e489f71..7667cbb5adfd6d38f63b1530a64f605da497a094 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2263,6 +2263,14 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, __func__, p_index, ring->c_index, ring->read_ptr, dma_length_status); + if (unlikely(len > RX_BUF_LENGTH)) { + netif_err(priv, rx_status, dev, "oversized packet\n"); + dev->stats.rx_length_errors++; + dev->stats.rx_errors++; + dev_kfree_skb_any(skb); + goto next; + } + if (unlikely(!(dma_flag & DMA_EOP) 
|| !(dma_flag & DMA_SOP))) { netif_err(priv, rx_status, dev, "dropping fragmented packet!\n"); diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index f9e91304d232788977fd9473dfec2266f1615f82..4b875838a6467066546e291fc9cc7d8771d0bb0d 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -165,15 +165,6 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable) static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) { - u32 reg; - - if (!GENET_IS_V5(priv)) { - /* Speed settings are set in bcmgenet_mii_setup() */ - reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL); - reg |= LED_ACT_SOURCE_MAC; - bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL); - } - if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) fixed_phy_set_link_update(priv->dev->phydev, bcmgenet_fixed_phy_link_update); @@ -206,6 +197,8 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) if (!phy_name) { phy_name = "MoCA"; + if (!GENET_IS_V5(priv)) + port_ctrl |= LED_ACT_SOURCE_MAC; bcmgenet_moca_phy_setup(priv); } break; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index be96116dc2ccbf8bd3b42d22b36d6e0b720e8ab9..613ca6124e3ce71985ce0a69eaf319686971e48d 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -11185,7 +11185,7 @@ static void tg3_reset_task(struct work_struct *work) rtnl_lock(); tg3_full_lock(tp, 0); - if (!netif_running(tp->dev)) { + if (tp->pcierr_recovery || !netif_running(tp->dev)) { tg3_flag_clear(tp, RESET_TASK_PENDING); tg3_full_unlock(tp); rtnl_unlock(); @@ -18179,6 +18179,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, netdev_info(netdev, "PCI I/O error detected\n"); + /* Want to make sure that the reset task doesn't run */ + tg3_reset_task_cancel(tp); + rtnl_lock(); /* Could be second call or maybe we don't have netdev yet */ @@ -18195,9 +18198,6 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, tg3_timer_stop(tp); - /* Want to make sure that the reset task doesn't run */ - tg3_reset_task_cancel(tp); - netif_device_detach(netdev); /* Clean up software state, even if MMIO is blocked */ diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 792c8147c2c4c48cbf3ada365649431814df0be1..e0d62e2513879ca727de3d7f33e7628e76dccdc6 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -1963,7 +1963,6 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev) bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) || skb_is_nonlinear(*skb); int padlen = ETH_ZLEN - (*skb)->len; - int headroom = skb_headroom(*skb); int tailroom = skb_tailroom(*skb); struct sk_buff *nskb; u32 fcs; @@ -1977,9 +1976,6 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev) /* FCS could be appeded to tailroom. */ if (tailroom >= ETH_FCS_LEN) goto add_fcs; - /* FCS could be appeded by moving data to headroom. */ - else if (!cloned && headroom + tailroom >= ETH_FCS_LEN) - padlen = 0; /* No room for FCS, need to reallocate skb. 
*/ else padlen = ETH_FCS_LEN; @@ -1988,10 +1984,7 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev) padlen += ETH_FCS_LEN; } - if (!cloned && headroom + tailroom >= padlen) { - (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len); - skb_set_tail_pointer(*skb, (*skb)->len); - } else { + if (cloned || tailroom < padlen) { nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC); if (!nskb) return -ENOMEM; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 2c60d2a9333086168e18ca2dbd584356b0447a70..9e8a20a94862fb603415eae3bc14ad39e0d03483 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2788,7 +2788,7 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu) struct i40e_pf *pf = vsi->back; if (i40e_enabled_xdp_vsi(vsi)) { - int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + int frame_size = new_mtu + I40E_PACKET_HDR_PAD; if (frame_size > i40e_max_xdp_frame_size(vsi)) return -EINVAL; @@ -12520,6 +12520,8 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev, } br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + if (!br_spec) + return -EINVAL; nla_for_each_nested(attr, br_spec, rem) { __u16 mode; diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 96424748464cc20e397a2f62c75422d287820066..daf155c36bb8ecab20d8c0639f204e4c882dbc92 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -4853,7 +4853,7 @@ static int __init ice_module_init(void) pr_info("%s\n", ice_driver_string); pr_info("%s\n", ice_copyright); - ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME); + ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME); if (!ice_wq) { pr_err("Failed to create workqueue\n"); return -ENOMEM; @@ -5200,15 +5200,12 @@ int ice_vsi_cfg(struct ice_vsi *vsi) { int err; - if (vsi->netdev) { + if (vsi->netdev && vsi->type == ICE_VSI_PF) { ice_set_rx_mode(vsi->netdev); - if (vsi->type != ICE_VSI_LB) { - err = ice_vsi_vlan_setup(vsi); - - if (err) - return err; - } + err = ice_vsi_vlan_setup(vsi); + if (err) + return err; } ice_vsi_cfg_dcb_rings(vsi); @@ -5267,7 +5264,7 @@ static int ice_up_complete(struct ice_vsi *vsi) if (vsi->port_info && (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) && - vsi->netdev) { + vsi->netdev && vsi->type == ICE_VSI_PF) { ice_print_link_msg(vsi, true); netif_tx_start_all_queues(vsi->netdev); netif_carrier_on(vsi->netdev); @@ -5277,7 +5274,9 @@ static int ice_up_complete(struct ice_vsi *vsi) * set the baseline so counters are ready when interface is up */ ice_update_eth_stats(vsi); - ice_service_task_schedule(pf); + + if (vsi->type == ICE_VSI_PF) + ice_service_task_schedule(pf); return 0; } diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index 4ab46eee3d938fbe10e3058ee22facf515520f7c..ef53f7665b58c78cb4b258ffbd01d7994ae740c5 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -134,10 +134,12 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp, * * We need to convert the system time value stored in the RX/TXSTMP registers * into a hwtstamp which can be used by the upper level timestamping functions. + * + * Returns 0 on success. 
**/ -static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter, - struct skb_shared_hwtstamps *hwtstamps, - u64 systim) +static int igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamps, + u64 systim) { switch (adapter->hw.mac.type) { case igc_i225: @@ -147,8 +149,9 @@ static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter, systim & 0xFFFFFFFF); break; default: - break; + return -EINVAL; } + return 0; } /** @@ -372,7 +375,8 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter) regval = rd32(IGC_TXSTMPL); regval |= (u64)rd32(IGC_TXSTMPH) << 32; - igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval); + if (igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval)) + return; switch (adapter->link_speed) { case SPEED_10: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 93828720da57e50fdd7bd7a135a607e8109bbc89..cc1d9241e1cb42f177cd617edd4b4d4e4b5e24f4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -67,6 +67,8 @@ #define IXGBE_RXBUFFER_4K 4096 #define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */ +#define IXGBE_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)) + /* Attempt to maximize the headroom available for incoming frames. We * use a 2K buffer for receives and need 1536/1534 to store the data for * the frame. This leaves us with 512 bytes of room. From that we need diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 7e5c6def0698ac3dcbab62512661f536425c1bac..8f20901b9ef5d4c6f515e02ce38ff51d3552702e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6728,6 +6728,18 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) ixgbe_free_rx_resources(adapter->rx_ring[i]); } +/** + * ixgbe_max_xdp_frame_size - returns the maximum allowed frame size for XDP + * @adapter: device handle, pointer to adapter + */ +static int ixgbe_max_xdp_frame_size(struct ixgbe_adapter *adapter) +{ + if (PAGE_SIZE >= 8192 || adapter->flags2 & IXGBE_FLAG2_RX_LEGACY) + return IXGBE_RXBUFFER_2K; + else + return IXGBE_RXBUFFER_3K; +} + /** * ixgbe_change_mtu - Change the Maximum Transfer Unit * @netdev: network interface device structure @@ -6739,18 +6751,12 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - if (adapter->xdp_prog) { - int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + - VLAN_HLEN; - int i; - - for (i = 0; i < adapter->num_rx_queues; i++) { - struct ixgbe_ring *ring = adapter->rx_ring[i]; + if (ixgbe_enabled_xdp_adapter(adapter)) { + int new_frame_size = new_mtu + IXGBE_PKT_HDR_PAD; - if (new_frame_size > ixgbe_rx_bufsz(ring)) { - e_warn(probe, "Requested MTU size is not supported with XDP\n"); - return -EINVAL; - } + if (new_frame_size > ixgbe_max_xdp_frame_size(adapter)) { + e_warn(probe, "Requested MTU size is not supported with XDP\n"); + return -EINVAL; } } diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 217dc67c48fa247192b24f3de609fcf684bcff39..a8319295f1ab201e4627621456353b217870fab4 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -354,7 +354,8 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode, mcr_cur = 
mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); mcr_new = mcr_cur; mcr_new |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE | - MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK; + MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK | + MAC_MCR_RX_FIFO_CLR_DIS; /* Only update control register when needed! */ if (mcr_new != mcr_cur) diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 54a7cd93cc0febb8314cf847a5a69d3b30f7508f..0ca3223ad54573c753a6a74809a41f9fe6b4be77 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -339,6 +339,7 @@ #define MAC_MCR_FORCE_MODE BIT(15) #define MAC_MCR_TX_EN BIT(14) #define MAC_MCR_RX_EN BIT(13) +#define MAC_MCR_RX_FIFO_CLR_DIS BIT(12) #define MAC_MCR_BACKOFF_EN BIT(9) #define MAC_MCR_BACKPR_EN BIT(8) #define MAC_MCR_FORCE_RX_FC BIT(5) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index f800e1ca5ba6263e21d9f9ab4743ee550e458bae..0a011a41c039ecd77027fe0e0d68e7d50f22d3a3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -64,6 +64,7 @@ static int mlx5_query_mtrc_caps(struct mlx5_fw_tracer *tracer) MLX5_GET(mtrc_cap, out, num_string_trace); tracer->str_db.num_string_db = MLX5_GET(mtrc_cap, out, num_string_db); tracer->owner = !!MLX5_GET(mtrc_cap, out, trace_owner); + tracer->str_db.loaded = false; for (i = 0; i < tracer->str_db.num_string_db; i++) { mtrc_cap_sp = MLX5_ADDR_OF(mtrc_cap, out, string_db_param[i]); @@ -602,7 +603,7 @@ static int mlx5_tracer_handle_string_trace(struct mlx5_fw_tracer *tracer, } else { cur_string = mlx5_tracer_message_get(tracer, tracer_event); if (!cur_string) { - pr_debug("%s Got string event for unknown string tdsm: %d\n", + pr_debug("%s Got string event for unknown string tmsn: %d\n", __func__, tracer_event->string_event.tmsn); return -1; } @@ -756,6 +757,7 @@ static int mlx5_fw_tracer_set_mtrc_conf(struct mlx5_fw_tracer *tracer) if (err) mlx5_core_warn(dev, "FWTracer: Failed to set tracer configurations %d\n", err); + tracer->buff.consumer_index = 0; return err; } @@ -820,7 +822,6 @@ static void mlx5_fw_tracer_ownership_change(struct work_struct *work) mlx5_core_dbg(tracer->dev, "FWTracer: ownership changed, current=(%d)\n", tracer->owner); if (tracer->owner) { tracer->owner = false; - tracer->buff.consumer_index = 0; return; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c index cac8f085b16d74fabff10d162ea1625cfb9b9c0a..2cf7f0fc170b83834810dd7ae78411771f153ba2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c @@ -166,16 +166,16 @@ static inline int mlx5_ptys_rate_enum_to_int(enum mlx5_ptys_rate rate) } } -static int mlx5i_get_speed_settings(u16 ib_link_width_oper, u16 ib_proto_oper) +static u32 mlx5i_get_speed_settings(u16 ib_link_width_oper, u16 ib_proto_oper) { int rate, width; rate = mlx5_ptys_rate_enum_to_int(ib_proto_oper); if (rate < 0) - return -EINVAL; + return SPEED_UNKNOWN; width = mlx5_ptys_width_enum_to_int(ib_link_width_oper); if (width < 0) - return -EINVAL; + return SPEED_UNKNOWN; return rate * width; } @@ -198,16 +198,13 @@ static int mlx5i_get_link_ksettings(struct net_device *netdev, ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); speed 
= mlx5i_get_speed_settings(ib_link_width_oper, ib_proto_oper); - if (speed < 0) - return -EINVAL; + link_ksettings->base.speed = speed; + link_ksettings->base.duplex = speed == SPEED_UNKNOWN ? DUPLEX_UNKNOWN : DUPLEX_FULL; - link_ksettings->base.duplex = DUPLEX_FULL; link_ksettings->base.port = PORT_OTHER; link_ksettings->base.autoneg = AUTONEG_DISABLE; - link_ksettings->base.speed = speed; - return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c index 23361a9ae4fa0c77af9e7e34d05058b64a368556..6dc83e871cd76b2c4bfc4abd5afc2cf36bcdd3d0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c @@ -105,6 +105,7 @@ int mlx5_geneve_tlv_option_add(struct mlx5_geneve *geneve, struct geneve_opt *op geneve->opt_type = opt->type; geneve->obj_id = res; geneve->refcount++; + res = 0; } unlock: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 29bc1df28aeb30f51c7bdd48a2e3f860148e8b97..112eaef186e19051db03498ae13325ce0d7a7caf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1642,7 +1642,7 @@ static void mlx5_core_verify_params(void) } } -static int __init init(void) +static int __init mlx5_init(void) { int err; @@ -1667,7 +1667,7 @@ static int __init init(void) return err; } -static void __exit cleanup(void) +static void __exit mlx5_cleanup(void) { #ifdef CONFIG_MLX5_CORE_EN mlx5e_cleanup(); @@ -1676,5 +1676,5 @@ static void __exit cleanup(void) mlx5_unregister_debugfs(); } -module_init(init); -module_exit(cleanup); +module_init(mlx5_init); +module_exit(mlx5_cleanup); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index a44a2bad5bbb519458448296edd74559dc81c776..1ea71f06fdb1c8ed60e886cbf0f8d4abb7e48b4e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -216,7 +216,8 @@ static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u32 function) n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask)); if (n >= MLX5_NUM_4K_IN_PAGE) { - mlx5_core_warn(dev, "alloc 4k bug\n"); + mlx5_core_warn(dev, "alloc 4k bug: fw page = 0x%llx, n = %u, bitmask: %lu, max num of 4K pages: %d\n", + fp->addr, n, fp->bitmask, MLX5_NUM_4K_IN_PAGE); return -ENOENT; } clear_bit(n, &fp->bitmask); diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index b221b83ec5a6f4a438502ea14f2aea5a5bef5e36..e56e540ce05c366a20d2c027510ba77891c4e3d6 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -468,6 +468,18 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress, flow_rule_match_control(rule, &match); } + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + + flow_rule_match_vlan(rule, &match); + filter->key_type = OCELOT_VCAP_KEY_ANY; + filter->vlan.vid.value = match.key->vlan_id; + filter->vlan.vid.mask = match.mask->vlan_id; + filter->vlan.pcp.value[0] = match.key->vlan_priority; + filter->vlan.pcp.mask[0] = match.mask->vlan_priority; + match_protocol = false; + } + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { struct flow_match_eth_addrs match; @@ -600,18 +612,6 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress, match_protocol = 
false; } - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { - struct flow_match_vlan match; - - flow_rule_match_vlan(rule, &match); - filter->key_type = OCELOT_VCAP_KEY_ANY; - filter->vlan.vid.value = match.key->vlan_id; - filter->vlan.vid.mask = match.mask->vlan_id; - filter->vlan.pcp.value[0] = match.key->vlan_priority; - filter->vlan.pcp.mask[0] = match.mask->vlan_priority; - match_protocol = false; - } - finished_key_parsing: if (match_protocol && proto != ETH_P_ALL) { if (filter->block_id == VCAP_ES0) { diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index cb12d0171517e8a154ed1e488fff9a850cb23ee8..fcd4213c99b83bfaecb4d917543479fb857718d9 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -257,6 +257,7 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq) .oper = IONIC_Q_ENABLE, }, }; + int ret; idev = &lif->ionic->idev; dev = lif->ionic->dev; @@ -264,16 +265,24 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq) dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n", ctx.cmd.q_control.index, ctx.cmd.q_control.type); + if (qcq->flags & IONIC_QCQ_F_INTR) + ionic_intr_clean(idev->intr_ctrl, qcq->intr.index); + + ret = ionic_adminq_post_wait(lif, &ctx); + if (ret) + return ret; + + if (qcq->napi.poll) + napi_enable(&qcq->napi); + if (qcq->flags & IONIC_QCQ_F_INTR) { irq_set_affinity_hint(qcq->intr.vector, &qcq->intr.affinity_mask); - napi_enable(&qcq->napi); - ionic_intr_clean(idev->intr_ctrl, qcq->intr.index); ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, IONIC_INTR_MASK_CLEAR); } - return ionic_adminq_post_wait(lif, &ctx); + return 0; } static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw) diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index d210632676d32ff97e8df7326dc2e336adb771ed..a632de208a7dc6a91e78505134da5c9c93660eb8 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -1456,7 +1456,12 @@ int qede_poll(struct napi_struct *napi, int budget) rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) && qede_has_rx_work(fp->rxq)) ? 
qede_rx_int(fp, budget) : 0; - if (rx_work_done < budget) { + + if (fp->xdp_xmit & QEDE_XDP_REDIRECT) + xdp_do_flush(); + + /* Handle case where we are called by netpoll with a budget of 0 */ + if (rx_work_done < budget || !budget) { if (!qede_poll_is_more_work(fp)) { napi_complete_done(napi, rx_work_done); @@ -1474,9 +1479,6 @@ int qede_poll(struct napi_struct *napi, int budget) qede_update_tx_producer(fp->xdp_tx); } - if (fp->xdp_xmit & QEDE_XDP_REDIRECT) - xdp_do_flush_map(); - return rx_work_done; } diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 9ec6d63691aab4926845fc2365fdcc37d2103146..410ccd28f6531b7fb4cec13e6680fb4c25375f9d 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -736,14 +736,14 @@ static void ravb_error_interrupt(struct net_device *ndev) ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS); if (eis & EIS_QFS) { ris2 = ravb_read(ndev, RIS2); - ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED), + ravb_write(ndev, ~(RIS2_QFF0 | RIS2_QFF1 | RIS2_RFFF | RIS2_RESERVED), RIS2); /* Receive Descriptor Empty int */ if (ris2 & RIS2_QFF0) priv->stats[RAVB_BE].rx_over_errors++; - /* Receive Descriptor Empty int */ + /* Receive Descriptor Empty int */ if (ris2 & RIS2_QFF1) priv->stats[RAVB_NC].rx_over_errors++; diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 7183080763417154c4e25d177b5122303c2da428..29c8d2c990044975ef6b7008d01b886cf72867fc 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -1047,8 +1047,11 @@ static int efx_pci_probe_post_io(struct efx_nic *efx) /* Determine netdevice features */ net_dev->features |= (efx->type->offload_features | NETIF_F_SG | NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_RXALL); - if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) + if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) { net_dev->features |= NETIF_F_TSO6; + if (efx_has_cap(efx, TX_TSO_V2_ENCAP)) + net_dev->hw_enc_features |= NETIF_F_TSO6; + } /* Check whether device supports TSO */ if (!efx->type->tso_versions || !efx->type->tso_versions(efx)) net_dev->features &= ~NETIF_F_ALL_TSO; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index bfc4a92f1d92b8c9cf6fa94b27d9ab8a7f724b37..78be62ecc9a9a0f90e8f8d1d42b90a494a8fc693 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -505,6 +505,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev) plat_dat->has_gmac4 = 1; plat_dat->pmt = 1; plat_dat->tso_en = of_property_read_bool(np, "snps,tso"); + if (of_device_is_compatible(np, "qcom,qcs404-ethqos")) + plat_dat->rx_clk_runs_in_lpi = 1; ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c index de5255b951e1475004df387b5e7f750397a5be69..d1b8b51bf6ad9b8324ab1f6e028c834c2fef8337 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c @@ -520,9 +520,9 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index, return 0; } - val |= PPSCMDx(index, 0x2); val |= TRGTMODSELx(index, 0x2); val |= PPSEN0; + writel(val, ioaddr + MAC_PPS_CONTROL); writel(cfg->start.tv_sec, ioaddr + MAC_PPSx_TARGET_TIME_SEC(index)); @@ -547,6 +547,7 @@ int 
dwmac5_flex_pps_config(void __iomem *ioaddr, int index, writel(period - 1, ioaddr + MAC_PPSx_WIDTH(index)); /* Finally, activate it */ + val |= PPSCMDx(index, 0x2); writel(val, ioaddr + MAC_PPS_CONTROL); return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 14ea0168b548db22e9aeb1232bb4a6ae82f09bf5..04c59102a286323666fda4a51ad02b2639e9dd71 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1058,7 +1058,8 @@ static void stmmac_mac_link_up(struct phylink_config *config, stmmac_mac_set(priv, priv->ioaddr, true); if (phy && priv->dma_cap.eee) { - priv->eee_active = phy_init_eee(phy, 1) >= 0; + priv->eee_active = + phy_init_eee(phy, !priv->plat->rx_clk_runs_in_lpi) >= 0; priv->eee_enabled = stmmac_eee_init(priv); priv->tx_lpi_enabled = priv->eee_enabled; stmmac_set_eee_pls(priv, priv->hw, true); @@ -1125,6 +1126,11 @@ static int stmmac_init_phy(struct net_device *dev) int addr = priv->plat->phy_addr; struct phy_device *phydev; + if (addr < 0) { + netdev_err(priv->dev, "no phy found\n"); + return -ENODEV; + } + phydev = mdiobus_get_phy(priv->mii, addr); if (!phydev) { netdev_err(priv->dev, "no phy at addr %d\n", addr); @@ -1139,6 +1145,7 @@ static int stmmac_init_phy(struct net_device *dev) phylink_ethtool_get_wol(priv->phylink, &wol); device_set_wakeup_capable(priv->device, !!wol.supported); + device_set_wakeup_enable(priv->device, !!wol.wolopts); } return ret; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 1ed74cfb61fc568cd9c558224da605e0835bbdb6..f02ce09020fbccf36abab2d891126397fe28ce72 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -559,7 +559,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst"); plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode"); - if (plat->force_thresh_dma_mode) { + if (plat->force_thresh_dma_mode && plat->force_sf_dma_mode) { plat->force_sf_dma_mode = 0; dev_warn(&pdev->dev, "force_sf_dma_mode is ignored if force_thresh_dma_mode is set.\n"); diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 059d68d48f1e9c5f5cfb7582950ffffa36a6dca2..4074310abcff4fedbadba6c858adbe2533f04b8d 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -426,9 +426,7 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common, writel(common->rx_flow_id_base, host_p->port_base + AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET); /* en tx crc offload */ - if (features & NETIF_F_HW_CSUM) - writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN, - host_p->port_base + AM65_CPSW_P0_REG_CTL); + writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN, host_p->port_base + AM65_CPSW_P0_REG_CTL); am65_cpsw_nuss_set_p0_ptype(common); @@ -1369,31 +1367,6 @@ static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev, stats->tx_dropped = dev->stats.tx_dropped; } -static int am65_cpsw_nuss_ndo_slave_set_features(struct net_device *ndev, - netdev_features_t features) -{ - struct am65_cpsw_common *common = am65_ndev_to_common(ndev); - netdev_features_t changes = features ^ ndev->features; - struct am65_cpsw_host *host_p; - - host_p = am65_common_get_host(common); - - if (changes & NETIF_F_HW_CSUM) { - 
bool enable = !!(features & NETIF_F_HW_CSUM); - - dev_info(common->dev, "Turn %s tx-checksum-ip-generic\n", - enable ? "ON" : "OFF"); - if (enable) - writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN, - host_p->port_base + AM65_CPSW_P0_REG_CTL); - else - writel(0, - host_p->port_base + AM65_CPSW_P0_REG_CTL); - } - - return 0; -} - static const struct net_device_ops am65_cpsw_nuss_netdev_ops_2g = { .ndo_open = am65_cpsw_nuss_ndo_slave_open, .ndo_stop = am65_cpsw_nuss_ndo_slave_stop, @@ -1406,7 +1379,6 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops_2g = { .ndo_vlan_rx_add_vid = am65_cpsw_nuss_ndo_slave_add_vid, .ndo_vlan_rx_kill_vid = am65_cpsw_nuss_ndo_slave_kill_vid, .ndo_do_ioctl = am65_cpsw_nuss_ndo_slave_ioctl, - .ndo_set_features = am65_cpsw_nuss_ndo_slave_set_features, .ndo_setup_tc = am65_cpsw_qos_ndo_setup_tc, }; @@ -1515,9 +1487,8 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common) tx_chn->tx_chn_name, &tx_cfg); if (IS_ERR(tx_chn->tx_chn)) { - ret = PTR_ERR(tx_chn->tx_chn); - dev_err(dev, "Failed to request tx dma channel %d\n", - ret); + ret = dev_err_probe(dev, PTR_ERR(tx_chn->tx_chn), + "Failed to request tx dma channel\n"); goto err; } @@ -1588,8 +1559,8 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common) rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg); if (IS_ERR(rx_chn->rx_chn)) { - ret = PTR_ERR(rx_chn->rx_chn); - dev_err(dev, "Failed to request rx dma channel %d\n", ret); + ret = dev_err_probe(dev, PTR_ERR(rx_chn->rx_chn), + "Failed to request rx dma channel\n"); goto err; } @@ -1753,13 +1724,14 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) if (ret < 0) { dev_err(dev, "%pOF error reading port_id %d\n", port_np, ret); - return ret; + goto of_node_put; } if (!port_id || port_id > common->port_num) { dev_err(dev, "%pOF has invalid port_id %u %s\n", port_np, port_id, port_np->name); - return -EINVAL; + ret = -EINVAL; + goto of_node_put; } port = am65_common_get_port(common, port_id); @@ -1775,8 +1747,10 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) (AM65_CPSW_NU_FRAM_PORT_OFFSET * (port_id - 1)); port->slave.mac_sl = cpsw_sl_get("am65", dev, port->port_base); - if (IS_ERR(port->slave.mac_sl)) - return PTR_ERR(port->slave.mac_sl); + if (IS_ERR(port->slave.mac_sl)) { + ret = PTR_ERR(port->slave.mac_sl); + goto of_node_put; + } port->disabled = !of_device_is_available(port_np); if (port->disabled) @@ -1787,7 +1761,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) ret = PTR_ERR(port->slave.ifphy); dev_err(dev, "%pOF error retrieving port phy: %d\n", port_np, ret); - return ret; + goto of_node_put; } port->slave.mac_only = @@ -1797,10 +1771,10 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) if (of_phy_is_fixed_link(port_np)) { ret = of_phy_register_fixed_link(port_np); if (ret) { - if (ret != -EPROBE_DEFER) - dev_err(dev, "%pOF failed to register fixed-link phy: %d\n", - port_np, ret); - return ret; + ret = dev_err_probe(dev, ret, + "failed to register fixed-link phy %pOF\n", + port_np); + goto of_node_put; } port->slave.phy_node = of_node_get(port_np); } else { @@ -1811,14 +1785,15 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) if (!port->slave.phy_node) { dev_err(dev, "slave[%d] no phy found\n", port_id); - return -ENODEV; + ret = -ENODEV; + goto of_node_put; } ret = of_get_phy_mode(port_np, &port->slave.phy_if); if (ret) { dev_err(dev, "%pOF read 
phy-mode err %d\n", port_np, ret); - return ret; + goto of_node_put; } mac_addr = of_get_mac_address(port_np); @@ -1835,6 +1810,11 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) of_node_put(node); return 0; + +of_node_put: + of_node_put(port_np); + of_node_put(node); + return ret; } static void am65_cpsw_pcpu_stats_free(void *data) @@ -2090,13 +2070,8 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev) return -ENOMEM; clk = devm_clk_get(dev, "fck"); - if (IS_ERR(clk)) { - ret = PTR_ERR(clk); - - if (ret != -EPROBE_DEFER) - dev_err(dev, "error getting fck clock %d\n", ret); - return ret; - } + if (IS_ERR(clk)) + return dev_err_probe(dev, PTR_ERR(clk), "getting fck clock\n"); common->bus_freq = clk_get_rate(clk); pm_runtime_enable(dev); diff --git a/drivers/net/mdio/mdio-mux-meson-g12a.c b/drivers/net/mdio/mdio-mux-meson-g12a.c index bf86c9c7a288366fe6ff5849764626cc0e6c5ea9..ab863530c9e891b685229f4e68cf95a4b4407992 100644 --- a/drivers/net/mdio/mdio-mux-meson-g12a.c +++ b/drivers/net/mdio/mdio-mux-meson-g12a.c @@ -4,6 +4,7 @@ */ #include +#include #include #include #include @@ -150,6 +151,7 @@ static const struct clk_ops g12a_ephy_pll_ops = { static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv) { + u32 value; int ret; /* Enable the phy clock */ @@ -163,18 +165,25 @@ static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv) /* Initialize ephy control */ writel(EPHY_G12A_ID, priv->regs + ETH_PHY_CNTL0); - writel(FIELD_PREP(PHY_CNTL1_ST_MODE, 3) | - FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) | - FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) | - PHY_CNTL1_CLK_EN | - PHY_CNTL1_CLKFREQ | - PHY_CNTL1_PHY_ENB, - priv->regs + ETH_PHY_CNTL1); + + /* Make sure we get a 0 -> 1 transition on the enable bit */ + value = FIELD_PREP(PHY_CNTL1_ST_MODE, 3) | + FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) | + FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) | + PHY_CNTL1_CLK_EN | + PHY_CNTL1_CLKFREQ; + writel(value, priv->regs + ETH_PHY_CNTL1); writel(PHY_CNTL2_USE_INTERNAL | PHY_CNTL2_SMI_SRC_MAC | PHY_CNTL2_RX_CLK_EPHY, priv->regs + ETH_PHY_CNTL2); + value |= PHY_CNTL1_PHY_ENB; + writel(value, priv->regs + ETH_PHY_CNTL1); + + /* The phy needs a bit of time to power up */ + mdelay(10); + return 0; } diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c index db651649e0b80665d9fdbf40e06e5639af21633c..81412999445d8864201303f2f9fedd2c3e9fd218 100644 --- a/drivers/net/phy/dp83822.c +++ b/drivers/net/phy/dp83822.c @@ -247,7 +247,8 @@ static int dp83822_config_intr(struct phy_device *phydev) DP83822_ENERGY_DET_INT_EN | DP83822_LINK_QUAL_INT_EN); - if (!dp83822->fx_enabled) + /* Private data pointer is NULL on DP83825/26 */ + if (!dp83822 || !dp83822->fx_enabled) misr_status |= DP83822_ANEG_COMPLETE_INT_EN | DP83822_DUP_MODE_CHANGE_INT_EN | DP83822_SPEED_CHANGED_INT_EN; @@ -267,7 +268,8 @@ static int dp83822_config_intr(struct phy_device *phydev) DP83822_PAGE_RX_INT_EN | DP83822_EEE_ERROR_CHANGE_INT_EN); - if (!dp83822->fx_enabled) + /* Private data pointer is NULL on DP83825/26 */ + if (!dp83822 || !dp83822->fx_enabled) misr_status |= DP83822_ANEG_ERR_INT_EN | DP83822_WOL_PKT_INT_EN; diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 77ba6c3c7a0904714a3e261412804066f57e9b3b..e9303be486556580506cf84611c1f643b6967178 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -108,7 +108,12 @@ EXPORT_SYMBOL(mdiobus_unregister_device); struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int 
addr) { - struct mdio_device *mdiodev = bus->mdio_map[addr]; + struct mdio_device *mdiodev; + + if (addr < 0 || addr >= ARRAY_SIZE(bus->mdio_map)) + return NULL; + + mdiodev = bus->mdio_map[addr]; if (!mdiodev) return NULL; diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c index e8f2ca625837f3e8abaed3274bab41cf74c6b552..39151ec6f65e23c90b3cab6eef8a6d7e64a6a97f 100644 --- a/drivers/net/phy/meson-gxl.c +++ b/drivers/net/phy/meson-gxl.c @@ -235,6 +235,8 @@ static struct phy_driver meson_gxl_phy[] = { .config_intr = meson_gxl_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, + .read_mmd = genphy_read_mmd_unsupported, + .write_mmd = genphy_write_mmd_unsupported, }, { PHY_ID_MATCH_EXACT(0x01803301), .name = "Meson G12A Internal PHY", @@ -245,6 +247,8 @@ static struct phy_driver meson_gxl_phy[] = { .config_intr = meson_gxl_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, + .read_mmd = genphy_read_mmd_unsupported, + .write_mmd = genphy_write_mmd_unsupported, }, }; diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c index a644e8e5071c3636d663c4a4181d5664f8a1cc06..375bbd60b38af694ae29f5e0193875014fb75443 100644 --- a/drivers/net/phy/microchip.c +++ b/drivers/net/phy/microchip.c @@ -326,6 +326,37 @@ static int lan88xx_config_aneg(struct phy_device *phydev) return genphy_config_aneg(phydev); } +static void lan88xx_link_change_notify(struct phy_device *phydev) +{ + int temp; + + /* At forced 100 F/H mode, chip may fail to set mode correctly + * when cable is switched between long(~50+m) and short one. + * As workaround, set to 10 before setting to 100 + * at forced 100 F/H mode. + */ + if (!phydev->autoneg && phydev->speed == 100) { + /* disable phy interrupt */ + temp = phy_read(phydev, LAN88XX_INT_MASK); + temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_; + phy_write(phydev, LAN88XX_INT_MASK, temp); + + temp = phy_read(phydev, MII_BMCR); + temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000); + phy_write(phydev, MII_BMCR, temp); /* set to 10 first */ + temp |= BMCR_SPEED100; + phy_write(phydev, MII_BMCR, temp); /* set to 100 later */ + + /* clear pending interrupt generated while workaround */ + temp = phy_read(phydev, LAN88XX_INT_STS); + + /* enable phy interrupt back */ + temp = phy_read(phydev, LAN88XX_INT_MASK); + temp |= LAN88XX_INT_MASK_MDINTPIN_EN_; + phy_write(phydev, LAN88XX_INT_MASK, temp); + } +} + static struct phy_driver microchip_phy_driver[] = { { .phy_id = 0x0007c130, @@ -339,6 +370,7 @@ static struct phy_driver microchip_phy_driver[] = { .config_init = lan88xx_config_init, .config_aneg = lan88xx_config_aneg, + .link_change_notify = lan88xx_link_change_notify, .ack_interrupt = lan88xx_phy_ack_interrupt, .config_intr = lan88xx_phy_config_intr, diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 3ef5aa6b72a7e0ae079c754c0f8377319e2f69dd..e771e0e8a9bc68f42c251c7c836df3eee9cc6fbb 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -2833,8 +2833,6 @@ static int phy_probe(struct device *dev) if (phydrv->flags & PHY_IS_INTERNAL) phydev->is_internal = true; - mutex_lock(&phydev->lock); - /* Deassert the reset signal */ phy_device_reset(phydev, 0); @@ -2903,12 +2901,10 @@ static int phy_probe(struct device *dev) phydev->state = PHY_READY; out: - /* Assert the reset signal */ + /* Re-assert the reset signal on error */ if (err) phy_device_reset(phydev, 1); - mutex_unlock(&phydev->lock); - return err; } @@ -2918,9 +2914,7 @@ static int phy_remove(struct device *dev) 
cancel_delayed_work_sync(&phydev->state_queue); - mutex_lock(&phydev->lock); phydev->state = PHY_DOWN; - mutex_unlock(&phydev->lock); sfp_bus_del_upstream(phydev->sfp_bus); phydev->sfp_bus = NULL; diff --git a/drivers/net/tap.c b/drivers/net/tap.c index d9018d9fe3106c2d60ba231e1215f864530a1dc2..2c9ae02ada3e0565d6fb535fc41929b52acaf3f1 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -523,7 +523,7 @@ static int tap_open(struct inode *inode, struct file *file) q->sock.state = SS_CONNECTED; q->sock.file = file; q->sock.ops = &tap_socket_ops; - sock_init_data_uid(&q->sock, &q->sk, inode->i_uid); + sock_init_data_uid(&q->sock, &q->sk, current_fsuid()); q->sk.sk_write_space = tap_sock_write_space; q->sk.sk_destruct = tap_sock_destruct; q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index f1d46aea8a2ba97119c3af88be8fd94566a60132..0e70877c932c7129ced008834cda3ed72d2dc347 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -3457,7 +3457,7 @@ static int tun_chr_open(struct inode *inode, struct file * file) tfile->socket.file = file; tfile->socket.ops = &tun_socket_ops; - sock_init_data_uid(&tfile->socket, &tfile->sk, inode->i_uid); + sock_init_data_uid(&tfile->socket, &tfile->sk, current_fsuid()); tfile->sk.sk_write_space = tun_sock_write_space; tfile->sk.sk_sndbuf = INT_MAX; diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c index fc5895f85cee2f8256661f37de0802054826d421..a552bb1665b8a05a8918b7dc8c5dd752774d4f5d 100644 --- a/drivers/net/usb/kalmia.c +++ b/drivers/net/usb/kalmia.c @@ -65,8 +65,8 @@ kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len, init_msg, init_msg_len, &act_len, KALMIA_USB_TIMEOUT); if (status != 0) { netdev_err(dev->net, - "Error sending init packet. Status %i, length %i\n", - status, act_len); + "Error sending init packet. Status %i\n", + status); return status; } else if (act_len != init_msg_len) { @@ -83,8 +83,8 @@ kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len, if (status != 0) netdev_err(dev->net, - "Error receiving init result. Status %i, length %i\n", - status, act_len); + "Error receiving init result. 
Status %i\n", + status); else if (act_len != expected_len) netdev_err(dev->net, "Unexpected init result length: %i\n", act_len); diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 6f7b70522d926b610d273a36a57e83efa032f15a..667984efeb3be21206b316187acc152b0da9f717 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -824,20 +824,19 @@ static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset, u32 length, u8 *data) { int i; - int ret; u32 buf; unsigned long timeout; - ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf); + lan78xx_read_reg(dev, OTP_PWR_DN, &buf); if (buf & OTP_PWR_DN_PWRDN_N_) { /* clear it and wait to be cleared */ - ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0); + lan78xx_write_reg(dev, OTP_PWR_DN, 0); timeout = jiffies + HZ; do { usleep_range(1, 10); - ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf); + lan78xx_read_reg(dev, OTP_PWR_DN, &buf); if (time_after(jiffies, timeout)) { netdev_warn(dev->net, "timeout on OTP_PWR_DN"); @@ -847,18 +846,18 @@ static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset, } for (i = 0; i < length; i++) { - ret = lan78xx_write_reg(dev, OTP_ADDR1, + lan78xx_write_reg(dev, OTP_ADDR1, ((offset + i) >> 8) & OTP_ADDR1_15_11); - ret = lan78xx_write_reg(dev, OTP_ADDR2, + lan78xx_write_reg(dev, OTP_ADDR2, ((offset + i) & OTP_ADDR2_10_3)); - ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_); - ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_); + lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_); + lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_); timeout = jiffies + HZ; do { udelay(1); - ret = lan78xx_read_reg(dev, OTP_STATUS, &buf); + lan78xx_read_reg(dev, OTP_STATUS, &buf); if (time_after(jiffies, timeout)) { netdev_warn(dev->net, "timeout on OTP_STATUS"); @@ -866,7 +865,7 @@ static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset, } } while (buf & OTP_STATUS_BUSY_); - ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf); + lan78xx_read_reg(dev, OTP_RD_DATA, &buf); data[i] = (u8)(buf & 0xFF); } @@ -878,20 +877,19 @@ static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset, u32 length, u8 *data) { int i; - int ret; u32 buf; unsigned long timeout; - ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf); + lan78xx_read_reg(dev, OTP_PWR_DN, &buf); if (buf & OTP_PWR_DN_PWRDN_N_) { /* clear it and wait to be cleared */ - ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0); + lan78xx_write_reg(dev, OTP_PWR_DN, 0); timeout = jiffies + HZ; do { udelay(1); - ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf); + lan78xx_read_reg(dev, OTP_PWR_DN, &buf); if (time_after(jiffies, timeout)) { netdev_warn(dev->net, "timeout on OTP_PWR_DN completion"); @@ -901,21 +899,21 @@ static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset, } /* set to BYTE program mode */ - ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_); + lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_); for (i = 0; i < length; i++) { - ret = lan78xx_write_reg(dev, OTP_ADDR1, + lan78xx_write_reg(dev, OTP_ADDR1, ((offset + i) >> 8) & OTP_ADDR1_15_11); - ret = lan78xx_write_reg(dev, OTP_ADDR2, + lan78xx_write_reg(dev, OTP_ADDR2, ((offset + i) & OTP_ADDR2_10_3)); - ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]); - ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_); - ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_); + lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]); + lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_); + lan78xx_write_reg(dev, OTP_CMD_GO, 
OTP_CMD_GO_GO_); timeout = jiffies + HZ; do { udelay(1); - ret = lan78xx_read_reg(dev, OTP_STATUS, &buf); + lan78xx_read_reg(dev, OTP_STATUS, &buf); if (time_after(jiffies, timeout)) { netdev_warn(dev->net, "Timeout on OTP_STATUS completion"); @@ -1040,7 +1038,6 @@ static void lan78xx_deferred_multicast_write(struct work_struct *param) container_of(param, struct lan78xx_priv, set_multicast); struct lan78xx_net *dev = pdata->dev; int i; - int ret; netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n", pdata->rfe_ctl); @@ -1049,14 +1046,14 @@ static void lan78xx_deferred_multicast_write(struct work_struct *param) DP_SEL_VHF_HASH_LEN, pdata->mchash_table); for (i = 1; i < NUM_OF_MAF; i++) { - ret = lan78xx_write_reg(dev, MAF_HI(i), 0); - ret = lan78xx_write_reg(dev, MAF_LO(i), + lan78xx_write_reg(dev, MAF_HI(i), 0); + lan78xx_write_reg(dev, MAF_LO(i), pdata->pfilter_table[i][1]); - ret = lan78xx_write_reg(dev, MAF_HI(i), + lan78xx_write_reg(dev, MAF_HI(i), pdata->pfilter_table[i][0]); } - ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); + lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); } static void lan78xx_set_multicast(struct net_device *netdev) @@ -1126,7 +1123,6 @@ static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex, u16 lcladv, u16 rmtadv) { u32 flow = 0, fct_flow = 0; - int ret; u8 cap; if (dev->fc_autoneg) @@ -1149,10 +1145,10 @@ static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex, (cap & FLOW_CTRL_RX ? "enabled" : "disabled"), (cap & FLOW_CTRL_TX ? "enabled" : "disabled")); - ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow); + lan78xx_write_reg(dev, FCT_FLOW, fct_flow); /* threshold value should be set before enabling flow */ - ret = lan78xx_write_reg(dev, FLOW, flow); + lan78xx_write_reg(dev, FLOW, flow); return 0; } @@ -1673,11 +1669,10 @@ static const struct ethtool_ops lan78xx_ethtool_ops = { static void lan78xx_init_mac_address(struct lan78xx_net *dev) { u32 addr_lo, addr_hi; - int ret; u8 addr[6]; - ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo); - ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi); + lan78xx_read_reg(dev, RX_ADDRL, &addr_lo); + lan78xx_read_reg(dev, RX_ADDRH, &addr_hi); addr[0] = addr_lo & 0xFF; addr[1] = (addr_lo >> 8) & 0xFF; @@ -1710,12 +1705,12 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev) (addr[2] << 16) | (addr[3] << 24); addr_hi = addr[4] | (addr[5] << 8); - ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo); - ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi); + lan78xx_write_reg(dev, RX_ADDRL, addr_lo); + lan78xx_write_reg(dev, RX_ADDRH, addr_hi); } - ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo); - ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_); + lan78xx_write_reg(dev, MAF_LO(0), addr_lo); + lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_); ether_addr_copy(dev->net->dev_addr, addr); } @@ -1848,33 +1843,8 @@ static void lan78xx_remove_mdio(struct lan78xx_net *dev) static void lan78xx_link_status_change(struct net_device *net) { struct phy_device *phydev = net->phydev; - int ret, temp; - - /* At forced 100 F/H mode, chip may fail to set mode correctly - * when cable is switched between long(~50+m) and short one. - * As workaround, set to 10 before setting to 100 - * at forced 100 F/H mode. 
- */ - if (!phydev->autoneg && (phydev->speed == 100)) { - /* disable phy interrupt */ - temp = phy_read(phydev, LAN88XX_INT_MASK); - temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_; - ret = phy_write(phydev, LAN88XX_INT_MASK, temp); - temp = phy_read(phydev, MII_BMCR); - temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000); - phy_write(phydev, MII_BMCR, temp); /* set to 10 first */ - temp |= BMCR_SPEED100; - phy_write(phydev, MII_BMCR, temp); /* set to 100 later */ - - /* clear pending interrupt generated while workaround */ - temp = phy_read(phydev, LAN88XX_INT_STS); - - /* enable phy interrupt back */ - temp = phy_read(phydev, LAN88XX_INT_MASK); - temp |= LAN88XX_INT_MASK_MDINTPIN_EN_; - ret = phy_write(phydev, LAN88XX_INT_MASK, temp); - } + phy_print_status(phydev); } static int irq_map(struct irq_domain *d, unsigned int irq, @@ -1927,14 +1897,13 @@ static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd) struct lan78xx_net *dev = container_of(data, struct lan78xx_net, domain_data); u32 buf; - int ret; /* call register access here because irq_bus_lock & irq_bus_sync_unlock * are only two callbacks executed in non-atomic contex. */ - ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf); + lan78xx_read_reg(dev, INT_EP_CTL, &buf); if (buf != data->irqenable) - ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable); + lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable); mutex_unlock(&data->irq_lock); } @@ -2001,7 +1970,6 @@ static void lan78xx_remove_irq_domain(struct lan78xx_net *dev) static int lan8835_fixup(struct phy_device *phydev) { int buf; - int ret; struct lan78xx_net *dev = netdev_priv(phydev->attached_dev); /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */ @@ -2011,11 +1979,11 @@ static int lan8835_fixup(struct phy_device *phydev) phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf); /* RGMII MAC TXC Delay Enable */ - ret = lan78xx_write_reg(dev, MAC_RGMII_ID, + lan78xx_write_reg(dev, MAC_RGMII_ID, MAC_RGMII_ID_TXC_DELAY_EN_); /* RGMII TX DLL Tune Adjust */ - ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00); + lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00); dev->interface = PHY_INTERFACE_MODE_RGMII_TXID; @@ -2199,28 +2167,27 @@ static int lan78xx_phy_init(struct lan78xx_net *dev) static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size) { - int ret = 0; u32 buf; bool rxenabled; - ret = lan78xx_read_reg(dev, MAC_RX, &buf); + lan78xx_read_reg(dev, MAC_RX, &buf); rxenabled = ((buf & MAC_RX_RXEN_) != 0); if (rxenabled) { buf &= ~MAC_RX_RXEN_; - ret = lan78xx_write_reg(dev, MAC_RX, buf); + lan78xx_write_reg(dev, MAC_RX, buf); } /* add 4 to size for FCS */ buf &= ~MAC_RX_MAX_SIZE_MASK_; buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_); - ret = lan78xx_write_reg(dev, MAC_RX, buf); + lan78xx_write_reg(dev, MAC_RX, buf); if (rxenabled) { buf |= MAC_RX_RXEN_; - ret = lan78xx_write_reg(dev, MAC_RX, buf); + lan78xx_write_reg(dev, MAC_RX, buf); } return 0; @@ -2277,13 +2244,12 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu) int ll_mtu = new_mtu + netdev->hard_header_len; int old_hard_mtu = dev->hard_mtu; int old_rx_urb_size = dev->rx_urb_size; - int ret; /* no second zero-length packet read wanted after mtu-sized packets */ if ((ll_mtu % dev->maxpacket) == 0) return -EDOM; - ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN); + lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN); netdev->mtu = new_mtu; @@ -2306,7 +2272,6 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p) struct lan78xx_net 
*dev = netdev_priv(netdev); struct sockaddr *addr = p; u32 addr_lo, addr_hi; - int ret; if (netif_running(netdev)) return -EBUSY; @@ -2323,12 +2288,12 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p) addr_hi = netdev->dev_addr[4] | netdev->dev_addr[5] << 8; - ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo); - ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi); + lan78xx_write_reg(dev, RX_ADDRL, addr_lo); + lan78xx_write_reg(dev, RX_ADDRH, addr_hi); /* Added to support MAC address changes */ - ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo); - ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_); + lan78xx_write_reg(dev, MAF_LO(0), addr_lo); + lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_); return 0; } @@ -2340,7 +2305,6 @@ static int lan78xx_set_features(struct net_device *netdev, struct lan78xx_net *dev = netdev_priv(netdev); struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); unsigned long flags; - int ret; spin_lock_irqsave(&pdata->rfe_ctl_lock, flags); @@ -2364,7 +2328,7 @@ static int lan78xx_set_features(struct net_device *netdev, spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags); - ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); + lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); return 0; } @@ -3820,7 +3784,6 @@ static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len) static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol) { u32 buf; - int ret; int mask_index; u16 crc; u32 temp_wucsr; @@ -3829,26 +3792,26 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol) const u8 ipv6_multicast[3] = { 0x33, 0x33 }; const u8 arp_type[2] = { 0x08, 0x06 }; - ret = lan78xx_read_reg(dev, MAC_TX, &buf); + lan78xx_read_reg(dev, MAC_TX, &buf); buf &= ~MAC_TX_TXEN_; - ret = lan78xx_write_reg(dev, MAC_TX, buf); - ret = lan78xx_read_reg(dev, MAC_RX, &buf); + lan78xx_write_reg(dev, MAC_TX, buf); + lan78xx_read_reg(dev, MAC_RX, &buf); buf &= ~MAC_RX_RXEN_; - ret = lan78xx_write_reg(dev, MAC_RX, buf); + lan78xx_write_reg(dev, MAC_RX, buf); - ret = lan78xx_write_reg(dev, WUCSR, 0); - ret = lan78xx_write_reg(dev, WUCSR2, 0); - ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL); + lan78xx_write_reg(dev, WUCSR, 0); + lan78xx_write_reg(dev, WUCSR2, 0); + lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL); temp_wucsr = 0; temp_pmt_ctl = 0; - ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl); + lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl); temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_; temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_; for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) - ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0); + lan78xx_write_reg(dev, WUF_CFG(mask_index), 0); mask_index = 0; if (wol & WAKE_PHY) { @@ -3877,30 +3840,30 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol) /* set WUF_CFG & WUF_MASK for IPv4 Multicast */ crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3); - ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), + lan78xx_write_reg(dev, WUF_CFG(mask_index), WUF_CFGX_EN_ | WUF_CFGX_TYPE_MCAST_ | (0 << WUF_CFGX_OFFSET_SHIFT_) | (crc & WUF_CFGX_CRC16_MASK_)); - ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7); - ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0); - ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0); - ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0); + lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7); + lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0); + lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0); + lan78xx_write_reg(dev, 
WUF_MASK3(mask_index), 0); mask_index++; /* for IPv6 Multicast */ crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2); - ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), + lan78xx_write_reg(dev, WUF_CFG(mask_index), WUF_CFGX_EN_ | WUF_CFGX_TYPE_MCAST_ | (0 << WUF_CFGX_OFFSET_SHIFT_) | (crc & WUF_CFGX_CRC16_MASK_)); - ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3); - ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0); - ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0); - ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0); + lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3); + lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0); + lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0); + lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0); mask_index++; temp_pmt_ctl |= PMT_CTL_WOL_EN_; @@ -3921,16 +3884,16 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol) * for packettype (offset 12,13) = ARP (0x0806) */ crc = lan78xx_wakeframe_crc16(arp_type, 2); - ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), + lan78xx_write_reg(dev, WUF_CFG(mask_index), WUF_CFGX_EN_ | WUF_CFGX_TYPE_ALL_ | (0 << WUF_CFGX_OFFSET_SHIFT_) | (crc & WUF_CFGX_CRC16_MASK_)); - ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000); - ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0); - ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0); - ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0); + lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000); + lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0); + lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0); + lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0); mask_index++; temp_pmt_ctl |= PMT_CTL_WOL_EN_; @@ -3938,7 +3901,7 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol) temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_; } - ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr); + lan78xx_write_reg(dev, WUCSR, temp_wucsr); /* when multiple WOL bits are set */ if (hweight_long((unsigned long)wol) > 1) { @@ -3946,16 +3909,16 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol) temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_; temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_; } - ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl); + lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl); /* clear WUPS */ - ret = lan78xx_read_reg(dev, PMT_CTL, &buf); + lan78xx_read_reg(dev, PMT_CTL, &buf); buf |= PMT_CTL_WUPS_MASK_; - ret = lan78xx_write_reg(dev, PMT_CTL, buf); + lan78xx_write_reg(dev, PMT_CTL, buf); - ret = lan78xx_read_reg(dev, MAC_RX, &buf); + lan78xx_read_reg(dev, MAC_RX, &buf); buf |= MAC_RX_RXEN_; - ret = lan78xx_write_reg(dev, MAC_RX, buf); + lan78xx_write_reg(dev, MAC_RX, buf); return 0; } diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c index 17c9c63b8eebbe7c9af38a162bd0a696ca7f96a1..ce7862dac2b75ebee23317fb5618959491ff4c16 100644 --- a/drivers/net/usb/plusb.c +++ b/drivers/net/usb/plusb.c @@ -57,9 +57,7 @@ static inline int pl_vendor_req(struct usbnet *dev, u8 req, u8 val, u8 index) { - return usbnet_read_cmd(dev, req, - USB_DIR_IN | USB_TYPE_VENDOR | - USB_RECIP_DEVICE, + return usbnet_write_cmd(dev, req, USB_TYPE_VENDOR | USB_RECIP_DEVICE, val, index, NULL, 0); } diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c index fce6713e970badda0337a49a518c07d88ecd5fa9..811c8751308c60b03c0c194e5a40f7468b5c7e96 100644 --- a/drivers/net/usb/sr9700.c +++ b/drivers/net/usb/sr9700.c @@ -410,7 +410,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb) /* ignore the CRC length */ len = (skb->data[1] | (skb->data[2] << 8)) 
- 4; - if (len > ETH_FRAME_LEN || len > skb->len) + if (len > ETH_FRAME_LEN || len > skb->len || len < 0) return 0; /* the last packet of current skb */ diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index c942cd6a2c65ee30ced8991e11d5d8729a7eec6a..d533211161366a043af8902f2d853e53dcd36643 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1525,13 +1525,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget) received = virtnet_receive(rq, budget, &xdp_xmit); + if (xdp_xmit & VIRTIO_XDP_REDIR) + xdp_do_flush(); + /* Out of packets? */ if (received < budget) virtqueue_napi_complete(napi, rq->vq, received); - if (xdp_xmit & VIRTIO_XDP_REDIR) - xdp_do_flush(); - if (xdp_xmit & VIRTIO_XDP_TX) { sq = virtnet_xdp_get_sq(vi); if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { @@ -1928,8 +1928,8 @@ static int virtnet_close(struct net_device *dev) cancel_delayed_work_sync(&vi->refill); for (i = 0; i < vi->max_queue_pairs; i++) { - xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq); napi_disable(&vi->rq[i].napi); + xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq); virtnet_napi_tx_disable(&vi->sq[i].napi); } diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 7eac6a3e1cdee1e90de74fa17ce75e899a4bda1c..ae1ae65e7f90a2a02708cefc87db6fa073a8ef09 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -1245,9 +1245,11 @@ static int ucc_hdlc_probe(struct platform_device *pdev) free_dev: free_netdev(dev); undo_uhdlc_init: - iounmap(utdm->siram); + if (utdm) + iounmap(utdm->siram); unmap_si_regs: - iounmap(utdm->si_regs); + if (utdm) + iounmap(utdm->si_regs); free_utdm: if (uhdlc_priv->tsa) kfree(utdm); diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index d2f2898d17b498967e646289bdd61d772d62f528..a66e275af1eb4f6993d289560a62de34836aa2ba 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -712,7 +712,6 @@ struct ath11k_base { enum ath11k_dfs_region dfs_region; #ifdef CONFIG_ATH11K_DEBUGFS struct dentry *debugfs_soc; - struct dentry *debugfs_ath11k; #endif struct ath11k_soc_dp_stats soc_stats; diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c index 1b914e67d314df19e0b65c7f92c6e1907293a28b..196314ab4ff0bc8078ebb07f132e4775d6a6f009 100644 --- a/drivers/net/wireless/ath/ath11k/debugfs.c +++ b/drivers/net/wireless/ath/ath11k/debugfs.c @@ -836,10 +836,6 @@ int ath11k_debugfs_pdev_create(struct ath11k_base *ab) if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) return 0; - ab->debugfs_soc = debugfs_create_dir(ab->hw_params.name, ab->debugfs_ath11k); - if (IS_ERR(ab->debugfs_soc)) - return PTR_ERR(ab->debugfs_soc); - debugfs_create_file("simulate_fw_crash", 0600, ab->debugfs_soc, ab, &fops_simulate_fw_crash); @@ -857,15 +853,51 @@ void ath11k_debugfs_pdev_destroy(struct ath11k_base *ab) int ath11k_debugfs_soc_create(struct ath11k_base *ab) { - ab->debugfs_ath11k = debugfs_create_dir("ath11k", NULL); + struct dentry *root; + bool dput_needed; + char name[64]; + int ret; + + root = debugfs_lookup("ath11k", NULL); + if (!root) { + root = debugfs_create_dir("ath11k", NULL); + if (IS_ERR_OR_NULL(root)) + return PTR_ERR(root); + + dput_needed = false; + } else { + /* a dentry from lookup() needs dput() after we don't use it */ + dput_needed = true; + } + + scnprintf(name, sizeof(name), "%s-%s", ath11k_bus_str(ab->hif.bus), + dev_name(ab->dev)); + + ab->debugfs_soc = 
debugfs_create_dir(name, root); + if (IS_ERR_OR_NULL(ab->debugfs_soc)) { + ret = PTR_ERR(ab->debugfs_soc); + goto out; + } + + ret = 0; - return PTR_ERR_OR_ZERO(ab->debugfs_ath11k); +out: + if (dput_needed) + dput(root); + + return ret; } void ath11k_debugfs_soc_destroy(struct ath11k_base *ab) { - debugfs_remove_recursive(ab->debugfs_ath11k); - ab->debugfs_ath11k = NULL; + debugfs_remove_recursive(ab->debugfs_soc); + ab->debugfs_soc = NULL; + + /* We are not removing ath11k directory on purpose, even if it + * would be empty. This simplifies the directory handling and it's + * a minor cosmetic issue to leave an empty ath11k directory to + * debugfs. + */ } void ath11k_debugfs_fw_stats_init(struct ath11k *ar) diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c index 2e77dca6b1ad62d2c06b1acb6f6311e86f4985bc..578fdc446bc03f8ae7e4126c6f015bc069faed55 100644 --- a/drivers/net/wireless/ath/ath11k/dp_rx.c +++ b/drivers/net/wireless/ath/ath11k/dp_rx.c @@ -3022,6 +3022,7 @@ int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id if (!peer) { ath11k_warn(ab, "failed to find the peer to set up fragment info\n"); spin_unlock_bh(&ab->base_lock); + crypto_free_shash(tfm); return -ENOENT; } diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index f938ac1a4abd4e52d9530c61b25e888ba73e14c8..f521dfa2f19450bd505e76e0c3f6dea43552b7ec 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -244,11 +244,11 @@ static inline void ath9k_skb_queue_complete(struct hif_device_usb *hif_dev, ath9k_htc_txcompletion_cb(hif_dev->htc_handle, skb, txok); if (txok) { - TX_STAT_INC(skb_success); - TX_STAT_ADD(skb_success_bytes, ln); + TX_STAT_INC(hif_dev, skb_success); + TX_STAT_ADD(hif_dev, skb_success_bytes, ln); } else - TX_STAT_INC(skb_failed); + TX_STAT_INC(hif_dev, skb_failed); } } @@ -302,7 +302,7 @@ static void hif_usb_tx_cb(struct urb *urb) hif_dev->tx.tx_buf_cnt++; if (!(hif_dev->tx.flags & HIF_USB_TX_STOP)) __hif_usb_tx(hif_dev); /* Check for pending SKBs */ - TX_STAT_INC(buf_completed); + TX_STAT_INC(hif_dev, buf_completed); spin_unlock(&hif_dev->tx.tx_lock); } @@ -353,7 +353,7 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev) tx_buf->len += tx_buf->offset; __skb_queue_tail(&tx_buf->skb_queue, nskb); - TX_STAT_INC(skb_queued); + TX_STAT_INC(hif_dev, skb_queued); } usb_fill_bulk_urb(tx_buf->urb, hif_dev->udev, @@ -368,11 +368,10 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev) __skb_queue_head_init(&tx_buf->skb_queue); list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf); hif_dev->tx.tx_buf_cnt++; + } else { + TX_STAT_INC(hif_dev, buf_queued); } - if (!ret) - TX_STAT_INC(buf_queued); - return ret; } @@ -515,7 +514,7 @@ static void hif_usb_sta_drain(void *hif_handle, u8 idx) ath9k_htc_txcompletion_cb(hif_dev->htc_handle, skb, false); hif_dev->tx.tx_skb_cnt--; - TX_STAT_INC(skb_failed); + TX_STAT_INC(hif_dev, skb_failed); } } @@ -562,11 +561,11 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev, memcpy(ptr, skb->data, rx_remain_len); rx_pkt_len += rx_remain_len; - hif_dev->rx_remain_len = 0; skb_put(remain_skb, rx_pkt_len); skb_pool[pool_index++] = remain_skb; - + hif_dev->remain_skb = NULL; + hif_dev->rx_remain_len = 0; } else { index = rx_remain_len; } @@ -585,16 +584,21 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev, pkt_len = get_unaligned_le16(ptr + index); pkt_tag = get_unaligned_le16(ptr + 
index + 2); + /* It is supposed that if we have an invalid pkt_tag or + * pkt_len then the whole input SKB is considered invalid + * and dropped; the associated packets already in skb_pool + * are dropped, too. + */ if (pkt_tag != ATH_USB_RX_STREAM_MODE_TAG) { - RX_STAT_INC(skb_dropped); - return; + RX_STAT_INC(hif_dev, skb_dropped); + goto invalid_pkt; } if (pkt_len > 2 * MAX_RX_BUF_SIZE) { dev_err(&hif_dev->udev->dev, "ath9k_htc: invalid pkt_len (%x)\n", pkt_len); - RX_STAT_INC(skb_dropped); - return; + RX_STAT_INC(hif_dev, skb_dropped); + goto invalid_pkt; } pad_len = 4 - (pkt_len & 0x3); @@ -606,11 +610,6 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev, if (index > MAX_RX_BUF_SIZE) { spin_lock(&hif_dev->rx_lock); - hif_dev->rx_remain_len = index - MAX_RX_BUF_SIZE; - hif_dev->rx_transfer_len = - MAX_RX_BUF_SIZE - chk_idx - 4; - hif_dev->rx_pad_len = pad_len; - nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC); if (!nskb) { dev_err(&hif_dev->udev->dev, @@ -618,8 +617,14 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev, spin_unlock(&hif_dev->rx_lock); goto err; } + + hif_dev->rx_remain_len = index - MAX_RX_BUF_SIZE; + hif_dev->rx_transfer_len = + MAX_RX_BUF_SIZE - chk_idx - 4; + hif_dev->rx_pad_len = pad_len; + skb_reserve(nskb, 32); - RX_STAT_INC(skb_allocated); + RX_STAT_INC(hif_dev, skb_allocated); memcpy(nskb->data, &(skb->data[chk_idx+4]), hif_dev->rx_transfer_len); @@ -640,7 +645,7 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev, goto err; } skb_reserve(nskb, 32); - RX_STAT_INC(skb_allocated); + RX_STAT_INC(hif_dev, skb_allocated); memcpy(nskb->data, &(skb->data[chk_idx+4]), pkt_len); skb_put(nskb, pkt_len); @@ -650,11 +655,18 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev, err: for (i = 0; i < pool_index; i++) { - RX_STAT_ADD(skb_completed_bytes, skb_pool[i]->len); + RX_STAT_ADD(hif_dev, skb_completed_bytes, skb_pool[i]->len); ath9k_htc_rx_msg(hif_dev->htc_handle, skb_pool[i], skb_pool[i]->len, USB_WLAN_RX_PIPE); - RX_STAT_INC(skb_completed); + RX_STAT_INC(hif_dev, skb_completed); } + return; +invalid_pkt: + for (i = 0; i < pool_index; i++) { + dev_kfree_skb_any(skb_pool[i]); + RX_STAT_INC(hif_dev, skb_dropped); + } + return; } static void ath9k_hif_usb_rx_cb(struct urb *urb) @@ -1412,8 +1424,6 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface) if (hif_dev->flags & HIF_USB_READY) { ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged); - ath9k_hif_usb_dev_deinit(hif_dev); - ath9k_destroy_wmi(hif_dev->htc_handle->drv_priv); ath9k_htc_hw_free(hif_dev->htc_handle); } diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h index e3d546ef71ddc5bd43af3e8d8eb1d43473415622..237f4ec2cffd7b0c53f8d1b253fe81072cf1ceab 100644 --- a/drivers/net/wireless/ath/ath9k/htc.h +++ b/drivers/net/wireless/ath/ath9k/htc.h @@ -327,14 +327,18 @@ static inline struct ath9k_htc_tx_ctl *HTC_SKB_CB(struct sk_buff *skb) } #ifdef CONFIG_ATH9K_HTC_DEBUGFS -#define __STAT_SAFE(expr) (hif_dev->htc_handle->drv_priv ? 
(expr) : 0) -#define TX_STAT_INC(c) __STAT_SAFE(hif_dev->htc_handle->drv_priv->debug.tx_stats.c++) -#define TX_STAT_ADD(c, a) __STAT_SAFE(hif_dev->htc_handle->drv_priv->debug.tx_stats.c += a) -#define RX_STAT_INC(c) __STAT_SAFE(hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c++) -#define RX_STAT_ADD(c, a) __STAT_SAFE(hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c += a) -#define CAB_STAT_INC priv->debug.tx_stats.cab_queued++ - -#define TX_QSTAT_INC(q) (priv->debug.tx_stats.queue_stats[q]++) +#define __STAT_SAFE(hif_dev, expr) do { ((hif_dev)->htc_handle->drv_priv ? (expr) : 0); } while (0) +#define CAB_STAT_INC(priv) do { ((priv)->debug.tx_stats.cab_queued++); } while (0) +#define TX_QSTAT_INC(priv, q) do { ((priv)->debug.tx_stats.queue_stats[q]++); } while (0) + +#define TX_STAT_INC(hif_dev, c) \ + __STAT_SAFE((hif_dev), (hif_dev)->htc_handle->drv_priv->debug.tx_stats.c++) +#define TX_STAT_ADD(hif_dev, c, a) \ + __STAT_SAFE((hif_dev), (hif_dev)->htc_handle->drv_priv->debug.tx_stats.c += a) +#define RX_STAT_INC(hif_dev, c) \ + __STAT_SAFE((hif_dev), (hif_dev)->htc_handle->drv_priv->debug.skbrx_stats.c++) +#define RX_STAT_ADD(hif_dev, c, a) \ + __STAT_SAFE((hif_dev), (hif_dev)->htc_handle->drv_priv->debug.skbrx_stats.c += a) void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv, struct ath_rx_status *rs); @@ -374,13 +378,13 @@ void ath9k_htc_get_et_stats(struct ieee80211_hw *hw, struct ethtool_stats *stats, u64 *data); #else -#define TX_STAT_INC(c) do { } while (0) -#define TX_STAT_ADD(c, a) do { } while (0) -#define RX_STAT_INC(c) do { } while (0) -#define RX_STAT_ADD(c, a) do { } while (0) -#define CAB_STAT_INC do { } while (0) +#define TX_STAT_INC(hif_dev, c) do { } while (0) +#define TX_STAT_ADD(hif_dev, c, a) do { } while (0) +#define RX_STAT_INC(hif_dev, c) do { } while (0) +#define RX_STAT_ADD(hif_dev, c, a) do { } while (0) -#define TX_QSTAT_INC(c) do { } while (0) +#define CAB_STAT_INC(priv) +#define TX_QSTAT_INC(priv, c) static inline void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv, struct ath_rx_status *rs) diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 07ac88fb1c577c4602d99a7f81cfa59bd5f5d1fd..96a3185a96d75070bf71b07905a085210ac21f1e 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -988,6 +988,8 @@ void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug) ath9k_deinit_device(htc_handle->drv_priv); ath9k_stop_wmi(htc_handle->drv_priv); + ath9k_hif_usb_dealloc_urbs((struct hif_device_usb *)htc_handle->hif_dev); + ath9k_destroy_wmi(htc_handle->drv_priv); ieee80211_free_hw(htc_handle->drv_priv->hw); } } diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index 43a743ec9d9e018493447ca8254a81f754170528..622fc7f170402c63a57def23845d6a2daafaa943 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -106,20 +106,20 @@ static inline enum htc_endpoint_id get_htc_epid(struct ath9k_htc_priv *priv, switch (qnum) { case 0: - TX_QSTAT_INC(IEEE80211_AC_VO); + TX_QSTAT_INC(priv, IEEE80211_AC_VO); epid = priv->data_vo_ep; break; case 1: - TX_QSTAT_INC(IEEE80211_AC_VI); + TX_QSTAT_INC(priv, IEEE80211_AC_VI); epid = priv->data_vi_ep; break; case 2: - TX_QSTAT_INC(IEEE80211_AC_BE); + TX_QSTAT_INC(priv, IEEE80211_AC_BE); epid = priv->data_be_ep; break; case 3: default: - TX_QSTAT_INC(IEEE80211_AC_BK); + TX_QSTAT_INC(priv, 
IEEE80211_AC_BK); epid = priv->data_bk_ep; break; } @@ -323,7 +323,7 @@ static void ath9k_htc_tx_data(struct ath9k_htc_priv *priv, memcpy(tx_fhdr, (u8 *) &tx_hdr, sizeof(tx_hdr)); if (is_cab) { - CAB_STAT_INC; + CAB_STAT_INC(priv); tx_ctl->epid = priv->cab_ep; return; } diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c index ca05b07a45e67c714e58ac5f9c708acfb1b8f1c8..fe62ff668f757b621ac773e457f5c42ce7073e20 100644 --- a/drivers/net/wireless/ath/ath9k/htc_hst.c +++ b/drivers/net/wireless/ath/ath9k/htc_hst.c @@ -391,7 +391,7 @@ static void ath9k_htc_fw_panic_report(struct htc_target *htc_handle, * HTC Messages are handled directly here and the obtained SKB * is freed. * - * Service messages (Data, WMI) passed to the corresponding + * Service messages (Data, WMI) are passed to the corresponding * endpoint RX handlers, which have to free the SKB. */ void ath9k_htc_rx_msg(struct htc_target *htc_handle, @@ -478,6 +478,8 @@ void ath9k_htc_rx_msg(struct htc_target *htc_handle, if (endpoint->ep_callbacks.rx) endpoint->ep_callbacks.rx(endpoint->ep_callbacks.priv, skb, epid); + else + goto invalid; } } diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c index f315c54bd3ac0001b733ac1494f91ad63c696fe3..19345b8f7bfd59f04361356668cbe22506af633a 100644 --- a/drivers/net/wireless/ath/ath9k/wmi.c +++ b/drivers/net/wireless/ath/ath9k/wmi.c @@ -341,6 +341,7 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, if (!time_left) { ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n", wmi_cmd_to_name(cmd_id)); + wmi->last_seq_id = 0; mutex_unlock(&wmi->op_mutex); return -ETIMEDOUT; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 66fdd073009cd31a319a1d499326c882e02793f0..b8c571d80b80485acaa6949b6b2ff059da36d1cd 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -90,6 +90,9 @@ #define BRCMF_ASSOC_PARAMS_FIXED_SIZE \ (sizeof(struct brcmf_assoc_params_le) - sizeof(u16)) +#define BRCMF_MAX_CHANSPEC_LIST \ + (BRCMF_DCMD_MEDLEN / sizeof(__le32) - 1) + static bool check_vif_up(struct brcmf_cfg80211_vif *vif) { if (!test_bit(BRCMF_VIF_STATUS_READY, &vif->sme_state)) { @@ -6464,6 +6467,13 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, band->channels[i].flags = IEEE80211_CHAN_DISABLED; total = le32_to_cpu(list->count); + if (total > BRCMF_MAX_CHANSPEC_LIST) { + bphy_err(drvr, "Invalid count of channel Spec. (%u)\n", + total); + err = -EINVAL; + goto fail_pbuf; + } + for (i = 0; i < total; i++) { ch.chspec = (u16)le32_to_cpu(list->element[i]); cfg->d11inf.decchspec(&ch); @@ -6609,6 +6619,13 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg) band = cfg_to_wiphy(cfg)->bands[NL80211_BAND_2GHZ]; list = (struct brcmf_chanspec_list *)pbuf; num_chan = le32_to_cpu(list->count); + if (num_chan > BRCMF_MAX_CHANSPEC_LIST) { + bphy_err(drvr, "Invalid count of channel Spec. 
(%u)\n", + num_chan); + kfree(pbuf); + return -EINVAL; + } + for (i = 0; i < num_chan; i++) { ch.chspec = (u16)le32_to_cpu(list->element[i]); cfg->d11inf.decchspec(&ch); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c index e3758bd86acf0cba5a9fb08403f049dfa36fed72..f29de630908d7a8ecbffced434b4894fbb36d78e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c @@ -264,6 +264,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) err); goto done; } + buf[sizeof(buf) - 1] = '\0'; ptr = (char *)buf; strsep(&ptr, "\n"); @@ -280,15 +281,17 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) if (err) { brcmf_dbg(TRACE, "retrieving clmver failed, %d\n", err); } else { + buf[sizeof(buf) - 1] = '\0'; clmver = (char *)buf; - /* store CLM version for adding it to revinfo debugfs file */ - memcpy(ifp->drvr->clmver, clmver, sizeof(ifp->drvr->clmver)); /* Replace all newline/linefeed characters with space * character */ strreplace(clmver, '\n', ' '); + /* store CLM version for adding it to revinfo debugfs file */ + memcpy(ifp->drvr->clmver, clmver, sizeof(ifp->drvr->clmver)); + brcmf_dbg(INFO, "CLM version = %s\n", clmver); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index c8e1d505f7b5dddfb04d0c5d39f3091ee4ac4828..3d544eedc1a395ef41f0643e9ae94b968a8c0d8c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -333,6 +333,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb, bphy_err(drvr, "%s: failed to expand headroom\n", brcmf_ifname(ifp)); atomic_inc(&drvr->bus_if->stats.pktcow_failed); + dev_kfree_skb(skb); goto done; } } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c index 7c8e08ee8f0ff437bf6e5776aea83058d7bc2e49..bd3b234b7803805cc0c5ac57eace5534c6669f9c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c @@ -346,8 +346,11 @@ brcmf_msgbuf_alloc_pktid(struct device *dev, count++; } while (count < pktids->array_size); - if (count == pktids->array_size) + if (count == pktids->array_size) { + dma_unmap_single(dev, *physaddr, skb->len - data_offset, + pktids->direction); return -ENOMEM; + } array[*idx].data_offset = data_offset; array[*idx].physaddr = *physaddr; diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index ada6ce32c1f19e37ce63e074bc620d717cd8322a..bb728fb24b8a4030ead2f352b3d4c02b027632d3 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -3444,7 +3444,7 @@ static void ipw_rx_queue_reset(struct ipw_priv *priv, dma_unmap_single(&priv->pci_dev->dev, rxq->pool[i].dma_addr, IPW_RX_BUF_SIZE, DMA_FROM_DEVICE); - dev_kfree_skb(rxq->pool[i].skb); + dev_kfree_skb_irq(rxq->pool[i].skb); rxq->pool[i].skb = NULL; } list_add_tail(&rxq->pool[i].list, &rxq->rx_used); @@ -11400,9 +11400,14 @@ static int ipw_wdev_init(struct net_device *dev) set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev); /* With that information in place, we can now register the wiphy... 
*/ - if (wiphy_register(wdev->wiphy)) - rc = -EIO; + rc = wiphy_register(wdev->wiphy); + if (rc) + goto out; + + return 0; out: + kfree(priv->ieee->a_band.channels); + kfree(priv->ieee->bg_band.channels); return rc; } diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c index 4ca8212d4fa4bb7a5b380a9ac93d83265d8d36f8..ef0ac42a55a2ac0c89cdf6a5a5c10c6334cd9540 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c @@ -3380,10 +3380,12 @@ static DEVICE_ATTR(dump_errors, 0200, NULL, il3945_dump_error_log); * *****************************************************************************/ -static void +static int il3945_setup_deferred_work(struct il_priv *il) { il->workqueue = create_singlethread_workqueue(DRV_NAME); + if (!il->workqueue) + return -ENOMEM; init_waitqueue_head(&il->wait_command_queue); @@ -3400,6 +3402,8 @@ il3945_setup_deferred_work(struct il_priv *il) timer_setup(&il->watchdog, il_bg_watchdog, 0); tasklet_setup(&il->irq_tasklet, il3945_irq_tasklet); + + return 0; } static void @@ -3721,7 +3725,10 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } il_set_rxon_channel(il, &il->bands[NL80211_BAND_2GHZ].channels[5]); - il3945_setup_deferred_work(il); + err = il3945_setup_deferred_work(il); + if (err) + goto out_remove_sysfs; + il3945_setup_handlers(il); il_power_initialize(il); @@ -3733,7 +3740,7 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = il3945_setup_mac(il); if (err) - goto out_remove_sysfs; + goto out_destroy_workqueue; il_dbgfs_register(il, DRV_NAME); @@ -3742,9 +3749,10 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; -out_remove_sysfs: +out_destroy_workqueue: destroy_workqueue(il->workqueue); il->workqueue = NULL; +out_remove_sysfs: sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group); out_release_irq: free_irq(il->pci_dev->irq, il); diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index 28675a4ad8612f71ac5a5fed79602bd58569cb7d..12cf22d0e9949050e23a47a2c2e9b77a7d0147b2 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -6212,10 +6212,12 @@ il4965_bg_txpower_work(struct work_struct *work) mutex_unlock(&il->mutex); } -static void +static int il4965_setup_deferred_work(struct il_priv *il) { il->workqueue = create_singlethread_workqueue(DRV_NAME); + if (!il->workqueue) + return -ENOMEM; init_waitqueue_head(&il->wait_command_queue); @@ -6234,6 +6236,8 @@ il4965_setup_deferred_work(struct il_priv *il) timer_setup(&il->watchdog, il_bg_watchdog, 0); tasklet_setup(&il->irq_tasklet, il4965_irq_tasklet); + + return 0; } static void @@ -6623,7 +6627,10 @@ il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_disable_msi; } - il4965_setup_deferred_work(il); + err = il4965_setup_deferred_work(il); + if (err) + goto out_free_irq; + il4965_setup_handlers(il); /********************************************* @@ -6661,6 +6668,7 @@ il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) out_destroy_workqueue: destroy_workqueue(il->workqueue); il->workqueue = NULL; +out_free_irq: free_irq(il->pci_dev->irq, il); out_disable_msi: pci_disable_msi(il->pci_dev); diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c index 
0651a6a416d1dfc685d10ec448f9fbc5e956ad62..4b55779de00a98912e2bfd72f990a35224212d60 100644 --- a/drivers/net/wireless/intel/iwlegacy/common.c +++ b/drivers/net/wireless/intel/iwlegacy/common.c @@ -5176,7 +5176,7 @@ il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) memset(&il->current_ht_config, 0, sizeof(struct il_ht_config)); /* new association get rid of ibss beacon skb */ - dev_kfree_skb(il->beacon_skb); + dev_consume_skb_irq(il->beacon_skb); il->beacon_skb = NULL; il->timestamp = 0; @@ -5295,7 +5295,7 @@ il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif) } spin_lock_irqsave(&il->lock, flags); - dev_kfree_skb(il->beacon_skb); + dev_consume_skb_irq(il->beacon_skb); il->beacon_skb = skb; timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp; diff --git a/drivers/net/wireless/intersil/orinoco/hw.c b/drivers/net/wireless/intersil/orinoco/hw.c index 61af5a28f269fd562ffceb1eea1a01122a080b73..af49aa421e47f00b66fba554ba4c0b4e0f6dcec9 100644 --- a/drivers/net/wireless/intersil/orinoco/hw.c +++ b/drivers/net/wireless/intersil/orinoco/hw.c @@ -931,6 +931,8 @@ int __orinoco_hw_setup_enc(struct orinoco_private *priv) err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFAUTHENTICATION_AGERE, auth_flag); + if (err) + return err; } err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFWEPENABLED_AGERE, diff --git a/drivers/net/wireless/marvell/libertas/cmdresp.c b/drivers/net/wireless/marvell/libertas/cmdresp.c index cb515c5584c1ff19e18bcd756d8a5b8d62ad9b3b..74cb7551f4275cfe02e8bfec2877bc7f69002662 100644 --- a/drivers/net/wireless/marvell/libertas/cmdresp.c +++ b/drivers/net/wireless/marvell/libertas/cmdresp.c @@ -48,7 +48,7 @@ void lbs_mac_event_disconnected(struct lbs_private *priv, /* Free Tx and Rx packets */ spin_lock_irqsave(&priv->driver_lock, flags); - kfree_skb(priv->currenttxskb); + dev_kfree_skb_irq(priv->currenttxskb); priv->currenttxskb = NULL; priv->tx_pending_len = 0; spin_unlock_irqrestore(&priv->driver_lock, flags); diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c index 32fdc4150b605d71669cba1ad10cad57837b61f2..2240b4db8c03663c04b2aa4ec2d1ec8dc79712a2 100644 --- a/drivers/net/wireless/marvell/libertas/if_usb.c +++ b/drivers/net/wireless/marvell/libertas/if_usb.c @@ -637,7 +637,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff, priv->resp_len[i] = (recvlength - MESSAGE_HEADER_LEN); memcpy(priv->resp_buf[i], recvbuff + MESSAGE_HEADER_LEN, priv->resp_len[i]); - kfree_skb(skb); + dev_kfree_skb_irq(skb); lbs_notify_command_response(priv, i); spin_unlock_irqrestore(&priv->driver_lock, flags); diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c index ee4cf3437e28ac8b785e2060f0192f6a774e62e7..1c56cc2742b07e26bd5e267c2647287bc644d3f9 100644 --- a/drivers/net/wireless/marvell/libertas/main.c +++ b/drivers/net/wireless/marvell/libertas/main.c @@ -217,7 +217,7 @@ int lbs_stop_iface(struct lbs_private *priv) spin_lock_irqsave(&priv->driver_lock, flags); priv->iface_running = false; - kfree_skb(priv->currenttxskb); + dev_kfree_skb_irq(priv->currenttxskb); priv->currenttxskb = NULL; priv->tx_pending_len = 0; spin_unlock_irqrestore(&priv->driver_lock, flags); @@ -870,6 +870,7 @@ static int lbs_init_adapter(struct lbs_private *priv) ret = kfifo_alloc(&priv->event_fifo, sizeof(u32) * 16, GFP_KERNEL); if (ret) { pr_err("Out of memory allocating event FIFO buffer\n"); + lbs_free_cmd_buffer(priv); goto out; } diff 
--git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c index ecce8b56f8a2835c3e96e79ccefb42b1fa550d84..2c45ef6e040775b2dbe3475ece700ea96317bb16 100644 --- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c +++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c @@ -613,7 +613,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff, spin_lock_irqsave(&priv->driver_lock, flags); memcpy(priv->cmd_resp_buff, recvbuff + MESSAGE_HEADER_LEN, recvlength - MESSAGE_HEADER_LEN); - kfree_skb(skb); + dev_kfree_skb_irq(skb); lbtf_cmd_response_rx(priv); spin_unlock_irqrestore(&priv->driver_lock, flags); } diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c index cf08a4af84d6d7fa82b1ba925eb6c34026095220..b99381ebb82a14322b3508cd75285602cf55e965 100644 --- a/drivers/net/wireless/marvell/mwifiex/11n.c +++ b/drivers/net/wireless/marvell/mwifiex/11n.c @@ -890,7 +890,7 @@ mwifiex_send_delba_txbastream_tbl(struct mwifiex_private *priv, u8 tid) */ void mwifiex_update_ampdu_txwinsize(struct mwifiex_adapter *adapter) { - u8 i; + u8 i, j; u32 tx_win_size; struct mwifiex_private *priv; @@ -921,8 +921,8 @@ void mwifiex_update_ampdu_txwinsize(struct mwifiex_adapter *adapter) if (tx_win_size != priv->add_ba_param.tx_win_size) { if (!priv->media_connected) continue; - for (i = 0; i < MAX_NUM_TID; i++) - mwifiex_send_delba_txbastream_tbl(priv, i); + for (j = 0; j < MAX_NUM_TID; j++) + mwifiex_send_delba_txbastream_tbl(priv, j); } } } diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index bde9e4bbfffe79d3a4ab7d2beaa68223d75ad26a..7fb6eef4092850402ea314d65bf1cb2de5f77662 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -485,6 +485,7 @@ static struct memory_type_mapping mem_type_mapping_tbl[] = { }; static const struct of_device_id mwifiex_sdio_of_match_table[] = { + { .compatible = "marvell,sd8787" }, { .compatible = "marvell,sd8897" }, { .compatible = "marvell,sd8997" }, { } diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index f01b455783b2340d97b314a453c48cfa0d101faa..7991705e9d134cb6c65187a32fe5174d5ea6344d 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -476,6 +476,7 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q) bool more; spin_lock_bh(&q->lock); + do { buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more); if (!buf) @@ -483,6 +484,12 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q) skb_free_frag(buf); } while (1); + + if (q->rx_head) { + dev_kfree_skb(q->rx_head); + q->rx_head = NULL; + } + spin_unlock_bh(&q->lock); if (!q->rx_page.va) @@ -505,12 +512,6 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid) mt76_dma_rx_cleanup(dev, q); mt76_dma_sync_idx(dev, q); mt76_dma_rx_fill(dev, q); - - if (!q->rx_head) - return; - - dev_kfree_skb(q->rx_head); - q->rx_head = NULL; } static void diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c index 11071519fce815c2d9d874086b98e487c7dcc635..8ba291abecff8d08c4eb8cdc0b93993902498c10 100644 --- a/drivers/net/wireless/mediatek/mt7601u/dma.c +++ b/drivers/net/wireless/mediatek/mt7601u/dma.c @@ -118,7 +118,8 @@ static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len) if (data_len < min_seg_len || WARN_ON_ONCE(!dma_len) || WARN_ON_ONCE(dma_len 
+ MT_DMA_HDRS > data_len) || - WARN_ON_ONCE(dma_len & 0x3)) + WARN_ON_ONCE(dma_len & 0x3) || + WARN_ON_ONCE(dma_len < min_seg_len)) return 0; return MT_DMA_HDRS + dma_len; diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c index 20615c7ec16831977142f58ca56fcc7fe29072f4..c508f429984abb48fdbc5039f5abd538e4c3f48e 100644 --- a/drivers/net/wireless/microchip/wilc1000/netdev.c +++ b/drivers/net/wireless/microchip/wilc1000/netdev.c @@ -684,6 +684,7 @@ netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev) if (skb->dev != ndev) { netdev_err(ndev, "Packet not destined to this device\n"); + dev_kfree_skb(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c index 199e7e031d7d90d8dc12be691b730d32424a6d39..3b3cb3a6e2e89acdf77b9e0697c0fd61d0d7b752 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c @@ -1671,6 +1671,11 @@ static void rtl8192e_enable_rf(struct rtl8xxxu_priv *priv) val8 = rtl8xxxu_read8(priv, REG_PAD_CTRL1); val8 &= ~BIT(0); rtl8xxxu_write8(priv, REG_PAD_CTRL1, val8); + + /* + * Fix transmission failure of rtl8192e. + */ + rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00); } struct rtl8xxxu_fileops rtl8192eu_fops = { diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 9a12f1d38007b5476ac85a93864b49334f152626..deef1c09de31976fe6a9693b05cbdc7723dffb74 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -4369,12 +4369,9 @@ void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv, void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv, u8 macid, bool connect) { -#ifdef RTL8XXXU_GEN2_REPORT_CONNECT /* - * Barry Day reports this causes issues with 8192eu and 8723bu - * devices reconnecting. The reason for this is unclear, but - * until it is better understood, leave the code in place but - * disabled, so it is not lost. + * The firmware turns on the rate control when it knows it's + * connected to a network. 
*/ struct h2c_cmd h2c; @@ -4387,7 +4384,6 @@ void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv, h2c.media_status_rpt.parm &= ~BIT(0); rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.media_status_rpt)); -#endif } void rtl8xxxu_gen1_init_aggregation(struct rtl8xxxu_priv *priv) @@ -5188,7 +5184,7 @@ static void rtl8xxxu_queue_rx_urb(struct rtl8xxxu_priv *priv, pending = priv->rx_urb_pending_count; } else { skb = (struct sk_buff *)rx_urb->urb.context; - dev_kfree_skb(skb); + dev_kfree_skb_irq(skb); usb_free_urb(&rx_urb->urb); } @@ -5495,9 +5491,6 @@ static void rtl8xxxu_c2hcmd_callback(struct work_struct *work) btcoex = &priv->bt_coex; rarpt = &priv->ra_report; - if (priv->rf_paths > 1) - goto out; - while (!skb_queue_empty(&priv->c2hcmd_queue)) { spin_lock_irqsave(&priv->c2hcmd_lock, flags); skb = __skb_dequeue(&priv->c2hcmd_queue); @@ -5551,10 +5544,9 @@ static void rtl8xxxu_c2hcmd_callback(struct work_struct *work) default: break; } - } -out: - dev_kfree_skb(skb); + dev_kfree_skb(skb); + } } static void rtl8723bu_handle_c2h(struct rtl8xxxu_priv *priv, @@ -5916,7 +5908,6 @@ static int rtl8xxxu_config(struct ieee80211_hw *hw, u32 changed) { struct rtl8xxxu_priv *priv = hw->priv; struct device *dev = &priv->udev->dev; - u16 val16; int ret = 0, channel; bool ht40; @@ -5926,14 +5917,6 @@ static int rtl8xxxu_config(struct ieee80211_hw *hw, u32 changed) __func__, hw->conf.chandef.chan->hw_value, changed, hw->conf.chandef.width); - if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) { - val16 = ((hw->conf.long_frame_max_tx_count << - RETRY_LIMIT_LONG_SHIFT) & RETRY_LIMIT_LONG_MASK) | - ((hw->conf.short_frame_max_tx_count << - RETRY_LIMIT_SHORT_SHIFT) & RETRY_LIMIT_SHORT_MASK); - rtl8xxxu_write16(priv, REG_RETRY_LIMIT, val16); - } - if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { switch (hw->conf.chandef.width) { case NL80211_CHAN_WIDTH_20_NOHT: diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c index 63f9ea21962f2548380b1045006ee232029b330d..335a3c9cdbab60ac592a438c1be38c43d0f2b8a6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c @@ -68,8 +68,10 @@ static void _rtl88ee_return_beacon_queue_skb(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE]; + struct sk_buff_head free_list; unsigned long flags; + skb_queue_head_init(&free_list); spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); while (skb_queue_len(&ring->queue)) { struct rtl_tx_desc *entry = &ring->desc[ring->idx]; @@ -79,10 +81,12 @@ static void _rtl88ee_return_beacon_queue_skb(struct ieee80211_hw *hw) rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry, true, HW_DESC_TXBUFF_ADDR), skb->len, DMA_TO_DEVICE); - kfree_skb(skb); + __skb_queue_tail(&free_list, skb); ring->idx = (ring->idx + 1) % ring->entries; } spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); + + __skb_queue_purge(&free_list); } static void _rtl88ee_disable_bcn_sub_func(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index 0748aedce2adb08ca8059c159796360808a53bc4..ccbb082d5e928f7f4974900899ca1f65e095f18f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c @@ -30,8 +30,10 @@ static void _rtl8723be_return_beacon_queue_skb(struct 
ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE]; + struct sk_buff_head free_list; unsigned long flags; + skb_queue_head_init(&free_list); spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); while (skb_queue_len(&ring->queue)) { struct rtl_tx_desc *entry = &ring->desc[ring->idx]; @@ -41,10 +43,12 @@ static void _rtl8723be_return_beacon_queue_skb(struct ieee80211_hw *hw) rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry, true, HW_DESC_TXBUFF_ADDR), skb->len, DMA_TO_DEVICE); - kfree_skb(skb); + __skb_queue_tail(&free_list, skb); ring->idx = (ring->idx + 1) % ring->entries; } spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); + + __skb_queue_purge(&free_list); } static void _rtl8723be_set_bcn_ctrl_reg(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index 33ffc24d367595d0ca5de01fb5e2a8cd34431a70..c4ee65cc2d5e6f42cf28f4b5f35194d254e0a819 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c @@ -26,8 +26,10 @@ static void _rtl8821ae_return_beacon_queue_skb(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE]; + struct sk_buff_head free_list; unsigned long flags; + skb_queue_head_init(&free_list); spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); while (skb_queue_len(&ring->queue)) { struct rtl_tx_desc *entry = &ring->desc[ring->idx]; @@ -37,10 +39,12 @@ static void _rtl8821ae_return_beacon_queue_skb(struct ieee80211_hw *hw) rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry, true, HW_DESC_TXBUFF_ADDR), skb->len, DMA_TO_DEVICE); - kfree_skb(skb); + __skb_queue_tail(&free_list, skb); ring->idx = (ring->idx + 1) % ring->entries; } spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); + + __skb_queue_purge(&free_list); } static void _rtl8821ae_set_bcn_ctrl_reg(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c index f41a7643b9c42b9a339bf1199a868f7f72c1e4d1..c0c06ab6d3e768297210eb43a53254b0fafbf49c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c @@ -1581,7 +1581,7 @@ static void _rtl8821ae_phy_txpower_by_rate_configuration(struct ieee80211_hw *hw } /* string is in decimal */ -static bool _rtl8812ae_get_integer_from_string(char *str, u8 *pint) +static bool _rtl8812ae_get_integer_from_string(const char *str, u8 *pint) { u16 i = 0; *pint = 0; @@ -1599,18 +1599,6 @@ static bool _rtl8812ae_get_integer_from_string(char *str, u8 *pint) return true; } -static bool _rtl8812ae_eq_n_byte(u8 *str1, u8 *str2, u32 num) -{ - if (num == 0) - return false; - while (num > 0) { - num--; - if (str1[num] != str2[num]) - return false; - } - return true; -} - static s8 _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(struct ieee80211_hw *hw, u8 band, u8 channel) { @@ -1637,10 +1625,11 @@ static s8 _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(struct ieee80211_hw *hw, return channel_index; } -static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw, u8 *pregulation, - u8 *pband, u8 *pbandwidth, - u8 *prate_section, u8 *prf_path, - u8 *pchannel, u8 *ppower_limit) +static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw, + 
const char *pregulation, + const char *pband, const char *pbandwidth, + const char *prate_section, const char *prf_path, + const char *pchannel, const char *ppower_limit) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &rtlpriv->phy; @@ -1648,8 +1637,8 @@ static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw, u8 *pregul u8 channel_index; s8 power_limit = 0, prev_power_limit, ret; - if (!_rtl8812ae_get_integer_from_string((char *)pchannel, &channel) || - !_rtl8812ae_get_integer_from_string((char *)ppower_limit, + if (!_rtl8812ae_get_integer_from_string(pchannel, &channel) || + !_rtl8812ae_get_integer_from_string(ppower_limit, &power_limit)) { rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Illegal index of pwr_lmt table [chnl %d][val %d]\n", @@ -1659,42 +1648,42 @@ static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw, u8 *pregul power_limit = power_limit > MAX_POWER_INDEX ? MAX_POWER_INDEX : power_limit; - if (_rtl8812ae_eq_n_byte(pregulation, (u8 *)("FCC"), 3)) + if (strcmp(pregulation, "FCC") == 0) regulation = 0; - else if (_rtl8812ae_eq_n_byte(pregulation, (u8 *)("MKK"), 3)) + else if (strcmp(pregulation, "MKK") == 0) regulation = 1; - else if (_rtl8812ae_eq_n_byte(pregulation, (u8 *)("ETSI"), 4)) + else if (strcmp(pregulation, "ETSI") == 0) regulation = 2; - else if (_rtl8812ae_eq_n_byte(pregulation, (u8 *)("WW13"), 4)) + else if (strcmp(pregulation, "WW13") == 0) regulation = 3; - if (_rtl8812ae_eq_n_byte(prate_section, (u8 *)("CCK"), 3)) + if (strcmp(prate_section, "CCK") == 0) rate_section = 0; - else if (_rtl8812ae_eq_n_byte(prate_section, (u8 *)("OFDM"), 4)) + else if (strcmp(prate_section, "OFDM") == 0) rate_section = 1; - else if (_rtl8812ae_eq_n_byte(prate_section, (u8 *)("HT"), 2) && - _rtl8812ae_eq_n_byte(prf_path, (u8 *)("1T"), 2)) + else if (strcmp(prate_section, "HT") == 0 && + strcmp(prf_path, "1T") == 0) rate_section = 2; - else if (_rtl8812ae_eq_n_byte(prate_section, (u8 *)("HT"), 2) && - _rtl8812ae_eq_n_byte(prf_path, (u8 *)("2T"), 2)) + else if (strcmp(prate_section, "HT") == 0 && + strcmp(prf_path, "2T") == 0) rate_section = 3; - else if (_rtl8812ae_eq_n_byte(prate_section, (u8 *)("VHT"), 3) && - _rtl8812ae_eq_n_byte(prf_path, (u8 *)("1T"), 2)) + else if (strcmp(prate_section, "VHT") == 0 && + strcmp(prf_path, "1T") == 0) rate_section = 4; - else if (_rtl8812ae_eq_n_byte(prate_section, (u8 *)("VHT"), 3) && - _rtl8812ae_eq_n_byte(prf_path, (u8 *)("2T"), 2)) + else if (strcmp(prate_section, "VHT") == 0 && + strcmp(prf_path, "2T") == 0) rate_section = 5; - if (_rtl8812ae_eq_n_byte(pbandwidth, (u8 *)("20M"), 3)) + if (strcmp(pbandwidth, "20M") == 0) bandwidth = 0; - else if (_rtl8812ae_eq_n_byte(pbandwidth, (u8 *)("40M"), 3)) + else if (strcmp(pbandwidth, "40M") == 0) bandwidth = 1; - else if (_rtl8812ae_eq_n_byte(pbandwidth, (u8 *)("80M"), 3)) + else if (strcmp(pbandwidth, "80M") == 0) bandwidth = 2; - else if (_rtl8812ae_eq_n_byte(pbandwidth, (u8 *)("160M"), 4)) + else if (strcmp(pbandwidth, "160M") == 0) bandwidth = 3; - if (_rtl8812ae_eq_n_byte(pband, (u8 *)("2.4G"), 4)) { + if (strcmp(pband, "2.4G") == 0) { ret = _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(hw, BAND_ON_2_4G, channel); @@ -1718,7 +1707,7 @@ static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw, u8 *pregul regulation, bandwidth, rate_section, channel_index, rtlphy->txpwr_limit_2_4g[regulation][bandwidth] [rate_section][channel_index][RF90_PATH_A]); - } else if (_rtl8812ae_eq_n_byte(pband, (u8 *)("5G"), 2)) { + } else if (strcmp(pband, "5G") == 0) { 
ret = _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(hw, BAND_ON_5G, channel); @@ -1749,10 +1738,10 @@ static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw, u8 *pregul } static void _rtl8812ae_phy_config_bb_txpwr_lmt(struct ieee80211_hw *hw, - u8 *regulation, u8 *band, - u8 *bandwidth, u8 *rate_section, - u8 *rf_path, u8 *channel, - u8 *power_limit) + const char *regulation, const char *band, + const char *bandwidth, const char *rate_section, + const char *rf_path, const char *channel, + const char *power_limit) { _rtl8812ae_phy_set_txpower_limit(hw, regulation, band, bandwidth, rate_section, rf_path, channel, @@ -1765,7 +1754,7 @@ static void _rtl8821ae_phy_read_and_config_txpwr_lmt(struct ieee80211_hw *hw) struct rtl_hal *rtlhal = rtl_hal(rtlpriv); u32 i = 0; u32 array_len; - u8 **array; + const char **array; if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { array_len = RTL8812AE_TXPWR_LMT_ARRAY_LEN; @@ -1778,13 +1767,13 @@ static void _rtl8821ae_phy_read_and_config_txpwr_lmt(struct ieee80211_hw *hw) rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "\n"); for (i = 0; i < array_len; i += 7) { - u8 *regulation = array[i]; - u8 *band = array[i+1]; - u8 *bandwidth = array[i+2]; - u8 *rate = array[i+3]; - u8 *rf_path = array[i+4]; - u8 *chnl = array[i+5]; - u8 *val = array[i+6]; + const char *regulation = array[i]; + const char *band = array[i+1]; + const char *bandwidth = array[i+2]; + const char *rate = array[i+3]; + const char *rf_path = array[i+4]; + const char *chnl = array[i+5]; + const char *val = array[i+6]; _rtl8812ae_phy_config_bb_txpwr_lmt(hw, regulation, band, bandwidth, rate, rf_path, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c index ed72a2aeb6c8e805bd19ac73d4c15a68146cf18f..fcaaf664cbec5b2be1f4af434583e973d68a9f54 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c @@ -2894,7 +2894,7 @@ u32 RTL8821AE_AGC_TAB_1TARRAYLEN = ARRAY_SIZE(RTL8821AE_AGC_TAB_ARRAY); * TXPWR_LMT.TXT ******************************************************************************/ -u8 *RTL8812AE_TXPWR_LMT[] = { +const char *RTL8812AE_TXPWR_LMT[] = { "FCC", "2.4G", "20M", "CCK", "1T", "01", "36", "ETSI", "2.4G", "20M", "CCK", "1T", "01", "32", "MKK", "2.4G", "20M", "CCK", "1T", "01", "32", @@ -3463,7 +3463,7 @@ u8 *RTL8812AE_TXPWR_LMT[] = { u32 RTL8812AE_TXPWR_LMT_ARRAY_LEN = ARRAY_SIZE(RTL8812AE_TXPWR_LMT); -u8 *RTL8821AE_TXPWR_LMT[] = { +const char *RTL8821AE_TXPWR_LMT[] = { "FCC", "2.4G", "20M", "CCK", "1T", "01", "32", "ETSI", "2.4G", "20M", "CCK", "1T", "01", "32", "MKK", "2.4G", "20M", "CCK", "1T", "01", "32", diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h index 540159c25078a177b08dbb27cc14bf989702df1e..76c62b7c0fb246e03abe7e29fa7020c3ff4d370f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h @@ -28,7 +28,7 @@ extern u32 RTL8821AE_AGC_TAB_ARRAY[]; extern u32 RTL8812AE_AGC_TAB_1TARRAYLEN; extern u32 RTL8812AE_AGC_TAB_ARRAY[]; extern u32 RTL8812AE_TXPWR_LMT_ARRAY_LEN; -extern u8 *RTL8812AE_TXPWR_LMT[]; +extern const char *RTL8812AE_TXPWR_LMT[]; extern u32 RTL8821AE_TXPWR_LMT_ARRAY_LEN; -extern u8 *RTL8821AE_TXPWR_LMT[]; +extern const char *RTL8821AE_TXPWR_LMT[]; #endif diff --git a/drivers/net/wireless/rsi/rsi_91x_coex.c b/drivers/net/wireless/rsi/rsi_91x_coex.c index 
a0c5d02ae88cf8486a862e1338062e966d9565ed..7395359b43b77b5211e332cde9dc40b32d26a774 100644 --- a/drivers/net/wireless/rsi/rsi_91x_coex.c +++ b/drivers/net/wireless/rsi/rsi_91x_coex.c @@ -160,6 +160,7 @@ int rsi_coex_attach(struct rsi_common *common) rsi_coex_scheduler_thread, "Coex-Tx-Thread")) { rsi_dbg(ERR_ZONE, "%s: Unable to init tx thrd\n", __func__); + kfree(coex_cb); return -EINVAL; } return 0; diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c index ff1701adbb179eaed019b88c1d03e15c0be47dad..ccf6344ed6fd20f42e46c5ef283a192ba5691908 100644 --- a/drivers/net/wireless/wl3501_cs.c +++ b/drivers/net/wireless/wl3501_cs.c @@ -1330,7 +1330,7 @@ static netdev_tx_t wl3501_hard_start_xmit(struct sk_buff *skb, } else { ++dev->stats.tx_packets; dev->stats.tx_bytes += skb->len; - kfree_skb(skb); + dev_kfree_skb_irq(skb); if (this->tx_buffer_cnt < 2) netif_stop_queue(dev); diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index f9373a88cf37c1126c4d2d95d414a49f78a1410c..24b4f73fb336898d98e0cf97820053d1508cca54 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -393,7 +393,7 @@ static void xenvif_get_requests(struct xenvif_queue *queue, struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops; struct xen_netif_tx_request *txp = first; - nr_slots = shinfo->nr_frags + 1; + nr_slots = shinfo->nr_frags + frag_overflow + 1; copy_count(skb) = 0; @@ -448,8 +448,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue, } } - for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; - shinfo->nr_frags++, gop++) { + for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS; + shinfo->nr_frags++, gop++, nr_slots--) { index = pending_index(queue->pending_cons++); pending_idx = queue->pending_ring[index]; xenvif_tx_create_map_op(queue, pending_idx, txp, @@ -462,12 +462,12 @@ static void xenvif_get_requests(struct xenvif_queue *queue, txp++; } - if (frag_overflow) { + if (nr_slots > 0) { shinfo = skb_shinfo(nskb); frags = shinfo->frags; - for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow; + for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; shinfo->nr_frags++, txp++, gop++) { index = pending_index(queue->pending_cons++); pending_idx = queue->pending_ring[index]; @@ -478,6 +478,11 @@ static void xenvif_get_requests(struct xenvif_queue *queue, } skb_shinfo(skb)->frag_list = nskb; + } else if (nskb) { + /* A frag_list skb was allocated but it is no longer needed + * because enough slots were converted to copy ops above. + */ + kfree_skb(nskb); } (*copy_ops) = cop - queue->tx_copy_ops; diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c index 5e300788be52555d46868a8d73b8ab67d0ceb21f..808d73050afd01973d31b5da65779eaa6a77dbd1 100644 --- a/drivers/nfc/fdp/i2c.c +++ b/drivers/nfc/fdp/i2c.c @@ -249,6 +249,9 @@ static void fdp_nci_i2c_read_device_properties(struct device *dev, len, sizeof(**fw_vsc_cfg), GFP_KERNEL); + if (!*fw_vsc_cfg) + goto alloc_err; + r = device_property_read_u8_array(dev, FDP_DP_FW_VSC_CFG_NAME, *fw_vsc_cfg, len); @@ -262,6 +265,7 @@ static void fdp_nci_i2c_read_device_properties(struct device *dev, *fw_vsc_cfg = NULL; } +alloc_err: dev_dbg(dev, "Clock type: %d, clock frequency: %d, VSC: %s", *clock_type, *clock_freq, *fw_vsc_cfg != NULL ? 
"yes" : "no"); } diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c index 37d397aae9b9d0b53df99830f5992677278ca304..a14afceaf5e92ee585d326e31d7ac1f25c82bd60 100644 --- a/drivers/nfc/st-nci/se.c +++ b/drivers/nfc/st-nci/se.c @@ -664,6 +664,12 @@ int st_nci_se_io(struct nci_dev *ndev, u32 se_idx, ST_NCI_EVT_TRANSMIT_DATA, apdu, apdu_length); default: + /* Need to free cb_context here as at the moment we can't + * clearly indicate to the caller if the callback function + * would be called (and free it) or not. In both cases a + * negative value may be returned to the caller. + */ + kfree(cb_context); return -ENODEV; } } diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c index d41636504246210a4aca3756be42101975fe8273..6a1d3b2752fbfc4737e9bfd6434f81f1c941b24c 100644 --- a/drivers/nfc/st21nfca/se.c +++ b/drivers/nfc/st21nfca/se.c @@ -236,6 +236,12 @@ int st21nfca_hci_se_io(struct nfc_hci_dev *hdev, u32 se_idx, ST21NFCA_EVT_TRANSMIT_DATA, apdu, apdu_length); default: + /* Need to free cb_context here as at the moment we can't + * clearly indicate to the caller if the callback function + * would be called (and free it) or not. In both cases a + * negative value may be returned to the caller. + */ + kfree(cb_context); return -ENODEV; } } diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 0fe61baef2163e2d52c8a0bd95105bb3f705fc75..5a3d79f92c111005467f415bf27892296ee572a8 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1292,7 +1292,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) else nvme_poll_irqdisable(nvmeq); - if (blk_mq_request_completed(req)) { + if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) { dev_warn(dev->ctrl.device, "I/O %d QID %d timeout, completion polled\n", req->tag, nvmeq->qid); diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index 640031cbda7cc6b4171fa724b7c0a4448d7d4588..46fc44ce86712e5cdbd2e85b560a4585fdd04ff7 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -1675,8 +1675,10 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport, else { queue = nvmet_fc_alloc_target_queue(iod->assoc, 0, be16_to_cpu(rqst->assoc_cmd.sqsize)); - if (!queue) + if (!queue) { ret = VERR_QUEUE_ALLOC_FAIL; + nvmet_fc_tgt_a_put(iod->assoc); + } } } diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index 21d89d80d083834ad0b6c7d0189fc68f236118cc..1505c745154e7b375d103bed25a415bca36efab8 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -625,31 +625,32 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) return ERR_PTR(rval); } - if (config->wp_gpio) - nvmem->wp_gpio = config->wp_gpio; - else + nvmem->id = rval; + + nvmem->dev.type = &nvmem_provider_type; + nvmem->dev.bus = &nvmem_bus_type; + nvmem->dev.parent = config->dev; + + device_initialize(&nvmem->dev); + + if (!config->ignore_wp) nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp", GPIOD_OUT_HIGH); if (IS_ERR(nvmem->wp_gpio)) { - ida_free(&nvmem_ida, nvmem->id); rval = PTR_ERR(nvmem->wp_gpio); - kfree(nvmem); - return ERR_PTR(rval); + nvmem->wp_gpio = NULL; + goto err_put_device; } kref_init(&nvmem->refcnt); INIT_LIST_HEAD(&nvmem->cells); - nvmem->id = rval; nvmem->owner = config->owner; if (!nvmem->owner && config->dev->driver) nvmem->owner = config->dev->driver->owner; nvmem->stride = config->stride ?: 1; nvmem->word_size = config->word_size ?: 1; nvmem->size = config->size; - nvmem->dev.type = &nvmem_provider_type; - nvmem->dev.bus = &nvmem_bus_type; - 
nvmem->dev.parent = config->dev; nvmem->root_only = config->root_only; nvmem->priv = config->priv; nvmem->type = config->type; @@ -660,18 +661,21 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) switch (config->id) { case NVMEM_DEVID_NONE: - dev_set_name(&nvmem->dev, "%s", config->name); + rval = dev_set_name(&nvmem->dev, "%s", config->name); break; case NVMEM_DEVID_AUTO: - dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id); + rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id); break; default: - dev_set_name(&nvmem->dev, "%s%d", + rval = dev_set_name(&nvmem->dev, "%s%d", config->name ? : "nvmem", config->name ? config->id : nvmem->id); break; } + if (rval) + goto err_put_device; + nvmem->read_only = device_property_present(config->dev, "read-only") || config->read_only || !nvmem->reg_write; @@ -679,22 +683,16 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) nvmem->dev.groups = nvmem_dev_groups; #endif - dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name); - - rval = device_register(&nvmem->dev); - if (rval) - goto err_put_device; - if (config->compat) { rval = nvmem_sysfs_setup_compat(nvmem, config); if (rval) - goto err_device_del; + goto err_put_device; } if (config->cells) { rval = nvmem_add_cells(nvmem, config->cells, config->ncells); if (rval) - goto err_teardown_compat; + goto err_remove_cells; } rval = nvmem_add_cells_from_table(nvmem); @@ -705,17 +703,20 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) if (rval) goto err_remove_cells; + dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name); + + rval = device_add(&nvmem->dev); + if (rval) + goto err_remove_cells; + blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem); return nvmem; err_remove_cells: nvmem_device_remove_all_cells(nvmem); -err_teardown_compat: if (config->compat) nvmem_sysfs_remove_compat(nvmem, config); -err_device_del: - device_del(&nvmem->dev); err_put_device: put_device(&nvmem->dev); diff --git a/drivers/nvmem/qcom-spmi-sdam.c b/drivers/nvmem/qcom-spmi-sdam.c index f6e9f96933ca291d65b2d5f8b8118bd4bfd6c3d4..1549bfcc4c2d968c5d5f318cc20e87d6a166cc60 100644 --- a/drivers/nvmem/qcom-spmi-sdam.c +++ b/drivers/nvmem/qcom-spmi-sdam.c @@ -166,6 +166,7 @@ static const struct of_device_id sdam_match_table[] = { { .compatible = "qcom,spmi-sdam" }, {}, }; +MODULE_DEVICE_TABLE(of, sdam_match_table); static struct platform_driver sdam_driver = { .driver = { diff --git a/drivers/of/address.c b/drivers/of/address.c index 73ddf2540f3fe6f088ddc10d665d72a678745599..f686fb5011b875457a8a2fcefe383014a893f241 100644 --- a/drivers/of/address.c +++ b/drivers/of/address.c @@ -990,8 +990,19 @@ int of_dma_get_range(struct device_node *np, const struct bus_dma_region **map) } of_dma_range_parser_init(&parser, node); - for_each_of_range(&parser, &range) + for_each_of_range(&parser, &range) { + if (range.cpu_addr == OF_BAD_ADDR) { + pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n", + range.bus_addr, node); + continue; + } num_ranges++; + } + + if (!num_ranges) { + ret = -EINVAL; + goto out; + } r = kcalloc(num_ranges + 1, sizeof(*r), GFP_KERNEL); if (!r) { @@ -1000,18 +1011,16 @@ int of_dma_get_range(struct device_node *np, const struct bus_dma_region **map) } /* - * Record all info in the generic DMA ranges array for struct device. + * Record all info in the generic DMA ranges array for struct device, + * returning an error if we don't find any parsable ranges. 
*/ *map = r; of_dma_range_parser_init(&parser, node); for_each_of_range(&parser, &range) { pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n", range.bus_addr, range.cpu_addr, range.size); - if (range.cpu_addr == OF_BAD_ADDR) { - pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n", - range.bus_addr, node); + if (range.cpu_addr == OF_BAD_ADDR) continue; - } r->cpu_start = range.cpu_addr; r->dma_start = range.bus_addr; r->size = range.size; diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c index 596c185b5dda4501c119921f5860eda843d1ab74..60f4ff8e044d19d142205e1146b97da0f74fa590 100644 --- a/drivers/opp/debugfs.c +++ b/drivers/opp/debugfs.c @@ -204,7 +204,7 @@ static void opp_migrate_dentry(struct opp_device *opp_dev, dentry = debugfs_rename(rootdir, opp_dev->dentry, rootdir, opp_table->dentry_name); - if (!dentry) { + if (IS_ERR(dentry)) { dev_err(dev, "%s: Failed to rename link from: %s to %s\n", __func__, dev_name(opp_dev->dev), dev_name(dev)); return; diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c index 48169b1e38171d307897db0d355a097c9a0980a2..e73e18a73833be40f95efcd8e3836453366292a6 100644 --- a/drivers/pci/controller/pci-loongson.c +++ b/drivers/pci/controller/pci-loongson.c @@ -13,9 +13,14 @@ #include "../pci.h" /* Device IDs */ -#define DEV_PCIE_PORT_0 0x7a09 -#define DEV_PCIE_PORT_1 0x7a19 -#define DEV_PCIE_PORT_2 0x7a29 +#define DEV_LS2K_PCIE_PORT0 0x1a05 +#define DEV_LS7A_PCIE_PORT0 0x7a09 +#define DEV_LS7A_PCIE_PORT1 0x7a19 +#define DEV_LS7A_PCIE_PORT2 0x7a29 +#define DEV_LS7A_PCIE_PORT3 0x7a39 +#define DEV_LS7A_PCIE_PORT4 0x7a49 +#define DEV_LS7A_PCIE_PORT5 0x7a59 +#define DEV_LS7A_PCIE_PORT6 0x7a69 #define DEV_LS2K_APB 0x7a02 #define DEV_LS7A_CONF 0x7a10 @@ -38,11 +43,11 @@ static void bridge_class_quirk(struct pci_dev *dev) dev->class = PCI_CLASS_BRIDGE_PCI << 8; } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, - DEV_PCIE_PORT_0, bridge_class_quirk); + DEV_LS7A_PCIE_PORT0, bridge_class_quirk); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, - DEV_PCIE_PORT_1, bridge_class_quirk); + DEV_LS7A_PCIE_PORT1, bridge_class_quirk); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, - DEV_PCIE_PORT_2, bridge_class_quirk); + DEV_LS7A_PCIE_PORT2, bridge_class_quirk); static void system_bus_quirk(struct pci_dev *pdev) { @@ -60,37 +65,33 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_LPC, system_bus_quirk); -static void loongson_mrrs_quirk(struct pci_dev *dev) +static void loongson_mrrs_quirk(struct pci_dev *pdev) { - struct pci_bus *bus = dev->bus; - struct pci_dev *bridge; - static const struct pci_device_id bridge_devids[] = { - { PCI_VDEVICE(LOONGSON, DEV_PCIE_PORT_0) }, - { PCI_VDEVICE(LOONGSON, DEV_PCIE_PORT_1) }, - { PCI_VDEVICE(LOONGSON, DEV_PCIE_PORT_2) }, - { 0, }, - }; - - /* look for the matching bridge */ - while (!pci_is_root_bus(bus)) { - bridge = bus->self; - bus = bus->parent; - /* - * Some Loongson PCIe ports have a h/w limitation of - * 256 bytes maximum read request size. They can't handle - * anything larger than this. So force this limit on - * any devices attached under these ports. - */ - if (pci_match_id(bridge_devids, bridge)) { - if (pcie_get_readrq(dev) > 256) { - pci_info(dev, "limiting MRRS to 256\n"); - pcie_set_readrq(dev, 256); - } - break; - } - } + /* + * Some Loongson PCIe ports have h/w limitations of maximum read + * request size. They can't handle anything larger than this. 
So + * force this limit on any devices attached under these ports. + */ + struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus); + + bridge->no_inc_mrrs = 1; } -DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, loongson_mrrs_quirk); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, + DEV_LS2K_PCIE_PORT0, loongson_mrrs_quirk); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, + DEV_LS7A_PCIE_PORT0, loongson_mrrs_quirk); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, + DEV_LS7A_PCIE_PORT1, loongson_mrrs_quirk); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, + DEV_LS7A_PCIE_PORT2, loongson_mrrs_quirk); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, + DEV_LS7A_PCIE_PORT3, loongson_mrrs_quirk); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, + DEV_LS7A_PCIE_PORT4, loongson_mrrs_quirk); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, + DEV_LS7A_PCIE_PORT5, loongson_mrrs_quirk); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, + DEV_LS7A_PCIE_PORT6, loongson_mrrs_quirk); static void __iomem *cfg1_map(struct loongson_pci *priv, int bus, unsigned int devfn, int where) diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 262577c81d307b14974c09fa71b3f68865d15a55..744a2e05635b926c925c6e9bb60b3dbe9650584c 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -4808,7 +4808,7 @@ void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev) if (pci_dev_is_disconnected(dev)) return; - if (!pci_is_bridge(dev) || !dev->bridge_d3) + if (!pci_is_bridge(dev)) return; down_read(&pci_bus_sem); @@ -5739,6 +5739,7 @@ int pcie_set_readrq(struct pci_dev *dev, int rq) { u16 v; int ret; + struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus); if (rq < 128 || rq > 4096 || !is_power_of_2(rq)) return -EINVAL; @@ -5757,6 +5758,15 @@ int pcie_set_readrq(struct pci_dev *dev, int rq) v = (ffs(rq) - 8) << 12; + if (bridge->no_inc_mrrs) { + int max_mrrs = pcie_get_readrq(dev); + + if (rq > max_mrrs) { + pci_info(dev, "can't set Max_Read_Request_Size to %d; max is %d\n", rq, max_mrrs); + return -EINVAL; + } + } + ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_READRQ, v); diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 0039460c6ab027abefb80486aaf45ee105bfca69..9197d7362731e074a29db2d9fc186865f0a680aa 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -351,53 +351,36 @@ struct pci_sriov { * @dev - pci device to set new error_state * @new - the state we want dev to be in * - * Must be called with device_lock held. + * If the device is experiencing perm_failure, it has to remain in that state. + * Any other transition is allowed. * * Returns true if state has been changed to the requested state. 
*/ static inline bool pci_dev_set_io_state(struct pci_dev *dev, pci_channel_state_t new) { - bool changed = false; + pci_channel_state_t old; - device_lock_assert(&dev->dev); switch (new) { case pci_channel_io_perm_failure: - switch (dev->error_state) { - case pci_channel_io_frozen: - case pci_channel_io_normal: - case pci_channel_io_perm_failure: - changed = true; - break; - } - break; + xchg(&dev->error_state, pci_channel_io_perm_failure); + return true; case pci_channel_io_frozen: - switch (dev->error_state) { - case pci_channel_io_frozen: - case pci_channel_io_normal: - changed = true; - break; - } - break; + old = cmpxchg(&dev->error_state, pci_channel_io_normal, + pci_channel_io_frozen); + return old != pci_channel_io_perm_failure; case pci_channel_io_normal: - switch (dev->error_state) { - case pci_channel_io_frozen: - case pci_channel_io_normal: - changed = true; - break; - } - break; + old = cmpxchg(&dev->error_state, pci_channel_io_frozen, + pci_channel_io_normal); + return old != pci_channel_io_perm_failure; + default: + return false; } - if (changed) - dev->error_state = new; - return changed; } static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused) { - device_lock(&dev->dev); pci_dev_set_io_state(dev, pci_channel_io_perm_failure); - device_unlock(&dev->dev); return 0; } diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index fb2e52fd01b3979ea63fcbd502ee1ed6b8e3ac82..c1ebd5e12b06e51e6177b2805a76a7dbbc901c46 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -4797,6 +4797,26 @@ static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags) PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); } +/* + * Wangxun 10G/1G NICs have no ACS capability, and on multi-function + * devices, peer-to-peer transactions are not used between the functions. + * So add an ACS quirk for the devices below to isolate the functions. + * SFxxx 1G NICs(em). + * RP1000/RP2000 10G NICs(sp). + */ +static int pci_quirk_wangxun_nic_acs(struct pci_dev *dev, u16 acs_flags) +{ + switch (dev->device) { + case 0x0100 ... 
0x010F: + case 0x1001: + case 0x2001: + return pci_acs_ctrl_enabled(acs_flags, + PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); + } + + return false; +} + static const struct pci_dev_acs_enabled { u16 vendor; u16 device; @@ -4942,6 +4962,8 @@ static const struct pci_dev_acs_enabled { { PCI_VENDOR_ID_NXP, 0x8d9b, pci_quirk_nxp_rp_acs }, /* Zhaoxin Root/Downstream Ports */ { PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs }, + /* Wangxun nics */ + { PCI_VENDOR_ID_WANGXUN, PCI_ANY_ID, pci_quirk_wangxun_nic_acs }, { 0 } }; @@ -5302,6 +5324,7 @@ static void quirk_no_flr(struct pci_dev *dev) DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1487, quirk_no_flr); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x148c, quirk_no_flr); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x149c, quirk_no_flr); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x7901, quirk_no_flr); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_no_flr); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_no_flr); diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 2ce636937c6eaf5b2c23045a1de00d368fb8cb01..16d291e10627b07ac8db52ec5538d354527da130 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -1878,12 +1878,67 @@ static void adjust_bridge_window(struct pci_dev *bridge, struct resource *res, add_size = size - new_size; pci_dbg(bridge, "bridge window %pR shrunken by %pa\n", res, &add_size); + } else { + return; } res->end = res->start + new_size - 1; remove_from_list(add_list, res); } +static void remove_dev_resource(struct resource *avail, struct pci_dev *dev, + struct resource *res) +{ + resource_size_t size, align, tmp; + + size = resource_size(res); + if (!size) + return; + + align = pci_resource_alignment(dev, res); + align = align ? ALIGN(avail->start, align) - avail->start : 0; + tmp = align + size; + avail->start = min(avail->start + tmp, avail->end + 1); +} + +static void remove_dev_resources(struct pci_dev *dev, struct resource *io, + struct resource *mmio, + struct resource *mmio_pref) +{ + int i; + + for (i = 0; i < PCI_NUM_RESOURCES; i++) { + struct resource *res = &dev->resource[i]; + + if (resource_type(res) == IORESOURCE_IO) { + remove_dev_resource(io, dev, res); + } else if (resource_type(res) == IORESOURCE_MEM) { + + /* + * Make sure prefetchable memory is reduced from + * the correct resource. Specifically we put 32-bit + * prefetchable memory in the non-prefetchable window + * if there is a 64-bit prefetchable window. + * + * See comments in __pci_bus_size_bridges() for + * more information. + */ + if ((res->flags & IORESOURCE_PREFETCH) && + ((res->flags & IORESOURCE_MEM_64) == + (mmio_pref->flags & IORESOURCE_MEM_64))) + remove_dev_resource(mmio_pref, dev, res); + else + remove_dev_resource(mmio, dev, res); + } + } +} + +/* + * io, mmio and mmio_pref contain the total amount of bridge window space + * available. This includes the minimal space needed to cover all the + * existing devices on the bus and the possible extra space that can be + * shared with the bridges. 
+ */ static void pci_bus_distribute_available_resources(struct pci_bus *bus, struct list_head *add_list, struct resource io, @@ -1893,7 +1948,7 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus, unsigned int normal_bridges = 0, hotplug_bridges = 0; struct resource *io_res, *mmio_res, *mmio_pref_res; struct pci_dev *dev, *bridge = bus->self; - resource_size_t io_per_hp, mmio_per_hp, mmio_pref_per_hp, align; + resource_size_t io_per_b, mmio_per_b, mmio_pref_per_b, align; io_res = &bridge->resource[PCI_BRIDGE_IO_WINDOW]; mmio_res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW]; @@ -1937,94 +1992,88 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus, normal_bridges++; } + if (!(hotplug_bridges + normal_bridges)) + return; + /* - * There is only one bridge on the bus so it gets all available - * resources which it can then distribute to the possible hotplug - * bridges below. + * Calculate the amount of space we can forward from "bus" to any + * downstream buses, i.e., the space left over after assigning the + * BARs and windows on "bus". */ - if (hotplug_bridges + normal_bridges == 1) { - dev = list_first_entry(&bus->devices, struct pci_dev, bus_list); - if (dev->subordinate) - pci_bus_distribute_available_resources(dev->subordinate, - add_list, io, mmio, mmio_pref); - return; + list_for_each_entry(dev, &bus->devices, bus_list) { + if (!dev->is_virtfn) + remove_dev_resources(dev, &io, &mmio, &mmio_pref); } - if (hotplug_bridges == 0) - return; - /* - * Calculate the total amount of extra resource space we can - * pass to bridges below this one. This is basically the - * extra space reduced by the minimal required space for the - * non-hotplug bridges. + * If there is at least one hotplug bridge on this bus it gets all + * the extra resource space that was left after the reductions + * above. + * + * If there are no hotplug bridges the extra resource space is + * split between non-hotplug bridges. This is to allow possible + * hotplug bridges below them to get the extra space as well. */ + if (hotplug_bridges) { + io_per_b = div64_ul(resource_size(&io), hotplug_bridges); + mmio_per_b = div64_ul(resource_size(&mmio), hotplug_bridges); + mmio_pref_per_b = div64_ul(resource_size(&mmio_pref), + hotplug_bridges); + } else { + io_per_b = div64_ul(resource_size(&io), normal_bridges); + mmio_per_b = div64_ul(resource_size(&mmio), normal_bridges); + mmio_pref_per_b = div64_ul(resource_size(&mmio_pref), + normal_bridges); + } + for_each_pci_bridge(dev, bus) { - resource_size_t used_size; struct resource *res; + struct pci_bus *b; - if (dev->is_hotplug_bridge) + b = dev->subordinate; + if (!b) + continue; + if (hotplug_bridges && !dev->is_hotplug_bridge) continue; + res = &dev->resource[PCI_BRIDGE_IO_WINDOW]; + /* - * Reduce the available resource space by what the - * bridge and devices below it occupy. + * Make sure the split resource space is properly aligned + * for bridge windows (align it down to avoid going above + * what is available). */ - res = &dev->resource[PCI_BRIDGE_IO_WINDOW]; align = pci_resource_alignment(dev, res); - align = align ? ALIGN(io.start, align) - io.start : 0; - used_size = align + resource_size(res); - if (!res->parent) - io.start = min(io.start + used_size, io.end + 1); + io.end = align ? 
io.start + ALIGN_DOWN(io_per_b, align) - 1 + : io.start + io_per_b - 1; + + /* + * The x_per_b holds the extra resource space that can be + * added for each bridge but there is the minimal already + * reserved as well so adjust x.start down accordingly to + * cover the whole space. + */ + io.start -= resource_size(res); res = &dev->resource[PCI_BRIDGE_MEM_WINDOW]; align = pci_resource_alignment(dev, res); - align = align ? ALIGN(mmio.start, align) - mmio.start : 0; - used_size = align + resource_size(res); - if (!res->parent) - mmio.start = min(mmio.start + used_size, mmio.end + 1); + mmio.end = align ? mmio.start + ALIGN_DOWN(mmio_per_b, align) - 1 + : mmio.start + mmio_per_b - 1; + mmio.start -= resource_size(res); res = &dev->resource[PCI_BRIDGE_PREF_MEM_WINDOW]; align = pci_resource_alignment(dev, res); - align = align ? ALIGN(mmio_pref.start, align) - - mmio_pref.start : 0; - used_size = align + resource_size(res); - if (!res->parent) - mmio_pref.start = min(mmio_pref.start + used_size, - mmio_pref.end + 1); - } - - io_per_hp = div64_ul(resource_size(&io), hotplug_bridges); - mmio_per_hp = div64_ul(resource_size(&mmio), hotplug_bridges); - mmio_pref_per_hp = div64_ul(resource_size(&mmio_pref), - hotplug_bridges); - - /* - * Go over devices on this bus and distribute the remaining - * resource space between hotplug bridges. - */ - for_each_pci_bridge(dev, bus) { - struct pci_bus *b; - - b = dev->subordinate; - if (!b || !dev->is_hotplug_bridge) - continue; - - /* - * Distribute available extra resources equally between - * hotplug-capable downstream ports taking alignment into - * account. - */ - io.end = io.start + io_per_hp - 1; - mmio.end = mmio.start + mmio_per_hp - 1; - mmio_pref.end = mmio_pref.start + mmio_pref_per_hp - 1; + mmio_pref.end = align ? 
mmio_pref.start + + ALIGN_DOWN(mmio_pref_per_b, align) - 1 + : mmio_pref.start + mmio_pref_per_b - 1; + mmio_pref.start -= resource_size(res); pci_bus_distribute_available_resources(b, add_list, io, mmio, mmio_pref); - io.start += io_per_hp; - mmio.start += mmio_per_hp; - mmio_pref.start += mmio_pref_per_hp; + io.start += io.end + 1; + mmio.start += mmio.end + 1; + mmio_pref.start += mmio_pref.end + 1; } } diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c index 46ebdb1460a3de4753794adb2577e035e6666b9b..cab6a94bf1610c23ce2d1d5299d4ccb4ed3cf6bf 100644 --- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c +++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c @@ -467,8 +467,10 @@ static int rockchip_usb2phy_power_on(struct phy *phy) return ret; ret = property_enable(base, &rport->port_cfg->phy_sus, false); - if (ret) + if (ret) { + clk_disable_unprepare(rphy->clk480m); return ret; + } /* waiting for the utmi_clk to become stable */ usleep_range(1500, 2000); diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c index 70a31251b202bc5b4e6b6cbf98ae69179ec96df0..20f787d5ec581779a9248fe4f931a0facee5f4ee 100644 --- a/drivers/phy/rockchip/phy-rockchip-typec.c +++ b/drivers/phy/rockchip/phy-rockchip-typec.c @@ -808,9 +808,8 @@ static int tcphy_get_mode(struct rockchip_typec_phy *tcphy) struct extcon_dev *edev = tcphy->extcon; union extcon_property_value property; unsigned int id; - bool ufp, dp; u8 mode; - int ret; + int ret, ufp, dp; if (!edev) return MODE_DFP_USB; diff --git a/drivers/phy/ti/Kconfig b/drivers/phy/ti/Kconfig index 15a3bcf32308603597a9ae5d177c2091e20a6dcd..b905902d57508d0c6f9d05fae9b0d9576d2cf0d9 100644 --- a/drivers/phy/ti/Kconfig +++ b/drivers/phy/ti/Kconfig @@ -23,7 +23,7 @@ config PHY_DM816X_USB config PHY_AM654_SERDES tristate "TI AM654 SERDES support" - depends on OF && ARCH_K3 || COMPILE_TEST + depends on OF && (ARCH_K3 || COMPILE_TEST) depends on COMMON_CLK select GENERIC_PHY select MULTIPLEXER @@ -35,7 +35,7 @@ config PHY_AM654_SERDES config PHY_J721E_WIZ tristate "TI J721E WIZ (SERDES Wrapper) support" - depends on OF && ARCH_K3 || COMPILE_TEST + depends on OF && (ARCH_K3 || COMPILE_TEST) depends on HAS_IOMEM && OF_ADDRESS depends on COMMON_CLK select GENERIC_PHY diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c index e792318c389468236d39f97cf5166e52f22f5f2e..d26d859546275708f522a1ef517707b54981c9e8 100644 --- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c +++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c @@ -121,7 +121,7 @@ static int aspeed_disable_sig(struct aspeed_pinmux_data *ctx, int ret = 0; if (!exprs) - return true; + return -EINVAL; while (*exprs && !ret) { ret = aspeed_sig_expr_disable(ctx, *exprs); diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c index 39d2024dc2ee57cb4f39b58fa3286048b55d3903..c7ae9f900b5327ff4b7fb06b6ac37c2200c76d45 100644 --- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c +++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c @@ -356,8 +356,6 @@ static int bcm2835_of_gpio_ranges_fallback(struct gpio_chip *gc, { struct pinctrl_dev *pctldev = of_pinctrl_get(np); - of_node_put(np); - if (!pctldev) return 0; diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index db9087c129c0d3f29653ce9a99d035445b7d313d..2ef9e2d8fd9c5f9afaf8b54e24adb4849547fc3a 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -1606,6 
+1606,12 @@ const struct intel_pinctrl_soc_data *intel_pinctrl_get_soc_data(struct platform_ EXPORT_SYMBOL_GPL(intel_pinctrl_get_soc_data); #ifdef CONFIG_PM_SLEEP +static bool __intel_gpio_is_direct_irq(u32 value) +{ + return (value & PADCFG0_GPIROUTIOXAPIC) && (value & PADCFG0_GPIOTXDIS) && + (__intel_gpio_get_gpio_mode(value) == PADCFG0_PMODE_GPIO); +} + static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned int pin) { const struct pin_desc *pd = pin_desc_get(pctrl->pctldev, pin); @@ -1639,8 +1645,7 @@ static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned int * See https://bugzilla.kernel.org/show_bug.cgi?id=214749. */ value = readl(intel_get_padcfg(pctrl, pin, PADCFG0)); - if ((value & PADCFG0_GPIROUTIOXAPIC) && (value & PADCFG0_GPIOTXDIS) && - (__intel_gpio_get_gpio_mode(value) == PADCFG0_PMODE_GPIO)) + if (__intel_gpio_is_direct_irq(value)) return true; return false; @@ -1770,7 +1775,12 @@ int intel_pinctrl_resume_noirq(struct device *dev) for (i = 0; i < pctrl->soc->npins; i++) { const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i]; - if (!intel_pinctrl_should_save(pctrl, desc->number)) + if (!(intel_pinctrl_should_save(pctrl, desc->number) || + /* + * If the firmware mangled the register contents too much, + * check the saved value for the Direct IRQ mode. + */ + __intel_gpio_is_direct_irq(pads[i].padcfg0))) continue; intel_restore_padcfg(pctrl, desc->number, PADCFG0, pads[i].padcfg0); diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c index d0a4ebbe1e7e65f6e04532bc02f8101afbb3fbd1..e486d66e220b04c69d1c2c679cf67d9e79fc5efe 100644 --- a/drivers/pinctrl/mediatek/pinctrl-paris.c +++ b/drivers/pinctrl/mediatek/pinctrl-paris.c @@ -574,7 +574,7 @@ static int mtk_hw_get_value_wrap(struct mtk_pinctrl *hw, unsigned int gpio, int ssize_t mtk_pctrl_show_one_pin(struct mtk_pinctrl *hw, unsigned int gpio, char *buf, unsigned int bufLen) { - int pinmux, pullup, pullen, len = 0, r1 = -1, r0 = -1; + int pinmux, pullup = 0, pullen = 0, len = 0, r1 = -1, r0 = -1; const struct mtk_pin_desc *desc; if (gpio >= hw->soc->npins) @@ -637,7 +637,7 @@ static void mtk_pctrl_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, unsigned int gpio) { struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev); - char buf[PIN_DBG_BUF_SZ]; + char buf[PIN_DBG_BUF_SZ] = { 0 }; (void)mtk_pctrl_show_one_pin(hw, gpio, buf, PIN_DBG_BUF_SZ); diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c index 578b387100d9bb5343ffe162f0e89a197756b3ed..d2e2b101978f8fe9656234b2a9101ebf976393e6 100644 --- a/drivers/pinctrl/pinctrl-at91-pio4.c +++ b/drivers/pinctrl/pinctrl-at91-pio4.c @@ -1081,8 +1081,8 @@ static int atmel_pinctrl_probe(struct platform_device *pdev) pin_desc[i].number = i; /* Pin naming convention: P(bank_name)(bank_pin_number). 
*/ - pin_desc[i].name = kasprintf(GFP_KERNEL, "P%c%d", - bank + 'A', line); + pin_desc[i].name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "P%c%d", + bank + 'A', line); group->name = group_names[i] = pin_desc[i].name; group->pin = pin_desc[i].number; diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c index 9015486e38c18fd12a3ba54b231bdbc7d5733390..52ecd47c18e2d6be482c7114a6decbdb1f8d91ef 100644 --- a/drivers/pinctrl/pinctrl-at91.c +++ b/drivers/pinctrl/pinctrl-at91.c @@ -1891,7 +1891,7 @@ static int at91_gpio_probe(struct platform_device *pdev) } for (i = 0; i < chip->ngpio; i++) - names[i] = kasprintf(GFP_KERNEL, "pio%c%d", alias_idx + 'A', i); + names[i] = devm_kasprintf(&pdev->dev, GFP_KERNEL, "pio%c%d", alias_idx + 'A', i); chip->names = (const char *const *)names; diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c index e0df5ad6741dcc479719057ae6c3c628dbaed4d4..4d07c531371cd19061cf5cbe6e4b26a3a4a4ab13 100644 --- a/drivers/pinctrl/pinctrl-ingenic.c +++ b/drivers/pinctrl/pinctrl-ingenic.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -2826,6 +2827,8 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev) return 0; } +#define IF_ENABLED(cfg, ptr) PTR_IF(IS_ENABLED(cfg), (ptr)) + static const struct of_device_id ingenic_pinctrl_of_match[] = { { .compatible = "ingenic,jz4740-pinctrl", .data = &jz4740_chip_info }, { .compatible = "ingenic,jz4725b-pinctrl", .data = &jz4725b_chip_info }, diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c index 07b1204174bf15928de2e0870abdef75890d1457..2a454098eaaa5163fc70f5b2d2203b255660fec0 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c @@ -61,8 +61,17 @@ enum rockchip_pinctrl_type { RK3308, RK3368, RK3399, + RK3568, }; + +/** + * Generate a bitmask for setting a value (v) with a write mask bit in hiword + * register 31:16 area. + */ +#define WRITE_MASK_VAL(h, l, v) \ + (GENMASK(((h) + 16), ((l) + 16)) | (((v) << (l)) & GENMASK((h), (l)))) + /* * Encode variants of iomux registers into a type variable */ @@ -290,6 +299,25 @@ struct rockchip_pin_bank { .pull_type[3] = pull3, \ } +#define PIN_BANK_MUX_ROUTE_FLAGS(ID, PIN, FUNC, REG, VAL, FLAG) \ + { \ + .bank_num = ID, \ + .pin = PIN, \ + .func = FUNC, \ + .route_offset = REG, \ + .route_val = VAL, \ + .route_location = FLAG, \ + } + +#define RK_MUXROUTE_SAME(ID, PIN, FUNC, REG, VAL) \ + PIN_BANK_MUX_ROUTE_FLAGS(ID, PIN, FUNC, REG, VAL, ROCKCHIP_ROUTE_SAME) + +#define RK_MUXROUTE_GRF(ID, PIN, FUNC, REG, VAL) \ + PIN_BANK_MUX_ROUTE_FLAGS(ID, PIN, FUNC, REG, VAL, ROCKCHIP_ROUTE_GRF) + +#define RK_MUXROUTE_PMU(ID, PIN, FUNC, REG, VAL) \ + PIN_BANK_MUX_ROUTE_FLAGS(ID, PIN, FUNC, REG, VAL, ROCKCHIP_ROUTE_PMU) + /** * struct rockchip_mux_recalced_data: represent a pin iomux data. * @num: bank number. 
@@ -816,597 +844,203 @@ static void rockchip_get_recalced_mux(struct rockchip_pin_bank *bank, int pin, } static struct rockchip_mux_route_data px30_mux_route_data[] = { - { - /* cif-d2m0 */ - .bank_num = 2, - .pin = 0, - .func = 1, - .route_offset = 0x184, - .route_val = BIT(16 + 7), - }, { - /* cif-d2m1 */ - .bank_num = 3, - .pin = 3, - .func = 3, - .route_offset = 0x184, - .route_val = BIT(16 + 7) | BIT(7), - }, { - /* pdm-m0 */ - .bank_num = 3, - .pin = 22, - .func = 2, - .route_offset = 0x184, - .route_val = BIT(16 + 8), - }, { - /* pdm-m1 */ - .bank_num = 2, - .pin = 22, - .func = 1, - .route_offset = 0x184, - .route_val = BIT(16 + 8) | BIT(8), - }, { - /* uart2-rxm0 */ - .bank_num = 1, - .pin = 27, - .func = 2, - .route_offset = 0x184, - .route_val = BIT(16 + 10), - }, { - /* uart2-rxm1 */ - .bank_num = 2, - .pin = 14, - .func = 2, - .route_offset = 0x184, - .route_val = BIT(16 + 10) | BIT(10), - }, { - /* uart3-rxm0 */ - .bank_num = 0, - .pin = 17, - .func = 2, - .route_offset = 0x184, - .route_val = BIT(16 + 9), - }, { - /* uart3-rxm1 */ - .bank_num = 1, - .pin = 15, - .func = 2, - .route_offset = 0x184, - .route_val = BIT(16 + 9) | BIT(9), - }, + RK_MUXROUTE_SAME(2, RK_PA0, 1, 0x184, BIT(16 + 7)), /* cif-d2m0 */ + RK_MUXROUTE_SAME(3, RK_PA3, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d2m1 */ + RK_MUXROUTE_SAME(3, RK_PC6, 2, 0x184, BIT(16 + 8)), /* pdm-m0 */ + RK_MUXROUTE_SAME(2, RK_PC6, 1, 0x184, BIT(16 + 8) | BIT(8)), /* pdm-m1 */ + RK_MUXROUTE_SAME(1, RK_PD3, 2, 0x184, BIT(16 + 10)), /* uart2-rxm0 */ + RK_MUXROUTE_SAME(2, RK_PB6, 2, 0x184, BIT(16 + 10) | BIT(10)), /* uart2-rxm1 */ + RK_MUXROUTE_SAME(0, RK_PC1, 2, 0x184, BIT(16 + 9)), /* uart3-rxm0 */ + RK_MUXROUTE_SAME(1, RK_PB7, 2, 0x184, BIT(16 + 9) | BIT(9)), /* uart3-rxm1 */ }; static struct rockchip_mux_route_data rk3128_mux_route_data[] = { - { - /* spi-0 */ - .bank_num = 1, - .pin = 10, - .func = 1, - .route_offset = 0x144, - .route_val = BIT(16 + 3) | BIT(16 + 4), - }, { - /* spi-1 */ - .bank_num = 1, - .pin = 27, - .func = 3, - .route_offset = 0x144, - .route_val = BIT(16 + 3) | BIT(16 + 4) | BIT(3), - }, { - /* spi-2 */ - .bank_num = 0, - .pin = 13, - .func = 2, - .route_offset = 0x144, - .route_val = BIT(16 + 3) | BIT(16 + 4) | BIT(4), - }, { - /* i2s-0 */ - .bank_num = 1, - .pin = 5, - .func = 1, - .route_offset = 0x144, - .route_val = BIT(16 + 5), - }, { - /* i2s-1 */ - .bank_num = 0, - .pin = 14, - .func = 1, - .route_offset = 0x144, - .route_val = BIT(16 + 5) | BIT(5), - }, { - /* emmc-0 */ - .bank_num = 1, - .pin = 22, - .func = 2, - .route_offset = 0x144, - .route_val = BIT(16 + 6), - }, { - /* emmc-1 */ - .bank_num = 2, - .pin = 4, - .func = 2, - .route_offset = 0x144, - .route_val = BIT(16 + 6) | BIT(6), - }, + RK_MUXROUTE_SAME(1, RK_PB2, 1, 0x144, BIT(16 + 3) | BIT(16 + 4)), /* spi-0 */ + RK_MUXROUTE_SAME(1, RK_PD3, 3, 0x144, BIT(16 + 3) | BIT(16 + 4) | BIT(3)), /* spi-1 */ + RK_MUXROUTE_SAME(0, RK_PB5, 2, 0x144, BIT(16 + 3) | BIT(16 + 4) | BIT(4)), /* spi-2 */ + RK_MUXROUTE_SAME(1, RK_PA5, 1, 0x144, BIT(16 + 5)), /* i2s-0 */ + RK_MUXROUTE_SAME(0, RK_PB6, 1, 0x144, BIT(16 + 5) | BIT(5)), /* i2s-1 */ + RK_MUXROUTE_SAME(1, RK_PC6, 2, 0x144, BIT(16 + 6)), /* emmc-0 */ + RK_MUXROUTE_SAME(2, RK_PA4, 2, 0x144, BIT(16 + 6) | BIT(6)), /* emmc-1 */ }; static struct rockchip_mux_route_data rk3188_mux_route_data[] = { - { - /* non-iomuxed emmc/flash pins on flash-dqs */ - .bank_num = 0, - .pin = 24, - .func = 1, - .route_location = ROCKCHIP_ROUTE_GRF, - .route_offset = 0xa0, - .route_val = BIT(16 + 11), - }, { - /* 
non-iomuxed emmc/flash pins on emmc-clk */ - .bank_num = 0, - .pin = 24, - .func = 2, - .route_location = ROCKCHIP_ROUTE_GRF, - .route_offset = 0xa0, - .route_val = BIT(16 + 11) | BIT(11), - }, + RK_MUXROUTE_SAME(0, RK_PD0, 1, 0xa0, BIT(16 + 11)), /* non-iomuxed emmc/flash pins on flash-dqs */ + RK_MUXROUTE_SAME(0, RK_PD0, 2, 0xa0, BIT(16 + 11) | BIT(11)), /* non-iomuxed emmc/flash pins on emmc-clk */ }; static struct rockchip_mux_route_data rk3228_mux_route_data[] = { - { - /* pwm0-0 */ - .bank_num = 0, - .pin = 26, - .func = 1, - .route_offset = 0x50, - .route_val = BIT(16), - }, { - /* pwm0-1 */ - .bank_num = 3, - .pin = 21, - .func = 1, - .route_offset = 0x50, - .route_val = BIT(16) | BIT(0), - }, { - /* pwm1-0 */ - .bank_num = 0, - .pin = 27, - .func = 1, - .route_offset = 0x50, - .route_val = BIT(16 + 1), - }, { - /* pwm1-1 */ - .bank_num = 0, - .pin = 30, - .func = 2, - .route_offset = 0x50, - .route_val = BIT(16 + 1) | BIT(1), - }, { - /* pwm2-0 */ - .bank_num = 0, - .pin = 28, - .func = 1, - .route_offset = 0x50, - .route_val = BIT(16 + 2), - }, { - /* pwm2-1 */ - .bank_num = 1, - .pin = 12, - .func = 2, - .route_offset = 0x50, - .route_val = BIT(16 + 2) | BIT(2), - }, { - /* pwm3-0 */ - .bank_num = 3, - .pin = 26, - .func = 1, - .route_offset = 0x50, - .route_val = BIT(16 + 3), - }, { - /* pwm3-1 */ - .bank_num = 1, - .pin = 11, - .func = 2, - .route_offset = 0x50, - .route_val = BIT(16 + 3) | BIT(3), - }, { - /* sdio-0_d0 */ - .bank_num = 1, - .pin = 1, - .func = 1, - .route_offset = 0x50, - .route_val = BIT(16 + 4), - }, { - /* sdio-1_d0 */ - .bank_num = 3, - .pin = 2, - .func = 1, - .route_offset = 0x50, - .route_val = BIT(16 + 4) | BIT(4), - }, { - /* spi-0_rx */ - .bank_num = 0, - .pin = 13, - .func = 2, - .route_offset = 0x50, - .route_val = BIT(16 + 5), - }, { - /* spi-1_rx */ - .bank_num = 2, - .pin = 0, - .func = 2, - .route_offset = 0x50, - .route_val = BIT(16 + 5) | BIT(5), - }, { - /* emmc-0_cmd */ - .bank_num = 1, - .pin = 22, - .func = 2, - .route_offset = 0x50, - .route_val = BIT(16 + 7), - }, { - /* emmc-1_cmd */ - .bank_num = 2, - .pin = 4, - .func = 2, - .route_offset = 0x50, - .route_val = BIT(16 + 7) | BIT(7), - }, { - /* uart2-0_rx */ - .bank_num = 1, - .pin = 19, - .func = 2, - .route_offset = 0x50, - .route_val = BIT(16 + 8), - }, { - /* uart2-1_rx */ - .bank_num = 1, - .pin = 10, - .func = 2, - .route_offset = 0x50, - .route_val = BIT(16 + 8) | BIT(8), - }, { - /* uart1-0_rx */ - .bank_num = 1, - .pin = 10, - .func = 1, - .route_offset = 0x50, - .route_val = BIT(16 + 11), - }, { - /* uart1-1_rx */ - .bank_num = 3, - .pin = 13, - .func = 1, - .route_offset = 0x50, - .route_val = BIT(16 + 11) | BIT(11), - }, + RK_MUXROUTE_SAME(0, RK_PD2, 1, 0x50, BIT(16)), /* pwm0-0 */ + RK_MUXROUTE_SAME(3, RK_PC5, 1, 0x50, BIT(16) | BIT(0)), /* pwm0-1 */ + RK_MUXROUTE_SAME(0, RK_PD3, 1, 0x50, BIT(16 + 1)), /* pwm1-0 */ + RK_MUXROUTE_SAME(0, RK_PD6, 2, 0x50, BIT(16 + 1) | BIT(1)), /* pwm1-1 */ + RK_MUXROUTE_SAME(0, RK_PD4, 1, 0x50, BIT(16 + 2)), /* pwm2-0 */ + RK_MUXROUTE_SAME(1, RK_PB4, 2, 0x50, BIT(16 + 2) | BIT(2)), /* pwm2-1 */ + RK_MUXROUTE_SAME(3, RK_PD2, 1, 0x50, BIT(16 + 3)), /* pwm3-0 */ + RK_MUXROUTE_SAME(1, RK_PB3, 2, 0x50, BIT(16 + 3) | BIT(3)), /* pwm3-1 */ + RK_MUXROUTE_SAME(1, RK_PA1, 1, 0x50, BIT(16 + 4)), /* sdio-0_d0 */ + RK_MUXROUTE_SAME(3, RK_PA2, 1, 0x50, BIT(16 + 4) | BIT(4)), /* sdio-1_d0 */ + RK_MUXROUTE_SAME(0, RK_PB5, 2, 0x50, BIT(16 + 5)), /* spi-0_rx */ + RK_MUXROUTE_SAME(2, RK_PA0, 2, 0x50, BIT(16 + 5) | BIT(5)), /* spi-1_rx */ + 
RK_MUXROUTE_SAME(1, RK_PC6, 2, 0x50, BIT(16 + 7)), /* emmc-0_cmd */ + RK_MUXROUTE_SAME(2, RK_PA4, 2, 0x50, BIT(16 + 7) | BIT(7)), /* emmc-1_cmd */ + RK_MUXROUTE_SAME(1, RK_PC3, 2, 0x50, BIT(16 + 8)), /* uart2-0_rx */ + RK_MUXROUTE_SAME(1, RK_PB2, 2, 0x50, BIT(16 + 8) | BIT(8)), /* uart2-1_rx */ + RK_MUXROUTE_SAME(1, RK_PB2, 1, 0x50, BIT(16 + 11)), /* uart1-0_rx */ + RK_MUXROUTE_SAME(3, RK_PB5, 1, 0x50, BIT(16 + 11) | BIT(11)), /* uart1-1_rx */ }; static struct rockchip_mux_route_data rk3288_mux_route_data[] = { - { - /* edphdmi_cecinoutt1 */ - .bank_num = 7, - .pin = 16, - .func = 2, - .route_offset = 0x264, - .route_val = BIT(16 + 12) | BIT(12), - }, { - /* edphdmi_cecinout */ - .bank_num = 7, - .pin = 23, - .func = 4, - .route_offset = 0x264, - .route_val = BIT(16 + 12), - }, + RK_MUXROUTE_SAME(7, RK_PC0, 2, 0x264, BIT(16 + 12) | BIT(12)), /* edphdmi_cecinoutt1 */ + RK_MUXROUTE_SAME(7, RK_PC7, 4, 0x264, BIT(16 + 12)), /* edphdmi_cecinout */ }; static struct rockchip_mux_route_data rk3308_mux_route_data[] = { - { - /* rtc_clk */ - .bank_num = 0, - .pin = 19, - .func = 1, - .route_offset = 0x314, - .route_val = BIT(16 + 0) | BIT(0), - }, { - /* uart2_rxm0 */ - .bank_num = 1, - .pin = 22, - .func = 2, - .route_offset = 0x314, - .route_val = BIT(16 + 2) | BIT(16 + 3), - }, { - /* uart2_rxm1 */ - .bank_num = 4, - .pin = 26, - .func = 2, - .route_offset = 0x314, - .route_val = BIT(16 + 2) | BIT(16 + 3) | BIT(2), - }, { - /* i2c3_sdam0 */ - .bank_num = 0, - .pin = 15, - .func = 2, - .route_offset = 0x608, - .route_val = BIT(16 + 8) | BIT(16 + 9), - }, { - /* i2c3_sdam1 */ - .bank_num = 3, - .pin = 12, - .func = 2, - .route_offset = 0x608, - .route_val = BIT(16 + 8) | BIT(16 + 9) | BIT(8), - }, { - /* i2c3_sdam2 */ - .bank_num = 2, - .pin = 0, - .func = 3, - .route_offset = 0x608, - .route_val = BIT(16 + 8) | BIT(16 + 9) | BIT(9), - }, { - /* i2s-8ch-1-sclktxm0 */ - .bank_num = 1, - .pin = 3, - .func = 2, - .route_offset = 0x308, - .route_val = BIT(16 + 3), - }, { - /* i2s-8ch-1-sclkrxm0 */ - .bank_num = 1, - .pin = 4, - .func = 2, - .route_offset = 0x308, - .route_val = BIT(16 + 3), - }, { - /* i2s-8ch-1-sclktxm1 */ - .bank_num = 1, - .pin = 13, - .func = 2, - .route_offset = 0x308, - .route_val = BIT(16 + 3) | BIT(3), - }, { - /* i2s-8ch-1-sclkrxm1 */ - .bank_num = 1, - .pin = 14, - .func = 2, - .route_offset = 0x308, - .route_val = BIT(16 + 3) | BIT(3), - }, { - /* pdm-clkm0 */ - .bank_num = 1, - .pin = 4, - .func = 3, - .route_offset = 0x308, - .route_val = BIT(16 + 12) | BIT(16 + 13), - }, { - /* pdm-clkm1 */ - .bank_num = 1, - .pin = 14, - .func = 4, - .route_offset = 0x308, - .route_val = BIT(16 + 12) | BIT(16 + 13) | BIT(12), - }, { - /* pdm-clkm2 */ - .bank_num = 2, - .pin = 6, - .func = 2, - .route_offset = 0x308, - .route_val = BIT(16 + 12) | BIT(16 + 13) | BIT(13), - }, { - /* pdm-clkm-m2 */ - .bank_num = 2, - .pin = 4, - .func = 3, - .route_offset = 0x600, - .route_val = BIT(16 + 2) | BIT(2), - }, { - /* spi1_miso */ - .bank_num = 3, - .pin = 10, - .func = 3, - .route_offset = 0x314, - .route_val = BIT(16 + 9), - }, { - /* spi1_miso_m1 */ - .bank_num = 2, - .pin = 4, - .func = 2, - .route_offset = 0x314, - .route_val = BIT(16 + 9) | BIT(9), - }, { - /* owire_m0 */ - .bank_num = 0, - .pin = 11, - .func = 3, - .route_offset = 0x314, - .route_val = BIT(16 + 10) | BIT(16 + 11), - }, { - /* owire_m1 */ - .bank_num = 1, - .pin = 22, - .func = 7, - .route_offset = 0x314, - .route_val = BIT(16 + 10) | BIT(16 + 11) | BIT(10), - }, { - /* owire_m2 */ - .bank_num = 2, - .pin = 2, - .func = 5, 
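/*
 * Minimal standalone sketch (not part of the patch): the route_val entries in
 * these tables appear to follow the usual Rockchip GRF convention, where the
 * upper 16 bits of a register act as per-bit write enables for the lower 16
 * bits, so BIT(16 + n) on its own clears route bit n and BIT(16 + n) | BIT(n)
 * sets it.  The WRITE_MASK_VAL(h, l, v) entries in the rk3568 table further
 * below presumably generalise this to multi-bit fields; the macro body is not
 * visible in this hunk, so the field variant here is an assumption, and the
 * helper names are illustrative only.
 */
#include <stdio.h>

#define BIT(n)		(1U << (n))
#define FIELD(h, l)	(((1U << ((h) - (l) + 1)) - 1) << (l))

/* Single-bit route select: enable the write of bit n and set it to val. */
static unsigned int grf_route_bit(unsigned int n, unsigned int val)
{
	return BIT(16 + n) | (val ? BIT(n) : 0);
}

/* Assumed shape of WRITE_MASK_VAL: enable field [h:l] and write val into it. */
static unsigned int grf_route_field(unsigned int h, unsigned int l,
				    unsigned int val)
{
	return FIELD(h + 16, l + 16) | ((val << l) & FIELD(h, l));
}

int main(void)
{
	/* uart1-1_rx above: BIT(16 + 11) | BIT(11) comes out as 0x08000800 */
	printf("0x%08x\n", grf_route_bit(11, 1));
	/* a hypothetical WRITE_MASK_VAL(5, 4, 2) would come out as 0x00300020 */
	printf("0x%08x\n", grf_route_field(5, 4, 2));
	return 0;
}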
- .route_offset = 0x314, - .route_val = BIT(16 + 10) | BIT(16 + 11) | BIT(11), - }, { - /* can_rxd_m0 */ - .bank_num = 0, - .pin = 11, - .func = 2, - .route_offset = 0x314, - .route_val = BIT(16 + 12) | BIT(16 + 13), - }, { - /* can_rxd_m1 */ - .bank_num = 1, - .pin = 22, - .func = 5, - .route_offset = 0x314, - .route_val = BIT(16 + 12) | BIT(16 + 13) | BIT(12), - }, { - /* can_rxd_m2 */ - .bank_num = 2, - .pin = 2, - .func = 4, - .route_offset = 0x314, - .route_val = BIT(16 + 12) | BIT(16 + 13) | BIT(13), - }, { - /* mac_rxd0_m0 */ - .bank_num = 1, - .pin = 20, - .func = 3, - .route_offset = 0x314, - .route_val = BIT(16 + 14), - }, { - /* mac_rxd0_m1 */ - .bank_num = 4, - .pin = 2, - .func = 2, - .route_offset = 0x314, - .route_val = BIT(16 + 14) | BIT(14), - }, { - /* uart3_rx */ - .bank_num = 3, - .pin = 12, - .func = 4, - .route_offset = 0x314, - .route_val = BIT(16 + 15), - }, { - /* uart3_rx_m1 */ - .bank_num = 0, - .pin = 17, - .func = 3, - .route_offset = 0x314, - .route_val = BIT(16 + 15) | BIT(15), - }, + RK_MUXROUTE_SAME(0, RK_PC3, 1, 0x314, BIT(16 + 0) | BIT(0)), /* rtc_clk */ + RK_MUXROUTE_SAME(1, RK_PC6, 2, 0x314, BIT(16 + 2) | BIT(16 + 3)), /* uart2_rxm0 */ + RK_MUXROUTE_SAME(4, RK_PD2, 2, 0x314, BIT(16 + 2) | BIT(16 + 3) | BIT(2)), /* uart2_rxm1 */ + RK_MUXROUTE_SAME(0, RK_PB7, 2, 0x608, BIT(16 + 8) | BIT(16 + 9)), /* i2c3_sdam0 */ + RK_MUXROUTE_SAME(3, RK_PB4, 2, 0x608, BIT(16 + 8) | BIT(16 + 9) | BIT(8)), /* i2c3_sdam1 */ + RK_MUXROUTE_SAME(2, RK_PA0, 3, 0x608, BIT(16 + 8) | BIT(16 + 9) | BIT(9)), /* i2c3_sdam2 */ + RK_MUXROUTE_SAME(1, RK_PA3, 2, 0x308, BIT(16 + 3)), /* i2s-8ch-1-sclktxm0 */ + RK_MUXROUTE_SAME(1, RK_PA4, 2, 0x308, BIT(16 + 3)), /* i2s-8ch-1-sclkrxm0 */ + RK_MUXROUTE_SAME(1, RK_PB5, 2, 0x308, BIT(16 + 3) | BIT(3)), /* i2s-8ch-1-sclktxm1 */ + RK_MUXROUTE_SAME(1, RK_PB6, 2, 0x308, BIT(16 + 3) | BIT(3)), /* i2s-8ch-1-sclkrxm1 */ + RK_MUXROUTE_SAME(1, RK_PA4, 3, 0x308, BIT(16 + 12) | BIT(16 + 13)), /* pdm-clkm0 */ + RK_MUXROUTE_SAME(1, RK_PB6, 4, 0x308, BIT(16 + 12) | BIT(16 + 13) | BIT(12)), /* pdm-clkm1 */ + RK_MUXROUTE_SAME(2, RK_PA6, 2, 0x308, BIT(16 + 12) | BIT(16 + 13) | BIT(13)), /* pdm-clkm2 */ + RK_MUXROUTE_SAME(2, RK_PA4, 3, 0x600, BIT(16 + 2) | BIT(2)), /* pdm-clkm-m2 */ + RK_MUXROUTE_SAME(3, RK_PB2, 3, 0x314, BIT(16 + 9)), /* spi1_miso */ + RK_MUXROUTE_SAME(2, RK_PA4, 2, 0x314, BIT(16 + 9) | BIT(9)), /* spi1_miso_m1 */ + RK_MUXROUTE_SAME(0, RK_PB3, 3, 0x314, BIT(16 + 10) | BIT(16 + 11)), /* owire_m0 */ + RK_MUXROUTE_SAME(1, RK_PC6, 7, 0x314, BIT(16 + 10) | BIT(16 + 11) | BIT(10)), /* owire_m1 */ + RK_MUXROUTE_SAME(2, RK_PA2, 5, 0x314, BIT(16 + 10) | BIT(16 + 11) | BIT(11)), /* owire_m2 */ + RK_MUXROUTE_SAME(0, RK_PB3, 2, 0x314, BIT(16 + 12) | BIT(16 + 13)), /* can_rxd_m0 */ + RK_MUXROUTE_SAME(1, RK_PC6, 5, 0x314, BIT(16 + 12) | BIT(16 + 13) | BIT(12)), /* can_rxd_m1 */ + RK_MUXROUTE_SAME(2, RK_PA2, 4, 0x314, BIT(16 + 12) | BIT(16 + 13) | BIT(13)), /* can_rxd_m2 */ + RK_MUXROUTE_SAME(1, RK_PC4, 3, 0x314, BIT(16 + 14)), /* mac_rxd0_m0 */ + RK_MUXROUTE_SAME(4, RK_PA2, 2, 0x314, BIT(16 + 14) | BIT(14)), /* mac_rxd0_m1 */ + RK_MUXROUTE_SAME(3, RK_PB4, 4, 0x314, BIT(16 + 15)), /* uart3_rx */ + RK_MUXROUTE_SAME(0, RK_PC1, 3, 0x314, BIT(16 + 15) | BIT(15)), /* uart3_rx_m1 */ }; static struct rockchip_mux_route_data rk3328_mux_route_data[] = { - { - /* uart2dbg_rxm0 */ - .bank_num = 1, - .pin = 1, - .func = 2, - .route_offset = 0x50, - .route_val = BIT(16) | BIT(16 + 1), - }, { - /* uart2dbg_rxm1 */ - .bank_num = 2, - .pin = 1, - .func = 1, - .route_offset 
= 0x50, - .route_val = BIT(16) | BIT(16 + 1) | BIT(0), - }, { - /* gmac-m1_rxd0 */ - .bank_num = 1, - .pin = 11, - .func = 2, - .route_offset = 0x50, - .route_val = BIT(16 + 2) | BIT(2), - }, { - /* gmac-m1-optimized_rxd3 */ - .bank_num = 1, - .pin = 14, - .func = 2, - .route_offset = 0x50, - .route_val = BIT(16 + 10) | BIT(10), - }, { - /* pdm_sdi0m0 */ - .bank_num = 2, - .pin = 19, - .func = 2, - .route_offset = 0x50, - .route_val = BIT(16 + 3), - }, { - /* pdm_sdi0m1 */ - .bank_num = 1, - .pin = 23, - .func = 3, - .route_offset = 0x50, - .route_val = BIT(16 + 3) | BIT(3), - }, { - /* spi_rxdm2 */ - .bank_num = 3, - .pin = 2, - .func = 4, - .route_offset = 0x50, - .route_val = BIT(16 + 4) | BIT(16 + 5) | BIT(5), - }, { - /* i2s2_sdim0 */ - .bank_num = 1, - .pin = 24, - .func = 1, - .route_offset = 0x50, - .route_val = BIT(16 + 6), - }, { - /* i2s2_sdim1 */ - .bank_num = 3, - .pin = 2, - .func = 6, - .route_offset = 0x50, - .route_val = BIT(16 + 6) | BIT(6), - }, { - /* card_iom1 */ - .bank_num = 2, - .pin = 22, - .func = 3, - .route_offset = 0x50, - .route_val = BIT(16 + 7) | BIT(7), - }, { - /* tsp_d5m1 */ - .bank_num = 2, - .pin = 16, - .func = 3, - .route_offset = 0x50, - .route_val = BIT(16 + 8) | BIT(8), - }, { - /* cif_data5m1 */ - .bank_num = 2, - .pin = 16, - .func = 4, - .route_offset = 0x50, - .route_val = BIT(16 + 9) | BIT(9), - }, + RK_MUXROUTE_SAME(1, RK_PA1, 2, 0x50, BIT(16) | BIT(16 + 1)), /* uart2dbg_rxm0 */ + RK_MUXROUTE_SAME(2, RK_PA1, 1, 0x50, BIT(16) | BIT(16 + 1) | BIT(0)), /* uart2dbg_rxm1 */ + RK_MUXROUTE_SAME(1, RK_PB3, 2, 0x50, BIT(16 + 2) | BIT(2)), /* gmac-m1_rxd0 */ + RK_MUXROUTE_SAME(1, RK_PB6, 2, 0x50, BIT(16 + 10) | BIT(10)), /* gmac-m1-optimized_rxd3 */ + RK_MUXROUTE_SAME(2, RK_PC3, 2, 0x50, BIT(16 + 3)), /* pdm_sdi0m0 */ + RK_MUXROUTE_SAME(1, RK_PC7, 3, 0x50, BIT(16 + 3) | BIT(3)), /* pdm_sdi0m1 */ + RK_MUXROUTE_SAME(3, RK_PA2, 4, 0x50, BIT(16 + 4) | BIT(16 + 5) | BIT(5)), /* spi_rxdm2 */ + RK_MUXROUTE_SAME(1, RK_PD0, 1, 0x50, BIT(16 + 6)), /* i2s2_sdim0 */ + RK_MUXROUTE_SAME(3, RK_PA2, 6, 0x50, BIT(16 + 6) | BIT(6)), /* i2s2_sdim1 */ + RK_MUXROUTE_SAME(2, RK_PC6, 3, 0x50, BIT(16 + 7) | BIT(7)), /* card_iom1 */ + RK_MUXROUTE_SAME(2, RK_PC0, 3, 0x50, BIT(16 + 8) | BIT(8)), /* tsp_d5m1 */ + RK_MUXROUTE_SAME(2, RK_PC0, 4, 0x50, BIT(16 + 9) | BIT(9)), /* cif_data5m1 */ }; static struct rockchip_mux_route_data rk3399_mux_route_data[] = { - { - /* uart2dbga_rx */ - .bank_num = 4, - .pin = 8, - .func = 2, - .route_offset = 0xe21c, - .route_val = BIT(16 + 10) | BIT(16 + 11), - }, { - /* uart2dbgb_rx */ - .bank_num = 4, - .pin = 16, - .func = 2, - .route_offset = 0xe21c, - .route_val = BIT(16 + 10) | BIT(16 + 11) | BIT(10), - }, { - /* uart2dbgc_rx */ - .bank_num = 4, - .pin = 19, - .func = 1, - .route_offset = 0xe21c, - .route_val = BIT(16 + 10) | BIT(16 + 11) | BIT(11), - }, { - /* pcie_clkreqn */ - .bank_num = 2, - .pin = 26, - .func = 2, - .route_offset = 0xe21c, - .route_val = BIT(16 + 14), - }, { - /* pcie_clkreqnb */ - .bank_num = 4, - .pin = 24, - .func = 1, - .route_offset = 0xe21c, - .route_val = BIT(16 + 14) | BIT(14), - }, + RK_MUXROUTE_SAME(4, RK_PB0, 2, 0xe21c, BIT(16 + 10) | BIT(16 + 11)), /* uart2dbga_rx */ + RK_MUXROUTE_SAME(4, RK_PC0, 2, 0xe21c, BIT(16 + 10) | BIT(16 + 11) | BIT(10)), /* uart2dbgb_rx */ + RK_MUXROUTE_SAME(4, RK_PC3, 1, 0xe21c, BIT(16 + 10) | BIT(16 + 11) | BIT(11)), /* uart2dbgc_rx */ + RK_MUXROUTE_SAME(2, RK_PD2, 2, 0xe21c, BIT(16 + 14)), /* pcie_clkreqn */ + RK_MUXROUTE_SAME(4, RK_PD0, 1, 0xe21c, BIT(16 + 14) | BIT(14)), /* 
pcie_clkreqnb */ +}; + +static struct rockchip_mux_route_data rk3568_mux_route_data[] = { + RK_MUXROUTE_PMU(0, RK_PB7, 1, 0x0110, WRITE_MASK_VAL(1, 0, 0)), /* PWM0 IO mux M0 */ + RK_MUXROUTE_PMU(0, RK_PC7, 2, 0x0110, WRITE_MASK_VAL(1, 0, 1)), /* PWM0 IO mux M1 */ + RK_MUXROUTE_PMU(0, RK_PC0, 1, 0x0110, WRITE_MASK_VAL(3, 2, 0)), /* PWM1 IO mux M0 */ + RK_MUXROUTE_PMU(0, RK_PB5, 4, 0x0110, WRITE_MASK_VAL(3, 2, 1)), /* PWM1 IO mux M1 */ + RK_MUXROUTE_PMU(0, RK_PC1, 1, 0x0110, WRITE_MASK_VAL(5, 4, 0)), /* PWM2 IO mux M0 */ + RK_MUXROUTE_PMU(0, RK_PB6, 4, 0x0110, WRITE_MASK_VAL(5, 4, 1)), /* PWM2 IO mux M1 */ + RK_MUXROUTE_GRF(0, RK_PB3, 2, 0x0300, WRITE_MASK_VAL(0, 0, 0)), /* CAN0 IO mux M0 */ + RK_MUXROUTE_GRF(2, RK_PA1, 4, 0x0300, WRITE_MASK_VAL(0, 0, 1)), /* CAN0 IO mux M1 */ + RK_MUXROUTE_GRF(1, RK_PA1, 3, 0x0300, WRITE_MASK_VAL(2, 2, 0)), /* CAN1 IO mux M0 */ + RK_MUXROUTE_GRF(4, RK_PC3, 3, 0x0300, WRITE_MASK_VAL(2, 2, 1)), /* CAN1 IO mux M1 */ + RK_MUXROUTE_GRF(4, RK_PB5, 3, 0x0300, WRITE_MASK_VAL(4, 4, 0)), /* CAN2 IO mux M0 */ + RK_MUXROUTE_GRF(2, RK_PB2, 4, 0x0300, WRITE_MASK_VAL(4, 4, 1)), /* CAN2 IO mux M1 */ + RK_MUXROUTE_GRF(4, RK_PC4, 1, 0x0300, WRITE_MASK_VAL(6, 6, 0)), /* HPDIN IO mux M0 */ + RK_MUXROUTE_GRF(0, RK_PC2, 2, 0x0300, WRITE_MASK_VAL(6, 6, 1)), /* HPDIN IO mux M1 */ + RK_MUXROUTE_GRF(3, RK_PB1, 3, 0x0300, WRITE_MASK_VAL(8, 8, 0)), /* GMAC1 IO mux M0 */ + RK_MUXROUTE_GRF(4, RK_PA7, 3, 0x0300, WRITE_MASK_VAL(8, 8, 1)), /* GMAC1 IO mux M1 */ + RK_MUXROUTE_GRF(4, RK_PD1, 1, 0x0300, WRITE_MASK_VAL(10, 10, 0)), /* HDMITX IO mux M0 */ + RK_MUXROUTE_GRF(0, RK_PC7, 1, 0x0300, WRITE_MASK_VAL(10, 10, 1)), /* HDMITX IO mux M1 */ + RK_MUXROUTE_GRF(0, RK_PB6, 1, 0x0300, WRITE_MASK_VAL(14, 14, 0)), /* I2C2 IO mux M0 */ + RK_MUXROUTE_GRF(4, RK_PB4, 1, 0x0300, WRITE_MASK_VAL(14, 14, 1)), /* I2C2 IO mux M1 */ + RK_MUXROUTE_GRF(1, RK_PA0, 1, 0x0304, WRITE_MASK_VAL(0, 0, 0)), /* I2C3 IO mux M0 */ + RK_MUXROUTE_GRF(3, RK_PB6, 4, 0x0304, WRITE_MASK_VAL(0, 0, 1)), /* I2C3 IO mux M1 */ + RK_MUXROUTE_GRF(4, RK_PB2, 1, 0x0304, WRITE_MASK_VAL(2, 2, 0)), /* I2C4 IO mux M0 */ + RK_MUXROUTE_GRF(2, RK_PB1, 2, 0x0304, WRITE_MASK_VAL(2, 2, 1)), /* I2C4 IO mux M1 */ + RK_MUXROUTE_GRF(3, RK_PB4, 4, 0x0304, WRITE_MASK_VAL(4, 4, 0)), /* I2C5 IO mux M0 */ + RK_MUXROUTE_GRF(4, RK_PD0, 2, 0x0304, WRITE_MASK_VAL(4, 4, 1)), /* I2C5 IO mux M1 */ + RK_MUXROUTE_GRF(3, RK_PB1, 5, 0x0304, WRITE_MASK_VAL(14, 14, 0)), /* PWM8 IO mux M0 */ + RK_MUXROUTE_GRF(1, RK_PD5, 4, 0x0304, WRITE_MASK_VAL(14, 14, 1)), /* PWM8 IO mux M1 */ + RK_MUXROUTE_GRF(3, RK_PB2, 5, 0x0308, WRITE_MASK_VAL(0, 0, 0)), /* PWM9 IO mux M0 */ + RK_MUXROUTE_GRF(1, RK_PD6, 4, 0x0308, WRITE_MASK_VAL(0, 0, 1)), /* PWM9 IO mux M1 */ + RK_MUXROUTE_GRF(3, RK_PB5, 5, 0x0308, WRITE_MASK_VAL(2, 2, 0)), /* PWM10 IO mux M0 */ + RK_MUXROUTE_GRF(2, RK_PA1, 2, 0x0308, WRITE_MASK_VAL(2, 2, 1)), /* PWM10 IO mux M1 */ + RK_MUXROUTE_GRF(3, RK_PB6, 5, 0x0308, WRITE_MASK_VAL(4, 4, 0)), /* PWM11 IO mux M0 */ + RK_MUXROUTE_GRF(4, RK_PC0, 3, 0x0308, WRITE_MASK_VAL(4, 4, 1)), /* PWM11 IO mux M1 */ + RK_MUXROUTE_GRF(3, RK_PB7, 2, 0x0308, WRITE_MASK_VAL(6, 6, 0)), /* PWM12 IO mux M0 */ + RK_MUXROUTE_GRF(4, RK_PC5, 1, 0x0308, WRITE_MASK_VAL(6, 6, 1)), /* PWM12 IO mux M1 */ + RK_MUXROUTE_GRF(3, RK_PC0, 2, 0x0308, WRITE_MASK_VAL(8, 8, 0)), /* PWM13 IO mux M0 */ + RK_MUXROUTE_GRF(4, RK_PC6, 1, 0x0308, WRITE_MASK_VAL(8, 8, 1)), /* PWM13 IO mux M1 */ + RK_MUXROUTE_GRF(3, RK_PC4, 1, 0x0308, WRITE_MASK_VAL(10, 10, 0)), /* PWM14 IO mux M0 */ + RK_MUXROUTE_GRF(4, RK_PC2, 1, 0x0308, 
WRITE_MASK_VAL(10, 10, 1)), /* PWM14 IO mux M1 */ + RK_MUXROUTE_GRF(3, RK_PC5, 1, 0x0308, WRITE_MASK_VAL(12, 12, 0)), /* PWM15 IO mux M0 */ + RK_MUXROUTE_GRF(4, RK_PC3, 1, 0x0308, WRITE_MASK_VAL(12, 12, 1)), /* PWM15 IO mux M1 */ + RK_MUXROUTE_GRF(3, RK_PD2, 3, 0x0308, WRITE_MASK_VAL(14, 14, 0)), /* SDMMC2 IO mux M0 */ + RK_MUXROUTE_GRF(3, RK_PA5, 5, 0x0308, WRITE_MASK_VAL(14, 14, 1)), /* SDMMC2 IO mux M1 */ + RK_MUXROUTE_GRF(0, RK_PB5, 2, 0x030c, WRITE_MASK_VAL(0, 0, 0)), /* SPI0 IO mux M0 */ + RK_MUXROUTE_GRF(2, RK_PD3, 3, 0x030c, WRITE_MASK_VAL(0, 0, 1)), /* SPI0 IO mux M1 */ + RK_MUXROUTE_GRF(2, RK_PB5, 3, 0x030c, WRITE_MASK_VAL(2, 2, 0)), /* SPI1 IO mux M0 */ + RK_MUXROUTE_GRF(3, RK_PC3, 3, 0x030c, WRITE_MASK_VAL(2, 2, 1)), /* SPI1 IO mux M1 */ + RK_MUXROUTE_GRF(2, RK_PC1, 4, 0x030c, WRITE_MASK_VAL(4, 4, 0)), /* SPI2 IO mux M0 */ + RK_MUXROUTE_GRF(3, RK_PA0, 3, 0x030c, WRITE_MASK_VAL(4, 4, 1)), /* SPI2 IO mux M1 */ + RK_MUXROUTE_GRF(4, RK_PB3, 4, 0x030c, WRITE_MASK_VAL(6, 6, 0)), /* SPI3 IO mux M0 */ + RK_MUXROUTE_GRF(4, RK_PC2, 2, 0x030c, WRITE_MASK_VAL(6, 6, 1)), /* SPI3 IO mux M1 */ + RK_MUXROUTE_GRF(2, RK_PB4, 2, 0x030c, WRITE_MASK_VAL(8, 8, 0)), /* UART1 IO mux M0 */ + RK_MUXROUTE_GRF(3, RK_PD6, 4, 0x030c, WRITE_MASK_VAL(8, 8, 1)), /* UART1 IO mux M1 */ + RK_MUXROUTE_GRF(0, RK_PD1, 1, 0x030c, WRITE_MASK_VAL(10, 10, 0)), /* UART2 IO mux M0 */ + RK_MUXROUTE_GRF(1, RK_PD5, 2, 0x030c, WRITE_MASK_VAL(10, 10, 1)), /* UART2 IO mux M1 */ + RK_MUXROUTE_GRF(1, RK_PA1, 2, 0x030c, WRITE_MASK_VAL(12, 12, 0)), /* UART3 IO mux M0 */ + RK_MUXROUTE_GRF(3, RK_PB7, 4, 0x030c, WRITE_MASK_VAL(12, 12, 1)), /* UART3 IO mux M1 */ + RK_MUXROUTE_GRF(1, RK_PA6, 2, 0x030c, WRITE_MASK_VAL(14, 14, 0)), /* UART4 IO mux M0 */ + RK_MUXROUTE_GRF(3, RK_PB2, 4, 0x030c, WRITE_MASK_VAL(14, 14, 1)), /* UART4 IO mux M1 */ + RK_MUXROUTE_GRF(2, RK_PA2, 3, 0x0310, WRITE_MASK_VAL(0, 0, 0)), /* UART5 IO mux M0 */ + RK_MUXROUTE_GRF(3, RK_PC2, 4, 0x0310, WRITE_MASK_VAL(0, 0, 1)), /* UART5 IO mux M1 */ + RK_MUXROUTE_GRF(2, RK_PA4, 3, 0x0310, WRITE_MASK_VAL(2, 2, 0)), /* UART6 IO mux M0 */ + RK_MUXROUTE_GRF(1, RK_PD5, 3, 0x0310, WRITE_MASK_VAL(2, 2, 1)), /* UART6 IO mux M1 */ + RK_MUXROUTE_GRF(2, RK_PA6, 3, 0x0310, WRITE_MASK_VAL(5, 4, 0)), /* UART7 IO mux M0 */ + RK_MUXROUTE_GRF(3, RK_PC4, 4, 0x0310, WRITE_MASK_VAL(5, 4, 1)), /* UART7 IO mux M1 */ + RK_MUXROUTE_GRF(4, RK_PA2, 4, 0x0310, WRITE_MASK_VAL(5, 4, 2)), /* UART7 IO mux M2 */ + RK_MUXROUTE_GRF(2, RK_PC5, 3, 0x0310, WRITE_MASK_VAL(6, 6, 0)), /* UART8 IO mux M0 */ + RK_MUXROUTE_GRF(2, RK_PD7, 4, 0x0310, WRITE_MASK_VAL(6, 6, 1)), /* UART8 IO mux M1 */ + RK_MUXROUTE_GRF(2, RK_PB0, 3, 0x0310, WRITE_MASK_VAL(9, 8, 0)), /* UART9 IO mux M0 */ + RK_MUXROUTE_GRF(4, RK_PC5, 4, 0x0310, WRITE_MASK_VAL(9, 8, 1)), /* UART9 IO mux M1 */ + RK_MUXROUTE_GRF(4, RK_PA4, 4, 0x0310, WRITE_MASK_VAL(9, 8, 2)), /* UART9 IO mux M2 */ + RK_MUXROUTE_GRF(1, RK_PA2, 1, 0x0310, WRITE_MASK_VAL(11, 10, 0)), /* I2S1 IO mux M0 */ + RK_MUXROUTE_GRF(3, RK_PC6, 4, 0x0310, WRITE_MASK_VAL(11, 10, 1)), /* I2S1 IO mux M1 */ + RK_MUXROUTE_GRF(2, RK_PD0, 5, 0x0310, WRITE_MASK_VAL(11, 10, 2)), /* I2S1 IO mux M2 */ + RK_MUXROUTE_GRF(2, RK_PC1, 1, 0x0310, WRITE_MASK_VAL(12, 12, 0)), /* I2S2 IO mux M0 */ + RK_MUXROUTE_GRF(4, RK_PB6, 5, 0x0310, WRITE_MASK_VAL(12, 12, 1)), /* I2S2 IO mux M1 */ + RK_MUXROUTE_GRF(3, RK_PA2, 4, 0x0310, WRITE_MASK_VAL(14, 14, 0)), /* I2S3 IO mux M0 */ + RK_MUXROUTE_GRF(4, RK_PC2, 5, 0x0310, WRITE_MASK_VAL(14, 14, 1)), /* I2S3 IO mux M1 */ + RK_MUXROUTE_GRF(1, RK_PA4, 3, 0x0314, 
WRITE_MASK_VAL(1, 0, 0)), /* PDM IO mux M0 */ + RK_MUXROUTE_GRF(1, RK_PA6, 3, 0x0314, WRITE_MASK_VAL(1, 0, 0)), /* PDM IO mux M0 */ + RK_MUXROUTE_GRF(3, RK_PD6, 5, 0x0314, WRITE_MASK_VAL(1, 0, 1)), /* PDM IO mux M1 */ + RK_MUXROUTE_GRF(4, RK_PA0, 4, 0x0314, WRITE_MASK_VAL(1, 0, 1)), /* PDM IO mux M1 */ + RK_MUXROUTE_GRF(3, RK_PC4, 5, 0x0314, WRITE_MASK_VAL(1, 0, 2)), /* PDM IO mux M2 */ + RK_MUXROUTE_GRF(0, RK_PA5, 3, 0x0314, WRITE_MASK_VAL(3, 2, 0)), /* PCIE20 IO mux M0 */ + RK_MUXROUTE_GRF(2, RK_PD0, 4, 0x0314, WRITE_MASK_VAL(3, 2, 1)), /* PCIE20 IO mux M1 */ + RK_MUXROUTE_GRF(1, RK_PB0, 4, 0x0314, WRITE_MASK_VAL(3, 2, 2)), /* PCIE20 IO mux M2 */ + RK_MUXROUTE_GRF(0, RK_PA4, 3, 0x0314, WRITE_MASK_VAL(5, 4, 0)), /* PCIE30X1 IO mux M0 */ + RK_MUXROUTE_GRF(2, RK_PD2, 4, 0x0314, WRITE_MASK_VAL(5, 4, 1)), /* PCIE30X1 IO mux M1 */ + RK_MUXROUTE_GRF(1, RK_PA5, 4, 0x0314, WRITE_MASK_VAL(5, 4, 2)), /* PCIE30X1 IO mux M2 */ + RK_MUXROUTE_GRF(0, RK_PA6, 2, 0x0314, WRITE_MASK_VAL(7, 6, 0)), /* PCIE30X2 IO mux M0 */ + RK_MUXROUTE_GRF(2, RK_PD4, 4, 0x0314, WRITE_MASK_VAL(7, 6, 1)), /* PCIE30X2 IO mux M1 */ + RK_MUXROUTE_GRF(4, RK_PC2, 4, 0x0314, WRITE_MASK_VAL(7, 6, 2)), /* PCIE30X2 IO mux M2 */ }; static bool rockchip_get_mux_route(struct rockchip_pin_bank *bank, int pin, @@ -2117,6 +1751,68 @@ static void rk3399_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank, *bit = (pin_num % 8) * 2; } +#define RK3568_PULL_PMU_OFFSET 0x20 +#define RK3568_PULL_GRF_OFFSET 0x80 +#define RK3568_PULL_BITS_PER_PIN 2 +#define RK3568_PULL_PINS_PER_REG 8 +#define RK3568_PULL_BANK_STRIDE 0x10 + +static void rk3568_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, + int pin_num, struct regmap **regmap, + int *reg, u8 *bit) +{ + struct rockchip_pinctrl *info = bank->drvdata; + + if (bank->bank_num == 0) { + *regmap = info->regmap_pmu; + *reg = RK3568_PULL_PMU_OFFSET; + *reg += bank->bank_num * RK3568_PULL_BANK_STRIDE; + *reg += ((pin_num / RK3568_PULL_PINS_PER_REG) * 4); + + *bit = pin_num % RK3568_PULL_PINS_PER_REG; + *bit *= RK3568_PULL_BITS_PER_PIN; + } else { + *regmap = info->regmap_base; + *reg = RK3568_PULL_GRF_OFFSET; + *reg += (bank->bank_num - 1) * RK3568_PULL_BANK_STRIDE; + *reg += ((pin_num / RK3568_PULL_PINS_PER_REG) * 4); + + *bit = (pin_num % RK3568_PULL_PINS_PER_REG); + *bit *= RK3568_PULL_BITS_PER_PIN; + } +} + +#define RK3568_DRV_PMU_OFFSET 0x70 +#define RK3568_DRV_GRF_OFFSET 0x200 +#define RK3568_DRV_BITS_PER_PIN 8 +#define RK3568_DRV_PINS_PER_REG 2 +#define RK3568_DRV_BANK_STRIDE 0x40 + +static void rk3568_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank, + int pin_num, struct regmap **regmap, + int *reg, u8 *bit) +{ + struct rockchip_pinctrl *info = bank->drvdata; + + /* The first 32 pins of the first bank are located in PMU */ + if (bank->bank_num == 0) { + *regmap = info->regmap_pmu; + *reg = RK3568_DRV_PMU_OFFSET; + *reg += ((pin_num / RK3568_DRV_PINS_PER_REG) * 4); + + *bit = pin_num % RK3568_DRV_PINS_PER_REG; + *bit *= RK3568_DRV_BITS_PER_PIN; + } else { + *regmap = info->regmap_base; + *reg = RK3568_DRV_GRF_OFFSET; + *reg += (bank->bank_num - 1) * RK3568_DRV_BANK_STRIDE; + *reg += ((pin_num / RK3568_DRV_PINS_PER_REG) * 4); + + *bit = (pin_num % RK3568_DRV_PINS_PER_REG); + *bit *= RK3568_DRV_BITS_PER_PIN; + } +} + static int rockchip_perpin_drv_list[DRV_TYPE_MAX][8] = { { 2, 4, 8, 12, -1, -1, -1, -1 }, { 3, 6, 9, 12, -1, -1, -1, -1 }, @@ -2217,6 +1913,11 @@ static int rockchip_set_drive_perpin(struct rockchip_pin_bank *bank, bank->bank_num, pin_num, strength); ctrl->drv_calc_reg(bank, 
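/*
 * Minimal standalone sketch (not part of the patch): the
 * rk3568_calc_pull_reg_and_bit() helper added above is plain offset
 * arithmetic.  Using the constants from that hunk, a GRF-side pin such as
 * bank 3 pin 14 (GPIO3_B6) resolves to 0x80 + (3 - 1) * 0x10 + (14 / 8) * 4
 * = 0xa4, bit (14 % 8) * 2 = 12.  The RK3568 drive-strength path just below
 * additionally encodes a requested strength value N as (1 << (N + 1)) - 1,
 * i.e. N + 1 consecutive ones.  The helper name here is illustrative only.
 */
#include <stdio.h>

#define RK3568_PULL_GRF_OFFSET		0x80
#define RK3568_PULL_BITS_PER_PIN	2
#define RK3568_PULL_PINS_PER_REG	8
#define RK3568_PULL_BANK_STRIDE		0x10

/* GRF banks 1..4; bank 0 goes through the PMU GRF with a different base. */
static void pull_grf_reg_and_bit(int bank, int pin, int *reg, int *bit)
{
	*reg = RK3568_PULL_GRF_OFFSET +
	       (bank - 1) * RK3568_PULL_BANK_STRIDE +
	       (pin / RK3568_PULL_PINS_PER_REG) * 4;
	*bit = (pin % RK3568_PULL_PINS_PER_REG) * RK3568_PULL_BITS_PER_PIN;
}

int main(void)
{
	int reg, bit, strength = 2;

	pull_grf_reg_and_bit(3, 14, &reg, &bit);
	printf("reg=0x%x bit=%d\n", reg, bit);			/* reg=0xa4 bit=12 */
	printf("drv value=0x%x\n", (1 << (strength + 1)) - 1);	/* 0x7 */
	return 0;
}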
pin_num, ®map, ®, &bit); + if (ctrl->type == RK3568) { + rmask_bits = RK3568_DRV_BITS_PER_PIN; + ret = (1 << (strength + 1)) - 1; + goto config; + } ret = -EINVAL; for (i = 0; i < ARRAY_SIZE(rockchip_perpin_drv_list[drv_type]); i++) { @@ -2286,6 +1987,7 @@ static int rockchip_set_drive_perpin(struct rockchip_pin_bank *bank, return -EINVAL; } +config: /* enable the write to the equivalent lower bits */ data = ((1 << rmask_bits) - 1) << (bit + 16); rmask = data | (data >> 16); @@ -2343,9 +2045,18 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num) case RK3308: case RK3368: case RK3399: + case RK3568: pull_type = bank->pull_type[pin_num / 8]; data >>= bit; data &= (1 << RK3188_PULL_BITS_PER_PIN) - 1; + /* + * In the TRM, pull-up being 1 for everything except the GPIO0_D3-D6, + * where that pull up value becomes 3. + */ + if (ctrl->type == RK3568 && bank->bank_num == 0 && pin_num >= 27 && pin_num <= 30) { + if (data == 3) + data = 1; + } return rockchip_pull_list[pull_type][data]; default: @@ -2388,6 +2099,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank, case RK3308: case RK3368: case RK3399: + case RK3568: pull_type = bank->pull_type[pin_num / 8]; ret = -EINVAL; for (i = 0; i < ARRAY_SIZE(rockchip_pull_list[pull_type]); @@ -2397,6 +2109,14 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank, break; } } + /* + * In the TRM, pull-up being 1 for everything except the GPIO0_D3-D6, + * where that pull up value becomes 3. + */ + if (ctrl->type == RK3568 && bank->bank_num == 0 && pin_num >= 27 && pin_num <= 30) { + if (ret == 1) + ret = 3; + } if (ret < 0) { dev_err(info->dev, "unsupported pull setting %d\n", @@ -2441,6 +2161,35 @@ static int rk3328_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank, return 0; } +#define RK3568_SCHMITT_BITS_PER_PIN 2 +#define RK3568_SCHMITT_PINS_PER_REG 8 +#define RK3568_SCHMITT_BANK_STRIDE 0x10 +#define RK3568_SCHMITT_GRF_OFFSET 0xc0 +#define RK3568_SCHMITT_PMUGRF_OFFSET 0x30 + +static int rk3568_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank, + int pin_num, + struct regmap **regmap, + int *reg, u8 *bit) +{ + struct rockchip_pinctrl *info = bank->drvdata; + + if (bank->bank_num == 0) { + *regmap = info->regmap_pmu; + *reg = RK3568_SCHMITT_PMUGRF_OFFSET; + } else { + *regmap = info->regmap_base; + *reg = RK3568_SCHMITT_GRF_OFFSET; + *reg += (bank->bank_num - 1) * RK3568_SCHMITT_BANK_STRIDE; + } + + *reg += ((pin_num / RK3568_SCHMITT_PINS_PER_REG) * 4); + *bit = pin_num % RK3568_SCHMITT_PINS_PER_REG; + *bit *= RK3568_SCHMITT_BITS_PER_PIN; + + return 0; +} + static int rockchip_get_schmitt(struct rockchip_pin_bank *bank, int pin_num) { struct rockchip_pinctrl *info = bank->drvdata; @@ -2459,6 +2208,13 @@ static int rockchip_get_schmitt(struct rockchip_pin_bank *bank, int pin_num) return ret; data >>= bit; + switch (ctrl->type) { + case RK3568: + return data & ((1 << RK3568_SCHMITT_BITS_PER_PIN) - 1); + default: + break; + } + return data & 0x1; } @@ -2480,8 +2236,17 @@ static int rockchip_set_schmitt(struct rockchip_pin_bank *bank, return ret; /* enable the write to the equivalent lower bits */ - data = BIT(bit + 16) | (enable << bit); - rmask = BIT(bit + 16) | BIT(bit); + switch (ctrl->type) { + case RK3568: + data = ((1 << RK3568_SCHMITT_BITS_PER_PIN) - 1) << (bit + 16); + rmask = data | (data >> 16); + data |= ((enable ? 
0x2 : 0x1) << bit); + break; + default: + data = BIT(bit + 16) | (enable << bit); + rmask = BIT(bit + 16) | BIT(bit); + break; + } return regmap_update_bits(regmap, reg, rmask, data); } @@ -2655,6 +2420,7 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl, case RK3308: case RK3368: case RK3399: + case RK3568: return (pull != PIN_CONFIG_BIAS_PULL_PIN_DEFAULT); } @@ -2893,6 +2659,7 @@ static int rockchip_pinctrl_parse_groups(struct device_node *np, np_config = of_find_node_by_phandle(be32_to_cpup(phandle)); ret = pinconf_generic_parse_dt_config(np_config, NULL, &grp->data[j].configs, &grp->data[j].nconfigs); + of_node_put(np_config); if (ret) return ret; } @@ -4230,6 +3997,45 @@ static struct rockchip_pin_ctrl rk3399_pin_ctrl = { .drv_calc_reg = rk3399_calc_drv_reg_and_bit, }; +static struct rockchip_pin_bank rk3568_pin_banks[] = { + PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", IOMUX_SOURCE_PMU | IOMUX_WIDTH_4BIT, + IOMUX_SOURCE_PMU | IOMUX_WIDTH_4BIT, + IOMUX_SOURCE_PMU | IOMUX_WIDTH_4BIT, + IOMUX_SOURCE_PMU | IOMUX_WIDTH_4BIT), + PIN_BANK_IOMUX_FLAGS(1, 32, "gpio1", IOMUX_WIDTH_4BIT, + IOMUX_WIDTH_4BIT, + IOMUX_WIDTH_4BIT, + IOMUX_WIDTH_4BIT), + PIN_BANK_IOMUX_FLAGS(2, 32, "gpio2", IOMUX_WIDTH_4BIT, + IOMUX_WIDTH_4BIT, + IOMUX_WIDTH_4BIT, + IOMUX_WIDTH_4BIT), + PIN_BANK_IOMUX_FLAGS(3, 32, "gpio3", IOMUX_WIDTH_4BIT, + IOMUX_WIDTH_4BIT, + IOMUX_WIDTH_4BIT, + IOMUX_WIDTH_4BIT), + PIN_BANK_IOMUX_FLAGS(4, 32, "gpio4", IOMUX_WIDTH_4BIT, + IOMUX_WIDTH_4BIT, + IOMUX_WIDTH_4BIT, + IOMUX_WIDTH_4BIT), +}; + +static struct rockchip_pin_ctrl rk3568_pin_ctrl = { + .pin_banks = rk3568_pin_banks, + .nr_banks = ARRAY_SIZE(rk3568_pin_banks), + .label = "RK3568-GPIO", + .type = RK3568, + .grf_mux_offset = 0x0, + .pmu_mux_offset = 0x0, + .grf_drv_offset = 0x0200, + .pmu_drv_offset = 0x0070, + .iomux_routes = rk3568_mux_route_data, + .niomux_routes = ARRAY_SIZE(rk3568_mux_route_data), + .pull_calc_reg = rk3568_calc_pull_reg_and_bit, + .drv_calc_reg = rk3568_calc_drv_reg_and_bit, + .schmitt_calc_reg = rk3568_calc_schmitt_reg_and_bit, +}; + static const struct of_device_id rockchip_pinctrl_dt_match[] = { { .compatible = "rockchip,px30-pinctrl", .data = &px30_pin_ctrl }, @@ -4259,6 +4065,8 @@ static const struct of_device_id rockchip_pinctrl_dt_match[] = { .data = &rk3368_pin_ctrl }, { .compatible = "rockchip,rk3399-pinctrl", .data = &rk3399_pin_ctrl }, + { .compatible = "rockchip,rk3568-pinctrl", + .data = &rk3568_pin_ctrl }, {}, }; diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index d139cd9e6d1306996419e93a08ee560f73c126c0..22e471933b373ec03e182cad9226e5fda364b373 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -372,6 +372,8 @@ static int pcs_set_mux(struct pinctrl_dev *pctldev, unsigned fselector, if (!pcs->fmask) return 0; function = pinmux_generic_get_function(pctldev, fselector); + if (!function) + return -EINVAL; func = function->data; if (!func) return -EINVAL; diff --git a/drivers/pinctrl/qcom/pinctrl-msm8976.c b/drivers/pinctrl/qcom/pinctrl-msm8976.c index ec43edf9b660a2c5eb99356e52c1ff8efc97b786..e11d8458471905280774863f842a294ee255a32a 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm8976.c +++ b/drivers/pinctrl/qcom/pinctrl-msm8976.c @@ -733,7 +733,7 @@ static const char * const codec_int2_groups[] = { "gpio74", }; static const char * const wcss_bt_groups[] = { - "gpio39", "gpio47", "gpio88", + "gpio39", "gpio47", "gpio48", }; static const char * const sdc3_groups[] = { "gpio39", "gpio40", "gpio41", @@ -958,9 +958,9 
@@ static const struct msm_pingroup msm8976_groups[] = { PINGROUP(37, NA, NA, NA, qdss_tracedata_b, NA, NA, NA, NA, NA), PINGROUP(38, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b, NA), PINGROUP(39, wcss_bt, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA), - PINGROUP(40, wcss_wlan, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA), - PINGROUP(41, wcss_wlan, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA), - PINGROUP(42, wcss_wlan, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA), + PINGROUP(40, wcss_wlan2, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA), + PINGROUP(41, wcss_wlan1, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA), + PINGROUP(42, wcss_wlan0, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA), PINGROUP(43, wcss_wlan, sdc3, NA, NA, qdss_tracedata_a, NA, NA, NA, NA), PINGROUP(44, wcss_wlan, sdc3, NA, NA, NA, NA, NA, NA, NA), PINGROUP(45, wcss_fm, NA, qdss_tracectl_a, NA, NA, NA, NA, NA, NA), diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c index 60406f1f8337ee05daf15574deae530cbd47e916..2d852f15cc501d4c7ebb556fbefcaa7999b4db42 100644 --- a/drivers/pinctrl/stm32/pinctrl-stm32.c +++ b/drivers/pinctrl/stm32/pinctrl-stm32.c @@ -1338,6 +1338,7 @@ static struct irq_domain *stm32_pctrl_get_irq_domain(struct device_node *np) return ERR_PTR(-ENXIO); domain = irq_find_host(parent); + of_node_put(parent); if (!domain) /* domain not registered yet */ return ERR_PTR(-EPROBE_DEFER); diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index a1858689d6e10b499ddbae04cc4f92b49a9999f9..84c5b922f245e0a44c1f2f14d5beba7c0a99897b 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -1195,7 +1195,8 @@ config I2C_MULTI_INSTANTIATE config MLX_PLATFORM tristate "Mellanox Technologies platform support" - depends on I2C && REGMAP + depends on I2C + select REGMAP help This option enables system support for the Mellanox Technologies platform. 
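/*
 * Minimal kernel-context sketch (not part of the patch): the pinctrl-rockchip
 * and pinctrl-stm32 hunks above fix the same device-tree reference-counting
 * pattern.  Nodes returned by of_find_node_by_phandle() or
 * of_irq_find_parent() carry a reference that must be dropped with
 * of_node_put() once the lookup result has been consumed.  A reconstruction
 * of the stm32 case, with function and variable names assumed:
 */
#include <linux/err.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>

static struct irq_domain *example_get_parent_domain(struct device_node *np)
{
	struct device_node *parent;
	struct irq_domain *domain;

	parent = of_irq_find_parent(np);	/* takes a reference on parent */
	if (!parent)
		return ERR_PTR(-ENXIO);

	domain = irq_find_host(parent);		/* the domain does not pin parent */
	of_node_put(parent);			/* so drop the reference here */

	if (!domain)				/* domain not registered yet */
		return ERR_PTR(-EPROBE_DEFER);

	return domain;
}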
The Mellanox systems provide data center networking diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index 949ddeb673bc564e4bc88b4d45535dc0a90f04cc..74637bd0433e6df7931cec83a5916a3ec448fa5b 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -478,6 +478,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = { { KE_KEY, 0x30, { KEY_VOLUMEUP } }, { KE_KEY, 0x31, { KEY_VOLUMEDOWN } }, { KE_KEY, 0x32, { KEY_MUTE } }, + { KE_KEY, 0x33, { KEY_SCREENLOCK } }, { KE_KEY, 0x35, { KEY_SCREENLOCK } }, { KE_KEY, 0x40, { KEY_PREVIOUSSONG } }, { KE_KEY, 0x41, { KEY_NEXTSONG } }, diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c index bbdb3e8608927d795c729b6f31995cb19981b297..6ef327a80ccf4a2d37caee78573797f7ca6ac301 100644 --- a/drivers/platform/x86/dell-wmi.c +++ b/drivers/platform/x86/dell-wmi.c @@ -259,6 +259,9 @@ static const struct key_entry dell_wmi_keymap_type_0010[] = { { KE_KEY, 0x57, { KEY_BRIGHTNESSDOWN } }, { KE_KEY, 0x58, { KEY_BRIGHTNESSUP } }, + /*Speaker Mute*/ + { KE_KEY, 0x109, { KEY_MUTE} }, + /* Mic mute */ { KE_KEY, 0x150, { KEY_MICMUTE } }, diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c index 110ff1e6ef81f968325eeab0d2ccf2d661527c2c..b96fbc8dba09d9fad1b3de09202d1fff0bba5ae5 100644 --- a/drivers/platform/x86/touchscreen_dmi.c +++ b/drivers/platform/x86/touchscreen_dmi.c @@ -255,6 +255,23 @@ static const struct ts_dmi_data connect_tablet9_data = { .properties = connect_tablet9_props, }; +static const struct property_entry csl_panther_tab_hd_props[] = { + PROPERTY_ENTRY_U32("touchscreen-min-x", 1), + PROPERTY_ENTRY_U32("touchscreen-min-y", 20), + PROPERTY_ENTRY_U32("touchscreen-size-x", 1980), + PROPERTY_ENTRY_U32("touchscreen-size-y", 1526), + PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), + PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-csl-panther-tab-hd.fw"), + PROPERTY_ENTRY_U32("silead,max-fingers", 10), + { } +}; + +static const struct ts_dmi_data csl_panther_tab_hd_data = { + .acpi_name = "MSSL1680:00", + .properties = csl_panther_tab_hd_props, +}; + static const struct property_entry cube_iwork8_air_props[] = { PROPERTY_ENTRY_U32("touchscreen-min-x", 1), PROPERTY_ENTRY_U32("touchscreen-min-y", 3), @@ -1013,6 +1030,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = { DMI_MATCH(DMI_BIOS_DATE, "05/07/2016"), }, }, + { + /* Chuwi Vi8 (CWI501) */ + .driver_data = (void *)&chuwi_vi8_data, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Insyde"), + DMI_MATCH(DMI_PRODUCT_NAME, "i86"), + DMI_MATCH(DMI_BIOS_VERSION, "CHUWI.W86JLBNR01"), + }, + }, { /* Chuwi Vi8 (CWI506) */ .driver_data = (void *)&chuwi_vi8_data, @@ -1057,6 +1083,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Tablet 9"), }, }, + { + /* CSL Panther Tab HD */ + .driver_data = (void *)&csl_panther_tab_hd_data, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "CSL Computer GmbH & Co. 
KG"), + DMI_MATCH(DMI_PRODUCT_NAME, "CSL Panther Tab HD"), + }, + }, { /* CUBE iwork8 Air */ .driver_data = (void *)&cube_iwork8_air_data, diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c index 3f0b8e2ef3d46b88f2dddda3ef5a9b912483ec59..7a3109a538813d61318d881c6670c52f75dc9d0b 100644 --- a/drivers/powercap/powercap_sys.c +++ b/drivers/powercap/powercap_sys.c @@ -530,9 +530,6 @@ struct powercap_zone *powercap_register_zone( power_zone->name = kstrdup(name, GFP_KERNEL); if (!power_zone->name) goto err_name_alloc; - dev_set_name(&power_zone->dev, "%s:%x", - dev_name(power_zone->dev.parent), - power_zone->id); power_zone->constraints = kcalloc(nr_constraints, sizeof(*power_zone->constraints), GFP_KERNEL); @@ -555,9 +552,16 @@ struct powercap_zone *powercap_register_zone( power_zone->dev_attr_groups[0] = &power_zone->dev_zone_attr_group; power_zone->dev_attr_groups[1] = NULL; power_zone->dev.groups = power_zone->dev_attr_groups; + dev_set_name(&power_zone->dev, "%s:%x", + dev_name(power_zone->dev.parent), + power_zone->id); result = device_register(&power_zone->dev); - if (result) - goto err_dev_ret; + if (result) { + put_device(&power_zone->dev); + mutex_unlock(&control_type->lock); + + return ERR_PTR(result); + } control_type->nr_zones++; mutex_unlock(&control_type->lock); diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c index 12e9e23272ab1230d53ca708ec3a29f84c4461da..52a55bae033de380a5e911e21afaa635eb3bbc3e 100644 --- a/drivers/pwm/pwm-sifive.c +++ b/drivers/pwm/pwm-sifive.c @@ -41,7 +41,7 @@ struct pwm_sifive_ddata { struct pwm_chip chip; - struct mutex lock; /* lock to protect user_count */ + struct mutex lock; /* lock to protect user_count and approx_period */ struct notifier_block notifier; struct clk *clk; void __iomem *regs; @@ -76,6 +76,7 @@ static void pwm_sifive_free(struct pwm_chip *chip, struct pwm_device *pwm) mutex_unlock(&ddata->lock); } +/* Called holding ddata->lock */ static void pwm_sifive_update_clock(struct pwm_sifive_ddata *ddata, unsigned long rate) { @@ -163,7 +164,6 @@ static int pwm_sifive_apply(struct pwm_chip *chip, struct pwm_device *pwm, return ret; } - mutex_lock(&ddata->lock); cur_state = pwm->state; enabled = cur_state.enabled; @@ -182,14 +182,23 @@ static int pwm_sifive_apply(struct pwm_chip *chip, struct pwm_device *pwm, /* The hardware cannot generate a 100% duty cycle */ frac = min(frac, (1U << PWM_SIFIVE_CMPWIDTH) - 1); + mutex_lock(&ddata->lock); if (state->period != ddata->approx_period) { - if (ddata->user_count != 1) { + /* + * Don't let a 2nd user change the period underneath the 1st user. + * However if ddate->approx_period == 0 this is the first time we set + * any period, so let whoever gets here first set the period so other + * users who agree on the period won't fail. 
+ */ + if (ddata->user_count != 1 && ddata->approx_period) { + mutex_unlock(&ddata->lock); ret = -EBUSY; goto exit; } ddata->approx_period = state->period; pwm_sifive_update_clock(ddata, clk_get_rate(ddata->clk)); } + mutex_unlock(&ddata->lock); writel(frac, ddata->regs + PWM_SIFIVE_PWMCMP(pwm->hwpwm)); @@ -198,7 +207,6 @@ static int pwm_sifive_apply(struct pwm_chip *chip, struct pwm_device *pwm, exit: clk_disable(ddata->clk); - mutex_unlock(&ddata->lock); return ret; } diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c index 945a8b2b85648c835ecdfa422df8fc61d488e2fa..c8a847fcb775b96a83b7cdc2e76e7afcfbb1e987 100644 --- a/drivers/pwm/pwm-stm32-lp.c +++ b/drivers/pwm/pwm-stm32-lp.c @@ -127,7 +127,7 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm, /* ensure CMP & ARR registers are properly written */ ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val, - (val & STM32_LPTIM_CMPOK_ARROK), + (val & STM32_LPTIM_CMPOK_ARROK) == STM32_LPTIM_CMPOK_ARROK, 100, 1000); if (ret) { dev_err(priv->chip.dev, "ARR/CMP registers write issue\n"); diff --git a/drivers/regulator/max77802-regulator.c b/drivers/regulator/max77802-regulator.c index 7b8ec8c0bd1511a283ef4619867f98be59260c3c..660e179a82a2c18237c4a3d6f71224499c265fc0 100644 --- a/drivers/regulator/max77802-regulator.c +++ b/drivers/regulator/max77802-regulator.c @@ -95,9 +95,11 @@ static int max77802_set_suspend_disable(struct regulator_dev *rdev) { unsigned int val = MAX77802_OFF_PWRREQ; struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev); - int id = rdev_get_id(rdev); + unsigned int id = rdev_get_id(rdev); int shift = max77802_get_opmode_shift(id); + if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode))) + return -EINVAL; max77802->opmode[id] = val; return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, rdev->desc->enable_mask, val << shift); @@ -111,7 +113,7 @@ static int max77802_set_suspend_disable(struct regulator_dev *rdev) static int max77802_set_mode(struct regulator_dev *rdev, unsigned int mode) { struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev); - int id = rdev_get_id(rdev); + unsigned int id = rdev_get_id(rdev); unsigned int val; int shift = max77802_get_opmode_shift(id); @@ -128,6 +130,9 @@ static int max77802_set_mode(struct regulator_dev *rdev, unsigned int mode) return -EINVAL; } + if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode))) + return -EINVAL; + max77802->opmode[id] = val; return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, rdev->desc->enable_mask, val << shift); @@ -136,8 +141,10 @@ static int max77802_set_mode(struct regulator_dev *rdev, unsigned int mode) static unsigned max77802_get_mode(struct regulator_dev *rdev) { struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev); - int id = rdev_get_id(rdev); + unsigned int id = rdev_get_id(rdev); + if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode))) + return -EINVAL; return max77802_map_mode(max77802->opmode[id]); } @@ -161,10 +168,13 @@ static int max77802_set_suspend_mode(struct regulator_dev *rdev, unsigned int mode) { struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev); - int id = rdev_get_id(rdev); + unsigned int id = rdev_get_id(rdev); unsigned int val; int shift = max77802_get_opmode_shift(id); + if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode))) + return -EINVAL; + /* * If the regulator has been disabled for suspend * then is invalid to try setting a suspend mode. 
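/*
 * Minimal kernel-context sketch (not part of the patch): the max77802 hunks
 * above and the s5m8767 hunk further below add the same defensive pattern,
 * validating an ID that comes from platform data or regulator matching before
 * it is used to index a fixed driver-private array, warning once instead of
 * reading or writing out of bounds.  In generic form, with names hypothetical:
 */
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/kernel.h>

static int example_table_lookup(const int *table, size_t nr_entries,
				unsigned int id)
{
	if (WARN_ON_ONCE(id >= nr_entries))
		return -EINVAL;

	return table[id];
}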
@@ -210,9 +220,11 @@ static int max77802_set_suspend_mode(struct regulator_dev *rdev, static int max77802_enable(struct regulator_dev *rdev) { struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev); - int id = rdev_get_id(rdev); + unsigned int id = rdev_get_id(rdev); int shift = max77802_get_opmode_shift(id); + if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode))) + return -EINVAL; if (max77802->opmode[id] == MAX77802_OFF_PWRREQ) max77802->opmode[id] = MAX77802_OPMODE_NORMAL; @@ -541,7 +553,7 @@ static int max77802_pmic_probe(struct platform_device *pdev) for (i = 0; i < MAX77802_REG_MAX; i++) { struct regulator_dev *rdev; - int id = regulators[i].id; + unsigned int id = regulators[i].id; int shift = max77802_get_opmode_shift(id); int ret; @@ -559,10 +571,12 @@ static int max77802_pmic_probe(struct platform_device *pdev) * the hardware reports OFF as the regulator operating mode. * Default to operating mode NORMAL in that case. */ - if (val == MAX77802_STATUS_OFF) - max77802->opmode[id] = MAX77802_OPMODE_NORMAL; - else - max77802->opmode[id] = val; + if (id < ARRAY_SIZE(max77802->opmode)) { + if (val == MAX77802_STATUS_OFF) + max77802->opmode[id] = MAX77802_OPMODE_NORMAL; + else + max77802->opmode[id] = val; + } rdev = devm_regulator_register(&pdev->dev, ®ulators[i], &config); diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c index 35269f998210534ecc77c9432eb035cc8d174bc7..754c6fcc6e642f5494c5ee600e66ada567a703ca 100644 --- a/drivers/regulator/s5m8767.c +++ b/drivers/regulator/s5m8767.c @@ -923,10 +923,14 @@ static int s5m8767_pmic_probe(struct platform_device *pdev) for (i = 0; i < pdata->num_regulators; i++) { const struct sec_voltage_desc *desc; - int id = pdata->regulators[i].id; + unsigned int id = pdata->regulators[i].id; int enable_reg, enable_val; struct regulator_dev *rdev; + BUILD_BUG_ON(ARRAY_SIZE(regulators) != ARRAY_SIZE(reg_voltage_map)); + if (WARN_ON_ONCE(id >= ARRAY_SIZE(regulators))) + continue; + desc = reg_voltage_map[id]; if (desc) { regulators[id].n_voltages = diff --git a/drivers/remoteproc/mtk_scp_ipi.c b/drivers/remoteproc/mtk_scp_ipi.c index 6dc955ecab80f98c319bf67b500bdf32399f2209..968128b78e59c2b00e42d5680dccb7320e80fddf 100644 --- a/drivers/remoteproc/mtk_scp_ipi.c +++ b/drivers/remoteproc/mtk_scp_ipi.c @@ -164,21 +164,21 @@ int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len, WARN_ON(len > sizeof(send_obj->share_buf)) || WARN_ON(!buf)) return -EINVAL; - mutex_lock(&scp->send_lock); - ret = clk_prepare_enable(scp->clk); if (ret) { dev_err(scp->dev, "failed to enable clock\n"); - goto unlock_mutex; + return ret; } + mutex_lock(&scp->send_lock); + /* Wait until SCP receives the last command */ timeout = jiffies + msecs_to_jiffies(2000); do { if (time_after(jiffies, timeout)) { dev_err(scp->dev, "%s: IPI timeout!\n", __func__); ret = -ETIMEDOUT; - goto clock_disable; + goto unlock_mutex; } } while (readl(scp->reg_base + scp->data->host_to_scp_reg)); @@ -205,10 +205,9 @@ int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len, ret = 0; } -clock_disable: - clk_disable_unprepare(scp->clk); unlock_mutex: mutex_unlock(&scp->send_lock); + clk_disable_unprepare(scp->clk); return ret; } diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c index 1b3aa84e36e7ad051daeab86cd776d6236268a96..3d975ecd933601f695e0ebd3add36182445ffb96 100644 --- a/drivers/remoteproc/qcom_q6v5_mss.c +++ b/drivers/remoteproc/qcom_q6v5_mss.c @@ -17,6 +17,7 @@ #include #include #include +#include 
#include #include #include @@ -190,6 +191,9 @@ struct q6v5 { size_t mba_size; size_t dp_size; + phys_addr_t mdata_phys; + size_t mdata_size; + phys_addr_t mpss_phys; phys_addr_t mpss_reloc; size_t mpss_size; @@ -816,15 +820,35 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw) if (IS_ERR(metadata)) return PTR_ERR(metadata); - ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs); - if (!ptr) { - kfree(metadata); - dev_err(qproc->dev, "failed to allocate mdt buffer\n"); - return -ENOMEM; + if (qproc->mdata_phys) { + if (size > qproc->mdata_size) { + ret = -EINVAL; + dev_err(qproc->dev, "metadata size outside memory range\n"); + goto free_metadata; + } + + phys = qproc->mdata_phys; + ptr = memremap(qproc->mdata_phys, size, MEMREMAP_WC); + if (!ptr) { + ret = -EBUSY; + dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", + &qproc->mdata_phys, size); + goto free_metadata; + } + } else { + ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs); + if (!ptr) { + ret = -ENOMEM; + dev_err(qproc->dev, "failed to allocate mdt buffer\n"); + goto free_metadata; + } } memcpy(ptr, metadata, size); + if (qproc->mdata_phys) + memunmap(ptr); + /* Hypervisor mapping to access metadata by modem */ mdata_perm = BIT(QCOM_SCM_VMID_HLOS); ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, true, @@ -853,7 +877,9 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw) "mdt buffer not reclaimed system may become unstable\n"); free_dma_attrs: - dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs); + if (!qproc->mdata_phys) + dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs); +free_metadata: kfree(metadata); return ret < 0 ? ret : 0; @@ -1585,6 +1611,7 @@ static int q6v5_init_reset(struct q6v5 *qproc) static int q6v5_alloc_memory_region(struct q6v5 *qproc) { struct device_node *child; + struct reserved_mem *rmem; struct device_node *node; struct resource r; int ret; @@ -1637,6 +1664,26 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc) qproc->mpss_phys = qproc->mpss_reloc = r.start; qproc->mpss_size = resource_size(&r); + if (!child) { + node = of_parse_phandle(qproc->dev->of_node, "memory-region", 2); + } else { + child = of_get_child_by_name(qproc->dev->of_node, "metadata"); + node = of_parse_phandle(child, "memory-region", 0); + of_node_put(child); + } + + if (!node) + return 0; + + rmem = of_reserved_mem_lookup(node); + if (!rmem) { + dev_err(qproc->dev, "unable to resolve metadata region\n"); + return -EINVAL; + } + + qproc->mdata_phys = rmem->base; + qproc->mdata_size = rmem->size; + return 0; } diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c index 7cbed0310c09f24250a7b719173c00df143aec94..98b6d4c09c82c6483ba56f795a4a1da73e35e3ca 100644 --- a/drivers/rpmsg/qcom_glink_native.c +++ b/drivers/rpmsg/qcom_glink_native.c @@ -929,6 +929,7 @@ static void qcom_glink_handle_intent(struct qcom_glink *glink, spin_unlock_irqrestore(&glink->idr_lock, flags); if (!channel) { dev_err(glink->dev, "intents for non-existing channel\n"); + qcom_glink_rx_advance(glink, ALIGN(msglen, 8)); return; } diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c index b45ee2cb2c0449cbb9e5a12524864bee6491665a..3417eef0aca3fd383369570ca1bc9f444f1ba69e 100644 --- a/drivers/rtc/rtc-pm8xxx.c +++ b/drivers/rtc/rtc-pm8xxx.c @@ -219,7 +219,6 @@ static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) { int rc, i; u8 value[NUM_8_BIT_RTC_REGS]; - unsigned int ctrl_reg; 
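/*
 * Minimal kernel-context sketch (not part of the patch): the rtc-pm8xxx hunks
 * that follow replace an open-coded read/modify/write of the alarm control
 * register with regmap_update_bits().  Ignoring regmap's internal locking,
 * register caching and the skipped write when nothing changes, that call is
 * roughly equivalent to the helper below (name hypothetical):
 */
#include <linux/regmap.h>

static int example_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val)
{
	unsigned int tmp;
	int ret;

	ret = regmap_read(map, reg, &tmp);
	if (ret)
		return ret;

	tmp = (tmp & ~mask) | (val & mask);

	return regmap_write(map, reg, tmp);
}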
unsigned long secs, irq_flags; struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev); const struct pm8xxx_rtc_regs *regs = rtc_dd->regs; @@ -231,6 +230,11 @@ static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) secs >>= 8; } + rc = regmap_update_bits(rtc_dd->regmap, regs->alarm_ctrl, + regs->alarm_en, 0); + if (rc) + return rc; + spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags); rc = regmap_bulk_write(rtc_dd->regmap, regs->alarm_rw, value, @@ -240,19 +244,11 @@ static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) goto rtc_rw_fail; } - rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg); - if (rc) - goto rtc_rw_fail; - - if (alarm->enabled) - ctrl_reg |= regs->alarm_en; - else - ctrl_reg &= ~regs->alarm_en; - - rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg); - if (rc) { - dev_err(dev, "Write to RTC alarm control register failed\n"); - goto rtc_rw_fail; + if (alarm->enabled) { + rc = regmap_update_bits(rtc_dd->regmap, regs->alarm_ctrl, + regs->alarm_en, regs->alarm_en); + if (rc) + goto rtc_rw_fail; } dev_dbg(dev, "Alarm Set for h:m:s=%ptRt, y-m-d=%ptRdr\n", diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c index 52b36b7c6129857211ac3908a56ca38c0677062b..a72856fb5252c298dfaee3d9942bb70b009b386d 100644 --- a/drivers/rtc/rtc-sun6i.c +++ b/drivers/rtc/rtc-sun6i.c @@ -128,7 +128,6 @@ struct sun6i_rtc_clk_data { unsigned int fixed_prescaler : 16; unsigned int has_prescaler : 1; unsigned int has_out_clk : 1; - unsigned int export_iosc : 1; unsigned int has_losc_en : 1; unsigned int has_auto_swt : 1; }; @@ -260,10 +259,8 @@ static void __init sun6i_rtc_clk_init(struct device_node *node, /* Yes, I know, this is ugly. */ sun6i_rtc = rtc; - /* Only read IOSC name from device tree if it is exported */ - if (rtc->data->export_iosc) - of_property_read_string_index(node, "clock-output-names", 2, - &iosc_name); + of_property_read_string_index(node, "clock-output-names", 2, + &iosc_name); rtc->int_osc = clk_hw_register_fixed_rate_with_accuracy(NULL, iosc_name, @@ -304,13 +301,10 @@ static void __init sun6i_rtc_clk_init(struct device_node *node, goto err_register; } - clk_data->num = 2; + clk_data->num = 3; clk_data->hws[0] = &rtc->hw; clk_data->hws[1] = __clk_get_hw(rtc->ext_losc); - if (rtc->data->export_iosc) { - clk_data->hws[2] = rtc->int_osc; - clk_data->num = 3; - } + clk_data->hws[2] = rtc->int_osc; of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); return; @@ -350,7 +344,6 @@ static const struct sun6i_rtc_clk_data sun8i_h3_rtc_data = { .fixed_prescaler = 32, .has_prescaler = 1, .has_out_clk = 1, - .export_iosc = 1, }; static void __init sun8i_h3_rtc_clk_init(struct device_node *node) @@ -368,7 +361,6 @@ static const struct sun6i_rtc_clk_data sun50i_h6_rtc_data = { .fixed_prescaler = 32, .has_prescaler = 1, .has_out_clk = 1, - .export_iosc = 1, .has_losc_en = 1, .has_auto_swt = 1, }; diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index f4edfe383e9d915dc62c9079d1799c57cc77330c..9f26f55e4988a9a6b0c6333a98c30d2f7e7291e4 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -2128,8 +2128,8 @@ static void __dasd_device_check_path_events(struct dasd_device *device) if (device->stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM)) return; - rc = device->discipline->verify_path(device, - dasd_path_get_tbvpm(device)); + rc = device->discipline->pe_handler(device, + dasd_path_get_tbvpm(device)); if (rc) dasd_device_set_timer(device, 50); else diff --git 
a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 53d22975a32fdb62198b57350a1244308ccaa2c0..c6930c159d2a64407b506082b49602df3fec5015 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -103,7 +103,7 @@ struct ext_pool_exhaust_work_data { }; /* definitions for the path verification worker */ -struct path_verification_work_data { +struct pe_handler_work_data { struct work_struct worker; struct dasd_device *device; struct dasd_ccw_req cqr; @@ -112,8 +112,8 @@ struct path_verification_work_data { int isglobal; __u8 tbvpm; }; -static struct path_verification_work_data *path_verification_worker; -static DEFINE_MUTEX(dasd_path_verification_mutex); +static struct pe_handler_work_data *pe_handler_worker; +static DEFINE_MUTEX(dasd_pe_handler_mutex); struct check_attention_work_data { struct work_struct worker; @@ -1219,7 +1219,7 @@ static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm) } static int rebuild_device_uid(struct dasd_device *device, - struct path_verification_work_data *data) + struct pe_handler_work_data *data) { struct dasd_eckd_private *private = device->private; __u8 lpm, opm = dasd_path_get_opm(device); @@ -1257,10 +1257,9 @@ static int rebuild_device_uid(struct dasd_device *device, return rc; } -static void do_path_verification_work(struct work_struct *work) +static void dasd_eckd_path_available_action(struct dasd_device *device, + struct pe_handler_work_data *data) { - struct path_verification_work_data *data; - struct dasd_device *device; struct dasd_eckd_private path_private; struct dasd_uid *uid; __u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE]; @@ -1269,19 +1268,6 @@ static void do_path_verification_work(struct work_struct *work) char print_uid[60]; int rc; - data = container_of(work, struct path_verification_work_data, worker); - device = data->device; - - /* delay path verification until device was resumed */ - if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) { - schedule_work(work); - return; - } - /* check if path verification already running and delay if so */ - if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) { - schedule_work(work); - return; - } opm = 0; npm = 0; ppm = 0; @@ -1418,30 +1404,54 @@ static void do_path_verification_work(struct work_struct *work) dasd_path_add_nohpfpm(device, hpfpm); spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); } +} + +static void do_pe_handler_work(struct work_struct *work) +{ + struct pe_handler_work_data *data; + struct dasd_device *device; + + data = container_of(work, struct pe_handler_work_data, worker); + device = data->device; + + /* delay path verification until device was resumed */ + if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) { + schedule_work(work); + return; + } + /* check if path verification already running and delay if so */ + if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) { + schedule_work(work); + return; + } + + dasd_eckd_path_available_action(device, data); + clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags); dasd_put_device(device); if (data->isglobal) - mutex_unlock(&dasd_path_verification_mutex); + mutex_unlock(&dasd_pe_handler_mutex); else kfree(data); } -static int dasd_eckd_verify_path(struct dasd_device *device, __u8 lpm) +static int dasd_eckd_pe_handler(struct dasd_device *device, __u8 lpm) { - struct path_verification_work_data *data; + struct pe_handler_work_data *data; data = kmalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA); if (!data) { - if (mutex_trylock(&dasd_path_verification_mutex)) { - data = 
path_verification_worker; + if (mutex_trylock(&dasd_pe_handler_mutex)) { + data = pe_handler_worker; data->isglobal = 1; - } else + } else { return -ENOMEM; + } } else { memset(data, 0, sizeof(*data)); data->isglobal = 0; } - INIT_WORK(&data->worker, do_path_verification_work); + INIT_WORK(&data->worker, do_pe_handler_work); dasd_get_device(device); data->device = device; data->tbvpm = lpm; @@ -6694,7 +6704,7 @@ static struct dasd_discipline dasd_eckd_discipline = { .check_device = dasd_eckd_check_characteristics, .uncheck_device = dasd_eckd_uncheck_device, .do_analysis = dasd_eckd_do_analysis, - .verify_path = dasd_eckd_verify_path, + .pe_handler = dasd_eckd_pe_handler, .basic_to_ready = dasd_eckd_basic_to_ready, .online_to_ready = dasd_eckd_online_to_ready, .basic_to_known = dasd_eckd_basic_to_known, @@ -6753,18 +6763,20 @@ dasd_eckd_init(void) return -ENOMEM; dasd_vol_info_req = kmalloc(sizeof(*dasd_vol_info_req), GFP_KERNEL | GFP_DMA); - if (!dasd_vol_info_req) + if (!dasd_vol_info_req) { + kfree(dasd_reserve_req); return -ENOMEM; - path_verification_worker = kmalloc(sizeof(*path_verification_worker), - GFP_KERNEL | GFP_DMA); - if (!path_verification_worker) { + } + pe_handler_worker = kmalloc(sizeof(*pe_handler_worker), + GFP_KERNEL | GFP_DMA); + if (!pe_handler_worker) { kfree(dasd_reserve_req); kfree(dasd_vol_info_req); return -ENOMEM; } rawpadpage = (void *)__get_free_page(GFP_KERNEL); if (!rawpadpage) { - kfree(path_verification_worker); + kfree(pe_handler_worker); kfree(dasd_reserve_req); kfree(dasd_vol_info_req); return -ENOMEM; @@ -6773,7 +6785,7 @@ dasd_eckd_init(void) if (!ret) wait_for_device_probe(); else { - kfree(path_verification_worker); + kfree(pe_handler_worker); kfree(dasd_reserve_req); kfree(dasd_vol_info_req); free_page((unsigned long)rawpadpage); @@ -6785,7 +6797,7 @@ static void __exit dasd_eckd_cleanup(void) { ccw_driver_unregister(&dasd_eckd_driver); - kfree(path_verification_worker); + kfree(pe_handler_worker); kfree(dasd_reserve_req); free_page((unsigned long)rawpadpage); } diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 9d9685c25253d5549089338d7c6d419f3892cad9..e8a06d85d6f723626fe42a08b0553299380933ec 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -299,6 +299,7 @@ struct dasd_discipline { * configuration. 
*/ int (*verify_path)(struct dasd_device *, __u8); + int (*pe_handler)(struct dasd_device *, __u8); /* * Last things to do when a device is set online, and first things diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c index f923ed019d4a1a0b89529fdbd5debe8c146ee49a..593b167ceefee827a9c3cc4969eb4ecbb6bc38a6 100644 --- a/drivers/scsi/aic94xx/aic94xx_task.c +++ b/drivers/scsi/aic94xx/aic94xx_task.c @@ -50,6 +50,9 @@ static int asd_map_scatterlist(struct sas_task *task, dma_addr_t dma = dma_map_single(&asd_ha->pcidev->dev, p, task->total_xfer_len, task->data_dir); + if (dma_mapping_error(&asd_ha->pcidev->dev, dma)) + return -ENOMEM; + sg_arr[0].bus_addr = cpu_to_le64((u64)dma); sg_arr[0].size = cpu_to_le32(task->total_xfer_len); sg_arr[0].flags |= ASD_SG_EL_LIST_EOL; diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c index 4251212acbbe987f55dadb1f21eafaabe6e51cb1..85f4d6535154a76a3054561b785e7b75dbe920ad 100644 --- a/drivers/scsi/dpt_i2o.c +++ b/drivers/scsi/dpt_i2o.c @@ -582,51 +582,6 @@ static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host) return 0; } -/* - * Turn a pointer to ioctl reply data into an u32 'context' - */ -static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply) -{ -#if BITS_PER_LONG == 32 - return (u32)(unsigned long)reply; -#else - ulong flags = 0; - u32 nr, i; - - spin_lock_irqsave(pHba->host->host_lock, flags); - nr = ARRAY_SIZE(pHba->ioctl_reply_context); - for (i = 0; i < nr; i++) { - if (pHba->ioctl_reply_context[i] == NULL) { - pHba->ioctl_reply_context[i] = reply; - break; - } - } - spin_unlock_irqrestore(pHba->host->host_lock, flags); - if (i >= nr) { - printk(KERN_WARNING"%s: Too many outstanding " - "ioctl commands\n", pHba->name); - return (u32)-1; - } - - return i; -#endif -} - -/* - * Go from an u32 'context' to a pointer to ioctl reply data. 
- */ -static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context) -{ -#if BITS_PER_LONG == 32 - return (void *)(unsigned long)context; -#else - void *p = pHba->ioctl_reply_context[context]; - pHba->ioctl_reply_context[context] = NULL; - - return p; -#endif -} - /*=========================================================================== * Error Handling routines *=========================================================================== @@ -1648,208 +1603,6 @@ static int adpt_close(struct inode *inode, struct file *file) return 0; } - -static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg) -{ - u32 msg[MAX_MESSAGE_SIZE]; - u32* reply = NULL; - u32 size = 0; - u32 reply_size = 0; - u32 __user *user_msg = arg; - u32 __user * user_reply = NULL; - void **sg_list = NULL; - u32 sg_offset = 0; - u32 sg_count = 0; - int sg_index = 0; - u32 i = 0; - u32 rcode = 0; - void *p = NULL; - dma_addr_t addr; - ulong flags = 0; - - memset(&msg, 0, MAX_MESSAGE_SIZE*4); - // get user msg size in u32s - if(get_user(size, &user_msg[0])){ - return -EFAULT; - } - size = size>>16; - - user_reply = &user_msg[size]; - if(size > MAX_MESSAGE_SIZE){ - return -EFAULT; - } - size *= 4; // Convert to bytes - - /* Copy in the user's I2O command */ - if(copy_from_user(msg, user_msg, size)) { - return -EFAULT; - } - get_user(reply_size, &user_reply[0]); - reply_size = reply_size>>16; - if(reply_size > REPLY_FRAME_SIZE){ - reply_size = REPLY_FRAME_SIZE; - } - reply_size *= 4; - reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL); - if(reply == NULL) { - printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name); - return -ENOMEM; - } - sg_offset = (msg[0]>>4)&0xf; - msg[2] = 0x40000000; // IOCTL context - msg[3] = adpt_ioctl_to_context(pHba, reply); - if (msg[3] == (u32)-1) { - rcode = -EBUSY; - goto free; - } - - sg_list = kcalloc(pHba->sg_tablesize, sizeof(*sg_list), GFP_KERNEL); - if (!sg_list) { - rcode = -ENOMEM; - goto free; - } - if(sg_offset) { - // TODO add 64 bit API - struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset); - sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element); - if (sg_count > pHba->sg_tablesize){ - printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count); - rcode = -EINVAL; - goto free; - } - - for(i = 0; i < sg_count; i++) { - int sg_size; - - if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) { - printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count); - rcode = -EINVAL; - goto cleanup; - } - sg_size = sg[i].flag_count & 0xffffff; - /* Allocate memory for the transfer */ - p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL); - if(!p) { - printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n", - pHba->name,sg_size,i,sg_count); - rcode = -ENOMEM; - goto cleanup; - } - sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame. 
- /* Copy in the user's SG buffer if necessary */ - if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) { - // sg_simple_element API is 32 bit - if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) { - printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i); - rcode = -EFAULT; - goto cleanup; - } - } - /* sg_simple_element API is 32 bit, but addr < 4GB */ - sg[i].addr_bus = addr; - } - } - - do { - /* - * Stop any new commands from enterring the - * controller while processing the ioctl - */ - if (pHba->host) { - scsi_block_requests(pHba->host); - spin_lock_irqsave(pHba->host->host_lock, flags); - } - rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER); - if (rcode != 0) - printk("adpt_i2o_passthru: post wait failed %d %p\n", - rcode, reply); - if (pHba->host) { - spin_unlock_irqrestore(pHba->host->host_lock, flags); - scsi_unblock_requests(pHba->host); - } - } while (rcode == -ETIMEDOUT); - - if(rcode){ - goto cleanup; - } - - if(sg_offset) { - /* Copy back the Scatter Gather buffers back to user space */ - u32 j; - // TODO add 64 bit API - struct sg_simple_element* sg; - int sg_size; - - // re-acquire the original message to handle correctly the sg copy operation - memset(&msg, 0, MAX_MESSAGE_SIZE*4); - // get user msg size in u32s - if(get_user(size, &user_msg[0])){ - rcode = -EFAULT; - goto cleanup; - } - size = size>>16; - size *= 4; - if (size > MAX_MESSAGE_SIZE) { - rcode = -EINVAL; - goto cleanup; - } - /* Copy in the user's I2O command */ - if (copy_from_user (msg, user_msg, size)) { - rcode = -EFAULT; - goto cleanup; - } - sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element); - - // TODO add 64 bit API - sg = (struct sg_simple_element*)(msg + sg_offset); - for (j = 0; j < sg_count; j++) { - /* Copy out the SG list to user's buffer if necessary */ - if(! 
(sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) { - sg_size = sg[j].flag_count & 0xffffff; - // sg_simple_element API is 32 bit - if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) { - printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus); - rcode = -EFAULT; - goto cleanup; - } - } - } - } - - /* Copy back the reply to user space */ - if (reply_size) { - // we wrote our own values for context - now restore the user supplied ones - if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) { - printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name); - rcode = -EFAULT; - } - if(copy_to_user(user_reply, reply, reply_size)) { - printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name); - rcode = -EFAULT; - } - } - - -cleanup: - if (rcode != -ETIME && rcode != -EINTR) { - struct sg_simple_element *sg = - (struct sg_simple_element*) (msg +sg_offset); - while(sg_index) { - if(sg_list[--sg_index]) { - dma_free_coherent(&pHba->pDev->dev, - sg[sg_index].flag_count & 0xffffff, - sg_list[sg_index], - sg[sg_index].addr_bus); - } - } - } - -free: - kfree(sg_list); - kfree(reply); - return rcode; -} - #if defined __ia64__ static void adpt_ia64_info(sysInfo_S* si) { @@ -1976,8 +1729,6 @@ static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong ar return -EFAULT; } break; - case I2OUSRCMD: - return adpt_i2o_passthru(pHba, argp); case DPT_CTRLINFO:{ drvrHBAinfo_S HbaInfo; @@ -2134,13 +1885,6 @@ static irqreturn_t adpt_isr(int irq, void *dev_id) adpt_send_nop(pHba, old_m); } context = readl(reply+8); - if(context & 0x40000000){ // IOCTL - void *p = adpt_ioctl_from_context(pHba, readl(reply+12)); - if( p != NULL) { - memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4); - } - // All IOCTLs will also be post wait - } if(context & 0x80000000){ // Post wait message status = readl(reply+16); if(status >> 24){ @@ -2148,16 +1892,14 @@ static irqreturn_t adpt_isr(int irq, void *dev_id) } else { status = I2O_POST_WAIT_OK; } - if(!(context & 0x40000000)) { - /* - * The request tag is one less than the command tag - * as the firmware might treat a 0 tag as invalid - */ - cmd = scsi_host_find_tag(pHba->host, - readl(reply + 12) - 1); - if(cmd != NULL) { - printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context); - } + /* + * The request tag is one less than the command tag + * as the firmware might treat a 0 tag as invalid + */ + cmd = scsi_host_find_tag(pHba->host, + readl(reply + 12) - 1); + if(cmd != NULL) { + printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context); } adpt_i2o_post_wait_complete(context, status); } else { // SCSI message diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h index 8a079e8d7f65f19d7408df1114268cb3d749f589..0565533e8095a1372c98b5c090b82d9aae8be64f 100644 --- a/drivers/scsi/dpti.h +++ b/drivers/scsi/dpti.h @@ -248,7 +248,6 @@ typedef struct _adpt_hba { void __iomem *FwDebugBLEDflag_P;// Virtual Addr Of FW Debug BLED void __iomem *FwDebugBLEDvalue_P;// Virtual Addr Of FW Debug BLED u32 FwDebugFlags; - u32 *ioctl_reply_context[4]; } adpt_hba; struct sg_simple_element { diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 1feca45384c7ad599cbbfc1ff37c68e6f85c5cee..e5b9229310a0fa7ba02649bcca349d57574f1c39 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -1408,7 +1408,7 @@ 
static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba) device->linkrate = phy->sas_phy.linkrate; hisi_hba->hw->setup_itct(hisi_hba, sas_dev); - } else + } else if (!port->port_attached) port->id = 0xff; } } diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index d664c4650b2dd3c7a9d312d7d94ff4b4e09f26a8..fae0323242103cbc80eb111d3ba65a8cc05d08f0 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -180,6 +180,7 @@ void scsi_remove_host(struct Scsi_Host *shost) scsi_forget_host(shost); mutex_unlock(&shost->scan_mutex); scsi_proc_host_rm(shost); + scsi_proc_hostdir_rm(shost->hostt); spin_lock_irqsave(shost->host_lock, flags); if (scsi_host_set_state(shost, SHOST_DEL)) @@ -321,6 +322,7 @@ static void scsi_host_dev_release(struct device *dev) struct Scsi_Host *shost = dev_to_shost(dev); struct device *parent = dev->parent; + /* In case scsi_remove_host() has not been called. */ scsi_proc_hostdir_rm(shost->hostt); /* Wait for functions invoked through call_rcu(&shost->rcu, ...) */ diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index b2d4b6c78b5c3b7d59280f0dba607ea030919800..a44a098dbb9c7aa20379bcb43c9010a1b5d7e3d7 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -5834,7 +5834,7 @@ static int hpsa_scsi_host_alloc(struct ctlr_info *h) { struct Scsi_Host *sh; - sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h)); + sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info)); if (sh == NULL) { dev_err(&h->pdev->dev, "scsi_host_alloc failed\n"); return -ENOMEM; diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index a5e6fbd86ad4581dc8f762afc12b9bb339a6d2fc..8c376736a8f51db2863129de115414be55a8489e 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -1516,23 +1516,22 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd) } /** - * strip_and_pad_whitespace - Strip and pad trailing whitespace. - * @i: index into buffer - * @buf: string to modify + * strip_whitespace - Strip and pad trailing whitespace. + * @i: size of buffer + * @buf: string to modify * - * This function will strip all trailing whitespace, pad the end - * of the string with a single space, and NULL terminate the string. + * This function will strip all trailing whitespace and + * NUL terminate the string. 
* - * Return value: - * new length of string **/ -static int strip_and_pad_whitespace(int i, char *buf) +static void strip_whitespace(int i, char *buf) { + if (i < 1) + return; + i--; while (i && buf[i] == ' ') i--; - buf[i+1] = ' '; - buf[i+2] = '\0'; - return i + 2; + buf[i+1] = '\0'; } /** @@ -1547,19 +1546,21 @@ static int strip_and_pad_whitespace(int i, char *buf) static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb, struct ipr_vpd *vpd) { - char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3]; - int i = 0; + char vendor_id[IPR_VENDOR_ID_LEN + 1]; + char product_id[IPR_PROD_ID_LEN + 1]; + char sn[IPR_SERIAL_NUM_LEN + 1]; - memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN); - i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer); + memcpy(vendor_id, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN); + strip_whitespace(IPR_VENDOR_ID_LEN, vendor_id); - memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN); - i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer); + memcpy(product_id, vpd->vpids.product_id, IPR_PROD_ID_LEN); + strip_whitespace(IPR_PROD_ID_LEN, product_id); - memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN); - buffer[IPR_SERIAL_NUM_LEN + i] = '\0'; + memcpy(sn, vpd->sn, IPR_SERIAL_NUM_LEN); + strip_whitespace(IPR_SERIAL_NUM_LEN, sn); - ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer); + ipr_hcam_err(hostrcb, "%s VPID/SN: %s %s %s\n", prefix, + vendor_id, product_id, sn); } /** diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index c088a848776efc4e96baa1009e25e8d2308fe8e3..2d5b1d5978664eb153afb45ece887eb1e8e148f5 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -1517,6 +1517,8 @@ struct megasas_ctrl_info { #define MEGASAS_MAX_LD_IDS (MEGASAS_MAX_LD_CHANNELS * \ MEGASAS_MAX_DEV_PER_CHANNEL) +#define MEGASAS_MAX_SUPPORTED_LD_IDS 240 + #define MEGASAS_MAX_SECTORS (2*1024) #define MEGASAS_MAX_SECTORS_IEEE (2*128) #define MEGASAS_DBG_LVL 1 diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index 83f69c33b01a97ff71cb9d005799a56739c7d3db..ec10d35b4685a2ecf5f96fadad265ca83db9dcb8 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -358,7 +358,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id) ld = MR_TargetIdToLdGet(i, drv_map); /* For non existing VDs, iterate to next VD*/ - if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1)) + if (ld >= MEGASAS_MAX_SUPPORTED_LD_IDS) continue; raid = MR_LdRaidGet(ld, drv_map); diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index c1b76cda60dbc7ec06b54596ebfeb2ffba04ff64..26b15a24300ef856f079d2a5e689ea5798a84e20 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -2822,19 +2822,25 @@ static int _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) { struct sysinfo s; + u64 coherent_dma_mask, dma_mask; - if (ioc->is_mcpu_endpoint || - sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma || - dma_get_required_mask(&pdev->dev) <= DMA_BIT_MASK(32)) + if (ioc->is_mcpu_endpoint || sizeof(dma_addr_t) == 4) { ioc->dma_mask = 32; + coherent_dma_mask = dma_mask = DMA_BIT_MASK(32); /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */ - else if (ioc->hba_mpi_version_belonged > MPI2_VERSION) + } else if (ioc->hba_mpi_version_belonged > MPI2_VERSION) { ioc->dma_mask = 63; - else + coherent_dma_mask = 
dma_mask = DMA_BIT_MASK(63); + } else { ioc->dma_mask = 64; + coherent_dma_mask = dma_mask = DMA_BIT_MASK(64); + } - if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask)) || - dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask))) + if (ioc->use_32bit_dma) + coherent_dma_mask = DMA_BIT_MASK(32); + + if (dma_set_mask(&pdev->dev, dma_mask) || + dma_set_coherent_mask(&pdev->dev, coherent_dma_mask)) return -ENODEV; if (ioc->dma_mask > 32) { @@ -4905,6 +4911,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) } dma_pool_destroy(ioc->pcie_sgl_dma_pool); } + kfree(ioc->pcie_sg_lookup); + ioc->pcie_sg_lookup = NULL; + if (ioc->config_page) { dexitprintk(ioc, ioc_info(ioc, "config_page(0x%p): free\n", diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index d63ccdf6e9887c2a471e621b07e8c594532d7c42..695dd89be330710deaed07aab861d809272a0b3b 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -165,18 +165,6 @@ static void qla_nvme_release_fcp_cmd_kref(struct kref *kref) qla2xxx_rel_qpair_sp(sp->qpair, sp); } -static void qla_nvme_ls_unmap(struct srb *sp, struct nvmefc_ls_req *fd) -{ - if (sp->flags & SRB_DMA_VALID) { - struct srb_iocb *nvme = &sp->u.iocb_cmd; - struct qla_hw_data *ha = sp->fcport->vha->hw; - - dma_unmap_single(&ha->pdev->dev, nvme->u.nvme.cmd_dma, - fd->rqstlen, DMA_TO_DEVICE); - sp->flags &= ~SRB_DMA_VALID; - } -} - static void qla_nvme_release_ls_cmd_kref(struct kref *kref) { struct srb *sp = container_of(kref, struct srb, cmd_kref); @@ -194,7 +182,6 @@ static void qla_nvme_release_ls_cmd_kref(struct kref *kref) fd = priv->fd; - qla_nvme_ls_unmap(sp, fd); fd->done(fd, priv->comp_status); out: qla2x00_rel_sp(sp); @@ -336,13 +323,10 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport, nvme->u.nvme.rsp_len = fd->rsplen; nvme->u.nvme.rsp_dma = fd->rspdma; nvme->u.nvme.timeout_sec = fd->timeout; - nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr, - fd->rqstlen, DMA_TO_DEVICE); + nvme->u.nvme.cmd_dma = fd->rqstdma; dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma, fd->rqstlen, DMA_TO_DEVICE); - sp->flags |= SRB_DMA_VALID; - rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x700e, @@ -350,7 +334,6 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport, wake_up(&sp->nvme_ls_waitq); sp->priv = NULL; priv->sp = NULL; - qla_nvme_ls_unmap(sp, fd); qla2x00_rel_sp(sp); return rval; } diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 419156121cb59a9ededeef864994c7c9a3b6d05a..e1132970f189222d96ce899eff0ceb3139ecc30d 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -6899,9 +6899,12 @@ qla2x00_do_dpc(void *data) } } loop_resync_check: - if (test_and_clear_bit(LOOP_RESYNC_NEEDED, + if (!qla2x00_reset_active(base_vha) && + test_and_clear_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags)) { - + /* + * Allow abort_isp to complete before moving on to scanning. 
+ */ ql_dbg(ql_dbg_dpc, base_vha, 0x400f, "Loop resync scheduled.\n"); @@ -7145,7 +7148,7 @@ qla2x00_timer(struct timer_list *t) /* if the loop has been down for 4 minutes, reinit adapter */ if (atomic_dec_and_test(&vha->loop_down_timer) != 0) { - if (!(vha->device_flags & DFLG_NO_CABLE)) { + if (!(vha->device_flags & DFLG_NO_CABLE) && !vha->vp_idx) { ql_log(ql_log_warn, vha, 0x6009, "Loop down - aborting ISP.\n"); diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 8e474b1452495819224e5928d86c875933dcdf0d..6f7c4d41c51de1cbd0a5b5fcac71ec949774d04f 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -1129,8 +1129,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget, * that no LUN is present, so don't add sdev in these cases. * Two specific examples are: * 1) NetApp targets: return PQ=1, PDT=0x1f - * 2) IBM/2145 targets: return PQ=1, PDT=0 - * 3) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved" + * 2) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved" * in the UFI 1.0 spec (we cannot rely on reserved bits). * * References: @@ -1144,8 +1143,8 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget, * PDT=00h Direct-access device (floppy) * PDT=1Fh none (no FDD connected to the requested logical unit) */ - if (((result[0] >> 5) == 1 || - (starget->pdt_1f_for_no_lun && (result[0] & 0x1f) == 0x1f)) && + if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) && + (result[0] & 0x1f) == 0x1f && !scsi_is_wlun(lun)) { SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev, "scsi scan: peripheral device type" diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index ef7cd7520e7c7cf23d5bd51c91fb766f068b96d4..092bd6a3d64a1997e09f3f541645b4625fde16ca 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -1674,6 +1674,13 @@ static const char *iscsi_session_state_name(int state) return name; } +static char *iscsi_session_target_state_name[] = { + [ISCSI_SESSION_TARGET_UNBOUND] = "UNBOUND", + [ISCSI_SESSION_TARGET_ALLOCATED] = "ALLOCATED", + [ISCSI_SESSION_TARGET_SCANNED] = "SCANNED", + [ISCSI_SESSION_TARGET_UNBINDING] = "UNBINDING", +}; + int iscsi_session_chkready(struct iscsi_cls_session *session) { unsigned long flags; @@ -1805,9 +1812,13 @@ static int iscsi_user_scan_session(struct device *dev, void *data) if ((scan_data->channel == SCAN_WILD_CARD || scan_data->channel == 0) && (scan_data->id == SCAN_WILD_CARD || - scan_data->id == id)) + scan_data->id == id)) { scsi_scan_target(&session->dev, 0, id, scan_data->lun, scan_data->rescan); + spin_lock_irqsave(&session->lock, flags); + session->target_state = ISCSI_SESSION_TARGET_SCANNED; + spin_unlock_irqrestore(&session->lock, flags); + } } user_scan_exit: @@ -1996,31 +2007,41 @@ static void __iscsi_unbind_session(struct work_struct *work) struct iscsi_cls_host *ihost = shost->shost_data; unsigned long flags; unsigned int target_id; + bool remove_target = true; ISCSI_DBG_TRANS_SESSION(session, "Unbinding session\n"); /* Prevent new scans and make sure scanning is not in progress */ mutex_lock(&ihost->mutex); spin_lock_irqsave(&session->lock, flags); - if (session->target_id == ISCSI_MAX_TARGET) { + if (session->target_state == ISCSI_SESSION_TARGET_ALLOCATED) { + remove_target = false; + } else if (session->target_state != ISCSI_SESSION_TARGET_SCANNED) { spin_unlock_irqrestore(&session->lock, flags); mutex_unlock(&ihost->mutex); - goto unbind_session_exit; + ISCSI_DBG_TRANS_SESSION(session, + "Skipping 
target unbinding: Session is unbound/unbinding.\n"); + return; } + session->target_state = ISCSI_SESSION_TARGET_UNBINDING; target_id = session->target_id; session->target_id = ISCSI_MAX_TARGET; spin_unlock_irqrestore(&session->lock, flags); mutex_unlock(&ihost->mutex); - scsi_remove_target(&session->dev); + if (remove_target) + scsi_remove_target(&session->dev); if (session->ida_used) ida_simple_remove(&iscsi_sess_ida, target_id); -unbind_session_exit: iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION); ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n"); + + spin_lock_irqsave(&session->lock, flags); + session->target_state = ISCSI_SESSION_TARGET_UNBOUND; + spin_unlock_irqrestore(&session->lock, flags); } static void __iscsi_destroy_session(struct work_struct *work) @@ -2089,6 +2110,9 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id) session->ida_used = true; } else session->target_id = target_id; + spin_lock_irqsave(&session->lock, flags); + session->target_state = ISCSI_SESSION_TARGET_ALLOCATED; + spin_unlock_irqrestore(&session->lock, flags); dev_set_name(&session->dev, "session%u", session->sid); err = device_add(&session->dev); @@ -4343,6 +4367,19 @@ iscsi_session_attr(def_taskmgmt_tmo, ISCSI_PARAM_DEF_TASKMGMT_TMO, 0); iscsi_session_attr(discovery_parent_idx, ISCSI_PARAM_DISCOVERY_PARENT_IDX, 0); iscsi_session_attr(discovery_parent_type, ISCSI_PARAM_DISCOVERY_PARENT_TYPE, 0); +static ssize_t +show_priv_session_target_state(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent); + + return sysfs_emit(buf, "%s\n", + iscsi_session_target_state_name[session->target_state]); +} + +static ISCSI_CLASS_ATTR(priv_sess, target_state, S_IRUGO, + show_priv_session_target_state, NULL); + static ssize_t show_priv_session_state(struct device *dev, struct device_attribute *attr, char *buf) @@ -4445,6 +4482,7 @@ static struct attribute *iscsi_session_attrs[] = { &dev_attr_sess_boot_target.attr, &dev_attr_priv_sess_recovery_tmo.attr, &dev_attr_priv_sess_state.attr, + &dev_attr_priv_sess_target_state.attr, &dev_attr_priv_sess_creator.attr, &dev_attr_sess_chap_out_idx.attr, &dev_attr_sess_chap_in_idx.attr, @@ -4558,6 +4596,8 @@ static umode_t iscsi_session_attr_is_visible(struct kobject *kobj, return S_IRUGO | S_IWUSR; else if (attr == &dev_attr_priv_sess_state.attr) return S_IRUGO; + else if (attr == &dev_attr_priv_sess_target_state.attr) + return S_IRUGO; else if (attr == &dev_attr_priv_sess_creator.attr) return S_IRUGO; else if (attr == &dev_attr_priv_sess_target_id.attr) diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index 0a1734f34587dd4dc9e8b78468b05d37336e86b5..1707d6d144d217f65d875f3cae9e4a31f4537eca 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c @@ -433,8 +433,8 @@ int ses_match_host(struct enclosure_device *edev, void *data) } #endif /* 0 */ -static void ses_process_descriptor(struct enclosure_component *ecomp, - unsigned char *desc) +static int ses_process_descriptor(struct enclosure_component *ecomp, + unsigned char *desc, int max_desc_len) { int eip = desc[0] & 0x10; int invalid = desc[0] & 0x80; @@ -445,22 +445,32 @@ static void ses_process_descriptor(struct enclosure_component *ecomp, unsigned char *d; if (invalid) - return; + return 0; switch (proto) { case SCSI_PROTOCOL_FCP: if (eip) { + if (max_desc_len <= 7) + return 1; d = desc + 4; slot = d[3]; } break; case SCSI_PROTOCOL_SAS: + if (eip) { + if (max_desc_len <= 27) + return 1; d = 
desc + 4; slot = d[3]; d = desc + 8; - } else + } else { + if (max_desc_len <= 23) + return 1; d = desc + 4; + } + + /* only take the phy0 addr */ addr = (u64)d[12] << 56 | (u64)d[13] << 48 | @@ -477,6 +487,8 @@ static void ses_process_descriptor(struct enclosure_component *ecomp, } ecomp->slot = slot; scomp->addr = addr; + + return 0; } struct efd { @@ -549,7 +561,7 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, /* skip past overall descriptor */ desc_ptr += len + 4; } - if (ses_dev->page10) + if (ses_dev->page10 && ses_dev->page10_len > 9) addl_desc_ptr = ses_dev->page10 + 8; type_ptr = ses_dev->page1_types; components = 0; @@ -557,17 +569,22 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, for (j = 0; j < type_ptr[1]; j++) { char *name = NULL; struct enclosure_component *ecomp; + int max_desc_len; if (desc_ptr) { - if (desc_ptr >= buf + page7_len) { + if (desc_ptr + 3 >= buf + page7_len) { desc_ptr = NULL; } else { len = (desc_ptr[2] << 8) + desc_ptr[3]; desc_ptr += 4; - /* Add trailing zero - pushes into - * reserved space */ - desc_ptr[len] = '\0'; - name = desc_ptr; + if (desc_ptr + len > buf + page7_len) + desc_ptr = NULL; + else { + /* Add trailing zero - pushes into + * reserved space */ + desc_ptr[len] = '\0'; + name = desc_ptr; + } } } if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || @@ -583,10 +600,14 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, ecomp = &edev->component[components++]; if (!IS_ERR(ecomp)) { - if (addl_desc_ptr) - ses_process_descriptor( - ecomp, - addl_desc_ptr); + if (addl_desc_ptr) { + max_desc_len = ses_dev->page10_len - + (addl_desc_ptr - ses_dev->page10); + if (ses_process_descriptor(ecomp, + addl_desc_ptr, + max_desc_len)) + addl_desc_ptr = NULL; + } if (create) enclosure_component_register( ecomp); @@ -603,9 +624,11 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, /* these elements are optional */ type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_TARGET_PORT || type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT || - type_ptr[0] == ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS)) + type_ptr[0] == ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS)) { addl_desc_ptr += addl_desc_ptr[1] + 2; - + if (addl_desc_ptr + 1 >= ses_dev->page10 + ses_dev->page10_len) + addl_desc_ptr = NULL; + } } } kfree(buf); @@ -704,6 +727,12 @@ static int ses_intf_add(struct device *cdev, type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) components += type_ptr[1]; } + + if (components == 0) { + sdev_printk(KERN_WARNING, sdev, "enclosure has no enumerated components\n"); + goto err_free; + } + ses_dev->page1 = buf; ses_dev->page1_len = len; buf = NULL; @@ -827,7 +856,8 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev) kfree(ses_dev->page2); kfree(ses_dev); - kfree(edev->component[0].scratch); + if (edev->components) + kfree(edev->component[0].scratch); put_device(&edev->edev); enclosure_unregister(edev); diff --git a/drivers/soc/qcom/cpr.c b/drivers/soc/qcom/cpr.c index 6298561bc29c9dfd30739416c73e87585ae5d611..fac0414c37314431559c17c0d65fe47043a6aedd 100644 --- a/drivers/soc/qcom/cpr.c +++ b/drivers/soc/qcom/cpr.c @@ -1743,12 +1743,16 @@ static int cpr_probe(struct platform_device *pdev) ret = of_genpd_add_provider_simple(dev->of_node, &drv->pd); if (ret) - return ret; + goto err_remove_genpd; platform_set_drvdata(pdev, drv); cpr_debugfs_init(drv); return 0; + +err_remove_genpd: + pm_genpd_remove(&drv->pd); + return ret; } static int cpr_remove(struct platform_device *pdev) diff 
--git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c index a3247692ddc077508f8547a1c3c1584dea045ebd..18e7d158fcca4d6a8b338714980fd95a99fe9c60 100644 --- a/drivers/soundwire/cadence_master.c +++ b/drivers/soundwire/cadence_master.c @@ -511,6 +511,29 @@ cdns_fill_msg_resp(struct sdw_cdns *cdns, return SDW_CMD_OK; } +static void cdns_read_response(struct sdw_cdns *cdns) +{ + u32 num_resp, cmd_base; + int i; + + /* RX_FIFO_AVAIL can be 2 entries more than the FIFO size */ + BUILD_BUG_ON(ARRAY_SIZE(cdns->response_buf) < CDNS_MCP_CMD_LEN + 2); + + num_resp = cdns_readl(cdns, CDNS_MCP_FIFOSTAT); + num_resp &= CDNS_MCP_RX_FIFO_AVAIL; + if (num_resp > ARRAY_SIZE(cdns->response_buf)) { + dev_warn(cdns->dev, "RX AVAIL %d too long\n", num_resp); + num_resp = ARRAY_SIZE(cdns->response_buf); + } + + cmd_base = CDNS_MCP_CMD_BASE; + + for (i = 0; i < num_resp; i++) { + cdns->response_buf[i] = cdns_readl(cdns, cmd_base); + cmd_base += CDNS_MCP_CMD_WORD_LEN; + } +} + static enum sdw_command_response _cdns_xfer_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int cmd, int offset, int count, bool defer) @@ -552,6 +575,10 @@ _cdns_xfer_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int cmd, dev_err(cdns->dev, "IO transfer timed out, cmd %d device %d addr %x len %d\n", cmd, msg->dev_num, msg->addr, msg->len); msg->len = 0; + + /* Drain anything in the RX_FIFO */ + cdns_read_response(cdns); + return SDW_CMD_TIMEOUT; } @@ -720,22 +747,6 @@ EXPORT_SYMBOL(cdns_reset_page_addr); * IRQ handling */ -static void cdns_read_response(struct sdw_cdns *cdns) -{ - u32 num_resp, cmd_base; - int i; - - num_resp = cdns_readl(cdns, CDNS_MCP_FIFOSTAT); - num_resp &= CDNS_MCP_RX_FIFO_AVAIL; - - cmd_base = CDNS_MCP_CMD_BASE; - - for (i = 0; i < num_resp; i++) { - cdns->response_buf[i] = cdns_readl(cdns, cmd_base); - cmd_base += CDNS_MCP_CMD_WORD_LEN; - } -} - static int cdns_update_slave_status(struct sdw_cdns *cdns, u32 slave0, u32 slave1) { diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h index 4d1aab5b5ec2d6563617cde98d4a3986f73a5914..e7f0108d417ca69f630de1d4a37c973f54389773 100644 --- a/drivers/soundwire/cadence_master.h +++ b/drivers/soundwire/cadence_master.h @@ -8,6 +8,12 @@ #define SDW_CADENCE_GSYNC_KHZ 4 /* 4 kHz */ #define SDW_CADENCE_GSYNC_HZ (SDW_CADENCE_GSYNC_KHZ * 1000) +/* + * The Cadence IP supports up to 32 entries in the FIFO, though implementations + * can configure the IP to have a smaller FIFO. + */ +#define CDNS_MCP_IP_MAX_CMD_LEN 32 + /** * struct sdw_cdns_pdi: PDI (Physical Data Interface) instance * @@ -119,7 +125,12 @@ struct sdw_cdns { struct sdw_bus bus; unsigned int instance; - u32 response_buf[0x80]; + /* + * The datasheet says the RX FIFO AVAIL can be 2 entries more + * than the FIFO capacity, so allow for this. + */ + u32 response_buf[CDNS_MCP_IP_MAX_CMD_LEN + 2]; + struct completion tx_complete; struct sdw_defer *defer; diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index aadaea052f51d3604e873aaceb2f3248989f1e8a..4d98ce7571df015180d0ff971eee6f2f38521d60 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -256,7 +256,6 @@ config SPI_DW_BT1 tristate "Baikal-T1 SPI driver for DW SPI core" depends on MIPS_BAIKAL_T1 || COMPILE_TEST select MULTIPLEXER - select MUX_MMIO help Baikal-T1 SoC is equipped with three DW APB SSI-based MMIO SPI controllers. 
Two of them are pretty much normal: with IRQ, DMA, diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c index 1f08d7553f079c29c8995ce4a0517d313f85d50f..02f56fc001b473c4bd0fb565cbf9b6d93943be21 100644 --- a/drivers/spi/spi-bcm63xx-hsspi.c +++ b/drivers/spi/spi-bcm63xx-hsspi.c @@ -21,6 +21,7 @@ #include #include #include +#include #define HSSPI_GLOBAL_CTRL_REG 0x0 #define GLOBAL_CTRL_CS_POLARITY_SHIFT 0 @@ -162,6 +163,7 @@ static int bcm63xx_hsspi_do_txrx(struct spi_device *spi, struct spi_transfer *t) int step_size = HSSPI_BUFFER_LEN; const u8 *tx = t->tx_buf; u8 *rx = t->rx_buf; + u32 val = 0; bcm63xx_hsspi_set_clk(bs, spi, t->speed_hz); bcm63xx_hsspi_set_cs(bs, spi->chip_select, true); @@ -177,11 +179,16 @@ static int bcm63xx_hsspi_do_txrx(struct spi_device *spi, struct spi_transfer *t) step_size -= HSSPI_OPCODE_LEN; if ((opcode == HSSPI_OP_READ && t->rx_nbits == SPI_NBITS_DUAL) || - (opcode == HSSPI_OP_WRITE && t->tx_nbits == SPI_NBITS_DUAL)) + (opcode == HSSPI_OP_WRITE && t->tx_nbits == SPI_NBITS_DUAL)) { opcode |= HSSPI_OP_MULTIBIT; - __raw_writel(1 << MODE_CTRL_MULTIDATA_WR_SIZE_SHIFT | - 1 << MODE_CTRL_MULTIDATA_RD_SIZE_SHIFT | 0xff, + if (t->rx_nbits == SPI_NBITS_DUAL) + val |= 1 << MODE_CTRL_MULTIDATA_RD_SIZE_SHIFT; + if (t->tx_nbits == SPI_NBITS_DUAL) + val |= 1 << MODE_CTRL_MULTIDATA_WR_SIZE_SHIFT; + } + + __raw_writel(val | 0xff, bs->regs + HSSPI_PROFILE_MODE_CTRL_REG(chip_select)); while (pending > 0) { @@ -439,13 +446,17 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev) if (ret) goto out_put_master; + pm_runtime_enable(&pdev->dev); + /* register and we are done */ ret = devm_spi_register_master(dev, master); if (ret) - goto out_put_master; + goto out_pm_disable; return 0; +out_pm_disable: + pm_runtime_disable(&pdev->dev); out_put_master: spi_master_put(master); out_disable_pll_clk: diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c index c33866f747dbedb590825329f760c63ca893f336..aa116cee1fd8d535849130776d105745f9c4c65e 100644 --- a/drivers/spi/spi-dw-core.c +++ b/drivers/spi/spi-dw-core.c @@ -353,7 +353,7 @@ static void dw_spi_irq_setup(struct dw_spi *dws) * will be adjusted at the final stage of the IRQ-based SPI transfer * execution so not to lose the leftover of the incoming data. */ - level = min_t(u16, dws->fifo_len / 2, dws->tx_len); + level = min_t(unsigned int, dws->fifo_len / 2, dws->tx_len); dw_writel(dws, DW_SPI_TXFTLR, level); dw_writel(dws, DW_SPI_RXFTLR, level - 1); diff --git a/drivers/spi/spi-synquacer.c b/drivers/spi/spi-synquacer.c index 47cbe73137c23c930c1f4d1a309d5badba2ae7cf..dc188f9202c97b733a6a45d85c6a55278cdf6915 100644 --- a/drivers/spi/spi-synquacer.c +++ b/drivers/spi/spi-synquacer.c @@ -472,10 +472,9 @@ static int synquacer_spi_transfer_one(struct spi_master *master, read_fifo(sspi); } - if (status < 0) { - dev_err(sspi->dev, "failed to transfer. status: 0x%x\n", - status); - return status; + if (status == 0) { + dev_err(sspi->dev, "failed to transfer. 
Timeout.\n"); + return -ETIMEDOUT; } return 0; diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 9c5ec99431d2e2394f9d14f19c60126f7f7078f5..aee960a7d7f918ebbdb8cc2c3bd0b3ed966a4794 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -592,7 +592,6 @@ static int spidev_open(struct inode *inode, struct file *filp) if (!spidev->tx_buffer) { spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL); if (!spidev->tx_buffer) { - dev_dbg(&spidev->spi->dev, "open/ENOMEM\n"); status = -ENOMEM; goto err_find_dev; } @@ -601,7 +600,6 @@ static int spidev_open(struct inode *inode, struct file *filp) if (!spidev->rx_buffer) { spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL); if (!spidev->rx_buffer) { - dev_dbg(&spidev->spi->dev, "open/ENOMEM\n"); status = -ENOMEM; goto err_alloc_rx_buf; } diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 6efe436abd9bd1b5cbfd4d86a87e942b8a018a42..51fb00602a14358cfcb204dfb400041d9c42ebcb 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -130,4 +130,5 @@ source "drivers/staging/hungtask/Kconfig" source "drivers/staging/blackbox/Kconfig" +source "drivers/staging/ucollection/Kconfig" endif # STAGING diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index 9dacda71f751d353c3bfa2b969ab44ce14595d9d..a8824bb550b44cd9352fd1da713252525d3b078d 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -55,3 +55,4 @@ obj-$(CONFIG_HISYSEVENT) += hisysevent/ obj-$(CONFIG_DFX_ZEROHUNG) += zerohung/ obj-$(CONFIG_DFX_HUNGTASK) += hungtask/ obj-$(CONFIG_BLACKBOX) += blackbox/ +obj-$(CONFIG_UNIFIED_COLLECTION) += ucollection/ diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c index 3897f8e8f5e0d3f70adbc48fca662f79d5b22573..6870a33d4ccf35c1b4588f8c17ecde6cf492e870 100644 --- a/drivers/staging/emxx_udc/emxx_udc.c +++ b/drivers/staging/emxx_udc/emxx_udc.c @@ -2591,10 +2591,15 @@ static int nbu2ss_ep_queue(struct usb_ep *_ep, req->unaligned = false; if (req->unaligned) { - if (!ep->virt_buf) + if (!ep->virt_buf) { ep->virt_buf = dma_alloc_coherent(udc->dev, PAGE_SIZE, &ep->phys_buf, GFP_ATOMIC | GFP_DMA); + if (!ep->virt_buf) { + spin_unlock_irqrestore(&udc->lock, flags); + return -ENOMEM; + } + } if (ep->epnum > 0) { if (ep->direct == USB_DIR_IN) memcpy(ep->virt_buf, req->req.buf, diff --git a/drivers/staging/hisysevent/hisysevent_builder.c b/drivers/staging/hisysevent/hisysevent_builder.c index 67b1241b76638b49b0f65bd29da30144bc89570d..d9dc011da2e2213af1ec0fa3c5479638c6ef7378 100644 --- a/drivers/staging/hisysevent/hisysevent_builder.c +++ b/drivers/staging/hisysevent/hisysevent_builder.c @@ -107,8 +107,11 @@ static bool is_valid_string(const char *str, unsigned int max_len) if (len == 0 || len > max_len) return false; - for (i = 0; i < len; i++) { - if (!isalpha(str[i]) && str[i] != '_') + if (!isalpha(str[0])) + return false; + + for (i = 1; i < len; i++) { + if (!isalnum(str[i]) && str[i] != '_') return false; } return true; diff --git a/drivers/staging/hisysevent/hisysevent_raw_data.c b/drivers/staging/hisysevent/hisysevent_raw_data.c index de3c64a5edec575c8b32e427f254139c34fdd489..5e37ad726e1686947b7db440c114e80c1c59f741 100644 --- a/drivers/staging/hisysevent/hisysevent_raw_data.c +++ b/drivers/staging/hisysevent/hisysevent_raw_data.c @@ -60,10 +60,11 @@ int raw_data_update(struct hisysevent_raw_data *dest, u8 *src, unsigned int len, pr_err("failed to expand memory for raw data"); return -ENOMEM; } - memcpy(resize_data, dest->data, dest->len); - dest->capacity += 
expanded_size; - if (!dest->data) + if (dest->data) { + memcpy(resize_data, dest->data, dest->len); + dest->capacity += expanded_size; kfree(dest->data); + } dest->data = resize_data; } @@ -108,7 +109,7 @@ void raw_data_destroy(struct hisysevent_raw_data *raw_data) return; } - if (!(raw_data->data)) + if (raw_data->data) kfree(raw_data->data); kfree(raw_data); diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c index 462835684e8b0c1fa5c64fc39bf3655a092d5cf5..916ff5058ae79074b7df082a29a8636528f136d2 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c @@ -185,7 +185,6 @@ static void _rtl92e_dm_init_fsync(struct net_device *dev); static void _rtl92e_dm_deinit_fsync(struct net_device *dev); static void _rtl92e_dm_check_txrateandretrycount(struct net_device *dev); -static void _rtl92e_dm_check_ac_dc_power(struct net_device *dev); static void _rtl92e_dm_check_fsync(struct net_device *dev); static void _rtl92e_dm_check_rf_ctrl_gpio(void *data); static void _rtl92e_dm_fsync_timer_callback(struct timer_list *t); @@ -238,8 +237,6 @@ void rtl92e_dm_watchdog(struct net_device *dev) if (priv->being_init_adapter) return; - _rtl92e_dm_check_ac_dc_power(dev); - _rtl92e_dm_check_txrateandretrycount(dev); _rtl92e_dm_check_edca_turbo(dev); @@ -257,30 +254,6 @@ void rtl92e_dm_watchdog(struct net_device *dev) _rtl92e_dm_cts_to_self(dev); } -static void _rtl92e_dm_check_ac_dc_power(struct net_device *dev) -{ - struct r8192_priv *priv = rtllib_priv(dev); - static char const ac_dc_script[] = "/etc/acpi/wireless-rtl-ac-dc-power.sh"; - char *argv[] = {(char *)ac_dc_script, DRV_NAME, NULL}; - static char *envp[] = {"HOME=/", - "TERM=linux", - "PATH=/usr/bin:/bin", - NULL}; - - if (priv->ResetProgress == RESET_TYPE_SILENT) { - RT_TRACE((COMP_INIT | COMP_POWER | COMP_RF), - "GPIOChangeRFWorkItemCallBack(): Silent Reset!!!!!!!\n"); - return; - } - - if (priv->rtllib->state != RTLLIB_LINKED) - return; - call_usermodehelper(ac_dc_script, argv, envp, UMH_WAIT_PROC); - - return; -}; - - void rtl92e_init_adaptive_rate(struct net_device *dev) { @@ -1800,10 +1773,6 @@ static void _rtl92e_dm_check_rf_ctrl_gpio(void *data) u8 tmp1byte; enum rt_rf_power_state eRfPowerStateToSet; bool bActuallySet = false; - char *argv[3]; - static char const RadioPowerPath[] = "/etc/acpi/events/RadioPower.sh"; - static char *envp[] = {"HOME=/", "TERM=linux", "PATH=/usr/bin:/bin", - NULL}; bActuallySet = false; @@ -1835,14 +1804,6 @@ static void _rtl92e_dm_check_rf_ctrl_gpio(void *data) mdelay(1000); priv->bHwRfOffAction = 1; rtl92e_set_rf_state(dev, eRfPowerStateToSet, RF_CHANGE_BY_HW); - if (priv->bHwRadioOff) - argv[1] = "RFOFF"; - else - argv[1] = "RFON"; - - argv[0] = (char *)RadioPowerPath; - argv[2] = NULL; - call_usermodehelper(RadioPowerPath, argv, envp, UMH_WAIT_PROC); } } diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 7143d03f0e027e916e179cd2b44301609816fb6e..18fbbe510d018578522b8daa38ebe115f646eeff 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -340,7 +340,7 @@ static int fd_do_rw(struct se_cmd *cmd, struct file *fd, len += sg->length; } - iov_iter_bvec(&iter, READ, bvec, sgl_nents, len); + iov_iter_bvec(&iter, is_write, bvec, sgl_nents, len); if (is_write) ret = vfs_iter_write(fd, &iter, &pos, 0); else @@ -477,7 +477,7 @@ fd_execute_write_same(struct se_cmd *cmd) len += se_dev->dev_attrib.block_size; } - iov_iter_bvec(&iter, READ, bvec, nolb, len); + 
iov_iter_bvec(&iter, WRITE, bvec, nolb, len); ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos, 0); kfree(bvec); diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index e4513ef091593f8560c9815ac55be306a0f84118..3efd5a3bd69d1f87df6a532b65b26e0e636e8187 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -82,8 +82,8 @@ static bool __target_check_io_state(struct se_cmd *se_cmd, { struct se_session *sess = se_cmd->se_sess; - assert_spin_locked(&sess->sess_cmd_lock); - WARN_ON_ONCE(!irqs_disabled()); + lockdep_assert_held(&sess->sess_cmd_lock); + /* * If command already reached CMD_T_COMPLETE state within * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown, diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c index ee05950afd2f97df7d954eb0830a2f688b52270c..7b1e81912ccf72dd6ec5e471acdd05f3bf262e50 100644 --- a/drivers/thermal/hisi_thermal.c +++ b/drivers/thermal/hisi_thermal.c @@ -435,10 +435,6 @@ static int hi3660_thermal_probe(struct hisi_thermal_data *data) data->sensor[0].irq_name = "tsensor_a73"; data->sensor[0].data = data; - data->sensor[1].id = HI3660_LITTLE_SENSOR; - data->sensor[1].irq_name = "tsensor_a53"; - data->sensor[1].data = data; - return 0; } diff --git a/drivers/thermal/intel/Kconfig b/drivers/thermal/intel/Kconfig index 8025b21f43fa541bad16f989611b1ca9f738c89e..b5427579fae59788eb8cfea0cd92fc713d071232 100644 --- a/drivers/thermal/intel/Kconfig +++ b/drivers/thermal/intel/Kconfig @@ -60,7 +60,8 @@ endmenu config INTEL_BXT_PMIC_THERMAL tristate "Intel Broxton PMIC thermal driver" - depends on X86 && INTEL_SOC_PMIC_BXTWC && REGMAP + depends on X86 && INTEL_SOC_PMIC_BXTWC + select REGMAP help Select this driver for Intel Broxton PMIC with ADC channels monitoring system temperature measurements and alerts. 
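The target_core_tmr.c hunk above swaps assert_spin_locked() plus an irqs_disabled() check for a single lockdep_assert_held(), which verifies that the current context holds the lock when lockdep is enabled and compiles away otherwise. A minimal sketch of that annotation idiom follows; the demo_* names are hypothetical and not taken from the driver:

/*
 * Sketch only: document a "caller must hold the lock" contract with
 * lockdep_assert_held(). Unlike assert_spin_locked(), it checks that this
 * context owns the lock (under lockdep) and is a no-op in production
 * builds, so no separate irqs_disabled() assertion is needed.
 */
#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct demo_session {
	spinlock_t	cmd_lock;	/* protects cmd_count */
	unsigned int	cmd_count;
};

/* Caller must hold sess->cmd_lock. */
static bool demo_session_busy(struct demo_session *sess)
{
	lockdep_assert_held(&sess->cmd_lock);

	return sess->cmd_count != 0;
}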
diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c index a337600d5bc4cbbfceb5c5191331bc88f76848c1..6952f4e237e1f6954c049a7308b6f567cfe20ba6 100644 --- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c +++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c @@ -44,11 +44,13 @@ static int int340x_thermal_get_trip_temp(struct thermal_zone_device *zone, int trip, int *temp) { struct int34x_thermal_zone *d = zone->devdata; - int i; + int i, ret = 0; if (d->override_ops && d->override_ops->get_trip_temp) return d->override_ops->get_trip_temp(zone, trip, temp); + mutex_lock(&d->trip_mutex); + if (trip < d->aux_trip_nr) *temp = d->aux_trips[trip]; else if (trip == d->crt_trip_id) @@ -66,10 +68,12 @@ static int int340x_thermal_get_trip_temp(struct thermal_zone_device *zone, } } if (i == INT340X_THERMAL_MAX_ACT_TRIP_COUNT) - return -EINVAL; + ret = -EINVAL; } - return 0; + mutex_unlock(&d->trip_mutex); + + return ret; } static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone, @@ -77,11 +81,13 @@ static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone, enum thermal_trip_type *type) { struct int34x_thermal_zone *d = zone->devdata; - int i; + int i, ret = 0; if (d->override_ops && d->override_ops->get_trip_type) return d->override_ops->get_trip_type(zone, trip, type); + mutex_lock(&d->trip_mutex); + if (trip < d->aux_trip_nr) *type = THERMAL_TRIP_PASSIVE; else if (trip == d->crt_trip_id) @@ -99,10 +105,12 @@ static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone, } } if (i == INT340X_THERMAL_MAX_ACT_TRIP_COUNT) - return -EINVAL; + ret = -EINVAL; } - return 0; + mutex_unlock(&d->trip_mutex); + + return ret; } static int int340x_thermal_set_trip_temp(struct thermal_zone_device *zone, @@ -174,6 +182,8 @@ int int340x_thermal_read_trips(struct int34x_thermal_zone *int34x_zone) int trip_cnt = int34x_zone->aux_trip_nr; int i; + mutex_lock(&int34x_zone->trip_mutex); + int34x_zone->crt_trip_id = -1; if (!int340x_thermal_get_trip_config(int34x_zone->adev->handle, "_CRT", &int34x_zone->crt_temp)) @@ -201,6 +211,8 @@ int int340x_thermal_read_trips(struct int34x_thermal_zone *int34x_zone) int34x_zone->act_trips[i].valid = true; } + mutex_unlock(&int34x_zone->trip_mutex); + return trip_cnt; } EXPORT_SYMBOL_GPL(int340x_thermal_read_trips); @@ -224,6 +236,8 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev, if (!int34x_thermal_zone) return ERR_PTR(-ENOMEM); + mutex_init(&int34x_thermal_zone->trip_mutex); + int34x_thermal_zone->adev = adev; int34x_thermal_zone->override_ops = override_ops; @@ -275,6 +289,7 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev, acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table); kfree(int34x_thermal_zone->aux_trips); err_trip_alloc: + mutex_destroy(&int34x_thermal_zone->trip_mutex); kfree(int34x_thermal_zone); return ERR_PTR(ret); } @@ -286,6 +301,7 @@ void int340x_thermal_zone_remove(struct int34x_thermal_zone thermal_zone_device_unregister(int34x_thermal_zone->zone); acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table); kfree(int34x_thermal_zone->aux_trips); + mutex_destroy(&int34x_thermal_zone->trip_mutex); kfree(int34x_thermal_zone); } EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove); diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h index 
3b4971df1b33b00534123783f3da2e2670d4abb9..8f9872afd0d3c70a7af2060c09de71474760a6fc 100644 --- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h +++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h @@ -32,6 +32,7 @@ struct int34x_thermal_zone { struct thermal_zone_device_ops *override_ops; void *priv_data; struct acpi_lpat_conversion_table *lpat_table; + struct mutex trip_mutex; }; struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *, diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c index fb04470d7d4bbb2b77b3655ee880839e7de45d98..6e7c230d308f730e680dfa93bf1eaf1b7bf45a4f 100644 --- a/drivers/thermal/intel/intel_powerclamp.c +++ b/drivers/thermal/intel/intel_powerclamp.c @@ -57,6 +57,7 @@ static unsigned int target_mwait; static struct dentry *debug_dir; +static bool poll_pkg_cstate_enable; /* user selected target */ static unsigned int set_target_ratio; @@ -262,6 +263,9 @@ static unsigned int get_compensation(int ratio) { unsigned int comp = 0; + if (!poll_pkg_cstate_enable) + return 0; + /* we only use compensation if all adjacent ones are good */ if (ratio == 1 && cal_data[ratio].confidence >= CONFIDENCE_OK && @@ -534,7 +538,8 @@ static int start_power_clamp(void) control_cpu = cpumask_first(cpu_online_mask); clamping = true; - schedule_delayed_work(&poll_pkg_cstate_work, 0); + if (poll_pkg_cstate_enable) + schedule_delayed_work(&poll_pkg_cstate_work, 0); /* start one kthread worker per online cpu */ for_each_online_cpu(cpu) { @@ -603,11 +608,15 @@ static int powerclamp_get_max_state(struct thermal_cooling_device *cdev, static int powerclamp_get_cur_state(struct thermal_cooling_device *cdev, unsigned long *state) { - if (true == clamping) - *state = pkg_cstate_ratio_cur; - else + if (clamping) { + if (poll_pkg_cstate_enable) + *state = pkg_cstate_ratio_cur; + else + *state = set_target_ratio; + } else { /* to save power, do not poll idle ratio while not clamping */ *state = -1; /* indicates invalid state */ + } return 0; } @@ -732,6 +741,9 @@ static int __init powerclamp_init(void) goto exit_unregister; } + if (topology_max_packages() == 1 && topology_max_die_per_package() == 1) + poll_pkg_cstate_enable = true; + cooling_dev = thermal_cooling_device_register("intel_powerclamp", NULL, &powerclamp_cooling_ops); if (IS_ERR(cooling_dev)) { diff --git a/drivers/thermal/intel/intel_quark_dts_thermal.c b/drivers/thermal/intel/intel_quark_dts_thermal.c index 3eafc6b0e6c307a951148f4dd5543aaef133b550..b43fbd5eaa6b41ed4276ba92623293eb56877eaf 100644 --- a/drivers/thermal/intel/intel_quark_dts_thermal.c +++ b/drivers/thermal/intel/intel_quark_dts_thermal.c @@ -415,22 +415,14 @@ MODULE_DEVICE_TABLE(x86cpu, qrk_thermal_ids); static int __init intel_quark_thermal_init(void) { - int err = 0; - if (!x86_match_cpu(qrk_thermal_ids) || !iosf_mbi_available()) return -ENODEV; soc_dts = alloc_soc_dts(); - if (IS_ERR(soc_dts)) { - err = PTR_ERR(soc_dts); - goto err_free; - } + if (IS_ERR(soc_dts)) + return PTR_ERR(soc_dts); return 0; - -err_free: - free_soc_dts(soc_dts); - return err; } static void __exit intel_quark_thermal_exit(void) diff --git a/drivers/thermal/intel/intel_soc_dts_iosf.c b/drivers/thermal/intel/intel_soc_dts_iosf.c index 4f1a2f7c016cc14edc25a1af043a9d30e3a0e650..8d6707e48d02332ff756edbe32d0f27cd09e6f5b 100644 --- a/drivers/thermal/intel/intel_soc_dts_iosf.c +++ b/drivers/thermal/intel/intel_soc_dts_iosf.c @@ -404,7 +404,7 @@ struct intel_soc_dts_sensors *intel_soc_dts_iosf_init( { struct 
intel_soc_dts_sensors *sensors; bool notification; - u32 tj_max; + int tj_max; int ret; int i; diff --git a/drivers/thermal/qcom/tsens-v1.c b/drivers/thermal/qcom/tsens-v1.c index 3c19a3800c6d685425f35c46d0a7545bc78749ac..faa4576fa028ffc32756f765d298c45fbaca36b4 100644 --- a/drivers/thermal/qcom/tsens-v1.c +++ b/drivers/thermal/qcom/tsens-v1.c @@ -78,11 +78,6 @@ #define MSM8976_CAL_SEL_MASK 0x3 -#define MSM8976_CAL_DEGC_PT1 30 -#define MSM8976_CAL_DEGC_PT2 120 -#define MSM8976_SLOPE_FACTOR 1000 -#define MSM8976_SLOPE_DEFAULT 3200 - /* eeprom layout data for qcs404/405 (v1) */ #define BASE0_MASK 0x000007f8 #define BASE1_MASK 0x0007f800 @@ -142,30 +137,6 @@ #define CAL_SEL_MASK 7 #define CAL_SEL_SHIFT 0 -static void compute_intercept_slope_8976(struct tsens_priv *priv, - u32 *p1, u32 *p2, u32 mode) -{ - int i; - - priv->sensor[0].slope = 3313; - priv->sensor[1].slope = 3275; - priv->sensor[2].slope = 3320; - priv->sensor[3].slope = 3246; - priv->sensor[4].slope = 3279; - priv->sensor[5].slope = 3257; - priv->sensor[6].slope = 3234; - priv->sensor[7].slope = 3269; - priv->sensor[8].slope = 3255; - priv->sensor[9].slope = 3239; - priv->sensor[10].slope = 3286; - - for (i = 0; i < priv->num_sensors; i++) { - priv->sensor[i].offset = (p1[i] * MSM8976_SLOPE_FACTOR) - - (MSM8976_CAL_DEGC_PT1 * - priv->sensor[i].slope); - } -} - static int calibrate_v1(struct tsens_priv *priv) { u32 base0 = 0, base1 = 0; @@ -291,7 +262,7 @@ static int calibrate_8976(struct tsens_priv *priv) break; } - compute_intercept_slope_8976(priv, p1, p2, mode); + compute_intercept_slope(priv, p1, p2, mode); kfree(qfprom_cdata); return 0; @@ -362,6 +333,22 @@ static const struct reg_field tsens_v1_regfields[MAX_REGFIELDS] = { [TRDY] = REG_FIELD(TM_TRDY_OFF, 0, 0), }; +static int __init init_8956(struct tsens_priv *priv) { + priv->sensor[0].slope = 3313; + priv->sensor[1].slope = 3275; + priv->sensor[2].slope = 3320; + priv->sensor[3].slope = 3246; + priv->sensor[4].slope = 3279; + priv->sensor[5].slope = 3257; + priv->sensor[6].slope = 3234; + priv->sensor[7].slope = 3269; + priv->sensor[8].slope = 3255; + priv->sensor[9].slope = 3239; + priv->sensor[10].slope = 3286; + + return init_common(priv); +} + static const struct tsens_ops ops_generic_v1 = { .init = init_common, .calibrate = calibrate_v1, @@ -374,17 +361,29 @@ struct tsens_plat_data data_tsens_v1 = { .fields = tsens_v1_regfields, }; +static const struct tsens_ops ops_8956 = { + .init = init_8956, + .calibrate = calibrate_8976, + .get_temp = get_temp_tsens_valid, +}; + +struct tsens_plat_data data_8956 = { + .num_sensors = 11, + .ops = &ops_8956, + .feat = &tsens_v1_feat, + .fields = tsens_v1_regfields, +}; + static const struct tsens_ops ops_8976 = { .init = init_common, .calibrate = calibrate_8976, .get_temp = get_temp_tsens_valid, }; -/* Valid for both MSM8956 and MSM8976. Sensor ID 3 is unused. 
*/ struct tsens_plat_data data_8976 = { .num_sensors = 11, .ops = &ops_8976, - .hw_ids = (unsigned int[]){0, 1, 2, 4, 5, 6, 7, 8, 9, 10}, + .hw_ids = (unsigned int[]){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, .feat = &tsens_v1_feat, .fields = tsens_v1_regfields, }; diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c index cb4f4b52244603026b98a2fb982f58cf6dc48d6c..c73792ca727a1667e3fcdbb76d1b1827523a5971 100644 --- a/drivers/thermal/qcom/tsens.c +++ b/drivers/thermal/qcom/tsens.c @@ -902,6 +902,12 @@ static const struct of_device_id tsens_table[] = { }, { .compatible = "qcom,msm8939-tsens", .data = &data_8939, + }, { + .compatible = "qcom,msm8956-tsens", + .data = &data_8956, + }, { + .compatible = "qcom,msm8960-tsens", + .data = &data_8960, }, { .compatible = "qcom,msm8974-tsens", .data = &data_8974, diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h index f40b625f897e8c096b9884c90c0ee0bedc59a012..bbb1e8332821ccf99eb415524f1ba80b340f36e0 100644 --- a/drivers/thermal/qcom/tsens.h +++ b/drivers/thermal/qcom/tsens.h @@ -588,7 +588,7 @@ extern struct tsens_plat_data data_8960; extern struct tsens_plat_data data_8916, data_8939, data_8974; /* TSENS v1 targets */ -extern struct tsens_plat_data data_tsens_v1, data_8976; +extern struct tsens_plat_data data_tsens_v1, data_8976, data_8956; /* TSENS v2 targets */ extern struct tsens_plat_data data_8996, data_tsens_v2; diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c index b3c3f7e5851aba3a17ccb7f80f18586c5aac4e4c..33ce4b218d9efdefbb6b130bcaa70d8ebaf9142a 100644 --- a/drivers/tty/serial/8250/8250_dma.c +++ b/drivers/tty/serial/8250/8250_dma.c @@ -46,19 +46,39 @@ static void __dma_rx_complete(void *param) struct uart_8250_dma *dma = p->dma; struct tty_port *tty_port = &p->port.state->port; struct dma_tx_state state; + enum dma_status dma_status; int count; - dma->rx_running = 0; - dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state); + /* + * New DMA Rx can be started during the completion handler before it + * could acquire port's lock and it might still be ongoing. Don't to + * anything in such case. 
+ */ + dma_status = dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state); + if (dma_status == DMA_IN_PROGRESS) + return; count = dma->rx_size - state.residue; tty_insert_flip_string(tty_port, dma->rx_buf, count); p->port.icount.rx += count; + dma->rx_running = 0; tty_flip_buffer_push(tty_port); } +static void dma_rx_complete(void *param) +{ + struct uart_8250_port *p = param; + struct uart_8250_dma *dma = p->dma; + unsigned long flags; + + spin_lock_irqsave(&p->port.lock, flags); + if (dma->rx_running) + __dma_rx_complete(p); + spin_unlock_irqrestore(&p->port.lock, flags); +} + int serial8250_tx_dma(struct uart_8250_port *p) { struct uart_8250_dma *dma = p->dma; @@ -130,7 +150,7 @@ int serial8250_rx_dma(struct uart_8250_port *p) return -EBUSY; dma->rx_running = 1; - desc->callback = __dma_rx_complete; + desc->callback = dma_rx_complete; desc->callback_param = p; dma->rx_cookie = dmaengine_submit(desc); diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index 223695947b654269d3b9a844e78f294f27606ad6..9cb0e8673f826238b246112966535b3ff8522493 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -1448,12 +1448,32 @@ static void lpuart_break_ctl(struct uart_port *port, int break_state) static void lpuart32_break_ctl(struct uart_port *port, int break_state) { - unsigned long temp; + unsigned long temp, modem; + struct tty_struct *tty; + unsigned int cflag = 0; + + tty = tty_port_tty_get(&port->state->port); + if (tty) { + cflag = tty->termios.c_cflag; + tty_kref_put(tty); + } temp = lpuart32_read(port, UARTCTRL) & ~UARTCTRL_SBK; + modem = lpuart32_read(port, UARTMODIR); - if (break_state != 0) + if (break_state != 0) { temp |= UARTCTRL_SBK; + /* + * LPUART CTS has higher priority than SBK, need to disable CTS before + * asserting SBK to avoid any interference if flow control is enabled. + */ + if (cflag & CRTSCTS && modem & UARTMODIR_TXCTSE) + lpuart32_write(port, modem & ~UARTMODIR_TXCTSE, UARTMODIR); + } else { + /* Re-enable the CTS when break off. 
*/ + if (cflag & CRTSCTS && !(modem & UARTMODIR_TXCTSE)) + lpuart32_write(port, modem | UARTMODIR_TXCTSE, UARTMODIR); + } lpuart32_write(port, temp, UARTCTRL); } diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index 04b4ed5d06341f76e96c6f6a7cf7ac2b4f4130ed..7ece8d1a23cb3b1948d5f7afaa847728d690589c 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -1243,25 +1243,6 @@ static int sc16is7xx_probe(struct device *dev, } sched_set_fifo(s->kworker_task); -#ifdef CONFIG_GPIOLIB - if (devtype->nr_gpio) { - /* Setup GPIO cotroller */ - s->gpio.owner = THIS_MODULE; - s->gpio.parent = dev; - s->gpio.label = dev_name(dev); - s->gpio.direction_input = sc16is7xx_gpio_direction_input; - s->gpio.get = sc16is7xx_gpio_get; - s->gpio.direction_output = sc16is7xx_gpio_direction_output; - s->gpio.set = sc16is7xx_gpio_set; - s->gpio.base = -1; - s->gpio.ngpio = devtype->nr_gpio; - s->gpio.can_sleep = 1; - ret = gpiochip_add_data(&s->gpio, s); - if (ret) - goto out_thread; - } -#endif - /* reset device, purging any pending irq / data */ regmap_write(s->regmap, SC16IS7XX_IOCONTROL_REG << SC16IS7XX_REG_SHIFT, SC16IS7XX_IOCONTROL_SRESET_BIT); @@ -1327,6 +1308,25 @@ static int sc16is7xx_probe(struct device *dev, s->p[u].irda_mode = true; } +#ifdef CONFIG_GPIOLIB + if (devtype->nr_gpio) { + /* Setup GPIO cotroller */ + s->gpio.owner = THIS_MODULE; + s->gpio.parent = dev; + s->gpio.label = dev_name(dev); + s->gpio.direction_input = sc16is7xx_gpio_direction_input; + s->gpio.get = sc16is7xx_gpio_get; + s->gpio.direction_output = sc16is7xx_gpio_direction_output; + s->gpio.set = sc16is7xx_gpio_set; + s->gpio.base = -1; + s->gpio.ngpio = devtype->nr_gpio; + s->gpio.can_sleep = 1; + ret = gpiochip_add_data(&s->gpio, s); + if (ret) + goto out_thread; + } +#endif + /* * Setup interrupt. We first try to acquire the IRQ line as level IRQ. * If that succeeds, we can allow sharing the interrupt as well. 
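Registering the GPIO controller after the UART ports (as moved above) is also why the error labels in the following hunk are reordered: teardown has to run in the reverse order of setup. A generic sketch of that convention, using hypothetical setup_*/teardown_* helpers rather than the driver's real functions:

#include <linux/device.h>

/* Hypothetical helpers standing in for the real probe steps. */
static int setup_ports(struct device *dev);
static int setup_gpio(struct device *dev);
static int setup_irq(struct device *dev);
static void teardown_gpio(struct device *dev);
static void teardown_ports(struct device *dev);

static int example_probe(struct device *dev)
{
	int ret;

	ret = setup_ports(dev);		/* registered first ... */
	if (ret)
		return ret;

	ret = setup_gpio(dev);		/* ... registered second ... */
	if (ret)
		goto out_ports;

	ret = setup_irq(dev);
	if (ret)
		goto out_gpio;

	return 0;

out_gpio:
	teardown_gpio(dev);		/* ... so it is undone first */
out_ports:
	teardown_ports(dev);
	return ret;
}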
@@ -1346,18 +1346,19 @@ static int sc16is7xx_probe(struct device *dev, if (!ret) return 0; -out_ports: - for (i--; i >= 0; i--) { - uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port); - clear_bit(s->p[i].port.line, &sc16is7xx_lines); - } - #ifdef CONFIG_GPIOLIB if (devtype->nr_gpio) gpiochip_remove(&s->gpio); out_thread: #endif + +out_ports: + for (i--; i >= 0; i--) { + uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port); + clear_bit(s->p[i].port.line, &sc16is7xx_lines); + } + kthread_stop(s->kworker_task); out_clk: diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 669aef77a0bd0e2a5bcbcaba7cd6938b2e301ade..c37d2657308cd7062a5ff1d49740e63149e0e93e 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -1237,14 +1237,16 @@ static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver, { struct tty_struct *tty; - if (driver->ops->lookup) + if (driver->ops->lookup) { if (!file) tty = ERR_PTR(-EIO); else tty = driver->ops->lookup(driver, file, idx); - else + } else { + if (idx >= driver->num) + return ERR_PTR(-EINVAL); tty = driver->ttys[idx]; - + } if (!IS_ERR(tty)) tty_kref_get(tty); return tty; diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c index f566eb1839dc501edb1942f2ea41ab0a9ed75afd..1dc07f9214d575e0ce583f7ddfbef68a06b28351 100644 --- a/drivers/tty/vt/vc_screen.c +++ b/drivers/tty/vt/vc_screen.c @@ -403,10 +403,11 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) unsigned int this_round, skip = 0; int size; - ret = -ENXIO; vc = vcs_vc(inode, &viewed); - if (!vc) - goto unlock_out; + if (!vc) { + ret = -ENXIO; + break; + } /* Check whether we are above size each round, * as copy_to_user at the end of this loop @@ -414,10 +415,8 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) */ size = vcs_size(vc, attr, uni_mode); if (size < 0) { - if (read) - break; ret = size; - goto unlock_out; + break; } if (pos >= size) break; diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index ac347f9d5ef0b455391b594c681a5794f478eb22..63bb04d262d847ef40f6c0585e8856a337cb2927 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -982,6 +982,7 @@ static int register_root_hub(struct usb_hcd *hcd) { struct device *parent_dev = hcd->self.controller; struct usb_device *usb_dev = hcd->self.root_hub; + struct usb_device_descriptor *descr; const int devnum = 1; int retval; @@ -993,13 +994,16 @@ static int register_root_hub(struct usb_hcd *hcd) mutex_lock(&usb_bus_idr_lock); usb_dev->ep0.desc.wMaxPacketSize = cpu_to_le16(64); - retval = usb_get_device_descriptor(usb_dev, USB_DT_DEVICE_SIZE); - if (retval != sizeof usb_dev->descriptor) { + descr = usb_get_device_descriptor(usb_dev); + if (IS_ERR(descr)) { + retval = PTR_ERR(descr); mutex_unlock(&usb_bus_idr_lock); dev_dbg (parent_dev, "can't read %s device descriptor %d\n", dev_name(&usb_dev->dev), retval); - return (retval < 0) ? 
retval : -EMSGSIZE; + return retval; } + usb_dev->descriptor = *descr; + kfree(descr); if (le16_to_cpu(usb_dev->descriptor.bcdUSB) >= 0x0201) { retval = usb_get_bos_descriptor(usb_dev); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 5925b8eb9ee38181084a65e44a66aed49469bfdc..580604596499a7a46fe55b685d14cf65ca39b54b 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -2380,9 +2380,8 @@ static int usb_enumerate_device_otg(struct usb_device *udev) * usb_enumerate_device - Read device configs/intfs/otg (usbcore-internal) * @udev: newly addressed device (in ADDRESS state) * - * This is only called by usb_new_device() and usb_authorize_device() - * and FIXME -- all comments that apply to them apply here wrt to - * environment. + * This is only called by usb_new_device() -- all comments that apply there + * apply here wrt to environment. * * If the device is WUSB and not authorized, we don't attempt to read * the string descriptors, as they will be errored out by the device @@ -2647,12 +2646,17 @@ int usb_authorize_device(struct usb_device *usb_dev) } if (usb_dev->wusb) { - result = usb_get_device_descriptor(usb_dev, sizeof(usb_dev->descriptor)); - if (result < 0) { + struct usb_device_descriptor *descr; + + descr = usb_get_device_descriptor(usb_dev); + if (IS_ERR(descr)) { + result = PTR_ERR(descr); dev_err(&usb_dev->dev, "can't re-read device descriptor for " "authorization: %d\n", result); goto error_device_descriptor; } + usb_dev->descriptor = *descr; + kfree(descr); } usb_dev->authorized = 1; @@ -4595,6 +4599,67 @@ static int hub_enable_device(struct usb_device *udev) return hcd->driver->enable_device(hcd, udev); } +/* + * Get the bMaxPacketSize0 value during initialization by reading the + * device's device descriptor. Since we don't already know this value, + * the transfer is unsafe and it ignores I/O errors, only testing for + * reasonable received values. + * + * For "old scheme" initialization, size will be 8 so we read just the + * start of the device descriptor, which should work okay regardless of + * the actual bMaxPacketSize0 value. For "new scheme" initialization, + * size will be 64 (and buf will point to a sufficiently large buffer), + * which might not be kosher according to the USB spec but it's what + * Windows does and what many devices expect. + * + * Returns: bMaxPacketSize0 or a negative error code. + */ +static int get_bMaxPacketSize0(struct usb_device *udev, + struct usb_device_descriptor *buf, int size, bool first_time) +{ + int i, rc; + + /* + * Retry on all errors; some devices are flakey. + * 255 is for WUSB devices, we actually need to use + * 512 (WUSB1.0[4.8.1]). + */ + for (i = 0; i < GET_MAXPACKET0_TRIES; ++i) { + /* Start with invalid values in case the transfer fails */ + buf->bDescriptorType = buf->bMaxPacketSize0 = 0; + rc = usb_control_msg(udev, usb_rcvaddr0pipe(), + USB_REQ_GET_DESCRIPTOR, USB_DIR_IN, + USB_DT_DEVICE << 8, 0, + buf, size, + initial_descriptor_timeout); + switch (buf->bMaxPacketSize0) { + case 8: case 16: case 32: case 64: case 9: + if (buf->bDescriptorType == USB_DT_DEVICE) { + rc = buf->bMaxPacketSize0; + break; + } + fallthrough; + default: + if (rc >= 0) + rc = -EPROTO; + break; + } + + /* + * Some devices time out if they are powered on + * when already connected. They need a second + * reset, so return early. But only on the first + * attempt, lest we get into a time-out/reset loop. 
+ */ + if (rc > 0 || (rc == -ETIMEDOUT && first_time && + udev->speed > USB_SPEED_FULL)) + break; + } + return rc; +} + +#define GET_DESCRIPTOR_BUFSIZE 64 + /* Reset device, (re)assign address, get device descriptor. * Device connection must be stable, no more debouncing needed. * Returns device in USB_STATE_ADDRESS, except on error. @@ -4604,10 +4669,17 @@ static int hub_enable_device(struct usb_device *udev) * the port lock. For a newly detected device that is not accessible * through any global pointers, it's not necessary to lock the device, * but it is still necessary to lock the port. + * + * For a newly detected device, @dev_descr must be NULL. The device + * descriptor retrieved from the device will then be stored in + * @udev->descriptor. For an already existing device, @dev_descr + * must be non-NULL. The device descriptor will be stored there, + * not in @udev->descriptor, because descriptors for registered + * devices are meant to be immutable. */ static int hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, - int retry_counter) + int retry_counter, struct usb_device_descriptor *dev_descr) { struct usb_device *hdev = hub->hdev; struct usb_hcd *hcd = bus_to_hcd(hdev->bus); @@ -4619,6 +4691,13 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, int devnum = udev->devnum; const char *driver_name; bool do_new_scheme; + const bool initial = !dev_descr; + int maxp0; + struct usb_device_descriptor *buf, *descr; + + buf = kmalloc(GET_DESCRIPTOR_BUFSIZE, GFP_NOIO); + if (!buf) + return -ENOMEM; /* root hub ports have a slightly longer reset period * (from USB 2.0 spec, section 7.1.7.5) @@ -4651,32 +4730,34 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, } oldspeed = udev->speed; - /* USB 2.0 section 5.5.3 talks about ep0 maxpacket ... - * it's fixed size except for full speed devices. - * For Wireless USB devices, ep0 max packet is always 512 (tho - * reported as 0xff in the device descriptor). WUSB1.0[4.8.1]. - */ - switch (udev->speed) { - case USB_SPEED_SUPER_PLUS: - case USB_SPEED_SUPER: - case USB_SPEED_WIRELESS: /* fixed at 512 */ - udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512); - break; - case USB_SPEED_HIGH: /* fixed at 64 */ - udev->ep0.desc.wMaxPacketSize = cpu_to_le16(64); - break; - case USB_SPEED_FULL: /* 8, 16, 32, or 64 */ - /* to determine the ep0 maxpacket size, try to read - * the device descriptor to get bMaxPacketSize0 and - * then correct our initial guess. + if (initial) { + /* USB 2.0 section 5.5.3 talks about ep0 maxpacket ... + * it's fixed size except for full speed devices. + * For Wireless USB devices, ep0 max packet is always 512 (tho + * reported as 0xff in the device descriptor). WUSB1.0[4.8.1]. */ - udev->ep0.desc.wMaxPacketSize = cpu_to_le16(64); - break; - case USB_SPEED_LOW: /* fixed at 8 */ - udev->ep0.desc.wMaxPacketSize = cpu_to_le16(8); - break; - default: - goto fail; + switch (udev->speed) { + case USB_SPEED_SUPER_PLUS: + case USB_SPEED_SUPER: + case USB_SPEED_WIRELESS: /* fixed at 512 */ + udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512); + break; + case USB_SPEED_HIGH: /* fixed at 64 */ + udev->ep0.desc.wMaxPacketSize = cpu_to_le16(64); + break; + case USB_SPEED_FULL: /* 8, 16, 32, or 64 */ + /* to determine the ep0 maxpacket size, try to read + * the device descriptor to get bMaxPacketSize0 and + * then correct our initial guess. 
+ */ + udev->ep0.desc.wMaxPacketSize = cpu_to_le16(64); + break; + case USB_SPEED_LOW: /* fixed at 8 */ + udev->ep0.desc.wMaxPacketSize = cpu_to_le16(8); + break; + default: + goto fail; + } } if (udev->speed == USB_SPEED_WIRELESS) @@ -4699,22 +4780,24 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, if (udev->speed < USB_SPEED_SUPER) dev_info(&udev->dev, "%s %s USB device number %d using %s\n", - (udev->config) ? "reset" : "new", speed, + (initial ? "new" : "reset"), speed, devnum, driver_name); - /* Set up TT records, if needed */ - if (hdev->tt) { - udev->tt = hdev->tt; - udev->ttport = hdev->ttport; - } else if (udev->speed != USB_SPEED_HIGH - && hdev->speed == USB_SPEED_HIGH) { - if (!hub->tt.hub) { - dev_err(&udev->dev, "parent hub has no TT\n"); - retval = -EINVAL; - goto fail; + if (initial) { + /* Set up TT records, if needed */ + if (hdev->tt) { + udev->tt = hdev->tt; + udev->ttport = hdev->ttport; + } else if (udev->speed != USB_SPEED_HIGH + && hdev->speed == USB_SPEED_HIGH) { + if (!hub->tt.hub) { + dev_err(&udev->dev, "parent hub has no TT\n"); + retval = -EINVAL; + goto fail; + } + udev->tt = &hub->tt; + udev->ttport = port1; } - udev->tt = &hub->tt; - udev->ttport = port1; } /* Why interleave GET_DESCRIPTOR and SET_ADDRESS this way? @@ -4733,9 +4816,6 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, for (retries = 0; retries < GET_DESCRIPTOR_TRIES; (++retries, msleep(100))) { if (do_new_scheme) { - struct usb_device_descriptor *buf; - int r = 0; - retval = hub_enable_device(udev); if (retval < 0) { dev_err(&udev->dev, @@ -4744,52 +4824,14 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, goto fail; } -#define GET_DESCRIPTOR_BUFSIZE 64 - buf = kmalloc(GET_DESCRIPTOR_BUFSIZE, GFP_NOIO); - if (!buf) { - retval = -ENOMEM; - continue; - } - - /* Retry on all errors; some devices are flakey. - * 255 is for WUSB devices, we actually need to use - * 512 (WUSB1.0[4.8.1]). - */ - for (operations = 0; operations < GET_MAXPACKET0_TRIES; - ++operations) { - buf->bMaxPacketSize0 = 0; - r = usb_control_msg(udev, usb_rcvaddr0pipe(), - USB_REQ_GET_DESCRIPTOR, USB_DIR_IN, - USB_DT_DEVICE << 8, 0, - buf, GET_DESCRIPTOR_BUFSIZE, - initial_descriptor_timeout); - switch (buf->bMaxPacketSize0) { - case 8: case 16: case 32: case 64: case 255: - if (buf->bDescriptorType == - USB_DT_DEVICE) { - r = 0; - break; - } - fallthrough; - default: - if (r == 0) - r = -EPROTO; - break; - } - /* - * Some devices time out if they are powered on - * when already connected. They need a second - * reset. 
But only on the first attempt, - * lest we get into a time out/reset loop - */ - if (r == 0 || (r == -ETIMEDOUT && - retries == 0 && - udev->speed > USB_SPEED_FULL)) - break; + maxp0 = get_bMaxPacketSize0(udev, buf, + GET_DESCRIPTOR_BUFSIZE, retries == 0); + if (maxp0 > 0 && !initial && + maxp0 != udev->descriptor.bMaxPacketSize0) { + dev_err(&udev->dev, "device reset changed ep0 maxpacket size!\n"); + retval = -ENODEV; + goto fail; } - udev->descriptor.bMaxPacketSize0 = - buf->bMaxPacketSize0; - kfree(buf); retval = hub_port_reset(hub, port1, udev, delay, false); if (retval < 0) /* error or disconnect */ @@ -4800,14 +4842,13 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, retval = -ENODEV; goto fail; } - if (r) { - if (r != -ENODEV) + if (maxp0 < 0) { + if (maxp0 != -ENODEV) dev_err(&udev->dev, "device descriptor read/64, error %d\n", - r); - retval = -EMSGSIZE; + maxp0); + retval = maxp0; continue; } -#undef GET_DESCRIPTOR_BUFSIZE } /* @@ -4849,18 +4890,22 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, break; } - retval = usb_get_device_descriptor(udev, 8); - if (retval < 8) { + /* !do_new_scheme || wusb */ + maxp0 = get_bMaxPacketSize0(udev, buf, 8, retries == 0); + if (maxp0 < 0) { + retval = maxp0; if (retval != -ENODEV) dev_err(&udev->dev, "device descriptor read/8, error %d\n", retval); - if (retval >= 0) - retval = -EMSGSIZE; } else { u32 delay; - retval = 0; + if (!initial && maxp0 != udev->descriptor.bMaxPacketSize0) { + dev_err(&udev->dev, "device reset changed ep0 maxpacket size!\n"); + retval = -ENODEV; + goto fail; + } delay = udev->parent->hub_delay; udev->hub_delay = min_t(u32, delay, @@ -4879,48 +4924,61 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, goto fail; /* - * Some superspeed devices have finished the link training process - * and attached to a superspeed hub port, but the device descriptor - * got from those devices show they aren't superspeed devices. Warm - * reset the port attached by the devices can fix them. + * Check the ep0 maxpacket guess and correct it if necessary. + * maxp0 is the value stored in the device descriptor; + * i is the value it encodes (logarithmic for SuperSpeed or greater). 
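The decode described above is implemented by the code that follows; as a standalone worked example (example_decode_maxp0() is hypothetical, not a kernel API), full- and high-speed devices report the ep0 packet size directly, while SuperSpeed and faster report an exponent, so a descriptor value of 9 decodes to 1 << 9 = 512 bytes:

#include <linux/errno.h>
#include <linux/usb/ch9.h>

static int example_decode_maxp0(enum usb_device_speed speed, int maxp0)
{
	if (speed >= USB_SPEED_SUPER)
		return (maxp0 <= 16) ? (1 << maxp0) : -EINVAL;

	/* FS/HS: 8, 16, 32 or 64; LS: 8 */
	return maxp0;
}

For instance, example_decode_maxp0(USB_SPEED_SUPER, 9) yields 512, matching the fixed 512-byte ep0 size assumed for SuperSpeed devices above.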
*/ - if ((udev->speed >= USB_SPEED_SUPER) && - (le16_to_cpu(udev->descriptor.bcdUSB) < 0x0300)) { - dev_err(&udev->dev, "got a wrong device descriptor, " - "warm reset device\n"); - hub_port_reset(hub, port1, udev, - HUB_BH_RESET_TIME, true); - retval = -EINVAL; - goto fail; - } - - if (udev->descriptor.bMaxPacketSize0 == 0xff || - udev->speed >= USB_SPEED_SUPER) - i = 512; - else - i = udev->descriptor.bMaxPacketSize0; - if (usb_endpoint_maxp(&udev->ep0.desc) != i) { - if (udev->speed == USB_SPEED_LOW || - !(i == 8 || i == 16 || i == 32 || i == 64)) { - dev_err(&udev->dev, "Invalid ep0 maxpacket: %d\n", i); - retval = -EMSGSIZE; - goto fail; - } + i = maxp0; + if (udev->speed >= USB_SPEED_SUPER) { + if (maxp0 <= 16) + i = 1 << maxp0; + else + i = 0; /* Invalid */ + } + if (usb_endpoint_maxp(&udev->ep0.desc) == i) { + ; /* Initial ep0 maxpacket guess is right */ + } else if ((udev->speed == USB_SPEED_FULL || + udev->speed == USB_SPEED_HIGH) && + (i == 8 || i == 16 || i == 32 || i == 64)) { + /* Initial guess is wrong; use the descriptor's value */ if (udev->speed == USB_SPEED_FULL) dev_dbg(&udev->dev, "ep0 maxpacket = %d\n", i); else dev_warn(&udev->dev, "Using ep0 maxpacket: %d\n", i); udev->ep0.desc.wMaxPacketSize = cpu_to_le16(i); usb_ep0_reinit(udev); + } else { + /* Initial guess is wrong and descriptor's value is invalid */ + dev_err(&udev->dev, "Invalid ep0 maxpacket: %d\n", maxp0); + retval = -EMSGSIZE; + goto fail; } - retval = usb_get_device_descriptor(udev, USB_DT_DEVICE_SIZE); - if (retval < (signed)sizeof(udev->descriptor)) { + descr = usb_get_device_descriptor(udev); + if (IS_ERR(descr)) { + retval = PTR_ERR(descr); if (retval != -ENODEV) dev_err(&udev->dev, "device descriptor read/all, error %d\n", retval); - if (retval >= 0) - retval = -ENOMSG; + goto fail; + } + if (initial) + udev->descriptor = *descr; + else + *dev_descr = *descr; + kfree(descr); + + /* + * Some superspeed devices have finished the link training process + * and attached to a superspeed hub port, but the device descriptor + * got from those devices show they aren't superspeed devices. Warm + * reset the port attached by the devices can fix them. 
+ */ + if ((udev->speed >= USB_SPEED_SUPER) && + (le16_to_cpu(udev->descriptor.bcdUSB) < 0x0300)) { + dev_err(&udev->dev, "got a wrong device descriptor, warm reset device\n"); + hub_port_reset(hub, port1, udev, HUB_BH_RESET_TIME, true); + retval = -EINVAL; goto fail; } @@ -4944,6 +5002,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, hub_port_disable(hub, port1, 0); update_devnum(udev, devnum); /* for disconnect processing */ } + kfree(buf); return retval; } @@ -5024,7 +5083,7 @@ hub_power_remaining(struct usb_hub *hub) static int descriptors_changed(struct usb_device *udev, - struct usb_device_descriptor *old_device_descriptor, + struct usb_device_descriptor *new_device_descriptor, struct usb_host_bos *old_bos) { int changed = 0; @@ -5035,8 +5094,8 @@ static int descriptors_changed(struct usb_device *udev, int length; char *buf; - if (memcmp(&udev->descriptor, old_device_descriptor, - sizeof(*old_device_descriptor)) != 0) + if (memcmp(&udev->descriptor, new_device_descriptor, + sizeof(*new_device_descriptor)) != 0) return 1; if ((old_bos && !udev->bos) || (!old_bos && udev->bos)) @@ -5209,7 +5268,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, } /* reset (non-USB 3.0 devices) and get descriptor */ - status = hub_port_init(hub, udev, port1, i); + status = hub_port_init(hub, udev, port1, i, NULL); if (status < 0) goto loop; @@ -5356,9 +5415,8 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1, { struct usb_port *port_dev = hub->ports[port1 - 1]; struct usb_device *udev = port_dev->child; - struct usb_device_descriptor descriptor; + struct usb_device_descriptor *descr; int status = -ENODEV; - int retval; dev_dbg(&port_dev->dev, "status %04x, change %04x, %s\n", portstatus, portchange, portspeed(hub, portstatus)); @@ -5385,23 +5443,20 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1, * changed device descriptors before resuscitating the * device. */ - descriptor = udev->descriptor; - retval = usb_get_device_descriptor(udev, - sizeof(udev->descriptor)); - if (retval < 0) { + descr = usb_get_device_descriptor(udev); + if (IS_ERR(descr)) { dev_dbg(&udev->dev, - "can't read device descriptor %d\n", - retval); + "can't read device descriptor %ld\n", + PTR_ERR(descr)); } else { - if (descriptors_changed(udev, &descriptor, + if (descriptors_changed(udev, descr, udev->bos)) { dev_dbg(&udev->dev, "device descriptor has changed\n"); - /* for disconnect() calls */ - udev->descriptor = descriptor; } else { status = 0; /* Nothing to do */ } + kfree(descr); } #ifdef CONFIG_PM } else if (udev->state == USB_STATE_SUSPENDED && @@ -5829,7 +5884,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev) struct usb_device *parent_hdev = udev->parent; struct usb_hub *parent_hub; struct usb_hcd *hcd = bus_to_hcd(udev->bus); - struct usb_device_descriptor descriptor = udev->descriptor; + struct usb_device_descriptor descriptor; struct usb_host_bos *bos; int i, j, ret = 0; int port1 = udev->portnum; @@ -5871,7 +5926,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev) /* ep0 maxpacket size may change; let the HCD know about it. * Other endpoints will be handled by re-enumeration. 
*/ usb_ep0_reinit(udev); - ret = hub_port_init(parent_hub, udev, port1, i); + ret = hub_port_init(parent_hub, udev, port1, i, &descriptor); if (ret >= 0 || ret == -ENOTCONN || ret == -ENODEV) break; } @@ -5883,7 +5938,6 @@ static int usb_reset_and_verify_device(struct usb_device *udev) /* Device might have changed firmware (DFU or similar) */ if (descriptors_changed(udev, &descriptor, bos)) { dev_info(&udev->dev, "device firmware changed\n"); - udev->descriptor = descriptor; /* for disconnect() calls */ goto re_enumerate; } diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index dba2baca486e758d7f33b6fb2ab603acdbb32fd0..d64aaff223e79c2ce7f97c09a5c1eb2ec52af252 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -1039,39 +1039,34 @@ char *usb_cache_string(struct usb_device *udev, int index) } /* - * usb_get_device_descriptor - (re)reads the device descriptor (usbcore) - * @dev: the device whose device descriptor is being updated - * @size: how much of the descriptor to read + * usb_get_device_descriptor - read the device descriptor + * @udev: the device whose device descriptor should be read * Context: !in_interrupt () * - * Updates the copy of the device descriptor stored in the device structure, - * which dedicates space for this purpose. - * * Not exported, only for use by the core. If drivers really want to read * the device descriptor directly, they can call usb_get_descriptor() with * type = USB_DT_DEVICE and index = 0. * - * This call is synchronous, and may not be used in an interrupt context. - * - * Return: The number of bytes received on success, or else the status code - * returned by the underlying usb_control_msg() call. + * Returns: a pointer to a dynamically allocated usb_device_descriptor + * structure (which the caller must deallocate), or an ERR_PTR value. 
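That return convention is what the hcd.c and hub.c hunks in this patch follow: the caller owns the allocation, copies it where it belongs and frees it. A condensed sketch (example_refresh_descriptor() is a hypothetical wrapper around the usbcore-internal helper):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include "usb.h"	/* usbcore-internal: usb_get_device_descriptor() */

static int example_refresh_descriptor(struct usb_device *udev)
{
	struct usb_device_descriptor *descr;

	descr = usb_get_device_descriptor(udev);
	if (IS_ERR(descr))
		return PTR_ERR(descr);

	udev->descriptor = *descr;	/* struct copy into the device */
	kfree(descr);
	return 0;
}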
*/ -int usb_get_device_descriptor(struct usb_device *dev, unsigned int size) +struct usb_device_descriptor *usb_get_device_descriptor(struct usb_device *udev) { struct usb_device_descriptor *desc; int ret; - if (size > sizeof(*desc)) - return -EINVAL; desc = kmalloc(sizeof(*desc), GFP_NOIO); if (!desc) - return -ENOMEM; + return ERR_PTR(-ENOMEM); + + ret = usb_get_descriptor(udev, USB_DT_DEVICE, 0, desc, sizeof(*desc)); + if (ret == sizeof(*desc)) + return desc; - ret = usb_get_descriptor(dev, USB_DT_DEVICE, 0, desc, size); if (ret >= 0) - memcpy(&dev->descriptor, desc, size); + ret = -EMSGSIZE; kfree(desc); - return ret; + return ERR_PTR(ret); } /* diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 6d24d138cc77e8567013cf4b4b91bf7f04444610..4ac1c22f13be0d759931388971781ef21dd2b099 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -527,6 +527,9 @@ static const struct usb_device_id usb_quirk_list[] = { /* DJI CineSSD */ { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM }, + /* Alcor Link AK9563 SC Reader used in 2022 Lenovo ThinkPads */ + { USB_DEVICE(0x2ce3, 0x9563), .driver_info = USB_QUIRK_NO_LPM }, + /* DELL USB GEN2 */ { USB_DEVICE(0x413c, 0xb062), .driver_info = USB_QUIRK_NO_LPM | USB_QUIRK_RESET_RESUME }, diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c index 8d134193fa0cf0ff7da568e5ee7f7b4bbdb30598..a2ca38e25e0c33ab6a2c7aace36573068e3d9e36 100644 --- a/drivers/usb/core/sysfs.c +++ b/drivers/usb/core/sysfs.c @@ -889,11 +889,7 @@ read_descriptors(struct file *filp, struct kobject *kobj, size_t srclen, n; int cfgno; void *src; - int retval; - retval = usb_lock_device_interruptible(udev); - if (retval < 0) - return -EINTR; /* The binary attribute begins with the device descriptor. * Following that are the raw descriptor entries for all the * configurations (config plus subsidiary descriptors). 
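As the comment above says, the sysfs blob is the raw device descriptor followed by the raw configuration descriptors, so its first 18 bytes parse directly as a struct usb_device_descriptor. A small userspace sketch of reading it (illustrative only; the path is an example, and multi-byte fields are little-endian as on the wire):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/usb/ch9.h>

int main(int argc, char **argv)
{
	struct usb_device_descriptor dd;	/* 18 bytes, packed */
	int fd;

	if (argc < 2)
		return 1;

	/* e.g. /sys/bus/usb/devices/1-1/descriptors */
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;

	if (read(fd, &dd, sizeof(dd)) != (ssize_t)sizeof(dd)) {
		close(fd);
		return 1;
	}

	printf("idVendor=%04x idProduct=%04x bNumConfigurations=%u\n",
	       dd.idVendor, dd.idProduct, dd.bNumConfigurations);
	close(fd);
	return 0;
}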
@@ -918,7 +914,6 @@ read_descriptors(struct file *filp, struct kobject *kobj, off -= srclen; } } - usb_unlock_device(udev); return count - nleft; } diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h index 82538daac8b8962b975dead6752706f44046b82a..3bb2e1db42b5d20493e23b452b75cf18290a44a6 100644 --- a/drivers/usb/core/usb.h +++ b/drivers/usb/core/usb.h @@ -42,8 +42,8 @@ extern bool usb_endpoint_is_ignored(struct usb_device *udev, struct usb_endpoint_descriptor *epd); extern int usb_remove_device(struct usb_device *udev); -extern int usb_get_device_descriptor(struct usb_device *dev, - unsigned int size); +extern struct usb_device_descriptor *usb_get_device_descriptor( + struct usb_device *udev); extern int usb_set_isoch_delay(struct usb_device *dev); extern int usb_get_bos_descriptor(struct usb_device *dev); extern void usb_release_bos_descriptor(struct usb_device *dev); diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c index 5f31f55c16ba66538fff084423102f74d557cd63..416c94c612f55bed6da11177fb621bc86bea6890 100644 --- a/drivers/usb/dwc3/dwc3-qcom.c +++ b/drivers/usb/dwc3/dwc3-qcom.c @@ -115,7 +115,7 @@ static inline void dwc3_qcom_clrbits(void __iomem *base, u32 offset, u32 val) readl(base + offset); } -static void dwc3_qcom_vbus_overrride_enable(struct dwc3_qcom *qcom, bool enable) +static void dwc3_qcom_vbus_override_enable(struct dwc3_qcom *qcom, bool enable) { if (enable) { dwc3_qcom_setbits(qcom->qscratch_base, QSCRATCH_SS_PHY_CTRL, @@ -136,7 +136,7 @@ static int dwc3_qcom_vbus_notifier(struct notifier_block *nb, struct dwc3_qcom *qcom = container_of(nb, struct dwc3_qcom, vbus_nb); /* enable vbus override for device mode */ - dwc3_qcom_vbus_overrride_enable(qcom, event); + dwc3_qcom_vbus_override_enable(qcom, event); qcom->mode = event ? USB_DR_MODE_PERIPHERAL : USB_DR_MODE_HOST; return NOTIFY_DONE; @@ -148,7 +148,7 @@ static int dwc3_qcom_host_notifier(struct notifier_block *nb, struct dwc3_qcom *qcom = container_of(nb, struct dwc3_qcom, host_nb); /* disable vbus override in host mode */ - dwc3_qcom_vbus_overrride_enable(qcom, !event); + dwc3_qcom_vbus_override_enable(qcom, !event); qcom->mode = event ? 
USB_DR_MODE_HOST : USB_DR_MODE_PERIPHERAL; return NOTIFY_DONE; @@ -839,8 +839,8 @@ static int dwc3_qcom_probe(struct platform_device *pdev) qcom->mode = usb_get_dr_mode(&qcom->dwc3->dev); /* enable vbus override for device mode */ - if (qcom->mode == USB_DR_MODE_PERIPHERAL) - dwc3_qcom_vbus_overrride_enable(qcom, true); + if (qcom->mode != USB_DR_MODE_HOST) + dwc3_qcom_vbus_override_enable(qcom, true); /* register extcon to override sw_vbus on Vbus change later */ ret = dwc3_qcom_register_extcon(qcom); diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index bb0d92837f6770f804310b25faaa2bd7d5453c4a..8c48c9f801be201b5dfd4ac048b5123f49f89f47 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -278,6 +278,11 @@ static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len) struct usb_request *req = ffs->ep0req; int ret; + if (!req) { + spin_unlock_irq(&ffs->ev.waitq.lock); + return -EINVAL; + } + req->zero = len < le16_to_cpu(ffs->ev.setup.wLength); spin_unlock_irq(&ffs->ev.waitq.lock); @@ -1881,10 +1886,14 @@ static void functionfs_unbind(struct ffs_data *ffs) ENTER(); if (!WARN_ON(!ffs->gadget)) { + /* dequeue before freeing ep0req */ + usb_ep_dequeue(ffs->gadget->ep0, ffs->ep0req); + mutex_lock(&ffs->mutex); usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req); ffs->ep0req = NULL; ffs->gadget = NULL; clear_bit(FFS_FL_BOUND, &ffs->flags); + mutex_unlock(&ffs->mutex); ffs_data_put(ffs); } } diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c index 2caccbb6e014063c03fa35d061d8efd9c28cac67..7b54e814aefb1ccc76e6facf2fbf05f2b595bd5d 100644 --- a/drivers/usb/gadget/function/u_serial.c +++ b/drivers/usb/gadget/function/u_serial.c @@ -81,6 +81,9 @@ #define WRITE_BUF_SIZE 8192 /* TX only */ #define GS_CONSOLE_BUF_SIZE 8192 +/* Prevents race conditions while accessing gser->ioport */ +static DEFINE_SPINLOCK(serial_port_lock); + /* console info */ struct gs_console { struct console console; @@ -1376,8 +1379,10 @@ void gserial_disconnect(struct gserial *gser) if (!port) return; + spin_lock_irqsave(&serial_port_lock, flags); + /* tell the TTY glue not to do I/O here any more */ - spin_lock_irqsave(&port->port_lock, flags); + spin_lock(&port->port_lock); gs_console_disconnect(port); @@ -1392,7 +1397,8 @@ void gserial_disconnect(struct gserial *gser) tty_hangup(port->port.tty); } port->suspended = false; - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock(&port->port_lock); + spin_unlock_irqrestore(&serial_port_lock, flags); /* disable endpoints, aborting down any active I/O */ usb_ep_disable(gser->out); @@ -1426,10 +1432,19 @@ EXPORT_SYMBOL_GPL(gserial_suspend); void gserial_resume(struct gserial *gser) { - struct gs_port *port = gser->ioport; + struct gs_port *port; unsigned long flags; - spin_lock_irqsave(&port->port_lock, flags); + spin_lock_irqsave(&serial_port_lock, flags); + port = gser->ioport; + + if (!port) { + spin_unlock_irqrestore(&serial_port_lock, flags); + return; + } + + spin_lock(&port->port_lock); + spin_unlock(&serial_port_lock); port->suspended = false; if (!port->start_delayed) { spin_unlock_irqrestore(&port->port_lock, flags); diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c index 00fb58e50a1554ec2e7a53e4d65a071553245b71..2db01170d096dd4f3f3613340f025a3780f12788 100644 --- a/drivers/usb/gadget/function/uvc_configfs.c +++ b/drivers/usb/gadget/function/uvc_configfs.c @@ -505,11 +505,68 @@ 
UVC_ATTR_RO(uvcg_default_output_, cname, aname) UVCG_DEFAULT_OUTPUT_ATTR(b_terminal_id, bTerminalID, 8); UVCG_DEFAULT_OUTPUT_ATTR(w_terminal_type, wTerminalType, 16); UVCG_DEFAULT_OUTPUT_ATTR(b_assoc_terminal, bAssocTerminal, 8); -UVCG_DEFAULT_OUTPUT_ATTR(b_source_id, bSourceID, 8); UVCG_DEFAULT_OUTPUT_ATTR(i_terminal, iTerminal, 8); #undef UVCG_DEFAULT_OUTPUT_ATTR +static ssize_t uvcg_default_output_b_source_id_show(struct config_item *item, + char *page) +{ + struct config_group *group = to_config_group(item); + struct f_uvc_opts *opts; + struct config_item *opts_item; + struct mutex *su_mutex = &group->cg_subsys->su_mutex; + struct uvc_output_terminal_descriptor *cd; + int result; + + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + opts_item = group->cg_item.ci_parent->ci_parent-> + ci_parent->ci_parent; + opts = to_f_uvc_opts(opts_item); + cd = &opts->uvc_output_terminal; + + mutex_lock(&opts->lock); + result = sprintf(page, "%u\n", le8_to_cpu(cd->bSourceID)); + mutex_unlock(&opts->lock); + + mutex_unlock(su_mutex); + + return result; +} + +static ssize_t uvcg_default_output_b_source_id_store(struct config_item *item, + const char *page, size_t len) +{ + struct config_group *group = to_config_group(item); + struct f_uvc_opts *opts; + struct config_item *opts_item; + struct mutex *su_mutex = &group->cg_subsys->su_mutex; + struct uvc_output_terminal_descriptor *cd; + int result; + u8 num; + + result = kstrtou8(page, 0, &num); + if (result) + return result; + + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + opts_item = group->cg_item.ci_parent->ci_parent-> + ci_parent->ci_parent; + opts = to_f_uvc_opts(opts_item); + cd = &opts->uvc_output_terminal; + + mutex_lock(&opts->lock); + cd->bSourceID = num; + mutex_unlock(&opts->lock); + + mutex_unlock(su_mutex); + + return len; +} +UVC_ATTR(uvcg_default_output_, b_source_id, bSourceID); + static struct configfs_attribute *uvcg_default_output_attrs[] = { &uvcg_default_output_attr_b_terminal_id, &uvcg_default_output_attr_w_terminal_type, diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c index 8ca1a235d1645144824e1cdd129926f699f4a298..eabccf25796b2785f29c3d9dcf20bce7c0a4defa 100644 --- a/drivers/usb/host/xhci-mvebu.c +++ b/drivers/usb/host/xhci-mvebu.c @@ -33,7 +33,7 @@ static void xhci_mvebu_mbus_config(void __iomem *base, /* Program each DRAM CS in a seperate window */ for (win = 0; win < dram->num_cs; win++) { - const struct mbus_dram_window *cs = dram->cs + win; + const struct mbus_dram_window *cs = &dram->cs[win]; writel(((cs->size - 1) & 0xffff0000) | (cs->mbus_attr << 8) | (dram->mbus_dram_target_id << 4) | 1, diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 2967372a99880804c0ad181113a048cfdee47217..473b0b64dd57288bda2df84db69cf9d3ea1cc0f4 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -696,6 +696,8 @@ int xhci_run(struct usb_hcd *hcd) xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_run for USB2 roothub"); + set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags); + xhci_dbc_init(xhci); xhci_debugfs_init(xhci); diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 2fc65cbbfea957a9a984327364f7f58b5ae7a806..14a7af7f3bcd7c3f969d0695ed257030bb309fbc 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -402,6 +402,8 @@ static void option_instat_callback(struct urb *urb); #define LONGCHEER_VENDOR_ID 0x1c9e /* 4G Systems products */ +/* This one was sold as the VW and Skoda "Carstick LTE" */ 
+#define FOUR_G_SYSTEMS_PRODUCT_CARSTICK_LTE 0x7605 /* This is the 4G XS Stick W14 a.k.a. Mobilcom Debitel Surf-Stick * * It seems to contain a Qualcomm QSC6240/6290 chipset */ #define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603 @@ -1976,6 +1978,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(2) }, { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) }, + { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_CARSTICK_LTE), + .driver_info = RSVD(0) }, { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14), .driver_info = NCTRL(0) | NCTRL(1) }, { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100), diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c index c9ce1c25c80cc59b61701c868d0d8ff8206e993c..737398f1b896a1c984e7a6f4990ee0044a90774e 100644 --- a/drivers/usb/storage/ene_ub6250.c +++ b/drivers/usb/storage/ene_ub6250.c @@ -938,7 +938,7 @@ static int ms_lib_process_bootblock(struct us_data *us, u16 PhyBlock, u8 *PageDa struct ms_lib_type_extdat ExtraData; struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; - PageBuffer = kmalloc(MS_BYTES_PER_PAGE, GFP_KERNEL); + PageBuffer = kzalloc(MS_BYTES_PER_PAGE * 2, GFP_KERNEL); if (PageBuffer == NULL) return (u32)-1; diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c index eed719cf55525f8d63d5566e1f4628ae394cefec..e8eaca5a84db2bd759a7b2cd3e19fc193d5210b2 100644 --- a/drivers/usb/typec/altmodes/displayport.c +++ b/drivers/usb/typec/altmodes/displayport.c @@ -524,10 +524,10 @@ int dp_altmode_probe(struct typec_altmode *alt) /* FIXME: Port can only be DFP_U. */ /* Make sure we have compatiple pin configurations */ - if (!(DP_CAP_DFP_D_PIN_ASSIGN(port->vdo) & - DP_CAP_UFP_D_PIN_ASSIGN(alt->vdo)) && - !(DP_CAP_UFP_D_PIN_ASSIGN(port->vdo) & - DP_CAP_DFP_D_PIN_ASSIGN(alt->vdo))) + if (!(DP_CAP_PIN_ASSIGN_DFP_D(port->vdo) & + DP_CAP_PIN_ASSIGN_UFP_D(alt->vdo)) && + !(DP_CAP_PIN_ASSIGN_UFP_D(port->vdo) & + DP_CAP_PIN_ASSIGN_DFP_D(alt->vdo))) return -ENODEV; ret = sysfs_create_group(&alt->dev.kobj, &dp_altmode_group); diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c index 32c9925de473638fc1ea4fa9aad67db9c30411da..1f94ea46c01a52993be8ebc4fb9171d964675571 100644 --- a/drivers/vdpa/mlx5/core/mr.c +++ b/drivers/vdpa/mlx5/core/mr.c @@ -448,7 +448,6 @@ void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev) unmap_direct_mr(mvdev, dmr); kfree(dmr); } - memset(mr, 0, sizeof(*mr)); mr->initialized = false; out: mutex_unlock(&mr->mkey_mtx); diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index ce50ca9a320c7ce3af74c06903e46931c0f481c6..ec1428dbdf9d992726c22304ac87e0eb245f674b 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -96,6 +96,7 @@ struct vfio_dma { struct task_struct *task; struct rb_root pfn_list; /* Ex-user pinned pfn list */ unsigned long *bitmap; + struct mm_struct *mm; }; struct vfio_batch { @@ -391,8 +392,8 @@ static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async) if (!npage) return 0; - mm = async ? 
get_task_mm(dma->task) : dma->task->mm; - if (!mm) + mm = dma->mm; + if (async && !mmget_not_zero(mm)) return -ESRCH; /* process exited */ ret = mmap_write_lock_killable(mm); @@ -666,8 +667,8 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr, struct mm_struct *mm; int ret; - mm = get_task_mm(dma->task); - if (!mm) + mm = dma->mm; + if (!mmget_not_zero(mm)) return -ENODEV; ret = vaddr_get_pfns(mm, vaddr, 1, dma->prot, pfn_base, pages); @@ -677,7 +678,7 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr, ret = 0; if (do_accounting && !is_invalid_reserved_pfn(*pfn_base)) { - ret = vfio_lock_acct(dma, 1, true); + ret = vfio_lock_acct(dma, 1, false); if (ret) { put_pfn(*pfn_base, dma->prot); if (ret == -ENOMEM) @@ -1031,6 +1032,7 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma) vfio_unmap_unpin(iommu, dma, true); vfio_unlink_dma(iommu, dma); put_task_struct(dma->task); + mmdrop(dma->mm); vfio_dma_bitmap_free(dma); kfree(dma); iommu->dma_avail++; @@ -1452,29 +1454,15 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu, * against the locked memory limit and we need to be able to do both * outside of this call path as pinning can be asynchronous via the * external interfaces for mdev devices. RLIMIT_MEMLOCK requires a - * task_struct and VM locked pages requires an mm_struct, however - * holding an indefinite mm reference is not recommended, therefore we - * only hold a reference to a task. We could hold a reference to - * current, however QEMU uses this call path through vCPU threads, - * which can be killed resulting in a NULL mm and failure in the unmap - * path when called via a different thread. Avoid this problem by - * using the group_leader as threads within the same group require - * both CLONE_THREAD and CLONE_VM and will therefore use the same - * mm_struct. - * - * Previously we also used the task for testing CAP_IPC_LOCK at the - * time of pinning and accounting, however has_capability() makes use - * of real_cred, a copy-on-write field, so we can't guarantee that it - * matches group_leader, or in fact that it might not change by the - * time it's evaluated. If a process were to call MAP_DMA with - * CAP_IPC_LOCK but later drop it, it doesn't make sense that they - * possibly see different results for an iommu_mapped vfio_dma vs - * externally mapped. Therefore track CAP_IPC_LOCK in vfio_dma at the - * time of calling MAP_DMA. + * task_struct. Save the group_leader so that all DMA tracking uses + * the same task, to make debugging easier. VM locked pages requires + * an mm_struct, so grab the mm in case the task dies. */ get_task_struct(current->group_leader); dma->task = current->group_leader; dma->lock_cap = capable(CAP_IPC_LOCK); + dma->mm = current->mm; + mmgrab(dma->mm); dma->pfn_list = RB_ROOT; @@ -2998,9 +2986,8 @@ static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu, !(dma->prot & IOMMU_READ)) return -EPERM; - mm = get_task_mm(dma->task); - - if (!mm) + mm = dma->mm; + if (!mmget_not_zero(mm)) return -EPERM; if (kthread) diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index a5731e40d3405fde63ea5c60ad70f6ccea3c534a..fb24b4f1957f8001df6d8c535efa0979f3392715 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -1515,6 +1515,9 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) nvq = &n->vqs[index]; mutex_lock(&vq->mutex); + if (fd == -1) + vhost_clear_msg(&n->dev); + /* Verify that ring has been setup correctly. 
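The vfio_iommu_type1 change above replaces get_task_mm() at pin time with an mm saved at MAP_DMA time: mmgrab() keeps the mm_struct itself alive (released with mmdrop() in vfio_remove_dma()), while mmget_not_zero() temporarily pins the address space before it is actually used. A sketch of that two-level reference pattern (example_use_saved_mm() is hypothetical):

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>

static int example_use_saved_mm(struct mm_struct *mm)
{
	int ret;

	/* Fails once the owning process has exited. */
	if (!mmget_not_zero(mm))
		return -ESRCH;

	ret = mmap_write_lock_killable(mm);
	if (!ret)
		mmap_write_unlock(mm);

	mmput(mm);	/* drop the reference taken by mmget_not_zero() */
	return ret;
}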
*/ if (!vhost_vq_access_ok(vq)) { r = -EFAULT; diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index da00a5c57db65f4b9f019f0d090d1f35a3c3aa70..1f9a1554ce5f4dd8832ce9d2cc06ee51c7ab0451 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -669,7 +669,7 @@ void vhost_dev_stop(struct vhost_dev *dev) } EXPORT_SYMBOL_GPL(vhost_dev_stop); -static void vhost_clear_msg(struct vhost_dev *dev) +void vhost_clear_msg(struct vhost_dev *dev) { struct vhost_msg_node *node, *n; @@ -687,6 +687,7 @@ static void vhost_clear_msg(struct vhost_dev *dev) spin_unlock(&dev->iotlb_lock); } +EXPORT_SYMBOL_GPL(vhost_clear_msg); void vhost_dev_cleanup(struct vhost_dev *dev) { diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index b063324c7669d20bda8b0b45f822ae419c047997..8f80d6b0d843e4327961cf0d1549c1660f64e22b 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -183,6 +183,7 @@ long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp); long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp); bool vhost_vq_access_ok(struct vhost_virtqueue *vq); bool vhost_log_access_ok(struct vhost_dev *); +void vhost_clear_msg(struct vhost_dev *dev); int vhost_get_vq_desc(struct vhost_virtqueue *, struct iovec iov[], unsigned int iov_count, diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c index 5fa3f1e5dfe886af77a81bb758358a55420e204c..b3295cd7fd4f963a030fc8e31931195527ef30be 100644 --- a/drivers/video/fbdev/smscufx.c +++ b/drivers/video/fbdev/smscufx.c @@ -1621,7 +1621,7 @@ static int ufx_usb_probe(struct usb_interface *interface, struct usb_device *usbdev; struct ufx_data *dev; struct fb_info *info; - int retval; + int retval = -ENOMEM; u32 id_rev, fpga_rev; /* usb initialization */ @@ -1653,15 +1653,17 @@ static int ufx_usb_probe(struct usb_interface *interface, if (!ufx_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) { dev_err(dev->gdev, "ufx_alloc_urb_list failed\n"); - goto e_nomem; + goto put_ref; } /* We don't register a new USB class. 
Our client interface is fbdev */ /* allocates framebuffer driver structure, not framebuffer memory */ info = framebuffer_alloc(0, &usbdev->dev); - if (!info) - goto e_nomem; + if (!info) { + dev_err(dev->gdev, "framebuffer_alloc failed\n"); + goto free_urb_list; + } dev->info = info; info->par = dev; @@ -1704,22 +1706,34 @@ static int ufx_usb_probe(struct usb_interface *interface, check_warn_goto_error(retval, "unable to find common mode for display and adapter"); retval = ufx_reg_set_bits(dev, 0x4000, 0x00000001); - check_warn_goto_error(retval, "error %d enabling graphics engine", retval); + if (retval < 0) { + dev_err(dev->gdev, "error %d enabling graphics engine", retval); + goto setup_modes; + } /* ready to begin using device */ atomic_set(&dev->usb_active, 1); dev_dbg(dev->gdev, "checking var"); retval = ufx_ops_check_var(&info->var, info); - check_warn_goto_error(retval, "error %d ufx_ops_check_var", retval); + if (retval < 0) { + dev_err(dev->gdev, "error %d ufx_ops_check_var", retval); + goto reset_active; + } dev_dbg(dev->gdev, "setting par"); retval = ufx_ops_set_par(info); - check_warn_goto_error(retval, "error %d ufx_ops_set_par", retval); + if (retval < 0) { + dev_err(dev->gdev, "error %d ufx_ops_set_par", retval); + goto reset_active; + } dev_dbg(dev->gdev, "registering framebuffer"); retval = register_framebuffer(info); - check_warn_goto_error(retval, "error %d register_framebuffer", retval); + if (retval < 0) { + dev_err(dev->gdev, "error %d register_framebuffer", retval); + goto reset_active; + } dev_info(dev->gdev, "SMSC UDX USB device /dev/fb%d attached. %dx%d resolution." " Using %dK framebuffer memory\n", info->node, @@ -1727,21 +1741,23 @@ static int ufx_usb_probe(struct usb_interface *interface, return 0; -error: - fb_dealloc_cmap(&info->cmap); -destroy_modedb: +reset_active: + atomic_set(&dev->usb_active, 0); +setup_modes: fb_destroy_modedb(info->monspecs.modedb); vfree(info->screen_base); fb_destroy_modelist(&info->modelist); +error: + fb_dealloc_cmap(&info->cmap); +destroy_modedb: framebuffer_release(info); +free_urb_list: + if (dev->urbs.count > 0) + ufx_free_urb_list(dev); put_ref: kref_put(&dev->kref, ufx_free); /* ref for framebuffer */ kref_put(&dev->kref, ufx_free); /* last ref from kref_init */ return retval; - -e_nomem: - retval = -ENOMEM; - goto put_ref; } static void ufx_usb_disconnect(struct usb_interface *interface) diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c index 15a2ee32f116ea9e7801a8047c9c492ae91a5b89..15842377c8d2c5c7c4b35f54489e14436343c950 100644 --- a/drivers/w1/w1.c +++ b/drivers/w1/w1.c @@ -1131,6 +1131,8 @@ int w1_process(void *data) /* remainder if it woke up early */ unsigned long jremain = 0; + atomic_inc(&dev->refcnt); + for (;;) { if (!jremain && dev->search_count) { @@ -1158,8 +1160,10 @@ int w1_process(void *data) */ mutex_unlock(&dev->list_mutex); - if (kthread_should_stop()) + if (kthread_should_stop()) { + __set_current_state(TASK_RUNNING); break; + } /* Only sleep when the search is active. 
*/ if (dev->search_count) { diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c index b3e1792d9c49fce5da50cdaf67e6278f4a95454e..3a71c5eb2f837fee980adcf2d3fbcfce3346e342 100644 --- a/drivers/w1/w1_int.c +++ b/drivers/w1/w1_int.c @@ -51,10 +51,9 @@ static struct w1_master *w1_alloc_dev(u32 id, int slave_count, int slave_ttl, dev->search_count = w1_search_count; dev->enable_pullup = w1_enable_pullup; - /* 1 for w1_process to decrement - * 1 for __w1_remove_master_device to decrement + /* For __w1_remove_master_device to decrement */ - atomic_set(&dev->refcnt, 2); + atomic_set(&dev->refcnt, 1); INIT_LIST_HEAD(&dev->slist); INIT_LIST_HEAD(&dev->async_list); diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c index 292b5a1ca8318a50040957f68effe0685095530b..fed7be24644209919bcbfd0332e294dbd62c44db 100644 --- a/drivers/watchdog/at91sam9_wdt.c +++ b/drivers/watchdog/at91sam9_wdt.c @@ -206,10 +206,9 @@ static int at91_wdt_init(struct platform_device *pdev, struct at91wdt *wdt) "min heartbeat and max heartbeat might be too close for the system to handle it correctly\n"); if ((tmp & AT91_WDT_WDFIEN) && wdt->irq) { - err = request_irq(wdt->irq, wdt_interrupt, - IRQF_SHARED | IRQF_IRQPOLL | - IRQF_NO_SUSPEND, - pdev->name, wdt); + err = devm_request_irq(dev, wdt->irq, wdt_interrupt, + IRQF_SHARED | IRQF_IRQPOLL | IRQF_NO_SUSPEND, + pdev->name, wdt); if (err) return err; } diff --git a/drivers/watchdog/diag288_wdt.c b/drivers/watchdog/diag288_wdt.c index aafc8d98bf9fd4dbd8a97c038438b5a2e39f608e..370f648cb4b1a13f7edfdf4b2e12aef9ed7e4ffe 100644 --- a/drivers/watchdog/diag288_wdt.c +++ b/drivers/watchdog/diag288_wdt.c @@ -86,7 +86,7 @@ static int __diag288(unsigned int func, unsigned int timeout, "1:\n" EX_TABLE(0b, 1b) : "+d" (err) : "d"(__func), "d"(__timeout), - "d"(__action), "d"(__len) : "1", "cc"); + "d"(__action), "d"(__len) : "1", "cc", "memory"); return err; } @@ -272,12 +272,21 @@ static int __init diag288_init(void) char ebc_begin[] = { 194, 197, 199, 201, 213 }; + char *ebc_cmd; watchdog_set_nowayout(&wdt_dev, nowayout_info); if (MACHINE_IS_VM) { - if (__diag288_vm(WDT_FUNC_INIT, 15, - ebc_begin, sizeof(ebc_begin)) != 0) { + ebc_cmd = kmalloc(sizeof(ebc_begin), GFP_KERNEL); + if (!ebc_cmd) { + pr_err("The watchdog cannot be initialized\n"); + return -ENOMEM; + } + memcpy(ebc_cmd, ebc_begin, sizeof(ebc_begin)); + ret = __diag288_vm(WDT_FUNC_INIT, 15, + ebc_cmd, sizeof(ebc_begin)); + kfree(ebc_cmd); + if (ret != 0) { pr_err("The watchdog cannot be initialized\n"); return -EINVAL; } diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c index 1bdaf17c1d38d7d7d6610f39577033445f880270..8202f0a6b0935361efa3d00adcbdf6c3f6b9fc74 100644 --- a/drivers/watchdog/pcwd_usb.c +++ b/drivers/watchdog/pcwd_usb.c @@ -325,7 +325,8 @@ static int usb_pcwd_set_heartbeat(struct usb_pcwd_private *usb_pcwd, int t) static int usb_pcwd_get_temperature(struct usb_pcwd_private *usb_pcwd, int *temperature) { - unsigned char msb, lsb; + unsigned char msb = 0x00; + unsigned char lsb = 0x00; usb_pcwd_send_command(usb_pcwd, CMD_READ_TEMP, &msb, &lsb); @@ -341,7 +342,8 @@ static int usb_pcwd_get_temperature(struct usb_pcwd_private *usb_pcwd, static int usb_pcwd_get_timeleft(struct usb_pcwd_private *usb_pcwd, int *time_left) { - unsigned char msb, lsb; + unsigned char msb = 0x00; + unsigned char lsb = 0x00; /* Read the time that's left before rebooting */ /* Note: if the board is not yet armed then we will read 0xFFFF */ diff --git a/drivers/watchdog/watchdog_dev.c 
b/drivers/watchdog/watchdog_dev.c index 2ee017442dfcd1c87275b2d4787fef2ffbbf3714..f37255cd75fdf4d6e785cc4a72f522e45c345426 100644 --- a/drivers/watchdog/watchdog_dev.c +++ b/drivers/watchdog/watchdog_dev.c @@ -1037,8 +1037,8 @@ static int watchdog_cdev_register(struct watchdog_device *wdd) if (wdd->id == 0) { misc_deregister(&watchdog_miscdev); old_wd_data = NULL; - put_device(&wd_data->dev); } + put_device(&wd_data->dev); return err; } diff --git a/fs/Kconfig b/fs/Kconfig index efc725d7c628e7a0e864bcc02e40422466a1f374..3b4a3f49188aab0799e02d36c95c0e5c1d05c916 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -43,6 +43,7 @@ source "fs/btrfs/Kconfig" source "fs/nilfs2/Kconfig" source "fs/f2fs/Kconfig" source "fs/zonefs/Kconfig" +source "fs/proc/memory_security/Kconfig" config FS_DAX bool "Direct Access (DAX) support" @@ -115,6 +116,8 @@ config MANDATORY_FILE_LOCKING source "fs/crypto/Kconfig" +source "fs/code_sign/Kconfig" + source "fs/verity/Kconfig" source "fs/notify/Kconfig" diff --git a/fs/Makefile b/fs/Makefile index 74989c30c3fb240cee76863f008b5ce9ecc198c0..5036d569163229243ed1e5cf45b82117e34523bd 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -34,6 +34,7 @@ obj-$(CONFIG_USERFAULTFD) += userfaultfd.o obj-$(CONFIG_AIO) += aio.o obj-$(CONFIG_FS_DAX) += dax.o obj-$(CONFIG_FS_ENCRYPTION) += crypto/ +obj-$(CONFIG_SECURITY_CODE_SIGN) += code_sign/ obj-$(CONFIG_FS_VERITY) += verity/ obj-$(CONFIG_FILE_LOCKING) += locks.o obj-$(CONFIG_BINFMT_AOUT) += binfmt_aout.o diff --git a/fs/affs/file.c b/fs/affs/file.c index d91b0133d95da5e225e01f3a6e9ef76b89c575d0..c3d89fa1bab7736b6b41e974aeb55a27decde4b3 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -879,7 +879,7 @@ affs_truncate(struct inode *inode) if (inode->i_size > AFFS_I(inode)->mmu_private) { struct address_space *mapping = inode->i_mapping; struct page *page; - void *fsdata; + void *fsdata = NULL; loff_t isize = inode->i_size; int res; diff --git a/fs/aio.c b/fs/aio.c index 5e5333d72c6997a8aa45f3822638c2d2cdef0eb6..b2396cd4a7755cc33292639368295fa0c87b8940 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -335,6 +335,9 @@ static int aio_ring_mremap(struct vm_area_struct *vma) spin_lock(&mm->ioctx_lock); rcu_read_lock(); table = rcu_dereference(mm->ioctx_table); + if (!table) + goto out_unlock; + for (i = 0; i < table->nr; i++) { struct kioctx *ctx; @@ -348,6 +351,7 @@ static int aio_ring_mremap(struct vm_area_struct *vma) } } +out_unlock: rcu_read_unlock(); spin_unlock(&mm->ioctx_lock); return res; diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 4a6ba0997e3998185b43a318ccfdccee7d0a7c34..b081b61e97c8d8279e6adf37a79cae82e757acbb 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -7276,10 +7276,10 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg) /* * Check that we don't overflow at later allocations, we request * clone_sources_count + 1 items, and compare to unsigned long inside - * access_ok. + * access_ok. Also set an upper limit for allocation size so this can't + * easily exhaust memory. Max number of clone sources is about 200K. 
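For scale, a back-of-the-envelope check of the "about 200K" figure (assuming struct clone_root is on the order of 40 bytes on a 64-bit build):

/*
 * SZ_8M / sizeof(struct clone_root)
 *	~= (8 * 1024 * 1024) / 40
 *	~= 209715 clone sources
 *
 * which keeps the temporary clone-sources allocation bounded by 8 MiB
 * instead of the previous ~ULONG_MAX-sized ceiling.
 */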
*/ - if (arg->clone_sources_count > - ULONG_MAX / sizeof(struct clone_root) - 1) { + if (arg->clone_sources_count > SZ_8M / sizeof(struct clone_root)) { ret = -EINVAL; goto out; } diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index d4d89e0738ff4402911fb0f424ea9c9a43920552..15435f983180fd1ee7d3b4b7cde242f526b31166 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -381,6 +381,7 @@ void btrfs_free_device(struct btrfs_device *device) static void free_fs_devices(struct btrfs_fs_devices *fs_devices) { struct btrfs_device *device; + WARN_ON(fs_devices->opened); while (!list_empty(&fs_devices->devices)) { device = list_entry(fs_devices->devices.next, @@ -1227,9 +1228,22 @@ void btrfs_close_devices(struct btrfs_fs_devices *fs_devices) mutex_lock(&uuid_mutex); close_fs_devices(fs_devices); - if (!fs_devices->opened) + if (!fs_devices->opened) { list_splice_init(&fs_devices->seed_list, &list); + /* + * If the struct btrfs_fs_devices is not assembled with any + * other device, it can be re-initialized during the next mount + * without the needing device-scan step. Therefore, it can be + * fully freed. + */ + if (fs_devices->num_devices == 1) { + list_del(&fs_devices->fs_list); + free_fs_devices(fs_devices); + } + } + + list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) { close_fs_devices(fs_devices); list_del(&fs_devices->seed_list); @@ -1580,7 +1594,7 @@ static int find_free_dev_extent_start(struct btrfs_device *device, goto out; } - while (1) { + while (search_start < search_end) { l = path->nodes[0]; slot = path->slots[0]; if (slot >= btrfs_header_nritems(l)) { @@ -1603,6 +1617,9 @@ static int find_free_dev_extent_start(struct btrfs_device *device, if (key.type != BTRFS_DEV_EXTENT_KEY) goto next; + if (key.offset > search_end) + break; + if (key.offset > search_start) { hole_size = key.offset - search_start; dev_extent_hole_check(device, &search_start, &hole_size, @@ -1663,6 +1680,7 @@ static int find_free_dev_extent_start(struct btrfs_device *device, else ret = 0; + ASSERT(max_hole_start + max_hole_size <= search_end); out: btrfs_free_path(path); *start = max_hole_start; diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index 05615a1099dbc5f6c2267357a0a6be51a9e5097f..673d74d7f71820ef8c4bca21134e8e89271ad6e2 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -63,7 +63,7 @@ struct list_head *zlib_alloc_workspace(unsigned int level) workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL), zlib_inflate_workspacesize()); - workspace->strm.workspace = kvmalloc(workspacesize, GFP_KERNEL); + workspace->strm.workspace = kvzalloc(workspacesize, GFP_KERNEL); workspace->level = level; workspace->buf = NULL; /* diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index fa51872ff850482b19d291984ef51079e2141ffd..87a9e9096421accd243912cce5d3609da4809f06 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -3496,6 +3496,12 @@ static void handle_session(struct ceph_mds_session *session, break; case CEPH_SESSION_FLUSHMSG: + /* flush cap releases */ + spin_lock(&session->s_cap_lock); + if (session->s_num_cap_releases) + ceph_flush_cap_releases(mdsc, session); + spin_unlock(&session->s_cap_lock); + send_flushmsg_ack(mdsc, session, seq); break; diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 144064dc0d38a892f1f209437fed85bba800dd38..5fe85dc0e2651e2c0d8981968c82a71e3aaf9ac3 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -3539,7 +3539,7 @@ uncached_fill_pages(struct TCP_Server_Info *server, rdata->got_bytes += result; } - return rdata->got_bytes > 0 && 
result != -ECONNABORTED ? + return result != -ECONNABORTED && rdata->got_bytes > 0 ? rdata->got_bytes : result; } @@ -4302,7 +4302,7 @@ readpages_fill_pages(struct TCP_Server_Info *server, rdata->got_bytes += result; } - return rdata->got_bytes > 0 && result != -ECONNABORTED ? + return result != -ECONNABORTED && rdata->got_bytes > 0 ? rdata->got_bytes : result; } diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 844db4652dd1d9247219afe60126648a415d5fbd..8fdd34ff20ef541c1f60b7116e21778024b25c55 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -859,12 +859,13 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon, bool no_cached_open = tcon->nohandlecache; struct cached_fid *cfid = NULL; - oparms.tcon = tcon; - oparms.desired_access = FILE_READ_ATTRIBUTES; - oparms.disposition = FILE_OPEN; - oparms.create_options = cifs_create_options(cifs_sb, 0); - oparms.fid = &fid; - oparms.reconnect = false; + oparms = (struct cifs_open_parms) { + .tcon = tcon, + .desired_access = FILE_READ_ATTRIBUTES, + .disposition = FILE_OPEN, + .create_options = cifs_create_options(cifs_sb, 0), + .fid = &fid, + }; if (no_cached_open) { rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c index b029ed31ef916d979bde6d3523973c1b14e3276d..bcc611069308ac89a53ad60f5abae8f795ee387b 100644 --- a/fs/cifs/smbdirect.c +++ b/fs/cifs/smbdirect.c @@ -1394,6 +1394,7 @@ void smbd_destroy(struct TCP_Server_Info *server) destroy_workqueue(info->workqueue); log_rdma_event(INFO, "rdma session destroyed\n"); kfree(info); + server->smbd_conn = NULL; } /* @@ -1690,6 +1691,7 @@ static struct smbd_connection *_smbd_get_connection( allocate_mr_failed: /* At this point, need to a full transport shutdown */ + server->smbd_conn = info; smbd_destroy(server); return NULL; @@ -2238,6 +2240,7 @@ static int allocate_mr_list(struct smbd_connection *info) atomic_set(&info->mr_ready_count, 0); atomic_set(&info->mr_used_count, 0); init_waitqueue_head(&info->wait_for_mr_cleanup); + INIT_WORK(&info->mr_recovery_work, smbd_mr_recovery_work); /* Allocate more MRs (2x) than hardware responder_resources */ for (i = 0; i < info->responder_resources * 2; i++) { smbdirect_mr = kzalloc(sizeof(*smbdirect_mr), GFP_KERNEL); @@ -2265,13 +2268,13 @@ static int allocate_mr_list(struct smbd_connection *info) list_add_tail(&smbdirect_mr->list, &info->mr_list); atomic_inc(&info->mr_ready_count); } - INIT_WORK(&info->mr_recovery_work, smbd_mr_recovery_work); return 0; out: kfree(smbdirect_mr); list_for_each_entry_safe(smbdirect_mr, tmp, &info->mr_list, list) { + list_del(&smbdirect_mr->list); ib_dereg_mr(smbdirect_mr->mr); kfree(smbdirect_mr->sgl); kfree(smbdirect_mr); diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c index eb3b1898da462afd84c62bd9646eb968d4c84225..610484c90260bf044f7eb2119d2c42dc739dbc54 100644 --- a/fs/coda/upcall.c +++ b/fs/coda/upcall.c @@ -790,7 +790,7 @@ static int coda_upcall(struct venus_comm *vcp, sig_req = kmalloc(sizeof(struct upc_req), GFP_KERNEL); if (!sig_req) goto exit; - sig_inputArgs = kvzalloc(sizeof(struct coda_in_hdr), GFP_KERNEL); + sig_inputArgs = kvzalloc(sizeof(*sig_inputArgs), GFP_KERNEL); if (!sig_inputArgs) { kfree(sig_req); goto exit; diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c index dedbc55cd48f55f1e895dea0dd56ea590bd17ae3..6a61aef49c2a1110f172601b2f66c3f3c6fc235b 100644 --- a/fs/exfat/dir.c +++ b/fs/exfat/dir.c @@ -33,6 +33,7 @@ static void exfat_get_uniname_from_ext_entry(struct super_block *sb, { int i; struct exfat_entry_set_cache *es; + 
unsigned int uni_len = 0, len; es = exfat_get_dentry_set(sb, p_dir, entry, ES_ALL_ENTRIES); if (!es) @@ -51,7 +52,10 @@ static void exfat_get_uniname_from_ext_entry(struct super_block *sb, if (exfat_get_entry_type(ep) != TYPE_EXTEND) break; - exfat_extract_uni_name(ep, uniname); + len = exfat_extract_uni_name(ep, uniname); + uni_len += len; + if (len != EXFAT_FILE_NAME_LEN || uni_len >= MAX_NAME_LENGTH) + break; uniname += EXFAT_FILE_NAME_LEN; } @@ -102,7 +106,7 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent clu.dir = ei->hint_bmap.clu; } - while (clu_offset > 0) { + while (clu_offset > 0 && clu.dir != EXFAT_EOF_CLUSTER) { if (exfat_get_next_cluster(sb, &(clu.dir))) return -EIO; @@ -236,10 +240,7 @@ static int exfat_iterate(struct file *filp, struct dir_context *ctx) fake_offset = 1; } - if (cpos & (DENTRY_SIZE - 1)) { - err = -ENOENT; - goto unlock; - } + cpos = round_up(cpos, DENTRY_SIZE); /* name buffer should be allocated before use */ err = exfat_alloc_namebuf(nb); @@ -1026,7 +1027,8 @@ int exfat_find_dir_entry(struct super_block *sb, struct exfat_inode_info *ei, if (entry_type == TYPE_EXTEND) { unsigned short entry_uniname[16], unichar; - if (step != DIRENT_STEP_NAME) { + if (step != DIRENT_STEP_NAME || + name_len >= MAX_NAME_LENGTH) { step = DIRENT_STEP_FILE; continue; } diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h index 0d139c7d150d9b6741befcd0d407e7dba340f8e5..07b09af57436fba33ca5033a37ff0ca2b7a73439 100644 --- a/fs/exfat/exfat_fs.h +++ b/fs/exfat/exfat_fs.h @@ -42,7 +42,7 @@ enum { #define ES_2_ENTRIES 2 #define ES_ALL_ENTRIES 0 -#define DIR_DELETED 0xFFFF0321 +#define DIR_DELETED 0xFFFFFFF7 /* type values */ #define TYPE_UNUSED 0x0000 diff --git a/fs/exfat/file.c b/fs/exfat/file.c index c819e8427ea577f1734c783986fbddc19445b721..819f47278305e9463023df56d58a5381fb2a7739 100644 --- a/fs/exfat/file.c +++ b/fs/exfat/file.c @@ -250,8 +250,7 @@ void exfat_truncate(struct inode *inode, loff_t size) else mark_inode_dirty(inode); - inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> - inode->i_blkbits; + inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> 9; write_size: aligned_size = i_size_read(inode); if (aligned_size & (blocksize - 1)) { diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c index 2a9f6a80584ee3d63d9325b3f540d5457fa9a217..4bd73820a4ac0620936cd2eb9f9ff21184474363 100644 --- a/fs/exfat/inode.c +++ b/fs/exfat/inode.c @@ -242,8 +242,7 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset, return err; } /* end of if != DIR_DELETED */ - inode->i_blocks += - num_to_be_allocated << sbi->sect_per_clus_bits; + inode->i_blocks += EXFAT_CLU_TO_B(num_to_be_allocated, sbi) >> 9; /* * Move *clu pointer along FAT chains (hole care) because the @@ -600,8 +599,7 @@ static int exfat_fill_inode(struct inode *inode, struct exfat_dir_entry *info) exfat_save_attr(inode, info->attr); - inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> - inode->i_blkbits; + inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> 9; inode->i_mtime = info->mtime; inode->i_ctime = info->mtime; ei->i_crtime = info->crtime; diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c index 935f6005090091342c109456c50c0e5a27b8bc5b..1382d816912c8c00b25970c11208a62ee61259c2 100644 --- a/fs/exfat/namei.c +++ b/fs/exfat/namei.c @@ -398,7 +398,7 @@ static int exfat_find_empty_entry(struct inode *inode, ei->i_size_ondisk += sbi->cluster_size; ei->i_size_aligned += sbi->cluster_size; ei->flags = 
p_dir->flags; - inode->i_blocks += 1 << sbi->sect_per_clus_bits; + inode->i_blocks += sbi->cluster_size >> 9; } return dentry; diff --git a/fs/exfat/super.c b/fs/exfat/super.c index ba70ed1c980490cf593232722fe1e579459e0e7d..62d79af257a909c94cc4bde7936b68126ada5088 100644 --- a/fs/exfat/super.c +++ b/fs/exfat/super.c @@ -364,8 +364,7 @@ static int exfat_read_root(struct inode *inode) inode->i_op = &exfat_dir_inode_operations; inode->i_fop = &exfat_dir_operations; - inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> - inode->i_blkbits; + inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> 9; ei->i_pos = ((loff_t)sbi->root_dir << 32) | 0xffffffff; ei->i_size_aligned = i_size_read(inode); ei->i_size_ondisk = i_size_read(inode); diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index e502a060c173f4d4712ca25651d0ec0132f2948b..baf28781b2264f403684c512b7a8a28b1d2ff7c0 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -157,7 +157,6 @@ int ext4_find_inline_data_nolock(struct inode *inode) (void *)ext4_raw_inode(&is.iloc)); EXT4_I(inode)->i_inline_size = EXT4_MIN_INLINE_DATA_SIZE + le32_to_cpu(is.s.here->e_value_size); - ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA); } out: brelse(is.iloc.bh); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 5c373ebde016e43897229300b75acedd0393aa17..bcd6bde02cba40b2752b3da88a63dc8f06dacfd2 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -4737,8 +4737,13 @@ static inline int ext4_iget_extra_inode(struct inode *inode, if (EXT4_INODE_HAS_XATTR_SPACE(inode) && *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) { + int err; + ext4_set_inode_state(inode, EXT4_STATE_XATTR); - return ext4_find_inline_data_nolock(inode); + err = ext4_find_inline_data_nolock(inode); + if (!err && ext4_has_inline_data(inode)) + ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA); + return err; } else EXT4_I(inode)->i_inline_off = 0; return 0; diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 240d792db9f783af43457ba42e48f4447e56b686..53bdc67a815f6cb555ec9834fe2a05c5ac116b7a 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -180,6 +180,7 @@ static long swap_inode_boot_loader(struct super_block *sb, ei_bl->i_flags = 0; inode_set_iversion(inode_bl, 1); i_size_write(inode_bl, 0); + EXT4_I(inode_bl)->i_disksize = inode_bl->i_size; inode_bl->i_mode = S_IFREG; if (ext4_has_feature_extents(sb)) { ext4_set_inode_flag(inode_bl, EXT4_INODE_EXTENTS); diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 1531a53d09f107529df78015a10f99da05f64cf2..12de9f696d0c8b9d040e9e90f7b90a5011917abe 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -1512,11 +1512,10 @@ static struct buffer_head *__ext4_find_entry(struct inode *dir, int has_inline_data = 1; ret = ext4_find_inline_entry(dir, fname, res_dir, &has_inline_data); - if (has_inline_data) { - if (inlined) - *inlined = 1; + if (inlined) + *inlined = has_inline_data; + if (has_inline_data) goto cleanup_and_exit; - } } if ((namelen <= 2) && (name[0] == '.') && @@ -3724,7 +3723,8 @@ static void ext4_resetent(handle_t *handle, struct ext4_renament *ent, * so the old->de may no longer valid and need to find it again * before reset old inode info. 
*/ - old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL); + old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, + &old.inlined); if (IS_ERR(old.bh)) retval = PTR_ERR(old.bh); if (!old.bh) @@ -3889,9 +3889,20 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, return retval; } - old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL); - if (IS_ERR(old.bh)) - return PTR_ERR(old.bh); + /* + * We need to protect against old.inode directory getting converted + * from inline directory format into a normal one. + */ + if (S_ISDIR(old.inode->i_mode)) + inode_lock_nested(old.inode, I_MUTEX_NONDIR2); + + old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, + &old.inlined); + if (IS_ERR(old.bh)) { + retval = PTR_ERR(old.bh); + goto unlock_moved_dir; + } + /* * Check for inode number is _not_ due to possible IO errors. * We might rmdir the source, keep it as pwd of some process @@ -3949,8 +3960,10 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, goto end_rename; } retval = ext4_rename_dir_prepare(handle, &old); - if (retval) + if (retval) { + inode_unlock(old.inode); goto end_rename; + } } /* * If we're renaming a file within an inline_data dir and adding or @@ -4079,6 +4092,11 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, brelse(old.dir_bh); brelse(old.bh); brelse(new.bh); + +unlock_moved_dir: + if (S_ISDIR(old.inode->i_mode)) + inode_unlock(old.inode); + return retval; } diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 4569075a7da0c87f1b559bfc6958da1598756dd8..a94cc7b22d7ea59c17ab2ebd5b2ba1ef5a223ea1 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -416,7 +416,8 @@ static void io_submit_init_bio(struct ext4_io_submit *io, static void io_submit_add_bh(struct ext4_io_submit *io, struct inode *inode, - struct page *page, + struct page *pagecache_page, + struct page *bounce_page, struct buffer_head *bh) { int ret; @@ -430,10 +431,11 @@ static void io_submit_add_bh(struct ext4_io_submit *io, io_submit_init_bio(io, bh); io->io_bio->bi_write_hint = inode->i_write_hint; } - ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh)); + ret = bio_add_page(io->io_bio, bounce_page ?: pagecache_page, + bh->b_size, bh_offset(bh)); if (ret != bh->b_size) goto submit_and_retry; - wbc_account_cgroup_owner(io->io_wbc, page, bh->b_size); + wbc_account_cgroup_owner(io->io_wbc, pagecache_page, bh->b_size); io->io_next_block++; } @@ -551,8 +553,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io, do { if (!buffer_async_write(bh)) continue; - io_submit_add_bh(io, inode, - bounce_page ? 
bounce_page : page, bh); + io_submit_add_bh(io, inode, page, bounce_page, bh); nr_submitted++; clear_buffer_dirty(bh); } while ((bh = bh->b_this_page) != head); diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c index f24bef3be48a3e8ef462970efcc63be97eb934b4..ce74cde6d8faa738d91432b10f5dd68ae4f761ce 100644 --- a/fs/ext4/sysfs.c +++ b/fs/ext4/sysfs.c @@ -487,6 +487,11 @@ static void ext4_sb_release(struct kobject *kobj) complete(&sbi->s_kobj_unregister); } +static void ext4_feat_release(struct kobject *kobj) +{ + kfree(kobj); +} + static const struct sysfs_ops ext4_attr_ops = { .show = ext4_attr_show, .store = ext4_attr_store, @@ -501,7 +506,7 @@ static struct kobj_type ext4_sb_ktype = { static struct kobj_type ext4_feat_ktype = { .default_groups = ext4_feat_groups, .sysfs_ops = &ext4_attr_ops, - .release = (void (*)(struct kobject *))kfree, + .release = ext4_feat_release, }; static struct kobject *ext4_root; diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index d9826897135cb244d39075bbdefa708b70d89ba6..65338360bb9b7e546de5b71933dcbae1dd9fb538 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -1418,6 +1418,13 @@ static struct inode *ext4_xattr_inode_create(handle_t *handle, uid_t owner[2] = { i_uid_read(inode), i_gid_read(inode) }; int err; + if (inode->i_sb->s_root == NULL) { + ext4_warning(inode->i_sb, + "refuse to create EA inode when umounting"); + WARN_ON(1); + return ERR_PTR(-EINVAL); + } + /* * Let the next inode be the goal, so we try and allocate the EA inode * in the same group, or nearby one. @@ -2801,6 +2808,9 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize, (void *)header, total_ino); EXT4_I(inode)->i_extra_isize = new_extra_isize; + if (ext4_has_inline_data(inode)) + error = ext4_find_inline_data_nolock(inode); + cleanup: if (error && (mnt_count != le16_to_cpu(sbi->s_es->s_mnt_count))) { ext4_warning(inode->i_sb, "Unable to expand inode %lu. 
Delete some EAs or run e2fsck.", diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index c31bc50d79b95d8b82aa9c3c301c6fa3c2c9769b..31315229ea8107b0c1f436e6d281bc0239993060 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -721,7 +721,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio) } if (fio->io_wbc && !is_read_io(fio->op)) - wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE); + wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE); __attach_io_flag(fio); bio_set_op_attrs(bio, fio->op, fio->op_flags); @@ -929,7 +929,7 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio) } if (fio->io_wbc) - wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE); + wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE); inc_page_count(fio->sbi, WB_DATA_TYPE(page)); @@ -1003,7 +1003,7 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio) } if (fio->io_wbc) - wbc_account_cgroup_owner(fio->io_wbc, bio_page, PAGE_SIZE); + wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE); io->last_block_in_bio = fio->new_blkaddr; f2fs_trace_ios(fio, 0); @@ -1021,8 +1021,8 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio) static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx) { - return fsverity_active(inode) && - idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE); + return fsverity_active(inode) && (idx < + DIV_ROUND_UP(fsverity_get_verified_data_size(inode), PAGE_SIZE)); } static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr, diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index d56fcace182119702dc44044ac1bb3bf7a247bef..e3ffe09ce39c62a476e546ba9f78c723fd4c667e 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -3358,7 +3358,7 @@ static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg) return f2fs_resize_fs(sbi, block_count); } -static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg) +static inline int f2fs_has_feature_verity(struct file *filp) { struct inode *inode = file_inode(filp); @@ -3370,10 +3370,29 @@ static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg) inode->i_ino); return -EOPNOTSUPP; } + return 0; +} + +static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg) +{ + int err = f2fs_has_feature_verity(filp); + + if (err) + return err; return fsverity_ioctl_enable(filp, (const void __user *)arg); } +static int f2fs_ioc_enable_code_sign(struct file *filp, unsigned long arg) +{ + int err = f2fs_has_feature_verity(filp); + + if (err) + return err; + + return fsverity_ioctl_enable_code_sign(filp, (const void __user *)arg); +} + static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg) { if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp)))) @@ -4040,6 +4059,8 @@ static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return f2fs_ioc_enable_verity(filp, arg); case FS_IOC_MEASURE_VERITY: return f2fs_ioc_measure_verity(filp, arg); + case FS_IOC_ENABLE_CODE_SIGN: + return f2fs_ioc_enable_code_sign(filp, arg); case FS_IOC_GETFSLABEL: return f2fs_ioc_getfslabel(filp, arg); case FS_IOC_SETFSLABEL: @@ -4289,6 +4310,7 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case F2FS_IOC_RESIZE_FS: case FS_IOC_ENABLE_VERITY: case FS_IOC_MEASURE_VERITY: + case FS_IOC_ENABLE_CODE_SIGN: case FS_IOC_GETFSLABEL: case FS_IOC_SETFSLABEL: case F2FS_IOC_GET_COMPRESS_BLOCKS: diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 340cf44ce735e497b35137f878aa86309fd89758..10f9d453de213c02fcb66c10570b50efb1945370 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c 
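Aside (a sketch, not part of the series): the ext4 page-io and f2fs data-path hunks above share one pattern — when writeback goes through a bounce page (encryption or compression), the bounce page is what gets added to the bio, but cgroup writeback accounting is charged to the original pagecache page, because only that page carries the ownership of the cgroup being throttled; a freshly allocated bounce page does not. In the sketch below, submit_one_bh() is a hypothetical helper name; bio_add_page(), bh_offset() and wbc_account_cgroup_owner() are the interfaces the hunks actually use.

/*
 * Sketch only: put the data (possibly a bounce page) into the bio, but
 * charge the I/O to the pagecache page the user dirtied. Real callers
 * also check the bio_add_page() return value and retry with a new bio.
 */
static void submit_one_bh(struct bio *bio, struct writeback_control *wbc,
			  struct page *pagecache_page, struct page *bounce_page,
			  struct buffer_head *bh)
{
	struct page *data_page = bounce_page ?: pagecache_page;

	bio_add_page(bio, data_page, bh->b_size, bh_offset(bh));
	wbc_account_cgroup_owner(wbc, pagecache_page, bh->b_size);
}

This is exactly the split made explicit by passing both pagecache_page and bounce_page to io_submit_add_bh() in ext4, and by switching the f2fs calls from the (possibly encrypted) bio page to fio->page.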
@@ -995,7 +995,7 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, { struct page *node_page; nid_t nid; - unsigned int ofs_in_node, max_addrs; + unsigned int ofs_in_node, max_addrs, base; block_t source_blkaddr; nid = le32_to_cpu(sum->nid); @@ -1021,11 +1021,17 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, return false; } - max_addrs = IS_INODE(node_page) ? DEF_ADDRS_PER_INODE : - DEF_ADDRS_PER_BLOCK; - if (ofs_in_node >= max_addrs) { - f2fs_err(sbi, "Inconsistent ofs_in_node:%u in summary, ino:%u, nid:%u, max:%u", - ofs_in_node, dni->ino, dni->nid, max_addrs); + if (IS_INODE(node_page)) { + base = offset_in_addr(F2FS_INODE(node_page)); + max_addrs = DEF_ADDRS_PER_INODE; + } else { + base = 0; + max_addrs = DEF_ADDRS_PER_BLOCK; + } + + if (base + ofs_in_node >= max_addrs) { + f2fs_err(sbi, "Inconsistent blkaddr offset: base:%u, ofs_in_node:%u, max:%u, ino:%u, nid:%u", + base, ofs_in_node, max_addrs, dni->ino, dni->nid); f2fs_put_page(node_page, 1); return false; } diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index f97c23ec93ce5b539627458fae648f65c07c2c08..df1a0cbfa1be4613f68bf732d005c679bca53b7b 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -64,7 +64,6 @@ bool f2fs_may_inline_dentry(struct inode *inode) void f2fs_do_read_inline_data(struct page *page, struct page *ipage) { struct inode *inode = page->mapping->host; - void *src_addr, *dst_addr; if (PageUptodate(page)) return; @@ -74,11 +73,8 @@ void f2fs_do_read_inline_data(struct page *page, struct page *ipage) zero_user_segment(page, MAX_INLINE_DATA(inode), PAGE_SIZE); /* Copy the whole inline data block */ - src_addr = inline_data_addr(inode, ipage); - dst_addr = kmap_atomic(page); - memcpy(dst_addr, src_addr, MAX_INLINE_DATA(inode)); - flush_dcache_page(page); - kunmap_atomic(dst_addr); + memcpy_to_page(page, 0, inline_data_addr(inode, ipage), + MAX_INLINE_DATA(inode)); if (!PageUptodate(page)) SetPageUptodate(page); } @@ -245,7 +241,6 @@ int f2fs_convert_inline_inode(struct inode *inode) int f2fs_write_inline_data(struct inode *inode, struct page *page) { - void *src_addr, *dst_addr; struct dnode_of_data dn; int err; @@ -262,10 +257,8 @@ int f2fs_write_inline_data(struct inode *inode, struct page *page) f2fs_bug_on(F2FS_I_SB(inode), page->index); f2fs_wait_on_page_writeback(dn.inode_page, NODE, true, true); - src_addr = kmap_atomic(page); - dst_addr = inline_data_addr(inode, dn.inode_page); - memcpy(dst_addr, src_addr, MAX_INLINE_DATA(inode)); - kunmap_atomic(src_addr); + memcpy_from_page(inline_data_addr(inode, dn.inode_page), + page, 0, MAX_INLINE_DATA(inode)); set_page_dirty(dn.inode_page); f2fs_clear_page_cache_dirty_tag(page); @@ -420,18 +413,17 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage, dentry_blk = page_address(page); + /* + * Start by zeroing the full block, to ensure that all unused space is + * zeroed and no uninitialized memory is leaked to disk. 
+ */ + memset(dentry_blk, 0, F2FS_BLKSIZE); + make_dentry_ptr_inline(dir, &src, inline_dentry); make_dentry_ptr_block(dir, &dst, dentry_blk); /* copy data from inline dentry block to new dentry block */ memcpy(dst.bitmap, src.bitmap, src.nr_bitmap); - memset(dst.bitmap + src.nr_bitmap, 0, dst.nr_bitmap - src.nr_bitmap); - /* - * we do not need to zero out remainder part of dentry and filename - * field, since we have used bitmap for marking the usage status of - * them, besides, we can also ignore copying/zeroing reserved space - * of dentry block, because them haven't been used so far. - */ memcpy(dst.dentry, src.dentry, SIZE_OF_DIR_ENTRY * src.max); memcpy(dst.filename, src.filename, src.max * F2FS_SLOT_LEN); diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 77e27960bbc7189ac39968a823774a5b377afd8e..4387d4226c8219a5470788174239d1343a15178e 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -2061,7 +2061,6 @@ static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data, size_t toread; loff_t i_size = i_size_read(inode); struct page *page; - char *kaddr; if (off > i_size) return 0; @@ -2095,9 +2094,7 @@ static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data, return -EIO; } - kaddr = kmap_atomic(page); - memcpy(data, kaddr + offset, tocopy); - kunmap_atomic(kaddr); + memcpy_from_page(data, page, offset, tocopy); f2fs_put_page(page, 1); offset = 0; @@ -2119,7 +2116,6 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type, size_t towrite = len; struct page *page; void *fsdata = NULL; - char *kaddr; int err = 0; int tocopy; @@ -2139,10 +2135,7 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type, break; } - kaddr = kmap_atomic(page); - memcpy(kaddr + offset, data, tocopy); - kunmap_atomic(kaddr); - flush_dcache_page(page); + memcpy_to_page(page, offset, data, tocopy); a_ops->write_end(NULL, mapping, off, tocopy, tocopy, page, fsdata); diff --git a/fs/f2fs/verity.c b/fs/f2fs/verity.c index cff94d095d0fe6fe60a6bcdb85b18341fd178b87..cef40d92268f79d17a342edd816a44876697afaf 100644 --- a/fs/f2fs/verity.c +++ b/fs/f2fs/verity.c @@ -47,16 +47,13 @@ static int pagecache_read(struct inode *inode, void *buf, size_t count, size_t n = min_t(size_t, count, PAGE_SIZE - offset_in_page(pos)); struct page *page; - void *addr; page = read_mapping_page(inode->i_mapping, pos >> PAGE_SHIFT, NULL); if (IS_ERR(page)) return PTR_ERR(page); - addr = kmap_atomic(page); - memcpy(buf, addr + offset_in_page(pos), n); - kunmap_atomic(addr); + memcpy_from_page(buf, page, offset_in_page(pos), n); put_page(page); @@ -81,8 +78,7 @@ static int pagecache_write(struct inode *inode, const void *buf, size_t count, size_t n = min_t(size_t, count, PAGE_SIZE - offset_in_page(pos)); struct page *page; - void *fsdata; - void *addr; + void *fsdata = NULL; int res; res = pagecache_write_begin(NULL, inode->i_mapping, pos, n, 0, @@ -90,9 +86,7 @@ static int pagecache_write(struct inode *inode, const void *buf, size_t count, if (res) return res; - addr = kmap_atomic(page); - memcpy(addr + offset_in_page(pos), buf, n); - kunmap_atomic(addr); + memcpy_to_page(page, offset_in_page(pos), buf, n); res = pagecache_write_end(NULL, inode->i_mapping, pos, n, n, page, fsdata); diff --git a/fs/file.c b/fs/file.c index 97a0cd31faec404ceaefb1b64bfb7dabd0cd41ff..173d318208b85af293b96eea9b804635f442ce4a 100644 --- a/fs/file.c +++ b/fs/file.c @@ -677,6 +677,7 @@ static struct file *pick_file(struct files_struct *files, unsigned fd) fdt = files_fdtable(files); if (fd >= fdt->max_fds) goto 
out_unlock; + fd = array_index_nospec(fd, fdt->max_fds); file = fdt->fd[fd]; if (!file) goto out_unlock; diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index cc4f987687f3c1ff611d968b311e9724aeca7f4f..5306595548703430d09b90d2c719d61fbc00dff3 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c @@ -152,7 +152,6 @@ static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *w { struct inode *inode = page->mapping->host; struct gfs2_inode *ip = GFS2_I(inode); - struct gfs2_sbd *sdp = GFS2_SB(inode); if (PageChecked(page)) { ClearPageChecked(page); @@ -160,7 +159,7 @@ static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *w create_empty_buffers(page, inode->i_sb->s_blocksize, BIT(BH_Dirty)|BIT(BH_Uptodate)); } - gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize); + gfs2_page_add_databufs(ip, page, 0, PAGE_SIZE); } return gfs2_write_jdata_page(page, wbc); } diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index 9820aad4b19a85d8202efa0ec87f0d0279733831..e01b6a2d12d30642f0fecf6b4363a16cdbce10f1 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -145,8 +145,10 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp) return -EIO; error = gfs2_find_jhead(sdp->sd_jdesc, &head, false); - if (error || gfs2_withdrawn(sdp)) + if (error) { + gfs2_consist(sdp); return error; + } if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) { gfs2_consist(sdp); @@ -158,7 +160,9 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp) gfs2_log_pointers_init(sdp, head.lh_blkno); error = gfs2_quota_init(sdp); - if (!error && !gfs2_withdrawn(sdp)) + if (!error && gfs2_withdrawn(sdp)) + error = -EIO; + if (!error) set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags); return error; } diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c index c0a73a6ffb28bd9c6201e3469f5e8314924ff336..397e02a566970fdf14a80f2bd377f1fe159c4f90 100644 --- a/fs/hfs/bnode.c +++ b/fs/hfs/bnode.c @@ -281,6 +281,7 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid) tree->node_hash[hash] = node; tree->node_hash_cnt++; } else { + hfs_bnode_get(node2); spin_unlock(&tree->hash_lock); kfree(node); wait_event(node2->lock_wq, !test_bit(HFS_BNODE_NEW, &node2->flags)); diff --git a/fs/hmdfs/file_merge.c b/fs/hmdfs/file_merge.c index ca83a04a92a15c4a68fe7af11721a039d0f28db5..cbe9358247ce443e8099d8d1dd7c5857bf47aded 100644 --- a/fs/hmdfs/file_merge.c +++ b/fs/hmdfs/file_merge.c @@ -242,7 +242,7 @@ static int hmdfs_actor_merge(struct dir_context *ctx, const char *name, insert_filename(iterate_callback_merge->root, &cache_entry); if (d_type == DT_DIR && insert_res == DT_DIR) { goto done; - } else if (d_type == DT_DIR && + } else if (d_type == DT_DIR && (insert_res == DT_REG || insert_res == DT_LNK)) { if (strlen(CONFLICTING_DIR_SUFFIX) > NAME_MAX - dentry_len) { ret = -ENAMETOOLONG; @@ -386,7 +386,7 @@ int do_dir_open_merge(struct file *file, const struct cred *cred, if (IS_ERR_OR_NULL(cred)) return ret; - + wait_event(dim->wait_queue, !has_merge_lookup_work(dim)); mutex_lock(&dim->comrade_list_lock); @@ -601,39 +601,40 @@ static int copy_string_from_user(unsigned long pos, unsigned long len, { char *tmp_data; - if (!access_ok((char *)pos, len)) + if (len >= PATH_MAX) + return -EINVAL; + if (!access_ok(pos, len)) return -EFAULT; - tmp_data = kmalloc(len, GFP_KERNEL); + tmp_data = kzalloc(len + 1, GFP_KERNEL); if (!tmp_data) return -ENOMEM; *data = tmp_data; - if (copy_from_user(tmp_data, (char __user *)pos, len)){ + if (copy_from_user(tmp_data, (char __user *)pos, len)) return -EFAULT; - } return 0; } -static int 
hmdfs_get_info_from_user(unsigned long pos, +static int hmdfs_get_info_from_user(unsigned long pos, struct hmdfs_dst_info *hdi, struct hmdfs_user_info *data) { int ret = 0; - if (!access_ok((struct hmdfs_dst_info __user *)pos, + if (!access_ok((struct hmdfs_dst_info __user *)pos, sizeof(struct hmdfs_dst_info))) return -ENOMEM; if (copy_from_user(hdi, (struct hmdfs_dst_info __user *)pos, sizeof(struct hmdfs_dst_info))) return -EFAULT; - + ret = copy_string_from_user(hdi->local_path_pos, hdi->local_path_len, &data->local_path); if (ret != 0) return ret; - ret = copy_string_from_user(hdi->distributed_path_pos, + ret = copy_string_from_user(hdi->distributed_path_pos, hdi->distributed_path_len, &data->distributed_path); if (ret != 0) @@ -647,7 +648,7 @@ static int hmdfs_get_info_from_user(unsigned long pos, return 0; } -static const struct cred *change_cred(struct dentry *dentry, +static const struct cred *change_cred(struct dentry *dentry, const char *bundle_name) { int bid; @@ -664,7 +665,7 @@ static const struct cred *change_cred(struct dentry *dentry, cred->fsgid = KGIDT_INIT(bid); old_cred = override_creds(cred); } - + return old_cred; } @@ -701,7 +702,7 @@ static int create_link_file(struct hmdfs_user_info *data) path_put(&path); return ret; } - + dentry = kern_path_create(AT_FDCWD, data->distributed_path, &path, 0); if (IS_ERR(dentry)) return PTR_ERR(dentry); @@ -727,7 +728,7 @@ static int create_dir(const char *path_value, mode_t mode) if (err && err != -EEXIST) hmdfs_err("vfs_mkdir failed, err = %d", err); done_path_create(&path, dentry); - + return err; } diff --git a/fs/hmdfs/hmdfs.h b/fs/hmdfs/hmdfs.h index 1a14d2e228ef8ea3e520b92a77ad6f829f369024..cd3980c1deaf12379e70b1f450964a141a436571 100644 --- a/fs/hmdfs/hmdfs.h +++ b/fs/hmdfs/hmdfs.h @@ -167,6 +167,8 @@ struct hmdfs_sb_info { /* For merge & device view */ unsigned int s_merge_switch; + /* For cloud disk*/ + unsigned int s_cloud_disk_switch; /* For writeback */ struct hmdfs_writeback *h_wb; /* For server writeback */ diff --git a/fs/hmdfs/hmdfs_dentryfile.c b/fs/hmdfs/hmdfs_dentryfile.c index 0d1662d685fe086d4e76fde0899d8955984a0ce6..f93e05e924f9feb5dcb895c785ac754096210d90 100644 --- a/fs/hmdfs/hmdfs_dentryfile.c +++ b/fs/hmdfs/hmdfs_dentryfile.c @@ -283,10 +283,11 @@ static char *hmdfs_merge_dentry_path_raw(struct dentry *d, char *buf, int buflen unsigned int len; unsigned int seq = 0; int error = 0; - struct hmdfs_dentry_info_merge *mdi = hmdfs_dm(d); + struct hmdfs_dentry_info_merge *mdi = NULL; rcu_read_lock(); restart: + mdi = hmdfs_dm(d); dentry = d; end = buf + buflen; len = buflen; @@ -381,20 +382,23 @@ char *hmdfs_get_dentry_absolute_path(const char *rootdir, char *hmdfs_connect_path(const char *path, const char *name) { char *buf = 0; + size_t path_len, name_len; if (!path || !name) return NULL; - if (strlen(path) + strlen(name) + 1 >= PATH_MAX) + path_len = strnlen(path, PATH_MAX); + name_len = strnlen(name, PATH_MAX); + if (path_len + name_len + 1 >= PATH_MAX) return NULL; buf = kzalloc(PATH_MAX, GFP_KERNEL); if (!buf) return NULL; - strcpy(buf, path); + strncpy(buf, path, path_len); strcat(buf, "/"); - strcat(buf, name); + strncat(buf, name, name_len); return buf; } diff --git a/fs/hmdfs/hmdfs_device_view.h b/fs/hmdfs/hmdfs_device_view.h index 5c525a9c7622905d3860f0ff5349c920e591c44e..0d1eaf0a066ee95e2d913e5c6dd746e6dbccda7a 100644 --- a/fs/hmdfs/hmdfs_device_view.h +++ b/fs/hmdfs/hmdfs_device_view.h @@ -182,7 +182,7 @@ void hmdfs_set_time(struct dentry *dentry, unsigned long time); struct inode 
*fill_inode_local(struct super_block *sb, struct inode *lower_inode, const char *name); struct inode *fill_root_inode(struct super_block *sb, - struct inode *lower_inode); + struct hmdfs_sb_info *sbi, struct inode *lower_inode); struct inode *fill_device_inode(struct super_block *sb, struct inode *lower_inode); struct hmdfs_lookup_ret *hmdfs_lookup_by_con(struct hmdfs_peer *con, diff --git a/fs/hmdfs/inode_cloud.c b/fs/hmdfs/inode_cloud.c index 8e137c51af4d20a6af1171cfd488a9166fb0dae9..289e05a398425c3666b219f603d49ba149e508a5 100644 --- a/fs/hmdfs/inode_cloud.c +++ b/fs/hmdfs/inode_cloud.c @@ -364,19 +364,7 @@ int hmdfs_rmdir_cloud(struct inode *dir, struct dentry *dentry) int hmdfs_unlink_cloud(struct inode *dir, struct dentry *dentry) { - struct hmdfs_lookup_cloud_ret *lookup_result = NULL; - int ret = -EPERM; - - lookup_result = hmdfs_lookup_by_cloud(dentry, 0); - /* - * unlink is allowed only after the file item has been removed from - * the dentryfile(lookup fail). - */ - if (!lookup_result) - ret = 0; - - kfree(lookup_result); - return ret; + return 0; } int hmdfs_rename_cloud(struct inode *old_dir, struct dentry *old_dentry, diff --git a/fs/hmdfs/inode_cloud_merge.c b/fs/hmdfs/inode_cloud_merge.c index 37e99f0554e29028eb3698aaead056a7ceaacbf8..d0b5e58f9fbe00115ff6000aab77aafb1048624a 100644 --- a/fs/hmdfs/inode_cloud_merge.c +++ b/fs/hmdfs/inode_cloud_merge.c @@ -179,11 +179,7 @@ static int lookup_merge_normal(struct dentry *dentry, unsigned int flags) snprintf(cpath, PATH_MAX, "device_view/%s%s/%s", CLOUD_CID, ppath, rname); - mutex_lock(&mdi->work_lock); - merge_lookup_async(mdi, sbi, CLOUD_DEVICE, cpath, flags); - mutex_unlock(&mdi->work_lock); - if (is_comrade_list_empty(mdi)) - wait_event(mdi->wait_queue, is_merge_lookup_end(mdi)); + merge_lookup_sync(mdi, sbi, CLOUD_DEVICE, cpath, flags); ret = -ENOENT; if (!is_comrade_list_empty(mdi)) @@ -516,11 +512,8 @@ static int create_lo_d_parent_recur(struct dentry *d_parent, struct hmdfs_recursive_para *rec_op_para) { struct dentry *lo_d_parent, *d_pparent; - struct hmdfs_dentry_info_merge *pmdi = NULL; int ret = 0; - pmdi = hmdfs_dm(d_parent); - wait_event(pmdi->wait_queue, !has_merge_lookup_work(pmdi)); lo_d_parent = hmdfs_get_lo_d(d_parent, HMDFS_DEVID_LOCAL); if (!lo_d_parent) { d_pparent = dget_parent(d_parent); @@ -550,12 +543,9 @@ int create_lo_d_cloud_child(struct inode *i_parent, struct dentry *d_child, { struct dentry *d_pparent, *lo_d_parent, *lo_d_child; struct dentry *d_parent = dget_parent(d_child); - struct hmdfs_dentry_info_merge *pmdi = hmdfs_dm(d_parent); int ret = 0; mode_t d_child_mode = rec_op_para->mode; - wait_event(pmdi->wait_queue, !has_merge_lookup_work(pmdi)); - lo_d_parent = hmdfs_get_lo_d(d_parent, HMDFS_DEVID_LOCAL); if (!lo_d_parent) { d_pparent = dget_parent(d_parent); @@ -639,11 +629,8 @@ static int rename_lo_d_cloud_child(struct hmdfs_rename_para *rename_para, { struct dentry *d_pparent, *lo_d_parent; struct dentry *d_parent = dget_parent(rename_para->new_dentry); - struct hmdfs_dentry_info_merge *pmdi = hmdfs_dm(d_parent); int ret = 0; - wait_event(pmdi->wait_queue, !has_merge_lookup_work(pmdi)); - lo_d_parent = hmdfs_get_lo_d(d_parent, HMDFS_DEVID_LOCAL); if (!lo_d_parent) { d_pparent = dget_parent(d_parent); diff --git a/fs/hmdfs/inode_local.c b/fs/hmdfs/inode_local.c index ef36f5e33240ffd7a7a5d66741047ec56f4655be..c799c04789ab2a0bb65b790a5167b9e3473ee909 100644 --- a/fs/hmdfs/inode_local.c +++ b/fs/hmdfs/inode_local.c @@ -742,18 +742,28 @@ int hmdfs_rename_local(struct inode *old_dir, struct dentry 
*old_dentry, static bool symname_is_allowed(const char *symname) { - size_t symname_len = strlen(symname); - int i; - - if (symname_len == 1) - return true; - - for (i = 0; i < symname_len - 1; i++) - if (symname[i] == '.' && symname[i + 1] == '.') { - hmdfs_err("Prohibited link path"); - return false; - } + char *p; + char *buf = 0; + size_t symname_len; + + symname_len = strnlen(symname, PATH_MAX); + if (symname_len >= PATH_MAX) + return false; + + buf = kzalloc(PATH_MAX + 2, GFP_KERNEL); + if (!buf) + return false; + + buf[0] = '/'; + strncpy(buf + 1, symname, symname_len); + strcat(buf, "/"); + p = strstr(symname, "/../"); + if (p) { + kfree(buf); + return false; + } + kfree(buf); return true; } diff --git a/fs/hmdfs/inode_root.c b/fs/hmdfs/inode_root.c index ba3eac8352a93bc9eb93b1e27502d41e82830e99..91ca37bb6b7c3872533fd2d42230459359b2c13a 100644 --- a/fs/hmdfs/inode_root.c +++ b/fs/hmdfs/inode_root.c @@ -320,7 +320,7 @@ struct inode *fill_device_inode(struct super_block *sb, return inode; } -struct inode *fill_root_inode(struct super_block *sb, struct inode *lower_inode) +struct inode *fill_root_inode(struct super_block *sb, struct hmdfs_sb_info *sbi, struct inode *lower_inode) { struct inode *inode = NULL; struct hmdfs_inode_info *info = NULL; @@ -328,8 +328,14 @@ struct inode *fill_root_inode(struct super_block *sb, struct inode *lower_inode) if (!igrab(lower_inode)) return ERR_PTR(-ESTALE); - inode = hmdfs_iget_locked_root(sb, HMDFS_ROOT_ANCESTOR, lower_inode, + if (sbi->s_cloud_disk_switch) { + inode = hmdfs_iget_locked_root(sb, HMDFS_ROOT_DEV_LOCAL, lower_inode, NULL); + } else { + inode = hmdfs_iget_locked_root(sb, HMDFS_ROOT_ANCESTOR, lower_inode, + NULL); + } + if (!inode) { hmdfs_err("iget5_locked get inode NULL"); iput(lower_inode); @@ -341,7 +347,15 @@ struct inode *fill_root_inode(struct super_block *sb, struct inode *lower_inode) } info = hmdfs_i(inode); - info->inode_type = HMDFS_LAYER_ZERO; + if (sbi->s_cloud_disk_switch) { + info->inode_type = HMDFS_LAYER_SECOND_LOCAL; + inode->i_op = &hmdfs_dir_inode_ops_local; + inode->i_fop = &hmdfs_dir_ops_local; + } else { + info->inode_type = HMDFS_LAYER_ZERO; + inode->i_op = &hmdfs_root_ops; + inode->i_fop = &hmdfs_root_fops; + } inode->i_mode = (lower_inode->i_mode & S_IFMT) | S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IXOTH; @@ -355,9 +369,7 @@ struct inode *fill_root_inode(struct super_block *sb, struct inode *lower_inode) inode->i_atime = lower_inode->i_atime; inode->i_ctime = lower_inode->i_ctime; inode->i_mtime = lower_inode->i_mtime; - - inode->i_op = &hmdfs_root_ops; - inode->i_fop = &hmdfs_root_fops; + fsstack_copy_inode_size(inode, lower_inode); unlock_new_inode(inode); return inode; diff --git a/fs/hmdfs/main.c b/fs/hmdfs/main.c index da282cc28106a1577c635ccb6127a22764ae82c8..db5d0ee561303c45f95157819cfc0180781d9e82 100644 --- a/fs/hmdfs/main.c +++ b/fs/hmdfs/main.c @@ -719,6 +719,7 @@ static int hmdfs_init_sbi(struct hmdfs_sb_info *sbi) HMDFS_FEATURE_READPAGES_OPEN | HMDFS_ATOMIC_OPEN; sbi->s_merge_switch = false; + sbi->s_cloud_disk_switch = false; sbi->dcache_threshold = DEFAULT_DCACHE_THRESHOLD; sbi->dcache_precision = DEFAULT_DCACHE_PRECISION; sbi->dcache_timeout = DEFAULT_DCACHE_TIMEOUT; @@ -912,7 +913,7 @@ static int hmdfs_fill_super(struct super_block *sb, void *data, int silent) err = -EINVAL; goto out_sput; } - root_inode = fill_root_inode(sb, d_inode(lower_path.dentry)); + root_inode = fill_root_inode(sb, sbi, d_inode(lower_path.dentry)); if (IS_ERR(root_inode)) { err = PTR_ERR(root_inode); goto out_sput; 
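Aside (a sketch, not part of the series): the hmdfs changes around this point wire up a new boolean mount option — fill_root_inode() above now takes the sbi and uses sbi->s_cloud_disk_switch to pick HMDFS_ROOT_DEV_LOCAL instead of the usual HMDFS_ROOT_ANCESTOR root, hmdfs_init_sbi() defaults the flag to false, and the fs/hmdfs/super.c hunk further below adds the "cloud_disk" token that turns it on. A condensed sketch of the parsing side follows; cloud_disk_tokens and hmdfs_parse_cloud_disk() are hypothetical names for illustration, while OPT_CLOUD_DISK_TYPE and s_cloud_disk_switch come from the diff itself (other options and error handling omitted; needs <linux/parser.h>).

/* Sketch only -- condensed from the fs/hmdfs/super.c hunk below. */
static const match_table_t cloud_disk_tokens = {
	{ OPT_CLOUD_DISK_TYPE, "cloud_disk" },
	{ -1, NULL },
};

static void hmdfs_parse_cloud_disk(struct hmdfs_sb_info *sbi, char *opt)
{
	substring_t args[MAX_OPT_ARGS];

	if (match_token(opt, cloud_disk_tokens, args) == OPT_CLOUD_DISK_TYPE)
		sbi->s_cloud_disk_switch = true;
}

A mount along the lines of "-o merge,cloud_disk" would then take the HMDFS_ROOT_DEV_LOCAL branch in fill_root_inode(); the exact mount syntax is not specified by this diff.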
@@ -957,6 +958,7 @@ static int hmdfs_fill_super(struct super_block *sb, void *data, int silent) out_freesbi: if (sbi) { sb->s_fs_info = NULL; + hmdfs_clear_share_table(sbi); hmdfs_exit_stash(sbi); hmdfs_destroy_writeback(sbi); hmdfs_destroy_server_writeback(sbi); diff --git a/fs/hmdfs/super.c b/fs/hmdfs/super.c index 665d0b684f4fa52138c1e504038c3319270dee34..7de0971ede4f1abcdb7a13668fa7c4961725f1c5 100644 --- a/fs/hmdfs/super.c +++ b/fs/hmdfs/super.c @@ -19,6 +19,7 @@ enum { OPT_CLOUD_DIR, OPT_S_CASE, OPT_VIEW_TYPE, + OPT_CLOUD_DISK_TYPE, OPT_NO_OFFLINE_STASH, OPT_NO_DENTRY_CACHE, OPT_USER_ID, @@ -32,6 +33,7 @@ static match_table_t hmdfs_tokens = { { OPT_CLOUD_DIR, "cloud_dir=%s" }, { OPT_S_CASE, "sensitive" }, { OPT_VIEW_TYPE, "merge" }, + { OPT_CLOUD_DISK_TYPE, "cloud_disk"}, { OPT_NO_OFFLINE_STASH, "no_offline_stash" }, { OPT_NO_DENTRY_CACHE, "no_dentry_cache" }, { OPT_USER_ID, "user_id=%s"}, @@ -140,6 +142,9 @@ int hmdfs_parse_options(struct hmdfs_sb_info *sbi, const char *data) case OPT_VIEW_TYPE: sbi->s_merge_switch = true; break; + case OPT_CLOUD_DISK_TYPE: + sbi->s_cloud_disk_switch = true; + break; case OPT_NO_OFFLINE_STASH: sbi->s_offline_stash = false; break; diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 9f776bdba2696bacc96b0942544fd7950c7a89f2..6d9cce22106c94cfe99b1aa6b39dafdb031c06ec 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -984,36 +984,28 @@ do_get_write_access(handle_t *handle, struct journal_head *jh, * ie. locked but not dirty) or tune2fs (which may actually have * the buffer dirtied, ugh.) */ - if (buffer_dirty(bh)) { + if (buffer_dirty(bh) && jh->b_transaction) { + warn_dirty_buffer(bh); /* - * First question: is this buffer already part of the current - * transaction or the existing committing transaction? - */ - if (jh->b_transaction) { - J_ASSERT_JH(jh, - jh->b_transaction == transaction || - jh->b_transaction == - journal->j_committing_transaction); - if (jh->b_next_transaction) - J_ASSERT_JH(jh, jh->b_next_transaction == - transaction); - warn_dirty_buffer(bh); - } - /* - * In any case we need to clean the dirty flag and we must - * do it under the buffer lock to be sure we don't race - * with running write-out. + * We need to clean the dirty flag and we must do it under the + * buffer lock to be sure we don't race with running write-out. */ JBUFFER_TRACE(jh, "Journalling dirty buffer"); clear_buffer_dirty(bh); + /* + * The buffer is going to be added to BJ_Reserved list now and + * nothing guarantees jbd2_journal_dirty_metadata() will be + * ever called for it. So we need to set jbddirty bit here to + * make sure the buffer is dirtied and written out when the + * journaling machinery is done with it. 
+ */ set_buffer_jbddirty(bh); } - unlock_buffer(bh); - error = -EROFS; if (is_handle_aborted(handle)) { spin_unlock(&jh->b_state_lock); + unlock_buffer(bh); goto out; } error = 0; @@ -1023,8 +1015,10 @@ do_get_write_access(handle_t *handle, struct journal_head *jh, * b_next_transaction points to it */ if (jh->b_transaction == transaction || - jh->b_next_transaction == transaction) + jh->b_next_transaction == transaction) { + unlock_buffer(bh); goto done; + } /* * this is the first time this transaction is touching this buffer, @@ -1048,10 +1042,24 @@ do_get_write_access(handle_t *handle, struct journal_head *jh, */ smp_wmb(); spin_lock(&journal->j_list_lock); + if (test_clear_buffer_dirty(bh)) { + /* + * Execute buffer dirty clearing and jh->b_transaction + * assignment under journal->j_list_lock locked to + * prevent bh being removed from checkpoint list if + * the buffer is in an intermediate state (not dirty + * and jh->b_transaction is NULL). + */ + JBUFFER_TRACE(jh, "Journalling dirty buffer"); + set_buffer_jbddirty(bh); + } __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved); spin_unlock(&journal->j_list_lock); + unlock_buffer(bh); goto done; } + unlock_buffer(bh); + /* * If there is already a copy-out version of this buffer, then we don't * need to make another one diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c index 2c9493011aec3a7996272fe8ee51253d5efca127..501263355ef48bcd562a5760d0805736e3fee4aa 100644 --- a/fs/jfs/jfs_dmap.c +++ b/fs/jfs/jfs_dmap.c @@ -193,7 +193,8 @@ int dbMount(struct inode *ipbmap) bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth); bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart); bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size); - if (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG) { + if (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG || + bmp->db_agl2size < 0) { err = -EINVAL; goto err_release_metapage; } diff --git a/fs/nfs/file.c b/fs/nfs/file.c index ad856b7b9a46cdd760af700d760b3572ee6f583a..7be1a7f7fcb2a992af3010c264208294530dd33b 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -487,8 +487,9 @@ static int nfs_swap_activate(struct swap_info_struct *sis, struct file *file, { unsigned long blocks; long long isize; - struct rpc_clnt *clnt = NFS_CLIENT(file->f_mapping->host); - struct inode *inode = file->f_mapping->host; + struct inode *inode = file_inode(file); + struct rpc_clnt *clnt = NFS_CLIENT(inode); + struct nfs_client *cl = NFS_SERVER(inode)->nfs_client; spin_lock(&inode->i_lock); blocks = inode->i_blocks; @@ -501,14 +502,22 @@ static int nfs_swap_activate(struct swap_info_struct *sis, struct file *file, *span = sis->pages; + + if (cl->rpc_ops->enable_swap) + cl->rpc_ops->enable_swap(inode); + return rpc_clnt_swap_activate(clnt); } static void nfs_swap_deactivate(struct file *file) { - struct rpc_clnt *clnt = NFS_CLIENT(file->f_mapping->host); + struct inode *inode = file_inode(file); + struct rpc_clnt *clnt = NFS_CLIENT(inode); + struct nfs_client *cl = NFS_SERVER(inode)->nfs_client; rpc_clnt_swap_deactivate(clnt); + if (cl->rpc_ops->disable_swap) + cl->rpc_ops->disable_swap(file_inode(file)); } const struct address_space_operations nfs_file_aops = { diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 1adece1cff3ed9cacc449845a7927a1364328743..36f415278c04209e8626ade77438e4a40328ceda 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1906,7 +1906,11 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) nfs_wcc_update_inode(inode, fattr); if (pnfs_layoutcommit_outstanding(inode)) { - nfsi->cache_validity |= 
save_cache_validity & NFS_INO_INVALID_ATTR; + nfsi->cache_validity |= + save_cache_validity & + (NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_CTIME | + NFS_INO_INVALID_MTIME | NFS_INO_INVALID_SIZE | + NFS_INO_REVAL_FORCED); cache_revalidated = false; } diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 6d916563356ef372d594a482a4b2246888d5f090..8b41c0b8624e3fb0a16af25e5f681fa2e0ad6a21 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -42,6 +42,7 @@ enum nfs4_client_state { NFS4CLNT_LEASE_MOVED, NFS4CLNT_DELEGATION_EXPIRED, NFS4CLNT_RUN_MANAGER, + NFS4CLNT_MANAGER_AVAILABLE, NFS4CLNT_RECALL_RUNNING, NFS4CLNT_RECALL_ANY_LAYOUT_READ, NFS4CLNT_RECALL_ANY_LAYOUT_RW, diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index ee46ab09e330626905117650c2d56864ee14dcbf..8653335c17b671747b673b1e417a0238ef38da29 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -10385,6 +10385,26 @@ static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size) return error + error2 + error3; } +static void nfs4_enable_swap(struct inode *inode) +{ + /* The state manager thread must always be running. + * It will notice the client is a swapper, and stay put. + */ + struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; + + nfs4_schedule_state_manager(clp); +} + +static void nfs4_disable_swap(struct inode *inode) +{ + /* The state manager thread will now exit once it is + * woken. + */ + struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; + + nfs4_schedule_state_manager(clp); +} + static const struct inode_operations nfs4_dir_inode_operations = { .create = nfs_create, .lookup = nfs_lookup, @@ -10461,6 +10481,8 @@ const struct nfs_rpc_ops nfs_v4_clientops = { .free_client = nfs4_free_client, .create_server = nfs4_create_server, .clone_server = nfs_clone_server, + .enable_swap = nfs4_enable_swap, + .disable_swap = nfs4_disable_swap, }; static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = { diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 175b2e064003eb45c94dfb7e46a29941ba0d62f6..628e030f8e3ba53f2563fb025cbd25d6e6ff9606 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1208,10 +1208,17 @@ void nfs4_schedule_state_manager(struct nfs_client *clp) { struct task_struct *task; char buf[INET6_ADDRSTRLEN + sizeof("-manager") + 1]; + struct rpc_clnt *cl = clp->cl_rpcclient; + + while (cl != cl->cl_parent) + cl = cl->cl_parent; set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state); - if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0) + if (test_and_set_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state) != 0) { + wake_up_var(&clp->cl_state); return; + } + set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state); __module_get(THIS_MODULE); refcount_inc(&clp->cl_count); @@ -1229,6 +1236,7 @@ void nfs4_schedule_state_manager(struct nfs_client *clp) if (!nfs_client_init_is_complete(clp)) nfs_mark_client_ready(clp, PTR_ERR(task)); nfs4_clear_state_manager_bit(clp); + clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state); nfs_put_client(clp); module_put(THIS_MODULE); } @@ -2680,12 +2688,8 @@ static void nfs4_state_manager(struct nfs_client *clp) clear_bit(NFS4CLNT_RECALL_RUNNING, &clp->cl_state); } - /* Did we race with an attempt to give us more work? 
*/ - if (!test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state)) - return; - if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0) - return; - memflags = memalloc_nofs_save(); + return; + } while (refcount_read(&clp->cl_count) > 1 && !signalled()); goto out_drain; @@ -2706,9 +2710,31 @@ static void nfs4_state_manager(struct nfs_client *clp) static int nfs4_run_state_manager(void *ptr) { struct nfs_client *clp = ptr; + struct rpc_clnt *cl = clp->cl_rpcclient; + + while (cl != cl->cl_parent) + cl = cl->cl_parent; allow_signal(SIGKILL); +again: + set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state); nfs4_state_manager(clp); + if (atomic_read(&cl->cl_swapper)) { + wait_var_event_interruptible(&clp->cl_state, + test_bit(NFS4CLNT_RUN_MANAGER, + &clp->cl_state)); + if (atomic_read(&cl->cl_swapper) && + test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state)) + goto again; + /* Either no longer a swapper, or were signalled */ + } + clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state); + + if (refcount_read(&clp->cl_count) > 1 && !signalled() && + test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state) && + !test_and_set_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state)) + goto again; + nfs_put_client(clp); module_put_and_exit(0); return 0; diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h index 484c1da96dea2f40b5a69d93ac1441ca29748ffa..d862df9761e7750d91f971ed0f6e73cac516010e 100644 --- a/fs/nfs/nfs4trace.h +++ b/fs/nfs/nfs4trace.h @@ -584,32 +584,34 @@ TRACE_DEFINE_ENUM(NFS4CLNT_MOVED); TRACE_DEFINE_ENUM(NFS4CLNT_LEASE_MOVED); TRACE_DEFINE_ENUM(NFS4CLNT_DELEGATION_EXPIRED); TRACE_DEFINE_ENUM(NFS4CLNT_RUN_MANAGER); +TRACE_DEFINE_ENUM(NFS4CLNT_MANAGER_AVAILABLE); TRACE_DEFINE_ENUM(NFS4CLNT_RECALL_RUNNING); TRACE_DEFINE_ENUM(NFS4CLNT_RECALL_ANY_LAYOUT_READ); TRACE_DEFINE_ENUM(NFS4CLNT_RECALL_ANY_LAYOUT_RW); +TRACE_DEFINE_ENUM(NFS4CLNT_DELEGRETURN_DELAYED); #define show_nfs4_clp_state(state) \ __print_flags(state, "|", \ - { NFS4CLNT_MANAGER_RUNNING, "MANAGER_RUNNING" }, \ - { NFS4CLNT_CHECK_LEASE, "CHECK_LEASE" }, \ - { NFS4CLNT_LEASE_EXPIRED, "LEASE_EXPIRED" }, \ - { NFS4CLNT_RECLAIM_REBOOT, "RECLAIM_REBOOT" }, \ - { NFS4CLNT_RECLAIM_NOGRACE, "RECLAIM_NOGRACE" }, \ - { NFS4CLNT_DELEGRETURN, "DELEGRETURN" }, \ - { NFS4CLNT_SESSION_RESET, "SESSION_RESET" }, \ - { NFS4CLNT_LEASE_CONFIRM, "LEASE_CONFIRM" }, \ - { NFS4CLNT_SERVER_SCOPE_MISMATCH, \ - "SERVER_SCOPE_MISMATCH" }, \ - { NFS4CLNT_PURGE_STATE, "PURGE_STATE" }, \ - { NFS4CLNT_BIND_CONN_TO_SESSION, \ - "BIND_CONN_TO_SESSION" }, \ - { NFS4CLNT_MOVED, "MOVED" }, \ - { NFS4CLNT_LEASE_MOVED, "LEASE_MOVED" }, \ - { NFS4CLNT_DELEGATION_EXPIRED, "DELEGATION_EXPIRED" }, \ - { NFS4CLNT_RUN_MANAGER, "RUN_MANAGER" }, \ - { NFS4CLNT_RECALL_RUNNING, "RECALL_RUNNING" }, \ - { NFS4CLNT_RECALL_ANY_LAYOUT_READ, "RECALL_ANY_LAYOUT_READ" }, \ - { NFS4CLNT_RECALL_ANY_LAYOUT_RW, "RECALL_ANY_LAYOUT_RW" }) + { BIT(NFS4CLNT_MANAGER_RUNNING), "MANAGER_RUNNING" }, \ + { BIT(NFS4CLNT_CHECK_LEASE), "CHECK_LEASE" }, \ + { BIT(NFS4CLNT_LEASE_EXPIRED), "LEASE_EXPIRED" }, \ + { BIT(NFS4CLNT_RECLAIM_REBOOT), "RECLAIM_REBOOT" }, \ + { BIT(NFS4CLNT_RECLAIM_NOGRACE), "RECLAIM_NOGRACE" }, \ + { BIT(NFS4CLNT_DELEGRETURN), "DELEGRETURN" }, \ + { BIT(NFS4CLNT_SESSION_RESET), "SESSION_RESET" }, \ + { BIT(NFS4CLNT_LEASE_CONFIRM), "LEASE_CONFIRM" }, \ + { BIT(NFS4CLNT_SERVER_SCOPE_MISMATCH), "SERVER_SCOPE_MISMATCH" }, \ + { BIT(NFS4CLNT_PURGE_STATE), "PURGE_STATE" }, \ + { BIT(NFS4CLNT_BIND_CONN_TO_SESSION), "BIND_CONN_TO_SESSION" }, \ + { BIT(NFS4CLNT_MOVED), "MOVED" }, \ + { 
BIT(NFS4CLNT_LEASE_MOVED), "LEASE_MOVED" }, \ + { BIT(NFS4CLNT_DELEGATION_EXPIRED), "DELEGATION_EXPIRED" }, \ + { BIT(NFS4CLNT_RUN_MANAGER), "RUN_MANAGER" }, \ + { BIT(NFS4CLNT_MANAGER_AVAILABLE), "MANAGER_AVAILABLE" }, \ + { BIT(NFS4CLNT_RECALL_RUNNING), "RECALL_RUNNING" }, \ + { BIT(NFS4CLNT_RECALL_ANY_LAYOUT_READ), "RECALL_ANY_LAYOUT_READ" }, \ + { BIT(NFS4CLNT_RECALL_ANY_LAYOUT_RW), "RECALL_ANY_LAYOUT_RW" }, \ + { BIT(NFS4CLNT_DELEGRETURN_DELAYED), "DELERETURN_DELAYED" }) TRACE_EVENT(nfs4_state_mgr, TP_PROTO( diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c index 07d26f61f22aa5b92d28adba4bed97791b735f92..3a1dea5d1448471d5b2fe3eb5ea2dff440d1fa93 100644 --- a/fs/nilfs2/ioctl.c +++ b/fs/nilfs2/ioctl.c @@ -1129,7 +1129,14 @@ static int nilfs_ioctl_set_alloc_range(struct inode *inode, void __user *argp) minseg = range[0] + segbytes - 1; do_div(minseg, segbytes); + + if (range[1] < 4096) + goto out; + maxseg = NILFS_SB2_OFFSET_BYTES(range[1]); + if (maxseg < segbytes) + goto out; + do_div(maxseg, segbytes); maxseg--; diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 7a41c9727c9e219ff19a94d20f388f299e6c6116..7751848687635c816e2966e279a4762867f1b116 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -408,6 +408,15 @@ int nilfs_resize_fs(struct super_block *sb, __u64 newsize) if (newsize > devsize) goto out; + /* + * Prevent underflow in second superblock position calculation. + * The exact minimum size check is done in nilfs_sufile_resize(). + */ + if (newsize < 4096) { + ret = -ENOSPC; + goto out; + } + /* * Write lock is required to protect some functions depending * on the number of segments, the number of reserved segments, diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index 211937054c31f97f13ddacb8eff5daa1137b252a..38a1206cf9481159f9290b316f0256d853a2fa94 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c @@ -544,9 +544,15 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs, { struct nilfs_super_block **sbp = nilfs->ns_sbp; struct buffer_head **sbh = nilfs->ns_sbh; - u64 sb2off = NILFS_SB2_OFFSET_BYTES(nilfs->ns_bdev->bd_inode->i_size); + u64 sb2off, devsize = nilfs->ns_bdev->bd_inode->i_size; int valid[2], swp = 0; + if (devsize < NILFS_SEG_MIN_BLOCKS * NILFS_MIN_BLOCK_SIZE + 4096) { + nilfs_err(sb, "device size too small"); + return -EINVAL; + } + sb2off = NILFS_SB2_OFFSET_BYTES(devsize); + sbp[0] = nilfs_read_super_block(sb, NILFS_SB_OFFSET_BYTES, blocksize, &sbh[0]); sbp[1] = nilfs_read_super_block(sb, sb2off, blocksize, &sbh[1]); diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c index 758d9661ef1e4a01bcdd104899b89fdaa5f8939b..98e77ea957ff3945836aff55fca02e36615ab50e 100644 --- a/fs/ocfs2/move_extents.c +++ b/fs/ocfs2/move_extents.c @@ -107,14 +107,6 @@ static int __ocfs2_move_extent(handle_t *handle, */ replace_rec.e_flags = ext_flags & ~OCFS2_EXT_REFCOUNTED; - ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), - context->et.et_root_bh, - OCFS2_JOURNAL_ACCESS_WRITE); - if (ret) { - mlog_errno(ret); - goto out; - } - ret = ocfs2_split_extent(handle, &context->et, path, index, &replace_rec, context->meta_ac, &context->dealloc); @@ -123,8 +115,6 @@ static int __ocfs2_move_extent(handle_t *handle, goto out; } - ocfs2_journal_dirty(handle, context->et.et_root_bh); - context->new_phys_cpos = new_p_cpos; /* @@ -446,7 +436,7 @@ static int ocfs2_find_victim_alloc_group(struct inode *inode, bg = (struct ocfs2_group_desc *)gd_bh->b_data; if (vict_blkno < (le64_to_cpu(bg->bg_blkno) + - le16_to_cpu(bg->bg_bits))) { + 
(le16_to_cpu(bg->bg_bits) << bits_per_unit))) { *ret_bh = gd_bh; *vict_bit = (vict_blkno - blkno) >> @@ -561,6 +551,7 @@ static void ocfs2_probe_alloc_group(struct inode *inode, struct buffer_head *bh, last_free_bits++; if (last_free_bits == move_len) { + i -= move_len; *goal_bit = i; *phys_cpos = base_cpos + i; break; @@ -1032,18 +1023,19 @@ int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp) context->range = ⦥ + /* + * ok, the default theshold for the defragmentation + * is 1M, since our maximum clustersize was 1M also. + * any thought? + */ + if (!range.me_threshold) + range.me_threshold = 1024 * 1024; + + if (range.me_threshold > i_size_read(inode)) + range.me_threshold = i_size_read(inode); + if (range.me_flags & OCFS2_MOVE_EXT_FL_AUTO_DEFRAG) { context->auto_defrag = 1; - /* - * ok, the default theshold for the defragmentation - * is 1M, since our maximum clustersize was 1M also. - * any thought? - */ - if (!range.me_threshold) - range.me_threshold = 1024 * 1024; - - if (range.me_threshold > i_size_read(inode)) - range.me_threshold = i_size_read(inode); if (range.me_flags & OCFS2_MOVE_EXT_FL_PART_DEFRAG) context->partial = 1; diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c index b019f27c1360145f8f4be59b729ea2a9536c6a65..0e734c8b4dfa202c87d9adba602ef11374d9eaf0 100644 --- a/fs/overlayfs/file.c +++ b/fs/overlayfs/file.c @@ -531,9 +531,16 @@ static long ovl_fallocate(struct file *file, int mode, loff_t offset, loff_t len const struct cred *old_cred; int ret; + inode_lock(inode); + /* Update mode */ + ovl_copyattr(ovl_inode_real(inode), inode); + ret = file_remove_privs(file); + if (ret) + goto out_unlock; + ret = ovl_real_fdget(file, &real); if (ret) - return ret; + goto out_unlock; old_cred = ovl_override_creds(file_inode(file)->i_sb); ret = vfs_fallocate(real.file, mode, offset, len); @@ -544,6 +551,9 @@ static long ovl_fallocate(struct file *file, int mode, loff_t offset, loff_t len fdput(real); +out_unlock: + inode_unlock(inode); + return ret; } @@ -687,14 +697,23 @@ static loff_t ovl_copyfile(struct file *file_in, loff_t pos_in, const struct cred *old_cred; loff_t ret; + inode_lock(inode_out); + if (op != OVL_DEDUPE) { + /* Update mode */ + ovl_copyattr(ovl_inode_real(inode_out), inode_out); + ret = file_remove_privs(file_out); + if (ret) + goto out_unlock; + } + ret = ovl_real_fdget(file_out, &real_out); if (ret) - return ret; + goto out_unlock; ret = ovl_real_fdget(file_in, &real_in); if (ret) { fdput(real_out); - return ret; + goto out_unlock; } old_cred = ovl_override_creds(file_inode(file_out)->i_sb); @@ -723,6 +742,9 @@ static loff_t ovl_copyfile(struct file *file_in, loff_t pos_in, fdput(real_in); fdput(real_out); +out_unlock: + inode_unlock(inode_out); + return ret; } diff --git a/fs/proc/Makefile b/fs/proc/Makefile index bcbca3ed17c9f261c504154d16fee66f71538bd3..ada0c2115b18a4a9e1a571ebeb19e249cf20d708 100644 --- a/fs/proc/Makefile +++ b/fs/proc/Makefile @@ -35,3 +35,4 @@ proc-$(CONFIG_PRINTK) += kmsg.o proc-$(CONFIG_PROC_PAGE_MONITOR) += page.o proc-$(CONFIG_BOOT_CONFIG) += bootconfig.o proc-$(CONFIG_SECURITY_XPM) += xpm_region.o +obj-$(CONFIG_HIDE_MEM_ADDRESS) += memory_security/ diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index ffed75f833b7b362a1e5266f94d76c989b467b98..df435cd91a5bd50e492dc09962f8b2d3950db713 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -16,6 +16,7 @@ #include #include #include +#include #include "internal.h" static const struct dentry_operations proc_sys_dentry_operations; @@ -1380,6 +1381,38 @@ 
struct ctl_table_header *register_sysctl(const char *path, struct ctl_table *tab } EXPORT_SYMBOL(register_sysctl); +/** + * __register_sysctl_init() - register sysctl table to path + * @path: path name for sysctl base + * @table: This is the sysctl table that needs to be registered to the path + * @table_name: The name of sysctl table, only used for log printing when + * registration fails + * + * The sysctl interface is used by userspace to query or modify at runtime + * a predefined value set on a variable. These variables however have default + * values pre-set. Code which depends on these variables will always work even + * if register_sysctl() fails. If register_sysctl() fails you'd just loose the + * ability to query or modify the sysctls dynamically at run time. Chances of + * register_sysctl() failing on init are extremely low, and so for both reasons + * this function does not return any error as it is used by initialization code. + * + * Context: Can only be called after your respective sysctl base path has been + * registered. So for instance, most base directories are registered early on + * init before init levels are processed through proc_sys_init() and + * sysctl_init(). + */ +void __init __register_sysctl_init(const char *path, struct ctl_table *table, + const char *table_name) +{ + struct ctl_table_header *hdr = register_sysctl(path, table); + + if (unlikely(!hdr)) { + pr_err("failed when register_sysctl %s to %s\n", table_name, path); + return; + } + kmemleak_not_leak(hdr); +} + static char *append_path(const char *path, char *pos, const char *name) { int namelen; diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 5b486198a96889544323fe0ba626b29a27f133e8..28e63a7e33dbeb53336079e01131e38fc6b29ac2 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -28,6 +28,7 @@ #include #include #include "internal.h" +#include #define SEQ_PUT_DEC(str, val) \ seq_put_decimal_ull_width(m, str, (val) << (PAGE_SHIFT-10), 8) @@ -301,6 +302,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma) start = vma->vm_start; end = vma->vm_end; + CALL_HCK_LITE_HOOK(hideaddr_header_prefix_lhck, &start, &end, &flags, m, vma); show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino); /* @@ -737,9 +739,7 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask, page = device_private_entry_to_page(swpent); } if (page) { - int mapcount = page_mapcount(page); - - if (mapcount >= 2) + if (page_mapcount(page) >= 2 || hugetlb_pmd_shared(pte)) mss->shared_hugetlb += huge_page_size(hstate_vma(vma)); else mss->private_hugetlb += huge_page_size(hstate_vma(vma)); diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 913f5af9bf248988d16cfa53973da58a041a7b2c..0ebb6e6849082f5838d45a9904c053a21d87120b 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -1437,7 +1437,6 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) unsigned long safe_mask = 0; unsigned int commit_max_age = (unsigned int)-1; struct reiserfs_journal *journal = SB_JOURNAL(s); - char *new_opts; int err; char *qf_names[REISERFS_MAXQUOTAS]; unsigned int qfmt = 0; @@ -1445,10 +1444,6 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) int i; #endif - new_opts = kstrdup(arg, GFP_KERNEL); - if (arg && !new_opts) - return -ENOMEM; - sync_filesystem(s); reiserfs_write_lock(s); @@ -1599,7 +1594,6 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) out_err_unlock: reiserfs_write_unlock(s); out_err: - 
kfree(new_opts); return err; } diff --git a/fs/sharefs/file.c b/fs/sharefs/file.c index ffaa993767852b854e8dbe23fef7e87799ba55c5..c18380e9c841a6cfba2a08fe80443376417ba4c3 100644 --- a/fs/sharefs/file.c +++ b/fs/sharefs/file.c @@ -11,6 +11,21 @@ #include "sharefs.h" +static int sharefs_readdir(struct file *file, struct dir_context *ctx) +{ + int err; + struct file *lower_file = NULL; + struct dentry *dentry = file->f_path.dentry; + + lower_file = sharefs_lower_file(file); + err = iterate_dir(lower_file, ctx); + file->f_pos = lower_file->f_pos; + if (err >= 0) /* copy the atime */ + fsstack_copy_attr_atime(d_inode(dentry), + file_inode(lower_file)); + return err; +} + static int sharefs_open(struct inode *inode, struct file *file) { int err = 0; @@ -128,7 +143,7 @@ static int sharefs_fasync(int fd, struct file *file, int flag) */ static loff_t sharefs_file_llseek(struct file *file, loff_t offset, int whence) { - int err; + loff_t err; struct file *lower_file; err = generic_file_llseek(file, offset, whence); @@ -247,6 +262,8 @@ const struct file_operations sharefs_main_fops = { /* trimmed directory options */ const struct file_operations sharefs_dir_fops = { .llseek = sharefs_file_llseek, + .read = generic_read_dir, + .iterate = sharefs_readdir, .open = sharefs_open, .release = sharefs_file_release, .flush = sharefs_flush, diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h index b3fdc8212c5f52d940391ec0345d6f1ab805efcf..95f8e890176899ea5fdde45c3d161007d3acd47f 100644 --- a/fs/squashfs/squashfs_fs.h +++ b/fs/squashfs/squashfs_fs.h @@ -183,7 +183,7 @@ static inline int squashfs_block_size(__le32 raw) #define SQUASHFS_ID_BLOCK_BYTES(A) (SQUASHFS_ID_BLOCKS(A) *\ sizeof(u64)) /* xattr id lookup table defines */ -#define SQUASHFS_XATTR_BYTES(A) ((A) * sizeof(struct squashfs_xattr_id)) +#define SQUASHFS_XATTR_BYTES(A) (((u64) (A)) * sizeof(struct squashfs_xattr_id)) #define SQUASHFS_XATTR_BLOCK(A) (SQUASHFS_XATTR_BYTES(A) / \ SQUASHFS_METADATA_SIZE) diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h index 166e98806265bf4e695668ebbb7a9658d53c2e53..8f9445e290e72a78a684e4401724d833052f44c8 100644 --- a/fs/squashfs/squashfs_fs_sb.h +++ b/fs/squashfs/squashfs_fs_sb.h @@ -63,7 +63,7 @@ struct squashfs_sb_info { long long bytes_used; unsigned int inodes; unsigned int fragments; - int xattr_ids; + unsigned int xattr_ids; unsigned int ids; }; #endif diff --git a/fs/squashfs/xattr.h b/fs/squashfs/xattr.h index d8a270d3ac4cb267dbbbc4097c2b1e7034e66072..f1a463d8bfa020d60605d2d4530e746dd15d300d 100644 --- a/fs/squashfs/xattr.h +++ b/fs/squashfs/xattr.h @@ -10,12 +10,12 @@ #ifdef CONFIG_SQUASHFS_XATTR extern __le64 *squashfs_read_xattr_id_table(struct super_block *, u64, - u64 *, int *); + u64 *, unsigned int *); extern int squashfs_xattr_lookup(struct super_block *, unsigned int, int *, unsigned int *, unsigned long long *); #else static inline __le64 *squashfs_read_xattr_id_table(struct super_block *sb, - u64 start, u64 *xattr_table_start, int *xattr_ids) + u64 start, u64 *xattr_table_start, unsigned int *xattr_ids) { struct squashfs_xattr_id_table *id_table; diff --git a/fs/squashfs/xattr_id.c b/fs/squashfs/xattr_id.c index 087cab8c78f4e86ea7c97e3d3b4266896159f29d..c8469c656e0dcfd3cecffd56f58bd8a1e0ff009d 100644 --- a/fs/squashfs/xattr_id.c +++ b/fs/squashfs/xattr_id.c @@ -56,7 +56,7 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index, * Read uncompressed xattr id lookup table indexes from disk into memory */ __le64 *squashfs_read_xattr_id_table(struct 
super_block *sb, u64 table_start, - u64 *xattr_table_start, int *xattr_ids) + u64 *xattr_table_start, unsigned int *xattr_ids) { struct squashfs_sb_info *msblk = sb->s_fs_info; unsigned int len, indexes; diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c index c0b84e960b20c138b1e3fe1cfe77c13f32a9428e..9cb05ef9b9dd99f6ac8bb04d37bf8d35a8737878 100644 --- a/fs/ubifs/budget.c +++ b/fs/ubifs/budget.c @@ -212,11 +212,10 @@ long long ubifs_calc_available(const struct ubifs_info *c, int min_idx_lebs) subtract_lebs += 1; /* - * The GC journal head LEB is not really accessible. And since - * different write types go to different heads, we may count only on - * one head's space. + * Since different write types go to different heads, we should + * reserve one leb for each head. */ - subtract_lebs += c->jhead_cnt - 1; + subtract_lebs += c->jhead_cnt; /* We also reserve one LEB for deletions, which bypass budgeting */ subtract_lebs += 1; @@ -403,7 +402,7 @@ static int calc_dd_growth(const struct ubifs_info *c, dd_growth = req->dirtied_page ? c->bi.page_budget : 0; if (req->dirtied_ino) - dd_growth += c->bi.inode_budget << (req->dirtied_ino - 1); + dd_growth += c->bi.inode_budget * req->dirtied_ino; if (req->mod_dent) dd_growth += c->bi.dent_budget; dd_growth += req->dirtied_ino_d; diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index f5777f59a1012510626685aa72225100c73b62c7..1686e7aea82318e572ef987d26ecc8de8d0dde24 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c @@ -1145,7 +1145,6 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry, int err, sz_change, len = strlen(symname); struct fscrypt_str disk_link; struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1, - .new_ino_d = ALIGN(len, 8), .dirtied_ino = 1 }; struct fscrypt_name nm; @@ -1161,6 +1160,7 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry, * Budget request settings: new inode, new direntry and changing parent * directory inode. */ + req.new_ino_d = ALIGN(disk_link.len - 1, 8); err = ubifs_budget_space(c, &req); if (err) return err; @@ -1318,6 +1318,8 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry, if (unlink) { ubifs_assert(c, inode_is_locked(new_inode)); + /* Budget for old inode's data when its nlink > 1. 
*/ + req.dirtied_ino_d = ALIGN(ubifs_inode(new_inode)->data_len, 8); err = ubifs_purge_xattrs(new_inode); if (err) return err; @@ -1570,6 +1572,10 @@ static int ubifs_xrename(struct inode *old_dir, struct dentry *old_dentry, return err; } + err = ubifs_budget_space(c, &req); + if (err) + goto out; + lock_4_inodes(old_dir, new_dir, NULL, NULL); time = current_time(old_dir); @@ -1595,6 +1601,7 @@ static int ubifs_xrename(struct inode *old_dir, struct dentry *old_dentry, unlock_4_inodes(old_dir, new_dir, NULL, NULL); ubifs_release_budget(c, &req); +out: fscrypt_free_filename(&fst_nm); fscrypt_free_filename(&snd_nm); return err; diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index df46f2d3ff8bfe9b798c25723bf409b8a91a9a71..d89f6a2f184a1905d6207d22769e89012189f867 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -1031,7 +1031,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc) if (page->index >= synced_i_size >> PAGE_SHIFT) { err = inode->i_sb->s_op->write_inode(inode, NULL); if (err) - goto out_unlock; + goto out_redirty; /* * The inode has been written, but the write-buffer has * not been synchronized, so in case of an unclean @@ -1059,11 +1059,17 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc) if (i_size > synced_i_size) { err = inode->i_sb->s_op->write_inode(inode, NULL); if (err) - goto out_unlock; + goto out_redirty; } return do_writepage(page, len); - +out_redirty: + /* + * redirty_page_for_writepage() won't call ubifs_dirty_inode() because + * it passes I_DIRTY_PAGES flag while calling __mark_inode_dirty(), so + * there is no need to do space budget for dirty inode. + */ + redirty_page_for_writepage(wbc, page); out_unlock: unlock_page(page); return err; diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 2dd79fd123ff7b8a654c47608340bc1ea390cd08..0343051db99b1519384d88681b6efefe776559c4 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -833,7 +833,7 @@ static int alloc_wbufs(struct ubifs_info *c) INIT_LIST_HEAD(&c->jheads[i].buds_list); err = ubifs_wbuf_init(c, &c->jheads[i].wbuf); if (err) - return err; + goto out_wbuf; c->jheads[i].wbuf.sync_callback = &bud_wbuf_callback; c->jheads[i].wbuf.jhead = i; @@ -841,7 +841,7 @@ static int alloc_wbufs(struct ubifs_info *c) c->jheads[i].log_hash = ubifs_hash_get_desc(c); if (IS_ERR(c->jheads[i].log_hash)) { err = PTR_ERR(c->jheads[i].log_hash); - goto out; + goto out_log_hash; } } @@ -854,9 +854,18 @@ static int alloc_wbufs(struct ubifs_info *c) return 0; -out: - while (i--) +out_log_hash: + kfree(c->jheads[i].wbuf.buf); + kfree(c->jheads[i].wbuf.inodes); + +out_wbuf: + while (i--) { + kfree(c->jheads[i].wbuf.buf); + kfree(c->jheads[i].wbuf.inodes); kfree(c->jheads[i].log_hash); + } + kfree(c->jheads); + c->jheads = NULL; return err; } diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c index 414266d4d1ba33256784b5865d8e362e2aa5157d..f2ea9912ac5cd921edbdf62c8b98a12d046c504d 100644 --- a/fs/ubifs/tnc.c +++ b/fs/ubifs/tnc.c @@ -267,11 +267,18 @@ static struct ubifs_znode *dirty_cow_znode(struct ubifs_info *c, if (zbr->len) { err = insert_old_idx(c, zbr->lnum, zbr->offs); if (unlikely(err)) - return ERR_PTR(err); + /* + * Obsolete znodes will be freed by tnc_destroy_cnext() + * or free_obsolete_znodes(), copied up znodes should + * be added back to tnc and freed by + * ubifs_destroy_tnc_subtree(). 
+ */ + goto out; err = add_idx_dirt(c, zbr->lnum, zbr->len); } else err = 0; +out: zbr->znode = zn; zbr->lnum = 0; zbr->offs = 0; @@ -3053,6 +3060,21 @@ static void tnc_destroy_cnext(struct ubifs_info *c) cnext = cnext->cnext; if (ubifs_zn_obsolete(znode)) kfree(znode); + else if (!ubifs_zn_cow(znode)) { + /* + * Don't forget to update clean znode count after + * committing failed, because ubifs will check this + * count while closing tnc. Non-obsolete znode could + * be re-dirtied during committing process, so dirty + * flag is untrustable. The flag 'COW_ZNODE' is set + * for each dirty znode before committing, and it is + * cleared as long as the znode become clean, so we + * can statistic clean znode count according to this + * flag. + */ + atomic_long_inc(&c->clean_zn_cnt); + atomic_long_inc(&ubifs_clean_zn_cnt); + } } while (cnext && cnext != c->cnext); } diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 9a4a3191ed07ca0b1ca2f53da66dd37d4e260271..6caac1774067565049ad9a9b7f780b0197217abd 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -1594,8 +1594,13 @@ static inline int ubifs_check_hmac(const struct ubifs_info *c, return crypto_memneq(expected, got, c->hmac_desc_len); } +#ifdef CONFIG_UBIFS_FS_AUTHENTICATION void ubifs_bad_hash(const struct ubifs_info *c, const void *node, const u8 *hash, int lnum, int offs); +#else +static inline void ubifs_bad_hash(const struct ubifs_info *c, const void *node, + const u8 *hash, int lnum, int offs) {}; +#endif int __ubifs_node_check_hash(const struct ubifs_info *c, const void *buf, const u8 *expected); diff --git a/fs/udf/file.c b/fs/udf/file.c index ad8eefad27d7f031d937aef7fdd5bb09083c2712..e283a62701b838065cca5a4d3318f8038dee3885 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c @@ -147,26 +147,24 @@ static ssize_t udf_file_write_iter(struct kiocb *iocb, struct iov_iter *from) goto out; down_write(&iinfo->i_data_sem); - if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { - loff_t end = iocb->ki_pos + iov_iter_count(from); - - if (inode->i_sb->s_blocksize < - (udf_file_entry_alloc_offset(inode) + end)) { - err = udf_expand_file_adinicb(inode); - if (err) { - inode_unlock(inode); - udf_debug("udf_expand_adinicb: err=%d\n", err); - return err; - } - } else { - iinfo->i_lenAlloc = max(end, inode->i_size); - up_write(&iinfo->i_data_sem); + if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB && + inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) + + iocb->ki_pos + iov_iter_count(from))) { + err = udf_expand_file_adinicb(inode); + if (err) { + inode_unlock(inode); + udf_debug("udf_expand_adinicb: err=%d\n", err); + return err; } } else up_write(&iinfo->i_data_sem); retval = __generic_file_write_iter(iocb, from); out: + down_write(&iinfo->i_data_sem); + if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB && retval > 0) + iinfo->i_lenAlloc = inode->i_size; + up_write(&iinfo->i_data_sem); inode_unlock(inode); if (retval > 0) { diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 2132bfab67f35d55b9cb26158231443b601c6442..d114774ecdea8d301a12db78a66d54b08253c687 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -442,7 +442,7 @@ static int udf_get_block(struct inode *inode, sector_t block, * Block beyond EOF and prealloc extents? Just discard preallocation * as it is not useful and complicates things. 
*/ - if (((loff_t)block) << inode->i_blkbits > iinfo->i_lenExtents) + if (((loff_t)block) << inode->i_blkbits >= iinfo->i_lenExtents) udf_discard_prealloc(inode); udf_clear_extent_cache(inode); phys = inode_getblk(inode, block, &err, &new); @@ -525,8 +525,10 @@ static int udf_do_extend_file(struct inode *inode, } if (fake) { - udf_add_aext(inode, last_pos, &last_ext->extLocation, - last_ext->extLength, 1); + err = udf_add_aext(inode, last_pos, &last_ext->extLocation, + last_ext->extLength, 1); + if (err < 0) + goto out_err; count++; } else { struct kernel_lb_addr tmploc; @@ -560,7 +562,7 @@ static int udf_do_extend_file(struct inode *inode, err = udf_add_aext(inode, last_pos, &last_ext->extLocation, last_ext->extLength, 1); if (err) - return err; + goto out_err; count++; } if (new_block_bytes) { @@ -569,7 +571,7 @@ static int udf_do_extend_file(struct inode *inode, err = udf_add_aext(inode, last_pos, &last_ext->extLocation, last_ext->extLength, 1); if (err) - return err; + goto out_err; count++; } @@ -583,6 +585,11 @@ static int udf_do_extend_file(struct inode *inode, return -EIO; return count; +out_err: + /* Remove extents we've created so far */ + udf_clear_extent_cache(inode); + udf_truncate_extents(inode); + return err; } /* Extend the final block of the file to final_block_len bytes */ @@ -797,19 +804,17 @@ static sector_t inode_getblk(struct inode *inode, sector_t block, c = 0; offset = 0; count += ret; - /* We are not covered by a preallocated extent? */ - if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) != - EXT_NOT_RECORDED_ALLOCATED) { - /* Is there any real extent? - otherwise we overwrite - * the fake one... */ - if (count) - c = !c; - laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | - inode->i_sb->s_blocksize; - memset(&laarr[c].extLocation, 0x00, - sizeof(struct kernel_lb_addr)); - count++; - } + /* + * Is there any real extent? - otherwise we overwrite the fake + * one... 
+ */ + if (count) + c = !c; + laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | + inode->i_sb->s_blocksize; + memset(&laarr[c].extLocation, 0x00, + sizeof(struct kernel_lb_addr)); + count++; endnum = c + 1; lastblock = 1; } else { @@ -1086,23 +1091,8 @@ static void udf_merge_extents(struct inode *inode, struct kernel_long_ad *laarr, blocksize - 1) >> blocksize_bits)))) { if (((li->extLength & UDF_EXTENT_LENGTH_MASK) + - (lip1->extLength & UDF_EXTENT_LENGTH_MASK) + - blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) { - lip1->extLength = (lip1->extLength - - (li->extLength & - UDF_EXTENT_LENGTH_MASK) + - UDF_EXTENT_LENGTH_MASK) & - ~(blocksize - 1); - li->extLength = (li->extLength & - UDF_EXTENT_FLAG_MASK) + - (UDF_EXTENT_LENGTH_MASK + 1) - - blocksize; - lip1->extLocation.logicalBlockNum = - li->extLocation.logicalBlockNum + - ((li->extLength & - UDF_EXTENT_LENGTH_MASK) >> - blocksize_bits); - } else { + (lip1->extLength & UDF_EXTENT_LENGTH_MASK) + + blocksize - 1) <= UDF_EXTENT_LENGTH_MASK) { li->extLength = lip1->extLength + (((li->extLength & UDF_EXTENT_LENGTH_MASK) + @@ -1393,6 +1383,7 @@ static int udf_read_inode(struct inode *inode, bool hidden_inode) ret = -EIO; goto out; } + iinfo->i_hidden = hidden_inode; iinfo->i_unique = 0; iinfo->i_lenEAttr = 0; iinfo->i_lenExtents = 0; @@ -1728,8 +1719,12 @@ static int udf_update_inode(struct inode *inode, int do_sync) if (S_ISDIR(inode->i_mode) && inode->i_nlink > 0) fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1); - else - fe->fileLinkCount = cpu_to_le16(inode->i_nlink); + else { + if (iinfo->i_hidden) + fe->fileLinkCount = cpu_to_le16(0); + else + fe->fileLinkCount = cpu_to_le16(inode->i_nlink); + } fe->informationLength = cpu_to_le64(inode->i_size); @@ -1900,8 +1895,13 @@ struct inode *__udf_iget(struct super_block *sb, struct kernel_lb_addr *ino, if (!inode) return ERR_PTR(-ENOMEM); - if (!(inode->i_state & I_NEW)) + if (!(inode->i_state & I_NEW)) { + if (UDF_I(inode)->i_hidden != hidden_inode) { + iput(inode); + return ERR_PTR(-EFSCORRUPTED); + } return inode; + } memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr)); err = udf_read_inode(inode, hidden_inode); diff --git a/fs/udf/super.c b/fs/udf/super.c index 3448098e547681001c6a24ea76031b3ecc464583..4af9ce34ee8047ddbff8285b0cfbfc581756d4c6 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -147,6 +147,7 @@ static struct inode *udf_alloc_inode(struct super_block *sb) ei->i_next_alloc_goal = 0; ei->i_strat4096 = 0; ei->i_streamdir = 0; + ei->i_hidden = 0; init_rwsem(&ei->i_data_sem); ei->cached_extent.lstart = -1; spin_lock_init(&ei->i_extent_cache_lock); diff --git a/fs/udf/udf_i.h b/fs/udf/udf_i.h index 06ff7006b822738ef73845b28fe68ae34f9890bf..312b7c9ef10e230df91bd03192b848181b6d8c5e 100644 --- a/fs/udf/udf_i.h +++ b/fs/udf/udf_i.h @@ -44,7 +44,8 @@ struct udf_inode_info { unsigned i_use : 1; /* unallocSpaceEntry */ unsigned i_strat4096 : 1; unsigned i_streamdir : 1; - unsigned reserved : 25; + unsigned i_hidden : 1; /* hidden system inode */ + unsigned reserved : 24; __u8 *i_data; struct kernel_lb_addr i_locStreamdir; __u64 i_lenStreams; diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h index 4fa620543d302db892b59a399b92619632e5aa56..2205859731dc230ac62947769d351340bfaafbb6 100644 --- a/fs/udf/udf_sb.h +++ b/fs/udf/udf_sb.h @@ -51,6 +51,8 @@ #define MF_DUPLICATE_MD 0x01 #define MF_MIRROR_FE_LOADED 0x02 +#define EFSCORRUPTED EUCLEAN + struct udf_meta_data { __u32 s_meta_file_loc; __u32 s_mirror_file_loc; diff --git a/fs/verity/enable.c b/fs/verity/enable.c index 
734862e608fd3d0a96c7f03ca564c5cfd763ac45..a74b107dc87ef9c1bc7650ce5afa8b1cea0716a3 100644 --- a/fs/verity/enable.c +++ b/fs/verity/enable.c @@ -14,6 +14,33 @@ #include #include +static int check_file_and_enable_verity(struct file *filp, + const struct fsverity_enable_arg *arg); + +#ifdef CONFIG_SECURITY_CODE_SIGN + +static int code_sign_init_descriptor(struct inode *inode, + const struct fsverity_enable_arg *_arg, struct fsverity_descriptor *_desc); + +static int code_sign_copy_merkle_tree(struct file *filp, const void *_desc, + const struct merkle_tree_params *params); + +#else /* !CONFIG_SECURITY_CODE_SIGN */ + +static inline int code_sign_init_descriptor(struct inode *inode, + const struct fsverity_enable_arg *_arg, struct fsverity_descriptor *_desc) +{ + return 0; +} + +static int code_sign_copy_merkle_tree(struct file *filp, + const void *_desc, + const struct merkle_tree_params *params) +{ + return 0; +} +#endif /* !CONFIG_SECURITY_CODE_SIGN */ + /* * Read a file data page for Merkle tree construction. Do aggressive readahead, * since we're sequentially reading the entire file. @@ -150,7 +177,8 @@ static int build_merkle_tree_level(struct file *filp, unsigned int level, */ static int build_merkle_tree(struct file *filp, const struct merkle_tree_params *params, - u8 *root_hash) + u8 *root_hash, + size_t data_size) { struct inode *inode = file_inode(filp); u8 *pending_hashes; @@ -159,7 +187,7 @@ static int build_merkle_tree(struct file *filp, unsigned int level; int err = -ENOMEM; - if (inode->i_size == 0) { + if (data_size == 0) { /* Empty file is a special case; root hash is all 0's */ memset(root_hash, 0, params->digest_size); return 0; @@ -177,7 +205,7 @@ static int build_merkle_tree(struct file *filp, * (level 0) and ascending to the root node (level 'num_levels - 1'). * Then at the end (level 'num_levels'), calculate the root hash. 
*/ - blocks = ((u64)inode->i_size + params->block_size - 1) >> + blocks = ((u64)data_size + params->block_size - 1) >> params->log_blocksize; for (level = 0; level <= params->num_levels; level++) { err = build_merkle_tree_level(filp, level, blocks, params, @@ -199,11 +227,8 @@ static int enable_verity(struct file *filp, const struct fsverity_enable_arg *arg) { struct inode *inode = file_inode(filp); - const struct fsverity_operations *vops = inode->i_sb->s_vop; - struct merkle_tree_params params = { }; struct fsverity_descriptor *desc; size_t desc_size = sizeof(*desc) + arg->sig_size; - struct fsverity_info *vi; int err; /* Start initializing the fsverity_descriptor */ @@ -234,11 +259,34 @@ static int enable_verity(struct file *filp, desc->data_size = cpu_to_le64(inode->i_size); + err = code_sign_init_descriptor(inode, arg, desc); + if (err) { + fsverity_err(inode, "Init code sign descriptor err: %u", err); + goto out; + } + + err = fsverity_enable_with_descriptor(filp, (void *)desc, desc_size); +out: + kfree(desc); + return err; +} + +int fsverity_enable_with_descriptor(struct file *filp, + void *_desc, size_t desc_size) +{ + struct inode *inode = file_inode(filp); + const struct fsverity_operations *vops = inode->i_sb->s_vop; + struct merkle_tree_params params = { }; + struct fsverity_descriptor *desc = (struct fsverity_descriptor *)_desc; + struct fsverity_info *vi; + int err; + /* Prepare the Merkle tree parameters */ err = fsverity_init_merkle_tree_params(¶ms, inode, - arg->hash_algorithm, + desc->hash_algorithm, desc->log_blocksize, - desc->salt, desc->salt_size); + desc->salt, desc->salt_size, + desc->data_size); if (err) goto out; @@ -255,6 +303,13 @@ static int enable_verity(struct file *filp, if (err) goto out; + err = code_sign_copy_merkle_tree(filp, _desc, ¶ms); + if (err < 0) { + fsverity_err(inode, "Error %d copying Merkle tree", err); + goto rollback; + } else if (err == 1) /* already copy merkle tree */ + goto skip_build; + /* * Build the Merkle tree. Don't hold the inode lock during this, since * on huge files this may take a very long time and we don't want to @@ -266,11 +321,13 @@ static int enable_verity(struct file *filp, */ pr_debug("Building Merkle tree...\n"); BUILD_BUG_ON(sizeof(desc->root_hash) < FS_VERITY_MAX_DIGEST_SIZE); - err = build_merkle_tree(filp, ¶ms, desc->root_hash); + err = build_merkle_tree(filp, ¶ms, desc->root_hash, desc->data_size); if (err) { fsverity_err(inode, "Error %d building Merkle tree", err); goto rollback; } + +skip_build: pr_debug("Done building Merkle tree. Root hash is %s:%*phN\n", params.hash_alg->name, params.digest_size, desc->root_hash); @@ -287,9 +344,9 @@ static int enable_verity(struct file *filp, goto rollback; } - if (arg->sig_size) + if (desc->sig_size) pr_debug("Storing a %u-byte PKCS#7 signature alongside the file\n", - arg->sig_size); + desc->sig_size); /* * Tell the filesystem to finish enabling verity on the file. 
@@ -317,7 +374,6 @@ static int enable_verity(struct file *filp, } out: kfree(params.hashstate); - kfree(desc); return err; rollback: @@ -326,6 +382,7 @@ static int enable_verity(struct file *filp, inode_unlock(inode); goto out; } +EXPORT_SYMBOL_GPL(fsverity_enable_with_descriptor); /** * fsverity_ioctl_enable() - enable verity on a file @@ -341,7 +398,6 @@ int fsverity_ioctl_enable(struct file *filp, const void __user *uarg) { struct inode *inode = file_inode(filp); struct fsverity_enable_arg arg; - int err; if (copy_from_user(&arg, uarg, sizeof(arg))) return -EFAULT; @@ -362,6 +418,15 @@ int fsverity_ioctl_enable(struct file *filp, const void __user *uarg) if (arg.sig_size > FS_VERITY_MAX_SIGNATURE_SIZE) return -EMSGSIZE; + return check_file_and_enable_verity(filp, &arg); +} +EXPORT_SYMBOL_GPL(fsverity_ioctl_enable); + +static int check_file_and_enable_verity(struct file *filp, + const struct fsverity_enable_arg *arg) +{ + struct inode *inode = file_inode(filp); + int err; /* * Require a regular file with write access. But the actual fd must * still be readonly so that we can lock out all writers. This is @@ -390,7 +455,7 @@ int fsverity_ioctl_enable(struct file *filp, const void __user *uarg) if (err) /* -ETXTBSY */ goto out_drop_write; - err = enable_verity(filp, &arg); + err = enable_verity(filp, arg); if (err) goto out_allow_write_access; @@ -415,4 +480,152 @@ int fsverity_ioctl_enable(struct file *filp, const void __user *uarg) mnt_drop_write_file(filp); return err; } -EXPORT_SYMBOL_GPL(fsverity_ioctl_enable); + +#ifdef CONFIG_SECURITY_CODE_SIGN +static int code_sign_copy_merkle_tree(struct file *filp, + const void *_desc, + const struct merkle_tree_params *params) +{ + struct inode *inode = file_inode(filp); + const struct fsverity_operations *vops = inode->i_sb->s_vop; + u8 *tree_data; + u64 blocks, i; + int err = -ENOMEM; + struct file_ra_state ra = { 0 }; + struct page *src_page; + void *addr; + u64 tree_offset, tree_start_index; + + if (!is_inside_tree_compact(_desc)) + return 0; + + tree_offset = get_tree_offset_compact(_desc); + + if (inode->i_size < tree_offset + params->tree_size) { + fsverity_err(inode, "File is too small to contain Merkle tree."); + return -EFAULT; + } + + tree_data = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!tree_data) + goto out; + + file_ra_state_init(&ra, filp->f_mapping); + + tree_start_index = tree_offset >> PAGE_SHIFT; + blocks = params->tree_size >> PAGE_SHIFT; + for (i = 0; i < blocks; i++) { + pr_debug("Copy Merkle tree page at %d\n", tree_start_index + i); + src_page = read_file_data_page(filp, tree_start_index + i, &ra, + blocks - i); + if (IS_ERR(src_page)) { + err = PTR_ERR(src_page); + fsverity_err(inode, + "Error %d reading Merkle tree page %llu", + err, tree_start_index + i); + goto out; + } + + addr = kmap_atomic(src_page); + memcpy(tree_data, addr, PAGE_SIZE); + kunmap_atomic(addr); + put_page(src_page); + err = vops->write_merkle_tree_block(inode, tree_data, i, + params->log_blocksize); + if (err) { + fsverity_err(inode, + "Error %d writing Merkle tree block %llu", + err, i); + goto out; + } + } + /* already copy merkle tree */ + err = 1; +out: + kfree(tree_data); + return err; +} + +static int code_sign_init_descriptor(struct inode *inode, + const struct fsverity_enable_arg *_arg, + struct fsverity_descriptor *_desc) +{ + struct code_sign_descriptor *desc = CAST_CODE_SIGN_DESC(_desc); + const struct code_sign_enable_arg *arg = (const struct code_sign_enable_arg *)_arg; + int algo_index; + + if (!arg->cs_version) + return 0; + + /* init 
extended fields */ + desc->flags = cpu_to_le32(arg->flags); + desc->data_size = cpu_to_le64(arg->data_size); + desc->tree_offset = cpu_to_le64(arg->tree_offset); + desc->cs_version = arg->cs_version; + + /* Get root hash if a Merkle tree carried in file */ + if (!IS_INSIDE_TREE(desc)) + return 0; + + /* Get size of root hash */ + algo_index = desc->hash_algorithm; + if (algo_index >= g_fsverity_hash_algs_num || + !fsverity_hash_algs[algo_index].name) { + fsverity_err(inode, "Unknown hash algorithm: %u", algo_index); + return -EINVAL; + } + + if (copy_from_user(desc->root_hash, u64_to_user_ptr(arg->root_hash_ptr), + fsverity_hash_algs[algo_index].digest_size)) { + return -EFAULT; + } + + return 0; +} + +/** + * fsverity_ioctl_enable_code_sign() - enable code signing on a file + * @filp: file to enable code signing on + * @uarg: user pointer to code_sign_enable_arg + * + * Enable fs-verity on a file with code signing features. + * + * Return: 0 on success, -errno on failure + */ +int fsverity_ioctl_enable_code_sign(struct file *filp, const void __user *uarg) +{ + struct inode *inode = file_inode(filp); + struct code_sign_enable_arg arg; + + if (copy_from_user(&arg, uarg, sizeof(arg))) + return -EFAULT; + + if (arg.version != 1) + return -EINVAL; + + if (arg.cs_version != 1) + return -EINVAL; + + if (arg.__reserved1 || + memchr_inv(arg.__reserved2, 0, sizeof(arg.__reserved2))) + return -EINVAL; + + if (arg.data_size > inode->i_size) + return -EINVAL; + + if (arg.tree_offset % PAGE_SIZE != 0) + return -EINVAL; + + if (arg.block_size != PAGE_SIZE) + return -EINVAL; + + if (arg.salt_size > sizeof_field(struct code_sign_descriptor, salt)) + return -EMSGSIZE; + + if (arg.sig_size > FS_VERITY_MAX_SIGNATURE_SIZE) + return -EMSGSIZE; + + return check_file_and_enable_verity(filp, (struct fsverity_enable_arg *)&arg); +} +EXPORT_SYMBOL_GPL(fsverity_ioctl_enable_code_sign); +#endif /* CONFIG_SECURITY_CODE_SIGN */ diff --git a/fs/verity/fsverity_private.h b/fs/verity/fsverity_private.h index e96d99d5145e1da25fe07f9e26650d57df28b2b6..6c329093327798b8f26af2ad3d72f7a566d7ad3f 100644 --- a/fs/verity/fsverity_private.h +++ b/fs/verity/fsverity_private.h @@ -17,6 +17,7 @@ #include #include #include +#include struct ahash_request; @@ -75,6 +76,9 @@ struct fsverity_info { u8 root_hash[FS_VERITY_MAX_DIGEST_SIZE]; u8 measurement[FS_VERITY_MAX_DIGEST_SIZE]; const struct inode *inode; +#ifdef CONFIG_SECURITY_CODE_SIGN + u64 verified_data_size; +#endif }; /* @@ -117,6 +121,8 @@ struct fsverity_signed_digest { extern struct fsverity_hash_alg fsverity_hash_algs[]; +extern int g_fsverity_hash_algs_num; + struct fsverity_hash_alg *fsverity_get_hash_alg(const struct inode *inode, unsigned int num); struct ahash_request *fsverity_alloc_hash_request(struct fsverity_hash_alg *alg, @@ -149,7 +155,8 @@ int fsverity_init_merkle_tree_params(struct merkle_tree_params *params, const struct inode *inode, unsigned int hash_algorithm, unsigned int log_blocksize, - const u8 *salt, size_t salt_size); + const u8 *salt, size_t salt_size, + u64 data_size); struct fsverity_info *fsverity_create_info(const struct inode *inode, void *desc, size_t desc_size); diff --git a/fs/verity/hash_algs.c b/fs/verity/hash_algs.c index c37e186ebeb6c455db68c6580a889d4378161fd7..488d392b3cc4e5afdb716d9a597a6d47fe37eba0 100644 --- a/fs/verity/hash_algs.c +++ b/fs/verity/hash_algs.c @@ -24,6 +24,8 @@ struct fsverity_hash_alg fsverity_hash_algs[] = { }, }; +int g_fsverity_hash_algs_num = ARRAY_SIZE(fsverity_hash_algs); + static 
DEFINE_MUTEX(fsverity_hash_alg_init_mutex); /** diff --git a/fs/verity/open.c b/fs/verity/open.c index 67d71f7b1b48350562f79f6a239b7cb95cdc7906..feae1845c9ddcf7627a5814d720817643b0de9c3 100644 --- a/fs/verity/open.c +++ b/fs/verity/open.c @@ -19,6 +19,7 @@ static struct kmem_cache *fsverity_info_cachep; * @log_blocksize: log base 2 of block size to use * @salt: pointer to salt (optional) * @salt_size: size of salt, possibly 0 + * @data_size: verified data size * * Validate the hash algorithm and block size, then compute the tree topology * (num levels, num blocks in each level, etc.) and initialize @params. @@ -29,7 +30,8 @@ int fsverity_init_merkle_tree_params(struct merkle_tree_params *params, const struct inode *inode, unsigned int hash_algorithm, unsigned int log_blocksize, - const u8 *salt, size_t salt_size) + const u8 *salt, size_t salt_size, + u64 data_size) { struct fsverity_hash_alg *hash_alg; int err; @@ -89,8 +91,8 @@ int fsverity_init_merkle_tree_params(struct merkle_tree_params *params, */ /* Compute number of levels and the number of blocks in each level */ - blocks = ((u64)inode->i_size + params->block_size - 1) >> log_blocksize; - pr_debug("Data is %lld bytes (%llu blocks)\n", inode->i_size, blocks); + blocks = ((u64)data_size + params->block_size - 1) >> params->log_blocksize; + pr_debug("Data is %lld bytes (%llu blocks)\n", data_size, blocks); while (blocks > 1) { if (params->num_levels >= FS_VERITY_MAX_LEVELS) { fsverity_err(inode, "Too many levels in Merkle tree"); @@ -132,11 +134,13 @@ static int compute_file_measurement(struct fsverity_hash_alg *hash_alg, u8 *measurement) { __le32 sig_size = desc->sig_size; - int err; + int err, cs_version; + cs_version = code_sign_before_measurement_hook(desc); desc->sig_size = 0; err = fsverity_hash_buffer(hash_alg, desc, sizeof(*desc), measurement); desc->sig_size = sig_size; + code_sign_after_measurement_hook(desc, cs_version); return err; } @@ -158,6 +162,13 @@ struct fsverity_info *fsverity_create_info(const struct inode *inode, return ERR_PTR(-EINVAL); } + err = code_sign_check_descriptor_hook(inode, _desc); + if (err < 0) { + fsverity_err(inode, "Invalid code sign descriptor."); + return ERR_PTR(err); + } else if (err == 1) + goto skip_part_check; + if (desc->version != 1) { fsverity_err(inode, "Unrecognized descriptor version: %u", desc->version); @@ -181,15 +192,21 @@ struct fsverity_info *fsverity_create_info(const struct inode *inode, return ERR_PTR(-EINVAL); } +skip_part_check: vi = kmem_cache_zalloc(fsverity_info_cachep, GFP_KERNEL); if (!vi) return ERR_PTR(-ENOMEM); vi->inode = inode; +#ifdef CONFIG_SECURITY_CODE_SIGN + vi->verified_data_size = le64_to_cpu(desc->data_size); +#endif + err = fsverity_init_merkle_tree_params(&vi->tree_params, inode, desc->hash_algorithm, desc->log_blocksize, - desc->salt, desc->salt_size); + desc->salt, desc->salt_size, + le64_to_cpu(desc->data_size)); if (err) { fsverity_err(inode, "Error %d initializing Merkle tree parameters", diff --git a/fs/verity/signature.c b/fs/verity/signature.c index b14ed96387ece05635780ef34e267729fe27ccb5..15e5879817e7714b5ed392580cdf63a00b423ba4 100644 --- a/fs/verity/signature.c +++ b/fs/verity/signature.c @@ -11,6 +11,7 @@ #include #include #include +#include /* * /proc/sys/fs/verity/require_signatures @@ -26,6 +27,14 @@ static int fsverity_require_signatures; */ static struct key *fsverity_keyring; +static inline int fsverity_verify_certchain(const void *raw_pkcs7, size_t pkcs7_len) +{ + int ret = 0; + + CALL_HCK_LITE_HOOK(code_sign_verify_certchain_lhck, 
raw_pkcs7, pkcs7_len, &ret); + return ret; +} + /** * fsverity_verify_signature() - check a verity file's signature * @vi: the file's fsverity_info @@ -69,6 +78,13 @@ int fsverity_verify_signature(const struct fsverity_info *vi, d->digest_size = cpu_to_le16(hash_alg->digest_size); memcpy(d->digest, vi->measurement, hash_alg->digest_size); + err = fsverity_verify_certchain(desc->signature, sig_size); + if (err) { + fsverity_err(inode, "verify cert chain failed, err = %d", err); + return err; + } + pr_debug("verify cert chain success\n"); + err = verify_pkcs7_signature(d, sizeof(*d) + hash_alg->digest_size, desc->signature, sig_size, fsverity_keyring, diff --git a/fs/verity/verify.c b/fs/verity/verify.c index a8b68c6f663d123c18ae1238dd3b1186ae846968..e77c8b374139a7d2f6f5f565096eab24f800177b 100644 --- a/fs/verity/verify.c +++ b/fs/verity/verify.c @@ -103,6 +103,13 @@ static bool verify_page(struct inode *inode, const struct fsverity_info *vi, pr_debug_ratelimited("Verifying data page %lu...\n", index); +#ifdef CONFIG_SECURITY_CODE_SIGN + if (index > (vi->verified_data_size >> PAGE_SHIFT)) { + pr_debug_ratelimited("Data out of verity range %lu\n", + vi->verified_data_size >> PAGE_SHIFT); + return true; + } +#endif /* * Starting at the leaf level, ascend the tree saving hash pages along * the way until we find a verified hash page, indicated by PageChecked; @@ -264,6 +271,23 @@ void fsverity_verify_bio(struct bio *bio) EXPORT_SYMBOL_GPL(fsverity_verify_bio); #endif /* CONFIG_BLOCK */ + +/** + * fsverity_get_verified_data_size() - get verified data size of a verity file + * @inode: the file's inode + * + * Return: verified data size + */ +u64 fsverity_get_verified_data_size(const struct inode *inode) +{ +#ifdef CONFIG_SECURITY_CODE_SIGN + return fsverity_get_info(inode)->verified_data_size; +#else + return inode->i_size; +#endif +} + + /** * fsverity_enqueue_verify_work() - enqueue work on the fs-verity workqueue * @work: the work to enqueue diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h index 360e6377e84ba84fee8076ded401ca69a7791c99..31ba85a4110a8f5b5c8a358f5a00979b61b4a8ac 100644 --- a/include/drm/drm_mipi_dsi.h +++ b/include/drm/drm_mipi_dsi.h @@ -283,6 +283,10 @@ int mipi_dsi_dcs_set_display_brightness(struct mipi_dsi_device *dsi, u16 brightness); int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi, u16 *brightness); +int mipi_dsi_dcs_set_display_brightness_large(struct mipi_dsi_device *dsi, + u16 brightness); +int mipi_dsi_dcs_get_display_brightness_large(struct mipi_dsi_device *dsi, + u16 *brightness); /** * struct mipi_dsi_driver - DSI driver diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h index 2696eb0fc14976a3420f89a9e63d4a585f017807..df9cbf02d03031104fc566d6ee8ead35df67fcc9 100644 --- a/include/linux/bootconfig.h +++ b/include/linux/bootconfig.h @@ -29,7 +29,7 @@ struct xbc_node { /* Maximum size of boot config is 32KB - 1 */ #define XBC_DATA_MAX (XBC_VALUE - 1) -#define XBC_NODE_MAX 1024 +#define XBC_NODE_MAX 8192 #define XBC_KEYLEN_MAX 256 #define XBC_DEPTH_MAX 16 diff --git a/include/linux/clk.h b/include/linux/clk.h index 7fd6a1febcf4f400dd0aaa5b7f128c3a36035793..1814eabb7c204beeafb0570949fad26719296fd2 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -418,6 +418,47 @@ int __must_check devm_clk_bulk_get_all(struct device *dev, */ struct clk *devm_clk_get(struct device *dev, const char *id); +/** + * devm_clk_get_prepared - devm_clk_get() + clk_prepare() + * @dev: device for clock "consumer" + * @id: clock 
consumer ID + * + * Context: May sleep. + * + * Return: a struct clk corresponding to the clock producer, or + * valid IS_ERR() condition containing errno. The implementation + * uses @dev and @id to determine the clock consumer, and thereby + * the clock producer. (IOW, @id may be identical strings, but + * clk_get may return different clock producers depending on @dev.) + * + * The returned clk (if valid) is prepared. Drivers must however assume + * that the clock is not enabled. + * + * The clock will automatically be unprepared and freed when the device + * is unbound from the bus. + */ +struct clk *devm_clk_get_prepared(struct device *dev, const char *id); + +/** + * devm_clk_get_enabled - devm_clk_get() + clk_prepare_enable() + * @dev: device for clock "consumer" + * @id: clock consumer ID + * + * Context: May sleep. + * + * Return: a struct clk corresponding to the clock producer, or + * valid IS_ERR() condition containing errno. The implementation + * uses @dev and @id to determine the clock consumer, and thereby + * the clock producer. (IOW, @id may be identical strings, but + * clk_get may return different clock producers depending on @dev.) + * + * The returned clk (if valid) is prepared and enabled. + * + * The clock will automatically be disabled, unprepared and freed + * when the device is unbound from the bus. + */ +struct clk *devm_clk_get_enabled(struct device *dev, const char *id); + /** * devm_clk_get_optional - lookup and obtain a managed reference to an optional * clock producer. @@ -429,6 +470,50 @@ struct clk *devm_clk_get(struct device *dev, const char *id); */ struct clk *devm_clk_get_optional(struct device *dev, const char *id); +/** + * devm_clk_get_optional_prepared - devm_clk_get_optional() + clk_prepare() + * @dev: device for clock "consumer" + * @id: clock consumer ID + * + * Context: May sleep. + * + * Return: a struct clk corresponding to the clock producer, or + * valid IS_ERR() condition containing errno. The implementation + * uses @dev and @id to determine the clock consumer, and thereby + * the clock producer. If no such clk is found, it returns NULL + * which serves as a dummy clk. That's the only difference compared + * to devm_clk_get_prepared(). + * + * The returned clk (if valid) is prepared. Drivers must however + * assume that the clock is not enabled. + * + * The clock will automatically be unprepared and freed when the + * device is unbound from the bus. + */ +struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id); + +/** + * devm_clk_get_optional_enabled - devm_clk_get_optional() + + * clk_prepare_enable() + * @dev: device for clock "consumer" + * @id: clock consumer ID + * + * Context: May sleep. + * + * Return: a struct clk corresponding to the clock producer, or + * valid IS_ERR() condition containing errno. The implementation + * uses @dev and @id to determine the clock consumer, and thereby + * the clock producer. If no such clk is found, it returns NULL + * which serves as a dummy clk. That's the only difference compared + * to devm_clk_get_enabled(). + * + * The returned clk (if valid) is prepared and enabled. + * + * The clock will automatically be disabled, unprepared and freed + * when the device is unbound from the bus. + */ +struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id); + /** * devm_get_clk_from_child - lookup and obtain a managed reference to a * clock producer from child node. 
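The kernel-doc above documents the new resource-managed clk helpers added by this hunk. As a minimal usage sketch (not part of the patch; the driver name, foo_probe() and the "bus" clock id are purely illustrative), a consumer that only needs its clock running while it is bound can call devm_clk_get_enabled() and drop every explicit clk_disable_unprepare()/clk_put() on the error and remove paths:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/*
	 * The returned clock is already prepared and enabled; it is
	 * disabled, unprepared and released automatically when the
	 * device is unbound, so no cleanup is needed here or in a
	 * remove() callback.
	 */
	clk = devm_clk_get_enabled(&pdev->dev, "bus");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return 0;
}

static struct platform_driver foo_driver = {
	.probe = foo_probe,
	.driver = {
		.name = "foo",
	},
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");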
@@ -773,12 +858,36 @@ static inline struct clk *devm_clk_get(struct device *dev, const char *id) return NULL; } +static inline struct clk *devm_clk_get_prepared(struct device *dev, + const char *id) +{ + return NULL; +} + +static inline struct clk *devm_clk_get_enabled(struct device *dev, + const char *id) +{ + return NULL; +} + static inline struct clk *devm_clk_get_optional(struct device *dev, const char *id) { return NULL; } +static inline struct clk *devm_clk_get_optional_prepared(struct device *dev, + const char *id) +{ + return NULL; +} + +static inline struct clk *devm_clk_get_optional_enabled(struct device *dev, + const char *id) +{ + return NULL; +} + static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks, struct clk_bulk_data *clks) { diff --git a/include/linux/code_sign.h b/include/linux/code_sign.h new file mode 100644 index 0000000000000000000000000000000000000000..c2264a1d6c37494cf03478097d2e600d5f7a11da --- /dev/null +++ b/include/linux/code_sign.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + */ + +#ifndef LINUX_INCLUDE_CODE_SIGN_H +#define LINUX_INCLUDE_CODE_SIGN_H + +#include + +/* + * Merkle tree properties. The file measurement is the hash of this structure + * excluding the signature and with the sig_size field set to 0, while version + * is replaced by code sign version. + */ +struct code_sign_descriptor { + __u8 version; /* must be 1 */ + __u8 hash_algorithm; /* Merkle tree hash algorithm */ + __u8 log_blocksize; /* log2 of size of data and tree blocks */ + __u8 salt_size; /* size of salt in bytes; 0 if none */ + __le32 sig_size; /* size of signature in bytes; 0 if none */ + __le64 data_size; /* size of file the Merkle tree is built over */ + __u8 root_hash[64]; /* Merkle tree root hash */ + __u8 salt[32]; /* salt prepended to each hashed block */ + __u32 flags; + __u32 __reserved1; /* must be 0 */ + __u64 tree_offset; /* merkle tree offset in file */ + __u8 __reserved2[127]; /* must be 0's */ + __u8 cs_version; /* code sign version */ + __u8 signature[]; /* optional PKCS#7 signature */ +}; + +#define FLAG_INSIDE_TREE (1 << 0) /* Merkle tree in file */ +#define IS_INSIDE_TREE(desc) ((desc)->flags & FLAG_INSIDE_TREE) + +#define CONST_CAST_CODE_SIGN_DESC(desc) ((const struct code_sign_descriptor *)(desc)) +#define CAST_CODE_SIGN_DESC(desc) ((struct code_sign_descriptor *)(desc)) + +static inline u64 get_tree_offset_compact(const void *desc) +{ + return CONST_CAST_CODE_SIGN_DESC(desc)->tree_offset; +} + +static inline bool is_inside_tree_compact(const void *_desc) +{ + const struct code_sign_descriptor *desc = CONST_CAST_CODE_SIGN_DESC(_desc); + + return desc->cs_version && IS_INSIDE_TREE(desc); +} + +static inline int code_sign_check_descriptor_hook(const struct inode *inode, const void *desc) +{ + int ret = 0; + + CALL_HCK_LITE_HOOK(code_sign_check_descriptor_lhck, inode, desc, &ret); + return ret; +} + +static inline int code_sign_before_measurement_hook(void *desc) +{ + int ret = 0; + + CALL_HCK_LITE_HOOK(code_sign_before_measurement_lhck, desc, &ret); + return ret; +} + +static inline void code_sign_after_measurement_hook(void *desc, int version) +{ + CALL_HCK_LITE_HOOK(code_sign_after_measurement_lhck, desc, version); +} + +#endif /* LINUX_INCLUDE_CODE_SIGN_H */ diff --git a/include/linux/fsverity.h b/include/linux/fsverity.h index c1144a4503920933cf13908c43d6882e8cf310e9..713cd5df939497594350675145e66c08f029e8bc 100644 --- a/include/linux/fsverity.h +++ 
b/include/linux/fsverity.h @@ -127,6 +127,8 @@ static inline struct fsverity_info *fsverity_get_info(const struct inode *inode) /* enable.c */ int fsverity_ioctl_enable(struct file *filp, const void __user *arg); +int fsverity_enable_with_descriptor(struct file *filp, + void *desc, size_t desc_size); /* measure.c */ @@ -143,6 +145,7 @@ void fsverity_cleanup_inode(struct inode *inode); bool fsverity_verify_page(struct page *page); void fsverity_verify_bio(struct bio *bio); void fsverity_enqueue_verify_work(struct work_struct *work); +u64 fsverity_get_verified_data_size(const struct inode *inode); #else /* !CONFIG_FS_VERITY */ @@ -159,6 +162,12 @@ static inline int fsverity_ioctl_enable(struct file *filp, return -EOPNOTSUPP; } +static inline int fsverity_enable_with_descriptor(struct file *filp, + void *desc, size_t desc_size) +{ + return -EOPNOTSUPP; +} + /* measure.c */ static inline int fsverity_ioctl_measure(struct file *filp, void __user *arg) @@ -201,8 +210,29 @@ static inline void fsverity_enqueue_verify_work(struct work_struct *work) WARN_ON(1); } +static inline u64 fsverity_get_verified_data_size(const struct inode *inode) +{ + WARN_ON(1); + return inode->i_size; +} + #endif /* !CONFIG_FS_VERITY */ +#ifdef CONFIG_SECURITY_CODE_SIGN + +/* enable.c */ + +int fsverity_ioctl_enable_code_sign(struct file *filp, const void __user *uarg); + +#else /* !CONFIG_SECURITY_CODE_SIGN */ + +static inline int fsverity_ioctl_enable_code_sign(struct file *filp, const void __user *uarg) +{ + return -EOPNOTSUPP; +} + +#endif /* !CONFIG_SECURITY_CODE_SIGN */ + /** * fsverity_active() - do reads from the inode need to go through fs-verity? * @inode: inode to check diff --git a/include/linux/hck/lite_hck_code_sign.h b/include/linux/hck/lite_hck_code_sign.h new file mode 100644 index 0000000000000000000000000000000000000000..13d139fc5d6b64a446aeae3b0b2c4769938693e4 --- /dev/null +++ b/include/linux/hck/lite_hck_code_sign.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + */ + +#ifndef LITE_HCK_CODE_SIGN_H +#define LITE_HCK_CODE_SIGN_H + +#include + +#ifndef CONFIG_HCK + +#define CALL_HCK_LITE_HOOK(name, args...) +#define REGISTER_HCK_LITE_HOOK(name, probe) +#define REGISTER_HCK_LITE_DATA_HOOK(name, probe, data) + +#else + +DECLARE_HCK_LITE_HOOK(code_sign_verify_certchain_lhck, + TP_PROTO(const void *raw_pkcs7, size_t pkcs7_len, int *ret), + TP_ARGS(raw_pkcs7, pkcs7_len, ret)); + +DECLARE_HCK_LITE_HOOK(code_sign_check_descriptor_lhck, + TP_PROTO(const struct inode *inode, const void *desc, int *ret), + TP_ARGS(inode, desc, ret)); + +DECLARE_HCK_LITE_HOOK(code_sign_before_measurement_lhck, + TP_PROTO(void *desc, int *ret), + TP_ARGS(desc, ret)); + +DECLARE_HCK_LITE_HOOK(code_sign_after_measurement_lhck, + TP_PROTO(void *desc, int version), + TP_ARGS(desc, version)); + +#endif /* CONFIG_HCK */ + +#endif /* LITE_HCK_CODE_SIGN_H */ diff --git a/include/linux/hck/lite_hck_hideaddr.h b/include/linux/hck/lite_hck_hideaddr.h new file mode 100644 index 0000000000000000000000000000000000000000..e7dbf9695b5538341ebceaab9a5c2093ac4ada50 --- /dev/null +++ b/include/linux/hck/lite_hck_hideaddr.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + */ + +#ifndef _LITE_HCK_HIDEADDR_H +#define _LITE_HCK_HIDEADDR_H + +#include "linux/seq_file.h" +#include "linux/mm_types.h" +#include + +#ifndef CONFIG_HCK +#define CALL_HCK_LITE_HOOK(name, args...) 
+#define REGISTER_HCK_LITE_HOOK(name, probe) +#define REGISTER_HCK_LITE_DATA_HOOK(name, probe, data) +#else + + +DECLARE_HCK_LITE_HOOK(hideaddr_header_prefix_lhck, + TP_PROTO(unsigned long *start, unsigned long *end, vm_flags_t *flags, struct seq_file *m, struct vm_area_struct *vma), + TP_ARGS(start, end, flags, m, vma)); + +#endif /* CONFIG_HCK */ +#endif /* _LITE_HCK_HIDEADDR_H */ diff --git a/include/linux/hck/lite_hck_inet.h b/include/linux/hck/lite_hck_inet.h index 77eff93a8044a8af97cf33c1811efac073972a4d..5dd1ecd837fe17ab62067c1ffcacb64f04530ada 100644 --- a/include/linux/hck/lite_hck_inet.h +++ b/include/linux/hck/lite_hck_inet.h @@ -6,7 +6,6 @@ #ifndef LITE_HCK_INET_H #define LITE_HCK_INET_H -#include #include #ifndef CONFIG_HCK @@ -22,10 +21,6 @@ DECLARE_HCK_LITE_HOOK(nip_ninet_ehashfn_lhck, TP_PROTO(const struct sock *sk, u32 *ret), TP_ARGS(sk, ret)); -DECLARE_HCK_LITE_HOOK(nip_ninet_gifconf_lhck, - TP_PROTO(struct net_device *dev, char __user *buf, int len, int size, int *ret), - TP_ARGS(dev, buf, len, size, ret)); - #endif /* CONFIG_HCK */ #endif /* LITE_HCK_INET_H */ diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 955b19dc28a82bca90f01e18ec6d0eca42dedc22..99b73fc4a82465f3947e5ca818551cb49dd67cac 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -144,7 +145,7 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to, vm_flags_t vm_flags); long hugetlb_unreserve_pages(struct inode *inode, long start, long end, long freed); -bool isolate_huge_page(struct page *page, struct list_head *list); +int isolate_hugetlb(struct page *page, struct list_head *list); void putback_active_hugepage(struct page *page); void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason); void free_huge_page(struct page *page); @@ -325,9 +326,9 @@ static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, return NULL; } -static inline bool isolate_huge_page(struct page *page, struct list_head *list) +static inline int isolate_hugetlb(struct page *page, struct list_head *list) { - return false; + return -EBUSY; } static inline void putback_active_hugepage(struct page *page) @@ -541,7 +542,10 @@ static inline struct hstate *hstate_sizelog(int page_size_log) if (!page_size_log) return &default_hstate; - return size_to_hstate(1UL << page_size_log); + if (page_size_log < BITS_PER_LONG) + return size_to_hstate(1UL << page_size_log); + + return NULL; } static inline struct hstate *hstate_vma(struct vm_area_struct *vma) @@ -942,4 +946,16 @@ static inline __init void hugetlb_cma_check(void) } #endif +#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE +static inline bool hugetlb_pmd_shared(pte_t *pte) +{ + return page_count(virt_to_page(pte)) > 1; +} +#else +static inline bool hugetlb_pmd_shared(pte_t *pte) +{ + return false; +} +#endif + #endif /* _LINUX_HUGETLB_H */ diff --git a/include/linux/ima.h b/include/linux/ima.h index 8fa7bcfb2da2c0e3d98ca6e03abed1f26193bcf4..cd8483fa703eaf844412a2add188b01ed967f3ca 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -18,7 +18,8 @@ extern int ima_bprm_check(struct linux_binprm *bprm); extern int ima_file_check(struct file *file, int mask); extern void ima_post_create_tmpfile(struct inode *inode); extern void ima_file_free(struct file *file); -extern int ima_file_mmap(struct file *file, unsigned long prot); +extern int ima_file_mmap(struct file *file, unsigned long reqprot, + unsigned long prot, unsigned 
long flags); extern int ima_file_mprotect(struct vm_area_struct *vma, unsigned long prot); extern int ima_load_data(enum kernel_load_data_id id, bool contents); extern int ima_post_load_data(char *buf, loff_t size, @@ -70,7 +71,8 @@ static inline void ima_file_free(struct file *file) return; } -static inline int ima_file_mmap(struct file *file, unsigned long prot) +static inline int ima_file_mmap(struct file *file, unsigned long reqprot, + unsigned long prot, unsigned long flags) { return 0; } diff --git a/include/linux/irq.h b/include/linux/irq.h index 607bee9271bd7b91d70d662b03b202a8f0a9694c..b89a8ac83d1bc0df37c50e316d60efd53b37a4ee 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -116,7 +116,7 @@ enum { * IRQ_SET_MASK_NOCPY - OK, chip did update irq_common_data.affinity * IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to * support stacked irqchips, which indicates skipping - * all descendent irqchips. + * all descendant irqchips. */ enum { IRQ_SET_MASK_OK = 0, @@ -302,7 +302,7 @@ static inline bool irqd_is_level_type(struct irq_data *d) /* * Must only be called of irqchip.irq_set_affinity() or low level - * hieararchy domain allocation functions. + * hierarchy domain allocation functions. */ static inline void irqd_set_single_target(struct irq_data *d) { diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 5745491303e037a387ba58b5c492ac52653cf000..fdb22e0f9a91e38ddc0f12d36fcaf0c5eaf38cd8 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -32,7 +32,7 @@ struct pt_regs; * @last_unhandled: aging timer for unhandled count * @irqs_unhandled: stats field for spurious unhandled interrupts * @threads_handled: stats field for deferred spurious detection of threaded handlers - * @threads_handled_last: comparator field for deferred spurious detection of theraded handlers + * @threads_handled_last: comparator field for deferred spurious detection of threaded handlers * @lock: locking for SMP * @affinity_hint: hint to user space for preferred irq affinity * @affinity_notify: context for notification of affinity changes diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index ea5a337e0f8b80ac8c97197039c4c391f8675096..9b9743f7538c44ce96beca807c07da8c1fe715a7 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -256,7 +256,7 @@ static inline struct fwnode_handle *irq_domain_alloc_fwnode(phys_addr_t *pa) } void irq_domain_free_fwnode(struct fwnode_handle *fwnode); -struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size, +struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size, irq_hw_number_t hwirq_max, int direct_max, const struct irq_domain_ops *ops, void *host_data); diff --git a/include/linux/kernel.h b/include/linux/kernel.h index f5392d96d68863534585487a1b147857b648befe..66948e1bf4fa6d9dc1160f9c572c95bf25c8ac04 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -47,6 +47,8 @@ */ #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) +#define PTR_IF(cond, ptr) ((cond) ? (ptr) : NULL) + #define u64_to_user_ptr(x) ( \ { \ typecheck(u64, (x)); \ @@ -320,6 +322,7 @@ extern long (*panic_blink)(int state); __printf(1, 2) void panic(const char *fmt, ...) 
__noreturn __cold; void nmi_panic(struct pt_regs *regs, const char *msg); +void check_panic_on_warn(const char *origin); extern void oops_enter(void); extern void oops_exit(void); extern bool oops_may_print(void); @@ -520,12 +523,6 @@ static inline u32 int_sqrt64(u64 x) } #endif -#ifdef CONFIG_SMP -extern unsigned int sysctl_oops_all_cpu_backtrace; -#else -#define sysctl_oops_all_cpu_backtrace 0 -#endif /* CONFIG_SMP */ - extern void bust_spinlocks(int yes); extern int panic_timeout; extern unsigned long panic_print; diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 8fff3500d50eea76e543e44b8ffda3f4ae704004..1160e20995a02741ddb49f763da8d1b62bf3629e 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -73,7 +73,7 @@ extern unsigned int kstat_irqs_usr(unsigned int irq); /* * Number of interrupts per cpu, since bootup */ -static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu) +static inline unsigned long kstat_cpu_irqs_sum(unsigned int cpu) { return kstat_cpu(cpu).irqs_sum; } diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 4dbebd319b6f5d3117449ba5e3087b19b7cc22a1..18b7c40ffb37adce316d7cdb0c248712933dcbbd 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -342,6 +342,8 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table, size_t *length, loff_t *ppos); #endif extern void wait_for_kprobe_optimizer(void); +bool optprobe_queued_unopt(struct optimized_kprobe *op); +bool kprobe_disarmed(struct kprobe *p); #else static inline void wait_for_kprobe_optimizer(void) { } #endif /* CONFIG_OPTPROBES */ diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 5491ad5f48a94e4ef44922155b8f57f898dd0de0..33442fd018a0641173be2e604a002cad160b9bcf 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1789,6 +1789,8 @@ struct nfs_rpc_ops { struct nfs_server *(*create_server)(struct fs_context *); struct nfs_server *(*clone_server)(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, rpc_authflavor_t); + void (*enable_swap)(struct inode *inode); + void (*disable_swap)(struct inode *inode); }; /* diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h index 06409a6c40bcb2a0ca7e8ba9408fce159b0d9b6a..5e07f3cfad3018ead3dcc4f839d8feca0eeb1ee2 100644 --- a/include/linux/nvmem-provider.h +++ b/include/linux/nvmem-provider.h @@ -49,7 +49,7 @@ enum nvmem_type { * @word_size: Minimum read/write access granularity. * @stride: Minimum read/write access stride. * @priv: User context passed to read/write callbacks. - * @wp-gpio: Write protect pin + * @ignore_wp: Write Protect pin is managed by the provider. * * Note: A default "nvmem" name will be assigned to the device if * no name is specified in its configuration. 
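For the nvmem change documented above (wp_gpio dropped, ignore_wp meaning "the provider manages the Write Protect pin itself"; the struct change follows below), a provider would register roughly as in this sketch. The driver name, sizes and accessor callbacks are assumptions, not taken from this patch:

#include <linux/err.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>

/* Hypothetical accessors; a real driver talks to its hardware here. */
static int foo_eeprom_read(void *priv, unsigned int off, void *val, size_t bytes)
{
	return 0;
}

static int foo_eeprom_write(void *priv, unsigned int off, void *val, size_t bytes)
{
	return 0;
}

static int foo_eeprom_probe(struct platform_device *pdev)
{
	struct nvmem_config cfg = {
		.name      = "foo-eeprom",
		.dev       = &pdev->dev,
		.size      = 256,
		.word_size = 1,
		.stride    = 1,
		.ignore_wp = true,	/* driver handles the WP pin itself */
		.reg_read  = foo_eeprom_read,
		.reg_write = foo_eeprom_write,
	};

	return PTR_ERR_OR_ZERO(devm_nvmem_register(&pdev->dev, &cfg));
}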
In such case "" is @@ -63,12 +63,12 @@ struct nvmem_config { const char *name; int id; struct module *owner; - struct gpio_desc *wp_gpio; const struct nvmem_cell_info *cells; int ncells; enum nvmem_type type; bool read_only; bool root_only; + bool ignore_wp; bool no_of_node; nvmem_reg_read_t reg_read; nvmem_reg_write_t reg_write; diff --git a/include/linux/pci.h b/include/linux/pci.h index 78243803fc7d91fa6473c31ad409f709d13cf68f..bc16207a360b20d1800ac3029ac1894ef500b6f1 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -539,6 +539,7 @@ struct pci_host_bridge { struct msi_controller *msi; unsigned int ignore_reset_delay:1; /* For entire hierarchy */ unsigned int no_ext_tags:1; /* No Extended Tags */ + unsigned int no_inc_mrrs:1; /* No Increase MRRS */ unsigned int native_aer:1; /* OS may use PCIe AER */ unsigned int native_pcie_hotplug:1; /* OS may use PCIe hotplug */ unsigned int native_shpc_hotplug:1; /* OS may use SHPC hotplug */ diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 69e310173fbca8960009de557deda531da76fe43..4b34a5c125999c2b03eecaf51d5309fe84e15ee5 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -3033,6 +3033,8 @@ #define PCI_DEVICE_ID_INTEL_VMD_9A0B 0x9a0b #define PCI_DEVICE_ID_INTEL_S21152BB 0xb152 +#define PCI_VENDOR_ID_WANGXUN 0x8088 + #define PCI_VENDOR_ID_SCALEMP 0x8686 #define PCI_DEVICE_ID_SCALEMP_VSMP_CTL 0x1010 @@ -3113,6 +3115,8 @@ #define PCI_VENDOR_ID_3COM_2 0xa727 +#define PCI_VENDOR_ID_SOLIDRUN 0xd063 + #define PCI_VENDOR_ID_DIGIUM 0xd161 #define PCI_DEVICE_ID_DIGIUM_HFC4S 0xb410 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 67a50c78232fe2e351d8700761f36e4d3b4ea927..03e454582bd3a82bf63a3ea138f0ba915afd12a0 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -659,6 +659,7 @@ struct perf_event { /* The cumulative AND of all event_caps for events in this group. 
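The new PCI_VENDOR_ID_WANGXUN and PCI_VENDOR_ID_SOLIDRUN entries above get consumed in the usual places, i.e. driver ID tables or quirks. A sketch of a match table; the device ID is a placeholder, not something defined by this patch:

#include <linux/module.h>
#include <linux/pci.h>

#define FOO_DEV_ID_EXAMPLE 0x1001	/* hypothetical device ID */

static const struct pci_device_id foo_pci_ids[] = {
	{ PCI_VDEVICE(WANGXUN, FOO_DEV_ID_EXAMPLE) },
	{ }
};
MODULE_DEVICE_TABLE(pci, foo_pci_ids);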
*/ int group_caps; + unsigned int group_generation; struct perf_event *group_leader; struct pmu *pmu; void *pmu_private; diff --git a/include/linux/random.h b/include/linux/random.h index 917470c4490ac9015421d5bccc717ca6f0366e3d..ed2bac6c7a8ac5749fbaf97def4a301437587f59 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -19,14 +19,14 @@ void add_input_randomness(unsigned int type, unsigned int code, void add_interrupt_randomness(int irq) __latent_entropy; void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy); -#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__) static inline void add_latent_entropy(void) { +#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__) add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy)); -} #else -static inline void add_latent_entropy(void) { } + add_device_randomness(NULL, 0); #endif +} void get_random_bytes(void *buf, size_t len); size_t __must_check get_random_bytes_arch(void *buf, size_t len); diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 095b3b39bd032cc5062f4b6163cd625e19c57c52..ef8d56b18da6b2726e03966ef2a6c7dfb7909cde 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -189,6 +189,7 @@ void synchronize_rcu_tasks_rude(void); #define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false) void exit_tasks_rcu_start(void); +void exit_tasks_rcu_stop(void); void exit_tasks_rcu_finish(void); #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */ #define rcu_tasks_qs(t, preempt) do { } while (0) @@ -196,6 +197,7 @@ void exit_tasks_rcu_finish(void); #define call_rcu_tasks call_rcu #define synchronize_rcu_tasks synchronize_rcu static inline void exit_tasks_rcu_start(void) { } +static inline void exit_tasks_rcu_stop(void) { } static inline void exit_tasks_rcu_finish(void) { } #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */ @@ -306,11 +308,18 @@ static inline int rcu_read_lock_any_held(void) * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met * @c: condition to check * @s: informative message + * + * This checks debug_lockdep_rcu_enabled() before checking (c) to + * prevent early boot splats due to lockdep not yet being initialized, + * and rechecks it after checking (c) to prevent false-positive splats + * due to races with lockdep being disabled. See commit 3066820034b5dd + * ("rcu: Reject RCU_LOCKDEP_WARN() false positives") for more detail. 
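The reordering explained in the comment above only changes when lockdep is consulted, not how callers write the assertion (the updated macro definition follows right after this note). A typical caller, with the struct and lookup helper invented for illustration, looks like:

#include <linux/rcupdate.h>
#include <linux/rculist.h>

struct foo {
	struct list_head node;
	int id;
};

/* Illustrative only: assert that a lookup helper runs under RCU. */
static struct foo *foo_lookup(struct list_head *head, int id)
{
	struct foo *f;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "foo_lookup() needs rcu_read_lock() protection");

	list_for_each_entry_rcu(f, head, node)
		if (f->id == id)
			return f;
	return NULL;
}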
*/ #define RCU_LOCKDEP_WARN(c, s) \ do { \ static bool __section(".data.unlikely") __warned; \ - if ((c) && debug_lockdep_rcu_enabled() && !__warned) { \ + if (debug_lockdep_rcu_enabled() && (c) && \ + debug_lockdep_rcu_enabled() && !__warned) { \ __warned = true; \ lockdep_rcu_suspicious(__FILE__, __LINE__, s); \ } \ diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index 4ce511437a8aa4c2cccd7cac13b05decff847080..2832cc6be062b58bb8b37e965319ca1136343f81 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -61,6 +61,7 @@ extern void sched_post_fork(struct task_struct *p, extern void sched_dead(struct task_struct *p); void __noreturn do_task_dead(void); +void __noreturn make_task_dead(int signr); extern void proc_caches_init(void); diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 40df88728a6f44fe01ee28e04c95d38c5cd24d09..abf7b8ec1fb646248dd1ac1b3a6bed1befb0a9ef 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -199,6 +199,7 @@ struct plat_stmmacenet_data { int rss_en; int mac_port_sel_speed; bool en_tx_lpi_clockgating; + bool rx_clk_runs_in_lpi; int has_xgmac; bool vlan_fail_q_en; u8 vlan_fail_q; diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 51298a4f4623573a6628718193331fb4f31d5469..161eba9fd9122c52698b6a479d7c721a4f19f280 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -195,6 +195,9 @@ struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path, void unregister_sysctl_table(struct ctl_table_header * table); extern int sysctl_init(void); +extern void __register_sysctl_init(const char *path, struct ctl_table *table, + const char *table_name); +#define register_sysctl_init(path, table) __register_sysctl_init(path, table, #table) void do_sysctl_args(void); extern int pwrsw_enabled; diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index c7c6e8b8344d49871fd65524a30b5e1e1d02cbf6..20668760daa02fd18c2e8b032827d454f90d3cb1 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -348,6 +348,10 @@ copy_struct_from_user(void *dst, size_t ksize, const void __user *src, size_t size = min(ksize, usize); size_t rest = max(ksize, usize) - size; + /* Double check if ksize is larger than a known object size. */ + if (WARN_ON_ONCE(ksize > __builtin_object_size(dst, 1))) + return -E2BIG; + /* Deal with trailing bytes. 
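The object-size check just added to copy_struct_from_user() tightens the helper's contract (the trailing-bytes handling it protects continues below). For reference, the intended calling pattern for extensible structs looks like this sketch; the uapi struct and ioctl handler are assumptions for illustration only:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct foo_args {		/* extensible uapi-style struct (illustrative) */
	__u64 flags;
	__u64 addr;
	/* new fields are only ever appended */
};

static int foo_ioctl_set(const void __user *uarg, size_t usize)
{
	struct foo_args args;
	int ret;

	/*
	 * Copies min(sizeof(args), usize) bytes, zero-fills the rest and
	 * rejects non-zero trailing bytes from a larger userspace struct.
	 */
	ret = copy_struct_from_user(&args, sizeof(args), uarg, usize);
	if (ret)
		return ret;

	if (args.flags)
		return -EINVAL;
	return 0;
}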
*/ if (usize < ksize) { memset(dst + size, 0, rest); diff --git a/include/linux/units.h b/include/linux/units.h index aaf716364ec34a77b58499b4628223751e3b7be2..3457179f7116a48b5f129b1687c2278b24728938 100644 --- a/include/linux/units.h +++ b/include/linux/units.h @@ -4,6 +4,26 @@ #include +/* Metric prefixes in accordance with Système international (d'unités) */ +#define PETA 1000000000000000ULL +#define TERA 1000000000000ULL +#define GIGA 1000000000UL +#define MEGA 1000000UL +#define KILO 1000UL +#define HECTO 100UL +#define DECA 10UL +#define DECI 10UL +#define CENTI 100UL +#define MILLI 1000UL +#define MICRO 1000000UL +#define NANO 1000000000UL +#define PICO 1000000000000ULL +#define FEMTO 1000000000000000ULL + +#define MILLIWATT_PER_WATT 1000L +#define MICROWATT_PER_MILLIWATT 1000L +#define MICROWATT_PER_WATT 1000000L + #define ABSOLUTE_ZERO_MILLICELSIUS -273150 static inline long milli_kelvin_to_millicelsius(long t) diff --git a/include/linux/util_macros.h b/include/linux/util_macros.h index 72299f261b253392ead9e08977d033f43bf6b77c..43db6e47503c725b69c2c148fe1f190c0c26aa9d 100644 --- a/include/linux/util_macros.h +++ b/include/linux/util_macros.h @@ -38,4 +38,16 @@ */ #define find_closest_descending(x, a, as) __find_closest(x, a, as, >=) +/** + * is_insidevar - check if the @ptr points inside the @var memory range. + * @ptr: the pointer to a memory address. + * @var: the variable which address and size identify the memory range. + * + * Evaluates to true if the address in @ptr lies within the memory + * range allocated to @var. + */ +#define is_insidevar(ptr, var) \ + ((uintptr_t)(ptr) >= (uintptr_t)(var) && \ + (uintptr_t)(ptr) < (uintptr_t)(var) + sizeof(var)) + #endif diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 89ce8a50f2363ddaa670c32de63db90affe294e8..6538b11fadd538d496bde84026d4ac658a69801c 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -663,12 +663,8 @@ static inline u32 ipv6_addr_hash(const struct in6_addr *a) /* more secured version of ipv6_addr_hash() */ static inline u32 __ipv6_addr_jhash(const struct in6_addr *a, const u32 initval) { - u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1]; - - return jhash_3words(v, - (__force u32)a->s6_addr32[2], - (__force u32)a->s6_addr32[3], - initval); + return jhash2((__force const u32 *)a->s6_addr32, + ARRAY_SIZE(a->s6_addr32), initval); } static inline bool ipv6_addr_loopback(const struct in6_addr *a) diff --git a/include/net/netfilter/nf_tproxy.h b/include/net/netfilter/nf_tproxy.h index 82d0e41b76f224f02a5b8fb3fec6d4c64721066a..faa108b1ba675ba89724b16ba3c16df2776aeb6b 100644 --- a/include/net/netfilter/nf_tproxy.h +++ b/include/net/netfilter/nf_tproxy.h @@ -17,6 +17,13 @@ static inline bool nf_tproxy_sk_is_transparent(struct sock *sk) return false; } +static inline void nf_tproxy_twsk_deschedule_put(struct inet_timewait_sock *tw) +{ + local_bh_disable(); + inet_twsk_deschedule_put(tw); + local_bh_enable(); +} + /* assign a socket to the skb -- consumes sk */ static inline void nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk) { diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index e7e8c318925deed09a574d558b95b5ff42d02be8..61cd19ee51f4eb2e8a53577aec556beaacdb683e 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -1325,4 +1325,11 @@ static inline int skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res) return res->ingress ? netif_receive_skb(skb) : dev_queue_xmit(skb); } +/* Make sure qdisc is no longer in SCHED state. 
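The is_insidevar() macro added to util_macros.h above casts @var itself and takes sizeof(@var), so it is most naturally used with an array or other object whose name decays to its address and whose sizeof() covers the whole range. A minimal sketch with an invented buffer:

#include <linux/types.h>
#include <linux/util_macros.h>

static char rx_buf[64];	/* illustrative buffer */

/* True if @p points somewhere inside rx_buf[]. */
static bool points_into_rx_buf(const void *p)
{
	return is_insidevar(p, rx_buf);
}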
*/ +static inline void qdisc_synchronize(const struct Qdisc *q) +{ + while (test_bit(__QDISC_STATE_SCHED, &q->state)) + msleep(1); +} + #endif diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index be9ff0422c16265a6c30eeefecae8de887eaad22..be59e8df0bffc3c5cc26284a636631a29fc08ca1 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -1394,6 +1394,7 @@ struct sctp_stream_priorities { /* The next stream in line */ struct sctp_stream_out_ext *next; __u16 prio; + __u16 users; }; struct sctp_stream_out_ext { diff --git a/include/net/sock.h b/include/net/sock.h index 4a24f125e3f5197e69dd230d28647e57dcef3c5e..0c35ee2cf92fd87e8386a8b930d3ba4a3eb58a68 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -2256,6 +2256,19 @@ static inline __must_check bool skb_set_owner_sk_safe(struct sk_buff *skb, struc return false; } +static inline struct sk_buff *skb_clone_and_charge_r(struct sk_buff *skb, struct sock *sk) +{ + skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC)); + if (skb) { + if (sk_rmem_schedule(sk, skb, skb->truesize)) { + skb_set_owner_r(skb, sk); + return skb; + } + __kfree_skb(skb); + } + return NULL; +} + void sk_reset_timer(struct sock *sk, struct timer_list *timer, unsigned long expires); diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h index 037c77fb5dc55c98ec405bcb0bc57b76bd100cbf..c4de15f7a0a55c8115c77c4da4f5fa48b296be38 100644 --- a/include/scsi/scsi_transport_iscsi.h +++ b/include/scsi/scsi_transport_iscsi.h @@ -236,6 +236,14 @@ enum { ISCSI_SESSION_FREE, }; +enum { + ISCSI_SESSION_TARGET_UNBOUND, + ISCSI_SESSION_TARGET_ALLOCATED, + ISCSI_SESSION_TARGET_SCANNED, + ISCSI_SESSION_TARGET_UNBINDING, + ISCSI_SESSION_TARGET_MAX, +}; + #define ISCSI_MAX_TARGET -1 struct iscsi_cls_session { @@ -262,6 +270,7 @@ struct iscsi_cls_session { */ pid_t creator; int state; + int target_state; /* session target bind state */ int sid; /* session id */ void *dd_data; /* LLD private data */ struct device dev; /* sysfs transport/container device */ diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index c3039e97929a5db27de8d6f86131a9910752d5ae..32e93d55acf73dd57273ee7213861bf4f9d47f0f 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h @@ -16,6 +16,7 @@ #include struct device; +struct snd_pcm_substream; struct snd_soc_pcm_runtime; struct soc_enum; diff --git a/include/uapi/linux/dma-heap.h b/include/uapi/linux/dma-heap.h index 6f84fa08e074a3b8bdd9c627e4d43427e530212b..b95e533ed8c71081d981a1c9141b94e6ab177524 100644 --- a/include/uapi/linux/dma-heap.h +++ b/include/uapi/linux/dma-heap.h @@ -41,6 +41,29 @@ struct dma_heap_allocation_data { #define DMA_HEAP_IOC_MAGIC 'H' +enum dma_heap_flag_owner_id { + OWNER_DEFAULT = 0, + OWNER_GPU, + OWNER_MEDIA, + COUNT_DMA_HEAP_FLAG_OWNER, +}; + +#define OFFSET_BIT 56 /* 7 bytes */ + +/* Use the first byte (56-63 bits) of heap flags as owner_id flag */ +void set_owner_id_for_heap_flags(__u64 *heap_flags, __u64 owner_id) +{ + if (heap_flags == NULL) + return; + *heap_flags |= owner_id << OFFSET_BIT; +} + +/* To get the binary number of owner_id */ +__u64 get_owner_id_from_heap_flags(__u64 heap_flags) +{ + return heap_flags >> OFFSET_BIT; +} + /** * DOC: DMA_HEAP_IOCTL_ALLOC - allocate memory from pool * diff --git a/include/uapi/linux/fsverity.h b/include/uapi/linux/fsverity.h index da0daf6c193b4bd7f0d0e700ad74f5e639ada116..ae44df1b1d1763a85761f4002b69ed8b9cde3113 100644 --- a/include/uapi/linux/fsverity.h +++ b/include/uapi/linux/fsverity.h @@ -37,4 
+37,23 @@ struct fsverity_digest { #define FS_IOC_ENABLE_VERITY _IOW('f', 133, struct fsverity_enable_arg) #define FS_IOC_MEASURE_VERITY _IOWR('f', 134, struct fsverity_digest) +struct code_sign_enable_arg { + __u32 version; + __u32 hash_algorithm; + __u32 block_size; + __u32 salt_size; + __u64 salt_ptr; + __u32 sig_size; + __u32 __reserved1; + __u64 sig_ptr; + __u64 __reserved2[7]; + __u64 tree_offset; + __u64 root_hash_ptr; + __u64 data_size; + __u32 flags; + __u32 cs_version; +}; + +#define FS_IOC_ENABLE_CODE_SIGN _IOW('f', 200, struct code_sign_enable_arg) + #endif /* _UAPI_LINUX_FSVERITY_H */ diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h index d2f143393780c696793f4d466cf7a88b8631f1c6..860bbf6bf29cbafa59b3bbb5c6a5a8357feddce7 100644 --- a/include/uapi/linux/ip.h +++ b/include/uapi/linux/ip.h @@ -18,6 +18,7 @@ #ifndef _UAPI_LINUX_IP_H #define _UAPI_LINUX_IP_H #include +#include #include #define IPTOS_TOS_MASK 0x1E diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h index 766ab5c8ee655e99b0302964878686b120e806cd..d44d0483fd73f40f7a18a7c3cc5daaf81407e49c 100644 --- a/include/uapi/linux/ipv6.h +++ b/include/uapi/linux/ipv6.h @@ -4,6 +4,7 @@ #include #include +#include #include #include diff --git a/include/uapi/linux/netfilter/nf_conntrack_sctp.h b/include/uapi/linux/netfilter/nf_conntrack_sctp.h index edc6ddab0de6afcf7d5cf7284087e959c17d2ad4..2d6f80d75ae74c4eaa55809bf96ddcd5edd6c2bf 100644 --- a/include/uapi/linux/netfilter/nf_conntrack_sctp.h +++ b/include/uapi/linux/netfilter/nf_conntrack_sctp.h @@ -15,7 +15,7 @@ enum sctp_conntrack { SCTP_CONNTRACK_SHUTDOWN_RECD, SCTP_CONNTRACK_SHUTDOWN_ACK_SENT, SCTP_CONNTRACK_HEARTBEAT_SENT, - SCTP_CONNTRACK_HEARTBEAT_ACKED, + SCTP_CONNTRACK_HEARTBEAT_ACKED, /* no longer used */ SCTP_CONNTRACK_MAX }; diff --git a/include/uapi/linux/netfilter/nfnetlink_cttimeout.h b/include/uapi/linux/netfilter/nfnetlink_cttimeout.h index 6b20fb22717b249bcea8c354ef997ca0e04564b4..aa805e6d4e284d63ae43330e571453cacfc18acd 100644 --- a/include/uapi/linux/netfilter/nfnetlink_cttimeout.h +++ b/include/uapi/linux/netfilter/nfnetlink_cttimeout.h @@ -94,7 +94,7 @@ enum ctattr_timeout_sctp { CTA_TIMEOUT_SCTP_SHUTDOWN_RECD, CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT, CTA_TIMEOUT_SCTP_HEARTBEAT_SENT, - CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED, + CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED, /* no longer used */ __CTA_TIMEOUT_SCTP_MAX }; #define CTA_TIMEOUT_SCTP_MAX (__CTA_TIMEOUT_SCTP_MAX - 1) diff --git a/include/uapi/linux/usb/video.h b/include/uapi/linux/usb/video.h index bfdae12cdacf89d3bccb0f7381630b8fe516104d..c58854fb7d94a5bd695b3e59ab1c1c57ebeb951e 100644 --- a/include/uapi/linux/usb/video.h +++ b/include/uapi/linux/usb/video.h @@ -179,6 +179,36 @@ #define UVC_CONTROL_CAP_AUTOUPDATE (1 << 3) #define UVC_CONTROL_CAP_ASYNCHRONOUS (1 << 4) +/* 3.9.2.6 Color Matching Descriptor Values */ +enum uvc_color_primaries_values { + UVC_COLOR_PRIMARIES_UNSPECIFIED, + UVC_COLOR_PRIMARIES_BT_709_SRGB, + UVC_COLOR_PRIMARIES_BT_470_2_M, + UVC_COLOR_PRIMARIES_BT_470_2_B_G, + UVC_COLOR_PRIMARIES_SMPTE_170M, + UVC_COLOR_PRIMARIES_SMPTE_240M, +}; + +enum uvc_transfer_characteristics_values { + UVC_TRANSFER_CHARACTERISTICS_UNSPECIFIED, + UVC_TRANSFER_CHARACTERISTICS_BT_709, + UVC_TRANSFER_CHARACTERISTICS_BT_470_2_M, + UVC_TRANSFER_CHARACTERISTICS_BT_470_2_B_G, + UVC_TRANSFER_CHARACTERISTICS_SMPTE_170M, + UVC_TRANSFER_CHARACTERISTICS_SMPTE_240M, + UVC_TRANSFER_CHARACTERISTICS_LINEAR, + UVC_TRANSFER_CHARACTERISTICS_SRGB, +}; + +enum uvc_matrix_coefficients { + 
UVC_MATRIX_COEFFICIENTS_UNSPECIFIED, + UVC_MATRIX_COEFFICIENTS_BT_709, + UVC_MATRIX_COEFFICIENTS_FCC, + UVC_MATRIX_COEFFICIENTS_BT_470_2_B_G, + UVC_MATRIX_COEFFICIENTS_SMPTE_170M, + UVC_MATRIX_COEFFICIENTS_SMPTE_240M, +}; + /* ------------------------------------------------------------------------ * UVC structures */ diff --git a/include/uapi/linux/uvcvideo.h b/include/uapi/linux/uvcvideo.h index f80f05b3c423f3b2a5b0372ab03ba41f3e173348..2140923661934d4ef7ffffd25a15eae92d1aca37 100644 --- a/include/uapi/linux/uvcvideo.h +++ b/include/uapi/linux/uvcvideo.h @@ -86,7 +86,7 @@ struct uvc_xu_control_query { * struct. The first two fields are added by the driver, they can be used for * clock synchronisation. The rest is an exact copy of a UVC payload header. * Only complete objects with complete buffers are included. Therefore it's - * always sizeof(meta->ts) + sizeof(meta->sof) + meta->length bytes large. + * always sizeof(meta->ns) + sizeof(meta->sof) + meta->length bytes large. */ struct uvc_meta_buf { __u64 ns; diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index c1e4940091321a954585255fac248b06ff1ba057..1bca428d2b45be96fd9011cba957ec0cbcac78de 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -486,6 +486,7 @@ struct io_poll_iocb { struct file *file; struct wait_queue_head *head; __poll_t events; + int retries; struct wait_queue_entry wait; }; @@ -2460,6 +2461,15 @@ static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req) static inline bool io_run_task_work(void) { + /* + * PF_IO_WORKER never returns to userspace, so check here if we have + * notify work that needs processing. + */ + if (current->flags & PF_IO_WORKER && + test_thread_flag(TIF_NOTIFY_RESUME)) { + __set_current_state(TASK_RUNNING); + tracehook_notify_resume(NULL); + } if (test_thread_flag(TIF_NOTIFY_SIGNAL) || current->task_works) { __set_current_state(TASK_RUNNING); tracehook_notify_signal(); @@ -4986,7 +4996,7 @@ static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr)); sr->len = READ_ONCE(sqe->len); sr->bgid = READ_ONCE(sqe->buf_group); - sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL; + sr->msg_flags = READ_ONCE(sqe->msg_flags); if (sr->msg_flags & MSG_DONTWAIT) req->flags |= REQ_F_NOWAIT; @@ -5740,6 +5750,14 @@ enum { IO_APOLL_READY }; +/* + * We can't reliably detect loops in repeated poll triggers and issue + * subsequently failing. But rather than fail these immediately, allow a + * certain amount of retries before we give up. Given that this condition + * should _rarely_ trigger even once, we should be fine with a larger value. 
+ */ +#define APOLL_MAX_RETRY 128 + static int io_arm_poll_handler(struct io_kiocb *req) { const struct io_op_def *def = &io_op_defs[req->opcode]; @@ -5751,8 +5769,6 @@ static int io_arm_poll_handler(struct io_kiocb *req) if (!req->file || !file_can_poll(req->file)) return IO_APOLL_ABORTED; - if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED) - return IO_APOLL_ABORTED; if (!def->pollin && !def->pollout) return IO_APOLL_ABORTED; @@ -5770,8 +5786,13 @@ static int io_arm_poll_handler(struct io_kiocb *req) if (req->flags & REQ_F_POLLED) { apoll = req->apoll; kfree(apoll->double_poll); + if (unlikely(!--apoll->poll.retries)) { + apoll->double_poll = NULL; + return IO_APOLL_ABORTED; + } } else { apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC); + apoll->poll.retries = APOLL_MAX_RETRY; } if (unlikely(!apoll)) return IO_APOLL_ABORTED; @@ -9050,14 +9071,17 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov, pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM, pages, vmas); if (pret == nr_pages) { + struct file *file = vmas[0]->vm_file; + /* don't support file backed memory */ for (i = 0; i < nr_pages; i++) { - struct vm_area_struct *vma = vmas[i]; - - if (vma_is_shmem(vma)) + if (vmas[i]->vm_file != file) { + ret = -EINVAL; + break; + } + if (!file) continue; - if (vma->vm_file && - !is_file_hugepages(vma->vm_file)) { + if (!vma_is_shmem(vmas[i]) && !is_file_hugepages(file)) { ret = -EOPNOTSUPP; break; } @@ -9683,6 +9707,7 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx, while (!list_empty_careful(&ctx->iopoll_list)) { io_iopoll_try_reap_events(ctx); ret = true; + cond_resched(); } } diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index edec71f050c31b3bbd771415d52912d9e20d0e36..b27d88468a8d58a0098184b5738cae2dca5078cb 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -3541,6 +3541,7 @@ static int btf_datasec_resolve(struct btf_verifier_env *env, struct btf *btf = env->btf; u16 i; + env->resolve_mode = RESOLVE_TBD; for_each_vsi_from(i, v->next_member, v->t, vsi) { u32 var_type_id = vsi->type, type_id, type_size = 0; const struct btf_type *var_type = btf_type_by_id(env->btf, @@ -4273,6 +4274,7 @@ btf_get_prog_ctx_type(struct bpf_verifier_log *log, struct btf *btf, if (!ctx_struct) /* should not happen */ return NULL; +again: ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_struct->name_off); if (!ctx_tname) { /* should not happen */ @@ -4286,8 +4288,16 @@ btf_get_prog_ctx_type(struct bpf_verifier_log *log, struct btf *btf, * int socket_filter_bpf_prog(struct __sk_buff *skb) * { // no fields of skb are ever used } */ - if (strcmp(ctx_tname, tname)) - return NULL; + if (strcmp(ctx_tname, tname)) { + /* bpf_user_pt_regs_t is a typedef, so resolve it to + * underlying struct and check name again + */ + if (!btf_type_is_modifier(ctx_struct)) + return NULL; + while (btf_type_is_modifier(ctx_struct)) + ctx_struct = btf_type_by_id(btf_vmlinux, ctx_struct->type); + goto again; + } return ctx_type; } diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 00163e2c0db2c8a7a68ba37fe7bb68c5db145a2b..e4be00980afb6814c9796d71976039a7c2a740c2 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -573,6 +573,12 @@ static bool is_spilled_reg(const struct bpf_stack_state *stack) return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL; } +static void scrub_spilled_slot(u8 *stype) +{ + if (*stype != STACK_INVALID) + *stype = STACK_MISC; +} + static void print_verifier_state(struct bpf_verifier_env *env, const struct 
bpf_func_state *state) { @@ -1877,8 +1883,6 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, */ if (insn->src_reg != BPF_REG_FP) return 0; - if (BPF_SIZE(insn->code) != BPF_DW) - return 0; /* dreg = *(u64 *)[fp - off] was a fill from the stack. * that [fp - off] slot contains scalar that needs to be @@ -1901,8 +1905,6 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, /* scalars can only be spilled into stack */ if (insn->dst_reg != BPF_REG_FP) return 0; - if (BPF_SIZE(insn->code) != BPF_DW) - return 0; spi = (-insn->off - 1) / BPF_REG_SIZE; if (spi >= 64) { verbose(env, "BUG spi %d\n", spi); @@ -2279,16 +2281,33 @@ static bool __is_pointer_value(bool allow_ptr_leaks, return reg->type != SCALAR_VALUE; } +/* Copy src state preserving dst->parent and dst->live fields */ +static void copy_register_state(struct bpf_reg_state *dst, const struct bpf_reg_state *src) +{ + struct bpf_reg_state *parent = dst->parent; + enum bpf_reg_liveness live = dst->live; + + *dst = *src; + dst->parent = parent; + dst->live = live; +} + static void save_register_state(struct bpf_func_state *state, - int spi, struct bpf_reg_state *reg) + int spi, struct bpf_reg_state *reg, + int size) { int i; - state->stack[spi].spilled_ptr = *reg; - state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; + copy_register_state(&state->stack[spi].spilled_ptr, reg); + if (size == BPF_REG_SIZE) + state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; + + for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--) + state->stack[spi].slot_type[i - 1] = STACK_SPILL; - for (i = 0; i < BPF_REG_SIZE; i++) - state->stack[spi].slot_type[i] = STACK_SPILL; + /* size < 8 bytes spill */ + for (; i; i--) + scrub_spilled_slot(&state->stack[spi].slot_type[i - 1]); } /* check_stack_{read,write}_fixed_off functions track spill/fill of registers, @@ -2326,7 +2345,9 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, bool sanitize = reg && is_spillable_regtype(reg->type); for (i = 0; i < size; i++) { - if (state->stack[spi].slot_type[i] == STACK_INVALID) { + u8 type = state->stack[spi].slot_type[i]; + + if (type != STACK_MISC && type != STACK_ZERO) { sanitize = true; break; } @@ -2336,7 +2357,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, env->insn_aux_data[insn_idx].sanitize_stack_spill = true; } - if (reg && size == BPF_REG_SIZE && register_is_bounded(reg) && + if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) && !register_is_null(reg) && env->bpf_capable) { if (dst_reg != BPF_REG_FP) { /* The backtracking logic can only recognize explicit @@ -2349,7 +2370,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, if (err) return err; } - save_register_state(state, spi, reg); + save_register_state(state, spi, reg, size); } else if (reg && is_spillable_regtype(reg->type)) { /* register containing pointer is being spilled into stack */ if (size != BPF_REG_SIZE) { @@ -2361,7 +2382,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, verbose(env, "cannot spill pointers to stack into stack frame of the caller\n"); return -EINVAL; } - save_register_state(state, spi, reg); + save_register_state(state, spi, reg, size); } else { u8 type = STACK_MISC; @@ -2370,7 +2391,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, /* Mark slots as STACK_MISC if they belonged to spilled ptr. 
*/ if (is_spilled_reg(&state->stack[spi])) for (i = 0; i < BPF_REG_SIZE; i++) - state->stack[spi].slot_type[i] = STACK_MISC; + scrub_spilled_slot(&state->stack[spi].slot_type[i]); /* only mark the slot as written if all 8 bytes were written * otherwise read propagation may incorrectly stop too soon @@ -2577,35 +2598,56 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env, struct bpf_func_state *state = vstate->frame[vstate->curframe]; int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; struct bpf_reg_state *reg; - u8 *stype; + u8 *stype, type; stype = reg_state->stack[spi].slot_type; reg = ®_state->stack[spi].spilled_ptr; if (is_spilled_reg(®_state->stack[spi])) { - if (size != BPF_REG_SIZE) { + u8 spill_size = 1; + + for (i = BPF_REG_SIZE - 1; i > 0 && stype[i - 1] == STACK_SPILL; i--) + spill_size++; + + if (size != BPF_REG_SIZE || spill_size != BPF_REG_SIZE) { if (reg->type != SCALAR_VALUE) { verbose_linfo(env, env->insn_idx, "; "); verbose(env, "invalid size of register fill\n"); return -EACCES; } - if (dst_regno >= 0) { + + mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); + if (dst_regno < 0) + return 0; + + if (!(off % BPF_REG_SIZE) && size == spill_size) { + /* The earlier check_reg_arg() has decided the + * subreg_def for this insn. Save it first. + */ + s32 subreg_def = state->regs[dst_regno].subreg_def; + + copy_register_state(&state->regs[dst_regno], reg); + state->regs[dst_regno].subreg_def = subreg_def; + } else { + for (i = 0; i < size; i++) { + type = stype[(slot - i) % BPF_REG_SIZE]; + if (type == STACK_SPILL) + continue; + if (type == STACK_MISC) + continue; + verbose(env, "invalid read from stack off %d+%d size %d\n", + off, i, size); + return -EACCES; + } mark_reg_unknown(env, state->regs, dst_regno); - state->regs[dst_regno].live |= REG_LIVE_WRITTEN; } - mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); + state->regs[dst_regno].live |= REG_LIVE_WRITTEN; return 0; } - for (i = 1; i < BPF_REG_SIZE; i++) { - if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) { - verbose(env, "corrupted spill memory\n"); - return -EACCES; - } - } if (dst_regno >= 0) { /* restore register state from stack */ - state->regs[dst_regno] = *reg; + copy_register_state(&state->regs[dst_regno], reg); /* mark reg as written since spilled pointer state likely * has its liveness marks cleared by is_state_visited() * which resets stack/reg liveness for state transitions @@ -2624,8 +2666,6 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env, } mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); } else { - u8 type; - for (i = 0; i < size; i++) { type = stype[(slot - i) % BPF_REG_SIZE]; if (type == STACK_MISC) @@ -4138,7 +4178,7 @@ static int check_stack_range_initialized( if (clobber) { __mark_reg_unknown(env, &state->stack[spi].spilled_ptr); for (j = 0; j < BPF_REG_SIZE; j++) - state->stack[spi].slot_type[j] = STACK_MISC; + scrub_spilled_slot(&state->stack[spi].slot_type[j]); } goto mark; } @@ -5928,7 +5968,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env, */ if (!ptr_is_dst_reg) { tmp = *dst_reg; - *dst_reg = *ptr_reg; + copy_register_state(dst_reg, ptr_reg); } ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1, env->insn_idx); @@ -7184,7 +7224,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) * to propagate min/max range. 
*/ src_reg->id = ++env->id_gen; - *dst_reg = *src_reg; + copy_register_state(dst_reg, src_reg); dst_reg->live |= REG_LIVE_WRITTEN; dst_reg->subreg_def = DEF_NOT_SUBREG; } else { @@ -7195,7 +7235,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) insn->src_reg); return -EACCES; } else if (src_reg->type == SCALAR_VALUE) { - *dst_reg = *src_reg; + copy_register_state(dst_reg, src_reg); /* Make sure ID is cleared otherwise * dst_reg min/max could be incorrectly * propagated into src_reg by find_equal_scalars() @@ -8004,7 +8044,7 @@ static void find_equal_scalars(struct bpf_verifier_state *vstate, bpf_for_each_reg_in_vstate(vstate, state, reg, ({ if (reg->type == SCALAR_VALUE && reg->id == known_reg->id) - *reg = *known_reg; + copy_register_state(reg, known_reg); })); } diff --git a/kernel/events/core.c b/kernel/events/core.c index d7b61116f15bbe2c18a08dac8c063840dd41f0c5..53c15bfce15bd13df405bf49bc3335a171f87aaf 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -2048,6 +2048,7 @@ static void perf_group_attach(struct perf_event *event) list_add_tail(&event->sibling_list, &group_leader->sibling_list); group_leader->nr_siblings++; + group_leader->group_generation++; perf_event__header_size(group_leader); @@ -2240,6 +2241,7 @@ static void perf_group_detach(struct perf_event *event) if (leader != event) { list_del_init(&event->sibling_list); event->group_leader->nr_siblings--; + event->group_leader->group_generation++; goto out; } @@ -5217,7 +5219,7 @@ static int __perf_read_group_add(struct perf_event *leader, u64 read_format, u64 *values) { struct perf_event_context *ctx = leader->ctx; - struct perf_event *sub; + struct perf_event *sub, *parent; unsigned long flags; int n = 1; /* skip @nr */ int ret; @@ -5227,6 +5229,33 @@ static int __perf_read_group_add(struct perf_event *leader, return ret; raw_spin_lock_irqsave(&ctx->lock, flags); + /* + * Verify the grouping between the parent and child (inherited) + * events is still in tact. + * + * Specifically: + * - leader->ctx->lock pins leader->sibling_list + * - parent->child_mutex pins parent->child_list + * - parent->ctx->mutex pins parent->sibling_list + * + * Because parent->ctx != leader->ctx (and child_list nests inside + * ctx->mutex), group destruction is not atomic between children, also + * see perf_event_release_kernel(). Additionally, parent can grow the + * group. + * + * Therefore it is possible to have parent and child groups in a + * different configuration and summing over such a beast makes no sense + * what so ever. + * + * Reject this. + */ + parent = leader->parent; + if (parent && + (parent->group_generation != leader->group_generation || + parent->nr_siblings != leader->nr_siblings)) { + ret = -ECHILD; + goto unlock; + } /* * Since we co-schedule groups, {enabled,running} times of siblings @@ -5256,8 +5285,9 @@ static int __perf_read_group_add(struct perf_event *leader, values[n++] = primary_event_id(sub); } +unlock: raw_spin_unlock_irqrestore(&ctx->lock, flags); - return 0; + return ret; } static int perf_read_group(struct perf_event *event, @@ -5276,10 +5306,6 @@ static int perf_read_group(struct perf_event *event, values[0] = 1 + leader->nr_siblings; - /* - * By locking the child_mutex of the leader we effectively - * lock the child list of all siblings.. XXX explain how. 
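With the group_generation checks above, a PERF_FORMAT_GROUP read can now fail with -ECHILD when an inherited group has drifted from its parent, instead of returning a silently inconsistent sum. From userspace this surfaces as an ordinary read() error; a hedged sketch for a two-event group opened with PERF_FORMAT_GROUP | PERF_FORMAT_ID (event setup omitted):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Read layout for PERF_FORMAT_GROUP | PERF_FORMAT_ID with two events. */
struct group_read {
	uint64_t nr;
	struct {
		uint64_t value;
		uint64_t id;
	} cnt[2];
};

static int read_group(int group_fd, struct group_read *out)
{
	ssize_t n = read(group_fd, out, sizeof(*out));

	if (n < 0 && errno == ECHILD) {
		/* inherited group no longer matches its parent; skip it */
		fprintf(stderr, "group layout changed, skipping sample\n");
		return -1;
	}
	return n == (ssize_t)sizeof(*out) ? 0 : -1;
}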
- */ mutex_lock(&leader->child_mutex); ret = __perf_read_group_add(leader, read_format, values); @@ -12816,6 +12842,7 @@ static int inherit_group(struct perf_event *parent_event, !perf_get_aux_event(child_ctr, leader)) return -EINVAL; } + leader->group_generation = parent_event->group_generation; return 0; } diff --git a/kernel/exit.c b/kernel/exit.c index 5bd90aca5f8e62b54f2a2021d78e969c84e87405..d788d34dc0aba8cf975b418a12e4f11eb6f97346 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -69,11 +69,58 @@ #include #include #include +#include #include #include #include +/* + * The default value should be high enough to not crash a system that randomly + * crashes its kernel from time to time, but low enough to at least not permit + * overflowing 32-bit refcounts or the ldsem writer count. + */ +static unsigned int oops_limit = 10000; + +#ifdef CONFIG_SYSCTL +static struct ctl_table kern_exit_table[] = { + { + .procname = "oops_limit", + .data = &oops_limit, + .maxlen = sizeof(oops_limit), + .mode = 0644, + .proc_handler = proc_douintvec, + }, + { } +}; + +static __init int kernel_exit_sysctls_init(void) +{ + register_sysctl_init("kernel", kern_exit_table); + return 0; +} +late_initcall(kernel_exit_sysctls_init); +#endif + +static atomic_t oops_count = ATOMIC_INIT(0); + +#ifdef CONFIG_SYSFS +static ssize_t oops_count_show(struct kobject *kobj, struct kobj_attribute *attr, + char *page) +{ + return sysfs_emit(page, "%d\n", atomic_read(&oops_count)); +} + +static struct kobj_attribute oops_count_attr = __ATTR_RO(oops_count); + +static __init int kernel_exit_sysfs_init(void) +{ + sysfs_add_file_to_group(kernel_kobj, &oops_count_attr.attr, NULL); + return 0; +} +late_initcall(kernel_exit_sysfs_init); +#endif + static void __unhash_process(struct task_struct *p, bool group_dead) { nr_threads--; @@ -873,6 +920,31 @@ void __noreturn do_exit(long code) } EXPORT_SYMBOL_GPL(do_exit); +void __noreturn make_task_dead(int signr) +{ + /* + * Take the task off the cpu after something catastrophic has + * happened. + */ + unsigned int limit; + + /* + * Every time the system oopses, if the oops happens while a reference + * to an object was held, the reference leaks. + * If the oops doesn't also leak memory, repeated oopsing can cause + * reference counters to wrap around (if they're not using refcount_t). + * This means that repeated oopsing can make unexploitable-looking bugs + * exploitable through repeated oopsing. + * To make sure this can't happen, place an upper bound on how often the + * kernel may oops without panic(). 
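The oops_limit sysctl and oops_count attribute introduced above are plain text interfaces under /proc/sys/kernel/ and /sys/kernel/ respectively. A small userspace sketch that reads the counter and tightens the limit (error handling kept minimal; the value 100 is just an example):

#include <stdio.h>

int main(void)
{
	FILE *f;
	unsigned int count = 0;

	/* /sys/kernel/oops_count: oopses since boot */
	f = fopen("/sys/kernel/oops_count", "r");
	if (f) {
		if (fscanf(f, "%u", &count) != 1)
			count = 0;
		fclose(f);
	}
	printf("oopses since boot: %u\n", count);

	/* kernel.oops_limit: panic after this many oopses (0 = never) */
	f = fopen("/proc/sys/kernel/oops_limit", "w");
	if (f) {
		fprintf(f, "100\n");
		fclose(f);
	}
	return 0;
}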
+ */ + limit = READ_ONCE(oops_limit); + if (atomic_inc_return(&oops_count) >= limit && limit) + panic("Oopsed too often (kernel.oops_limit is %d)", limit); + + do_exit(signr); +} + void complete_and_exit(struct completion *comp, long code) { if (comp) diff --git a/kernel/fail_function.c b/kernel/fail_function.c index b0b1ad93fa957c246033f93f69cddfa15b5c0775..8f3795d8ac5b0172cbb53d1753a499eb0dad9424 100644 --- a/kernel/fail_function.c +++ b/kernel/fail_function.c @@ -163,10 +163,7 @@ static void fei_debugfs_add_attr(struct fei_attr *attr) static void fei_debugfs_remove_attr(struct fei_attr *attr) { - struct dentry *dir; - - dir = debugfs_lookup(attr->kp.symbol_name, fei_debugfs_dir); - debugfs_remove_recursive(dir); + debugfs_lookup_and_remove(attr->kp.symbol_name, fei_debugfs_dir); } static int fei_kprobe_handler(struct kprobe *kp, struct pt_regs *regs) diff --git a/kernel/fork.c b/kernel/fork.c index dfeadf1c07fe0249d70be57075cd0c2863fa0fae..23f9cbe60b97500b5b33a08c40f2050eb82aa8eb 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2749,7 +2749,7 @@ static bool clone3_args_valid(struct kernel_clone_args *kargs) * - make the CLONE_DETACHED bit reuseable for clone3 * - make the CSIGNAL bits reuseable for clone3 */ - if (kargs->flags & (CLONE_DETACHED | CSIGNAL)) + if (kargs->flags & (CLONE_DETACHED | (CSIGNAL & (~CLONE_NEWTIME)))) return false; if ((kargs->flags & (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND)) == diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 621d8dd157bc14cb43e26e7622b444a0a7df392d..e7d284261d450f38909165ad9fc3cf48dfc0ef9e 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -811,7 +811,7 @@ void handle_edge_irq(struct irq_desc *desc) /* * When another irq arrived while we were handling * one, we could have masked the irq. - * Renable it, if it was not disabled in meantime. + * Reenable it, if it was not disabled in meantime. */ if (unlikely(desc->istate & IRQS_PENDING)) { if (!irqd_irq_disabled(&desc->irq_data) && diff --git a/kernel/irq/dummychip.c b/kernel/irq/dummychip.c index 0b0cdf206dc44d7151171934344c6d17af26dea1..7fe6cffe7d0df8b3c336a17528ff041812ee9d26 100644 --- a/kernel/irq/dummychip.c +++ b/kernel/irq/dummychip.c @@ -13,7 +13,7 @@ /* * What should we do if we get a hw irq event on an illegal vector? - * Each architecture has to answer this themself. + * Each architecture has to answer this themselves. */ static void ack_bad(struct irq_data *data) { diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 9b0914a063f90d14efc8a8f32b025be930b258c6..6c009a033c73fbfa010a96e8e9c2502c8cc8d5f4 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -31,7 +31,7 @@ static int __init irq_affinity_setup(char *str) cpulist_parse(str, irq_default_affinity); /* * Set at least the boot cpu. 
We don't want to end up with - * bugreports caused by random comandline masks + * bugreports caused by random commandline masks */ cpumask_set_cpu(smp_processor_id(), irq_default_affinity); return 1; diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index c6b419db68efc5595fb8c02abe3a67153ad7ec8b..fd3f7c16c299ae752d834844dcd6753410ca91b4 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -25,6 +25,9 @@ static DEFINE_MUTEX(irq_domain_mutex); static struct irq_domain *irq_default_domain; +static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base, + unsigned int nr_irqs, int node, void *arg, + bool realloc, const struct irq_affinity_desc *affinity); static void irq_domain_check_hierarchy(struct irq_domain *domain); struct irqchip_fwid { @@ -53,7 +56,7 @@ EXPORT_SYMBOL_GPL(irqchip_fwnode_ops); * @name: Optional user provided domain name * @pa: Optional user-provided physical address * - * Allocate a struct irqchip_fwid, and return a poiner to the embedded + * Allocate a struct irqchip_fwid, and return a pointer to the embedded * fwnode_handle (or NULL on failure). * * Note: The types IRQCHIP_FWNODE_NAMED and IRQCHIP_FWNODE_NAMED_ID are @@ -114,23 +117,12 @@ void irq_domain_free_fwnode(struct fwnode_handle *fwnode) } EXPORT_SYMBOL_GPL(irq_domain_free_fwnode); -/** - * __irq_domain_add() - Allocate a new irq_domain data structure - * @fwnode: firmware node for the interrupt controller - * @size: Size of linear map; 0 for radix mapping only - * @hwirq_max: Maximum number of interrupts supported by controller - * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no - * direct mapping - * @ops: domain callbacks - * @host_data: Controller private data pointer - * - * Allocates and initializes an irq_domain structure. - * Returns pointer to IRQ domain, or NULL on failure. - */ -struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size, - irq_hw_number_t hwirq_max, int direct_max, - const struct irq_domain_ops *ops, - void *host_data) +static struct irq_domain *__irq_domain_create(struct fwnode_handle *fwnode, + unsigned int size, + irq_hw_number_t hwirq_max, + int direct_max, + const struct irq_domain_ops *ops, + void *host_data) { struct irqchip_fwid *fwid; struct irq_domain *domain; @@ -207,12 +199,44 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size, domain->revmap_direct_max_irq = direct_max; irq_domain_check_hierarchy(domain); + return domain; +} + +static void __irq_domain_publish(struct irq_domain *domain) +{ mutex_lock(&irq_domain_mutex); debugfs_add_domain_dir(domain); list_add(&domain->link, &irq_domain_list); mutex_unlock(&irq_domain_mutex); pr_debug("Added domain %s\n", domain->name); +} + +/** + * __irq_domain_add() - Allocate a new irq_domain data structure + * @fwnode: firmware node for the interrupt controller + * @size: Size of linear map; 0 for radix mapping only + * @hwirq_max: Maximum number of interrupts supported by controller + * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no + * direct mapping + * @ops: domain callbacks + * @host_data: Controller private data pointer + * + * Allocates and initializes an irq_domain structure. + * Returns pointer to IRQ domain, or NULL on failure. 
+ */ +struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size, + irq_hw_number_t hwirq_max, int direct_max, + const struct irq_domain_ops *ops, + void *host_data) +{ + struct irq_domain *domain; + + domain = __irq_domain_create(fwnode, size, hwirq_max, direct_max, + ops, host_data); + if (domain) + __irq_domain_publish(domain); + return domain; } EXPORT_SYMBOL_GPL(__irq_domain_add); @@ -495,6 +519,9 @@ void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq) return; hwirq = irq_data->hwirq; + + mutex_lock(&irq_domain_mutex); + irq_set_status_flags(irq, IRQ_NOREQUEST); /* remove chip and handler */ @@ -514,10 +541,12 @@ void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq) /* Clear reverse map for this hwirq */ irq_domain_clear_mapping(domain, hwirq); + + mutex_unlock(&irq_domain_mutex); } -int irq_domain_associate(struct irq_domain *domain, unsigned int virq, - irq_hw_number_t hwirq) +static int irq_domain_associate_locked(struct irq_domain *domain, unsigned int virq, + irq_hw_number_t hwirq) { struct irq_data *irq_data = irq_get_irq_data(virq); int ret; @@ -530,7 +559,6 @@ int irq_domain_associate(struct irq_domain *domain, unsigned int virq, if (WARN(irq_data->domain, "error: virq%i is already associated", virq)) return -EINVAL; - mutex_lock(&irq_domain_mutex); irq_data->hwirq = hwirq; irq_data->domain = domain; if (domain->ops->map) { @@ -547,7 +575,6 @@ int irq_domain_associate(struct irq_domain *domain, unsigned int virq, } irq_data->domain = NULL; irq_data->hwirq = 0; - mutex_unlock(&irq_domain_mutex); return ret; } @@ -558,12 +585,23 @@ int irq_domain_associate(struct irq_domain *domain, unsigned int virq, domain->mapcount++; irq_domain_set_mapping(domain, hwirq, irq_data); - mutex_unlock(&irq_domain_mutex); irq_clear_status_flags(virq, IRQ_NOREQUEST); return 0; } + +int irq_domain_associate(struct irq_domain *domain, unsigned int virq, + irq_hw_number_t hwirq) +{ + int ret; + + mutex_lock(&irq_domain_mutex); + ret = irq_domain_associate_locked(domain, virq, hwirq); + mutex_unlock(&irq_domain_mutex); + + return ret; +} EXPORT_SYMBOL_GPL(irq_domain_associate); void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base, @@ -623,6 +661,34 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain) } EXPORT_SYMBOL_GPL(irq_create_direct_mapping); +static unsigned int irq_create_mapping_affinity_locked(struct irq_domain *domain, + irq_hw_number_t hwirq, + const struct irq_affinity_desc *affinity) +{ + struct device_node *of_node = irq_domain_get_of_node(domain); + int virq; + + pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq); + + /* Allocate a virtual interrupt number */ + virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), + affinity); + if (virq <= 0) { + pr_debug("-> virq allocation failed\n"); + return 0; + } + + if (irq_domain_associate_locked(domain, virq, hwirq)) { + irq_free_desc(virq); + return 0; + } + + pr_debug("irq %lu on domain %s mapped to virtual irq %u\n", + hwirq, of_node_full_name(of_node), virq); + + return virq; +} + /** * irq_create_mapping_affinity() - Map a hardware interrupt into linux irq space * @domain: domain owning this hardware interrupt or NULL for default domain @@ -635,47 +701,31 @@ EXPORT_SYMBOL_GPL(irq_create_direct_mapping); * on the number returned from that call. 
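The irq_create_mapping_affinity() rework above sits behind irq_create_mapping(); consumers are unaffected by the locking changes and still follow the usual pattern, sketched here with an invented handler and device name:

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>

static irqreturn_t foo_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int foo_setup_irq(struct irq_domain *domain, irq_hw_number_t hwirq,
			 void *dev)
{
	unsigned int virq;

	/* Map the hardware number into the Linux IRQ space... */
	virq = irq_create_mapping(domain, hwirq);
	if (!virq)
		return -ENXIO;

	/* ...and request_irq() can be called on the returned number. */
	return request_irq(virq, foo_irq_handler, 0, "foo", dev);
}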
*/ unsigned int irq_create_mapping_affinity(struct irq_domain *domain, - irq_hw_number_t hwirq, - const struct irq_affinity_desc *affinity) + irq_hw_number_t hwirq, + const struct irq_affinity_desc *affinity) { - struct device_node *of_node; int virq; - pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq); - - /* Look for default domain if nececssary */ + /* Look for default domain if necessary */ if (domain == NULL) domain = irq_default_domain; if (domain == NULL) { WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq); return 0; } - pr_debug("-> using domain @%p\n", domain); - of_node = irq_domain_get_of_node(domain); + mutex_lock(&irq_domain_mutex); /* Check if mapping already exists */ virq = irq_find_mapping(domain, hwirq); if (virq) { - pr_debug("-> existing mapping on virq %d\n", virq); - return virq; - } - - /* Allocate a virtual interrupt number */ - virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), - affinity); - if (virq <= 0) { - pr_debug("-> virq allocation failed\n"); - return 0; + pr_debug("existing mapping on virq %d\n", virq); + goto out; } - if (irq_domain_associate(domain, virq, hwirq)) { - irq_free_desc(virq); - return 0; - } - - pr_debug("irq %lu on domain %s mapped to virtual irq %u\n", - hwirq, of_node_full_name(of_node), virq); + virq = irq_create_mapping_affinity_locked(domain, hwirq, affinity); +out: + mutex_unlock(&irq_domain_mutex); return virq; } @@ -779,6 +829,8 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec) if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK)) type &= IRQ_TYPE_SENSE_MASK; + mutex_lock(&irq_domain_mutex); + /* * If we've already configured this interrupt, * don't do it again, or hell will break loose. @@ -791,7 +843,7 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec) * interrupt number. 
*/ if (type == IRQ_TYPE_NONE || type == irq_get_trigger_type(virq)) - return virq; + goto out; /* * If the trigger type has not been set yet, then set @@ -799,40 +851,45 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec) */ if (irq_get_trigger_type(virq) == IRQ_TYPE_NONE) { irq_data = irq_get_irq_data(virq); - if (!irq_data) - return 0; + if (!irq_data) { + virq = 0; + goto out; + } irqd_set_trigger_type(irq_data, type); - return virq; + goto out; } pr_warn("type mismatch, failed to map hwirq-%lu for %s!\n", hwirq, of_node_full_name(to_of_node(fwspec->fwnode))); - return 0; + virq = 0; + goto out; } if (irq_domain_is_hierarchy(domain)) { - virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec); - if (virq <= 0) - return 0; + virq = irq_domain_alloc_irqs_locked(domain, -1, 1, NUMA_NO_NODE, + fwspec, false, NULL); + if (virq <= 0) { + virq = 0; + goto out; + } } else { /* Create mapping */ - virq = irq_create_mapping(domain, hwirq); + virq = irq_create_mapping_affinity_locked(domain, hwirq, NULL); if (!virq) - return virq; + goto out; } irq_data = irq_get_irq_data(virq); - if (!irq_data) { - if (irq_domain_is_hierarchy(domain)) - irq_domain_free_irqs(virq, 1); - else - irq_dispose_mapping(virq); - return 0; + if (WARN_ON(!irq_data)) { + virq = 0; + goto out; } /* Store trigger type */ irqd_set_trigger_type(irq_data, type); +out: + mutex_unlock(&irq_domain_mutex); return virq; } @@ -884,7 +941,7 @@ unsigned int irq_find_mapping(struct irq_domain *domain, { struct irq_data *data; - /* Look for default domain if nececssary */ + /* Look for default domain if necessary */ if (domain == NULL) domain = irq_default_domain; if (domain == NULL) @@ -1074,12 +1131,15 @@ struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent, struct irq_domain *domain; if (size) - domain = irq_domain_create_linear(fwnode, size, ops, host_data); + domain = __irq_domain_create(fwnode, size, size, 0, ops, host_data); else - domain = irq_domain_create_tree(fwnode, ops, host_data); + domain = __irq_domain_create(fwnode, 0, ~0, 0, ops, host_data); + if (domain) { domain->parent = parent; domain->flags |= flags; + + __irq_domain_publish(domain); } return domain; @@ -1396,40 +1456,12 @@ int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain, return domain->ops->alloc(domain, irq_base, nr_irqs, arg); } -/** - * __irq_domain_alloc_irqs - Allocate IRQs from domain - * @domain: domain to allocate from - * @irq_base: allocate specified IRQ number if irq_base >= 0 - * @nr_irqs: number of IRQs to allocate - * @node: NUMA node id for memory allocation - * @arg: domain specific argument - * @realloc: IRQ descriptors have already been allocated if true - * @affinity: Optional irq affinity mask for multiqueue devices - * - * Allocate IRQ numbers and initialized all data structures to support - * hierarchy IRQ domains. - * Parameter @realloc is mainly to support legacy IRQs. - * Returns error code or allocated IRQ number - * - * The whole process to setup an IRQ has been split into two steps. - * The first step, __irq_domain_alloc_irqs(), is to allocate IRQ - * descriptor and required hardware resources. The second step, - * irq_domain_activate_irq(), is to program hardwares with preallocated - * resources. In this way, it's easier to rollback when failing to - * allocate resources. 
- */ -int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, - unsigned int nr_irqs, int node, void *arg, - bool realloc, const struct irq_affinity_desc *affinity) +static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base, + unsigned int nr_irqs, int node, void *arg, + bool realloc, const struct irq_affinity_desc *affinity) { int i, ret, virq; - if (domain == NULL) { - domain = irq_default_domain; - if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n")) - return -EINVAL; - } - if (realloc && irq_base >= 0) { virq = irq_base; } else { @@ -1448,24 +1480,18 @@ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, goto out_free_desc; } - mutex_lock(&irq_domain_mutex); ret = irq_domain_alloc_irqs_hierarchy(domain, virq, nr_irqs, arg); - if (ret < 0) { - mutex_unlock(&irq_domain_mutex); + if (ret < 0) goto out_free_irq_data; - } for (i = 0; i < nr_irqs; i++) { ret = irq_domain_trim_hierarchy(virq + i); - if (ret) { - mutex_unlock(&irq_domain_mutex); + if (ret) goto out_free_irq_data; - } } - + for (i = 0; i < nr_irqs; i++) irq_domain_insert_irq(virq + i); - mutex_unlock(&irq_domain_mutex); return virq; @@ -1476,6 +1502,48 @@ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, return ret; } +/** + * __irq_domain_alloc_irqs - Allocate IRQs from domain + * @domain: domain to allocate from + * @irq_base: allocate specified IRQ number if irq_base >= 0 + * @nr_irqs: number of IRQs to allocate + * @node: NUMA node id for memory allocation + * @arg: domain specific argument + * @realloc: IRQ descriptors have already been allocated if true + * @affinity: Optional irq affinity mask for multiqueue devices + * + * Allocate IRQ numbers and initialized all data structures to support + * hierarchy IRQ domains. + * Parameter @realloc is mainly to support legacy IRQs. + * Returns error code or allocated IRQ number + * + * The whole process to setup an IRQ has been split into two steps. + * The first step, __irq_domain_alloc_irqs(), is to allocate IRQ + * descriptor and required hardware resources. The second step, + * irq_domain_activate_irq(), is to program the hardware with preallocated + * resources. In this way, it's easier to rollback when failing to + * allocate resources. 
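As the kerneldoc above says, hierarchical IRQ setup is split into an allocation step and a later activation step. A compressed sketch of how a consumer typically uses the allocation side through the irq_domain_alloc_irqs() wrapper; the count and fwspec argument are placeholders, and real child domains are created with irq_domain_create_hierarchy() as in the hunk further up:

#include <linux/irqdomain.h>
#include <linux/numa.h>

/*
 * Illustrative only: allocate three descriptors from a hierarchical
 * domain. Hardware is programmed later, at activation time.
 */
static int foo_alloc_block(struct irq_domain *domain, void *fwspec)
{
	int virq;

	virq = irq_domain_alloc_irqs(domain, 3, NUMA_NO_NODE, fwspec);
	if (virq < 0)
		return virq;	/* step 1 failed, nothing to roll back */

	return virq;		/* first of the three allocated Linux IRQs */
}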
+ */ +int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, + unsigned int nr_irqs, int node, void *arg, + bool realloc, const struct irq_affinity_desc *affinity) +{ + int ret; + + if (domain == NULL) { + domain = irq_default_domain; + if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n")) + return -EINVAL; + } + + mutex_lock(&irq_domain_mutex); + ret = irq_domain_alloc_irqs_locked(domain, irq_base, nr_irqs, node, arg, + realloc, affinity); + mutex_unlock(&irq_domain_mutex); + + return ret; +} + /* The irq_data was moved, fix the revmap to refer to the new location */ static void irq_domain_fix_revmap(struct irq_data *d) { @@ -1833,6 +1901,13 @@ void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, irq_set_handler_data(virq, handler_data); } +static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base, + unsigned int nr_irqs, int node, void *arg, + bool realloc, const struct irq_affinity_desc *affinity) +{ + return -EINVAL; +} + static void irq_domain_check_hierarchy(struct irq_domain *domain) { } diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 437b073dc487ef7cf26baab988ff9c54567440ea..0159925054faa80df211e36c6d29b423eb359d37 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -341,7 +341,7 @@ static bool irq_set_affinity_deactivated(struct irq_data *data, * If the interrupt is not yet activated, just store the affinity * mask and do not call the chip driver at all. On activation the * driver has to make sure anyway that the interrupt is in a - * useable state so startup works. + * usable state so startup works. */ if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) || irqd_is_activated(data) || !irqd_affinity_on_activate(data)) @@ -999,7 +999,7 @@ static void irq_finalize_oneshot(struct irq_desc *desc, * to IRQS_INPROGRESS and the irq line is masked forever. * * This also serializes the state of shared oneshot handlers - * versus "desc->threads_onehsot |= action->thread_mask;" in + * versus "desc->threads_oneshot |= action->thread_mask;" in * irq_wake_thread(). See the comment there which explains the * serialization. */ @@ -1877,7 +1877,7 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id) /* Last action releases resources */ if (!desc->action) { /* - * Reaquire bus lock as irq_release_resources() might + * Reacquire bus lock as irq_release_resources() might * require it to deallocate resources over the slow bus. */ chip_bus_lock(desc); diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c index b47d95b68ac1a460a562bdc523c1c0417d30a82f..4457f3e966d0ef6386d3636116c952d56337a5ae 100644 --- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c @@ -5,7 +5,7 @@ * * This file is licensed under GPLv2. * - * This file contains common code to support Message Signalled Interrupt for + * This file contains common code to support Message Signaled Interrupts for * PCI compatible and non PCI compatible devices. */ #include diff --git a/kernel/irq/timings.c b/kernel/irq/timings.c index 1f981162648a3d5c4e5e0ab735127b1376d743b5..00d45b6bd8f896bc4dd18e574115f3387458fb2f 100644 --- a/kernel/irq/timings.c +++ b/kernel/irq/timings.c @@ -490,7 +490,7 @@ static inline void irq_timings_store(int irq, struct irqt_stat *irqs, u64 ts) /* * The interrupt triggered more than one second apart, that - * ends the sequence as predictible for our purpose. In this + * ends the sequence as predictable for our purpose. In this * case, assume we have the beginning of a sequence and the * timestamp is the first value. 
As it is impossible to * predict anything at this point, return. diff --git a/kernel/kcsan/kcsan-test.c b/kernel/kcsan/kcsan-test.c index ebe7fd245104045da9913ab77ccb870fae4711c8..8a8ccaf4f38f30d478e0d3fb32812846e44254b3 100644 --- a/kernel/kcsan/kcsan-test.c +++ b/kernel/kcsan/kcsan-test.c @@ -149,7 +149,7 @@ static bool report_matches(const struct expect_report *r) const bool is_assert = (r->access[0].type | r->access[1].type) & KCSAN_ACCESS_ASSERT; bool ret = false; unsigned long flags; - typeof(observed.lines) expect; + typeof(*observed.lines) *expect; const char *end; char *cur; int i; @@ -158,6 +158,10 @@ static bool report_matches(const struct expect_report *r) if (!report_available()) return false; + expect = kmalloc(sizeof(observed.lines), GFP_KERNEL); + if (WARN_ON(!expect)) + return false; + /* Generate expected report contents. */ /* Title */ @@ -241,6 +245,7 @@ static bool report_matches(const struct expect_report *r) strstr(observed.lines[2], expect[1]))); out: spin_unlock_irqrestore(&observed.lock, flags); + kfree(expect); return ret; } diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c index d3bf87e6007ca7d555794606b6d56e90dbdaf363..069830f5a5d2417f8d41f0d478cf8a996b8502e4 100644 --- a/kernel/kcsan/report.c +++ b/kernel/kcsan/report.c @@ -630,8 +630,8 @@ void kcsan_report(const volatile void *ptr, size_t size, int access_type, bool reported = value_change != KCSAN_VALUE_CHANGE_FALSE && print_report(value_change, type, &ai, other_info); - if (reported && panic_on_warn) - panic("panic_on_warn set ...\n"); + if (reported) + check_panic_on_warn("KCSAN"); release_report(&flags, other_info); } diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 75150e75551809c24d8734e1e606c82cf0b9ab32..86d71c49b495711a09cc1db894ba17711fe4000e 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -447,8 +447,8 @@ static inline int kprobe_optready(struct kprobe *p) return 0; } -/* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */ -static inline int kprobe_disarmed(struct kprobe *p) +/* Return true if the kprobe is disarmed. Note: p must be on hash list */ +bool kprobe_disarmed(struct kprobe *p) { struct optimized_kprobe *op; @@ -652,7 +652,7 @@ void wait_for_kprobe_optimizer(void) mutex_unlock(&kprobe_mutex); } -static bool optprobe_queued_unopt(struct optimized_kprobe *op) +bool optprobe_queued_unopt(struct optimized_kprobe *op) { struct optimized_kprobe *_op; diff --git a/kernel/module.c b/kernel/module.c index a8f341a49a01eb44a5e57756a835577b1a444483..c13c621d1e8288568a2eeff96c61cb4749c1c8ac 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -3666,7 +3666,8 @@ static bool finished_loading(const char *name) sched_annotate_sleep(); mutex_lock(&module_mutex); mod = find_module_all(name, strlen(name), true); - ret = !mod || mod->state == MODULE_STATE_LIVE; + ret = !mod || mod->state == MODULE_STATE_LIVE + || mod->state == MODULE_STATE_GOING; mutex_unlock(&module_mutex); return ret; @@ -3832,20 +3833,35 @@ static int add_unformed_module(struct module *mod) mod->state = MODULE_STATE_UNFORMED; -again: mutex_lock(&module_mutex); old = find_module_all(mod->name, strlen(mod->name), true); if (old != NULL) { - if (old->state != MODULE_STATE_LIVE) { + if (old->state == MODULE_STATE_COMING + || old->state == MODULE_STATE_UNFORMED) { /* Wait in case it fails to load. */ mutex_unlock(&module_mutex); err = wait_event_interruptible(module_wq, finished_loading(mod->name)); if (err) goto out_unlocked; - goto again; + + /* The module might have gone in the meantime. 
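In the kcsan-test hunk above, the large expected-report buffer moves from the stack to the heap; the trick is that sizeof(observed.lines) still yields the whole array's size even though expect became a pointer to the element type. A tiny standalone sketch of that idiom (hypothetical observed layout):

#include <stdio.h>
#include <stdlib.h>

struct observed { char lines[4][256]; };   /* stand-in for the report buffer */
static struct observed observed;

int main(void)
{
        /* Pointer to one element type, but allocate the whole array's worth:
         * sizeof(observed.lines) is 4 * 256 bytes, not the size of a pointer. */
        typeof(*observed.lines) *expect = malloc(sizeof(observed.lines));

        if (!expect)
                return 1;
        snprintf(expect[0], sizeof(expect[0]), "expected title");
        printf("allocated %zu bytes, first line: %s\n",
               sizeof(observed.lines), expect[0]);
        free(expect);
        return 0;
}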
*/ + mutex_lock(&module_mutex); + old = find_module_all(mod->name, strlen(mod->name), + true); } - err = -EEXIST; + + /* + * We are here only when the same module was being loaded. Do + * not try to load it again right now. It prevents long delays + * caused by serialized module load failures. It might happen + * when more devices of the same type trigger load of + * a particular module. + */ + if (old && old->state == MODULE_STATE_LIVE) + err = -EEXIST; + else + err = -EBUSY; goto out; } mod_update_bounds(mod); diff --git a/kernel/panic.c b/kernel/panic.c index 332736a72a58e50666cc0bf14cb6ee542491ff2f..bc39e2b27d3152b996f19b7257b0b6a068935b1d 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #define PANIC_TIMER_STEP 100 @@ -41,7 +42,9 @@ * Should we dump all CPUs backtraces in an oops event? * Defaults to 0, can be changed via sysctl. */ -unsigned int __read_mostly sysctl_oops_all_cpu_backtrace; +static unsigned int __read_mostly sysctl_oops_all_cpu_backtrace; +#else +#define sysctl_oops_all_cpu_backtrace 0 #endif /* CONFIG_SMP */ int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE; @@ -54,6 +57,7 @@ bool crash_kexec_post_notifiers; int panic_on_warn __read_mostly; unsigned long panic_on_taint; bool panic_on_taint_nousertaint = false; +static unsigned int warn_limit __read_mostly; int panic_timeout = CONFIG_PANIC_TIMEOUT; EXPORT_SYMBOL_GPL(panic_timeout); @@ -70,6 +74,56 @@ ATOMIC_NOTIFIER_HEAD(panic_notifier_list); EXPORT_SYMBOL(panic_notifier_list); +#ifdef CONFIG_SYSCTL +static struct ctl_table kern_panic_table[] = { +#ifdef CONFIG_SMP + { + .procname = "oops_all_cpu_backtrace", + .data = &sysctl_oops_all_cpu_backtrace, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, +#endif + { + .procname = "warn_limit", + .data = &warn_limit, + .maxlen = sizeof(warn_limit), + .mode = 0644, + .proc_handler = proc_douintvec, + }, + { } +}; + +static __init int kernel_panic_sysctls_init(void) +{ + register_sysctl_init("kernel", kern_panic_table); + return 0; +} +late_initcall(kernel_panic_sysctls_init); +#endif + +static atomic_t warn_count = ATOMIC_INIT(0); + +#ifdef CONFIG_SYSFS +static ssize_t warn_count_show(struct kobject *kobj, struct kobj_attribute *attr, + char *page) +{ + return sysfs_emit(page, "%d\n", atomic_read(&warn_count)); +} + +static struct kobj_attribute warn_count_attr = __ATTR_RO(warn_count); + +static __init int kernel_panic_sysfs_init(void) +{ + sysfs_add_file_to_group(kernel_kobj, &warn_count_attr.attr, NULL); + return 0; +} +late_initcall(kernel_panic_sysfs_init); +#endif + static long no_blink(int state) { return 0; @@ -166,6 +220,19 @@ static void panic_print_sys_info(void) ftrace_dump(DUMP_ALL); } +void check_panic_on_warn(const char *origin) +{ + unsigned int limit; + + if (panic_on_warn) + panic("%s: panic_on_warn set ...\n", origin); + + limit = READ_ONCE(warn_limit); + if (atomic_inc_return(&warn_count) >= limit && limit) + panic("%s: system warned too often (kernel.warn_limit is %d)", + origin, limit); +} + /** * panic - halt the system * @fmt: The text string to print @@ -183,6 +250,16 @@ void panic(const char *fmt, ...) int old_cpu, this_cpu; bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers; + if (panic_on_warn) { + /* + * This thread may hit another WARN() in the panic path. + * Resetting this prevents additional WARN() from panicking the + * system on this thread. 
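check_panic_on_warn() above either panics immediately when panic_on_warn is set, or counts warnings and panics once the warn_limit sysctl is reached. A compact userspace approximation of the counting logic, using C11 atomics and hypothetical names:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static atomic_uint warn_count;
static unsigned int warn_limit = 3;     /* 0 would disable the check */

static void check_warn_limit(const char *origin)
{
        unsigned int limit = warn_limit;

        /* Increment first, then compare, so the limit-th warning trips it. */
        if (atomic_fetch_add(&warn_count, 1) + 1 >= limit && limit) {
                fprintf(stderr, "%s: warned too often (limit %u)\n", origin, limit);
                exit(1);
        }
}

int main(void)
{
        for (int i = 0; i < 5; i++) {
                printf("warning %d\n", i + 1);
                check_warn_limit("demo");
        }
        return 0;
}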
Other threads are blocked by the + * panic_mutex in panic(). + */ + panic_on_warn = 0; + } + /* * Disable local interrupts. This will prevent panic_smp_self_stop * from deadlocking the first cpu that invokes the panic, since @@ -594,16 +671,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint, if (regs) show_regs(regs); - if (panic_on_warn) { - /* - * This thread may hit another WARN() in the panic path. - * Resetting this prevents additional WARN() from panicking the - * system on this thread. Other threads are blocked by the - * panic_mutex in panic(). - */ - panic_on_warn = 0; - panic("panic_on_warn set ...\n"); - } + check_panic_on_warn("kernel"); if (!regs) dump_stack(); diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c index 52c017feabcb3343ce96370868c489818b12990c..a7d25766fb1d837cdab0978270c141322f5b6a23 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c @@ -251,7 +251,24 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns) set_current_state(TASK_INTERRUPTIBLE); if (pid_ns->pid_allocated == init_pids) break; + /* + * Release tasks_rcu_exit_srcu to avoid following deadlock: + * + * 1) TASK A unshare(CLONE_NEWPID) + * 2) TASK A fork() twice -> TASK B (child reaper for new ns) + * and TASK C + * 3) TASK B exits, kills TASK C, waits for TASK A to reap it + * 4) TASK A calls synchronize_rcu_tasks() + * -> synchronize_srcu(tasks_rcu_exit_srcu) + * 5) *DEADLOCK* + * + * It is considered safe to release tasks_rcu_exit_srcu here + * because we assume the current task can not be concurrently + * reaped at this point. + */ + exit_tasks_rcu_stop(); schedule(); + exit_tasks_rcu_start(); } __set_current_state(TASK_RUNNING); diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c index 119b929dcff0f7c76eedbae99533d3e5939afa60..334173fe6940ebc61356ebb71e8459b1c9bbdb5c 100644 --- a/kernel/power/energy_model.c +++ b/kernel/power/energy_model.c @@ -72,10 +72,7 @@ static void em_debug_create_pd(struct device *dev) static void em_debug_remove_pd(struct device *dev) { - struct dentry *debug_dir; - - debug_dir = debugfs_lookup(dev_name(dev), rootdir); - debugfs_remove_recursive(debug_dir); + debugfs_lookup_and_remove(dev_name(dev), rootdir); } static int __init em_debug_init(void) diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 8b51e6a5b3869dcd63860d4b61e54267b21febae..c66d47685b28e40fca40c2e38fc83aa765fcebec 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -171,8 +171,9 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func, static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp) { /* Complain if the scheduler has not started. */ - WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE, - "synchronize_rcu_tasks called too soon"); + if (WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE, + "synchronize_%s() called too soon", rtp->name)) + return; /* Wait for the grace period. */ wait_rcu_gp(rtp->call_func); @@ -416,11 +417,21 @@ static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop) static void rcu_tasks_postscan(struct list_head *hop) { /* - * Wait for tasks that are in the process of exiting. This - * does only part of the job, ensuring that all tasks that were - * previously exiting reach the point where they have disabled - * preemption, allowing the later synchronize_rcu() to finish - * the job. + * Exiting tasks may escape the tasklist scan. Those are vulnerable + * until their final schedule() with TASK_DEAD state. 
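The pid_namespace hunk above drops the tasks_rcu_exit_srcu read side around schedule() so a Tasks-RCU grace period can complete while the reaper sleeps. As a rough analogy only (SRCU is not an rwlock), a pthread sketch of releasing a read side before blocking and re-entering it afterwards:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for the SRCU read side that exiting tasks hold. */
static pthread_rwlock_t exit_rwlock = PTHREAD_RWLOCK_INITIALIZER;

static void *grace_period(void *arg)
{
        /* "Writer": cannot finish while any reader holds the lock. */
        pthread_rwlock_wrlock(&exit_rwlock);
        puts("grace period completed");
        pthread_rwlock_unlock(&exit_rwlock);
        return NULL;
}

int main(void)
{
        pthread_t gp;

        pthread_rwlock_rdlock(&exit_rwlock);            /* read side entered */
        pthread_create(&gp, NULL, grace_period, NULL);

        /* About to block waiting for unrelated work: drop the read side
         * first so the writer above is not held up for the whole wait. */
        pthread_rwlock_unlock(&exit_rwlock);
        sleep(1);                                       /* stand-in for schedule() */
        pthread_rwlock_rdlock(&exit_rwlock);            /* re-enter afterwards */

        pthread_rwlock_unlock(&exit_rwlock);
        pthread_join(gp, NULL);
        return 0;
}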
To cope with + * this, divide the fragile exit path part in two intersecting + * read side critical sections: + * + * 1) An _SRCU_ read side starting before calling exit_notify(), + * which may remove the task from the tasklist, and ending after + * the final preempt_disable() call in do_exit(). + * + * 2) An _RCU_ read side starting with the final preempt_disable() + * call in do_exit() and ending with the final call to schedule() + * with TASK_DEAD state. + * + * This handles the part 1). And postgp will handle part 2) with a + * call to synchronize_rcu(). */ synchronize_srcu(&tasks_rcu_exit_srcu); } @@ -487,7 +498,10 @@ static void rcu_tasks_postgp(struct rcu_tasks *rtp) * * In addition, this synchronize_rcu() waits for exiting tasks * to complete their final preempt_disable() region of execution, - * cleaning up after the synchronize_srcu() above. + * cleaning up after synchronize_srcu(&tasks_rcu_exit_srcu), + * enforcing the whole region before tasklist removal until + * the final schedule() with TASK_DEAD state to be an RCU TASKS + * read side critical section. */ synchronize_rcu(); } @@ -576,28 +590,43 @@ static void show_rcu_tasks_classic_gp_kthread(void) } #endif /* #ifndef CONFIG_TINY_RCU */ -/* Do the srcu_read_lock() for the above synchronize_srcu(). */ +/* + * Contribute to protect against tasklist scan blind spot while the + * task is exiting and may be removed from the tasklist. See + * corresponding synchronize_srcu() for further details. + */ void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu) { - preempt_disable(); current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu); - preempt_enable(); } -/* Do the srcu_read_unlock() for the above synchronize_srcu(). */ -void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu) +/* + * Contribute to protect against tasklist scan blind spot while the + * task is exiting and may be removed from the tasklist. See + * corresponding synchronize_srcu() for further details. + */ +void exit_tasks_rcu_stop(void) __releases(&tasks_rcu_exit_srcu) { struct task_struct *t = current; - preempt_disable(); __srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx); - preempt_enable(); - exit_tasks_rcu_finish_trace(t); +} + +/* + * Contribute to protect against tasklist scan blind spot while the + * task is exiting and may be removed from the tasklist. See + * corresponding synchronize_srcu() for further details. + */ +void exit_tasks_rcu_finish(void) +{ + exit_tasks_rcu_stop(); + exit_tasks_rcu_finish_trace(current); } #else /* #ifdef CONFIG_TASKS_RCU */ static inline void show_rcu_tasks_classic_gp_kthread(void) { } void exit_tasks_rcu_start(void) { } +void exit_tasks_rcu_stop(void) { } void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); } #endif /* #else #ifdef CONFIG_TASKS_RCU */ @@ -620,9 +649,6 @@ static void rcu_tasks_be_rude(struct work_struct *work) // Wait for one rude RCU-tasks grace period. static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp) { - if (num_online_cpus() <= 1) - return; // Fastpath for only one CPU. 
- rtp->n_ipis += cpumask_weight(cpu_online_mask); schedule_on_each_cpu(rcu_tasks_be_rude); } diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index 0dc16345e668c6ead06136c5a13047bfc4509ae0..ef6570137dcd5411a0f877d2c3c0906049c08d45 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -564,7 +564,9 @@ static void synchronize_rcu_expedited_wait(void) mask = leaf_node_cpu_bit(rnp, cpu); if (!(READ_ONCE(rnp->expmask) & mask)) continue; + preempt_disable(); // For smp_processor_id() in dump_cpu_task(). dump_cpu_task(cpu); + preempt_enable(); } } jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3; diff --git a/kernel/resource.c b/kernel/resource.c index 817545ff80b9b245ed1ab8cd9cc7d0d1cd3ae3d7..100253d4909c987da7a05846864775bf7108b8e6 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -1293,20 +1293,6 @@ void release_mem_region_adjustable(resource_size_t start, resource_size_t size) continue; } - /* - * All memory regions added from memory-hotplug path have the - * flag IORESOURCE_SYSTEM_RAM. If the resource does not have - * this flag, we know that we are dealing with a resource coming - * from HMM/devm. HMM/devm use another mechanism to add/release - * a resource. This goes via devm_request_mem_region and - * devm_release_mem_region. - * HMM/devm take care to release their resources when they want, - * so if we are dealing with them, let us just back off here. - */ - if (!(res->flags & IORESOURCE_SYSRAM)) { - break; - } - if (!(res->flags & IORESOURCE_MEM)) break; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index e86758a8199d06eef2456ed1e39ae39bac69b3bd..d4ebc1d4e6394be0893aeb923147e5abfb4c7b9a 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4467,8 +4467,7 @@ static noinline void __schedule_bug(struct task_struct *prev) pr_err("Preemption disabled at:"); print_ip_sym(KERN_ERR, preempt_disable_ip); } - if (panic_on_warn) - panic("scheduling while atomic\n"); + check_panic_on_warn("scheduling while atomic"); dump_stack(); add_taint(TAINT_WARN, LOCKDEP_STILL_OK); diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 8b8481a4a88e777958d16b9a74e2fc58f5fd5483..c2ab1f685a6dd3a972d4973563e10f34dd3471ea 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1850,8 +1850,7 @@ static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first) deadline_queue_push_tasks(rq); } -static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq, - struct dl_rq *dl_rq) +static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq) { struct rb_node *left = rb_first_cached(&dl_rq->root); @@ -1870,7 +1869,7 @@ static struct task_struct *pick_next_task_dl(struct rq *rq) if (!sched_dl_runnable(rq)) return NULL; - dl_se = pick_next_dl_entity(rq, dl_rq); + dl_se = pick_next_dl_entity(dl_rq); BUG_ON(!dl_se); p = dl_task_of(dl_se); set_next_task_dl(rq, p, true); diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c index b7f38f3ad42a25fcfddae7a7e900df0d622c0c7c..debaeb07ae533137baee8f848fa279c81ff4046f 100644 --- a/kernel/sched/psi.c +++ b/kernel/sched/psi.c @@ -1158,10 +1158,11 @@ void psi_trigger_destroy(struct psi_trigger *t) group = t->group; /* - * Wakeup waiters to stop polling. Can happen if cgroup is deleted - * from under a polling process. + * Wakeup waiters to stop polling and clear the queue to prevent it from + * being accessed later. Can happen if cgroup is deleted from under a + * polling process. 
*/ - wake_up_interruptible(&t->event_wait); + wake_up_pollfree(&t->event_wait); mutex_lock(&group->trigger_lock); diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 886ec07122765b20ba5d747d37caca3502773e03..0f296598a24ef2f7639c2453132b488723618955 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -1631,8 +1631,7 @@ static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool f rt_queue_push_tasks(rq); } -static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq, - struct rt_rq *rt_rq) +static struct sched_rt_entity *pick_next_rt_entity(struct rt_rq *rt_rq) { struct rt_prio_array *array = &rt_rq->active; struct sched_rt_entity *next = NULL; @@ -1643,6 +1642,8 @@ static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq, BUG_ON(idx >= MAX_RT_PRIO); queue = array->queue + idx; + if (SCHED_WARN_ON(list_empty(queue))) + return NULL; next = list_entry(queue->next, struct sched_rt_entity, run_list); return next; @@ -1654,8 +1655,9 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq) struct rt_rq *rt_rq = &rq->rt; do { - rt_se = pick_next_rt_entity(rq, rt_rq); - BUG_ON(!rt_se); + rt_se = pick_next_rt_entity(rt_rq); + if (unlikely(!rt_se)) + return NULL; rt_rq = group_rt_rq(rt_se); } while (rt_rq); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 03b82a433c6f011b56fa1b0a42a4d801fd503514..21ba7f28c149c82b27cf35fed01e466420f41b0c 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -2251,17 +2251,6 @@ static struct ctl_table kern_table[] = { .proc_handler = proc_dointvec, }, #endif -#ifdef CONFIG_SMP - { - .procname = "oops_all_cpu_backtrace", - .data = &sysctl_oops_all_cpu_backtrace, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = SYSCTL_ZERO, - .extra2 = SYSCTL_ONE, - }, -#endif /* CONFIG_SMP */ { .procname = "pid_max", .data = &pid_max, diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index daeaa7140d0aaa2bbca2370ca1b8f7a243b878c3..1de426d3f694c0532fe1fcf2edd27089278f5fef 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c @@ -470,11 +470,35 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval) } EXPORT_SYMBOL_GPL(alarm_forward); -u64 alarm_forward_now(struct alarm *alarm, ktime_t interval) +static u64 __alarm_forward_now(struct alarm *alarm, ktime_t interval, bool throttle) { struct alarm_base *base = &alarm_bases[alarm->type]; + ktime_t now = base->get_ktime(); + + if (IS_ENABLED(CONFIG_HIGH_RES_TIMERS) && throttle) { + /* + * Same issue as with posix_timer_fn(). Timers which are + * periodic but the signal is ignored can starve the system + * with a very small interval. The real fix which was + * promised in the context of posix_timer_fn() never + * materialized, but someone should really work on it. + * + * To prevent DOS fake @now to be 1 jiffie out which keeps + * the overrun accounting correct but creates an + * inconsistency vs. timer_gettime(2). + */ + ktime_t kj = NSEC_PER_SEC / HZ; + + if (interval < kj) + now = ktime_add(now, kj); + } + + return alarm_forward(alarm, now, interval); +} - return alarm_forward(alarm, base->get_ktime(), interval); +u64 alarm_forward_now(struct alarm *alarm, ktime_t interval) +{ + return __alarm_forward_now(alarm, interval, false); } EXPORT_SYMBOL_GPL(alarm_forward_now); @@ -548,9 +572,10 @@ static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm, if (posix_timer_event(ptr, si_private) && ptr->it_interval) { /* * Handle ignored signals and rearm the timer. 
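__alarm_forward_now() above fakes "now" one jiffy ahead whenever the period is smaller than a jiffy, so an ignored periodic signal cannot re-arm the timer at an unbounded rate. A simplified sketch of that clamping arithmetic (the HZ value is assumed, and the real code forwards by whole intervals rather than adding one):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL
#define HZ 250                          /* assumed tick rate */

/* Clamp the effective period so a tiny interval cannot fire faster than
 * once per tick; mirrors the idea of pretending a full jiffy has elapsed. */
static uint64_t next_expiry(uint64_t now_ns, uint64_t interval_ns)
{
        uint64_t jiffy_ns = NSEC_PER_SEC / HZ;

        if (interval_ns < jiffy_ns)
                now_ns += jiffy_ns;     /* pretend "now" is one tick later */
        return now_ns + interval_ns;
}

int main(void)
{
        uint64_t now = 0;

        printf("1us period  -> next at %llu ns\n",
               (unsigned long long)next_expiry(now, 1000));
        printf("10ms period -> next at %llu ns\n",
               (unsigned long long)next_expiry(now, 10000000));
        return 0;
}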
This will go - * away once we handle ignored signals proper. + * away once we handle ignored signals proper. Ensure that + * small intervals cannot starve the system. */ - ptr->it_overrun += alarm_forward_now(alarm, ptr->it_interval); + ptr->it_overrun += __alarm_forward_now(alarm, ptr->it_interval, true); ++ptr->it_requeue_pending; ptr->it_active = 1; result = ALARMTIMER_RESTART; diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index e34ceb91f4c5a60bc7474d0b80281668e40c627e..86e0fbe583f2b0f65d89d6dc95a7d4835c8611e1 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -312,6 +312,15 @@ static void clocksource_verify_percpu(struct clocksource *cs) testcpu, cs_nsec_min, cs_nsec_max, cs->name); } +static inline void clocksource_reset_watchdog(void) +{ + struct clocksource *cs; + + list_for_each_entry(cs, &watchdog_list, wd_list) + cs->flags &= ~CLOCK_SOURCE_WATCHDOG; +} + + static void clocksource_watchdog(struct timer_list *unused) { u64 csnow, wdnow, cslast, wdlast, delta; @@ -319,6 +328,7 @@ static void clocksource_watchdog(struct timer_list *unused) int64_t wd_nsec, cs_nsec; struct clocksource *cs; enum wd_read_status read_ret; + unsigned long extra_wait = 0; u32 md; spin_lock(&watchdog_lock); @@ -338,13 +348,30 @@ static void clocksource_watchdog(struct timer_list *unused) read_ret = cs_watchdog_read(cs, &csnow, &wdnow); - if (read_ret != WD_READ_SUCCESS) { - if (read_ret == WD_READ_UNSTABLE) - /* Clock readout unreliable, so give it up. */ - __clocksource_unstable(cs); + if (read_ret == WD_READ_UNSTABLE) { + /* Clock readout unreliable, so give it up. */ + __clocksource_unstable(cs); continue; } + /* + * When WD_READ_SKIP is returned, it means the system is likely + * under very heavy load, where the latency of reading + * watchdog/clocksource is very big, and affect the accuracy of + * watchdog check. So give system some space and suspend the + * watchdog check for 5 minutes. + */ + if (read_ret == WD_READ_SKIP) { + /* + * As the watchdog timer will be suspended, and + * cs->last could keep unchanged for 5 minutes, reset + * the counters. + */ + clocksource_reset_watchdog(); + extra_wait = HZ * 300; + break; + } + /* Clocksource initialized ? */ if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) || atomic_read(&watchdog_reset_pending)) { @@ -434,7 +461,7 @@ static void clocksource_watchdog(struct timer_list *unused) * pair clocksource_stop_watchdog() clocksource_start_watchdog(). */ if (!timer_pending(&watchdog_timer)) { - watchdog_timer.expires += WATCHDOG_INTERVAL; + watchdog_timer.expires += WATCHDOG_INTERVAL + extra_wait; add_timer_on(&watchdog_timer, next_cpu); } out: @@ -459,14 +486,6 @@ static inline void clocksource_stop_watchdog(void) watchdog_running = 0; } -static inline void clocksource_reset_watchdog(void) -{ - struct clocksource *cs; - - list_for_each_entry(cs, &watchdog_list, wd_list) - cs->flags &= ~CLOCK_SOURCE_WATCHDOG; -} - static void clocksource_resume_watchdog(void) { atomic_inc(&watchdog_reset_pending); diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 0c5a4615a8f0ce5d071f7d021a4810ff2a2b9441..e5633daf03e4e5ce0c6120525e6aee49adc02aa7 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -2050,6 +2050,7 @@ SYSCALL_DEFINE2(nanosleep, struct __kernel_timespec __user *, rqtp, if (!timespec64_valid(&tu)) return -EINVAL; + current->restart_block.fn = do_no_restart_syscall; current->restart_block.nanosleep.type = rmtp ? 
TT_NATIVE : TT_NONE; current->restart_block.nanosleep.rmtp = rmtp; return hrtimer_nanosleep(timespec64_to_ktime(tu), HRTIMER_MODE_REL, @@ -2071,6 +2072,7 @@ SYSCALL_DEFINE2(nanosleep_time32, struct old_timespec32 __user *, rqtp, if (!timespec64_valid(&tu)) return -EINVAL; + current->restart_block.fn = do_no_restart_syscall; current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE; current->restart_block.nanosleep.compat_rmtp = rmtp; return hrtimer_nanosleep(timespec64_to_ktime(tu), HRTIMER_MODE_REL, diff --git a/kernel/time/posix-stubs.c b/kernel/time/posix-stubs.c index fcb3b21d8bdcd444f32165f98dd9fe078b3d2446..3783d07d60ba038736e6566eecc9d72b00fe6487 100644 --- a/kernel/time/posix-stubs.c +++ b/kernel/time/posix-stubs.c @@ -146,6 +146,7 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags, return -EINVAL; if (flags & TIMER_ABSTIME) rmtp = NULL; + current->restart_block.fn = do_no_restart_syscall; current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE; current->restart_block.nanosleep.rmtp = rmtp; texp = timespec64_to_ktime(t); @@ -239,6 +240,7 @@ SYSCALL_DEFINE4(clock_nanosleep_time32, clockid_t, which_clock, int, flags, return -EINVAL; if (flags & TIMER_ABSTIME) rmtp = NULL; + current->restart_block.fn = do_no_restart_syscall; current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE; current->restart_block.nanosleep.compat_rmtp = rmtp; texp = timespec64_to_ktime(t); diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c index c2a02f2bb6230803c189c95da4ec56470deb86ae..5bbfb9b298cbcfd3d81c22045145b03bd2ff1ab7 100644 --- a/kernel/time/posix-timers.c +++ b/kernel/time/posix-timers.c @@ -1270,6 +1270,7 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags, return -EINVAL; if (flags & TIMER_ABSTIME) rmtp = NULL; + current->restart_block.fn = do_no_restart_syscall; current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE; current->restart_block.nanosleep.rmtp = rmtp; @@ -1297,6 +1298,7 @@ SYSCALL_DEFINE4(clock_nanosleep_time32, clockid_t, which_clock, int, flags, return -EINVAL; if (flags & TIMER_ABSTIME) rmtp = NULL; + current->restart_block.fn = do_no_restart_syscall; current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE; current->restart_block.nanosleep.compat_rmtp = rmtp; diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 22dc4faf776f2f7ab3ec15278c0db4ab82c77714..973af1a5cfbbc5b1db86c0e20e93552cc74229a4 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1055,6 +1055,7 @@ static void do_bpf_send_signal(struct irq_work *entry) work = container_of(entry, struct send_signal_irq_work, irq_work); group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type); + put_task_struct(work->task); } static int bpf_send_signal_common(u32 sig, enum pid_type type) @@ -1072,6 +1073,9 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type) return -EPERM; if (unlikely(!nmi_uaccess_okay())) return -EPERM; + /* Task should not be pid=1 to avoid kernel panic. */ + if (unlikely(is_global_init(current))) + return -EPERM; if (irqs_disabled()) { /* Do an early check on signal validity. Otherwise, @@ -1088,7 +1092,7 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type) * to the irq_work. The current task may change when queued * irq works get executed. 
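The bpf_send_signal change above pins the task with get_task_struct() when the work is queued and drops that reference in the irq_work callback, because current may no longer be the same task by the time the deferred work runs. A userspace sketch of the same take-reference-before-deferring pattern, with a hypothetical refcounted task:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical refcounted task handed to deferred work. */
struct task {
        atomic_int refcount;
        int pid;
};

static struct task *task_get(struct task *t)
{
        atomic_fetch_add(&t->refcount, 1);
        return t;
}

static void task_put(struct task *t)
{
        if (atomic_fetch_sub(&t->refcount, 1) == 1) {
                printf("freeing task %d\n", t->pid);
                free(t);
        }
}

struct work { struct task *task; int sig; };

/* Runs later, possibly after the submitter dropped its own reference. */
static void work_fn(struct work *w)
{
        printf("deliver signal %d to pid %d\n", w->sig, w->task->pid);
        task_put(w->task);              /* release the reference taken at queue time */
}

int main(void)
{
        struct task *t = malloc(sizeof(*t));
        struct work w;

        if (!t)
                return 1;
        atomic_init(&t->refcount, 1);
        t->pid = 1234;

        w.task = task_get(t);           /* pin the task while the work is pending */
        w.sig = 10;

        task_put(t);                    /* submitter's reference goes away */
        work_fn(&w);                    /* deferred execution still sees a live task */
        return 0;
}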
*/ - work->task = current; + work->task = get_task_struct(current); work->sig = sig; work->type = type; irq_work_queue(&work->irq_work); diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 49ebb8c6626828d8d3fd91f7124b9ea06d9821ff..70da6f3212bc4121d4d711386504adeed9909abb 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -1449,19 +1449,6 @@ static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer, return 0; } -/** - * rb_check_list - make sure a pointer to a list has the last bits zero - */ -static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer, - struct list_head *list) -{ - if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev)) - return 1; - if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next)) - return 1; - return 0; -} - /** * rb_check_pages - integrity check of buffer pages * @cpu_buffer: CPU buffer with pages to test @@ -1471,36 +1458,27 @@ static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer, */ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) { - struct list_head *head = cpu_buffer->pages; - struct buffer_page *bpage, *tmp; - - /* Reset the head page if it exists */ - if (cpu_buffer->head_page) - rb_set_head_page(cpu_buffer); - - rb_head_page_deactivate(cpu_buffer); + struct list_head *head = rb_list_head(cpu_buffer->pages); + struct list_head *tmp; - if (RB_WARN_ON(cpu_buffer, head->next->prev != head)) - return -1; - if (RB_WARN_ON(cpu_buffer, head->prev->next != head)) + if (RB_WARN_ON(cpu_buffer, + rb_list_head(rb_list_head(head->next)->prev) != head)) return -1; - if (rb_check_list(cpu_buffer, head)) + if (RB_WARN_ON(cpu_buffer, + rb_list_head(rb_list_head(head->prev)->next) != head)) return -1; - list_for_each_entry_safe(bpage, tmp, head, list) { + for (tmp = rb_list_head(head->next); tmp != head; tmp = rb_list_head(tmp->next)) { if (RB_WARN_ON(cpu_buffer, - bpage->list.next->prev != &bpage->list)) + rb_list_head(rb_list_head(tmp->next)->prev) != tmp)) return -1; + if (RB_WARN_ON(cpu_buffer, - bpage->list.prev->next != &bpage->list)) - return -1; - if (rb_check_list(cpu_buffer, &bpage->list)) + rb_list_head(rb_list_head(tmp->prev)->next) != tmp)) return -1; } - rb_head_page_activate(cpu_buffer); - return 0; } @@ -5324,11 +5302,16 @@ EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page); */ void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data) { - struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; + struct ring_buffer_per_cpu *cpu_buffer; struct buffer_data_page *bpage = data; struct page *page = virt_to_page(bpage); unsigned long flags; + if (!buffer || !buffer->buffers || !buffer->buffers[cpu]) + return; + + cpu_buffer = buffer->buffers[cpu]; + /* If the page is still in use someplace else, we can't reuse it */ if (page_ref_count(page) > 1) goto out; diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index c7c92b0eed04819b34c15c2edf83ab51fecdcf87..8637eab2986eeb949138c68c30aa2575a24b1363 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -8569,9 +8569,6 @@ buffer_percent_write(struct file *filp, const char __user *ubuf, if (val > 100) return -EINVAL; - if (!val) - val = 1; - tr->buffer_percent = val; (*ppos)++; @@ -9680,6 +9677,8 @@ void __init early_trace_init(void) static_key_enable(&tracepoint_printk_key.key); } tracer_alloc_buffers(); + + init_events(); } void __init trace_init(void) diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 
8d67f7f44840048ab875ef8e3eebb4acebe1c987..37f616bf5fa93b5af703c013494016e4550e6f84 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -1673,6 +1673,7 @@ extern void trace_event_enable_cmd_record(bool enable); extern void trace_event_enable_tgid_record(bool enable); extern int event_trace_init(void); +extern int init_events(void); extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr); extern int event_trace_del_tracer(struct trace_array *tr); extern void __trace_early_add_events(struct trace_array *tr); diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 0ae3e4454ff2c4993489e779026492ef0437d823..ccc99cd23f3c4e845c4e13def5827a46d6dcc493 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -1646,6 +1646,8 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data, unsigned long fl = flags & ~HIST_FIELD_FL_LOG2; hist_field->fn = hist_field_log2; hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL); + if (!hist_field->operands[0]) + goto free; hist_field->size = hist_field->operands[0]->size; hist_field->type = kstrdup(hist_field->operands[0]->type, GFP_KERNEL); if (!hist_field->type) diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index 000e9dc224c61376643cf9c400fb01f7ce39631d..b3ee8d9b6b62aeb54f9fb4474acfb58da1772327 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -1378,7 +1378,7 @@ static struct trace_event *events[] __initdata = { NULL }; -__init static int init_events(void) +__init int init_events(void) { struct trace_event *event; int i, ret; @@ -1396,4 +1396,3 @@ __init static int init_events(void) return 0; } -early_initcall(init_events); diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c index d29731a30b8e13729e93056aef235fc45bcc155f..73717917d81644c942b4e28ac06b00ead0b6f4ca 100644 --- a/kernel/watch_queue.c +++ b/kernel/watch_queue.c @@ -274,6 +274,7 @@ long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes) if (ret < 0) goto error; + ret = -ENOMEM; pages = kcalloc(sizeof(struct page *), nr_pages, GFP_KERNEL); if (!pages) goto error; diff --git a/lib/errname.c b/lib/errname.c index 0c4d3e66170e927bed84e7bc016983d328a709d5..a5799a8c9cab9403513e73aeca8a02021a8c9e22 100644 --- a/lib/errname.c +++ b/lib/errname.c @@ -20,6 +20,7 @@ static const char *names_0[] = { E(EADDRNOTAVAIL), E(EADV), E(EAFNOSUPPORT), + E(EAGAIN), /* EWOULDBLOCK */ E(EALREADY), E(EBADE), E(EBADF), @@ -30,15 +31,17 @@ static const char *names_0[] = { E(EBADSLT), E(EBFONT), E(EBUSY), -#ifdef ECANCELLED - E(ECANCELLED), -#endif + E(ECANCELED), /* ECANCELLED */ E(ECHILD), E(ECHRNG), E(ECOMM), E(ECONNABORTED), + E(ECONNREFUSED), /* EREFUSED */ E(ECONNRESET), + E(EDEADLK), /* EDEADLOCK */ +#if EDEADLK != EDEADLOCK /* mips, sparc, powerpc */ E(EDEADLOCK), +#endif E(EDESTADDRREQ), E(EDOM), E(EDOTDOT), @@ -165,14 +168,17 @@ static const char *names_0[] = { E(EUSERS), E(EXDEV), E(EXFULL), - - E(ECANCELED), /* ECANCELLED */ - E(EAGAIN), /* EWOULDBLOCK */ - E(ECONNREFUSED), /* EREFUSED */ - E(EDEADLK), /* EDEADLOCK */ }; #undef E +#ifdef EREFUSED /* parisc */ +static_assert(EREFUSED == ECONNREFUSED); +#endif +#ifdef ECANCELLED /* parisc */ +static_assert(ECANCELLED == ECANCELED); +#endif +static_assert(EAGAIN == EWOULDBLOCK); /* everywhere */ + #define E(err) [err - 512 + BUILD_BUG_ON_ZERO(err < 512 || err > 550)] = "-" #err static const char *names_512[] = { E(ERESTARTSYS), diff --git 
a/lib/lockref.c b/lib/lockref.c index 5b34bbd3eba818563db89437eb267782e8c3a6c9..81ac5f3552428ce9c74c1ce9dafaa729295e9319 100644 --- a/lib/lockref.c +++ b/lib/lockref.c @@ -24,7 +24,6 @@ } \ if (!--retry) \ break; \ - cpu_relax(); \ } \ } while (0) diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c index 7ea225b2204fa511c5767a5a2979e7285103d3ea..7054311d787920d9f13b0b7fb79876abfdb48dff 100644 --- a/lib/mpi/mpicoder.c +++ b/lib/mpi/mpicoder.c @@ -504,7 +504,8 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes) while (sg_miter_next(&miter)) { buff = miter.addr; - len = miter.length; + len = min_t(unsigned, miter.length, nbytes); + nbytes -= len; for (x = 0; x < len; x++) { a <<= 8; diff --git a/lib/nlattr.c b/lib/nlattr.c index fe60f9ae9db17ce87aa29a15b5a5969341bcfd83..aa8fc4371e9305b79d91805df7d15a19a956b1b1 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -369,6 +370,7 @@ static int validate_nla(const struct nlattr *nla, int maxtype, if (type <= 0 || type > maxtype) return 0; + type = array_index_nospec(type, maxtype + 1); pt = &policy[type]; BUG_ON(pt->type > NLA_TYPE_MAX); @@ -584,6 +586,7 @@ static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype, } continue; } + type = array_index_nospec(type, maxtype + 1); if (policy) { int err = validate_nla(nla, maxtype, policy, validate, extack, depth); diff --git a/lib/ubsan.c b/lib/ubsan.c index adf8dcf3c84e6660fbcbe3773cdb20b1305fcf12..ee14c46cac8975a702aa11725e91676d99dcf512 100644 --- a/lib/ubsan.c +++ b/lib/ubsan.c @@ -151,16 +151,7 @@ static void ubsan_epilogue(void) current->in_ubsan--; - if (panic_on_warn) { - /* - * This thread may hit another WARN() in the panic path. - * Resetting this prevents additional WARN() from panicking the - * system on this thread. Other threads are blocked by the - * panic_mutex in panic(). 
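The mpicoder hunk above clamps each scatter-gather segment to the number of bytes still requested (len = min(miter.length, nbytes); nbytes -= len), so the walk never consumes more than the caller's limit. A standalone sketch of that budgeted copy, with hypothetical segment arrays:

#include <stdio.h>
#include <string.h>

/* Hypothetical scatter-gather walk: each segment may be longer than what
 * the caller asked for, so clamp to the bytes still wanted. */
static size_t copy_limited(char *dst, const char *segs[],
                           const size_t seg_len[], int nsegs, size_t nbytes)
{
        size_t copied = 0;

        for (int i = 0; i < nsegs && nbytes; i++) {
                size_t len = seg_len[i] < nbytes ? seg_len[i] : nbytes;

                memcpy(dst + copied, segs[i], len);
                copied += len;
                nbytes -= len;          /* shrink the remaining budget */
        }
        return copied;
}

int main(void)
{
        const char *segs[] = { "abcdef", "ghijkl" };
        const size_t lens[] = { 6, 6 };
        char out[16] = { 0 };

        /* Ask for 8 bytes even though 12 are available across the segments. */
        size_t n = copy_limited(out, segs, lens, 2, 8);

        printf("copied %zu bytes: %.8s\n", n, out);
        return 0;
}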
- */ - panic_on_warn = 0; - panic("panic_on_warn set ...\n"); - } + check_panic_on_warn("UBSAN"); } static void handle_overflow(struct overflow_data *data, void *lhs, diff --git a/mm/gup.c b/mm/gup.c index 6d5e4fd55d320a13074c5b994ef5b081e66e1981..11307a8b20d5847abc97191c21520c86585fc433 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -1627,7 +1627,7 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm, */ if (is_migrate_cma_page(head)) { if (PageHuge(head)) { - if (!isolate_huge_page(head, &cma_page_list)) + if (isolate_hugetlb(head, &cma_page_list)) isolation_error_count++; } else { if (!PageLRU(head) && drain_allow) { diff --git a/mm/huge_memory.c b/mm/huge_memory.c index cb7b0aead7096d6c7440ad06705d5c571dcb1e71..9b15760e0541a5c082ccd0724cd8157b3d55ce73 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2803,6 +2803,9 @@ void deferred_split_huge_page(struct page *page) if (PageSwapCache(page)) return; + if (!list_empty(page_deferred_list(page))) + return; + spin_lock_irqsave(&ds_queue->split_queue_lock, flags); if (list_empty(page_deferred_list(page))) { count_vm_event(THP_DEFERRED_SPLIT_PAGE); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 972f7ea3625b077a043e900fb2d77916843e62ff..feac38090a9ecf7ddb9999207d9a4b883ebf6f0a 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -5674,14 +5674,14 @@ follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int fla return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT); } -bool isolate_huge_page(struct page *page, struct list_head *list) +int isolate_hugetlb(struct page *page, struct list_head *list) { - bool ret = true; + int ret = 0; spin_lock(&hugetlb_lock); if (!PageHeadHuge(page) || !page_huge_active(page) || !get_page_unless_zero(page)) { - ret = false; + ret = -EBUSY; goto unlock; } clear_page_huge_active(page); diff --git a/mm/kasan/report.c b/mm/kasan/report.c index 00a53f1355aec6bdd4985347f2d08a8021c7f984..2f5e96ac4d00817fac2dadb725e93e630158704e 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -95,16 +95,8 @@ static void end_report(unsigned long *flags) pr_err("==================================================================\n"); add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); spin_unlock_irqrestore(&report_lock, *flags); - if (panic_on_warn && !test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags)) { - /* - * This thread may hit another WARN() in the panic path. - * Resetting this prevents additional WARN() from panicking the - * system on this thread. Other threads are blocked by the - * panic_mutex in panic(). - */ - panic_on_warn = 0; - panic("panic_on_warn set ...\n"); - } + if (!test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags)) + check_panic_on_warn("KASAN"); kasan_enable_current(); } diff --git a/mm/memblock.c b/mm/memblock.c index f6a4dffb9a88843f02b8470f51eac9ee70541720..f72d539570339743eabc3f437aa73a7c92dcbcf5 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -1597,13 +1597,7 @@ void __init __memblock_free_late(phys_addr_t base, phys_addr_t size) end = PFN_DOWN(base + size); for (; cursor < end; cursor++) { - /* - * Reserved pages are always initialized by the end of - * memblock_free_all() (by memmap_init() and, if deferred - * initialization is enabled, memmap_init_reserved_pages()), so - * these pages can be released directly to the buddy allocator. 
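The hunks above convert isolate_huge_page(), which returned a bool with true meaning success, into isolate_hugetlb(), which follows the usual 0/-EBUSY convention, and every call site flips its test accordingly. A toy sketch of the two conventions side by side (hypothetical isolate_old/isolate_new helpers):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Old-style: boolean, true means success. */
static bool isolate_old(int busy)
{
        return !busy;
}

/* New-style: 0 on success, negative errno on failure, as in the hunks above. */
static int isolate_new(int busy)
{
        return busy ? -EBUSY : 0;
}

int main(void)
{
        int busy = 1;

        if (!isolate_old(busy))         /* old convention: "!ok" is the error   */
                puts("old API: failed");
        if (isolate_new(busy))          /* new convention: non-zero is the error */
                puts("new API: failed (-EBUSY)");
        return 0;
}

The call-site inversion is the easy part to get wrong in such conversions, which is why the related hunks in memory-failure.c and mempolicy.c change the sense of their if() checks.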
- */ - __free_pages_core(pfn_to_page(cursor), 0); + memblock_free_pages(pfn_to_page(cursor), cursor, 0); totalram_pages_inc(); } } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index f4a6aeb0a9a29d619dd60db6b087d77e45975acd..6a7b65ec206943be1735b2788505adfa8539251a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3959,6 +3959,10 @@ static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, { struct mem_cgroup *memcg = mem_cgroup_from_css(css); + pr_warn_once("Cgroup memory moving (move_charge_at_immigrate) is deprecated. " + "Please report your usecase to linux-mm@kvack.org if you " + "depend on this functionality.\n"); + if (val & ~MOVE_MASK) return -EINVAL; diff --git a/mm/memory-failure.c b/mm/memory-failure.c index b7cd29a26c1bf822e14c428aac037ecc0e179e1c..3aa82f2fe33c0bd137fb049ea9d87b60a53730c8 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1773,7 +1773,7 @@ static bool isolate_page(struct page *page, struct list_head *pagelist) bool lru = PageLRU(page); if (PageHuge(page)) { - isolated = isolate_huge_page(page, pagelist); + isolated = !isolate_hugetlb(page, pagelist); } else { if (lru) isolated = !isolate_lru_page(page); diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 5da1c0299456b4a77b240fb8f7eef22c24422d0f..c6c9ee2bbe7664b20ff92823235b9dabb31fd531 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1292,7 +1292,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) if (PageHuge(page)) { pfn = page_to_pfn(head) + compound_nr(head) - 1; - isolate_huge_page(head, &source); + isolate_hugetlb(head, &source); continue; } else if (PageTransHuge(page)) pfn = page_to_pfn(head) + thp_nr_pages(page) - 1; diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 3f3677f9699cae9302f1640181be1dff9d37b022..eba59c9908975552307f9f0431753953253e92ad 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -622,8 +622,9 @@ static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask, /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */ if (flags & (MPOL_MF_MOVE_ALL) || - (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) { - if (!isolate_huge_page(page, qp->pagelist) && + (flags & MPOL_MF_MOVE && page_mapcount(page) == 1 && + !hugetlb_pmd_shared(pte))) { + if (isolate_hugetlb(page, qp->pagelist) && (flags & MPOL_MF_STRICT)) /* * Failed to isolate page but allow migrating pages diff --git a/mm/migrate.c b/mm/migrate.c index 7ed4eb406d1240f12d7470f1cea94f277ecec1ed..8c66ea3e282f936941abdd1a9e984806ca32099b 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -164,7 +164,7 @@ void putback_movable_page(struct page *page) * * This function shall be used whenever the isolated pageset has been * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range() - * and isolate_huge_page(). + * and isolate_hugetlb(). 
*/ void putback_movable_pages(struct list_head *l) { @@ -1671,8 +1671,9 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr, if (PageHuge(page)) { if (PageHead(page)) { - isolate_huge_page(page, pagelist); - err = 1; + err = isolate_hugetlb(page, pagelist); + if (!err) + err = 1; } } else { struct page *head; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 468e2061082010a4ad80185d5ccb18558efd99ae..fefcaf0180a70704a503394987e8285fbddf51ed 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5106,9 +5106,12 @@ static inline void free_the_page(struct page *page, unsigned int order) void __free_pages(struct page *page, unsigned int order) { + /* get PageHead before we drop reference */ + int head = PageHead(page); + if (put_page_testzero(page)) free_the_page(page, order); - else if (!PageHead(page)) + else if (!head) while (order-- > 0) free_the_page(page + (1 << order), order); } diff --git a/mm/swapfile.c b/mm/swapfile.c index 181cfc1b129683e7a46a597b8953df4f32185694..43e659eb8d54a9e53ea04c57a9e29f734c7e5609 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1105,6 +1105,7 @@ int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size) goto check_out; pr_debug("scan_swap_map of si %d failed to find offset\n", si->type); + cond_resched(); spin_lock(&swap_avail_lock); nextsi: diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c index 2885ff9c76f07054a2e6317db45fa40b3423dc5e..7217bd9886e36d929418f382b5dbeb111e4dfc51 100644 --- a/net/9p/trans_rdma.c +++ b/net/9p/trans_rdma.c @@ -386,6 +386,7 @@ post_recv(struct p9_client *client, struct p9_rdma_context *c) struct p9_trans_rdma *rdma = client->trans; struct ib_recv_wr wr; struct ib_sge sge; + int ret; c->busa = ib_dma_map_single(rdma->cm_id->device, c->rc.sdata, client->msize, @@ -403,7 +404,12 @@ post_recv(struct p9_client *client, struct p9_rdma_context *c) wr.wr_cqe = &c->cqe; wr.sg_list = &sge; wr.num_sge = 1; - return ib_post_recv(rdma->qp, &wr, NULL); + + ret = ib_post_recv(rdma->qp, &wr, NULL); + if (ret) + ib_dma_unmap_single(rdma->cm_id->device, c->busa, + client->msize, DMA_FROM_DEVICE); + return ret; error: p9_debug(P9_DEBUG_ERROR, "EIO\n"); @@ -500,7 +506,7 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req) if (down_interruptible(&rdma->sq_sem)) { err = -EINTR; - goto send_error; + goto dma_unmap; } /* Mark request as `sent' *before* we actually send it, @@ -510,11 +516,14 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req) req->status = REQ_STATUS_SENT; err = ib_post_send(rdma->qp, &wr, NULL); if (err) - goto send_error; + goto dma_unmap; /* Success */ return 0; +dma_unmap: + ib_dma_unmap_single(rdma->cm_id->device, c->busa, + c->req->tc.size, DMA_TO_DEVICE); /* Handle errors that happened during or while preparing the send: */ send_error: req->status = REQ_STATUS_ERROR; diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c index c9ed9d80e692bfca522306d83bad2f40f30cc471..da056170849bff1dfa321b85b74f842a4e235f52 100644 --- a/net/9p/trans_xen.c +++ b/net/9p/trans_xen.c @@ -397,19 +397,24 @@ static int xen_9pfs_front_alloc_dataring(struct xenbus_device *dev, return ret; } -static int xen_9pfs_front_probe(struct xenbus_device *dev, - const struct xenbus_device_id *id) +static int xen_9pfs_front_init(struct xenbus_device *dev) { int ret, i; struct xenbus_transaction xbt; - struct xen_9pfs_front_priv *priv = NULL; - char *versions; + struct xen_9pfs_front_priv *priv = dev_get_drvdata(&dev->dev); + char *versions, *v; unsigned int max_rings, 
max_ring_order, len = 0; versions = xenbus_read(XBT_NIL, dev->otherend, "versions", &len); if (IS_ERR(versions)) return PTR_ERR(versions); - if (strcmp(versions, "1")) { + for (v = versions; *v; v++) { + if (simple_strtoul(v, &v, 10) == 1) { + v = NULL; + break; + } + } + if (v) { kfree(versions); return -EINVAL; } @@ -424,11 +429,6 @@ static int xen_9pfs_front_probe(struct xenbus_device *dev, if (p9_xen_trans.maxsize > XEN_FLEX_RING_SIZE(max_ring_order)) p9_xen_trans.maxsize = XEN_FLEX_RING_SIZE(max_ring_order) / 2; - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - - priv->dev = dev; priv->num_rings = XEN_9PFS_NUM_RINGS; priv->rings = kcalloc(priv->num_rings, sizeof(*priv->rings), GFP_KERNEL); @@ -487,23 +487,35 @@ static int xen_9pfs_front_probe(struct xenbus_device *dev, goto error; } - write_lock(&xen_9pfs_lock); - list_add_tail(&priv->list, &xen_9pfs_devs); - write_unlock(&xen_9pfs_lock); - dev_set_drvdata(&dev->dev, priv); - xenbus_switch_state(dev, XenbusStateInitialised); - return 0; error_xenbus: xenbus_transaction_end(xbt, 1); xenbus_dev_fatal(dev, ret, "writing xenstore"); error: - dev_set_drvdata(&dev->dev, NULL); xen_9pfs_front_free(priv); return ret; } +static int xen_9pfs_front_probe(struct xenbus_device *dev, + const struct xenbus_device_id *id) +{ + struct xen_9pfs_front_priv *priv = NULL; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->dev = dev; + dev_set_drvdata(&dev->dev, priv); + + write_lock(&xen_9pfs_lock); + list_add_tail(&priv->list, &xen_9pfs_devs); + write_unlock(&xen_9pfs_lock); + + return 0; +} + static int xen_9pfs_front_resume(struct xenbus_device *dev) { dev_warn(&dev->dev, "suspend/resume unsupported\n"); @@ -522,6 +534,8 @@ static void xen_9pfs_front_changed(struct xenbus_device *dev, break; case XenbusStateInitWait: + if (!xen_9pfs_front_init(dev)) + xenbus_switch_state(dev, XenbusStateInitialised); break; case XenbusStateConnected: diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 2af1477a05ca6db824566fe3df1077d2cd15046b..08c473aa0113d5e906ebc9f5d23f30f16cbf2146 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -1623,6 +1623,7 @@ static int hci_dev_do_open(struct hci_dev *hdev) hdev->flush(hdev); if (hdev->sent_cmd) { + cancel_delayed_work_sync(&hdev->cmd_timer); kfree_skb(hdev->sent_cmd); hdev->sent_cmd = NULL; } diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 954b29605c94249bdc509c45f6cfd56f38ba5985..eb111504afc60dd8513a8d0aa81e37dd4b9c6095 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -4307,6 +4307,19 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct hci_ev_sync_conn_complete *ev = (void *) skb->data; struct hci_conn *conn; + switch (ev->link_type) { + case SCO_LINK: + case ESCO_LINK: + break; + default: + /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type + * for HCI_Synchronous_Connection_Complete is limited to + * either SCO or eSCO + */ + bt_dev_err(hdev, "Ignoring connect complete event for invalid link type"); + return; + } + BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); hci_dev_lock(hdev); diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index d28e263acb6233367760358aeba415b7add4abc5..4dcc1a8a8954f24c731b0a4e599f28d9ff72d13b 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -888,10 +888,6 @@ static int hci_sock_release(struct socket *sock) } sock_orphan(sk); - - skb_queue_purge(&sk->sk_receive_queue); - 
skb_queue_purge(&sk->sk_write_queue); - release_sock(sk); sock_put(sk); return 0; @@ -2019,6 +2015,12 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname, return err; } +static void hci_sock_destruct(struct sock *sk) +{ + skb_queue_purge(&sk->sk_receive_queue); + skb_queue_purge(&sk->sk_write_queue); +} + static const struct proto_ops hci_sock_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, @@ -2072,6 +2074,7 @@ static int hci_sock_create(struct net *net, struct socket *sock, int protocol, sock->state = SS_UNCONNECTED; sk->sk_state = BT_OPEN; + sk->sk_destruct = hci_sock_destruct; bt_sock_link(&hci_sk_list, sk); return 0; diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index cf56582d298ad835fab851ad000f7f951de9ae2a..bde90df6b4976a3ea80b8b6ba64a01bf7b67dc0a 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -2679,14 +2679,6 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len) if (IS_ERR(skb)) return PTR_ERR(skb); - /* Channel lock is released before requesting new skb and then - * reacquired thus we need to recheck channel state. - */ - if (chan->state != BT_CONNECTED) { - kfree_skb(skb); - return -ENOTCONN; - } - l2cap_do_send(chan, skb); return len; } @@ -2731,14 +2723,6 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len) if (IS_ERR(skb)) return PTR_ERR(skb); - /* Channel lock is released before requesting new skb and then - * reacquired thus we need to recheck channel state. - */ - if (chan->state != BT_CONNECTED) { - kfree_skb(skb); - return -ENOTCONN; - } - l2cap_do_send(chan, skb); err = len; break; @@ -2759,14 +2743,6 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len) */ err = l2cap_segment_sdu(chan, &seg_queue, msg, len); - /* The channel could have been closed while segmenting, - * check that it is still connected. - */ - if (chan->state != BT_CONNECTED) { - __skb_queue_purge(&seg_queue); - err = -ENOTCONN; - } - if (err) break; diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index d2c67852059921bfd188eee1a4a79736f9415323..756523e5402a80b82c9e1146b897cb1f2bdf5f8c 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -45,6 +45,7 @@ static const struct proto_ops l2cap_sock_ops; static void l2cap_sock_init(struct sock *sk, struct sock *parent); static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio, int kern); +static void l2cap_sock_cleanup_listen(struct sock *parent); bool l2cap_is_socket(struct socket *sock) { @@ -1414,6 +1415,7 @@ static int l2cap_sock_release(struct socket *sock) if (!sk) return 0; + l2cap_sock_cleanup_listen(sk); bt_sock_unlink(&l2cap_sk_list, sk); err = l2cap_sock_shutdown(sock, SHUT_RDWR); @@ -1623,6 +1625,14 @@ static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan, if (!skb) return ERR_PTR(err); + /* Channel lock is released before requesting new skb and then + * reacquired thus we need to recheck channel state. 
+ */ + if (chan->state != BT_CONNECTED) { + kfree_skb(skb); + return ERR_PTR(-ENOTCONN); + } + skb->priority = sk->sk_priority; bt_cb(skb)->l2cap.chan = chan; diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index a718204c4bfdda70a555432efc5539103dbd6ebc..f3c7cfba31e1bd2999568d6dc064947a8d40ae5e 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@ -871,6 +871,7 @@ static unsigned int ip_sabotage_in(void *priv, if (nf_bridge && !nf_bridge->in_prerouting && !netif_is_l3_master(skb->dev) && !netif_is_l3_slave(skb->dev)) { + nf_bridge_info_free(skb); state->okfn(state->net, state->sk, skb); return NF_STOLEN; } diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 06b80b584381932986eb1635cc30df0033991b9c..8335b7e4bcf6f5e12c5b61a53162bb3c1cd314a7 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -1049,7 +1049,7 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl, audit_log_nfcfg(repl->name, AF_BRIDGE, repl->nentries, AUDIT_XT_OP_REPLACE, GFP_KERNEL); - return ret; + return 0; free_unlock: mutex_unlock(&ebt_mutex); diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 9d26c5e9da058e5153862814c24589d09b88efb3..d35ea927ca8af910b7ffca48ab944b342427613e 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -1020,6 +1020,7 @@ static void caif_sock_destructor(struct sock *sk) return; } sk_stream_kill_queues(&cf_sk->sk); + WARN_ON(sk->sk_forward_alloc); caif_free_client(&cf_sk->layer); } diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c index b02e1292f7f1929b535338e6197d26d6f5f475f3..24488a4e2d26ef6879b24fefffec1aececab85dc 100644 --- a/net/caif/caif_usb.c +++ b/net/caif/caif_usb.c @@ -134,6 +134,9 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what, struct usb_device *usbdev; int res; + if (what == NETDEV_UNREGISTER && dev->reg_state >= NETREG_UNREGISTERED) + return 0; + /* Check whether we have a NCM device, and find its VID/PID. */ if (!(dev->dev.parent && dev->dev.parent->driver && strcmp(dev->dev.parent->driver->name, "cdc_ncm") == 0)) diff --git a/net/can/j1939/address-claim.c b/net/can/j1939/address-claim.c index f33c4732792787a906e1c0a9bad4381b4d98812d..ca4ad6cdd5cbfd323bbdf49ad8cf413cb39ad083 100644 --- a/net/can/j1939/address-claim.c +++ b/net/can/j1939/address-claim.c @@ -165,6 +165,46 @@ static void j1939_ac_process(struct j1939_priv *priv, struct sk_buff *skb) * leaving this function. */ ecu = j1939_ecu_get_by_name_locked(priv, name); + + if (ecu && ecu->addr == skcb->addr.sa) { + /* The ISO 11783-5 standard, in "4.5.2 - Address claim + * requirements", states: + * d) No CF shall begin, or resume, transmission on the + * network until 250 ms after it has successfully claimed + * an address except when responding to a request for + * address-claimed. + * + * But "Figure 6" and "Figure 7" in "4.5.4.2 - Address-claim + * prioritization" show that the CF begins the transmission + * after 250 ms from the first AC (address-claimed) message + * even if it sends another AC message during that time window + * to resolve the address contention with another CF. + * + * As stated in "4.4.2.3 - Address-claimed message": + * In order to successfully claim an address, the CF sending + * an address claimed message shall not receive a contending + * claim from another CF for at least 250 ms. 
+ * + * As stated in "4.4.3.2 - NAME management (NM) message": + * 1) A commanding CF can + * d) request that a CF with a specified NAME transmit + * the address-claimed message with its current NAME. + * 2) A target CF shall + * d) send an address-claimed message in response to a + * request for a matching NAME + * + * Taking the above arguments into account, the 250 ms wait is + * requested only during network initialization. + * + * Do not restart the timer on AC message if both the NAME and + * the address match and so if the address has already been + * claimed (timer has expired) or the AC message has been sent + * to resolve the contention with another CF (timer is still + * running). + */ + goto out_ecu_put; + } + if (!ecu && j1939_address_is_unicast(skcb->addr.sa)) ecu = j1939_ecu_create_locked(priv, name); diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c index 78f6a91106994fbb77fdbd76259d79c799f177bd..57d6aac7f43535f6f7cafb945aead176c837ce89 100644 --- a/net/can/j1939/transport.c +++ b/net/can/j1939/transport.c @@ -1087,10 +1087,6 @@ static bool j1939_session_deactivate(struct j1939_session *session) bool active; j1939_session_list_lock(priv); - /* This function should be called with a session ref-count of at - * least 2. - */ - WARN_ON_ONCE(kref_read(&session->kref) < 2); active = j1939_session_deactivate_locked(session); j1939_session_list_unlock(priv); diff --git a/net/core/dev.c b/net/core/dev.c index 37bb60a7e97ed223a0922907bb0c12e066fc922c..413c2a08d79db8c1d56ff990a0fb333e6fc4ba8d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3119,8 +3119,10 @@ void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason) { if (in_irq() || irqs_disabled()) __dev_kfree_skb_irq(skb, reason); + else if (unlikely(reason == SKB_REASON_DROPPED)) + kfree_skb(skb); else - dev_kfree_skb(skb); + consume_skb(skb); } EXPORT_SYMBOL(__dev_kfree_skb_any); @@ -6109,6 +6111,7 @@ EXPORT_SYMBOL(gro_find_complete_by_type); static void napi_skb_free_stolen_head(struct sk_buff *skb) { + nf_reset_ct(skb); skb_dst_drop(skb); skb_ext_put(skb); kmem_cache_free(skbuff_head_cache, skb); @@ -10326,7 +10329,7 @@ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64)); for (i = 0; i < n; i++) - dst[i] = atomic_long_read(&src[i]); + dst[i] = (unsigned long)atomic_long_read(&src[i]); /* zero out counters that only exist in rtnl_link_stats64 */ memset((char *)stats64 + n * sizeof(u64), 0, sizeof(*stats64) - n * sizeof(u64)); diff --git a/net/core/filter.c b/net/core/filter.c index 25b26175fc5d2eda2ded822c6e2a3e09590fc666..4369d37bb4470330744b45977a782799a1ed0dca 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -5401,7 +5401,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, neigh = __ipv6_neigh_lookup_noref_stub(dev, dst); } - if (!neigh) + if (!neigh || !(neigh->nud_state & NUD_VALID)) return BPF_FIB_LKUP_RET_NO_NEIGH; return bpf_fib_set_fwd_params(params, neigh, dev); @@ -5514,7 +5514,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, * not needed here. 
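The fib-lookup hunks above only forward through a neighbour whose nud_state intersects NUD_VALID instead of trusting any cached entry. A small sketch of that guard with made-up state flags (the real NUD_* masks differ):

#include <stdio.h>

/* Hypothetical cache-entry states; "valid" is a mask of several of them,
 * similar in spirit to NUD_VALID in the hunk above. */
#define ST_REACHABLE  0x01
#define ST_STALE      0x02
#define ST_FAILED     0x04
#define ST_INCOMPLETE 0x08
#define ST_VALID (ST_REACHABLE | ST_STALE)

struct nexthop_cache {
        unsigned int state;
        const char *lladdr;
};

/* Use a cached next hop only if it exists *and* is in a usable state. */
static const char *usable_lladdr(const struct nexthop_cache *n)
{
        if (!n || !(n->state & ST_VALID))
                return NULL;
        return n->lladdr;
}

int main(void)
{
        struct nexthop_cache good = { ST_REACHABLE, "aa:bb:cc:dd:ee:ff" };
        struct nexthop_cache bad = { ST_INCOMPLETE, "00:00:00:00:00:00" };
        const char *g = usable_lladdr(&good);
        const char *b = usable_lladdr(&bad);

        printf("good: %s\n", g ? g : "no neighbour");
        printf("bad:  %s\n", b ? b : "no neighbour");
        return 0;
}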
*/ neigh = __ipv6_neigh_lookup_noref_stub(dev, dst); - if (!neigh) + if (!neigh || !(neigh->nud_state & NUD_VALID)) return BPF_FIB_LKUP_RET_NO_NEIGH; return bpf_fib_set_fwd_params(params, neigh, dev); diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 37f162d812bc0957959970da2f880a8d960411d0..517de4e9c0e0a20ee908c5eb8ec50d9917116d4e 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -242,7 +242,7 @@ static int neigh_forced_gc(struct neigh_table *tbl) (n->nud_state == NUD_NOARP) || (tbl->is_multicast && tbl->is_multicast(n->primary_key)) || - time_after(tref, n->updated)) + !time_in_range(n->updated, tref, jiffies)) remove = true; write_unlock(&n->lock); @@ -262,7 +262,17 @@ static int neigh_forced_gc(struct neigh_table *tbl) static void neigh_add_timer(struct neighbour *n, unsigned long when) { + /* Use safe distance from the jiffies - LONG_MAX point while timer + * is running in DELAY/PROBE state but still show to user space + * large times in the past. + */ + unsigned long mint = jiffies - (LONG_MAX - 86400 * HZ); + neigh_hold(n); + if (!time_in_range(n->confirmed, mint, jiffies)) + n->confirmed = mint; + if (time_before(n->used, n->confirmed)) + n->used = n->confirmed; if (unlikely(mod_timer(&n->timer, when))) { printk("NEIGH: BUG, double timer add, state is %x\n", n->nud_state); @@ -948,12 +958,14 @@ static void neigh_periodic_work(struct work_struct *work) goto next_elt; } - if (time_before(n->used, n->confirmed)) + if (time_before(n->used, n->confirmed) && + time_is_before_eq_jiffies(n->confirmed)) n->used = n->confirmed; if (refcount_read(&n->refcnt) == 1 && (state == NUD_FAILED || - time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) { + !time_in_range_open(jiffies, n->used, + n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) { *np = n->next; neigh_mark_dead(n); write_unlock(&n->lock); diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index a3b7d965e9c019897ebfee816d9e8f1e5acb1b12..e05dd4f3279a8c119d53a96bf72f5a6ca3660e3f 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -155,12 +155,12 @@ static int ops_init(const struct pernet_operations *ops, struct net *net) return 0; if (ops->id && ops->size) { -cleanup: ng = rcu_dereference_protected(net->gen, lockdep_is_held(&pernet_ops_rwsem)); ng->ptr[*ops->id] = NULL; } +cleanup: kfree(data); out: diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 2b12e0730b852c93dd590803c1f5ba2a4418b408..09cdefe5e1c83b95c63faf95dbf5300cbcc8fe59 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -659,7 +659,6 @@ static void kfree_skbmem(struct sk_buff *skb) void skb_release_head_state(struct sk_buff *skb) { - nf_reset_ct(skb); skb_dst_drop(skb); if (skb->destructor) { WARN_ON(in_irq()); @@ -3688,7 +3687,7 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb, skb_shinfo(skb)->frag_list = NULL; - do { + while (list_skb) { nskb = list_skb; list_skb = list_skb->next; @@ -3732,8 +3731,7 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb, if (skb_needs_linearize(nskb, features) && __skb_linearize(nskb)) goto err_linearize; - - } while (list_skb); + } skb->truesize = skb->truesize - delta_truesize; skb->data_len = skb->data_len - delta_len; diff --git a/net/core/stream.c b/net/core/stream.c index d7c5413d16d57c8fca341293207176e9c5db8e2b..cd60746877b1e509aa78b25ee0dca2082b13d606 100644 --- a/net/core/stream.c +++ b/net/core/stream.c @@ -209,7 +209,6 @@ void sk_stream_kill_queues(struct sock *sk) sk_mem_reclaim(sk); WARN_ON(sk->sk_wmem_queued); - 
WARN_ON(sk->sk_forward_alloc); /* It is _impossible_ for the backlog to contain anything * when we get here. All user references to this socket diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 21c61a9c3b1523d061b0bee8007003c9a9513373..c563f9b325d05e79a9da66c38cfdf0f7ca7abe95 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -541,11 +541,9 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk, *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash), NULL); /* Clone pktoptions received with SYN, if we own the req */ if (*own_req && ireq->pktopts) { - newnp->pktoptions = skb_clone(ireq->pktopts, GFP_ATOMIC); + newnp->pktoptions = skb_clone_and_charge_r(ireq->pktopts, newsk); consume_skb(ireq->pktopts); ireq->pktopts = NULL; - if (newnp->pktoptions) - skb_set_owner_r(newnp->pktoptions, newsk); } return newsk; @@ -605,7 +603,7 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) --ANK (980728) */ if (np->rxopt.all) - opt_skb = skb_clone(skb, GFP_ATOMIC); + opt_skb = skb_clone_and_charge_r(skb, sk); if (sk->sk_state == DCCP_OPEN) { /* Fast path */ if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len)) @@ -669,7 +667,6 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb)); if (ipv6_opt_accepted(sk, opt_skb, &DCCP_SKB_CB(opt_skb)->header.h6)) { - skb_set_owner_r(opt_skb, sk); memmove(IP6CB(opt_skb), &DCCP_SKB_CB(opt_skb)->header.h6, sizeof(struct inet6_skb_parm)); diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 4576ba7b62c612cdfb545dd4aedbe184be521c41..88b6120878cd9abfb3b9fe0423e5dbf0ffdd6375 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -48,7 +48,6 @@ #include #include #include -#include #ifdef CONFIG_SYSCTL #include #endif @@ -1279,7 +1278,6 @@ int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size) len -= size; done += size; } - CALL_HCK_LITE_HOOK(nip_ninet_gifconf_lhck, dev, buf, len, size, &done); out: return done; } diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index ab9fcc6231b862e4ad486da1e16cede715378673..4e94796ccdbd144c5748e78c70b2484acea1a324 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include @@ -1021,6 +1022,7 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi) if (type > RTAX_MAX) return false; + type = array_index_nospec(type, RTAX_MAX + 1); if (type == RTAX_CC_ALGO) { char tmp[TCP_CA_NAME_MAX]; bool ecn_ca = false; diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 23cd1dce49f31f423fb89bc55398a92e9df1893d..f75d1ac87c571e49b15836bf2226eef43738844e 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -580,8 +580,20 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk) spin_lock(lock); if (osk) { WARN_ON_ONCE(sk->sk_hash != osk->sk_hash); - ret = sk_nulls_del_node_init_rcu(osk); - } else if (found_dup_sk) { + ret = sk_hashed(osk); + if (ret) { + /* Before deleting the node, we insert a new one to make + * sure that the look-up-sk process would not miss either + * of them and that at least one node would exist in ehash + * table all the time. Otherwise there's a tiny chance + * that lookup process could find nothing in ehash table. 
+ */ + __sk_nulls_add_node_tail_rcu(sk, list); + sk_nulls_del_node_init_rcu(osk); + } + goto unlock; + } + if (found_dup_sk) { *found_dup_sk = inet_ehash_lookup_by_sk(sk, list); if (*found_dup_sk) ret = false; @@ -590,6 +602,7 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk) if (ret) __sk_nulls_add_node_rcu(sk, list); +unlock: spin_unlock(lock); return ret; @@ -756,17 +769,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, u32 index; if (port) { - head = &hinfo->bhash[inet_bhashfn(net, port, - hinfo->bhash_size)]; - tb = inet_csk(sk)->icsk_bind_hash; - spin_lock_bh(&head->lock); - if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { - inet_ehash_nolisten(sk, NULL, NULL); - spin_unlock_bh(&head->lock); - return 0; - } - spin_unlock(&head->lock); - /* No definite answer... Walk to established hash table */ + local_bh_disable(); ret = check_established(death_row, sk, port, NULL); local_bh_enable(); return ret; diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index c411c87ae865fba8361addd7f1339b909123a397..a00102d7c7fd4b62b9bacabbffea22cb2f3d28f7 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c @@ -81,10 +81,10 @@ void inet_twsk_put(struct inet_timewait_sock *tw) } EXPORT_SYMBOL_GPL(inet_twsk_put); -static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw, - struct hlist_nulls_head *list) +static void inet_twsk_add_node_tail_rcu(struct inet_timewait_sock *tw, + struct hlist_nulls_head *list) { - hlist_nulls_add_head_rcu(&tw->tw_node, list); + hlist_nulls_add_tail_rcu(&tw->tw_node, list); } static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw, @@ -120,7 +120,7 @@ void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, spin_lock(lock); - inet_twsk_add_node_rcu(tw, &ehead->chain); + inet_twsk_add_node_tail_rcu(tw, &ehead->chain); /* Step 3: Remove SK from hash chain */ if (__sk_nulls_del_node_init_rcu(sk)) diff --git a/net/ipv4/metrics.c b/net/ipv4/metrics.c index 3205d5f7c8c942921c82c50148acfd9501394d0b..4966ac2aaf87d7fae80b3de83018ab1cf5b4d470 100644 --- a/net/ipv4/metrics.c +++ b/net/ipv4/metrics.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only #include +#include #include #include #include @@ -28,6 +29,7 @@ static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx, return -EINVAL; } + type = array_index_nospec(type, RTAX_MAX + 1); if (type == RTAX_CC_ALGO) { char tmp[TCP_CA_NAME_MAX]; diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index f77ea0dbe6562b38a1a10d176857773c3298d6e8..ec981618b7b22809f7dbe996a964c98fa949d2c3 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -1044,7 +1044,6 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, struct xt_counters *counters; struct ipt_entry *iter; - ret = 0; counters = xt_counters_alloc(num_counters); if (!counters) { ret = -ENOMEM; @@ -1090,7 +1089,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, net_warn_ratelimited("iptables: counters copy to user failed while replacing table\n"); } vfree(counters); - return ret; + return 0; put_module: module_put(t->me); diff --git a/net/ipv4/netfilter/nf_tproxy_ipv4.c b/net/ipv4/netfilter/nf_tproxy_ipv4.c index b2bae0b0e42a1e983210b37b32b50186de1b0eff..61cb2341f50fe007c6208a091d214ba6c37e905d 100644 --- a/net/ipv4/netfilter/nf_tproxy_ipv4.c +++ b/net/ipv4/netfilter/nf_tproxy_ipv4.c @@ -38,7 +38,7 @@ 
nf_tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb, hp->source, lport ? lport : hp->dest, skb->dev, NF_TPROXY_LOOKUP_LISTENER); if (sk2) { - inet_twsk_deschedule_put(inet_twsk(sk)); + nf_tproxy_twsk_deschedule_put(inet_twsk(sk)); sk = sk2; } } diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 374647693d7acc64420b6435d508b1e48025fe5c..b70572477394778e8948263fbf15ad7e78346360 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1240,6 +1240,7 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie) static void ipv4_send_dest_unreach(struct sk_buff *skb) { + struct net_device *dev; struct ip_options opt; int res; @@ -1257,7 +1258,8 @@ static void ipv4_send_dest_unreach(struct sk_buff *skb) opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr); rcu_read_lock(); - res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL); + dev = skb->dev ? skb->dev : skb_rtable(skb)->dst.dev; + res = __ip_options_compile(dev_net(dev), &opt, skb, NULL); rcu_read_unlock(); if (res) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 74edc125212f94b1f1737a95a31a9be5518d513d..f8cce16dd48c43da725537f5f548d47286da2881 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -432,6 +432,7 @@ void tcp_init_sock(struct sock *sk) /* There's a bubble in the pipe until at least the first ACK. */ tp->app_limited = ~0U; + tp->rate_app_limited = 1; /* See draft-stevens-tcpca-spec-01 for discussion of the * initialization of these values. @@ -2837,6 +2838,7 @@ int tcp_disconnect(struct sock *sk, int flags) tp->last_oow_ack_time = 0; /* There's a bubble in the pipe until at least the first ACK. */ tp->app_limited = ~0U; + tp->rate_app_limited = 1; tp->rack.mstamp = 0; tp->rack.advanced = 0; tp->rack.reo_wnd_steps = 1; diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c index 6a1685461f89b5f64746f5e23590cbfdb0bf29cd..926e29e84b40bbf0c115a8c389ce556e7b247685 100644 --- a/net/ipv4/tcp_bpf.c +++ b/net/ipv4/tcp_bpf.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include @@ -642,10 +643,9 @@ struct proto *tcp_bpf_get_proto(struct sock *sk, struct sk_psock *psock) */ void tcp_bpf_clone(const struct sock *sk, struct sock *newsk) { - int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4; struct proto *prot = newsk->sk_prot; - if (prot == &tcp_bpf_prots[family][TCP_BPF_BASE]) + if (is_insidevar(prot, tcp_bpf_prots)) newsk->sk_prot = sk->sk_prot_creator; } #endif /* CONFIG_BPF_STREAM_PARSER */ diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index e42312321191b2e4b3e3b914eaf91646ef542b9d..8d854feebdb00e1bb71ba30edbd56345a97a6531 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -565,6 +565,9 @@ EXPORT_SYMBOL(tcp_create_openreq_child); * validation and inside tcp_v4_reqsk_send_ack(). Can we do better? * * We don't need to initialize tmp_opt.sack_ok as we don't use the results + * + * Note: If @fastopen is true, this can be called from process context. + * Otherwise, this is from BH context. 
*/ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, @@ -717,7 +720,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, &tcp_rsk(req)->last_oow_ack_time)) req->rsk_ops->send_ack(sk, skb, req); if (paws_reject) - __NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); + NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); return NULL; } @@ -736,7 +739,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, * "fourth, check the SYN bit" */ if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) { - __TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS); + TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS); goto embryonic_reset; } diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index f4559e5bc84bfcced68cce819ff01ad1bb6156f8..a30ff5d6808aa4649f69ab47be366c70838e9c0a 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -51,7 +51,7 @@ static void ip6_datagram_flow_key_init(struct flowi6 *fl6, struct sock *sk) fl6->flowi6_mark = sk->sk_mark; fl6->fl6_dport = inet->inet_dport; fl6->fl6_sport = inet->inet_sport; - fl6->flowlabel = np->flow_label; + fl6->flowlabel = ip6_make_flowinfo(np->tclass, np->flow_label); fl6->flowi6_uid = sk->sk_uid; if (!fl6->flowi6_oif) diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c index a1ac0e3d8c60c52fa48d4bccb9d1cd4fadb46fa3..163668531a57f46e3467323f04f7717621d4ce96 100644 --- a/net/ipv6/ila/ila_xlat.c +++ b/net/ipv6/ila/ila_xlat.c @@ -477,6 +477,7 @@ int ila_xlat_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info) rcu_read_lock(); + ret = -ESRCH; ila = ila_lookup_by_params(&xp, ilan); if (ila) { ret = ila_dump_info(ila, diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index d36168baf6776381792fe41b256d4580e29f759d..99bb11d16712716f0798ce44551a9711a285ee0b 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -1062,7 +1062,6 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, struct xt_counters *counters; struct ip6t_entry *iter; - ret = 0; counters = xt_counters_alloc(num_counters); if (!counters) { ret = -ENOMEM; @@ -1108,7 +1107,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n"); } vfree(counters); - return ret; + return 0; put_module: module_put(t->me); diff --git a/net/ipv6/netfilter/nf_tproxy_ipv6.c b/net/ipv6/netfilter/nf_tproxy_ipv6.c index 6bac68fb27a396fa2d80bac2085e716a82ef035f..3fe4f15e01dc823e216516751706fb57adc09de0 100644 --- a/net/ipv6/netfilter/nf_tproxy_ipv6.c +++ b/net/ipv6/netfilter/nf_tproxy_ipv6.c @@ -63,7 +63,7 @@ nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff, lport ? 
lport : hp->dest, skb->dev, NF_TPROXY_LOOKUP_LISTENER); if (sk2) { - inet_twsk_deschedule_put(inet_twsk(sk)); + nf_tproxy_twsk_deschedule_put(inet_twsk(sk)); sk = sk2; } } diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 803d1aa83140c82be6744d0e06c38a2be65fb156..a6d5c99f65a3a336b4747dc77909d08fb05c1bc8 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -5444,16 +5444,17 @@ static size_t rt6_nlmsg_size(struct fib6_info *f6i) nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_nlmsg_size, &nexthop_len); } else { + struct fib6_info *sibling, *next_sibling; struct fib6_nh *nh = f6i->fib6_nh; nexthop_len = 0; if (f6i->fib6_nsiblings) { - nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */ - + NLA_ALIGN(sizeof(struct rtnexthop)) - + nla_total_size(16) /* RTA_GATEWAY */ - + lwtunnel_get_encap_size(nh->fib_nh_lws); + rt6_nh_nlmsg_size(nh, &nexthop_len); - nexthop_len *= f6i->fib6_nsiblings; + list_for_each_entry_safe(sibling, next_sibling, + &f6i->fib6_siblings, fib6_siblings) { + rt6_nh_nlmsg_size(sibling->fib6_nh, &nexthop_len); + } } nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws); } diff --git a/net/ipv6/rpl.c b/net/ipv6/rpl.c index 307f336b5353ef617d5ef9a2085cdea2f5276ba2..3b0386437f69d83c36ce3bd3a2e7af2ba0a969b3 100644 --- a/net/ipv6/rpl.c +++ b/net/ipv6/rpl.c @@ -32,7 +32,8 @@ static void *ipv6_rpl_segdata_pos(const struct ipv6_rpl_sr_hdr *hdr, int i) size_t ipv6_rpl_srh_size(unsigned char n, unsigned char cmpri, unsigned char cmpre) { - return (n * IPV6_PFXTAIL_LEN(cmpri)) + IPV6_PFXTAIL_LEN(cmpre); + return sizeof(struct ipv6_rpl_sr_hdr) + (n * IPV6_PFXTAIL_LEN(cmpri)) + + IPV6_PFXTAIL_LEN(cmpre); } void ipv6_rpl_srh_decompress(struct ipv6_rpl_sr_hdr *outhdr, diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index b728414ce4e7a209d632844bb7961a0f9bb6e974..3f721eda2dacbcaa43eb7fdc8d24a487ff9f2e2c 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -271,6 +271,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, fl6.flowi6_proto = IPPROTO_TCP; fl6.daddr = sk->sk_v6_daddr; fl6.saddr = saddr ? 
*saddr : np->saddr; + fl6.flowlabel = ip6_make_flowinfo(np->tclass, np->flow_label); fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.flowi6_mark = sk->sk_mark; fl6.fl6_dport = usin->sin6_port; @@ -1408,14 +1409,11 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff * /* Clone pktoptions received with SYN, if we own the req */ if (ireq->pktopts) { - newnp->pktoptions = skb_clone(ireq->pktopts, - sk_gfp_mask(sk, GFP_ATOMIC)); + newnp->pktoptions = skb_clone_and_charge_r(ireq->pktopts, newsk); consume_skb(ireq->pktopts); ireq->pktopts = NULL; - if (newnp->pktoptions) { + if (newnp->pktoptions) tcp_v6_restore_cb(newnp->pktoptions); - skb_set_owner_r(newnp->pktoptions, newsk); - } } } else { if (!req_unhash && found_dup_sk) { @@ -1483,7 +1481,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) --ANK (980728) */ if (np->rxopt.all) - opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC)); + opt_skb = skb_clone_and_charge_r(skb, sk); if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ struct dst_entry *dst; @@ -1565,7 +1563,6 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) if (np->repflow) np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb)); if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) { - skb_set_owner_r(opt_skb, sk); tcp_v6_restore_cb(opt_skb); opt_skb = xchg(&np->pktoptions, opt_skb); } else { diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index d6bb1795329a2079646dcf5771671928aec6ecf9..a4b793d1b7d76862adfc259b5c7021a606dc71e3 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -104,9 +104,9 @@ static struct workqueue_struct *l2tp_wq; /* per-net private data for this module */ static unsigned int l2tp_net_id; struct l2tp_net { - struct list_head l2tp_tunnel_list; - /* Lock for write access to l2tp_tunnel_list */ - spinlock_t l2tp_tunnel_list_lock; + /* Lock for write access to l2tp_tunnel_idr */ + spinlock_t l2tp_tunnel_idr_lock; + struct idr l2tp_tunnel_idr; struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2]; /* Lock for write access to l2tp_session_hlist */ spinlock_t l2tp_session_hlist_lock; @@ -208,13 +208,10 @@ struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id) struct l2tp_tunnel *tunnel; rcu_read_lock_bh(); - list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { - if (tunnel->tunnel_id == tunnel_id && - refcount_inc_not_zero(&tunnel->ref_count)) { - rcu_read_unlock_bh(); - - return tunnel; - } + tunnel = idr_find(&pn->l2tp_tunnel_idr, tunnel_id); + if (tunnel && refcount_inc_not_zero(&tunnel->ref_count)) { + rcu_read_unlock_bh(); + return tunnel; } rcu_read_unlock_bh(); @@ -224,13 +221,14 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_get); struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth) { - const struct l2tp_net *pn = l2tp_pernet(net); + struct l2tp_net *pn = l2tp_pernet(net); + unsigned long tunnel_id, tmp; struct l2tp_tunnel *tunnel; int count = 0; rcu_read_lock_bh(); - list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { - if (++count > nth && + idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) { + if (tunnel && ++count > nth && refcount_inc_not_zero(&tunnel->ref_count)) { rcu_read_unlock_bh(); return tunnel; @@ -1043,7 +1041,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, uns IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED); nf_reset_ct(skb); - bh_lock_sock(sk); + bh_lock_sock_nested(sk); if (sock_owned_by_user(sk)) { kfree_skb(skb); ret 
= NET_XMIT_DROP; @@ -1229,6 +1227,15 @@ static void l2tp_udp_encap_destroy(struct sock *sk) l2tp_tunnel_delete(tunnel); } +static void l2tp_tunnel_remove(struct net *net, struct l2tp_tunnel *tunnel) +{ + struct l2tp_net *pn = l2tp_pernet(net); + + spin_lock_bh(&pn->l2tp_tunnel_idr_lock); + idr_remove(&pn->l2tp_tunnel_idr, tunnel->tunnel_id); + spin_unlock_bh(&pn->l2tp_tunnel_idr_lock); +} + /* Workqueue tunnel deletion function */ static void l2tp_tunnel_del_work(struct work_struct *work) { @@ -1236,7 +1243,6 @@ static void l2tp_tunnel_del_work(struct work_struct *work) del_work); struct sock *sk = tunnel->sock; struct socket *sock = sk->sk_socket; - struct l2tp_net *pn; l2tp_tunnel_closeall(tunnel); @@ -1250,12 +1256,7 @@ static void l2tp_tunnel_del_work(struct work_struct *work) } } - /* Remove the tunnel struct from the tunnel list */ - pn = l2tp_pernet(tunnel->l2tp_net); - spin_lock_bh(&pn->l2tp_tunnel_list_lock); - list_del_rcu(&tunnel->list); - spin_unlock_bh(&pn->l2tp_tunnel_list_lock); - + l2tp_tunnel_remove(tunnel->l2tp_net, tunnel); /* drop initial ref */ l2tp_tunnel_dec_refcount(tunnel); @@ -1386,8 +1387,6 @@ static int l2tp_tunnel_sock_create(struct net *net, return err; } -static struct lock_class_key l2tp_socket_class; - int l2tp_tunnel_create(int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp) { @@ -1457,12 +1456,19 @@ static int l2tp_validate_socket(const struct sock *sk, const struct net *net, int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net, struct l2tp_tunnel_cfg *cfg) { - struct l2tp_tunnel *tunnel_walk; - struct l2tp_net *pn; + struct l2tp_net *pn = l2tp_pernet(net); + u32 tunnel_id = tunnel->tunnel_id; struct socket *sock; struct sock *sk; int ret; + spin_lock_bh(&pn->l2tp_tunnel_idr_lock); + ret = idr_alloc_u32(&pn->l2tp_tunnel_idr, NULL, &tunnel_id, tunnel_id, + GFP_ATOMIC); + spin_unlock_bh(&pn->l2tp_tunnel_idr_lock); + if (ret) + return ret == -ENOSPC ? 
-EEXIST : ret; + if (tunnel->fd < 0) { ret = l2tp_tunnel_sock_create(net, tunnel->tunnel_id, tunnel->peer_tunnel_id, cfg, @@ -1476,6 +1482,7 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net, } sk = sock->sk; + lock_sock(sk); write_lock_bh(&sk->sk_callback_lock); ret = l2tp_validate_socket(sk, net, tunnel->encap); if (ret < 0) @@ -1483,24 +1490,6 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net, rcu_assign_sk_user_data(sk, tunnel); write_unlock_bh(&sk->sk_callback_lock); - tunnel->l2tp_net = net; - pn = l2tp_pernet(net); - - sock_hold(sk); - tunnel->sock = sk; - - spin_lock_bh(&pn->l2tp_tunnel_list_lock); - list_for_each_entry(tunnel_walk, &pn->l2tp_tunnel_list, list) { - if (tunnel_walk->tunnel_id == tunnel->tunnel_id) { - spin_unlock_bh(&pn->l2tp_tunnel_list_lock); - sock_put(sk); - ret = -EEXIST; - goto err_sock; - } - } - list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list); - spin_unlock_bh(&pn->l2tp_tunnel_list_lock); - if (tunnel->encap == L2TP_ENCAPTYPE_UDP) { struct udp_tunnel_sock_cfg udp_cfg = { .sk_user_data = tunnel, @@ -1514,9 +1503,16 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net, tunnel->old_sk_destruct = sk->sk_destruct; sk->sk_destruct = &l2tp_tunnel_destruct; - lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, - "l2tp_sock"); sk->sk_allocation = GFP_ATOMIC; + release_sock(sk); + + sock_hold(sk); + tunnel->sock = sk; + tunnel->l2tp_net = net; + + spin_lock_bh(&pn->l2tp_tunnel_idr_lock); + idr_replace(&pn->l2tp_tunnel_idr, tunnel, tunnel->tunnel_id); + spin_unlock_bh(&pn->l2tp_tunnel_idr_lock); trace_register_tunnel(tunnel); @@ -1525,17 +1521,16 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net, return 0; -err_sock: - write_lock_bh(&sk->sk_callback_lock); - rcu_assign_sk_user_data(sk, NULL); err_inval_sock: write_unlock_bh(&sk->sk_callback_lock); + release_sock(sk); if (tunnel->fd < 0) sock_release(sock); else sockfd_put(sock); err: + l2tp_tunnel_remove(net, tunnel); return ret; } EXPORT_SYMBOL_GPL(l2tp_tunnel_register); @@ -1649,8 +1644,8 @@ static __net_init int l2tp_init_net(struct net *net) struct l2tp_net *pn = net_generic(net, l2tp_net_id); int hash; - INIT_LIST_HEAD(&pn->l2tp_tunnel_list); - spin_lock_init(&pn->l2tp_tunnel_list_lock); + idr_init(&pn->l2tp_tunnel_idr); + spin_lock_init(&pn->l2tp_tunnel_idr_lock); for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]); @@ -1664,11 +1659,13 @@ static __net_exit void l2tp_exit_net(struct net *net) { struct l2tp_net *pn = l2tp_pernet(net); struct l2tp_tunnel *tunnel = NULL; + unsigned long tunnel_id, tmp; int hash; rcu_read_lock_bh(); - list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { - l2tp_tunnel_delete(tunnel); + idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) { + if (tunnel) + l2tp_tunnel_delete(tunnel); } rcu_read_unlock_bh(); @@ -1678,6 +1675,7 @@ static __net_exit void l2tp_exit_net(struct net *net) for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) WARN_ON_ONCE(!hlist_empty(&pn->l2tp_session_hlist[hash])); + idr_destroy(&pn->l2tp_tunnel_idr); } static struct pernet_operations l2tp_net_ops = { diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index aea85f91f05995bb670809c1a29781a410b80288..5ecc0f20094448ca9a403af7fbc2d00657932ded 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -651,54 +651,22 @@ static int pppol2tp_tunnel_mtu(const struct l2tp_tunnel *tunnel) return mtu - PPPOL2TP_HEADER_OVERHEAD; } -/* connect() 
handler. Attach a PPPoX socket to a tunnel UDP socket - */ -static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, - int sockaddr_len, int flags) +static struct l2tp_tunnel *pppol2tp_tunnel_get(struct net *net, + const struct l2tp_connect_info *info, + bool *new_tunnel) { - struct sock *sk = sock->sk; - struct pppox_sock *po = pppox_sk(sk); - struct l2tp_session *session = NULL; - struct l2tp_connect_info info; struct l2tp_tunnel *tunnel; - struct pppol2tp_session *ps; - struct l2tp_session_cfg cfg = { 0, }; - bool drop_refcnt = false; - bool drop_tunnel = false; - bool new_session = false; - bool new_tunnel = false; int error; - error = pppol2tp_sockaddr_get_info(uservaddr, sockaddr_len, &info); - if (error < 0) - return error; + *new_tunnel = false; - lock_sock(sk); - - /* Check for already bound sockets */ - error = -EBUSY; - if (sk->sk_state & PPPOX_CONNECTED) - goto end; - - /* We don't supporting rebinding anyway */ - error = -EALREADY; - if (sk->sk_user_data) - goto end; /* socket is already attached */ - - /* Don't bind if tunnel_id is 0 */ - error = -EINVAL; - if (!info.tunnel_id) - goto end; - - tunnel = l2tp_tunnel_get(sock_net(sk), info.tunnel_id); - if (tunnel) - drop_tunnel = true; + tunnel = l2tp_tunnel_get(net, info->tunnel_id); /* Special case: create tunnel context if session_id and * peer_session_id is 0. Otherwise look up tunnel using supplied * tunnel id. */ - if (!info.session_id && !info.peer_session_id) { + if (!info->session_id && !info->peer_session_id) { if (!tunnel) { struct l2tp_tunnel_cfg tcfg = { .encap = L2TP_ENCAPTYPE_UDP, @@ -707,40 +675,82 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, /* Prevent l2tp_tunnel_register() from trying to set up * a kernel socket. */ - if (info.fd < 0) { - error = -EBADF; - goto end; - } + if (info->fd < 0) + return ERR_PTR(-EBADF); - error = l2tp_tunnel_create(info.fd, - info.version, - info.tunnel_id, - info.peer_tunnel_id, &tcfg, + error = l2tp_tunnel_create(info->fd, + info->version, + info->tunnel_id, + info->peer_tunnel_id, &tcfg, &tunnel); if (error < 0) - goto end; + return ERR_PTR(error); l2tp_tunnel_inc_refcount(tunnel); - error = l2tp_tunnel_register(tunnel, sock_net(sk), - &tcfg); + error = l2tp_tunnel_register(tunnel, net, &tcfg); if (error < 0) { kfree(tunnel); - goto end; + return ERR_PTR(error); } - drop_tunnel = true; - new_tunnel = true; + + *new_tunnel = true; } } else { /* Error if we can't find the tunnel */ - error = -ENOENT; if (!tunnel) - goto end; + return ERR_PTR(-ENOENT); /* Error if socket is not prepped */ - if (!tunnel->sock) - goto end; + if (!tunnel->sock) { + l2tp_tunnel_dec_refcount(tunnel); + return ERR_PTR(-ENOENT); + } } + return tunnel; +} + +/* connect() handler. 
Attach a PPPoX socket to a tunnel UDP socket + */ +static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, + int sockaddr_len, int flags) +{ + struct sock *sk = sock->sk; + struct pppox_sock *po = pppox_sk(sk); + struct l2tp_session *session = NULL; + struct l2tp_connect_info info; + struct l2tp_tunnel *tunnel; + struct pppol2tp_session *ps; + struct l2tp_session_cfg cfg = { 0, }; + bool drop_refcnt = false; + bool new_session = false; + bool new_tunnel = false; + int error; + + error = pppol2tp_sockaddr_get_info(uservaddr, sockaddr_len, &info); + if (error < 0) + return error; + + /* Don't bind if tunnel_id is 0 */ + if (!info.tunnel_id) + return -EINVAL; + + tunnel = pppol2tp_tunnel_get(sock_net(sk), &info, &new_tunnel); + if (IS_ERR(tunnel)) + return PTR_ERR(tunnel); + + lock_sock(sk); + + /* Check for already bound sockets */ + error = -EBUSY; + if (sk->sk_state & PPPOX_CONNECTED) + goto end; + + /* We don't supporting rebinding anyway */ + error = -EALREADY; + if (sk->sk_user_data) + goto end; /* socket is already attached */ + if (tunnel->peer_tunnel_id == 0) tunnel->peer_tunnel_id = info.peer_tunnel_id; @@ -841,8 +851,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, } if (drop_refcnt) l2tp_session_dec_refcount(session); - if (drop_tunnel) - l2tp_tunnel_dec_refcount(tunnel); + l2tp_tunnel_dec_refcount(tunnel); release_sock(sk); return error; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 63499db5c63d93e34e5a40353f8952dfca195e9c..bd349ae9ee4b4309295b0a5ebd34b0b6158e5d48 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -644,6 +644,26 @@ struct mesh_csa_settings { struct cfg80211_csa_settings settings; }; +/** + * struct mesh_table + * + * @known_gates: list of known mesh gates and their mpaths by the station. The + * gate's mpath may or may not be resolved and active. + * @gates_lock: protects updates to known_gates + * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr + * @walk_head: linked list containing all mesh_path objects + * @walk_lock: lock protecting walk_head + * @entries: number of entries in the table + */ +struct mesh_table { + struct hlist_head known_gates; + spinlock_t gates_lock; + struct rhashtable rhead; + struct hlist_head walk_head; + spinlock_t walk_lock; + atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */ +}; + struct ieee80211_if_mesh { struct timer_list housekeeping_timer; struct timer_list mesh_path_timer; @@ -718,8 +738,8 @@ struct ieee80211_if_mesh { /* offset from skb->data while building IE */ int meshconf_offset; - struct mesh_table *mesh_paths; - struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */ + struct mesh_table mesh_paths; + struct mesh_table mpp_paths; /* Store paths for MPP&MAP */ int mesh_paths_generation; int mpp_paths_generation; }; diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index 40492d1bd8fda6c4c3e19fc430d36c6a460a558f..b2b717a78114f36f558c87deedd79653ef01caac 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -127,26 +127,6 @@ struct mesh_path { u32 path_change_count; }; -/** - * struct mesh_table - * - * @known_gates: list of known mesh gates and their mpaths by the station. The - * gate's mpath may or may not be resolved and active. 
- * @gates_lock: protects updates to known_gates - * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr - * @walk_head: linked list containging all mesh_path objects - * @walk_lock: lock protecting walk_head - * @entries: number of entries in the table - */ -struct mesh_table { - struct hlist_head known_gates; - spinlock_t gates_lock; - struct rhashtable rhead; - struct hlist_head walk_head; - spinlock_t walk_lock; - atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */ -}; - /* Recent multicast cache */ /* RMC_BUCKETS must be a power of 2, maximum 256 */ #define RMC_BUCKETS 256 @@ -308,7 +288,7 @@ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata, void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta); void mesh_path_flush_pending(struct mesh_path *mpath); void mesh_path_tx_pending(struct mesh_path *mpath); -int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata); +void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata); void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata); int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr); void mesh_path_timer(struct timer_list *t); diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index c2b051e0610ab726592ac2bb503e83165f67e5b6..d936ef0c17a3752f64bcac3046a65634342794d2 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -47,32 +47,24 @@ static void mesh_path_rht_free(void *ptr, void *tblptr) mesh_path_free_rcu(tbl, mpath); } -static struct mesh_table *mesh_table_alloc(void) +static void mesh_table_init(struct mesh_table *tbl) { - struct mesh_table *newtbl; + INIT_HLIST_HEAD(&tbl->known_gates); + INIT_HLIST_HEAD(&tbl->walk_head); + atomic_set(&tbl->entries, 0); + spin_lock_init(&tbl->gates_lock); + spin_lock_init(&tbl->walk_lock); - newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC); - if (!newtbl) - return NULL; - - INIT_HLIST_HEAD(&newtbl->known_gates); - INIT_HLIST_HEAD(&newtbl->walk_head); - atomic_set(&newtbl->entries, 0); - spin_lock_init(&newtbl->gates_lock); - spin_lock_init(&newtbl->walk_lock); - if (rhashtable_init(&newtbl->rhead, &mesh_rht_params)) { - kfree(newtbl); - return NULL; - } - - return newtbl; + /* rhashtable_init() may fail only in case of wrong + * mesh_rht_params + */ + WARN_ON(rhashtable_init(&tbl->rhead, &mesh_rht_params)); } static void mesh_table_free(struct mesh_table *tbl) { rhashtable_free_and_destroy(&tbl->rhead, mesh_path_rht_free, tbl); - kfree(tbl); } /** @@ -238,13 +230,13 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst, struct mesh_path * mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst) { - return mpath_lookup(sdata->u.mesh.mesh_paths, dst, sdata); + return mpath_lookup(&sdata->u.mesh.mesh_paths, dst, sdata); } struct mesh_path * mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst) { - return mpath_lookup(sdata->u.mesh.mpp_paths, dst, sdata); + return mpath_lookup(&sdata->u.mesh.mpp_paths, dst, sdata); } static struct mesh_path * @@ -281,7 +273,7 @@ __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx) struct mesh_path * mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) { - return __mesh_path_lookup_by_idx(sdata->u.mesh.mesh_paths, idx); + return __mesh_path_lookup_by_idx(&sdata->u.mesh.mesh_paths, idx); } /** @@ -296,7 +288,7 @@ mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) struct mesh_path * mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) { - 
return __mesh_path_lookup_by_idx(sdata->u.mesh.mpp_paths, idx); + return __mesh_path_lookup_by_idx(&sdata->u.mesh.mpp_paths, idx); } /** @@ -309,7 +301,7 @@ int mesh_path_add_gate(struct mesh_path *mpath) int err; rcu_read_lock(); - tbl = mpath->sdata->u.mesh.mesh_paths; + tbl = &mpath->sdata->u.mesh.mesh_paths; spin_lock_bh(&mpath->state_lock); if (mpath->is_gate) { @@ -418,7 +410,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata, if (!new_mpath) return ERR_PTR(-ENOMEM); - tbl = sdata->u.mesh.mesh_paths; + tbl = &sdata->u.mesh.mesh_paths; spin_lock_bh(&tbl->walk_lock); mpath = rhashtable_lookup_get_insert_fast(&tbl->rhead, &new_mpath->rhash, @@ -460,7 +452,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata, return -ENOMEM; memcpy(new_mpath->mpp, mpp, ETH_ALEN); - tbl = sdata->u.mesh.mpp_paths; + tbl = &sdata->u.mesh.mpp_paths; spin_lock_bh(&tbl->walk_lock); ret = rhashtable_lookup_insert_fast(&tbl->rhead, @@ -489,7 +481,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata, void mesh_plink_broken(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; - struct mesh_table *tbl = sdata->u.mesh.mesh_paths; + struct mesh_table *tbl = &sdata->u.mesh.mesh_paths; static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; struct mesh_path *mpath; @@ -548,7 +540,7 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath) void mesh_path_flush_by_nexthop(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; - struct mesh_table *tbl = sdata->u.mesh.mesh_paths; + struct mesh_table *tbl = &sdata->u.mesh.mesh_paths; struct mesh_path *mpath; struct hlist_node *n; @@ -563,7 +555,7 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta) static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata, const u8 *proxy) { - struct mesh_table *tbl = sdata->u.mesh.mpp_paths; + struct mesh_table *tbl = &sdata->u.mesh.mpp_paths; struct mesh_path *mpath; struct hlist_node *n; @@ -597,8 +589,8 @@ static void table_flush_by_iface(struct mesh_table *tbl) */ void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata) { - table_flush_by_iface(sdata->u.mesh.mesh_paths); - table_flush_by_iface(sdata->u.mesh.mpp_paths); + table_flush_by_iface(&sdata->u.mesh.mesh_paths); + table_flush_by_iface(&sdata->u.mesh.mpp_paths); } /** @@ -644,7 +636,7 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr) /* flush relevant mpp entries first */ mpp_flush_by_proxy(sdata, addr); - err = table_path_del(sdata->u.mesh.mesh_paths, sdata, addr); + err = table_path_del(&sdata->u.mesh.mesh_paths, sdata, addr); sdata->u.mesh.mesh_paths_generation++; return err; } @@ -682,7 +674,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath) struct mesh_path *gate; bool copy = false; - tbl = sdata->u.mesh.mesh_paths; + tbl = &sdata->u.mesh.mesh_paths; rcu_read_lock(); hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) { @@ -762,29 +754,10 @@ void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop) mesh_path_tx_pending(mpath); } -int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata) +void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata) { - struct mesh_table *tbl_path, *tbl_mpp; - int ret; - - tbl_path = mesh_table_alloc(); - if (!tbl_path) - return -ENOMEM; - - tbl_mpp = mesh_table_alloc(); - if (!tbl_mpp) { - ret = -ENOMEM; - goto free_path; - } - - sdata->u.mesh.mesh_paths = tbl_path; - sdata->u.mesh.mpp_paths = tbl_mpp; - - return 0; - -free_path: - 
mesh_table_free(tbl_path); - return ret; + mesh_table_init(&sdata->u.mesh.mesh_paths); + mesh_table_init(&sdata->u.mesh.mpp_paths); } static @@ -806,12 +779,12 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata, void mesh_path_expire(struct ieee80211_sub_if_data *sdata) { - mesh_path_tbl_expire(sdata, sdata->u.mesh.mesh_paths); - mesh_path_tbl_expire(sdata, sdata->u.mesh.mpp_paths); + mesh_path_tbl_expire(sdata, &sdata->u.mesh.mesh_paths); + mesh_path_tbl_expire(sdata, &sdata->u.mesh.mpp_paths); } void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata) { - mesh_table_free(sdata->u.mesh.mesh_paths); - mesh_table_free(sdata->u.mesh.mpp_paths); + mesh_table_free(&sdata->u.mesh.mesh_paths); + mesh_table_free(&sdata->u.mesh.mpp_paths); } diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index cee39ae52245c5c6170233ef90e8c1674279766b..d572478c4d68ef722bbab59111a812540d390a93 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -2159,7 +2159,7 @@ static void sta_stats_decode_rate(struct ieee80211_local *local, u32 rate, static int sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo) { - u16 rate = READ_ONCE(sta_get_last_rx_stats(sta)->last_rate); + u32 rate = READ_ONCE(sta_get_last_rx_stats(sta)->last_rate); if (rate == STA_STATS_RATE_INVALID) return -EINVAL; diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c index 144346faffc13024d29e1de466eeb834165c97f4..b8ec2c414a5fbd8c8e11d7b1b485efdbc0b19225 100644 --- a/net/netfilter/ipset/ip_set_hash_netportnet.c +++ b/net/netfilter/ipset/ip_set_hash_netportnet.c @@ -35,6 +35,7 @@ MODULE_ALIAS("ip_set_hash:net,port,net"); #define IP_SET_HASH_WITH_PROTO #define IP_SET_HASH_WITH_NETS #define IPSET_NET_COUNT 2 +#define IP_SET_HASH_WITH_NET0 /* IPv4 variant */ diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 2efdc50f978b0fae45bd1bc556bedc7711115b30..c9ca857f1068d3414fba39e55dcfd09bc2bfa2e8 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -317,11 +317,12 @@ ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct) } #ifdef CONFIG_NF_CONNTRACK_MARK -static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct) +static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct, + bool dump) { u32 mark = READ_ONCE(ct->mark); - if (!mark) + if (!mark && !dump) return 0; if (nla_put_be32(skb, CTA_MARK, htonl(mark))) @@ -332,7 +333,7 @@ static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct) return -1; } #else -#define ctnetlink_dump_mark(a, b) (0) +#define ctnetlink_dump_mark(a, b, c) (0) #endif #ifdef CONFIG_NF_CONNTRACK_SECMARK @@ -537,7 +538,7 @@ static int ctnetlink_dump_extinfo(struct sk_buff *skb, static int ctnetlink_dump_info(struct sk_buff *skb, struct nf_conn *ct) { if (ctnetlink_dump_status(skb, ct) < 0 || - ctnetlink_dump_mark(skb, ct) < 0 || + ctnetlink_dump_mark(skb, ct, true) < 0 || ctnetlink_dump_secctx(skb, ct) < 0 || ctnetlink_dump_id(skb, ct) < 0 || ctnetlink_dump_use(skb, ct) < 0 || @@ -816,8 +817,7 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item) } #ifdef CONFIG_NF_CONNTRACK_MARK - if (events & (1 << IPCT_MARK) && - ctnetlink_dump_mark(skb, ct) < 0) + if (ctnetlink_dump_mark(skb, ct, events & (1 << IPCT_MARK))) goto nla_put_failure; #endif nlmsg_end(skb, nlh); @@ -2359,12 +2359,15 @@ ctnetlink_create_conntrack(struct net *net, err = 
nf_conntrack_hash_check_insert(ct); if (err < 0) - goto err2; + goto err3; rcu_read_unlock(); return ct; +err3: + if (ct->master) + nf_ct_put(ct->master); err2: rcu_read_unlock(); err1: @@ -2731,7 +2734,7 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct) goto nla_put_failure; #ifdef CONFIG_NF_CONNTRACK_MARK - if (ctnetlink_dump_mark(skb, ct) < 0) + if (ctnetlink_dump_mark(skb, ct, true) < 0) goto nla_put_failure; #endif if (ctnetlink_dump_labels(skb, ct) < 0) diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 7626f3e1c70a774968676d1111ee6376284afc17..cec4b16170a0bc7503f7438e50e6259d6abfb853 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -27,22 +27,16 @@ #include #include -/* FIXME: Examine ipfilter's timeouts and conntrack transitions more - closely. They're more complex. --RR - - And so for me for SCTP :D -Kiran */ - static const char *const sctp_conntrack_names[] = { - "NONE", - "CLOSED", - "COOKIE_WAIT", - "COOKIE_ECHOED", - "ESTABLISHED", - "SHUTDOWN_SENT", - "SHUTDOWN_RECD", - "SHUTDOWN_ACK_SENT", - "HEARTBEAT_SENT", - "HEARTBEAT_ACKED", + [SCTP_CONNTRACK_NONE] = "NONE", + [SCTP_CONNTRACK_CLOSED] = "CLOSED", + [SCTP_CONNTRACK_COOKIE_WAIT] = "COOKIE_WAIT", + [SCTP_CONNTRACK_COOKIE_ECHOED] = "COOKIE_ECHOED", + [SCTP_CONNTRACK_ESTABLISHED] = "ESTABLISHED", + [SCTP_CONNTRACK_SHUTDOWN_SENT] = "SHUTDOWN_SENT", + [SCTP_CONNTRACK_SHUTDOWN_RECD] = "SHUTDOWN_RECD", + [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = "SHUTDOWN_ACK_SENT", + [SCTP_CONNTRACK_HEARTBEAT_SENT] = "HEARTBEAT_SENT", }; #define SECS * HZ @@ -54,12 +48,11 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = { [SCTP_CONNTRACK_CLOSED] = 10 SECS, [SCTP_CONNTRACK_COOKIE_WAIT] = 3 SECS, [SCTP_CONNTRACK_COOKIE_ECHOED] = 3 SECS, - [SCTP_CONNTRACK_ESTABLISHED] = 5 DAYS, + [SCTP_CONNTRACK_ESTABLISHED] = 210 SECS, [SCTP_CONNTRACK_SHUTDOWN_SENT] = 300 SECS / 1000, [SCTP_CONNTRACK_SHUTDOWN_RECD] = 300 SECS / 1000, [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = 3 SECS, [SCTP_CONNTRACK_HEARTBEAT_SENT] = 30 SECS, - [SCTP_CONNTRACK_HEARTBEAT_ACKED] = 210 SECS, }; #define SCTP_FLAG_HEARTBEAT_VTAG_FAILED 1 @@ -73,7 +66,6 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = { #define sSR SCTP_CONNTRACK_SHUTDOWN_RECD #define sSA SCTP_CONNTRACK_SHUTDOWN_ACK_SENT #define sHS SCTP_CONNTRACK_HEARTBEAT_SENT -#define sHA SCTP_CONNTRACK_HEARTBEAT_ACKED #define sIV SCTP_CONNTRACK_MAX /* @@ -96,9 +88,6 @@ SHUTDOWN_ACK_SENT - We have seen a SHUTDOWN_ACK chunk in the direction opposite CLOSED - We have seen a SHUTDOWN_COMPLETE chunk in the direction of the SHUTDOWN chunk. Connection is closed. HEARTBEAT_SENT - We have seen a HEARTBEAT in a new flow. -HEARTBEAT_ACKED - We have seen a HEARTBEAT-ACK in the direction opposite to - that of the HEARTBEAT chunk. Secondary connection is - established. */ /* TODO @@ -115,33 +104,33 @@ cookie echoed to closed. 
static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = { { /* ORIGINAL */ -/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */ -/* init */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW, sHA}, -/* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA}, -/* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL}, -/* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL, sSS}, -/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA, sHA}, -/* error */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* Can't have Stale cookie*/ -/* cookie_echo */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* 5.2.4 - Big TODO */ -/* cookie_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* Can't come in orig dir */ -/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL, sHA}, -/* heartbeat */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA}, -/* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA} +/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */ +/* init */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW}, +/* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL}, +/* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL}, +/* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL}, +/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA}, +/* error */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't have Stale cookie*/ +/* cookie_echo */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL},/* 5.2.4 - Big TODO */ +/* cookie_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't come in orig dir */ +/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL}, +/* heartbeat */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS}, +/* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS}, }, { /* REPLY */ -/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */ -/* init */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},/* INIT in sCL Big TODO */ -/* init_ack */ {sIV, sCW, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA}, -/* abort */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV, sCL}, -/* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV, sSR}, -/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV, sHA}, -/* error */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV, sHA}, -/* cookie_echo */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},/* Can't come in reply dir */ -/* cookie_ack */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV, sHA}, -/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV, sHA}, -/* heartbeat */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA}, -/* heartbeat_ack*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHA, sHA} +/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */ +/* init */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV},/* INIT in sCL Big TODO */ +/* init_ack */ {sIV, sCW, sCW, sCE, sES, sSS, sSR, sSA, sIV}, +/* abort */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV}, +/* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV}, +/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV}, +/* error */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV}, +/* cookie_echo */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV},/* Can't come in reply dir */ +/* cookie_ack */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV}, +/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV}, +/* heartbeat */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS}, +/* heartbeat_ack*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sES}, } }; @@ -412,22 +401,29 @@ int nf_conntrack_sctp_packet(struct 
nf_conn *ct, for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) { /* Special cases of Verification tag check (Sec 8.5.1) */ if (sch->type == SCTP_CID_INIT) { - /* Sec 8.5.1 (A) */ + /* (A) vtag MUST be zero */ if (sh->vtag != 0) goto out_unlock; } else if (sch->type == SCTP_CID_ABORT) { - /* Sec 8.5.1 (B) */ - if (sh->vtag != ct->proto.sctp.vtag[dir] && - sh->vtag != ct->proto.sctp.vtag[!dir]) + /* (B) vtag MUST match own vtag if T flag is unset OR + * MUST match peer's vtag if T flag is set + */ + if ((!(sch->flags & SCTP_CHUNK_FLAG_T) && + sh->vtag != ct->proto.sctp.vtag[dir]) || + ((sch->flags & SCTP_CHUNK_FLAG_T) && + sh->vtag != ct->proto.sctp.vtag[!dir])) goto out_unlock; } else if (sch->type == SCTP_CID_SHUTDOWN_COMPLETE) { - /* Sec 8.5.1 (C) */ - if (sh->vtag != ct->proto.sctp.vtag[dir] && - sh->vtag != ct->proto.sctp.vtag[!dir] && - sch->flags & SCTP_CHUNK_FLAG_T) + /* (C) vtag MUST match own vtag if T flag is unset OR + * MUST match peer's vtag if T flag is set + */ + if ((!(sch->flags & SCTP_CHUNK_FLAG_T) && + sh->vtag != ct->proto.sctp.vtag[dir]) || + ((sch->flags & SCTP_CHUNK_FLAG_T) && + sh->vtag != ct->proto.sctp.vtag[!dir])) goto out_unlock; } else if (sch->type == SCTP_CID_COOKIE_ECHO) { - /* Sec 8.5.1 (D) */ + /* (D) vtag must be same as init_vtag as found in INIT_ACK */ if (sh->vtag != ct->proto.sctp.vtag[dir]) goto out_unlock; } else if (sch->type == SCTP_CID_HEARTBEAT) { @@ -501,8 +497,12 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct, } ct->proto.sctp.state = new_state; - if (old_state != new_state) + if (old_state != new_state) { nf_conntrack_event_cache(IPCT_PROTOINFO, ct); + if (new_state == SCTP_CONNTRACK_ESTABLISHED && + !test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) + nf_conntrack_event_cache(IPCT_ASSURED, ct); + } } spin_unlock_bh(&ct->lock); @@ -516,14 +516,6 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct, nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]); - if (old_state == SCTP_CONNTRACK_COOKIE_ECHOED && - dir == IP_CT_DIR_REPLY && - new_state == SCTP_CONNTRACK_ESTABLISHED) { - pr_debug("Setting assured bit\n"); - set_bit(IPS_ASSURED_BIT, &ct->status); - nf_conntrack_event_cache(IPCT_ASSURED, ct); - } - return NF_ACCEPT; out_unlock: diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 3f785bdfa942d7149bf65573ca17c3fe3ce2164a..c1d02c0b4f0058a32ac8715af27f7f91cc2f2cca 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -1158,6 +1158,16 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct, nf_ct_kill_acct(ct, ctinfo, skb); return NF_ACCEPT; } + + if (index == TCP_SYN_SET && old_state == TCP_CONNTRACK_SYN_SENT) { + /* do not renew timeout on SYN retransmit. + * + * Else port reuse by client or NAT middlebox can keep + * entry alive indefinitely (including nat info). + */ + return NF_ACCEPT; + } + /* ESTABLISHED without SEEN_REPLY, i.e. mid-connection * pickup with loose=1. Avoid large ESTABLISHED timeout. 
*/ diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index a7f88cdf3f87c487a3a2d1adb093e070ca0a4727..e12b52019a550c584ae0436b22f522a7cf9022dc 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -583,7 +583,6 @@ enum nf_ct_sysctl_index { NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_RECD, NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT, NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_SENT, - NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_ACKED, #endif #ifdef CONFIG_NF_CT_PROTO_DCCP NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST, @@ -853,12 +852,6 @@ static struct ctl_table nf_ct_sysctl_table[] = { .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, - [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_ACKED] = { - .procname = "nf_conntrack_sctp_timeout_heartbeat_acked", - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = proc_dointvec_jiffies, - }, #endif #ifdef CONFIG_NF_CT_PROTO_DCCP [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST] = { @@ -987,7 +980,6 @@ static void nf_conntrack_standalone_init_sctp_sysctl(struct net *net, XASSIGN(SHUTDOWN_RECD, sn); XASSIGN(SHUTDOWN_ACK_SENT, sn); XASSIGN(HEARTBEAT_SENT, sn); - XASSIGN(HEARTBEAT_ACKED, sn); #undef XASSIGN #endif } diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index ee7fd34955284c4c6c87288c28a1de6a380f9bea..3065c7fcfd82dec99633b35a93c57494f8175495 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -3194,8 +3194,6 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk, NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]); return PTR_ERR(chain); } - if (nft_chain_is_bound(chain)) - return -EOPNOTSUPP; } else if (nla[NFTA_RULE_CHAIN_ID]) { chain = nft_chain_lookup_byid(net, table, nla[NFTA_RULE_CHAIN_ID], @@ -3208,6 +3206,9 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk, return -EINVAL; } + if (nft_chain_is_bound(chain)) + return -EOPNOTSUPP; + if (nla[NFTA_RULE_HANDLE]) { handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_HANDLE])); rule = __nft_rule_lookup(chain, handle); @@ -3454,6 +3455,8 @@ static int nf_tables_delrule(struct net *net, struct sock *nlsk, list_for_each_entry(chain, &table->chains, list) { if (!nft_is_active_next(net, chain)) continue; + if (nft_chain_is_bound(chain)) + continue; ctx.chain = chain; err = nft_delrule_by_chain(&ctx); diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c index 51e3953b414c0497f7d99fd4c249e49d5415e065..260a655def3433c88fa7752997a19077231dce5d 100644 --- a/net/netfilter/nfnetlink_osf.c +++ b/net/netfilter/nfnetlink_osf.c @@ -316,6 +316,14 @@ static int nfnl_osf_add_callback(struct net *net, struct sock *ctnl, f = nla_data(osf_attrs[OSF_ATTR_FINGER]); + if (f->opt_num > ARRAY_SIZE(f->opt)) + return -EINVAL; + + if (!memchr(f->genre, 0, MAXGENRELEN) || + !memchr(f->subtype, 0, MAXGENRELEN) || + !memchr(f->version, 0, MAXGENRELEN)) + return -EINVAL; + kf = kmalloc(sizeof(struct nf_osf_finger), GFP_KERNEL); if (!kf) return -ENOMEM; diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c index 670dd146fb2b16be550ae2874aadb9cb4979f0bf..ca268293cfa12248c3f90ee0ccfbe424520e6bee 100644 --- a/net/netfilter/nft_exthdr.c +++ b/net/netfilter/nft_exthdr.c @@ -33,6 +33,14 @@ static unsigned int optlen(const u8 *opt, unsigned int offset) return opt[offset + 1]; } +static int nft_skb_copy_to_reg(const struct sk_buff *skb, int offset, u32 *dest, unsigned int len) +{ + if (len % NFT_REG32_SIZE) + dest[len / 
NFT_REG32_SIZE] = 0; + + return skb_copy_bits(skb, offset, dest, len); +} + static void nft_exthdr_ipv6_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) @@ -54,8 +62,7 @@ static void nft_exthdr_ipv6_eval(const struct nft_expr *expr, } offset += priv->offset; - dest[priv->len / NFT_REG32_SIZE] = 0; - if (skb_copy_bits(pkt->skb, offset, dest, priv->len) < 0) + if (nft_skb_copy_to_reg(pkt->skb, offset, dest, priv->len) < 0) goto err; return; err: @@ -151,8 +158,7 @@ static void nft_exthdr_ipv4_eval(const struct nft_expr *expr, } offset += priv->offset; - dest[priv->len / NFT_REG32_SIZE] = 0; - if (skb_copy_bits(pkt->skb, offset, dest, priv->len) < 0) + if (nft_skb_copy_to_reg(pkt->skb, offset, dest, priv->len) < 0) goto err; return; err: @@ -208,7 +214,8 @@ static void nft_exthdr_tcp_eval(const struct nft_expr *expr, if (priv->flags & NFT_EXTHDR_F_PRESENT) { *dest = 1; } else { - dest[priv->len / NFT_REG32_SIZE] = 0; + if (priv->len % NFT_REG32_SIZE) + dest[priv->len / NFT_REG32_SIZE] = 0; memcpy(dest, opt + offset, priv->len); } diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index 94a5446c5eae8c4c2fbc9dada87f507182cb3afc..4b9a499fe8f4d86e8f730eda9eb6811116b99d52 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c @@ -38,10 +38,12 @@ static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe) return !nft_rbtree_interval_end(rbe); } -static bool nft_rbtree_equal(const struct nft_set *set, const void *this, - const struct nft_rbtree_elem *interval) +static int nft_rbtree_cmp(const struct nft_set *set, + const struct nft_rbtree_elem *e1, + const struct nft_rbtree_elem *e2) { - return memcmp(this, nft_set_ext_key(&interval->ext), set->klen) == 0; + return memcmp(nft_set_ext_key(&e1->ext), nft_set_ext_key(&e2->ext), + set->klen); } static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set, @@ -52,7 +54,6 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set const struct nft_rbtree_elem *rbe, *interval = NULL; u8 genmask = nft_genmask_cur(net); const struct rb_node *parent; - const void *this; int d; parent = rcu_dereference_raw(priv->root.rb_node); @@ -62,12 +63,11 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set rbe = rb_entry(parent, struct nft_rbtree_elem, node); - this = nft_set_ext_key(&rbe->ext); - d = memcmp(this, key, set->klen); + d = memcmp(nft_set_ext_key(&rbe->ext), key, set->klen); if (d < 0) { parent = rcu_dereference_raw(parent->rb_left); if (interval && - nft_rbtree_equal(set, this, interval) && + !nft_rbtree_cmp(set, rbe, interval) && nft_rbtree_interval_end(rbe) && nft_rbtree_interval_start(interval)) continue; @@ -214,154 +214,216 @@ static void *nft_rbtree_get(const struct net *net, const struct nft_set *set, return rbe; } +static int nft_rbtree_gc_elem(const struct nft_set *__set, + struct nft_rbtree *priv, + struct nft_rbtree_elem *rbe) +{ + struct nft_set *set = (struct nft_set *)__set; + struct rb_node *prev = rb_prev(&rbe->node); + struct nft_rbtree_elem *rbe_prev; + struct nft_set_gc_batch *gcb; + + gcb = nft_set_gc_batch_check(set, NULL, GFP_ATOMIC); + if (!gcb) + return -ENOMEM; + + /* search for expired end interval coming before this element. 
*/ + do { + rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node); + if (nft_rbtree_interval_end(rbe_prev)) + break; + + prev = rb_prev(prev); + } while (prev != NULL); + + rb_erase(&rbe_prev->node, &priv->root); + rb_erase(&rbe->node, &priv->root); + atomic_sub(2, &set->nelems); + + nft_set_gc_batch_add(gcb, rbe); + nft_set_gc_batch_complete(gcb); + + return 0; +} + +static bool nft_rbtree_update_first(const struct nft_set *set, + struct nft_rbtree_elem *rbe, + struct rb_node *first) +{ + struct nft_rbtree_elem *first_elem; + + first_elem = rb_entry(first, struct nft_rbtree_elem, node); + /* this element is closest to where the new element is to be inserted: + * update the first element for the node list path. + */ + if (nft_rbtree_cmp(set, rbe, first_elem) < 0) + return true; + + return false; +} + static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, struct nft_rbtree_elem *new, struct nft_set_ext **ext) { - bool overlap = false, dup_end_left = false, dup_end_right = false; + struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL; + struct rb_node *node, *parent, **p, *first = NULL; struct nft_rbtree *priv = nft_set_priv(set); u8 genmask = nft_genmask_next(net); - struct nft_rbtree_elem *rbe; - struct rb_node *parent, **p; - int d; + int d, err; - /* Detect overlaps as we descend the tree. Set the flag in these cases: - * - * a1. _ _ __>| ?_ _ __| (insert end before existing end) - * a2. _ _ ___| ?_ _ _>| (insert end after existing end) - * a3. _ _ ___? >|_ _ __| (insert start before existing end) - * - * and clear it later on, as we eventually reach the points indicated by - * '?' above, in the cases described below. We'll always meet these - * later, locally, due to tree ordering, and overlaps for the intervals - * that are the closest together are always evaluated last. - * - * b1. _ _ __>| !_ _ __| (insert end before existing start) - * b2. _ _ ___| !_ _ _>| (insert end after existing start) - * b3. _ _ ___! >|_ _ __| (insert start after existing end, as a leaf) - * '--' no nodes falling in this range - * b4. >|_ _ ! (insert start before existing start) - * - * Case a3. resolves to b3.: - * - if the inserted start element is the leftmost, because the '0' - * element in the tree serves as end element - * - otherwise, if an existing end is found immediately to the left. If - * there are existing nodes in between, we need to further descend the - * tree before we can conclude the new start isn't causing an overlap - * - * or to b4., which, preceded by a3., means we already traversed one or - * more existing intervals entirely, from the right. - * - * For a new, rightmost pair of elements, we'll hit cases b3. and b2., - * in that order. - * - * The flag is also cleared in two special cases: - * - * b5. |__ _ _!|<_ _ _ (insert start right before existing end) - * b6. |__ _ >|!__ _ _ (insert end right after existing start) - * - * which always happen as last step and imply that no further - * overlapping is possible. - * - * Another special case comes from the fact that start elements matching - * an already existing start element are allowed: insertion is not - * performed but we return -EEXIST in that case, and the error will be - * cleared by the caller if NLM_F_EXCL is not present in the request. - * This way, request for insertion of an exact overlap isn't reported as - * error to userspace if not desired. 
- * - * However, if the existing start matches a pre-existing start, but the - * end element doesn't match the corresponding pre-existing end element, - * we need to report a partial overlap. This is a local condition that - * can be noticed without need for a tracking flag, by checking for a - * local duplicated end for a corresponding start, from left and right, - * separately. + /* Descend the tree to search for an existing element greater than the + * key value to insert that is greater than the new element. This is the + * first element to walk the ordered elements to find possible overlap. */ - parent = NULL; p = &priv->root.rb_node; while (*p != NULL) { parent = *p; rbe = rb_entry(parent, struct nft_rbtree_elem, node); - d = memcmp(nft_set_ext_key(&rbe->ext), - nft_set_ext_key(&new->ext), - set->klen); + d = nft_rbtree_cmp(set, rbe, new); + if (d < 0) { p = &parent->rb_left; - - if (nft_rbtree_interval_start(new)) { - if (nft_rbtree_interval_end(rbe) && - nft_set_elem_active(&rbe->ext, genmask) && - !nft_set_elem_expired(&rbe->ext) && !*p) - overlap = false; - } else { - if (dup_end_left && !*p) - return -ENOTEMPTY; - - overlap = nft_rbtree_interval_end(rbe) && - nft_set_elem_active(&rbe->ext, - genmask) && - !nft_set_elem_expired(&rbe->ext); - - if (overlap) { - dup_end_right = true; - continue; - } - } } else if (d > 0) { - p = &parent->rb_right; + if (!first || + nft_rbtree_update_first(set, rbe, first)) + first = &rbe->node; - if (nft_rbtree_interval_end(new)) { - if (dup_end_right && !*p) - return -ENOTEMPTY; - - overlap = nft_rbtree_interval_end(rbe) && - nft_set_elem_active(&rbe->ext, - genmask) && - !nft_set_elem_expired(&rbe->ext); - - if (overlap) { - dup_end_left = true; - continue; - } - } else if (nft_set_elem_active(&rbe->ext, genmask) && - !nft_set_elem_expired(&rbe->ext)) { - overlap = nft_rbtree_interval_end(rbe); - } + p = &parent->rb_right; } else { - if (nft_rbtree_interval_end(rbe) && - nft_rbtree_interval_start(new)) { + if (nft_rbtree_interval_end(rbe)) p = &parent->rb_left; - - if (nft_set_elem_active(&rbe->ext, genmask) && - !nft_set_elem_expired(&rbe->ext)) - overlap = false; - } else if (nft_rbtree_interval_start(rbe) && - nft_rbtree_interval_end(new)) { + else p = &parent->rb_right; + } + } + + if (!first) + first = rb_first(&priv->root); + + /* Detect overlap by going through the list of valid tree nodes. + * Values stored in the tree are in reversed order, starting from + * highest to lowest value. + */ + for (node = first; node != NULL; node = rb_next(node)) { + rbe = rb_entry(node, struct nft_rbtree_elem, node); - if (nft_set_elem_active(&rbe->ext, genmask) && - !nft_set_elem_expired(&rbe->ext)) - overlap = false; - } else if (nft_set_elem_active(&rbe->ext, genmask) && - !nft_set_elem_expired(&rbe->ext)) { - *ext = &rbe->ext; - return -EEXIST; - } else { - overlap = false; - if (nft_rbtree_interval_end(rbe)) - p = &parent->rb_left; - else - p = &parent->rb_right; + if (!nft_set_elem_active(&rbe->ext, genmask)) + continue; + + /* perform garbage collection to avoid bogus overlap reports. */ + if (nft_set_elem_expired(&rbe->ext)) { + err = nft_rbtree_gc_elem(set, priv, rbe); + if (err < 0) + return err; + + continue; + } + + d = nft_rbtree_cmp(set, rbe, new); + if (d == 0) { + /* Matching end element: no need to look for an + * overlapping greater or equal element. + */ + if (nft_rbtree_interval_end(rbe)) { + rbe_le = rbe; + break; + } + + /* first element that is greater or equal to key value. 
*/ + if (!rbe_ge) { + rbe_ge = rbe; + continue; + } + + /* this is a closer more or equal element, update it. */ + if (nft_rbtree_cmp(set, rbe_ge, new) != 0) { + rbe_ge = rbe; + continue; + } + + /* element is equal to key value, make sure flags are + * the same, an existing more or equal start element + * must not be replaced by more or equal end element. + */ + if ((nft_rbtree_interval_start(new) && + nft_rbtree_interval_start(rbe_ge)) || + (nft_rbtree_interval_end(new) && + nft_rbtree_interval_end(rbe_ge))) { + rbe_ge = rbe; + continue; } + } else if (d > 0) { + /* annotate element greater than the new element. */ + rbe_ge = rbe; + continue; + } else if (d < 0) { + /* annotate element less than the new element. */ + rbe_le = rbe; + break; } + } - dup_end_left = dup_end_right = false; + /* - new start element matching existing start element: full overlap + * reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given. + */ + if (rbe_ge && !nft_rbtree_cmp(set, new, rbe_ge) && + nft_rbtree_interval_start(rbe_ge) == nft_rbtree_interval_start(new)) { + *ext = &rbe_ge->ext; + return -EEXIST; } - if (overlap) + /* - new end element matching existing end element: full overlap + * reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given. + */ + if (rbe_le && !nft_rbtree_cmp(set, new, rbe_le) && + nft_rbtree_interval_end(rbe_le) == nft_rbtree_interval_end(new)) { + *ext = &rbe_le->ext; + return -EEXIST; + } + + /* - new start element with existing closest, less or equal key value + * being a start element: partial overlap, reported as -ENOTEMPTY. + * Anonymous sets allow for two consecutive start element since they + * are constant, skip them to avoid bogus overlap reports. + */ + if (!nft_set_is_anonymous(set) && rbe_le && + nft_rbtree_interval_start(rbe_le) && nft_rbtree_interval_start(new)) + return -ENOTEMPTY; + + /* - new end element with existing closest, less or equal key value + * being a end element: partial overlap, reported as -ENOTEMPTY. 
+ */ + if (rbe_le && + nft_rbtree_interval_end(rbe_le) && nft_rbtree_interval_end(new)) return -ENOTEMPTY; + /* - new end element with existing closest, greater or equal key value + * being an end element: partial overlap, reported as -ENOTEMPTY + */ + if (rbe_ge && + nft_rbtree_interval_end(rbe_ge) && nft_rbtree_interval_end(new)) + return -ENOTEMPTY; + + /* Accepted element: pick insertion point depending on key value */ + parent = NULL; + p = &priv->root.rb_node; + while (*p != NULL) { + parent = *p; + rbe = rb_entry(parent, struct nft_rbtree_elem, node); + d = nft_rbtree_cmp(set, rbe, new); + + if (d < 0) + p = &parent->rb_left; + else if (d > 0) + p = &parent->rb_right; + else if (nft_rbtree_interval_end(rbe)) + p = &parent->rb_left; + else + p = &parent->rb_right; + } + rb_link_node_rcu(&new->node, parent, p); rb_insert_color(&new->node, &priv->root); return 0; @@ -500,23 +562,37 @@ static void nft_rbtree_gc(struct work_struct *work) struct nft_rbtree *priv; struct rb_node *node; struct nft_set *set; + struct net *net; + u8 genmask; priv = container_of(work, struct nft_rbtree, gc_work.work); set = nft_set_container_of(priv); + net = read_pnet(&set->net); + genmask = nft_genmask_cur(net); write_lock_bh(&priv->lock); write_seqcount_begin(&priv->count); for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) { rbe = rb_entry(node, struct nft_rbtree_elem, node); + if (!nft_set_elem_active(&rbe->ext, genmask)) + continue; + + /* elements are reversed in the rbtree for historical reasons, + * from highest to lowest value, that is why end element is + * always visited before the start element. + */ if (nft_rbtree_interval_end(rbe)) { rbe_end = rbe; continue; } if (!nft_set_elem_expired(&rbe->ext)) continue; - if (nft_set_elem_mark_busy(&rbe->ext)) + + if (nft_set_elem_mark_busy(&rbe->ext)) { + rbe_end = NULL; continue; + } if (rbe_prev) { rb_erase(&rbe_prev->node, &priv->root); diff --git a/net/netfilter/nft_tproxy.c b/net/netfilter/nft_tproxy.c index 37c728bdad41cb8e67a0ff8c6cadeeea4af9e6d4..c49d318f8e6ed649357f973ffbd15a7baaf23e34 100644 --- a/net/netfilter/nft_tproxy.c +++ b/net/netfilter/nft_tproxy.c @@ -289,6 +289,13 @@ static int nft_tproxy_dump(struct sk_buff *skb, return 0; } +static int nft_tproxy_validate(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nft_data **data) +{ + return nft_chain_validate_hooks(ctx->chain, 1 << NF_INET_PRE_ROUTING); +} + static struct nft_expr_type nft_tproxy_type; static const struct nft_expr_ops nft_tproxy_ops = { .type = &nft_tproxy_type, @@ -296,6 +303,7 @@ static const struct nft_expr_ops nft_tproxy_ops = { .eval = nft_tproxy_eval, .init = nft_tproxy_init, .dump = nft_tproxy_dump, + .validate = nft_tproxy_validate, }; static struct nft_expr_type nft_tproxy_type __read_mostly = { diff --git a/net/netfilter/xt_sctp.c b/net/netfilter/xt_sctp.c index 680015ba7cb6e7ddd9f4a468919078176a653575..d4bf089c9e3f966bc24a01e144eb90de3953935a 100644 --- a/net/netfilter/xt_sctp.c +++ b/net/netfilter/xt_sctp.c @@ -150,6 +150,8 @@ static int sctp_mt_check(const struct xt_mtchk_param *par) { const struct xt_sctp_info *info = par->matchinfo; + if (info->flag_count > ARRAY_SIZE(info->flag_info)) + return -EINVAL; if (info->flags & ~XT_SCTP_VALID_FLAGS) return -EINVAL; if (info->invflags & ~XT_SCTP_VALID_FLAGS) diff --git a/net/netfilter/xt_u32.c b/net/netfilter/xt_u32.c index 177b40d08098b22de5e5335cfc3833e68f6ee457..117d4615d6684c53b3a13225d2222a0993df0002 100644 --- a/net/netfilter/xt_u32.c +++ b/net/netfilter/xt_u32.c @@ 
-96,11 +96,32 @@ static bool u32_mt(const struct sk_buff *skb, struct xt_action_param *par) return ret ^ data->invert; } +static int u32_mt_checkentry(const struct xt_mtchk_param *par) +{ + const struct xt_u32 *data = par->matchinfo; + const struct xt_u32_test *ct; + unsigned int i; + + if (data->ntests > ARRAY_SIZE(data->tests)) + return -EINVAL; + + for (i = 0; i < data->ntests; ++i) { + ct = &data->tests[i]; + + if (ct->nnums > ARRAY_SIZE(ct->location) || + ct->nvalues > ARRAY_SIZE(ct->value)) + return -EINVAL; + } + + return 0; +} + static struct xt_match xt_u32_mt_reg __read_mostly = { .name = "u32", .revision = 0, .family = NFPROTO_UNSPEC, .match = u32_mt, + .checkentry = u32_mt_checkentry, .matchsize = sizeof(struct xt_u32), .me = THIS_MODULE, }; diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index d96a610929d9abe074e69423ef757f6e9d6f7b4d..2104fbdd63d29809d78acbcba49fb61de718c3e7 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -570,7 +570,9 @@ static int netlink_insert(struct sock *sk, u32 portid) if (nlk_sk(sk)->bound) goto err; - nlk_sk(sk)->portid = portid; + /* portid can be read locklessly from netlink_getname(). */ + WRITE_ONCE(nlk_sk(sk)->portid, portid); + sock_hold(sk); err = __netlink_insert(table, sk); @@ -1079,9 +1081,11 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr, return -EINVAL; if (addr->sa_family == AF_UNSPEC) { - sk->sk_state = NETLINK_UNCONNECTED; - nlk->dst_portid = 0; - nlk->dst_group = 0; + /* paired with READ_ONCE() in netlink_getsockbyportid() */ + WRITE_ONCE(sk->sk_state, NETLINK_UNCONNECTED); + /* dst_portid and dst_group can be read locklessly */ + WRITE_ONCE(nlk->dst_portid, 0); + WRITE_ONCE(nlk->dst_group, 0); return 0; } if (addr->sa_family != AF_NETLINK) @@ -1102,9 +1106,11 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr, err = netlink_autobind(sock); if (err == 0) { - sk->sk_state = NETLINK_CONNECTED; - nlk->dst_portid = nladdr->nl_pid; - nlk->dst_group = ffs(nladdr->nl_groups); + /* paired with READ_ONCE() in netlink_getsockbyportid() */ + WRITE_ONCE(sk->sk_state, NETLINK_CONNECTED); + /* dst_portid and dst_group can be read locklessly */ + WRITE_ONCE(nlk->dst_portid, nladdr->nl_pid); + WRITE_ONCE(nlk->dst_group, ffs(nladdr->nl_groups)); } return err; @@ -1121,10 +1127,12 @@ static int netlink_getname(struct socket *sock, struct sockaddr *addr, nladdr->nl_pad = 0; if (peer) { - nladdr->nl_pid = nlk->dst_portid; - nladdr->nl_groups = netlink_group_mask(nlk->dst_group); + /* Paired with WRITE_ONCE() in netlink_connect() */ + nladdr->nl_pid = READ_ONCE(nlk->dst_portid); + nladdr->nl_groups = netlink_group_mask(READ_ONCE(nlk->dst_group)); } else { - nladdr->nl_pid = nlk->portid; + /* Paired with WRITE_ONCE() in netlink_insert() */ + nladdr->nl_pid = READ_ONCE(nlk->portid); netlink_lock_table(); nladdr->nl_groups = nlk->groups ? 
nlk->groups[0] : 0; netlink_unlock_table(); @@ -1151,8 +1159,9 @@ static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid) /* Don't bother queuing skb if kernel socket has no input function */ nlk = nlk_sk(sock); - if (sock->sk_state == NETLINK_CONNECTED && - nlk->dst_portid != nlk_sk(ssk)->portid) { + /* dst_portid and sk_state can be changed in netlink_connect() */ + if (READ_ONCE(sock->sk_state) == NETLINK_CONNECTED && + READ_ONCE(nlk->dst_portid) != nlk_sk(ssk)->portid) { sock_put(sock); return ERR_PTR(-ECONNREFUSED); } @@ -1888,8 +1897,9 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) goto out; netlink_skb_flags |= NETLINK_SKB_DST; } else { - dst_portid = nlk->dst_portid; - dst_group = nlk->dst_group; + /* Paired with WRITE_ONCE() in netlink_connect() */ + dst_portid = READ_ONCE(nlk->dst_portid); + dst_group = READ_ONCE(nlk->dst_group); } /* Paired with WRITE_ONCE() in netlink_insert() */ diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c index a8da88db7893fcdc445ed74fd9c25c24ef24d47f..4e7c968cde2dcf58e00d3bb080942034147613ec 100644 --- a/net/netrom/nr_timer.c +++ b/net/netrom/nr_timer.c @@ -121,6 +121,7 @@ static void nr_heartbeat_expiry(struct timer_list *t) is accepted() it isn't 'dead' so doesn't get removed. */ if (sock_flag(sk, SOCK_DESTROY) || (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) { + sock_hold(sk); bh_unlock_sock(sk); nr_destroy_socket(sk); goto out; diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c index cc997518f79d19248a15ea6b999dd6d36eddea57..edadebb3efd2a4f15276721f044b97278e608d22 100644 --- a/net/nfc/llcp_core.c +++ b/net/nfc/llcp_core.c @@ -159,6 +159,7 @@ static void local_cleanup(struct nfc_llcp_local *local) cancel_work_sync(&local->rx_work); cancel_work_sync(&local->timeout_work); kfree_skb(local->rx_pending); + local->rx_pending = NULL; del_timer_sync(&local->sdreq_timer); cancel_work_sync(&local->sdreq_timeout_work); nfc_llcp_free_sdp_tlv_list(&local->pending_sdreqs); diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index 9f2f115e81f33e21741317836a45c9f5dd26378c..86f44d45b60f62fe5116d651615e61df23196cae 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -1442,8 +1442,12 @@ static int nfc_se_io(struct nfc_dev *dev, u32 se_idx, rc = dev->ops->se_io(dev, se_idx, apdu, apdu_length, cb, cb_context); + device_unlock(&dev->dev); + return rc; + error: device_unlock(&dev->dev); + kfree(cb_context); return rc; } diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 435f7f1be6146d6364c97d9a62708b89fda36a87..b625ab5e9a43086742e438e64e007449d9ca683b 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -964,14 +964,14 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) key = kzalloc(sizeof(*key), GFP_KERNEL); if (!key) { error = -ENOMEM; - goto err_kfree_key; + goto err_kfree_flow; } ovs_match_init(&match, key, false, &mask); error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK], log); if (error) - goto err_kfree_flow; + goto err_kfree_key; ovs_flow_mask_key(&new_flow->key, key, true, &mask); @@ -979,14 +979,14 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID], key, log); if (error) - goto err_kfree_flow; + goto err_kfree_key; /* Validate actions. 
*/ error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS], &new_flow->key, &acts, log); if (error) { OVS_NLERR(log, "Flow actions may not be safe on all matching packets."); - goto err_kfree_flow; + goto err_kfree_key; } reply = ovs_flow_cmd_alloc_info(acts, &new_flow->id, info, false, @@ -1086,10 +1086,10 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) kfree_skb(reply); err_kfree_acts: ovs_nla_free_flow_actions(acts); -err_kfree_flow: - ovs_flow_free(new_flow, false); err_kfree_key: kfree(key); +err_kfree_flow: + ovs_flow_free(new_flow, false); error: return error; } diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c index e594b4d6b58a9b931cdd942e8dfb235b873ab26c..0cf3dda5319fe3bc3b24b59a068be03497ee819d 100644 --- a/net/openvswitch/meter.c +++ b/net/openvswitch/meter.c @@ -450,7 +450,7 @@ static int ovs_meter_cmd_set(struct sk_buff *skb, struct genl_info *info) err = attach_meter(meter_tbl, meter); if (err) - goto exit_unlock; + goto exit_free_old_meter; ovs_unlock(); @@ -473,6 +473,8 @@ static int ovs_meter_cmd_set(struct sk_buff *skb, struct genl_info *info) genlmsg_end(reply, ovs_reply_header); return genlmsg_reply(reply, info); +exit_free_old_meter: + ovs_meter_free(old_meter); exit_unlock: ovs_unlock(); nlmsg_free(reply); diff --git a/net/qrtr/ns.c b/net/qrtr/ns.c index e760d4a38fafd34866f6e143c53b8641b875b986..fe81e03851686bdfb7fc38ba81e8bcf95d97da52 100644 --- a/net/qrtr/ns.c +++ b/net/qrtr/ns.c @@ -83,7 +83,10 @@ static struct qrtr_node *node_get(unsigned int node_id) node->id = node_id; - radix_tree_insert(&nodes, node_id, node); + if (radix_tree_insert(&nodes, node_id, node)) { + kfree(node); + return NULL; + } return node; } diff --git a/net/rds/message.c b/net/rds/message.c index b363ef13c75ef680084e53ed85ef209e0f704964..8fa3d19c2e6674a0793a8e4e9797cf0f2aa4635d 100644 --- a/net/rds/message.c +++ b/net/rds/message.c @@ -118,7 +118,7 @@ static void rds_rm_zerocopy_callback(struct rds_sock *rs, ck = &info->zcookies; memset(ck, 0, sizeof(*ck)); WARN_ON(!rds_zcookie_add(info, cookie)); - list_add_tail(&q->zcookie_head, &info->rs_zcookie_next); + list_add_tail(&info->rs_zcookie_next, &q->zcookie_head); spin_unlock_irqrestore(&q->lock, flags); /* caller invokes rds_wake_sk_sleep() */ diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index 29a208ed8fb88d5f2e7f457ffba1cbc1d81f99cc..86c93cf1744b07e622c459499d55c5861b462d65 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -487,6 +487,12 @@ static int rose_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; + lock_sock(sk); + if (sock->state != SS_UNCONNECTED) { + release_sock(sk); + return -EINVAL; + } + if (sk->sk_state != TCP_LISTEN) { struct rose_sock *rose = rose_sk(sk); @@ -496,8 +502,10 @@ static int rose_listen(struct socket *sock, int backlog) memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS); sk->sk_max_ack_backlog = backlog; sk->sk_state = TCP_LISTEN; + release_sock(sk); return 0; } + release_sock(sk); return -EOPNOTSUPP; } diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index a4c7ba35a3438a1f104bc018b0c9da54e59a780d..78f1cd70c8d199157e4f441e054a0a5e502b607e 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c @@ -307,7 +307,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla, ret = tcf_idr_check_alloc(tn, &index, act, bind); if (!ret) { ret = tcf_idr_create(tn, index, est, act, - &act_bpf_ops, bind, true, 0); + &act_bpf_ops, bind, true, flags); if (ret < 0) { tcf_idr_cleanup(tn, index); return ret; diff --git 
a/net/sched/act_connmark.c b/net/sched/act_connmark.c index 31d268eedf3f90a0312c8650d53185a0c061fbd8..b6576a250e851e393901df0d532c380e516f4148 100644 --- a/net/sched/act_connmark.c +++ b/net/sched/act_connmark.c @@ -124,7 +124,7 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla, ret = tcf_idr_check_alloc(tn, &index, a, bind); if (!ret) { ret = tcf_idr_create(tn, index, est, a, - &act_connmark_ops, bind, false, 0); + &act_connmark_ops, bind, false, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c index 06c74f22ab98b76bb3a43d69bcfb59c75eaeaa7c..5aa005835c066ef6b506bd97b20426fdf9adb3b9 100644 --- a/net/sched/act_ctinfo.c +++ b/net/sched/act_ctinfo.c @@ -92,7 +92,7 @@ static int tcf_ctinfo_act(struct sk_buff *skb, const struct tc_action *a, cp = rcu_dereference_bh(ca->params); tcf_lastuse_update(&ca->tcf_tm); - bstats_update(&ca->tcf_bstats, skb); + tcf_action_update_bstats(&ca->common, skb); action = READ_ONCE(ca->tcf_action); wlen = skb_network_offset(skb); @@ -211,8 +211,8 @@ static int tcf_ctinfo_init(struct net *net, struct nlattr *nla, index = actparm->index; err = tcf_idr_check_alloc(tn, &index, a, bind); if (!err) { - ret = tcf_idr_create(tn, index, est, a, - &act_ctinfo_ops, bind, false, 0); + ret = tcf_idr_create_from_flags(tn, index, est, a, + &act_ctinfo_ops, bind, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; diff --git a/net/sched/act_gate.c b/net/sched/act_gate.c index a78cb79657182d6e1a87424f715cc151f29176f7..0e7568a06351b9db0ee4c833f5bb0550c16c9638 100644 --- a/net/sched/act_gate.c +++ b/net/sched/act_gate.c @@ -357,7 +357,7 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla, if (!err) { ret = tcf_idr_create(tn, index, est, a, - &act_gate_ops, bind, false, 0); + &act_gate_ops, bind, false, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index a2ddea04183afbf9ddd30e43ad29c4feec56332a..99548b2a1bc8331d9c8e10588eaa319ea88e07c0 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -553,7 +553,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, index, est, a, &act_ife_ops, - bind, true, 0); + bind, true, flags); if (ret) { tcf_idr_cleanup(tn, index); kfree(p); diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 8dc3bec0d3258a020145352dd38dfde58be21099..080f2952cd536637e1b8c14b5488dfc64ff8f1ae 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -144,7 +144,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, index, est, a, ops, bind, - false, 0); + false, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c index 09799412b248980898c07597dd0959b1a084651d..47b963ded4e432cbb41714a0d257c491359d2882 100644 --- a/net/sched/act_mpls.c +++ b/net/sched/act_mpls.c @@ -254,7 +254,7 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, index, est, a, - &act_mpls_ops, bind, true, 0); + &act_mpls_ops, bind, true, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 1ebd2a86d980fc631a5f970543993f37aebdf3b1..8466dc25fe3974d4ea2966f475f4328ab960be5b 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c @@ -61,7 +61,7 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct 
nlattr *est, err = tcf_idr_check_alloc(tn, &index, a, bind); if (!err) { ret = tcf_idr_create(tn, index, est, a, - &act_nat_ops, bind, false, 0); + &act_nat_ops, bind, false, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 0d5463ddfd62f94e041ec8439ca3261d3b2e783a..db0d3bff19ebaf371da6c3575c8837b40cb08267 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -189,7 +189,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, err = tcf_idr_check_alloc(tn, &index, a, bind); if (!err) { ret = tcf_idr_create(tn, index, est, a, - &act_pedit_ops, bind, false, 0); + &act_pedit_ops, bind, false, flags); if (ret) { tcf_idr_cleanup(tn, index); goto out_free; diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 380733588959060e1b9aff44d0fd5f4ce49e257f..c30cd3ecb3911723e60e751e14e42fa197d21a84 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -87,7 +87,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, index, NULL, a, - &act_police_ops, bind, true, 0); + &act_police_ops, bind, true, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index 3ebf9ede3cf10cccc825305faec1a15b810fd120..6988a9cf4080674e8694010efed3d1a41e1615d6 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c @@ -54,8 +54,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, sample_policy, NULL); if (ret < 0) return ret; - if (!tb[TCA_SAMPLE_PARMS] || !tb[TCA_SAMPLE_RATE] || - !tb[TCA_SAMPLE_PSAMPLE_GROUP]) + + if (!tb[TCA_SAMPLE_PARMS]) return -EINVAL; parm = nla_data(tb[TCA_SAMPLE_PARMS]); @@ -69,7 +69,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, index, est, a, - &act_sample_ops, bind, true, 0); + &act_sample_ops, bind, true, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; @@ -79,6 +79,13 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, tcf_idr_release(*a, bind); return -EEXIST; } + + if (!tb[TCA_SAMPLE_RATE] || !tb[TCA_SAMPLE_PSAMPLE_GROUP]) { + NL_SET_ERR_MSG(extack, "sample rate and group are required"); + err = -EINVAL; + goto release_idr; + } + err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack); if (err < 0) goto release_idr; diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index a4f3d0f0daa969f4e78e363ff01f9d42991ee323..b9bbc87a87c5b8ee6c97886138f3321518b15dde 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -128,7 +128,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, index, est, a, - &act_simp_ops, bind, false, 0); + &act_simp_ops, bind, false, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index e5f3fb8b00e32a0cc6bcd65cc3fdb54bcb2e0d6f..a5661f2d93e999049b71474b2e72da10838bbc65 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -176,7 +176,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, index, est, a, - &act_skbedit_ops, bind, true, 0); + &act_skbedit_ops, bind, true, act_flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c index 8d17a543cc9fef921dfba1bb5fe10a6d4494f63b..aa98dcac94b952f1ea5047dcfdb9af0f1b9bd19a 100644 --- 
a/net/sched/act_skbmod.c +++ b/net/sched/act_skbmod.c @@ -147,7 +147,7 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, index, est, a, - &act_skbmod_ops, bind, true, 0); + &act_skbmod_ops, bind, true, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index ec945294626a8b3d1cd761b6a2636290fc47d659..08c41f1976c47ec73fb5d83822b766a90192efbd 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c @@ -210,11 +210,6 @@ static int fw_set_parms(struct net *net, struct tcf_proto *tp, if (err < 0) return err; - if (tb[TCA_FW_CLASSID]) { - f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]); - tcf_bind_filter(tp, &f->res, base); - } - if (tb[TCA_FW_INDEV]) { int ret; ret = tcf_change_indev(net, tb[TCA_FW_INDEV], extack); @@ -231,6 +226,11 @@ static int fw_set_parms(struct net *net, struct tcf_proto *tp, } else if (head->mask != 0xFFFFFFFF) return err; + if (tb[TCA_FW_CLASSID]) { + f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]); + tcf_bind_filter(tp, &f->res, base); + } + return 0; } @@ -266,7 +266,6 @@ static int fw_change(struct net *net, struct sk_buff *in_skb, return -ENOBUFS; fnew->id = f->id; - fnew->res = f->res; fnew->ifindex = f->ifindex; fnew->tp = f->tp; diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c index b775e681cb56e2ac45c6d5d9041c9bc9d8f31329..1ad4b3e60eb3b63ab604c152bfc1dc253ea9f412 100644 --- a/net/sched/cls_route.c +++ b/net/sched/cls_route.c @@ -511,7 +511,6 @@ static int route4_change(struct net *net, struct sk_buff *in_skb, if (fold) { f->id = fold->id; f->iif = fold->iif; - f->res = fold->res; f->handle = fold->handle; f->tp = fold->tp; diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c deleted file mode 100644 index 50bf7ec4b5b2522f0daee69e741874ff987f9c81..0000000000000000000000000000000000000000 --- a/net/sched/cls_tcindex.c +++ /dev/null @@ -1,756 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * net/sched/cls_tcindex.c Packet classifier for skb->tc_index - * - * Written 1998,1999 by Werner Almesberger, EPFL ICA - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * Passing parameters to the root seems to be done more awkwardly than really - * necessary. At least, u32 doesn't seem to use such dirty hacks. To be - * verified. FIXME. 
- */ - -#define PERFECT_HASH_THRESHOLD 64 /* use perfect hash if not bigger */ -#define DEFAULT_HASH_SIZE 64 /* optimized for diffserv */ - - -struct tcindex_data; - -struct tcindex_filter_result { - struct tcf_exts exts; - struct tcf_result res; - struct tcindex_data *p; - struct rcu_work rwork; -}; - -struct tcindex_filter { - u16 key; - struct tcindex_filter_result result; - struct tcindex_filter __rcu *next; - struct rcu_work rwork; -}; - - -struct tcindex_data { - struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */ - struct tcindex_filter __rcu **h; /* imperfect hash; */ - struct tcf_proto *tp; - u16 mask; /* AND key with mask */ - u32 shift; /* shift ANDed key to the right */ - u32 hash; /* hash table size; 0 if undefined */ - u32 alloc_hash; /* allocated size */ - u32 fall_through; /* 0: only classify if explicit match */ - refcount_t refcnt; /* a temporary refcnt for perfect hash */ - struct rcu_work rwork; -}; - -static inline int tcindex_filter_is_set(struct tcindex_filter_result *r) -{ - return tcf_exts_has_actions(&r->exts) || r->res.classid; -} - -static void tcindex_data_get(struct tcindex_data *p) -{ - refcount_inc(&p->refcnt); -} - -static void tcindex_data_put(struct tcindex_data *p) -{ - if (refcount_dec_and_test(&p->refcnt)) { - kfree(p->perfect); - kfree(p->h); - kfree(p); - } -} - -static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p, - u16 key) -{ - if (p->perfect) { - struct tcindex_filter_result *f = p->perfect + key; - - return tcindex_filter_is_set(f) ? f : NULL; - } else if (p->h) { - struct tcindex_filter __rcu **fp; - struct tcindex_filter *f; - - fp = &p->h[key % p->hash]; - for (f = rcu_dereference_bh_rtnl(*fp); - f; - fp = &f->next, f = rcu_dereference_bh_rtnl(*fp)) - if (f->key == key) - return &f->result; - } - - return NULL; -} - - -static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp, - struct tcf_result *res) -{ - struct tcindex_data *p = rcu_dereference_bh(tp->root); - struct tcindex_filter_result *f; - int key = (skb->tc_index & p->mask) >> p->shift; - - pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n", - skb, tp, res, p); - - f = tcindex_lookup(p, key); - if (!f) { - struct Qdisc *q = tcf_block_q(tp->chain->block); - - if (!p->fall_through) - return -1; - res->classid = TC_H_MAKE(TC_H_MAJ(q->handle), key); - res->class = 0; - pr_debug("alg 0x%x\n", res->classid); - return 0; - } - *res = f->res; - pr_debug("map 0x%x\n", res->classid); - - return tcf_exts_exec(skb, &f->exts, res); -} - - -static void *tcindex_get(struct tcf_proto *tp, u32 handle) -{ - struct tcindex_data *p = rtnl_dereference(tp->root); - struct tcindex_filter_result *r; - - pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle); - if (p->perfect && handle >= p->alloc_hash) - return NULL; - r = tcindex_lookup(p, handle); - return r && tcindex_filter_is_set(r) ? 
r : NULL; -} - -static int tcindex_init(struct tcf_proto *tp) -{ - struct tcindex_data *p; - - pr_debug("tcindex_init(tp %p)\n", tp); - p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL); - if (!p) - return -ENOMEM; - - p->mask = 0xffff; - p->hash = DEFAULT_HASH_SIZE; - p->fall_through = 1; - refcount_set(&p->refcnt, 1); /* Paired with tcindex_destroy_work() */ - - rcu_assign_pointer(tp->root, p); - return 0; -} - -static void __tcindex_destroy_rexts(struct tcindex_filter_result *r) -{ - tcf_exts_destroy(&r->exts); - tcf_exts_put_net(&r->exts); - tcindex_data_put(r->p); -} - -static void tcindex_destroy_rexts_work(struct work_struct *work) -{ - struct tcindex_filter_result *r; - - r = container_of(to_rcu_work(work), - struct tcindex_filter_result, - rwork); - rtnl_lock(); - __tcindex_destroy_rexts(r); - rtnl_unlock(); -} - -static void __tcindex_destroy_fexts(struct tcindex_filter *f) -{ - tcf_exts_destroy(&f->result.exts); - tcf_exts_put_net(&f->result.exts); - kfree(f); -} - -static void tcindex_destroy_fexts_work(struct work_struct *work) -{ - struct tcindex_filter *f = container_of(to_rcu_work(work), - struct tcindex_filter, - rwork); - - rtnl_lock(); - __tcindex_destroy_fexts(f); - rtnl_unlock(); -} - -static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last, - bool rtnl_held, struct netlink_ext_ack *extack) -{ - struct tcindex_data *p = rtnl_dereference(tp->root); - struct tcindex_filter_result *r = arg; - struct tcindex_filter __rcu **walk; - struct tcindex_filter *f = NULL; - - pr_debug("tcindex_delete(tp %p,arg %p),p %p\n", tp, arg, p); - if (p->perfect) { - if (!r->res.class) - return -ENOENT; - } else { - int i; - - for (i = 0; i < p->hash; i++) { - walk = p->h + i; - for (f = rtnl_dereference(*walk); f; - walk = &f->next, f = rtnl_dereference(*walk)) { - if (&f->result == r) - goto found; - } - } - return -ENOENT; - -found: - rcu_assign_pointer(*walk, rtnl_dereference(f->next)); - } - tcf_unbind_filter(tp, &r->res); - /* all classifiers are required to call tcf_exts_destroy() after rcu - * grace period, since converted-to-rcu actions are relying on that - * in cleanup() callback - */ - if (f) { - if (tcf_exts_get_net(&f->result.exts)) - tcf_queue_work(&f->rwork, tcindex_destroy_fexts_work); - else - __tcindex_destroy_fexts(f); - } else { - tcindex_data_get(p); - - if (tcf_exts_get_net(&r->exts)) - tcf_queue_work(&r->rwork, tcindex_destroy_rexts_work); - else - __tcindex_destroy_rexts(r); - } - - *last = false; - return 0; -} - -static void tcindex_destroy_work(struct work_struct *work) -{ - struct tcindex_data *p = container_of(to_rcu_work(work), - struct tcindex_data, - rwork); - - tcindex_data_put(p); -} - -static inline int -valid_perfect_hash(struct tcindex_data *p) -{ - return p->hash > (p->mask >> p->shift); -} - -static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = { - [TCA_TCINDEX_HASH] = { .type = NLA_U32 }, - [TCA_TCINDEX_MASK] = { .type = NLA_U16 }, - [TCA_TCINDEX_SHIFT] = { .type = NLA_U32 }, - [TCA_TCINDEX_FALL_THROUGH] = { .type = NLA_U32 }, - [TCA_TCINDEX_CLASSID] = { .type = NLA_U32 }, -}; - -static int tcindex_filter_result_init(struct tcindex_filter_result *r, - struct tcindex_data *p, - struct net *net) -{ - memset(r, 0, sizeof(*r)); - r->p = p; - return tcf_exts_init(&r->exts, net, TCA_TCINDEX_ACT, - TCA_TCINDEX_POLICE); -} - -static void tcindex_free_perfect_hash(struct tcindex_data *cp); - -static void tcindex_partial_destroy_work(struct work_struct *work) -{ - struct tcindex_data *p = container_of(to_rcu_work(work), - struct 
tcindex_data, - rwork); - - rtnl_lock(); - if (p->perfect) - tcindex_free_perfect_hash(p); - kfree(p); - rtnl_unlock(); -} - -static void tcindex_free_perfect_hash(struct tcindex_data *cp) -{ - int i; - - for (i = 0; i < cp->hash; i++) - tcf_exts_destroy(&cp->perfect[i].exts); - kfree(cp->perfect); -} - -static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp) -{ - int i, err = 0; - - cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result), - GFP_KERNEL | __GFP_NOWARN); - if (!cp->perfect) - return -ENOMEM; - - for (i = 0; i < cp->hash; i++) { - err = tcf_exts_init(&cp->perfect[i].exts, net, - TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); - if (err < 0) - goto errout; - cp->perfect[i].p = cp; - } - - return 0; - -errout: - tcindex_free_perfect_hash(cp); - return err; -} - -static int -tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, - u32 handle, struct tcindex_data *p, - struct tcindex_filter_result *r, struct nlattr **tb, - struct nlattr *est, bool ovr, struct netlink_ext_ack *extack) -{ - struct tcindex_filter_result new_filter_result; - struct tcindex_data *cp = NULL, *oldp; - struct tcindex_filter *f = NULL; /* make gcc behave */ - struct tcf_result cr = {}; - int err, balloc = 0; - struct tcf_exts e; - bool update_h = false; - - err = tcf_exts_init(&e, net, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); - if (err < 0) - return err; - err = tcf_exts_validate(net, tp, tb, est, &e, ovr, true, extack); - if (err < 0) - goto errout; - - err = -ENOMEM; - /* tcindex_data attributes must look atomic to classifier/lookup so - * allocate new tcindex data and RCU assign it onto root. Keeping - * perfect hash and hash pointers from old data. - */ - cp = kzalloc(sizeof(*cp), GFP_KERNEL); - if (!cp) - goto errout; - - cp->mask = p->mask; - cp->shift = p->shift; - cp->hash = p->hash; - cp->alloc_hash = p->alloc_hash; - cp->fall_through = p->fall_through; - cp->tp = tp; - refcount_set(&cp->refcnt, 1); /* Paired with tcindex_destroy_work() */ - - if (tb[TCA_TCINDEX_HASH]) - cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]); - - if (tb[TCA_TCINDEX_MASK]) - cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]); - - if (tb[TCA_TCINDEX_SHIFT]) { - cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]); - if (cp->shift > 16) { - err = -EINVAL; - goto errout; - } - } - if (!cp->hash) { - /* Hash not specified, use perfect hash if the upper limit - * of the hashing index is below the threshold. - */ - if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD) - cp->hash = (cp->mask >> cp->shift) + 1; - else - cp->hash = DEFAULT_HASH_SIZE; - } - - if (p->perfect) { - int i; - - if (tcindex_alloc_perfect_hash(net, cp) < 0) - goto errout; - cp->alloc_hash = cp->hash; - for (i = 0; i < min(cp->hash, p->hash); i++) - cp->perfect[i].res = p->perfect[i].res; - balloc = 1; - } - cp->h = p->h; - - err = tcindex_filter_result_init(&new_filter_result, cp, net); - if (err < 0) - goto errout_alloc; - if (r) - cr = r->res; - - err = -EBUSY; - - /* Hash already allocated, make sure that we still meet the - * requirements for the allocated hash. 
- */ - if (cp->perfect) { - if (!valid_perfect_hash(cp) || - cp->hash > cp->alloc_hash) - goto errout_alloc; - } else if (cp->h && cp->hash != cp->alloc_hash) { - goto errout_alloc; - } - - err = -EINVAL; - if (tb[TCA_TCINDEX_FALL_THROUGH]) - cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]); - - if (!cp->perfect && !cp->h) - cp->alloc_hash = cp->hash; - - /* Note: this could be as restrictive as if (handle & ~(mask >> shift)) - * but then, we'd fail handles that may become valid after some future - * mask change. While this is extremely unlikely to ever matter, - * the check below is safer (and also more backwards-compatible). - */ - if (cp->perfect || valid_perfect_hash(cp)) - if (handle >= cp->alloc_hash) - goto errout_alloc; - - - err = -ENOMEM; - if (!cp->perfect && !cp->h) { - if (valid_perfect_hash(cp)) { - if (tcindex_alloc_perfect_hash(net, cp) < 0) - goto errout_alloc; - balloc = 1; - } else { - struct tcindex_filter __rcu **hash; - - hash = kcalloc(cp->hash, - sizeof(struct tcindex_filter *), - GFP_KERNEL); - - if (!hash) - goto errout_alloc; - - cp->h = hash; - balloc = 2; - } - } - - if (cp->perfect) { - r = cp->perfect + handle; - } else { - /* imperfect area is updated in-place using rcu */ - update_h = !!tcindex_lookup(cp, handle); - r = &new_filter_result; - } - - if (r == &new_filter_result) { - f = kzalloc(sizeof(*f), GFP_KERNEL); - if (!f) - goto errout_alloc; - f->key = handle; - f->next = NULL; - err = tcindex_filter_result_init(&f->result, cp, net); - if (err < 0) { - kfree(f); - goto errout_alloc; - } - } - - if (tb[TCA_TCINDEX_CLASSID]) { - cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]); - tcf_bind_filter(tp, &cr, base); - } - - oldp = p; - r->res = cr; - tcf_exts_change(&r->exts, &e); - - rcu_assign_pointer(tp->root, cp); - - if (update_h) { - struct tcindex_filter __rcu **fp; - struct tcindex_filter *cf; - - f->result.res = r->res; - tcf_exts_change(&f->result.exts, &r->exts); - - /* imperfect area bucket */ - fp = cp->h + (handle % cp->hash); - - /* lookup the filter, guaranteed to exist */ - for (cf = rcu_dereference_bh_rtnl(*fp); cf; - fp = &cf->next, cf = rcu_dereference_bh_rtnl(*fp)) - if (cf->key == handle) - break; - - f->next = cf->next; - - cf = rcu_replace_pointer(*fp, f, 1); - tcf_exts_get_net(&cf->result.exts); - tcf_queue_work(&cf->rwork, tcindex_destroy_fexts_work); - } else if (r == &new_filter_result) { - struct tcindex_filter *nfp; - struct tcindex_filter __rcu **fp; - - f->result.res = r->res; - tcf_exts_change(&f->result.exts, &r->exts); - - fp = cp->h + (handle % cp->hash); - for (nfp = rtnl_dereference(*fp); - nfp; - fp = &nfp->next, nfp = rtnl_dereference(*fp)) - ; /* nothing */ - - rcu_assign_pointer(*fp, f); - } else { - tcf_exts_destroy(&new_filter_result.exts); - } - - if (oldp) - tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work); - return 0; - -errout_alloc: - if (balloc == 1) - tcindex_free_perfect_hash(cp); - else if (balloc == 2) - kfree(cp->h); - tcf_exts_destroy(&new_filter_result.exts); -errout: - kfree(cp); - tcf_exts_destroy(&e); - return err; -} - -static int -tcindex_change(struct net *net, struct sk_buff *in_skb, - struct tcf_proto *tp, unsigned long base, u32 handle, - struct nlattr **tca, void **arg, bool ovr, - bool rtnl_held, struct netlink_ext_ack *extack) -{ - struct nlattr *opt = tca[TCA_OPTIONS]; - struct nlattr *tb[TCA_TCINDEX_MAX + 1]; - struct tcindex_data *p = rtnl_dereference(tp->root); - struct tcindex_filter_result *r = *arg; - int err; - - pr_debug("tcindex_change(tp %p,handle 0x%08x,tca 
%p,arg %p),opt %p," - "p %p,r %p,*arg %p\n", - tp, handle, tca, arg, opt, p, r, *arg); - - if (!opt) - return 0; - - err = nla_parse_nested_deprecated(tb, TCA_TCINDEX_MAX, opt, - tcindex_policy, NULL); - if (err < 0) - return err; - - return tcindex_set_parms(net, tp, base, handle, p, r, tb, - tca[TCA_RATE], ovr, extack); -} - -static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker, - bool rtnl_held) -{ - struct tcindex_data *p = rtnl_dereference(tp->root); - struct tcindex_filter *f, *next; - int i; - - pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p); - if (p->perfect) { - for (i = 0; i < p->hash; i++) { - if (!p->perfect[i].res.class) - continue; - if (walker->count >= walker->skip) { - if (walker->fn(tp, p->perfect + i, walker) < 0) { - walker->stop = 1; - return; - } - } - walker->count++; - } - } - if (!p->h) - return; - for (i = 0; i < p->hash; i++) { - for (f = rtnl_dereference(p->h[i]); f; f = next) { - next = rtnl_dereference(f->next); - if (walker->count >= walker->skip) { - if (walker->fn(tp, &f->result, walker) < 0) { - walker->stop = 1; - return; - } - } - walker->count++; - } - } -} - -static void tcindex_destroy(struct tcf_proto *tp, bool rtnl_held, - struct netlink_ext_ack *extack) -{ - struct tcindex_data *p = rtnl_dereference(tp->root); - int i; - - pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p); - - if (p->perfect) { - for (i = 0; i < p->hash; i++) { - struct tcindex_filter_result *r = p->perfect + i; - - /* tcf_queue_work() does not guarantee the ordering we - * want, so we have to take this refcnt temporarily to - * ensure 'p' is freed after all tcindex_filter_result - * here. Imperfect hash does not need this, because it - * uses linked lists rather than an array. - */ - tcindex_data_get(p); - - tcf_unbind_filter(tp, &r->res); - if (tcf_exts_get_net(&r->exts)) - tcf_queue_work(&r->rwork, - tcindex_destroy_rexts_work); - else - __tcindex_destroy_rexts(r); - } - } - - for (i = 0; p->h && i < p->hash; i++) { - struct tcindex_filter *f, *next; - bool last; - - for (f = rtnl_dereference(p->h[i]); f; f = next) { - next = rtnl_dereference(f->next); - tcindex_delete(tp, &f->result, &last, rtnl_held, NULL); - } - } - - tcf_queue_work(&p->rwork, tcindex_destroy_work); -} - - -static int tcindex_dump(struct net *net, struct tcf_proto *tp, void *fh, - struct sk_buff *skb, struct tcmsg *t, bool rtnl_held) -{ - struct tcindex_data *p = rtnl_dereference(tp->root); - struct tcindex_filter_result *r = fh; - struct nlattr *nest; - - pr_debug("tcindex_dump(tp %p,fh %p,skb %p,t %p),p %p,r %p\n", - tp, fh, skb, t, p, r); - pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h); - - nest = nla_nest_start_noflag(skb, TCA_OPTIONS); - if (nest == NULL) - goto nla_put_failure; - - if (!fh) { - t->tcm_handle = ~0; /* whatever ... 
*/ - if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) || - nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) || - nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) || - nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through)) - goto nla_put_failure; - nla_nest_end(skb, nest); - } else { - if (p->perfect) { - t->tcm_handle = r - p->perfect; - } else { - struct tcindex_filter *f; - struct tcindex_filter __rcu **fp; - int i; - - t->tcm_handle = 0; - for (i = 0; !t->tcm_handle && i < p->hash; i++) { - fp = &p->h[i]; - for (f = rtnl_dereference(*fp); - !t->tcm_handle && f; - fp = &f->next, f = rtnl_dereference(*fp)) { - if (&f->result == r) - t->tcm_handle = f->key; - } - } - } - pr_debug("handle = %d\n", t->tcm_handle); - if (r->res.class && - nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid)) - goto nla_put_failure; - - if (tcf_exts_dump(skb, &r->exts) < 0) - goto nla_put_failure; - nla_nest_end(skb, nest); - - if (tcf_exts_dump_stats(skb, &r->exts) < 0) - goto nla_put_failure; - } - - return skb->len; - -nla_put_failure: - nla_nest_cancel(skb, nest); - return -1; -} - -static void tcindex_bind_class(void *fh, u32 classid, unsigned long cl, - void *q, unsigned long base) -{ - struct tcindex_filter_result *r = fh; - - if (r && r->res.classid == classid) { - if (cl) - __tcf_bind_filter(q, &r->res, base); - else - __tcf_unbind_filter(q, &r->res); - } -} - -static struct tcf_proto_ops cls_tcindex_ops __read_mostly = { - .kind = "tcindex", - .classify = tcindex_classify, - .init = tcindex_init, - .destroy = tcindex_destroy, - .get = tcindex_get, - .change = tcindex_change, - .delete = tcindex_delete, - .walk = tcindex_walk, - .dump = tcindex_dump, - .bind_class = tcindex_bind_class, - .owner = THIS_MODULE, -}; - -static int __init init_tcindex(void) -{ - return register_tcf_proto_ops(&cls_tcindex_ops); -} - -static void __exit exit_tcindex(void) -{ - unregister_tcf_proto_ops(&cls_tcindex_ops); -} - -module_init(init_tcindex) -module_exit(exit_tcindex) -MODULE_LICENSE("GPL"); diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index da042bc8b239dd312bec797abe480d2d49148f67..b8c44ead0c16c67982cd6e738a08e10a25d6cc02 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -716,12 +716,18 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp, struct nlattr *est, bool ovr, struct netlink_ext_ack *extack) { - int err; + int err, ifindex = -1; err = tcf_exts_validate(net, tp, tb, est, &n->exts, ovr, true, extack); if (err < 0) return err; + if (tb[TCA_U32_INDEV]) { + ifindex = tcf_change_indev(net, tb[TCA_U32_INDEV], extack); + if (ifindex < 0) + return -EINVAL; + } + if (tb[TCA_U32_LINK]) { u32 handle = nla_get_u32(tb[TCA_U32_LINK]); struct tc_u_hnode *ht_down = NULL, *ht_old; @@ -756,13 +762,9 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp, tcf_bind_filter(tp, &n->res, base); } - if (tb[TCA_U32_INDEV]) { - int ret; - ret = tcf_change_indev(net, tb[TCA_U32_INDEV], extack); - if (ret < 0) - return -EINVAL; - n->ifindex = ret; - } + if (ifindex >= 0) + n->ifindex = ifindex; + return 0; } @@ -810,7 +812,6 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp, new->ifindex = n->ifindex; new->fshift = n->fshift; - new->res = n->res; new->flags = n->flags; RCU_INIT_POINTER(new->ht_down, ht); diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index cdc43a06aa9bccee969adc31a1bad4dbbeb7ff05..6076294a632c591e26c00e0d5b33147ca2b2cc12 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -1012,6 +1012,10 @@ hfsc_change_class(struct 
Qdisc *sch, u32 classid, u32 parentid, if (parent == NULL) return -ENOENT; } + if (!(parent->cl_flags & HFSC_FSC) && parent != &q->root) { + NL_SET_ERR_MSG(extack, "Invalid parent - parent class must have FSC"); + return -EINVAL; + } if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0) return -EINVAL; diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index c3ba018fd083ea61eaebb46b782d3756e0878795..ff84ed531199a83a51ff46b2e766ae67cd2ece67 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -405,7 +405,10 @@ static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl) while (cl->cmode == HTB_MAY_BORROW && p && mask) { m = mask; while (m) { - int prio = ffz(~m); + unsigned int prio = ffz(~m); + + if (WARN_ON_ONCE(prio >= ARRAY_SIZE(p->inner.clprio))) + break; m &= ~(1 << prio); if (p->inner.clprio[prio].feed.rb_node) diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c index cbc2ebca4548c927070dd2dbd7685cdb6a7ffe8f..339990bb59817fcc645f14a0783abb231f4750be 100644 --- a/net/sched/sch_plug.c +++ b/net/sched/sch_plug.c @@ -210,7 +210,7 @@ static struct Qdisc_ops plug_qdisc_ops __read_mostly = { .priv_size = sizeof(struct plug_sched_data), .enqueue = plug_enqueue, .dequeue = plug_dequeue, - .peek = qdisc_peek_head, + .peek = qdisc_peek_dequeued, .init = plug_init, .change = plug_change, .reset = qdisc_reset_queue, diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index cad7deacf60a49d316cf60d239b215c91b9feee9..46393f52e56182ccf504d66e373bd2612cb12b59 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -375,8 +375,13 @@ static int qfq_change_agg(struct Qdisc *sch, struct qfq_class *cl, u32 weight, u32 lmax) { struct qfq_sched *q = qdisc_priv(sch); - struct qfq_aggregate *new_agg = qfq_find_agg(q, lmax, weight); + struct qfq_aggregate *new_agg; + /* 'lmax' can range from [QFQ_MIN_LMAX, pktlen + stab overhead] */ + if (lmax > QFQ_MAX_LMAX) + return -EINVAL; + + new_agg = qfq_find_agg(q, lmax, weight); if (new_agg == NULL) { /* create new aggregate */ new_agg = kzalloc(sizeof(*new_agg), GFP_ATOMIC); if (new_agg == NULL) @@ -970,10 +975,13 @@ static void qfq_update_eligible(struct qfq_sched *q) } /* Dequeue head packet of the head class in the DRR queue of the aggregate. */ -static void agg_dequeue(struct qfq_aggregate *agg, - struct qfq_class *cl, unsigned int len) +static struct sk_buff *agg_dequeue(struct qfq_aggregate *agg, + struct qfq_class *cl, unsigned int len) { - qdisc_dequeue_peeked(cl->qdisc); + struct sk_buff *skb = qdisc_dequeue_peeked(cl->qdisc); + + if (!skb) + return NULL; cl->deficit -= (int) len; @@ -983,6 +991,8 @@ static void agg_dequeue(struct qfq_aggregate *agg, cl->deficit += agg->lmax; list_move_tail(&cl->alist, &agg->active); } + + return skb; } static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg, @@ -1128,11 +1138,18 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch) if (!skb) return NULL; - qdisc_qstats_backlog_dec(sch, skb); sch->q.qlen--; + + skb = agg_dequeue(in_serv_agg, cl, len); + + if (!skb) { + sch->q.qlen++; + return NULL; + } + + qdisc_qstats_backlog_dec(sch, skb); qdisc_bstats_update(sch, skb); - agg_dequeue(in_serv_agg, cl, len); /* If lmax is lowered, through qfq_change_class, for a class * owning pending packets with larger size than the new value * of lmax, then the following condition may hold. 
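[Editor's note, not part of the patch; placed here at the boundary between the sch_qfq and sch_taprio diffs.] Several hunks in this series apply the same hardening pattern: a count or size taken from userspace or from the wire (xt_sctp's flag_count, xt_u32's ntests/nnums/nvalues, nfnl_osf's opt_num, the htb prio index clamp, the qfq lmax bound) is validated against the fixed-size array or limit it refers to before it is trusted. The sketch below is a minimal, self-contained userspace illustration of that pattern only; the struct and function names are hypothetical and do not exist in the kernel, and DEMO_ARRAY_SIZE stands in for the kernel's ARRAY_SIZE macro.

#include <errno.h>
#include <stddef.h>

#define DEMO_ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Hypothetical match configuration copied verbatim from an untrusted peer. */
struct demo_match_info {
	unsigned int flag_count;    /* claims how many entries are valid */
	unsigned int flag_info[4];  /* fixed-size backing storage */
};

/* Reject a configuration whose claimed count exceeds the backing array,
 * so later loops over flag_info[] cannot index out of bounds. This mirrors
 * the "count > ARRAY_SIZE(...)" checks added by the hunks above.
 */
static int demo_check_match(const struct demo_match_info *info)
{
	if (info->flag_count > DEMO_ARRAY_SIZE(info->flag_info))
		return -EINVAL;
	return 0;
}

int main(void)
{
	struct demo_match_info bad = { .flag_count = 10 };

	/* Expect rejection: 10 entries claimed, only 4 slots exist. */
	return demo_check_match(&bad) == -EINVAL ? 0 : 1;
}

The design point is that the check happens once, at configuration time (checkentry/init), so the per-packet fast paths can use the count without re-validating it.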
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index 7f33b31c7b8bde4ffae185c8b9230e3b158e9189..2d842f31ec5a8d66392b96fe5540ab3f164b5f72 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -1621,6 +1621,7 @@ static void taprio_reset(struct Qdisc *sch) int i; hrtimer_cancel(&q->advance_timer); + if (q->qdiscs) { for (i = 0; i < dev->num_tx_queues; i++) if (q->qdiscs[i]) @@ -1642,6 +1643,7 @@ static void taprio_destroy(struct Qdisc *sch) * happens in qdisc_create(), after taprio_init() has been called. */ hrtimer_cancel(&q->advance_timer); + qdisc_synchronize(sch); taprio_disable_offload(dev, q, NULL); @@ -1904,14 +1906,12 @@ static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb) static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl) { - struct taprio_sched *q = qdisc_priv(sch); - struct net_device *dev = qdisc_dev(sch); - unsigned int ntx = cl - 1; + struct netdev_queue *dev_queue = taprio_queue_get(sch, cl); - if (ntx >= dev->num_tx_queues) + if (!dev_queue) return NULL; - return q->qdiscs[ntx]; + return dev_queue->qdisc_sleeping; } static unsigned long taprio_find(struct Qdisc *sch, u32 classid) diff --git a/net/sctp/diag.c b/net/sctp/diag.c index 68ff82ff49a3dc0373b1992618f606cf694a93d0..07d0ada23bfd22e12a01a0373ebb83ea5e569b2a 100644 --- a/net/sctp/diag.c +++ b/net/sctp/diag.c @@ -349,11 +349,9 @@ static int sctp_sock_filter(struct sctp_endpoint *ep, struct sctp_transport *tsp struct sctp_comm_param *commp = p; struct sock *sk = ep->base.sk; const struct inet_diag_req_v2 *r = commp->r; - struct sctp_association *assoc = - list_entry(ep->asocs.next, struct sctp_association, asocs); /* find the ep only once through the transports by this condition */ - if (tsp->asoc != assoc) + if (!list_is_first(&tsp->asoc->asocs, &ep->asocs)) return 0; if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family) diff --git a/net/sctp/stream_sched_prio.c b/net/sctp/stream_sched_prio.c index 4fc9f2923ed116cbc02bc2e9d4771c888588fe8c..7dd9f8b387cca4099e5488bdaef0286c9cf4ca11 100644 --- a/net/sctp/stream_sched_prio.c +++ b/net/sctp/stream_sched_prio.c @@ -25,6 +25,18 @@ static void sctp_sched_prio_unsched_all(struct sctp_stream *stream); +static struct sctp_stream_priorities *sctp_sched_prio_head_get(struct sctp_stream_priorities *p) +{ + p->users++; + return p; +} + +static void sctp_sched_prio_head_put(struct sctp_stream_priorities *p) +{ + if (p && --p->users == 0) + kfree(p); +} + static struct sctp_stream_priorities *sctp_sched_prio_new_head( struct sctp_stream *stream, int prio, gfp_t gfp) { @@ -38,6 +50,7 @@ static struct sctp_stream_priorities *sctp_sched_prio_new_head( INIT_LIST_HEAD(&p->active); p->next = NULL; p->prio = prio; + p->users = 1; return p; } @@ -53,7 +66,7 @@ static struct sctp_stream_priorities *sctp_sched_prio_get_head( */ list_for_each_entry(p, &stream->prio_list, prio_sched) { if (p->prio == prio) - return p; + return sctp_sched_prio_head_get(p); if (p->prio > prio) break; } @@ -70,7 +83,7 @@ static struct sctp_stream_priorities *sctp_sched_prio_get_head( */ break; if (p->prio == prio) - return p; + return sctp_sched_prio_head_get(p); } /* If not even there, allocate a new one. 
*/ @@ -154,32 +167,21 @@ static int sctp_sched_prio_set(struct sctp_stream *stream, __u16 sid, struct sctp_stream_out_ext *soute = sout->ext; struct sctp_stream_priorities *prio_head, *old; bool reschedule = false; - int i; + + old = soute->prio_head; + if (old && old->prio == prio) + return 0; prio_head = sctp_sched_prio_get_head(stream, prio, gfp); if (!prio_head) return -ENOMEM; reschedule = sctp_sched_prio_unsched(soute); - old = soute->prio_head; soute->prio_head = prio_head; if (reschedule) sctp_sched_prio_sched(stream, soute); - if (!old) - /* Happens when we set the priority for the first time */ - return 0; - - for (i = 0; i < stream->outcnt; i++) { - soute = SCTP_SO(stream, i)->ext; - if (soute && soute->prio_head == old) - /* It's still in use, nothing else to do here. */ - return 0; - } - - /* No hits, we are good to free it. */ - kfree(old); - + sctp_sched_prio_head_put(old); return 0; } @@ -206,20 +208,8 @@ static int sctp_sched_prio_init_sid(struct sctp_stream *stream, __u16 sid, static void sctp_sched_prio_free_sid(struct sctp_stream *stream, __u16 sid) { - struct sctp_stream_priorities *prio = SCTP_SO(stream, sid)->ext->prio_head; - int i; - - if (!prio) - return; - + sctp_sched_prio_head_put(SCTP_SO(stream, sid)->ext->prio_head); SCTP_SO(stream, sid)->ext->prio_head = NULL; - for (i = 0; i < stream->outcnt; i++) { - if (SCTP_SO(stream, i)->ext && - SCTP_SO(stream, i)->ext->prio_head == prio) - return; - } - - kfree(prio); } static void sctp_sched_prio_free(struct sctp_stream *stream) diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 41cbc7c89c9d21ddcf0b6bd71e05cc9fbaa2917c..8ab84926816f61cda789183244fb238a827baf52 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -1988,16 +1988,14 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct smc_sock *smc; - int rc = -EPIPE; + int rc; smc = smc_sk(sk); lock_sock(sk); - if ((sk->sk_state != SMC_ACTIVE) && - (sk->sk_state != SMC_APPCLOSEWAIT1) && - (sk->sk_state != SMC_INIT)) - goto out; + /* SMC does not support connect with fastopen */ if (msg->msg_flags & MSG_FASTOPEN) { + /* not connected yet, fallback */ if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) { smc_switch_to_fallback(smc); smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP; @@ -2005,6 +2003,11 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) rc = -EINVAL; goto out; } + } else if ((sk->sk_state != SMC_ACTIVE) && + (sk->sk_state != SMC_APPCLOSEWAIT1) && + (sk->sk_state != SMC_INIT)) { + rc = -EPIPE; + goto out; } if (smc->use_fallback) diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index c478108ca6a658a8a03b07a888c590930efb850e..c6e8bd78e35d6a80a8cfdf869bc340590c336fad 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -3026,6 +3026,8 @@ rpc_clnt_swap_activate_callback(struct rpc_clnt *clnt, int rpc_clnt_swap_activate(struct rpc_clnt *clnt) { + while (clnt != clnt->cl_parent) + clnt = clnt->cl_parent; if (atomic_inc_return(&clnt->cl_swapper) == 1) return rpc_clnt_iterate_for_each_xprt(clnt, rpc_clnt_swap_activate_callback, NULL); @@ -3045,6 +3047,8 @@ rpc_clnt_swap_deactivate_callback(struct rpc_clnt *clnt, void rpc_clnt_swap_deactivate(struct rpc_clnt *clnt) { + while (clnt != clnt->cl_parent) + clnt = clnt->cl_parent; if (atomic_dec_if_positive(&clnt->cl_swapper) == 0) rpc_clnt_iterate_for_each_xprt(clnt, rpc_clnt_swap_deactivate_callback, NULL); diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 
d38788cd9433a0fad4c1c7fa41eef628189c38f0..af657a482ad2d73022c96a59be66c37ed0278a3f 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -800,6 +800,7 @@ EXPORT_SYMBOL_GPL(svc_set_num_threads); static int svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) { + struct svc_rqst *rqstp; struct task_struct *task; unsigned int state = serv->sv_nrthreads-1; @@ -808,7 +809,10 @@ svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) task = choose_victim(serv, pool, &state); if (task == NULL) break; - kthread_stop(task); + rqstp = kthread_data(task); + /* Did we lose a race to svo_function threadfn? */ + if (kthread_stop(task) == -EINTR) + svc_exit_thread(rqstp); nrservs++; } while (nrservs < 0); return 0; diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 21f20c3cda971dffb20f8d98edc8bc21bca50ddc..ac7feadb439048eb5a2496f5df1b6dbac15f2d59 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -949,7 +949,9 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) MSG_CMSG_COMPAT)) return -EOPNOTSUPP; - mutex_lock(&tls_ctx->tx_lock); + ret = mutex_lock_interruptible(&tls_ctx->tx_lock); + if (ret) + return ret; lock_sock(sk); if (unlikely(msg->msg_controllen)) { @@ -1283,7 +1285,9 @@ int tls_sw_sendpage(struct sock *sk, struct page *page, MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY)) return -EOPNOTSUPP; - mutex_lock(&tls_ctx->tx_lock); + ret = mutex_lock_interruptible(&tls_ctx->tx_lock); + if (ret) + return ret; lock_sock(sk); ret = tls_sw_do_sendpage(sk, page, offset, size, flags); release_sock(sk); @@ -2266,11 +2270,19 @@ static void tx_work_handler(struct work_struct *work) if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) return; - mutex_lock(&tls_ctx->tx_lock); - lock_sock(sk); - tls_tx_records(sk, -1); - release_sock(sk); - mutex_unlock(&tls_ctx->tx_lock); + + if (mutex_trylock(&tls_ctx->tx_lock)) { + lock_sock(sk); + tls_tx_records(sk, -1); + release_sock(sk); + mutex_unlock(&tls_ctx->tx_lock); + } else if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) { + /* Someone is holding the tx_lock, they will likely run Tx + * and cancel the work on their way out of the lock section. + * Schedule a long delay just in case. 
+ */ + schedule_delayed_work(&ctx->tx_work.work, msecs_to_jiffies(10)); + } } void tls_sw_write_space(struct sock *sk, struct tls_context *ctx) diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 28721e9575b75fba23b074fc3319028cf50ad9f5..45aa3801e320b23be8ae3cf2038be9e14360cbb5 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -2008,6 +2008,7 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page, if (false) { alloc_skb: + spin_unlock(&other->sk_receive_queue.lock); unix_state_unlock(other); mutex_unlock(&unix_sk(other)->iolock); newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT, @@ -2047,6 +2048,7 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page, init_scm = false; } + spin_lock(&other->sk_receive_queue.lock); skb = skb_peek_tail(&other->sk_receive_queue); if (tail && tail == skb) { skb = newskb; @@ -2077,14 +2079,11 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page, refcount_add(size, &sk->sk_wmem_alloc); if (newskb) { - err = unix_scm_to_skb(&scm, skb, false); - if (err) - goto err_state_unlock; - spin_lock(&other->sk_receive_queue.lock); + unix_scm_to_skb(&scm, skb, false); __skb_queue_tail(&other->sk_receive_queue, newskb); - spin_unlock(&other->sk_receive_queue.lock); } + spin_unlock(&other->sk_receive_queue.lock); unix_state_unlock(other); mutex_unlock(&unix_sk(other)->iolock); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 8a7f0c8fba5e92647ef8ad58419d296a86f9cd0f..ea36d8c47b31a623f0fb04e81c4779d2a29a0dd0 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -12675,7 +12675,7 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info) return -ERANGE; if (nla_len(tb[NL80211_REKEY_DATA_KCK]) != NL80211_KCK_LEN && !(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK && - nla_len(tb[NL80211_REKEY_DATA_KEK]) == NL80211_KCK_EXT_LEN)) + nla_len(tb[NL80211_REKEY_DATA_KCK]) == NL80211_KCK_EXT_LEN)) return -ERANGE; rekey_data.kek = nla_data(tb[NL80211_REKEY_DATA_KEK]); diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 060e365c8259bf2995783f84b1a0d85f005305c4..f7e2e172a68df106528864a8d40b98472ace1ca5 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -269,6 +269,15 @@ void cfg80211_conn_work(struct work_struct *work) rtnl_unlock(); } +static void cfg80211_step_auth_next(struct cfg80211_conn *conn, + struct cfg80211_bss *bss) +{ + memcpy(conn->bssid, bss->bssid, ETH_ALEN); + conn->params.bssid = conn->bssid; + conn->params.channel = bss->channel; + conn->state = CFG80211_CONN_AUTHENTICATE_NEXT; +} + /* Returned bss is reference counted and must be cleaned up appropriately. 
*/ static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev) { @@ -286,10 +295,7 @@ static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev) if (!bss) return NULL; - memcpy(wdev->conn->bssid, bss->bssid, ETH_ALEN); - wdev->conn->params.bssid = wdev->conn->bssid; - wdev->conn->params.channel = bss->channel; - wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT; + cfg80211_step_auth_next(wdev->conn, bss); schedule_work(&rdev->conn_work); return bss; @@ -568,7 +574,12 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev, wdev->conn->params.ssid_len = wdev->ssid_len; /* see if we have the bss already */ - bss = cfg80211_get_conn_bss(wdev); + bss = cfg80211_get_bss(wdev->wiphy, wdev->conn->params.channel, + wdev->conn->params.bssid, + wdev->conn->params.ssid, + wdev->conn->params.ssid_len, + wdev->conn_bss_type, + IEEE80211_PRIVACY(wdev->conn->params.privacy)); if (prev_bssid) { memcpy(wdev->conn->prev_bssid, prev_bssid, ETH_ALEN); @@ -579,6 +590,7 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev, if (bss) { enum nl80211_timeout_reason treason; + cfg80211_step_auth_next(wdev->conn, bss); err = cfg80211_conn_do_work(wdev, &treason); cfg80211_put_bss(wdev->wiphy, bss); } else { @@ -1245,6 +1257,13 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev, } else { if (WARN_ON(connkeys)) return -EINVAL; + + /* connect can point to wdev->wext.connect which + * can hold key data from a previous connection + */ + connect->key = NULL; + connect->key_len = 0; + connect->key_idx = 0; } wdev->connect_keys = connkeys; diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index d231d4620c38f8c63aa938ec5f92a3d21c79e3b2..161dc194e634221bce420f962d5cb04ce1ace8c8 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -492,6 +492,12 @@ static int x25_listen(struct socket *sock, int backlog) int rc = -EOPNOTSUPP; lock_sock(sk); + if (sock->state != SS_UNCONNECTED) { + rc = -EINVAL; + release_sock(sk); + return rc; + } + if (sk->sk_state != TCP_LISTEN) { memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN); sk->sk_max_ack_backlog = backlog; diff --git a/net/xfrm/xfrm_compat.c b/net/xfrm/xfrm_compat.c index a0f62fa02e06e0aa97901aaf226dc84895f6a8ec..8cbf45a8bcdc24767c2b31584472f769827fd30b 100644 --- a/net/xfrm/xfrm_compat.c +++ b/net/xfrm/xfrm_compat.c @@ -5,6 +5,7 @@ * Based on code and translator idea by: Florian Westphal */ #include +#include #include #include @@ -302,7 +303,7 @@ static int xfrm_xlate64(struct sk_buff *dst, const struct nlmsghdr *nlh_src) nla_for_each_attr(nla, attrs, len, remaining) { int err; - switch (type) { + switch (nlh_src->nlmsg_type) { case XFRM_MSG_NEWSPDINFO: err = xfrm_nla_cpy(dst, nla, nla_len(nla)); break; @@ -437,6 +438,7 @@ static int xfrm_xlate32_attr(void *dst, const struct nlattr *nla, NL_SET_ERR_MSG(extack, "Bad attribute"); return -EOPNOTSUPP; } + type = array_index_nospec(type, XFRMA_MAX + 1); if (nla_len(nla) < compat_policy[type].len) { NL_SET_ERR_MSG(extack, "Attribute bad length"); return -EOPNOTSUPP; diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index 77e82033ad7002c75a2deae537dadace92e79074..fef99a1c5df10fefacc44acd9afc91b0526b8d33 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -277,8 +277,7 @@ static int xfrm6_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb) goto out; if (x->props.flags & XFRM_STATE_DECAP_DSCP) - ipv6_copy_dscp(ipv6_get_dsfield(ipv6_hdr(skb)), - ipipv6_hdr(skb)); + ipv6_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, ipipv6_hdr(skb)); if (!(x->props.flags & 
XFRM_STATE_NOECN)) ipip6_ecn_decapsulate(skb); diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c index da518b4ca84c6cad59903be1fcc0cd6c4dab1846..e4f21a6924153d24f9d3d3e5eeaf2cbedd316234 100644 --- a/net/xfrm/xfrm_interface.c +++ b/net/xfrm/xfrm_interface.c @@ -207,6 +207,52 @@ static void xfrmi_scrub_packet(struct sk_buff *skb, bool xnet) skb->mark = 0; } +static int xfrmi_input(struct sk_buff *skb, int nexthdr, __be32 spi, + int encap_type, unsigned short family) +{ + struct sec_path *sp; + + sp = skb_sec_path(skb); + if (sp && (sp->len || sp->olen) && + !xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family)) + goto discard; + + XFRM_SPI_SKB_CB(skb)->family = family; + if (family == AF_INET) { + XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr); + XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL; + } else { + XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr); + XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL; + } + + return xfrm_input(skb, nexthdr, spi, encap_type); +discard: + kfree_skb(skb); + return 0; +} + +static int xfrmi4_rcv(struct sk_buff *skb) +{ + return xfrmi_input(skb, ip_hdr(skb)->protocol, 0, 0, AF_INET); +} + +static int xfrmi6_rcv(struct sk_buff *skb) +{ + return xfrmi_input(skb, skb_network_header(skb)[IP6CB(skb)->nhoff], + 0, 0, AF_INET6); +} + +static int xfrmi4_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) +{ + return xfrmi_input(skb, nexthdr, spi, encap_type, AF_INET); +} + +static int xfrmi6_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) +{ + return xfrmi_input(skb, nexthdr, spi, encap_type, AF_INET6); +} + static int xfrmi_rcv_cb(struct sk_buff *skb, int err) { const struct xfrm_mode *inner_mode; @@ -780,8 +826,8 @@ static struct pernet_operations xfrmi_net_ops = { }; static struct xfrm6_protocol xfrmi_esp6_protocol __read_mostly = { - .handler = xfrm6_rcv, - .input_handler = xfrm_input, + .handler = xfrmi6_rcv, + .input_handler = xfrmi6_input, .cb_handler = xfrmi_rcv_cb, .err_handler = xfrmi6_err, .priority = 10, @@ -831,8 +877,8 @@ static struct xfrm6_tunnel xfrmi_ip6ip_handler __read_mostly = { #endif static struct xfrm4_protocol xfrmi_esp4_protocol __read_mostly = { - .handler = xfrm4_rcv, - .input_handler = xfrm_input, + .handler = xfrmi4_rcv, + .input_handler = xfrmi4_input, .cb_handler = xfrmi_rcv_cb, .err_handler = xfrmi4_err, .priority = 10, diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 0d12bdf59d4cce1575dfdb50d872030542748b15..d15aa62887de011ee4509e28f253edabd3acdb83 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -3710,6 +3710,9 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, goto reject; } + if (if_id) + secpath_reset(skb); + xfrm_pols_put(pols, npols); return 1; } diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index c6bf3898d1bf0d9c15a0a011ac5112d195390d3f..fb1fbb5dc5c3649aba42f8f010a1ab4cd8ff9e4f 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -527,7 +527,7 @@ static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs, struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH]; struct nlattr *mt = attrs[XFRMA_MTIMER_THRESH]; - if (re) { + if (re && x->replay_esn && x->preplay_esn) { struct xfrm_replay_state_esn *replay_esn; replay_esn = nla_data(re); memcpy(x->replay_esn, replay_esn, @@ -1062,6 +1062,15 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb) sizeof(*filter), GFP_KERNEL); if (filter == NULL) return -ENOMEM; + + /* see addr_match(), (prefix 
length >> 5) << 2 + * will be used to compare xfrm_address_t + */ + if (filter->splen > (sizeof(xfrm_address_t) << 3) || + filter->dplen > (sizeof(xfrm_address_t) << 3)) { + kfree(filter); + return -EINVAL; + } } if (attrs[XFRMA_PROTO]) diff --git a/scripts/checkkconfigsymbols.py b/scripts/checkkconfigsymbols.py index 1548f9ce46827a932efc51cd840676d7b4d4a0c5..697972432bbe752adfe300314aaddcf54399f856 100755 --- a/scripts/checkkconfigsymbols.py +++ b/scripts/checkkconfigsymbols.py @@ -113,7 +113,7 @@ def parse_options(): return args -def main(): +def print_undefined_symbols(): """Main function of this module.""" args = parse_options() @@ -472,5 +472,16 @@ def parse_kconfig_file(kfile): return defined, references +def main(): + try: + print_undefined_symbols() + except BrokenPipeError: + # Python flushes standard streams on exit; redirect remaining output + # to devnull to avoid another BrokenPipeError at shutdown + devnull = os.open(os.devnull, os.O_WRONLY) + os.dup2(devnull, sys.stdout.fileno()) + sys.exit(1) # Python exits with error code 1 on EPIPE + + if __name__ == "__main__": main() diff --git a/scripts/clang-tools/run-clang-tools.py b/scripts/clang-tools/run-clang-tools.py index f754415af398b7eb97710931c1ce5761e664f879..f42699134f1c01a6450c01b4b30378b691e7a9fb 100755 --- a/scripts/clang-tools/run-clang-tools.py +++ b/scripts/clang-tools/run-clang-tools.py @@ -60,14 +60,21 @@ def run_analysis(entry): def main(): - args = parse_arguments() + try: + args = parse_arguments() - lock = multiprocessing.Lock() - pool = multiprocessing.Pool(initializer=init, initargs=(lock, args)) - # Read JSON data into the datastore variable - with open(args.path, "r") as f: - datastore = json.load(f) - pool.map(run_analysis, datastore) + lock = multiprocessing.Lock() + pool = multiprocessing.Pool(initializer=init, initargs=(lock, args)) + # Read JSON data into the datastore variable + with open(args.path, "r") as f: + datastore = json.load(f) + pool.map(run_analysis, datastore) + except BrokenPipeError: + # Python flushes standard streams on exit; redirect remaining output + # to devnull to avoid another BrokenPipeError at shutdown + devnull = os.open(os.devnull, os.O_WRONLY) + os.dup2(devnull, sys.stdout.fileno()) + sys.exit(1) # Python exits with error code 1 on EPIPE if __name__ == "__main__": diff --git a/scripts/diffconfig b/scripts/diffconfig index d5da5fa05d1d3c264775dbb5c960bc3f69cd8684..43f0f3d273ae7178086f03038780ba103fd9970b 100755 --- a/scripts/diffconfig +++ b/scripts/diffconfig @@ -65,7 +65,7 @@ def print_config(op, config, value, new_value): else: print(" %s %s -> %s" % (config, value, new_value)) -def main(): +def show_diff(): global merge_style # parse command line args @@ -129,4 +129,16 @@ def main(): for config in new: print_config("+", config, None, b[config]) -main() +def main(): + try: + show_diff() + except BrokenPipeError: + # Python flushes standard streams on exit; redirect remaining output + # to devnull to avoid another BrokenPipeError at shutdown + devnull = os.open(os.devnull, os.O_WRONLY) + os.dup2(devnull, sys.stdout.fileno()) + sys.exit(1) # Python exits with error code 1 on EPIPE + + +if __name__ == '__main__': + main() diff --git a/scripts/kconfig/lexer.l b/scripts/kconfig/lexer.l index 2632461bec790b93ee3ad0292fb39ab74d7974a0..328bf2d0c39f939edc4842bfaa9473ff41039bd5 100644 --- a/scripts/kconfig/lexer.l +++ b/scripts/kconfig/lexer.l @@ -26,6 +26,9 @@ static const char *kconfig_white_list[] = { "net/newip/Kconfig", "security/xpm/Kconfig", "drivers/auth_ctl/Kconfig", + 
"drivers/staging/ucollection/Kconfig", + "fs/proc/memory_security/Kconfig", + "fs/code_sign/Kconfig", }; static struct { diff --git a/scripts/package/mkdebian b/scripts/package/mkdebian index 60a2a63a5e900c30691462b970cfcf924df85c52..32d528a3678686ee649bfeece3a0447e859722ab 100755 --- a/scripts/package/mkdebian +++ b/scripts/package/mkdebian @@ -236,7 +236,7 @@ binary-arch: build-arch KBUILD_BUILD_VERSION=${revision} -f \$(srctree)/Makefile intdeb-pkg clean: - rm -rf debian/*tmp debian/files + rm -rf debian/files debian/linux-* \$(MAKE) clean binary: binary-arch diff --git a/scripts/tags.sh b/scripts/tags.sh index fd96734deff137ffe0db574407e83c3ffa603939..b82aebb0c995e66a717873a73f9d2d412fdb763a 100755 --- a/scripts/tags.sh +++ b/scripts/tags.sh @@ -95,10 +95,13 @@ all_sources() all_compiled_sources() { - realpath -es $([ -z "$KBUILD_ABS_SRCTREE" ] && echo --relative-to=.) \ - include/generated/autoconf.h $(find $ignore -name "*.cmd" -exec \ - grep -Poh '(?(?=^source_.* \K).*|(?=^ \K\S).*(?= \\))' {} \+ | - awk '!a[$0]++') | sort -u + { + echo include/generated/autoconf.h + find $ignore -name "*.cmd" -exec \ + sed -n -E 's/^source_.* (.*)/\1/p; s/^ (\S.*) \\/\1/p' {} \+ | + awk '!a[$0]++' + } | xargs realpath -es $([ -z "$KBUILD_ABS_SRCTREE" ] && echo --relative-to=.) | + sort -u } all_target_sources() diff --git a/scripts/tracing/ftrace-bisect.sh b/scripts/tracing/ftrace-bisect.sh index 926701162bc8322149c33d6451c41e53660fd36a..bb4f59262bbe9e1f9f425699499abfc4226b309e 100755 --- a/scripts/tracing/ftrace-bisect.sh +++ b/scripts/tracing/ftrace-bisect.sh @@ -12,7 +12,7 @@ # (note, if this is a problem with function_graph tracing, then simply # replace "function" with "function_graph" in the following steps). # -# # cd /sys/kernel/debug/tracing +# # cd /sys/kernel/tracing # # echo schedule > set_ftrace_filter # # echo function > current_tracer # @@ -20,22 +20,40 @@ # # # echo nop > current_tracer # -# # cat available_filter_functions > ~/full-file +# Starting with v5.1 this can be done with numbers, making it much faster: +# +# The old (slow) way, for kernels before v5.1. +# +# [old-way] # cat available_filter_functions > ~/full-file +# +# [old-way] *** Note *** this process will take several minutes to update the +# [old-way] filters. Setting multiple functions is an O(n^2) operation, and we +# [old-way] are dealing with thousands of functions. So go have coffee, talk +# [old-way] with your coworkers, read facebook. And eventually, this operation +# [old-way] will end. +# +# The new way (using numbers) is an O(n) operation, and usually takes less than a second. +# +# seq `wc -l available_filter_functions | cut -d' ' -f1` > ~/full-file +# +# This will create a sequence of numbers that match the functions in +# available_filter_functions, and when echoing in a number into the +# set_ftrace_filter file, it will enable the corresponding function in +# O(1) time. Making enabling all functions O(n) where n is the number of +# functions to enable. +# +# For either the new or old way, the rest of the operations remain the same. +# # # ftrace-bisect ~/full-file ~/test-file ~/non-test-file # # cat ~/test-file > set_ftrace_filter # -# *** Note *** this will take several minutes. Setting multiple functions is -# an O(n^2) operation, and we are dealing with thousands of functions. So go -# have coffee, talk with your coworkers, read facebook. And eventually, this -# operation will end. -# # # echo function > current_tracer # # If it crashes, we know that ~/test-file has a bad function. 
# # Reboot back to test kernel. # -# # cd /sys/kernel/debug/tracing +# # cd /sys/kernel/tracing # # mv ~/test-file ~/full-file # # If it didn't crash. diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c index 600b97677085f95c866882d9db0698302bc14a0f..dd4b28b11ebe3c9be4ab26b0f17001734ae766c8 100644 --- a/security/integrity/ima/ima_main.c +++ b/security/integrity/ima/ima_main.c @@ -378,7 +378,9 @@ static int process_measurement(struct file *file, const struct cred *cred, /** * ima_file_mmap - based on policy, collect/store measurement. * @file: pointer to the file to be measured (May be NULL) - * @prot: contains the protection that will be applied by the kernel. + * @reqprot: protection requested by the application + * @prot: protection that will be applied by the kernel + * @flags: operational flags * * Measure files being mmapped executable based on the ima_must_measure() * policy decision. @@ -386,7 +388,8 @@ static int process_measurement(struct file *file, const struct cred *cred, * On success return 0. On integrity appraisal error, assuming the file * is in policy and IMA-appraisal is in enforcing mode, return -EACCES. */ -int ima_file_mmap(struct file *file, unsigned long prot) +int ima_file_mmap(struct file *file, unsigned long reqprot, + unsigned long prot, unsigned long flags) { u32 secid; diff --git a/security/security.c b/security/security.c index 6c5f9a7c6b592e936d024fd62b3d2b67cca67252..60e3bee786e52e1aa35ef8dfea8d4ddc12ffd971 100644 --- a/security/security.c +++ b/security/security.c @@ -1534,12 +1534,13 @@ static inline unsigned long mmap_prot(struct file *file, unsigned long prot) int security_mmap_file(struct file *file, unsigned long prot, unsigned long flags) { + unsigned long prot_adj = mmap_prot(file, prot); int ret; - ret = call_int_hook(mmap_file, 0, file, prot, - mmap_prot(file, prot), flags); + + ret = call_int_hook(mmap_file, 0, file, prot, prot_adj, flags); if (ret) return ret; - return ima_file_mmap(file, prot); + return ima_file_mmap(file, prot, prot_adj, flags); } int security_mmap_addr(unsigned long addr) diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h index 1c6c59b92ab5c79c73352c7246bce957c01e60dd..33b3424819fbe30f6597d7f685dd24203082f256 100644 --- a/security/selinux/include/classmap.h +++ b/security/selinux/include/classmap.h @@ -252,6 +252,10 @@ struct security_class_mapping secclass_map[] = { { "integrity", "confidentiality", NULL } }, { "xpm", { "exec_no_sign", "exec_anon_mem", NULL } }, + { "hideaddr", + { "hide_exec_anon_mem", "hide_exec_anon_mem_debug", NULL } }, + { "code_sign", + { "add_cert_chain", NULL } }, { NULL } }; diff --git a/security/tomoyo/Makefile b/security/tomoyo/Makefile index cca5a3012fee2e564ffa0ae293d1e8e223d80689..221eaadffb09c7f5f4d5ec217d7c4ab2f4b34942 100644 --- a/security/tomoyo/Makefile +++ b/security/tomoyo/Makefile @@ -10,7 +10,7 @@ endef quiet_cmd_policy = POLICY $@ cmd_policy = ($(call do_policy,profile); $(call do_policy,exception_policy); $(call do_policy,domain_policy); $(call do_policy,manager); $(call do_policy,stat)) >$@ -$(obj)/builtin-policy.h: $(wildcard $(obj)/policy/*.conf $(src)/policy/*.conf.default) FORCE +$(obj)/builtin-policy.h: $(wildcard $(obj)/policy/*.conf $(srctree)/$(src)/policy/*.conf.default) FORCE $(call if_changed,policy) $(obj)/common.o: $(obj)/builtin-policy.h diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c index 4efbcc41fdfb7431a3fc07124083784476399d55..0a83afa5f373c96d36c1d4359dbe12edd5a55412 100644 --- 
a/sound/pci/hda/hda_bind.c +++ b/sound/pci/hda/hda_bind.c @@ -143,6 +143,7 @@ static int hda_codec_driver_probe(struct device *dev) error: snd_hda_codec_cleanup_for_unbind(codec); + codec->preset = NULL; return err; } @@ -159,6 +160,7 @@ static int hda_codec_driver_remove(struct device *dev) if (codec->patch_ops.free) codec->patch_ops.free(codec); snd_hda_codec_cleanup_for_unbind(codec); + codec->preset = NULL; module_put(dev->driver->owner); return 0; } diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 39281106477ebf2df18b9ecd273c272076bc3783..fc4a64a83ff2f6f6473e1f232f4724433f2c9fba 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -784,7 +784,6 @@ void snd_hda_codec_cleanup_for_unbind(struct hda_codec *codec) snd_array_free(&codec->cvt_setups); snd_array_free(&codec->spdif_out); snd_array_free(&codec->verbs); - codec->preset = NULL; codec->follower_dig_outs = NULL; codec->spdif_status_reset = 0; snd_array_free(&codec->mixers); diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c index 82f14c3f642bdb9b9f2cefd133f56ae08456f0e5..24c2638cde376f28465d7d96359c56c1f6182d86 100644 --- a/sound/pci/hda/patch_ca0132.c +++ b/sound/pci/hda/patch_ca0132.c @@ -2331,7 +2331,7 @@ static int dspio_set_uint_param_no_source(struct hda_codec *codec, int mod_id, static int dspio_alloc_dma_chan(struct hda_codec *codec, unsigned int *dma_chan) { int status = 0; - unsigned int size = sizeof(dma_chan); + unsigned int size = sizeof(*dma_chan); codec_dbg(codec, " dspio_alloc_dma_chan() -- begin\n"); status = dspio_scp(codec, MASTERCONTROL, 0x20, diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 2bd0a5839e8050d6000f687a62671012854c066c..48b802563c2da65f3986047663992504f2b877fb 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -1117,6 +1117,7 @@ static const struct hda_device_id snd_hda_id_conexant[] = { HDA_CODEC_ENTRY(0x14f11f86, "CX8070", patch_conexant_auto), HDA_CODEC_ENTRY(0x14f12008, "CX8200", patch_conexant_auto), HDA_CODEC_ENTRY(0x14f120d0, "CX11970", patch_conexant_auto), + HDA_CODEC_ENTRY(0x14f120d1, "SN6180", patch_conexant_auto), HDA_CODEC_ENTRY(0x14f15045, "CX20549 (Venice)", patch_conexant_auto), HDA_CODEC_ENTRY(0x14f15047, "CX20551 (Waikiki)", patch_conexant_auto), HDA_CODEC_ENTRY(0x14f15051, "CX20561 (Hermosa)", patch_conexant_auto), diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index cfd86389d37f617e011e261954fa365a8c565093..f2ef75c8de42743df20b19af1c72aa7a1dc3a2c9 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -827,7 +827,7 @@ static int alc_subsystem_id(struct hda_codec *codec, const hda_nid_t *ports) alc_setup_gpio(codec, 0x02); break; case 7: - alc_setup_gpio(codec, 0x03); + alc_setup_gpio(codec, 0x04); break; case 5: default: @@ -8811,6 +8811,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1025, 0x142b, "Acer Swift SF314-42", ALC255_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1025, 0x1430, "Acer TravelMate B311R-31", ALC256_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1025, 0x1466, "Acer Aspire A515-56", ALC255_FIXUP_ACER_HEADPHONE_AND_MIC), + SND_PCI_QUIRK(0x1025, 0x1534, "Acer Predator PH315-54", ALC255_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), SND_PCI_QUIRK(0x1028, 0x053c, "Dell Latitude E5430", ALC292_FIXUP_DELL_E7X), SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS), @@ -9089,6 
+9090,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_AMP), SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_AMP), SND_PCI_QUIRK(0x144d, 0xc832, "Samsung Galaxy Book Flex Alpha (NP730QCJ)", ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), + SND_PCI_QUIRK(0x144d, 0xca03, "Samsung Galaxy Book2 Pro 360 (NP930QED)", ALC298_FIXUP_SAMSUNG_AMP), SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC), @@ -9260,6 +9262,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */ SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802), SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X), + SND_PCI_QUIRK(0x1c6c, 0x1251, "Positivo N14KP6-TG", ALC288_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_SET_COEF_DEFAULTS), SND_PCI_QUIRK(0x1d05, 0x1096, "TongFang GMxMRxx", ALC269_FIXUP_NO_SHUTUP), SND_PCI_QUIRK(0x1d05, 0x1100, "TongFang GKxNRxx", ALC269_FIXUP_NO_SHUTUP), @@ -11150,6 +11153,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800), + SND_PCI_QUIRK(0x103c, 0x870c, "HP", ALC897_FIXUP_HP_HSMIC_VERB), SND_PCI_QUIRK(0x103c, 0x8719, "HP", ALC897_FIXUP_HP_HSMIC_VERB), SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2), SND_PCI_QUIRK(0x103c, 0x877e, "HP 288 Pro G6", ALC671_FIXUP_HP_HEADSET_MIC2), diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c index a188901a83bbe778f93cebe44ec128402fe0d827..29abc96dc146c284f657a3c20cdddfaa46c95d75 100644 --- a/sound/pci/hda/patch_via.c +++ b/sound/pci/hda/patch_via.c @@ -821,6 +821,9 @@ static int add_secret_dac_path(struct hda_codec *codec) return 0; nums = snd_hda_get_connections(codec, spec->gen.mixer_nid, conn, ARRAY_SIZE(conn) - 1); + if (nums < 0) + return nums; + for (i = 0; i < nums; i++) { if (get_wcaps_type(get_wcaps(codec, conn[i])) == AC_WID_AUD_OUT) return 0; diff --git a/sound/pci/ice1712/aureon.c b/sound/pci/ice1712/aureon.c index 9a30f6d35d1358e31282002bc07d2d276b3c65e1..40a0e0095030179c7ccb642672d34990f4ab9b02 100644 --- a/sound/pci/ice1712/aureon.c +++ b/sound/pci/ice1712/aureon.c @@ -1892,6 +1892,7 @@ static int aureon_add_controls(struct snd_ice1712 *ice) unsigned char id; snd_ice1712_save_gpio_status(ice); id = aureon_cs8415_get(ice, CS8415_ID); + snd_ice1712_restore_gpio_status(ice); if (id != 0x41) dev_info(ice->card->dev, "No CS8415 chip. 
Skipping CS8415 controls.\n"); @@ -1909,7 +1910,6 @@ static int aureon_add_controls(struct snd_ice1712 *ice) kctl->id.device = ice->pcm->device; } } - snd_ice1712_restore_gpio_status(ice); } return 0; diff --git a/sound/pci/lx6464es/lx_core.c b/sound/pci/lx6464es/lx_core.c index f884f5a6a61c215974e9b34faf199fe935168b78..a49a3254f96771bc9bf9dc5687bbbf8e72c1197b 100644 --- a/sound/pci/lx6464es/lx_core.c +++ b/sound/pci/lx6464es/lx_core.c @@ -493,12 +493,11 @@ int lx_buffer_ask(struct lx6464es *chip, u32 pipe, int is_capture, dev_dbg(chip->card->dev, "CMD_08_ASK_BUFFERS: needed %d, freed %d\n", *r_needed, *r_freed); - for (i = 0; i < MAX_STREAM_BUFFER; ++i) { - for (i = 0; i != chip->rmh.stat_len; ++i) - dev_dbg(chip->card->dev, - " stat[%d]: %x, %x\n", i, - chip->rmh.stat[i], - chip->rmh.stat[i] & MASK_DATA_SIZE); + for (i = 0; i < MAX_STREAM_BUFFER && i < chip->rmh.stat_len; + ++i) { + dev_dbg(chip->card->dev, " stat[%d]: %x, %x\n", i, + chip->rmh.stat[i], + chip->rmh.stat[i] & MASK_DATA_SIZE); } } diff --git a/sound/soc/atmel/mchp-spdifrx.c b/sound/soc/atmel/mchp-spdifrx.c index 46f3407ed0e81c2448e4b1c29c6d3ef896bcb11d..39a3c2a33bdbb88d803a0b75f06ff639990cfbd9 100644 --- a/sound/soc/atmel/mchp-spdifrx.c +++ b/sound/soc/atmel/mchp-spdifrx.c @@ -56,7 +56,7 @@ /* Validity Bit Mode */ #define SPDIFRX_MR_VBMODE_MASK GENAMSK(1, 1) #define SPDIFRX_MR_VBMODE_ALWAYS_LOAD \ - (0 << 1) /* Load sample regardles of validity bit value */ + (0 << 1) /* Load sample regardless of validity bit value */ #define SPDIFRX_MR_VBMODE_DISCARD_IF_VB1 \ (1 << 1) /* Load sample only if validity bit is 0 */ @@ -217,7 +217,6 @@ struct mchp_spdifrx_ch_stat { struct mchp_spdifrx_user_data { unsigned char data[SPDIFRX_UD_BITS / 8]; struct completion done; - spinlock_t lock; /* protect access to user data */ }; struct mchp_spdifrx_mixer_control { @@ -231,13 +230,13 @@ struct mchp_spdifrx_mixer_control { struct mchp_spdifrx_dev { struct snd_dmaengine_dai_dma_data capture; struct mchp_spdifrx_mixer_control control; - spinlock_t blockend_lock; /* protect access to blockend_refcount */ - int blockend_refcount; + struct mutex mlock; struct device *dev; struct regmap *regmap; struct clk *pclk; struct clk *gclk; unsigned int fmt; + unsigned int trigger_enabled; unsigned int gclk_enabled:1; }; @@ -275,37 +274,11 @@ static void mchp_spdifrx_channel_user_data_read(struct mchp_spdifrx_dev *dev, } } -/* called from non-atomic context only */ -static void mchp_spdifrx_isr_blockend_en(struct mchp_spdifrx_dev *dev) -{ - unsigned long flags; - - spin_lock_irqsave(&dev->blockend_lock, flags); - dev->blockend_refcount++; - /* don't enable BLOCKEND interrupt if it's already enabled */ - if (dev->blockend_refcount == 1) - regmap_write(dev->regmap, SPDIFRX_IER, SPDIFRX_IR_BLOCKEND); - spin_unlock_irqrestore(&dev->blockend_lock, flags); -} - -/* called from atomic/non-atomic context */ -static void mchp_spdifrx_isr_blockend_dis(struct mchp_spdifrx_dev *dev) -{ - unsigned long flags; - - spin_lock_irqsave(&dev->blockend_lock, flags); - dev->blockend_refcount--; - /* don't enable BLOCKEND interrupt if it's already enabled */ - if (dev->blockend_refcount == 0) - regmap_write(dev->regmap, SPDIFRX_IDR, SPDIFRX_IR_BLOCKEND); - spin_unlock_irqrestore(&dev->blockend_lock, flags); -} - static irqreturn_t mchp_spdif_interrupt(int irq, void *dev_id) { struct mchp_spdifrx_dev *dev = dev_id; struct mchp_spdifrx_mixer_control *ctrl = &dev->control; - u32 sr, imr, pending, idr = 0; + u32 sr, imr, pending; irqreturn_t ret = IRQ_NONE; int ch; @@ -320,13 +293,10 
@@ static irqreturn_t mchp_spdif_interrupt(int irq, void *dev_id) if (pending & SPDIFRX_IR_BLOCKEND) { for (ch = 0; ch < SPDIFRX_CHANNELS; ch++) { - spin_lock(&ctrl->user_data[ch].lock); mchp_spdifrx_channel_user_data_read(dev, ch); - spin_unlock(&ctrl->user_data[ch].lock); - complete(&ctrl->user_data[ch].done); } - mchp_spdifrx_isr_blockend_dis(dev); + regmap_write(dev->regmap, SPDIFRX_IDR, SPDIFRX_IR_BLOCKEND); ret = IRQ_HANDLED; } @@ -334,7 +304,7 @@ static irqreturn_t mchp_spdif_interrupt(int irq, void *dev_id) if (pending & SPDIFRX_IR_CSC(ch)) { mchp_spdifrx_channel_status_read(dev, ch); complete(&ctrl->ch_stat[ch].done); - idr |= SPDIFRX_IR_CSC(ch); + regmap_write(dev->regmap, SPDIFRX_IDR, SPDIFRX_IR_CSC(ch)); ret = IRQ_HANDLED; } } @@ -344,8 +314,6 @@ static irqreturn_t mchp_spdif_interrupt(int irq, void *dev_id) ret = IRQ_HANDLED; } - regmap_write(dev->regmap, SPDIFRX_IDR, idr); - return ret; } @@ -353,47 +321,40 @@ static int mchp_spdifrx_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { struct mchp_spdifrx_dev *dev = snd_soc_dai_get_drvdata(dai); - u32 mr; - int running; - int ret; - - regmap_read(dev->regmap, SPDIFRX_MR, &mr); - running = !!(mr & SPDIFRX_MR_RXEN_ENABLE); + int ret = 0; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: - if (!running) { - mr &= ~SPDIFRX_MR_RXEN_MASK; - mr |= SPDIFRX_MR_RXEN_ENABLE; - /* enable overrun interrupts */ - regmap_write(dev->regmap, SPDIFRX_IER, - SPDIFRX_IR_OVERRUN); - } + mutex_lock(&dev->mlock); + /* Enable overrun interrupts */ + regmap_write(dev->regmap, SPDIFRX_IER, SPDIFRX_IR_OVERRUN); + + /* Enable receiver. */ + regmap_update_bits(dev->regmap, SPDIFRX_MR, SPDIFRX_MR_RXEN_MASK, + SPDIFRX_MR_RXEN_ENABLE); + dev->trigger_enabled = true; + mutex_unlock(&dev->mlock); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: - if (running) { - mr &= ~SPDIFRX_MR_RXEN_MASK; - mr |= SPDIFRX_MR_RXEN_DISABLE; - /* disable overrun interrupts */ - regmap_write(dev->regmap, SPDIFRX_IDR, - SPDIFRX_IR_OVERRUN); - } + mutex_lock(&dev->mlock); + /* Disable overrun interrupts */ + regmap_write(dev->regmap, SPDIFRX_IDR, SPDIFRX_IR_OVERRUN); + + /* Disable receiver. 
*/ + regmap_update_bits(dev->regmap, SPDIFRX_MR, SPDIFRX_MR_RXEN_MASK, + SPDIFRX_MR_RXEN_DISABLE); + dev->trigger_enabled = false; + mutex_unlock(&dev->mlock); break; default: - return -EINVAL; - } - - ret = regmap_write(dev->regmap, SPDIFRX_MR, mr); - if (ret) { - dev_err(dev->dev, "unable to enable/disable RX: %d\n", ret); - return ret; + ret = -EINVAL; } - return 0; + return ret; } static int mchp_spdifrx_hw_params(struct snd_pcm_substream *substream, @@ -401,7 +362,7 @@ static int mchp_spdifrx_hw_params(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct mchp_spdifrx_dev *dev = snd_soc_dai_get_drvdata(dai); - u32 mr; + u32 mr = 0; int ret; dev_dbg(dev->dev, "%s() rate=%u format=%#x width=%u channels=%u\n", @@ -413,13 +374,6 @@ static int mchp_spdifrx_hw_params(struct snd_pcm_substream *substream, return -EINVAL; } - regmap_read(dev->regmap, SPDIFRX_MR, &mr); - - if (mr & SPDIFRX_MR_RXEN_ENABLE) { - dev_err(dev->dev, "PCM already running\n"); - return -EBUSY; - } - if (params_channels(params) != SPDIFRX_CHANNELS) { dev_err(dev->dev, "unsupported number of channels: %d\n", params_channels(params)); @@ -445,6 +399,13 @@ static int mchp_spdifrx_hw_params(struct snd_pcm_substream *substream, return -EINVAL; } + mutex_lock(&dev->mlock); + if (dev->trigger_enabled) { + dev_err(dev->dev, "PCM already running\n"); + ret = -EBUSY; + goto unlock; + } + if (dev->gclk_enabled) { clk_disable_unprepare(dev->gclk); dev->gclk_enabled = 0; @@ -455,19 +416,24 @@ static int mchp_spdifrx_hw_params(struct snd_pcm_substream *substream, dev_err(dev->dev, "unable to set gclk min rate: rate %u * ratio %u + 1\n", params_rate(params), SPDIFRX_GCLK_RATIO_MIN); - return ret; + goto unlock; } ret = clk_prepare_enable(dev->gclk); if (ret) { dev_err(dev->dev, "unable to enable gclk: %d\n", ret); - return ret; + goto unlock; } dev->gclk_enabled = 1; dev_dbg(dev->dev, "GCLK range min set to %d\n", params_rate(params) * SPDIFRX_GCLK_RATIO_MIN + 1); - return regmap_write(dev->regmap, SPDIFRX_MR, mr); + ret = regmap_write(dev->regmap, SPDIFRX_MR, mr); + +unlock: + mutex_unlock(&dev->mlock); + + return ret; } static int mchp_spdifrx_hw_free(struct snd_pcm_substream *substream, @@ -475,10 +441,12 @@ static int mchp_spdifrx_hw_free(struct snd_pcm_substream *substream, { struct mchp_spdifrx_dev *dev = snd_soc_dai_get_drvdata(dai); + mutex_lock(&dev->mlock); if (dev->gclk_enabled) { clk_disable_unprepare(dev->gclk); dev->gclk_enabled = 0; } + mutex_unlock(&dev->mlock); return 0; } @@ -515,22 +483,51 @@ static int mchp_spdifrx_cs_get(struct mchp_spdifrx_dev *dev, { struct mchp_spdifrx_mixer_control *ctrl = &dev->control; struct mchp_spdifrx_ch_stat *ch_stat = &ctrl->ch_stat[channel]; - int ret; - - regmap_write(dev->regmap, SPDIFRX_IER, SPDIFRX_IR_CSC(channel)); - /* check for new data available */ - ret = wait_for_completion_interruptible_timeout(&ch_stat->done, - msecs_to_jiffies(100)); - /* IP might not be started or valid stream might not be prezent */ - if (ret < 0) { - dev_dbg(dev->dev, "channel status for channel %d timeout\n", - channel); + int ret = 0; + + mutex_lock(&dev->mlock); + + /* + * We may reach this point with both clocks enabled but the receiver + * still disabled. To void waiting for completion and return with + * timeout check the dev->trigger_enabled. 
+ * + * To retrieve data: + * - if the receiver is enabled CSC IRQ will update the data in software + * caches (ch_stat->data) + * - otherwise we just update it here the software caches with latest + * available information and return it; in this case we don't need + * spin locking as the IRQ is disabled and will not be raised from + * anywhere else. + */ + + if (dev->trigger_enabled) { + reinit_completion(&ch_stat->done); + regmap_write(dev->regmap, SPDIFRX_IER, SPDIFRX_IR_CSC(channel)); + /* Check for new data available */ + ret = wait_for_completion_interruptible_timeout(&ch_stat->done, + msecs_to_jiffies(100)); + /* Valid stream might not be present */ + if (ret <= 0) { + dev_dbg(dev->dev, "channel status for channel %d timeout\n", + channel); + regmap_write(dev->regmap, SPDIFRX_IDR, SPDIFRX_IR_CSC(channel)); + ret = ret ? : -ETIMEDOUT; + goto unlock; + } else { + ret = 0; + } + } else { + /* Update software cache with latest channel status. */ + mchp_spdifrx_channel_status_read(dev, channel); } memcpy(uvalue->value.iec958.status, ch_stat->data, sizeof(ch_stat->data)); - return 0; +unlock: + mutex_unlock(&dev->mlock); + return ret; } static int mchp_spdifrx_cs1_get(struct snd_kcontrol *kcontrol, @@ -564,29 +561,49 @@ static int mchp_spdifrx_subcode_ch_get(struct mchp_spdifrx_dev *dev, int channel, struct snd_ctl_elem_value *uvalue) { - unsigned long flags; struct mchp_spdifrx_mixer_control *ctrl = &dev->control; struct mchp_spdifrx_user_data *user_data = &ctrl->user_data[channel]; - int ret; - - reinit_completion(&user_data->done); - mchp_spdifrx_isr_blockend_en(dev); - ret = wait_for_completion_interruptible_timeout(&user_data->done, - msecs_to_jiffies(100)); - /* IP might not be started or valid stream might not be prezent */ - if (ret <= 0) { - dev_dbg(dev->dev, "user data for channel %d timeout\n", - channel); - mchp_spdifrx_isr_blockend_dis(dev); - return ret; + int ret = 0; + + mutex_lock(&dev->mlock); + + /* + * We may reach this point with both clocks enabled but the receiver + * still disabled. To void waiting for completion to just timeout we + * check here the dev->trigger_enabled flag. + * + * To retrieve data: + * - if the receiver is enabled we need to wait for blockend IRQ to read + * data to and update it for us in software caches + * - otherwise reading the SPDIFRX_CHUD() registers is enough. + */ + + if (dev->trigger_enabled) { + reinit_completion(&user_data->done); + regmap_write(dev->regmap, SPDIFRX_IER, SPDIFRX_IR_BLOCKEND); + ret = wait_for_completion_interruptible_timeout(&user_data->done, + msecs_to_jiffies(100)); + /* Valid stream might not be present. */ + if (ret <= 0) { + dev_dbg(dev->dev, "user data for channel %d timeout\n", + channel); + regmap_write(dev->regmap, SPDIFRX_IDR, SPDIFRX_IR_BLOCKEND); + ret = ret ? : -ETIMEDOUT; + goto unlock; + } else { + ret = 0; + } + } else { + /* Update software cache with last available data. 
*/ + mchp_spdifrx_channel_user_data_read(dev, channel); } - spin_lock_irqsave(&user_data->lock, flags); memcpy(uvalue->value.iec958.subcode, user_data->data, sizeof(user_data->data)); - spin_unlock_irqrestore(&user_data->lock, flags); - return 0; +unlock: + mutex_unlock(&dev->mlock); + return ret; } static int mchp_spdifrx_subcode_ch1_get(struct snd_kcontrol *kcontrol, @@ -627,10 +644,24 @@ static int mchp_spdifrx_ulock_get(struct snd_kcontrol *kcontrol, u32 val; bool ulock_old = ctrl->ulock; - regmap_read(dev->regmap, SPDIFRX_RSR, &val); - ctrl->ulock = !(val & SPDIFRX_RSR_ULOCK); + mutex_lock(&dev->mlock); + + /* + * The RSR.ULOCK has wrong value if both pclk and gclk are enabled + * and the receiver is disabled. Thus we take into account the + * dev->trigger_enabled here to return a real status. + */ + if (dev->trigger_enabled) { + regmap_read(dev->regmap, SPDIFRX_RSR, &val); + ctrl->ulock = !(val & SPDIFRX_RSR_ULOCK); + } else { + ctrl->ulock = 0; + } + uvalue->value.integer.value[0] = ctrl->ulock; + mutex_unlock(&dev->mlock); + return ulock_old != ctrl->ulock; } @@ -643,8 +674,22 @@ static int mchp_spdifrx_badf_get(struct snd_kcontrol *kcontrol, u32 val; bool badf_old = ctrl->badf; - regmap_read(dev->regmap, SPDIFRX_RSR, &val); - ctrl->badf = !!(val & SPDIFRX_RSR_BADF); + mutex_lock(&dev->mlock); + + /* + * The RSR.ULOCK has wrong value if both pclk and gclk are enabled + * and the receiver is disabled. Thus we take into account the + * dev->trigger_enabled here to return a real status. + */ + if (dev->trigger_enabled) { + regmap_read(dev->regmap, SPDIFRX_RSR, &val); + ctrl->badf = !!(val & SPDIFRX_RSR_BADF); + } else { + ctrl->badf = 0; + } + + mutex_unlock(&dev->mlock); + uvalue->value.integer.value[0] = ctrl->badf; return badf_old != ctrl->badf; @@ -656,11 +701,48 @@ static int mchp_spdifrx_signal_get(struct snd_kcontrol *kcontrol, struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol); struct mchp_spdifrx_dev *dev = snd_soc_dai_get_drvdata(dai); struct mchp_spdifrx_mixer_control *ctrl = &dev->control; - u32 val; + u32 val = ~0U, loops = 10; + int ret; bool signal_old = ctrl->signal; - regmap_read(dev->regmap, SPDIFRX_RSR, &val); - ctrl->signal = !(val & SPDIFRX_RSR_NOSIGNAL); + mutex_lock(&dev->mlock); + + /* + * To get the signal we need to have receiver enabled. This + * could be enabled also from trigger() function thus we need to + * take care of not disabling the receiver when it runs. + */ + if (!dev->trigger_enabled) { + ret = clk_prepare_enable(dev->gclk); + if (ret) + goto unlock; + + regmap_update_bits(dev->regmap, SPDIFRX_MR, SPDIFRX_MR_RXEN_MASK, + SPDIFRX_MR_RXEN_ENABLE); + + /* Wait for RSR.ULOCK bit. 
*/ + while (--loops) { + regmap_read(dev->regmap, SPDIFRX_RSR, &val); + if (!(val & SPDIFRX_RSR_ULOCK)) + break; + usleep_range(100, 150); + } + + regmap_update_bits(dev->regmap, SPDIFRX_MR, SPDIFRX_MR_RXEN_MASK, + SPDIFRX_MR_RXEN_DISABLE); + + clk_disable_unprepare(dev->gclk); + } else { + regmap_read(dev->regmap, SPDIFRX_RSR, &val); + } + +unlock: + mutex_unlock(&dev->mlock); + + if (!(val & SPDIFRX_RSR_ULOCK)) + ctrl->signal = !(val & SPDIFRX_RSR_NOSIGNAL); + else + ctrl->signal = 0; uvalue->value.integer.value[0] = ctrl->signal; return signal_old != ctrl->signal; @@ -685,18 +767,32 @@ static int mchp_spdifrx_rate_get(struct snd_kcontrol *kcontrol, u32 val; int rate; - regmap_read(dev->regmap, SPDIFRX_RSR, &val); - - /* if the receiver is not locked, ISF data is invalid */ - if (val & SPDIFRX_RSR_ULOCK || !(val & SPDIFRX_RSR_IFS_MASK)) { + mutex_lock(&dev->mlock); + + /* + * The RSR.ULOCK has wrong value if both pclk and gclk are enabled + * and the receiver is disabled. Thus we take into account the + * dev->trigger_enabled here to return a real status. + */ + if (dev->trigger_enabled) { + regmap_read(dev->regmap, SPDIFRX_RSR, &val); + /* If the receiver is not locked, ISF data is invalid. */ + if (val & SPDIFRX_RSR_ULOCK || !(val & SPDIFRX_RSR_IFS_MASK)) { + ucontrol->value.integer.value[0] = 0; + goto unlock; + } + } else { + /* Reveicer is not locked, IFS data is invalid. */ ucontrol->value.integer.value[0] = 0; - return 0; + goto unlock; } rate = clk_get_rate(dev->gclk); ucontrol->value.integer.value[0] = rate / (32 * SPDIFRX_RSR_IFS(val)); +unlock: + mutex_unlock(&dev->mlock); return 0; } @@ -808,11 +904,9 @@ static int mchp_spdifrx_dai_probe(struct snd_soc_dai *dai) SPDIFRX_MR_AUTORST_NOACTION | SPDIFRX_MR_PACK_DISABLED); - dev->blockend_refcount = 0; for (ch = 0; ch < SPDIFRX_CHANNELS; ch++) { init_completion(&ctrl->ch_stat[ch].done); init_completion(&ctrl->user_data[ch].done); - spin_lock_init(&ctrl->user_data[ch].lock); } /* Add controls */ @@ -827,7 +921,7 @@ static int mchp_spdifrx_dai_remove(struct snd_soc_dai *dai) struct mchp_spdifrx_dev *dev = snd_soc_dai_get_drvdata(dai); /* Disable interrupts */ - regmap_write(dev->regmap, SPDIFRX_IDR, 0xFF); + regmap_write(dev->regmap, SPDIFRX_IDR, GENMASK(14, 0)); clk_disable_unprepare(dev->pclk); @@ -912,7 +1006,17 @@ static int mchp_spdifrx_probe(struct platform_device *pdev) "failed to get the PMC generated clock: %d\n", err); return err; } - spin_lock_init(&dev->blockend_lock); + + /* + * Signal control need a valid rate on gclk. hw_params() configures + * it propertly but requesting signal before any hw_params() has been + * called lead to invalid value returned for signal. Thus, configure + * gclk at a valid rate, here, in initialization, to simplify the + * control path. 
+ */ + clk_set_min_rate(dev->gclk, 48000 * SPDIFRX_GCLK_RATIO_MIN + 1); + + mutex_init(&dev->mlock); dev->dev = &pdev->dev; dev->regmap = regmap; diff --git a/sound/soc/atmel/mchp-spdiftx.c b/sound/soc/atmel/mchp-spdiftx.c index 0d2e3fa21519c6cfced2b03ff8678ae31354730a..bcca1cf3cd7b6d217647b9ab42774bb24fae7395 100644 --- a/sound/soc/atmel/mchp-spdiftx.c +++ b/sound/soc/atmel/mchp-spdiftx.c @@ -80,7 +80,7 @@ #define SPDIFTX_MR_VALID1 BIT(24) #define SPDIFTX_MR_VALID2 BIT(25) -/* Disable Null Frame on underrrun */ +/* Disable Null Frame on underrun */ #define SPDIFTX_MR_DNFR_MASK GENMASK(27, 27) #define SPDIFTX_MR_DNFR_INVALID (0 << 27) #define SPDIFTX_MR_DNFR_VALID (1 << 27) diff --git a/sound/soc/atmel/tse850-pcm5142.c b/sound/soc/atmel/tse850-pcm5142.c index 59e2edb22b3adc809c19e2adc902aee153e1ee85..50c3dc6936f9072430c85d4b1cae804c610a74bc 100644 --- a/sound/soc/atmel/tse850-pcm5142.c +++ b/sound/soc/atmel/tse850-pcm5142.c @@ -23,7 +23,7 @@ // IN2 +---o--+------------+--o---+ OUT2 // loop2 relays // -// The 'loop1' gpio pin controlls two relays, which are either in loop +// The 'loop1' gpio pin controls two relays, which are either in loop // position, meaning that input and output are directly connected, or // they are in mixer position, meaning that the signal is passed through // the 'Sum' mixer. Similarly for 'loop2'. diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index 25f331551f6895d0d0267f0500cc1ccd4f6d2d7a..f1c9e563994b2bfc0762a1b3db23663222299377 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig @@ -1701,7 +1701,7 @@ config SND_SOC_WSA881X config SND_SOC_ZL38060 tristate "Microsemi ZL38060 Connected Home Audio Processor" depends on SPI_MASTER - select GPIOLIB + depends on GPIOLIB select REGMAP help Support for ZL38060 Connected Home Audio Processor from Microsemi, diff --git a/sound/soc/codecs/adau7118.c b/sound/soc/codecs/adau7118.c index 841229dcbca108f49f7abaa1a5adb46cec19beeb..305f294b7710e29d21f0c0da2d3b80ad70e13c17 100644 --- a/sound/soc/codecs/adau7118.c +++ b/sound/soc/codecs/adau7118.c @@ -445,22 +445,6 @@ static const struct snd_soc_component_driver adau7118_component_driver = { .non_legacy_dai_naming = 1, }; -static void adau7118_regulator_disable(void *data) -{ - struct adau7118_data *st = data; - int ret; - /* - * If we fail to disable DVDD, don't bother in trying IOVDD. We - * actually don't want to be left in the situation where DVDD - * is enabled and IOVDD is disabled. 
- */ - ret = regulator_disable(st->dvdd); - if (ret) - return; - - regulator_disable(st->iovdd); -} - static int adau7118_regulator_setup(struct adau7118_data *st) { st->iovdd = devm_regulator_get(st->dev, "iovdd"); @@ -482,8 +466,7 @@ static int adau7118_regulator_setup(struct adau7118_data *st) regcache_cache_only(st->map, true); } - return devm_add_action_or_reset(st->dev, adau7118_regulator_disable, - st); + return 0; } static int adau7118_parset_dt(const struct adau7118_data *st) diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c index d41e0319310615fc70172158a1bca5616f24ceb2..3c5ec47a8fe64aa83a1e75281e28ce417b4809f8 100644 --- a/sound/soc/codecs/cs42l56.c +++ b/sound/soc/codecs/cs42l56.c @@ -1193,18 +1193,12 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client, if (pdata) { cs42l56->pdata = *pdata; } else { - pdata = devm_kzalloc(&i2c_client->dev, sizeof(*pdata), - GFP_KERNEL); - if (!pdata) - return -ENOMEM; - if (i2c_client->dev.of_node) { ret = cs42l56_handle_of_data(i2c_client, &cs42l56->pdata); if (ret != 0) return ret; } - cs42l56->pdata = *pdata; } if (cs42l56->pdata.gpio_nreset) { diff --git a/sound/soc/codecs/tlv320adcx140.c b/sound/soc/codecs/tlv320adcx140.c index 53a80246aee19227a19b2f6d9b9fd44b6ea094b0..a6241a0453694eda468f121ef11cd8146250befb 100644 --- a/sound/soc/codecs/tlv320adcx140.c +++ b/sound/soc/codecs/tlv320adcx140.c @@ -870,7 +870,7 @@ static int adcx140_configure_gpio(struct adcx140_priv *adcx140) gpio_count = device_property_count_u32(adcx140->dev, "ti,gpio-config"); - if (gpio_count == 0) + if (gpio_count <= 0) return 0; if (gpio_count != ADCX140_NUM_GPIO_CFGS) diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c index 7cd14d6b9436aac98aa595262d817dacf298a1c4..9a756d0a60320ac202c9f8d6710f6451dae665cc 100644 --- a/sound/soc/fsl/fsl-asoc-card.c +++ b/sound/soc/fsl/fsl-asoc-card.c @@ -117,11 +117,11 @@ static const struct snd_soc_dapm_route audio_map[] = { static const struct snd_soc_dapm_route audio_map_ac97[] = { /* 1st half -- Normal DAPM routes */ - {"Playback", NULL, "AC97 Playback"}, - {"AC97 Capture", NULL, "Capture"}, + {"AC97 Playback", NULL, "CPU AC97 Playback"}, + {"CPU AC97 Capture", NULL, "AC97 Capture"}, /* 2nd half -- ASRC DAPM routes */ - {"AC97 Playback", NULL, "ASRC-Playback"}, - {"ASRC-Capture", NULL, "AC97 Capture"}, + {"CPU AC97 Playback", NULL, "ASRC-Playback"}, + {"ASRC-Capture", NULL, "CPU AC97 Capture"}, }; static const struct snd_soc_dapm_route audio_map_tx[] = { diff --git a/sound/soc/fsl/fsl_micfil.c b/sound/soc/fsl/fsl_micfil.c index 6c794605e33c9c481ae5e018f0ac062238cb5a72..97f83c63e7652f47a5e5788ccd9c23148b789c99 100644 --- a/sound/soc/fsl/fsl_micfil.c +++ b/sound/soc/fsl/fsl_micfil.c @@ -87,21 +87,21 @@ static DECLARE_TLV_DB_SCALE(gain_tlv, 0, 100, 0); static const struct snd_kcontrol_new fsl_micfil_snd_controls[] = { SOC_SINGLE_SX_TLV("CH0 Volume", REG_MICFIL_OUT_CTRL, - MICFIL_OUTGAIN_CHX_SHIFT(0), 0xF, 0x7, gain_tlv), + MICFIL_OUTGAIN_CHX_SHIFT(0), 0x8, 0xF, gain_tlv), SOC_SINGLE_SX_TLV("CH1 Volume", REG_MICFIL_OUT_CTRL, - MICFIL_OUTGAIN_CHX_SHIFT(1), 0xF, 0x7, gain_tlv), + MICFIL_OUTGAIN_CHX_SHIFT(1), 0x8, 0xF, gain_tlv), SOC_SINGLE_SX_TLV("CH2 Volume", REG_MICFIL_OUT_CTRL, - MICFIL_OUTGAIN_CHX_SHIFT(2), 0xF, 0x7, gain_tlv), + MICFIL_OUTGAIN_CHX_SHIFT(2), 0x8, 0xF, gain_tlv), SOC_SINGLE_SX_TLV("CH3 Volume", REG_MICFIL_OUT_CTRL, - MICFIL_OUTGAIN_CHX_SHIFT(3), 0xF, 0x7, gain_tlv), + MICFIL_OUTGAIN_CHX_SHIFT(3), 0x8, 0xF, gain_tlv), SOC_SINGLE_SX_TLV("CH4 Volume", 
REG_MICFIL_OUT_CTRL, - MICFIL_OUTGAIN_CHX_SHIFT(4), 0xF, 0x7, gain_tlv), + MICFIL_OUTGAIN_CHX_SHIFT(4), 0x8, 0xF, gain_tlv), SOC_SINGLE_SX_TLV("CH5 Volume", REG_MICFIL_OUT_CTRL, - MICFIL_OUTGAIN_CHX_SHIFT(5), 0xF, 0x7, gain_tlv), + MICFIL_OUTGAIN_CHX_SHIFT(5), 0x8, 0xF, gain_tlv), SOC_SINGLE_SX_TLV("CH6 Volume", REG_MICFIL_OUT_CTRL, - MICFIL_OUTGAIN_CHX_SHIFT(6), 0xF, 0x7, gain_tlv), + MICFIL_OUTGAIN_CHX_SHIFT(6), 0x8, 0xF, gain_tlv), SOC_SINGLE_SX_TLV("CH7 Volume", REG_MICFIL_OUT_CTRL, - MICFIL_OUTGAIN_CHX_SHIFT(7), 0xF, 0x7, gain_tlv), + MICFIL_OUTGAIN_CHX_SHIFT(7), 0x8, 0xF, gain_tlv), SOC_ENUM_EXT("MICFIL Quality Select", fsl_micfil_quality_enum, snd_soc_get_enum_double, snd_soc_put_enum_double), diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c index 3e5c1eaccd5e7973dd280374bb83410b28071b8f..6a5d2b08e2713375420bac0e497ee3b6940f84fb 100644 --- a/sound/soc/fsl/fsl_sai.c +++ b/sound/soc/fsl/fsl_sai.c @@ -230,6 +230,7 @@ static int fsl_sai_set_dai_fmt_tr(struct snd_soc_dai *cpu_dai, if (!sai->is_lsb_first) val_cr4 |= FSL_SAI_CR4_MF; + sai->is_dsp_mode = false; /* DAI mode */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c index 1d774c876c52eafdab1a2e0b69c5b3dd2039441a..94229ce1a30ef7dc3864e7cf084107b3d53368d3 100644 --- a/sound/soc/fsl/fsl_ssi.c +++ b/sound/soc/fsl/fsl_ssi.c @@ -1161,14 +1161,14 @@ static struct snd_soc_dai_driver fsl_ssi_ac97_dai = { .symmetric_channels = 1, .probe = fsl_ssi_dai_probe, .playback = { - .stream_name = "AC97 Playback", + .stream_name = "CPU AC97 Playback", .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_48000, .formats = SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_S20, }, .capture = { - .stream_name = "AC97 Capture", + .stream_name = "CPU AC97 Capture", .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_48000, diff --git a/sound/soc/intel/boards/sof_rt5682.c b/sound/soc/intel/boards/sof_rt5682.c index 1f94fa5a15db6b5613e0bd4f45b00d31c6af442e..5883d1fa3b7edf4acf91c834e048cef3b9b57648 100644 --- a/sound/soc/intel/boards/sof_rt5682.c +++ b/sound/soc/intel/boards/sof_rt5682.c @@ -704,6 +704,9 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev, links[id].num_platforms = ARRAY_SIZE(platform_component); links[id].nonatomic = true; links[id].dpcm_playback = 1; + /* feedback stream or firmware-generated echo reference */ + links[id].dpcm_capture = 1; + links[id].no_pcm = 1; links[id].cpus = &cpus[id]; links[id].num_cpus = 1; diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c index e037826b24517c62facec338533783530b9e4434..2d41e6ab2ce4e0edf4e0b9be0aba00bbabb7bd26 100644 --- a/sound/soc/kirkwood/kirkwood-dma.c +++ b/sound/soc/kirkwood/kirkwood-dma.c @@ -86,7 +86,7 @@ kirkwood_dma_conf_mbus_windows(void __iomem *base, int win, /* try to find matching cs for current dma address */ for (i = 0; i < dram->num_cs; i++) { - const struct mbus_dram_window *cs = dram->cs + i; + const struct mbus_dram_window *cs = &dram->cs[i]; if ((cs->base & 0xffff0000) < (dma & 0xffff0000)) { writel(cs->base & 0xffff0000, base + KIRKWOOD_AUDIO_WIN_BASE_REG(win)); diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c index d0f3ff8edd904e2b9765c939c219786412dd9163..8f4ebb189e019a9477f80949baa5a16cec5e5a3d 100644 --- a/sound/soc/soc-compress.c +++ b/sound/soc/soc-compress.c @@ -822,7 +822,7 @@ int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num) rtd->fe_compr = 1; if 
(rtd->dai_link->dpcm_playback) be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd; - else if (rtd->dai_link->dpcm_capture) + if (rtd->dai_link->dpcm_capture) be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd; memcpy(compr->ops, &soc_compr_dyn_ops, sizeof(soc_compr_dyn_ops)); } else { diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c index de80f1b3d7f255923805429d9d1a34a494555b43..a6275cc92a4055c52123ea9166e76079794fadfd 100644 --- a/sound/soc/sof/intel/hda-dai.c +++ b/sound/soc/sof/intel/hda-dai.c @@ -212,6 +212,10 @@ static int hda_link_hw_params(struct snd_pcm_substream *substream, int stream_tag; int ret; + link = snd_hdac_ext_bus_get_link(bus, codec_dai->component->name); + if (!link) + return -EINVAL; + /* get stored dma data if resuming from system suspend */ link_dev = snd_soc_dai_get_dma_data(dai, substream); if (!link_dev) { @@ -232,10 +236,6 @@ static int hda_link_hw_params(struct snd_pcm_substream *substream, if (ret < 0) return ret; - link = snd_hdac_ext_bus_get_link(bus, codec_dai->component->name); - if (!link) - return -EINVAL; - /* set the hdac_stream in the codec dai */ snd_soc_dai_set_stream(codec_dai, hdac_stream(link_dev), substream->stream); diff --git a/sound/synth/emux/emux_nrpn.c b/sound/synth/emux/emux_nrpn.c index 7eed5791972ccbb8b22a6197f10fc5ea50185d28..a7d83182f7d285e0f0a8510b898daf43a388eef7 100644 --- a/sound/synth/emux/emux_nrpn.c +++ b/sound/synth/emux/emux_nrpn.c @@ -349,6 +349,9 @@ int snd_emux_xg_control(struct snd_emux_port *port, struct snd_midi_channel *chan, int param) { + if (param >= ARRAY_SIZE(chan->control)) + return -EINVAL; + return send_converted_effect(xg_effects, ARRAY_SIZE(xg_effects), port, chan, param, chan->control[param], diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 592536904dde2d59a152c838a39273111df77495..d2bcce627b32064dbbec9e4fe3c0b904db78a13f 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -1912,10 +1912,38 @@ static void profile_close_perf_events(struct profiler_bpf *obj) profile_perf_event_cnt = 0; } +static int profile_open_perf_event(int mid, int cpu, int map_fd) +{ + int pmu_fd; + + pmu_fd = syscall(__NR_perf_event_open, &metrics[mid].attr, + -1 /*pid*/, cpu, -1 /*group_fd*/, 0); + if (pmu_fd < 0) { + if (errno == ENODEV) { + p_info("cpu %d may be offline, skip %s profiling.", + cpu, metrics[mid].name); + profile_perf_event_cnt++; + return 0; + } + return -1; + } + + if (bpf_map_update_elem(map_fd, + &profile_perf_event_cnt, + &pmu_fd, BPF_ANY) || + ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) { + close(pmu_fd); + return -1; + } + + profile_perf_events[profile_perf_event_cnt++] = pmu_fd; + return 0; +} + static int profile_open_perf_events(struct profiler_bpf *obj) { unsigned int cpu, m; - int map_fd, pmu_fd; + int map_fd; profile_perf_events = calloc( sizeof(int), obj->rodata->num_cpu * obj->rodata->num_metric); @@ -1934,17 +1962,11 @@ static int profile_open_perf_events(struct profiler_bpf *obj) if (!metrics[m].selected) continue; for (cpu = 0; cpu < obj->rodata->num_cpu; cpu++) { - pmu_fd = syscall(__NR_perf_event_open, &metrics[m].attr, - -1/*pid*/, cpu, -1/*group_fd*/, 0); - if (pmu_fd < 0 || - bpf_map_update_elem(map_fd, &profile_perf_event_cnt, - &pmu_fd, BPF_ANY) || - ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) { + if (profile_open_perf_event(m, cpu, map_fd)) { p_err("failed to create event %s on cpu %d", metrics[m].name, cpu); return -1; } - profile_perf_events[profile_perf_event_cnt++] = pmu_fd; } } return 
0; diff --git a/tools/gpio/gpio-event-mon.c b/tools/gpio/gpio-event-mon.c index 84ae1039b0a877a2b293e1d0132ed9d5e2a20761..367c10636890a68de9f63d7b70bc351e5db9d6c4 100644 --- a/tools/gpio/gpio-event-mon.c +++ b/tools/gpio/gpio-event-mon.c @@ -86,6 +86,7 @@ int monitor_device(const char *device_name, gpiotools_test_bit(values.bits, i)); } + i = 0; while (1) { struct gpio_v2_line_event event; diff --git a/tools/iio/iio_utils.c b/tools/iio/iio_utils.c index d66b18c54606ab005e5fb8fc3662dc484b8a6381..48360994c2a13d9eaf6e0860a0848cf876326044 100644 --- a/tools/iio/iio_utils.c +++ b/tools/iio/iio_utils.c @@ -262,6 +262,7 @@ int iioutils_get_param_float(float *output, const char *param_name, if (fscanf(sysfsfp, "%f", output) != 1) ret = errno ? -errno : -ENODATA; + fclose(sysfsfp); break; } error_free_filename: @@ -342,9 +343,9 @@ int build_channel_array(const char *device_dir, } sysfsfp = fopen(filename, "r"); + free(filename); if (!sysfsfp) { ret = -errno; - free(filename); goto error_close_dir; } @@ -354,7 +355,6 @@ int build_channel_array(const char *device_dir, if (fclose(sysfsfp)) perror("build_channel_array(): Failed to close file"); - free(filename); goto error_close_dir; } if (ret == 1) @@ -362,11 +362,9 @@ int build_channel_array(const char *device_dir, if (fclose(sysfsfp)) { ret = -errno; - free(filename); goto error_close_dir; } - free(filename); } *ci_array = malloc(sizeof(**ci_array) * (*counter)); @@ -392,9 +390,9 @@ int build_channel_array(const char *device_dir, } sysfsfp = fopen(filename, "r"); + free(filename); if (!sysfsfp) { ret = -errno; - free(filename); count--; goto error_cleanup_array; } @@ -402,20 +400,17 @@ int build_channel_array(const char *device_dir, errno = 0; if (fscanf(sysfsfp, "%i", ¤t_enabled) != 1) { ret = errno ? -errno : -ENODATA; - free(filename); count--; goto error_cleanup_array; } if (fclose(sysfsfp)) { ret = -errno; - free(filename); count--; goto error_cleanup_array; } if (!current_enabled) { - free(filename); count--; continue; } @@ -426,7 +421,6 @@ int build_channel_array(const char *device_dir, strlen(ent->d_name) - strlen("_en")); if (!current->name) { - free(filename); ret = -ENOMEM; count--; goto error_cleanup_array; @@ -436,7 +430,6 @@ int build_channel_array(const char *device_dir, ret = iioutils_break_up_name(current->name, ¤t->generic_name); if (ret) { - free(filename); free(current->name); count--; goto error_cleanup_array; @@ -447,17 +440,16 @@ int build_channel_array(const char *device_dir, scan_el_dir, current->name); if (ret < 0) { - free(filename); ret = -ENOMEM; goto error_cleanup_array; } sysfsfp = fopen(filename, "r"); + free(filename); if (!sysfsfp) { ret = -errno; - fprintf(stderr, "failed to open %s\n", - filename); - free(filename); + fprintf(stderr, "failed to open %s/%s_index\n", + scan_el_dir, current->name); goto error_cleanup_array; } @@ -467,17 +459,14 @@ int build_channel_array(const char *device_dir, if (fclose(sysfsfp)) perror("build_channel_array(): Failed to close file"); - free(filename); goto error_cleanup_array; } if (fclose(sysfsfp)) { ret = -errno; - free(filename); goto error_cleanup_array; } - free(filename); /* Find the scale */ ret = iioutils_get_param_float(¤t->scale, "scale", diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index e6f644cdc9f15d2c95aa701d910e2fe929d1f353..f7c48b1fb3a051d427713fda0c3e00e72f35c5a2 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -614,8 +614,21 @@ int btf__align_of(const struct btf *btf, __u32 id) if (align <= 0) return align; max_align = max(max_align, align); 
+ + /* if field offset isn't aligned according to field + * type's alignment, then struct must be packed + */ + if (btf_member_bitfield_size(t, i) == 0 && + (m->offset % (8 * align)) != 0) + return 1; } + /* if struct/union size isn't a multiple of its alignment, + * then struct must be packed + */ + if ((t->size % max_align) != 0) + return 1; + return max_align; } default: diff --git a/tools/lib/bpf/nlattr.c b/tools/lib/bpf/nlattr.c index b607fa9852b1cb59a59541463a2fa5cb32a360a0..1a04299a2a604d40a32b62600198bef8ab7f3d05 100644 --- a/tools/lib/bpf/nlattr.c +++ b/tools/lib/bpf/nlattr.c @@ -178,7 +178,7 @@ int libbpf_nla_dump_errormsg(struct nlmsghdr *nlh) hlen += nlmsg_len(&err->msg); attr = (struct nlattr *) ((void *) err + hlen); - alen = nlh->nlmsg_len - hlen; + alen = (void *)nlh + nlh->nlmsg_len - (void *)attr; if (libbpf_nla_parse(tb, NLMSGERR_ATTR_MAX, attr, alen, extack_policy) != 0) { diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 700984e7f5ba1d33be93e6a04ac4fba49e30dc76..9a0a54194636c490900764933a9d6d55de9d17d8 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -168,6 +168,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func, "panic", "do_exit", "do_task_dead", + "make_task_dead", "__module_put_and_exit", "complete_and_exit", "__reiserfs_panic", @@ -175,10 +176,11 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func, "fortify_panic", "usercopy_abort", "machine_real_restart", - "rewind_stack_do_exit", + "rewind_stack_and_make_dead", "kunit_try_catch_throw", "xen_start_kernel", "cpu_bringup_and_idle", + "stop_this_cpu", }; if (!func) @@ -570,6 +572,7 @@ static int create_static_call_sections(struct objtool_file *file) if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR, STATIC_CALL_TRAMP_PREFIX_LEN)) { WARN("static_call: trampoline name malformed: %s", key_name); + free(key_name); return -1; } tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN; @@ -579,6 +582,7 @@ static int create_static_call_sections(struct objtool_file *file) if (!key_sym) { if (!module) { WARN("static_call: can't find static_call_key symbol: %s", tmp); + free(key_name); return -1; } @@ -862,6 +866,8 @@ static const char *uaccess_safe_builtin[] = { "__tsan_atomic64_compare_exchange_val", "__tsan_atomic_thread_fence", "__tsan_atomic_signal_fence", + "__tsan_unaligned_read16", + "__tsan_unaligned_write16", /* KCOV */ "write_comp_data", "check_kcov_mode", diff --git a/tools/perf/perf-completion.sh b/tools/perf/perf-completion.sh index fdf75d45efff7a413347d215851f93f2a8f5bfb3..978249d7868c22e875720e73550eaf0897999c64 100644 --- a/tools/perf/perf-completion.sh +++ b/tools/perf/perf-completion.sh @@ -165,7 +165,12 @@ __perf_main () local cur1=${COMP_WORDS[COMP_CWORD]} local raw_evts=$($cmd list --raw-dump) - local arr s tmp result + local arr s tmp result cpu_evts + + # aarch64 doesn't have /sys/bus/event_source/devices/cpu/events + if [[ `uname -m` != aarch64 ]]; then + cpu_evts=$(ls /sys/bus/event_source/devices/cpu/events) + fi if [[ "$cur1" == */* && ${cur1#*/} =~ ^[A-Z] ]]; then OLD_IFS="$IFS" @@ -183,9 +188,9 @@ __perf_main () fi done - evts=${result}" "$(ls /sys/bus/event_source/devices/cpu/events) + evts=${result}" "${cpu_evts} else - evts=${raw_evts}" "$(ls /sys/bus/event_source/devices/cpu/events) + evts=${raw_evts}" "${cpu_evts} fi if [[ "$cur1" == , ]]; then diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c index 
0bf6b4d4c90a73860571fb89a48c581f0d29495f..570cde4640d051fd816680e7c51af95f6e64b4ba 100644 --- a/tools/perf/util/llvm-utils.c +++ b/tools/perf/util/llvm-utils.c @@ -525,14 +525,37 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf, pr_debug("llvm compiling command template: %s\n", template); + /* + * Below, substitute control characters for values that can cause the + * echo to misbehave, then substitute the values back. + */ err = -ENOMEM; - if (asprintf(&command_echo, "echo -n \"%s\"", template) < 0) + if (asprintf(&command_echo, "echo -n \a%s\a", template) < 0) goto errout; +#define SWAP_CHAR(a, b) do { if (*p == a) *p = b; } while (0) + for (char *p = command_echo; *p; p++) { + SWAP_CHAR('<', '\001'); + SWAP_CHAR('>', '\002'); + SWAP_CHAR('"', '\003'); + SWAP_CHAR('\'', '\004'); + SWAP_CHAR('|', '\005'); + SWAP_CHAR('&', '\006'); + SWAP_CHAR('\a', '"'); + } err = read_from_pipe(command_echo, (void **) &command_out, NULL); if (err) goto errout; + for (char *p = command_out; *p; p++) { + SWAP_CHAR('\001', '<'); + SWAP_CHAR('\002', '>'); + SWAP_CHAR('\003', '"'); + SWAP_CHAR('\004', '\''); + SWAP_CHAR('\005', '|'); + SWAP_CHAR('\006', '&'); + } +#undef SWAP_CHAR pr_debug("llvm compiling command : %s\n", command_out); err = read_from_pipe(template, &obj_buf, &obj_buf_sz); diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index 8b1e3ae8fe50d919cea3fc396789594c6b65df3c..ea26f2b0c1bc2316d37b10501a431561f3593670 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -178,6 +178,7 @@ my $store_failures; my $store_successes; my $test_name; my $timeout; +my $run_timeout; my $connect_timeout; my $config_bisect_exec; my $booted_timeout; @@ -340,6 +341,7 @@ my %option_map = ( "STORE_SUCCESSES" => \$store_successes, "TEST_NAME" => \$test_name, "TIMEOUT" => \$timeout, + "RUN_TIMEOUT" => \$run_timeout, "CONNECT_TIMEOUT" => \$connect_timeout, "CONFIG_BISECT_EXEC" => \$config_bisect_exec, "BOOTED_TIMEOUT" => \$booted_timeout, @@ -1433,7 +1435,8 @@ sub reboot { # Still need to wait for the reboot to finish wait_for_monitor($time, $reboot_success_line); - + } + if ($powercycle || $time) { end_monitor; } } @@ -1799,6 +1802,14 @@ sub run_command { $command =~ s/\$SSH_USER/$ssh_user/g; $command =~ s/\$MACHINE/$machine/g; + if (!defined($timeout)) { + $timeout = $run_timeout; + } + + if (!defined($timeout)) { + $timeout = -1; # tell wait_for_input to wait indefinitely + } + doprint("$command ... 
"); $start_time = time; @@ -1825,13 +1836,10 @@ sub run_command { while (1) { my $fp = \*CMD; - if (defined($timeout)) { - doprint "timeout = $timeout\n"; - } my $line = wait_for_input($fp, $timeout); if (!defined($line)) { my $now = time; - if (defined($timeout) && (($now - $start_time) >= $timeout)) { + if ($timeout >= 0 && (($now - $start_time) >= $timeout)) { doprint "Hit timeout of $timeout, killing process\n"; $hit_timeout = 1; kill 9, $pid; @@ -2004,6 +2012,11 @@ sub wait_for_input $time = $timeout; } + if ($time < 0) { + # Negative number means wait indefinitely + undef $time; + } + $rin = ''; vec($rin, fileno($fp), 1) = 1; vec($rin, fileno(\*STDIN), 1) = 1; @@ -4283,6 +4296,9 @@ sub send_email { } sub cancel_test { + if ($monitor_cnt) { + end_monitor; + } if ($email_when_canceled) { my $name = get_test_name; send_email("KTEST: Your [$name] test was cancelled", diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf index 5e7d1d7297529be4ca1b684af3bd5b3e87b1fcfc..65957a9803b50abc3dc706bdb10fb55b214842a6 100644 --- a/tools/testing/ktest/sample.conf +++ b/tools/testing/ktest/sample.conf @@ -809,6 +809,11 @@ # is issued instead of a reboot. # CONNECT_TIMEOUT = 25 +# The timeout in seconds for how long to wait for any running command +# to timeout. If not defined, it will let it go indefinitely. +# (default undefined) +#RUN_TIMEOUT = 600 + # In between tests, a reboot of the box may occur, and this # is the time to wait for the console after it stops producing # output. Some machines may not produce a large lag on reboot diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 1d915553336082fd178f9ec15f1b206c2077d3f7..a845724e0906a8fb8b331940c3891a9f9d7f954d 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -119,8 +119,6 @@ RESOLVE_BTFIDS := $(BUILD_DIR)/resolve_btfids/resolve_btfids # NOTE: Semicolon at the end is critical to override lib.mk's default static # rule for binaries. 
$(notdir $(TEST_GEN_PROGS) \ - $(TEST_PROGS) \ - $(TEST_PROGS_EXTENDED) \ $(TEST_GEN_PROGS_EXTENDED) \ $(TEST_CUSTOM_PROGS)): %: $(OUTPUT)/% ; diff --git a/tools/testing/selftests/bpf/prog_tests/jeq_infer_not_null.c b/tools/testing/selftests/bpf/prog_tests/jeq_infer_not_null.c deleted file mode 100644 index 3add34df57678d28e4678ffa8778e198fd97fe62..0000000000000000000000000000000000000000 --- a/tools/testing/selftests/bpf/prog_tests/jeq_infer_not_null.c +++ /dev/null @@ -1,9 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -#include -#include "jeq_infer_not_null_fail.skel.h" - -void test_jeq_infer_not_null(void) -{ - RUN_TESTS(jeq_infer_not_null_fail); -} diff --git a/tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c b/tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c deleted file mode 100644 index f46965053acb2e0afb39094719a567117a3aedc6..0000000000000000000000000000000000000000 --- a/tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c +++ /dev/null @@ -1,42 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -#include "vmlinux.h" -#include -#include "bpf_misc.h" - -char _license[] SEC("license") = "GPL"; - -struct { - __uint(type, BPF_MAP_TYPE_HASH); - __uint(max_entries, 1); - __type(key, u64); - __type(value, u64); -} m_hash SEC(".maps"); - -SEC("?raw_tp") -__failure __msg("R8 invalid mem access 'map_value_or_null") -int jeq_infer_not_null_ptr_to_btfid(void *ctx) -{ - struct bpf_map *map = (struct bpf_map *)&m_hash; - struct bpf_map *inner_map = map->inner_map_meta; - u64 key = 0, ret = 0, *val; - - val = bpf_map_lookup_elem(map, &key); - /* Do not mark ptr as non-null if one of them is - * PTR_TO_BTF_ID (R9), reject because of invalid - * access to map value (R8). - * - * Here, we need to inline those insns to access - * R8 directly, since compiler may use other reg - * once it figures out val==inner_map. - */ - asm volatile("r8 = %[val];\n" - "r9 = %[inner_map];\n" - "if r8 != r9 goto +1;\n" - "%[ret] = *(u64 *)(r8 +0);\n" - : [ret] "+r"(ret) - : [inner_map] "r"(inner_map), [val] "r"(val) - : "r8", "r9"); - - return ret; -} diff --git a/tools/testing/selftests/bpf/verifier/search_pruning.c b/tools/testing/selftests/bpf/verifier/search_pruning.c index 7e50cb80873a5b7c08f607fe27006bda2afc6bf5..7e36078f8f482c7ca2e7ca66ebd5ea6ad75ae77d 100644 --- a/tools/testing/selftests/bpf/verifier/search_pruning.c +++ b/tools/testing/selftests/bpf/verifier/search_pruning.c @@ -154,3 +154,39 @@ .result_unpriv = ACCEPT, .insn_processed = 15, }, +/* The test performs a conditional 64-bit write to a stack location + * fp[-8], this is followed by an unconditional 8-bit write to fp[-8], + * then data is read from fp[-8]. This sequence is unsafe. + * + * The test would be mistakenly marked as safe w/o dst register parent + * preservation in verifier.c:copy_register_state() function. + * + * Note the usage of BPF_F_TEST_STATE_FREQ to force creation of the + * checkpoint state after conditional 64-bit assignment. 
+ */ +{ + "write tracking and register parent chain bug", + .insns = { + /* r6 = ktime_get_ns() */ + BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + /* r0 = ktime_get_ns() */ + BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), + /* if r0 > r6 goto +1 */ + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_6, 1), + /* *(u64 *)(r10 - 8) = 0xdeadbeef */ + BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0xdeadbeef), + /* r1 = 42 */ + BPF_MOV64_IMM(BPF_REG_1, 42), + /* *(u8 *)(r10 - 8) = r1 */ + BPF_STX_MEM(BPF_B, BPF_REG_FP, BPF_REG_1, -8), + /* r2 = *(u64 *)(r10 - 8) */ + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_FP, -8), + /* exit(0) */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .flags = BPF_F_TEST_STATE_FREQ, + .errstr = "invalid read from stack off -8+1 size 8", + .result = REJECT, +}, diff --git a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh index 16d2de18591d322925c3c5722ded4a7c18dc15cc..2c81e01c30b319ada57357c1927a4ba918fa96dd 100755 --- a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh +++ b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh @@ -16,6 +16,18 @@ SYSFS_NET_DIR=/sys/bus/netdevsim/devices/$DEV_NAME/net/ DEBUGFS_DIR=/sys/kernel/debug/netdevsim/$DEV_NAME/ DL_HANDLE=netdevsim/$DEV_NAME +wait_for_devlink() +{ + "$@" | grep -q $DL_HANDLE +} + +devlink_wait() +{ + local timeout=$1 + + busywait "$timeout" wait_for_devlink devlink dev +} + fw_flash_test() { RET=0 @@ -255,6 +267,9 @@ netns_reload_test() ip netns del testns2 ip netns del testns1 + # Wait until netns async cleanup is done. + devlink_wait 2000 + log_test "netns reload test" } @@ -347,6 +362,9 @@ resource_test() ip netns del testns2 ip netns del testns1 + # Wait until netns async cleanup is done. 
+ devlink_wait 2000 + log_test "resource test" } diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc index 27a68bbe778beb9230008aa25bdda3700eefd6fb..d9b81279507712ff593d943a4af6819cd5314bf0 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc @@ -42,7 +42,7 @@ test_event_enabled() { while [ $check_times -ne 0 ]; do e=`cat $EVENT_ENABLE` - if [ "$e" == $val ]; then + if [ "$e" = $val ]; then return 0 fi sleep $SLEEP_TIME diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic_event_syntax_errors.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic_event_syntax_errors.tc index 955e3ceea44b5b0a3b191eb8f64b4c44ac53707f..ada594fe16cb3a83a2d46a62a65da1aed106c32d 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic_event_syntax_errors.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic_event_syntax_errors.tc @@ -1,38 +1,19 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: event trigger - test synthetic_events syntax parser errors -# requires: synthetic_events error_log "char name[]' >> synthetic_events":README +# requires: synthetic_events error_log check_error() { # command-with-error-pos-by-^ ftrace_errlog_check 'synthetic_events' "$1" 'synthetic_events' } -check_dyn_error() { # command-with-error-pos-by-^ - ftrace_errlog_check 'synthetic_events' "$1" 'dynamic_events' -} - check_error 'myevent ^chr arg' # INVALID_TYPE -check_error 'myevent ^unsigned arg' # INCOMPLETE_TYPE - -check_error 'myevent char ^str]; int v' # BAD_NAME -check_error '^mye-vent char str[]' # BAD_NAME -check_error 'myevent char ^st-r[]' # BAD_NAME - -check_error 'myevent char str;^[]' # INVALID_FIELD -check_error 'myevent char str; ^int' # INVALID_FIELD - -check_error 'myevent char ^str[; int v' # INVALID_ARRAY_SPEC -check_error 'myevent char ^str[kdjdk]' # INVALID_ARRAY_SPEC -check_error 'myevent char ^str[257]' # INVALID_ARRAY_SPEC - -check_error '^mye;vent char str[]' # INVALID_CMD -check_error '^myevent ; char str[]' # INVALID_CMD -check_error '^myevent; char str[]' # INVALID_CMD -check_error '^myevent ;char str[]' # INVALID_CMD -check_error '^; char str[]' # INVALID_CMD -check_error '^;myevent char str[]' # INVALID_CMD -check_error '^myevent' # INVALID_CMD - -check_dyn_error '^s:junk/myevent char str[' # INVALID_DYN_CMD +check_error 'myevent ^char str[];; int v' # INVALID_TYPE +check_error 'myevent char ^str]; int v' # INVALID_NAME +check_error 'myevent char ^str;[]' # INVALID_NAME +check_error 'myevent ^char str[; int v' # INVALID_TYPE +check_error '^mye;vent char str[]' # BAD_NAME +check_error 'myevent char str[]; ^int' # INVALID_FIELD +check_error '^myevent' # INCOMPLETE_CMD exit 0 diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh index 0f3bf90e04d36f7e51bf06ddc0c22b3b11bfcd22..8f42e17db5d096583fcfede560deedabbfefab37 100755 --- a/tools/testing/selftests/net/fib_tests.sh +++ b/tools/testing/selftests/net/fib_tests.sh @@ -1773,6 +1773,8 @@ EOF ################################################################################ # main +trap cleanup EXIT + while getopts :t:pPhv o do case $o in diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index 
54020d05a62b8f002b0d44a34aa741b4c6a7106c..9605e158a0bfced24e6f60ab01f673f45842db64 100644 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -731,14 +731,14 @@ sysctl_set() local value=$1; shift SYSCTL_ORIG[$key]=$(sysctl -n $key) - sysctl -qw $key=$value + sysctl -qw $key="$value" } sysctl_restore() { local key=$1; shift - sysctl -qw $key=${SYSCTL_ORIG["$key"]} + sysctl -qw $key="${SYSCTL_ORIG[$key]}" } forwarding_enable() diff --git a/tools/testing/selftests/net/udpgso_bench.sh b/tools/testing/selftests/net/udpgso_bench.sh index dc932fd65363450c093e491d690da5366ad84f7d..640bc43452faa9cd6ac2d18b16817ac67bd353a9 100755 --- a/tools/testing/selftests/net/udpgso_bench.sh +++ b/tools/testing/selftests/net/udpgso_bench.sh @@ -7,6 +7,7 @@ readonly GREEN='\033[0;92m' readonly YELLOW='\033[0;33m' readonly RED='\033[0;31m' readonly NC='\033[0m' # No Color +readonly TESTPORT=8000 readonly KSFT_PASS=0 readonly KSFT_FAIL=1 @@ -56,11 +57,26 @@ trap wake_children EXIT run_one() { local -r args=$@ + local nr_socks=0 + local i=0 + local -r timeout=10 + + ./udpgso_bench_rx -p "$TESTPORT" & + ./udpgso_bench_rx -p "$TESTPORT" -t & + + # Wait for the above test program to get ready to receive connections. + while [ "$i" -lt "$timeout" ]; do + nr_socks="$(ss -lnHi | grep -c "\*:${TESTPORT}")" + [ "$nr_socks" -eq 2 ] && break + i=$((i + 1)) + sleep 1 + done + if [ "$nr_socks" -ne 2 ]; then + echo "timed out while waiting for udpgso_bench_rx" + exit 1 + fi - ./udpgso_bench_rx & - ./udpgso_bench_rx -t & - - ./udpgso_bench_tx ${args} + ./udpgso_bench_tx -p "$TESTPORT" ${args} } run_in_netns() { diff --git a/tools/testing/selftests/net/udpgso_bench_rx.c b/tools/testing/selftests/net/udpgso_bench_rx.c index 6a193425c367fccdbb18bcc1269f8f4b240acdce..f35a924d4a3030780447f2cc137f6ff373ed693c 100644 --- a/tools/testing/selftests/net/udpgso_bench_rx.c +++ b/tools/testing/selftests/net/udpgso_bench_rx.c @@ -214,11 +214,10 @@ static void do_verify_udp(const char *data, int len) static int recv_msg(int fd, char *buf, int len, int *gso_size) { - char control[CMSG_SPACE(sizeof(uint16_t))] = {0}; + char control[CMSG_SPACE(sizeof(int))] = {0}; struct msghdr msg = {0}; struct iovec iov = {0}; struct cmsghdr *cmsg; - uint16_t *gsosizeptr; int ret; iov.iov_base = buf; @@ -237,8 +236,7 @@ static int recv_msg(int fd, char *buf, int len, int *gso_size) cmsg = CMSG_NXTHDR(&msg, cmsg)) { if (cmsg->cmsg_level == SOL_UDP && cmsg->cmsg_type == UDP_GRO) { - gsosizeptr = (uint16_t *) CMSG_DATA(cmsg); - *gso_size = *gsosizeptr; + *gso_size = *(int *)CMSG_DATA(cmsg); break; } } @@ -250,7 +248,7 @@ static int recv_msg(int fd, char *buf, int len, int *gso_size) static void do_flush_udp(int fd) { static char rbuf[ETH_MAX_MTU]; - int ret, len, gso_size, budget = 256; + int ret, len, gso_size = 0, budget = 256; len = cfg_read_all ? 
sizeof(rbuf) : 0; while (budget--) { @@ -336,6 +334,8 @@ static void parse_opts(int argc, char **argv) cfg_verify = true; cfg_read_all = true; break; + default: + exit(1); } } diff --git a/tools/testing/selftests/net/udpgso_bench_tx.c b/tools/testing/selftests/net/udpgso_bench_tx.c index f1fdaa270291331e70fb2456003bdd44056b93d5..477392715a9ad52a2edb76d95e4b332ba953f06c 100644 --- a/tools/testing/selftests/net/udpgso_bench_tx.c +++ b/tools/testing/selftests/net/udpgso_bench_tx.c @@ -62,6 +62,7 @@ static int cfg_payload_len = (1472 * 42); static int cfg_port = 8000; static int cfg_runtime_ms = -1; static bool cfg_poll; +static int cfg_poll_loop_timeout_ms = 2000; static bool cfg_segment; static bool cfg_sendmmsg; static bool cfg_tcp; @@ -235,16 +236,17 @@ static void flush_errqueue_recv(int fd) } } -static void flush_errqueue(int fd, const bool do_poll) +static void flush_errqueue(int fd, const bool do_poll, + unsigned long poll_timeout, const bool poll_err) { if (do_poll) { struct pollfd fds = {0}; int ret; fds.fd = fd; - ret = poll(&fds, 1, 500); + ret = poll(&fds, 1, poll_timeout); if (ret == 0) { - if (cfg_verbose) + if ((cfg_verbose) && (poll_err)) fprintf(stderr, "poll timeout\n"); } else if (ret < 0) { error(1, errno, "poll"); @@ -254,6 +256,20 @@ static void flush_errqueue(int fd, const bool do_poll) flush_errqueue_recv(fd); } +static void flush_errqueue_retry(int fd, unsigned long num_sends) +{ + unsigned long tnow, tstop; + bool first_try = true; + + tnow = gettimeofday_ms(); + tstop = tnow + cfg_poll_loop_timeout_ms; + do { + flush_errqueue(fd, true, tstop - tnow, first_try); + first_try = false; + tnow = gettimeofday_ms(); + } while ((stat_zcopies != num_sends) && (tnow < tstop)); +} + static int send_tcp(int fd, char *data) { int ret, done = 0, count = 0; @@ -413,7 +429,8 @@ static int send_udp_segment(int fd, char *data) static void usage(const char *filepath) { - error(1, 0, "Usage: %s [-46acmHPtTuvz] [-C cpu] [-D dst ip] [-l secs] [-M messagenr] [-p port] [-s sendsize] [-S gsosize]", + error(1, 0, "Usage: %s [-46acmHPtTuvz] [-C cpu] [-D dst ip] [-l secs] " + "[-L secs] [-M messagenr] [-p port] [-s sendsize] [-S gsosize]", filepath); } @@ -423,7 +440,7 @@ static void parse_opts(int argc, char **argv) int max_len, hdrlen; int c; - while ((c = getopt(argc, argv, "46acC:D:Hl:mM:p:s:PS:tTuvz")) != -1) { + while ((c = getopt(argc, argv, "46acC:D:Hl:L:mM:p:s:PS:tTuvz")) != -1) { switch (c) { case '4': if (cfg_family != PF_UNSPEC) @@ -452,6 +469,9 @@ static void parse_opts(int argc, char **argv) case 'l': cfg_runtime_ms = strtoul(optarg, NULL, 10) * 1000; break; + case 'L': + cfg_poll_loop_timeout_ms = strtoul(optarg, NULL, 10) * 1000; + break; case 'm': cfg_sendmmsg = true; break; @@ -490,6 +510,8 @@ static void parse_opts(int argc, char **argv) case 'z': cfg_zerocopy = true; break; + default: + exit(1); } } @@ -677,7 +699,7 @@ int main(int argc, char **argv) num_sends += send_udp(fd, buf[i]); num_msgs++; if ((cfg_zerocopy && ((num_msgs & 0xF) == 0)) || cfg_tx_tstamp) - flush_errqueue(fd, cfg_poll); + flush_errqueue(fd, cfg_poll, 500, true); if (cfg_msg_nr && num_msgs >= cfg_msg_nr) break; @@ -696,7 +718,7 @@ int main(int argc, char **argv) } while (!interrupted && (cfg_runtime_ms == -1 || tnow < tstop)); if (cfg_zerocopy || cfg_tx_tstamp) - flush_errqueue(fd, true); + flush_errqueue_retry(fd, num_sends); if (close(fd)) error(1, errno, "close"); diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh index 
4e15e81673104b417746932ef649359ad670858a..67697d8ea59a529971ca35dd224b42c940a737a7 100755 --- a/tools/testing/selftests/netfilter/nft_nat.sh +++ b/tools/testing/selftests/netfilter/nft_nat.sh @@ -404,6 +404,8 @@ EOF echo SERVER-$family | ip netns exec "$ns1" timeout 5 socat -u STDIN TCP-LISTEN:2000 & sc_s=$! + sleep 1 + result=$(ip netns exec "$ns0" timeout 1 socat TCP:$daddr:2000 STDOUT) if [ "$result" = "SERVER-inet" ];then diff --git a/tools/virtio/linux/bug.h b/tools/virtio/linux/bug.h index b14c2c3b6b85782d67c74ec9d01aa83e1207a84e..74aef964f5099eb52aa0cb5bec4321d6dbffdda9 100644 --- a/tools/virtio/linux/bug.h +++ b/tools/virtio/linux/bug.h @@ -1,11 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef BUG_H -#define BUG_H +#ifndef _LINUX_BUG_H +#define _LINUX_BUG_H #define BUG_ON(__BUG_ON_cond) assert(!(__BUG_ON_cond)) -#define BUILD_BUG_ON(x) - #define BUG() abort() -#endif /* BUG_H */ +#endif /* _LINUX_BUG_H */ diff --git a/tools/virtio/linux/build_bug.h b/tools/virtio/linux/build_bug.h new file mode 100644 index 0000000000000000000000000000000000000000..cdbb75e28a604e05368544109033518d1f4ccfbf --- /dev/null +++ b/tools/virtio/linux/build_bug.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BUILD_BUG_H +#define _LINUX_BUILD_BUG_H + +#define BUILD_BUG_ON(x) + +#endif /* _LINUX_BUILD_BUG_H */ diff --git a/tools/virtio/linux/cpumask.h b/tools/virtio/linux/cpumask.h new file mode 100644 index 0000000000000000000000000000000000000000..307da69d6b26c0228ecdcf48aa59f6f4748854b8 --- /dev/null +++ b/tools/virtio/linux/cpumask.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CPUMASK_H +#define _LINUX_CPUMASK_H + +#include + +#endif /* _LINUX_CPUMASK_H */ diff --git a/tools/virtio/linux/gfp.h b/tools/virtio/linux/gfp.h new file mode 100644 index 0000000000000000000000000000000000000000..43d146f236f1469b9b2dd5ac7e8009a809532f18 --- /dev/null +++ b/tools/virtio/linux/gfp.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_GFP_H +#define __LINUX_GFP_H + +#include + +#endif diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h index 315e85cabedab1a3145dd7ad77673e5eeea7da98..063ccc8975647d17cff5603ee9c404b687ff8f5a 100644 --- a/tools/virtio/linux/kernel.h +++ b/tools/virtio/linux/kernel.h @@ -10,6 +10,7 @@ #include #include +#include #include #include #include diff --git a/tools/virtio/linux/kmsan.h b/tools/virtio/linux/kmsan.h new file mode 100644 index 0000000000000000000000000000000000000000..272b5aa285d5a14053ba1f36063f4c1b8d618daf --- /dev/null +++ b/tools/virtio/linux/kmsan.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_KMSAN_H +#define _LINUX_KMSAN_H + +#include + +inline void kmsan_handle_dma(struct page *page, size_t offset, size_t size, + enum dma_data_direction dir) +{ +} + +#endif /* _LINUX_KMSAN_H */ diff --git a/tools/virtio/linux/scatterlist.h b/tools/virtio/linux/scatterlist.h index 369ee308b6686ca4a106581b91f8d382e45c79e8..74d9e1825748e971568f1b6abafd211b03eefc02 100644 --- a/tools/virtio/linux/scatterlist.h +++ b/tools/virtio/linux/scatterlist.h @@ -2,6 +2,7 @@ #ifndef SCATTERLIST_H #define SCATTERLIST_H #include +#include struct scatterlist { unsigned long page_link; diff --git a/tools/virtio/linux/topology.h b/tools/virtio/linux/topology.h new file mode 100644 index 0000000000000000000000000000000000000000..910794afb993abb349935b2449d41aa0ae781d4c --- /dev/null +++ b/tools/virtio/linux/topology.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef 
_LINUX_TOPOLOGY_H +#define _LINUX_TOPOLOGY_H + +#include + +#endif /* _LINUX_TOPOLOGY_H */ diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c index d5bebb37238c0bc9908d0d1f82f954310fa696ea..ce6f3b916ef9d8f9367f60b5e3ed5a75414f56d6 100644 --- a/virt/kvm/coalesced_mmio.c +++ b/virt/kvm/coalesced_mmio.c @@ -187,15 +187,17 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm, r = kvm_io_bus_unregister_dev(kvm, zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev); + kvm_iodevice_destructor(&dev->dev); + /* * On failure, unregister destroys all devices on the * bus _except_ the target device, i.e. coalesced_zones - * has been modified. No need to restart the walk as - * there aren't any zones left. + * has been modified. Bail after destroying the target + * device, there's no need to restart the walk as there + * aren't any zones left. */ if (r) break; - kvm_iodevice_destructor(&dev->dev); } }