diff --git a/Documentation/admin-guide/hw-vuln/srso.rst b/Documentation/admin-guide/hw-vuln/srso.rst index 2f923c805802ffc7a0a2915749274af1be9af9bd..f79cb11b080f67733c4a4d08699a991f95aeb015 100644 --- a/Documentation/admin-guide/hw-vuln/srso.rst +++ b/Documentation/admin-guide/hw-vuln/srso.rst @@ -124,8 +124,8 @@ sequence. To ensure the safety of this mitigation, the kernel must ensure that the safe return sequence is itself free from attacker interference. In Zen3 and Zen4, this is accomplished by creating a BTB alias between the -untraining function srso_untrain_ret_alias() and the safe return -function srso_safe_ret_alias() which results in evicting a potentially +untraining function srso_alias_untrain_ret() and the safe return +function srso_alias_safe_ret() which results in evicting a potentially poisoned BTB entry and using that safe one for all function returns. In older Zen1 and Zen2, this is accomplished using a reinterpretation diff --git a/Documentation/devicetree/bindings/iio/addac/adi,ad74413r.yaml b/Documentation/devicetree/bindings/iio/addac/adi,ad74413r.yaml new file mode 100644 index 0000000000000000000000000000000000000000..baa65a521bad5b2cf533061ee331e023eb62bdd5 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/addac/adi,ad74413r.yaml @@ -0,0 +1,158 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/iio/addac/adi,ad74413r.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Analog Devices AD74412R/AD74413R device + +maintainers: + - Cosmin Tanislav + +description: | + The AD74412R and AD74413R are quad-channel software configurable input/output + solutions for building and process control applications. They contain + functionality for analog output, analog input, digital input, resistance + temperature detector, and thermocouple measurements integrated + into a single chip solution with an SPI interface. + The devices feature a 16-bit ADC and four configurable 13-bit DACs to provide + four configurable input/output channels and a suite of diagnostic functions. + The AD74413R differentiates itself from the AD74412R by being HART-compatible. + https://www.analog.com/en/products/ad74412r.html + https://www.analog.com/en/products/ad74413r.html + +properties: + compatible: + enum: + - adi,ad74412r + - adi,ad74413r + + reg: + maxItems: 1 + + '#address-cells': + const: 1 + + '#size-cells': + const: 0 + + spi-max-frequency: + maximum: 1000000 + + spi-cpol: true + + interrupts: + maxItems: 1 + + refin-supply: true + + shunt-resistor-micro-ohms: + description: + Shunt (sense) resistor value in micro-Ohms. + default: 100000000 + +required: + - compatible + - reg + - spi-max-frequency + - spi-cpol + - refin-supply + +additionalProperties: false + +patternProperties: + "^channel@[0-3]$": + type: object + description: Represents the external channels which are connected to the device. + + properties: + reg: + description: | + The channel number. It can have up to 4 channels numbered from 0 to 3. + minimum: 0 + maximum: 3 + + adi,ch-func: + $ref: /schemas/types.yaml#/definitions/uint32 + description: | + Channel function. + HART functions are not supported on AD74412R. 
+ 0 - CH_FUNC_HIGH_IMPEDANCE + 1 - CH_FUNC_VOLTAGE_OUTPUT + 2 - CH_FUNC_CURRENT_OUTPUT + 3 - CH_FUNC_VOLTAGE_INPUT + 4 - CH_FUNC_CURRENT_INPUT_EXT_POWER + 5 - CH_FUNC_CURRENT_INPUT_LOOP_POWER + 6 - CH_FUNC_RESISTANCE_INPUT + 7 - CH_FUNC_DIGITAL_INPUT_LOGIC + 8 - CH_FUNC_DIGITAL_INPUT_LOOP_POWER + 9 - CH_FUNC_CURRENT_INPUT_EXT_POWER_HART + 10 - CH_FUNC_CURRENT_INPUT_LOOP_POWER_HART + minimum: 0 + maximum: 10 + default: 0 + + adi,gpo-comparator: + type: boolean + description: | + Whether to configure GPO as a comparator or not. + When not configured as a comparator, the GPO will be treated as an + output-only GPIO. + + required: + - reg + +examples: + - | + #include + #include + #include + + spi { + #address-cells = <1>; + #size-cells = <0>; + + cs-gpios = <&gpio 17 GPIO_ACTIVE_LOW>; + status = "okay"; + + ad74413r@0 { + compatible = "adi,ad74413r"; + reg = <0>; + spi-max-frequency = <1000000>; + spi-cpol; + + #address-cells = <1>; + #size-cells = <0>; + + interrupt-parent = <&gpio>; + interrupts = <26 IRQ_TYPE_EDGE_FALLING>; + + refin-supply = <&ad74413r_refin>; + + channel@0 { + reg = <0>; + + adi,ch-func = ; + }; + + channel@1 { + reg = <1>; + + adi,ch-func = ; + }; + + channel@2 { + reg = <2>; + + adi,ch-func = ; + adi,gpo-comparator; + }; + + channel@3 { + reg = <3>; + + adi,ch-func = ; + }; + }; + }; +... diff --git a/Makefile b/Makefile index 6343e149148dafffd1623cdf52bdca2280193eaf..b05eb6391030bba7521c39c4e9f4745d0579015a 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 5 PATCHLEVEL = 10 -SUBLEVEL = 190 +SUBLEVEL = 192 EXTRAVERSION = NAME = Dare mighty things diff --git a/README.OpenSource b/README.OpenSource index 67edd0a7fdb7b7f0c92f8fc3b07f408f41a2f6f5..d87cf91cfbf7466f7ad4bd49fd3562a3376a3c3e 100644 --- a/README.OpenSource +++ b/README.OpenSource @@ -3,7 +3,7 @@ "Name": "linux-5.10", "License": "GPL-2.0+", "License File": "COPYING", - "Version Number": "5.10.190", + "Version Number": "5.10.192", "Owner": "liuyu82@huawei.com", "Upstream URL": "https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/log/?h=linux-5.10.y", "Description": "linux kernel 5.10" diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c index 7eea28d6f6c880a53ab7d9d98f9147db4f1b0a35..490a1611129680d54e227da059856bf276ff19b5 100644 --- a/arch/alpha/kernel/setup.c +++ b/arch/alpha/kernel/setup.c @@ -394,8 +394,7 @@ setup_memory(void *kernel_end) extern void setup_memory(void *); #endif /* !CONFIG_DISCONTIGMEM */ -int __init -page_is_ram(unsigned long pfn) +int page_is_ram(unsigned long pfn) { struct memclust_struct * cluster; struct memdesc_struct * memdesc; diff --git a/arch/arm/boot/dts/imx6dl-prtrvt.dts b/arch/arm/boot/dts/imx6dl-prtrvt.dts index 5ac84445e9cc1893edd2e3525b335a5be930bde9..90e01de8c2c1582889b4651510dd2fb689cef738 100644 --- a/arch/arm/boot/dts/imx6dl-prtrvt.dts +++ b/arch/arm/boot/dts/imx6dl-prtrvt.dts @@ -126,6 +126,10 @@ &usbh1 { status = "disabled"; }; +&usbotg { + disable-over-current; +}; + &vpu { status = "disabled"; }; diff --git a/arch/arm/boot/dts/imx6qdl-prti6q.dtsi b/arch/arm/boot/dts/imx6qdl-prti6q.dtsi index 19578f660b092b6c07df90f27ba7797125d74c5c..70dfa07a16981e4aa0a040917794adf337e28f01 100644 --- a/arch/arm/boot/dts/imx6qdl-prti6q.dtsi +++ b/arch/arm/boot/dts/imx6qdl-prti6q.dtsi @@ -69,6 +69,7 @@ &usbh1 { vbus-supply = <®_usb_h1_vbus>; phy_type = "utmi"; dr_mode = "host"; + disable-over-current; status = "okay"; }; @@ -78,10 +79,18 @@ &usbotg { pinctrl-0 = <&pinctrl_usbotg>; phy_type = "utmi"; dr_mode = 
"host"; - disable-over-current; + over-current-active-low; status = "okay"; }; +&usbphynop1 { + status = "disabled"; +}; + +&usbphynop2 { + status = "disabled"; +}; + &usdhc1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc1>; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi index 64df6433911944a74daf878f9780c8447dc18f19..2f52b91b721529228e0948321d127b238dbe7e62 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi @@ -31,32 +31,56 @@ sdio_pwrseq: sdio-pwrseq { reset-gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>; }; - vcc12v_dcin: dc-12v { + sound { + compatible = "audio-graph-card"; + label = "Analog"; + dais = <&i2s0_p0>; + }; + + sound-dit { + compatible = "audio-graph-card"; + label = "SPDIF"; + dais = <&spdif_p0>; + }; + + spdif-dit { + compatible = "linux,spdif-dit"; + #sound-dai-cells = <0>; + + port { + dit_p0_0: endpoint { + remote-endpoint = <&spdif_p0_0>; + }; + }; + }; + + vbus_typec: vbus-typec-regulator { compatible = "regulator-fixed"; - regulator-name = "vcc12v_dcin"; + enable-active-high; + gpio = <&gpio1 RK_PA3 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&vcc5v0_typec_en>; + regulator-name = "vbus_typec"; regulator-always-on; - regulator-boot-on; - regulator-min-microvolt = <12000000>; - regulator-max-microvolt = <12000000>; + vin-supply = <&vcc5v0_sys>; }; - vcc5v0_sys: vcc-sys { + vcc12v_dcin: dc-12v { compatible = "regulator-fixed"; - regulator-name = "vcc5v0_sys"; + regulator-name = "vcc12v_dcin"; regulator-always-on; regulator-boot-on; - regulator-min-microvolt = <5000000>; - regulator-max-microvolt = <5000000>; - vin-supply = <&vcc12v_dcin>; + regulator-min-microvolt = <12000000>; + regulator-max-microvolt = <12000000>; }; - vcc_0v9: vcc-0v9 { + vcc3v3_lan: vcc3v3-lan-regulator { compatible = "regulator-fixed"; - regulator-name = "vcc_0v9"; + regulator-name = "vcc3v3_lan"; regulator-always-on; regulator-boot-on; - regulator-min-microvolt = <900000>; - regulator-max-microvolt = <900000>; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; vin-supply = <&vcc3v3_sys>; }; @@ -93,28 +117,24 @@ vcc5v0_host: vcc5v0-host-regulator { vin-supply = <&vcc5v0_sys>; }; - vcc5v0_typec: vcc5v0-typec-regulator { + vcc5v0_sys: vcc-sys { compatible = "regulator-fixed"; - enable-active-high; - gpio = <&gpio1 RK_PA3 GPIO_ACTIVE_HIGH>; - pinctrl-names = "default"; - pinctrl-0 = <&vcc5v0_typec_en>; - regulator-name = "vcc5v0_typec"; + regulator-name = "vcc5v0_sys"; regulator-always-on; - vin-supply = <&vcc5v0_sys>; + regulator-boot-on; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&vcc12v_dcin>; }; - vcc_lan: vcc3v3-phy-regulator { + vcc_0v9: vcc-0v9 { compatible = "regulator-fixed"; - regulator-name = "vcc_lan"; + regulator-name = "vcc_0v9"; regulator-always-on; regulator-boot-on; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - - regulator-state-mem { - regulator-off-in-suspend; - }; + regulator-min-microvolt = <900000>; + regulator-max-microvolt = <900000>; + vin-supply = <&vcc3v3_sys>; }; vdd_log: vdd-log { @@ -161,7 +181,7 @@ &gmac { assigned-clocks = <&cru SCLK_RMII_SRC>; assigned-clock-parents = <&clkin_gmac>; clock_in_out = "input"; - phy-supply = <&vcc_lan>; + phy-supply = <&vcc3v3_lan>; phy-mode = "rgmii"; pinctrl-names = "default"; pinctrl-0 = <&rgmii_pins>; @@ -266,8 +286,8 @@ regulator-state-mem { }; }; - vcc1v8_codec: 
LDO_REG1 { - regulator-name = "vcc1v8_codec"; + vcca1v8_codec: LDO_REG1 { + regulator-name = "vcca1v8_codec"; regulator-always-on; regulator-boot-on; regulator-min-microvolt = <1800000>; @@ -277,8 +297,8 @@ regulator-state-mem { }; }; - vcc1v8_hdmi: LDO_REG2 { - regulator-name = "vcc1v8_hdmi"; + vcca1v8_hdmi: LDO_REG2 { + regulator-name = "vcca1v8_hdmi"; regulator-always-on; regulator-boot-on; regulator-min-microvolt = <1800000>; @@ -335,8 +355,8 @@ regulator-state-mem { }; }; - vcc0v9_hdmi: LDO_REG7 { - regulator-name = "vcc0v9_hdmi"; + vcca0v9_hdmi: LDO_REG7 { + regulator-name = "vcca0v9_hdmi"; regulator-always-on; regulator-boot-on; regulator-min-microvolt = <900000>; @@ -362,8 +382,6 @@ vcc_cam: SWITCH_REG1 { regulator-name = "vcc_cam"; regulator-always-on; regulator-boot-on; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; regulator-state-mem { regulator-off-in-suspend; }; @@ -373,8 +391,6 @@ vcc_mipi: SWITCH_REG2 { regulator-name = "vcc_mipi"; regulator-always-on; regulator-boot-on; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; regulator-state-mem { regulator-off-in-suspend; }; @@ -425,6 +441,20 @@ &i2c1 { i2c-scl-rising-time-ns = <300>; i2c-scl-falling-time-ns = <15>; status = "okay"; + + es8316: codec@11 { + compatible = "everest,es8316"; + reg = <0x11>; + clocks = <&cru SCLK_I2S_8CH_OUT>; + clock-names = "mclk"; + #sound-dai-cells = <0>; + + port { + es8316_p0_0: endpoint { + remote-endpoint = <&i2s0_p0_0>; + }; + }; + }; }; &i2c3 { @@ -443,6 +473,14 @@ &i2s0 { rockchip,playback-channels = <8>; rockchip,capture-channels = <8>; status = "okay"; + + i2s0_p0: port { + i2s0_p0_0: endpoint { + dai-format = "i2s"; + mclk-fs = <256>; + remote-endpoint = <&es8316_p0_0>; + }; + }; }; &i2s1 { @@ -455,21 +493,10 @@ &i2s2 { }; &io_domains { - status = "okay"; - + audio-supply = <&vcca1v8_codec>; bt656-supply = <&vcc_3v0>; - audio-supply = <&vcc1v8_codec>; - sdmmc-supply = <&vcc_sdio>; gpio1830-supply = <&vcc_3v0>; -}; - -&pmu_io_domains { - status = "okay"; - - pmu1830-supply = <&vcc_3v0>; -}; - -&pcie_phy { + sdmmc-supply = <&vcc_sdio>; status = "okay"; }; @@ -485,6 +512,10 @@ &pcie0 { status = "okay"; }; +&pcie_phy { + status = "okay"; +}; + &pinctrl { bt { bt_enable_h: bt-enable-h { @@ -506,6 +537,20 @@ pcie_pwr_en: pcie-pwr-en { }; }; + pmic { + pmic_int_l: pmic-int-l { + rockchip,pins = <1 RK_PC5 RK_FUNC_GPIO &pcfg_pull_up>; + }; + + vsel1_pin: vsel1-pin { + rockchip,pins = <1 RK_PC1 RK_FUNC_GPIO &pcfg_pull_down>; + }; + + vsel2_pin: vsel2-pin { + rockchip,pins = <1 RK_PB6 RK_FUNC_GPIO &pcfg_pull_down>; + }; + }; + sdio0 { sdio0_bus4: sdio0-bus4 { rockchip,pins = <2 RK_PC4 1 &pcfg_pull_up_20ma>, @@ -523,20 +568,6 @@ sdio0_clk: sdio0-clk { }; }; - pmic { - pmic_int_l: pmic-int-l { - rockchip,pins = <1 RK_PC5 RK_FUNC_GPIO &pcfg_pull_up>; - }; - - vsel1_pin: vsel1-pin { - rockchip,pins = <1 RK_PC1 RK_FUNC_GPIO &pcfg_pull_down>; - }; - - vsel2_pin: vsel2-pin { - rockchip,pins = <1 RK_PB6 RK_FUNC_GPIO &pcfg_pull_down>; - }; - }; - usb-typec { vcc5v0_typec_en: vcc5v0-typec-en { rockchip,pins = <1 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>; @@ -560,6 +591,11 @@ wifi_host_wake_l: wifi-host-wake-l { }; }; +&pmu_io_domains { + pmu1830-supply = <&vcc_3v0>; + status = "okay"; +}; + &pwm2 { status = "okay"; }; @@ -570,6 +606,14 @@ &saradc { vref-supply = <&vcc_1v8>; }; +&sdhci { + max-frequency = <150000000>; + bus-width = <8>; + mmc-hs200-1_8v; + non-removable; + status = "okay"; +}; + &sdio0 { #address-cells = <1>; #size-cells = <0>; @@ -597,12 
+641,13 @@ &sdmmc { status = "okay"; }; -&sdhci { - bus-width = <8>; - mmc-hs400-1_8v; - mmc-hs400-enhanced-strobe; - non-removable; - status = "okay"; +&spdif { + + spdif_p0: port { + spdif_p0_0: endpoint { + remote-endpoint = <&dit_p0_0>; + }; + }; }; &tcphy0 { @@ -677,13 +722,13 @@ &usbdrd3_0 { status = "okay"; }; -&usbdrd_dwc3_0 { +&usbdrd3_1 { status = "okay"; - dr_mode = "otg"; }; -&usbdrd3_1 { +&usbdrd_dwc3_0 { status = "okay"; + dr_mode = "host"; }; &usbdrd_dwc3_1 { diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index 8294eaa6f902deaf957514ca12ea9cc90598fd0c..dd03bc905841f9a1fbe6043247a459fe3d63ea85 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -126,7 +126,24 @@ #define cpu_has_tx39_cache __opt(MIPS_CPU_TX39_CACHE) #endif #ifndef cpu_has_octeon_cache -#define cpu_has_octeon_cache 0 +#define cpu_has_octeon_cache \ +({ \ + int __res; \ + \ + switch (boot_cpu_type()) { \ + case CPU_CAVIUM_OCTEON: \ + case CPU_CAVIUM_OCTEON_PLUS: \ + case CPU_CAVIUM_OCTEON2: \ + case CPU_CAVIUM_OCTEON3: \ + __res = 1; \ + break; \ + \ + default: \ + __res = 0; \ + } \ + \ + __res; \ +}) #endif /* Don't override `cpu_has_fpu' to 1 or the "nofpu" option won't work. */ #ifndef cpu_has_fpu @@ -353,7 +370,7 @@ ({ \ int __res; \ \ - switch (current_cpu_type()) { \ + switch (boot_cpu_type()) { \ case CPU_M14KC: \ case CPU_74K: \ case CPU_1074K: \ diff --git a/arch/mips/include/asm/dec/prom.h b/arch/mips/include/asm/dec/prom.h index 1e1247add1cf802b9abe3a5216b3a785f83e5988..908e96e3a3117e6d738e86e26596077778963f5a 100644 --- a/arch/mips/include/asm/dec/prom.h +++ b/arch/mips/include/asm/dec/prom.h @@ -70,7 +70,7 @@ static inline bool prom_is_rex(u32 magic) */ typedef struct { int pagesize; - unsigned char bitmap[0]; + unsigned char bitmap[]; } memmap; diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c index a99179d835382f1c1d6a3ea408cb593ef93f4a39..56bd0aa30f93067e65d1fd3a536852f5c0f3b19a 100644 --- a/arch/powerpc/kernel/rtas_flash.c +++ b/arch/powerpc/kernel/rtas_flash.c @@ -710,9 +710,9 @@ static int __init rtas_flash_init(void) if (!rtas_validate_flash_data.buf) return -ENOMEM; - flash_block_cache = kmem_cache_create("rtas_flash_cache", - RTAS_BLK_SIZE, RTAS_BLK_SIZE, 0, - NULL); + flash_block_cache = kmem_cache_create_usercopy("rtas_flash_cache", + RTAS_BLK_SIZE, RTAS_BLK_SIZE, + 0, 0, RTAS_BLK_SIZE, NULL); if (!flash_block_cache) { printk(KERN_ERR "%s: failed to create block cache\n", __func__); diff --git a/arch/powerpc/mm/kasan/Makefile b/arch/powerpc/mm/kasan/Makefile index bb1a5408b86b2cb3ca1af86c65c7f4355a0adbc6..8636b17c6a20fc3c659103d40a8fdb7bbc4cc92e 100644 --- a/arch/powerpc/mm/kasan/Makefile +++ b/arch/powerpc/mm/kasan/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 KASAN_SANITIZE := n +KCOV_INSTRUMENT := n obj-$(CONFIG_PPC32) += kasan_init_32.o obj-$(CONFIG_PPC_8xx) += 8xx.o diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h index 4a382fb6a9ef88b4e2a7cc3efe2464837d3ec1c6..5443851d3aa6089e592ee60b0fb30120c1e1ac4d 100644 --- a/arch/x86/include/asm/entry-common.h +++ b/arch/x86/include/asm/entry-common.h @@ -78,6 +78,7 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, static __always_inline void arch_exit_to_user_mode(void) { mds_user_clear_cpu_buffers(); + amd_clear_divider(); } #define arch_exit_to_user_mode arch_exit_to_user_mode diff --git a/arch/x86/include/asm/nospec-branch.h 
b/arch/x86/include/asm/nospec-branch.h index 99fbce2c1c7c1765fbfda5ed1ac38363a6a56f36..7b4782249a9250737c8abc517677fb5e56523844 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -156,9 +156,9 @@ .endm #ifdef CONFIG_CPU_UNRET_ENTRY -#define CALL_ZEN_UNTRAIN_RET "call zen_untrain_ret" +#define CALL_UNTRAIN_RET "call entry_untrain_ret" #else -#define CALL_ZEN_UNTRAIN_RET "" +#define CALL_UNTRAIN_RET "" #endif /* @@ -166,7 +166,7 @@ * return thunk isn't mapped into the userspace tables (then again, AMD * typically has NO_MELTDOWN). * - * While zen_untrain_ret() doesn't clobber anything but requires stack, + * While retbleed_untrain_ret() doesn't clobber anything but requires stack, * entry_ibpb() will clobber AX, CX, DX. * * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point @@ -177,14 +177,9 @@ defined(CONFIG_CPU_SRSO) ANNOTATE_UNRET_END ALTERNATIVE_2 "", \ - CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \ + CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \ "call entry_ibpb", X86_FEATURE_ENTRY_IBPB #endif - -#ifdef CONFIG_CPU_SRSO - ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \ - "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS -#endif .endm #else /* __ASSEMBLY__ */ @@ -195,10 +190,21 @@ _ASM_PTR " 999b\n\t" \ ".popsection\n\t" +#ifdef CONFIG_RETHUNK extern void __x86_return_thunk(void); -extern void zen_untrain_ret(void); +#else +static inline void __x86_return_thunk(void) {} +#endif + +extern void retbleed_return_thunk(void); +extern void srso_return_thunk(void); +extern void srso_alias_return_thunk(void); + +extern void retbleed_untrain_ret(void); extern void srso_untrain_ret(void); -extern void srso_untrain_ret_alias(void); +extern void srso_alias_untrain_ret(void); + +extern void entry_untrain_ret(void); extern void entry_ibpb(void); #ifdef CONFIG_RETPOLINE diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 7f351093cd947810b9c4eaabc022923d001b0fc4..2bc1c78a7cc574cab873f48b812c4e1f54a89de7 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -1332,3 +1332,4 @@ void noinstr amd_clear_divider(void) asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0) :: "a" (0), "d" (0), "r" (1)); } +EXPORT_SYMBOL_GPL(amd_clear_divider); diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index d31639e3ce282a315c72223ef6bb45e817784a61..4d11a50089b27d74a8ea9ec5ed58fef854f39dfd 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -61,6 +61,8 @@ EXPORT_SYMBOL_GPL(x86_pred_cmd); static DEFINE_MUTEX(spec_ctrl_mutex); +void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk; + /* Update SPEC_CTRL MSR and its cached copy unconditionally */ static void update_spec_ctrl(u64 val) { @@ -155,8 +157,13 @@ void __init cpu_select_mitigations(void) l1tf_select_mitigation(); md_clear_select_mitigation(); srbds_select_mitigation(); - gds_select_mitigation(); + + /* + * srso_select_mitigation() depends and must run after + * retbleed_select_mitigation(). 
+ */ srso_select_mitigation(); + gds_select_mitigation(); } /* @@ -976,6 +983,9 @@ static void __init retbleed_select_mitigation(void) setup_force_cpu_cap(X86_FEATURE_RETHUNK); setup_force_cpu_cap(X86_FEATURE_UNRET); + if (IS_ENABLED(CONFIG_RETHUNK)) + x86_return_thunk = retbleed_return_thunk; + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) pr_err(RETBLEED_UNTRAIN_MSG); @@ -2318,9 +2328,10 @@ static void __init srso_select_mitigation(void) * Zen1/2 with SMT off aren't vulnerable after the right * IBPB microcode has been applied. */ - if ((boot_cpu_data.x86 < 0x19) && - (!cpu_smt_possible() || (cpu_smt_control == CPU_SMT_DISABLED))) + if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) { setup_force_cpu_cap(X86_FEATURE_SRSO_NO); + return; + } } if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { @@ -2349,11 +2360,15 @@ static void __init srso_select_mitigation(void) * like ftrace, static_call, etc. */ setup_force_cpu_cap(X86_FEATURE_RETHUNK); + setup_force_cpu_cap(X86_FEATURE_UNRET); - if (boot_cpu_data.x86 == 0x19) + if (boot_cpu_data.x86 == 0x19) { setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS); - else + x86_return_thunk = srso_alias_return_thunk; + } else { setup_force_cpu_cap(X86_FEATURE_SRSO); + x86_return_thunk = srso_return_thunk; + } srso_mitigation = SRSO_MITIGATION_SAFE_RET; } else { pr_err("WARNING: kernel not compiled with CPU_SRSO.\n"); @@ -2602,6 +2617,9 @@ static ssize_t gds_show_state(char *buf) static ssize_t srso_show_state(char *buf) { + if (boot_cpu_has(X86_FEATURE_SRSO_NO)) + return sysfs_emit(buf, "Mitigation: SMT disabled\n"); + return sysfs_emit(buf, "%s%s\n", srso_strings[srso_mitigation], (cpu_has_ibpb_brtype_microcode() ? "" : ", no microcode")); diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c index 2973b3fb0ec1ad863edf61df69c08d910a42a7b9..759b986b7f033f49a1e4130f08710cda8ceefd97 100644 --- a/arch/x86/kernel/static_call.c +++ b/arch/x86/kernel/static_call.c @@ -123,6 +123,19 @@ EXPORT_SYMBOL_GPL(arch_static_call_transform); */ bool __static_call_fixup(void *tramp, u8 op, void *dest) { + unsigned long addr = (unsigned long)tramp; + /* + * Not all .return_sites are a static_call trampoline (most are not). + * Check if the 3 bytes after the return are still kernel text, if not, + * then this definitely is not a trampoline and we need not worry + * further. + * + * This avoids the memcmp() below tripping over pagefaults etc.. + */ + if (((addr >> PAGE_SHIFT) != ((addr + 7) >> PAGE_SHIFT)) && + !kernel_text_address(addr + 7)) + return false; + if (memcmp(tramp+5, tramp_ud, 3)) { /* Not a trampoline site, not our problem. 
*/ return false; diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 28f5cc0a9decbe98fbcf6654da7cf71dd8cc7c15..98838b784524e7710d091a367c902c89feee09b1 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -198,8 +198,6 @@ DEFINE_IDTENTRY(exc_divide_error) { do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE, FPE_INTDIV, error_get_trap_addr(regs)); - - amd_clear_divider(); } DEFINE_IDTENTRY(exc_overflow) diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 72ba175cb9d4cec32cb1ebe98095b0f6398054a1..f0d4500ae77ae262c28e654db7d8ab5180081bbe 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -134,18 +134,18 @@ SECTIONS KPROBES_TEXT ALIGN_ENTRY_TEXT_BEGIN #ifdef CONFIG_CPU_SRSO - *(.text.__x86.rethunk_untrain) + *(.text..__x86.rethunk_untrain) #endif ENTRY_TEXT #ifdef CONFIG_CPU_SRSO /* - * See the comment above srso_untrain_ret_alias()'s + * See the comment above srso_alias_untrain_ret()'s * definition. */ - . = srso_untrain_ret_alias | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20); - *(.text.__x86.rethunk_safe) + . = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20); + *(.text..__x86.rethunk_safe) #endif ALIGN_ENTRY_TEXT_END SOFTIRQENTRY_TEXT @@ -155,8 +155,8 @@ SECTIONS #ifdef CONFIG_RETPOLINE __indirect_thunk_start = .; - *(.text.__x86.indirect_thunk) - *(.text.__x86.return_thunk) + *(.text..__x86.indirect_thunk) + *(.text..__x86.return_thunk) __indirect_thunk_end = .; #endif } :text =0xcccc @@ -518,7 +518,7 @@ INIT_PER_CPU(irq_stack_backing_store); #endif #ifdef CONFIG_RETHUNK -. = ASSERT((__ret & 0x3f) == 0, "__ret not cacheline-aligned"); +. = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned"); . = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned"); #endif @@ -533,8 +533,8 @@ INIT_PER_CPU(irq_stack_backing_store); * Instead do: (A | B) - (A & B) in order to compute the XOR * of the two function addresses: */ -. = ASSERT(((ABSOLUTE(srso_untrain_ret_alias) | srso_safe_ret_alias) - - (ABSOLUTE(srso_untrain_ret_alias) & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)), +. 
= ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) - + (ABSOLUTE(srso_alias_untrain_ret) & srso_alias_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)), "SRSO function pair won't alias"); #endif diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 8544bca6b33568e16ad4d6979418b57ef4b0e6aa..1616e39ddc3f16a270185b6972ec86a891f3fa0b 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -3376,6 +3376,7 @@ static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva) static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu) { + amd_clear_divider(); } static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu) diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index 5f7eed97487ece63fe9a3ef2f739a544a2672a00..6f5321b36dbb14cf21198fd55ae997479d213078 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -11,7 +11,7 @@ #include #include - .section .text.__x86.indirect_thunk + .section .text..__x86.indirect_thunk .macro RETPOLINE reg ANNOTATE_INTRA_FUNCTION_CALL @@ -75,74 +75,105 @@ SYM_CODE_END(__x86_indirect_thunk_array) #ifdef CONFIG_RETHUNK /* - * srso_untrain_ret_alias() and srso_safe_ret_alias() are placed at + * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at * special addresses: * - * - srso_untrain_ret_alias() is 2M aligned - * - srso_safe_ret_alias() is also in the same 2M page but bits 2, 8, 14 + * - srso_alias_untrain_ret() is 2M aligned + * - srso_alias_safe_ret() is also in the same 2M page but bits 2, 8, 14 * and 20 in its virtual address are set (while those bits in the - * srso_untrain_ret_alias() function are cleared). + * srso_alias_untrain_ret() function are cleared). * * This guarantees that those two addresses will alias in the branch * target buffer of Zen3/4 generations, leading to any potential * poisoned entries at that BTB slot to get evicted. * - * As a result, srso_safe_ret_alias() becomes a safe return. + * As a result, srso_alias_safe_ret() becomes a safe return. */ #ifdef CONFIG_CPU_SRSO - .section .text.__x86.rethunk_untrain + .section .text..__x86.rethunk_untrain -SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) +SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) + UNWIND_HINT_FUNC ASM_NOP2 lfence - jmp __x86_return_thunk -SYM_FUNC_END(srso_untrain_ret_alias) -__EXPORT_THUNK(srso_untrain_ret_alias) + jmp srso_alias_return_thunk +SYM_FUNC_END(srso_alias_untrain_ret) +__EXPORT_THUNK(srso_alias_untrain_ret) - .section .text.__x86.rethunk_safe + .section .text..__x86.rethunk_safe +#else +/* dummy definition for alternatives */ +SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) + ANNOTATE_UNRET_SAFE + ret + int3 +SYM_FUNC_END(srso_alias_untrain_ret) #endif -/* Needs a definition for the __x86_return_thunk alternative below. */ -SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) -#ifdef CONFIG_CPU_SRSO - add $8, %_ASM_SP +SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE) + lea 8(%_ASM_SP), %_ASM_SP UNWIND_HINT_FUNC -#endif ANNOTATE_UNRET_SAFE ret int3 -SYM_FUNC_END(srso_safe_ret_alias) +SYM_FUNC_END(srso_alias_safe_ret) - .section .text.__x86.return_thunk + .section .text..__x86.return_thunk + +SYM_CODE_START(srso_alias_return_thunk) + UNWIND_HINT_FUNC + ANNOTATE_NOENDBR + call srso_alias_safe_ret + ud2 +SYM_CODE_END(srso_alias_return_thunk) + +/* + * Some generic notes on the untraining sequences: + * + * They are interchangeable when it comes to flushing potentially wrong + * RET predictions from the BTB. 
+ * + * The SRSO Zen1/2 (MOVABS) untraining sequence is longer than the + * Retbleed sequence because the return sequence done there + * (srso_safe_ret()) is longer and the return sequence must fully nest + * (end before) the untraining sequence. Therefore, the untraining + * sequence must fully overlap the return sequence. + * + * Regarding alignment - the instructions which need to be untrained, + * must all start at a cacheline boundary for Zen1/2 generations. That + * is, instruction sequences starting at srso_safe_ret() and + * the respective instruction sequences at retbleed_return_thunk() + * must start at a cacheline boundary. + */ /* * Safety details here pertain to the AMD Zen{1,2} microarchitecture: - * 1) The RET at __x86_return_thunk must be on a 64 byte boundary, for + * 1) The RET at retbleed_return_thunk must be on a 64 byte boundary, for * alignment within the BTB. - * 2) The instruction at zen_untrain_ret must contain, and not + * 2) The instruction at retbleed_untrain_ret must contain, and not * end with, the 0xc3 byte of the RET. * 3) STIBP must be enabled, or SMT disabled, to prevent the sibling thread * from re-poisioning the BTB prediction. */ .align 64 - .skip 64 - (__ret - zen_untrain_ret), 0xcc -SYM_FUNC_START_NOALIGN(zen_untrain_ret); + .skip 64 - (retbleed_return_thunk - retbleed_untrain_ret), 0xcc +SYM_FUNC_START_NOALIGN(retbleed_untrain_ret); /* - * As executed from zen_untrain_ret, this is: + * As executed from retbleed_untrain_ret, this is: * * TEST $0xcc, %bl * LFENCE - * JMP __x86_return_thunk + * JMP retbleed_return_thunk * * Executing the TEST instruction has a side effect of evicting any BTB * prediction (potentially attacker controlled) attached to the RET, as - * __x86_return_thunk + 1 isn't an instruction boundary at the moment. + * retbleed_return_thunk + 1 isn't an instruction boundary at the moment. */ .byte 0xf6 /* - * As executed from __x86_return_thunk, this is a plain RET. + * As executed from retbleed_return_thunk, this is a plain RET. * * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8. * @@ -154,13 +185,13 @@ SYM_FUNC_START_NOALIGN(zen_untrain_ret); * With SMT enabled and STIBP active, a sibling thread cannot poison * RET's prediction to a type of its choice, but can evict the * prediction due to competitive sharing. If the prediction is - * evicted, __x86_return_thunk will suffer Straight Line Speculation + * evicted, retbleed_return_thunk will suffer Straight Line Speculation * which will be contained safely by the INT3. */ -SYM_INNER_LABEL(__ret, SYM_L_GLOBAL) +SYM_INNER_LABEL(retbleed_return_thunk, SYM_L_GLOBAL) ret int3 -SYM_CODE_END(__ret) +SYM_CODE_END(retbleed_return_thunk) /* * Ensure the TEST decoding / BTB invalidation is complete. @@ -171,16 +202,16 @@ SYM_CODE_END(__ret) * Jump back and execute the RET in the middle of the TEST instruction. * INT3 is for SLS protection. */ - jmp __ret + jmp retbleed_return_thunk int3 -SYM_FUNC_END(zen_untrain_ret) -__EXPORT_THUNK(zen_untrain_ret) +SYM_FUNC_END(retbleed_untrain_ret) +__EXPORT_THUNK(retbleed_untrain_ret) /* - * SRSO untraining sequence for Zen1/2, similar to zen_untrain_ret() + * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret() * above. 
On kernel entry, srso_untrain_ret() is executed which is a * - * movabs $0xccccccc308c48348,%rax + * movabs $0xccccc30824648d48,%rax * * and when the return thunk executes the inner label srso_safe_ret() * later, it is a stack manipulation and a RET which is mispredicted and @@ -191,22 +222,44 @@ __EXPORT_THUNK(zen_untrain_ret) SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) .byte 0x48, 0xb8 +/* + * This forces the function return instruction to speculate into a trap + * (UD2 in srso_return_thunk() below). This RET will then mispredict + * and execution will continue at the return site read from the top of + * the stack. + */ SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL) - add $8, %_ASM_SP + lea 8(%_ASM_SP), %_ASM_SP ret int3 int3 - int3 + /* end of movabs */ lfence call srso_safe_ret - int3 + ud2 SYM_CODE_END(srso_safe_ret) SYM_FUNC_END(srso_untrain_ret) __EXPORT_THUNK(srso_untrain_ret) -SYM_FUNC_START(__x86_return_thunk) - ALTERNATIVE_2 "jmp __ret", "call srso_safe_ret", X86_FEATURE_SRSO, \ - "call srso_safe_ret_alias", X86_FEATURE_SRSO_ALIAS +SYM_CODE_START(srso_return_thunk) + UNWIND_HINT_FUNC + ANNOTATE_NOENDBR + call srso_safe_ret + ud2 +SYM_CODE_END(srso_return_thunk) + +SYM_FUNC_START(entry_untrain_ret) + ALTERNATIVE_2 "jmp retbleed_untrain_ret", \ + "jmp srso_untrain_ret", X86_FEATURE_SRSO, \ + "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS +SYM_FUNC_END(entry_untrain_ret) +__EXPORT_THUNK(entry_untrain_ret) + +SYM_CODE_START(__x86_return_thunk) + UNWIND_HINT_FUNC + ANNOTATE_NOENDBR + ANNOTATE_UNRET_SAFE + ret int3 SYM_CODE_END(__x86_return_thunk) EXPORT_SYMBOL(__x86_return_thunk) diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 63491748dc8d7d4b11504b3259bab6e0e0e44eaa..95cbd5790ed602aa7363f1f1b95d7c0eec3cc4ae 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3740,7 +3740,7 @@ static int rbd_lock(struct rbd_device *rbd_dev) ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie, RBD_LOCK_TAG, "", 0); - if (ret) + if (ret && ret != -EEXIST) return ret; __rbd_lock(rbd_dev, cookie); @@ -3914,10 +3914,26 @@ static void wake_lock_waiters(struct rbd_device *rbd_dev, int result) list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list); } -static int get_lock_owner_info(struct rbd_device *rbd_dev, - struct ceph_locker **lockers, u32 *num_lockers) +static bool locker_equal(const struct ceph_locker *lhs, + const struct ceph_locker *rhs) +{ + return lhs->id.name.type == rhs->id.name.type && + lhs->id.name.num == rhs->id.name.num && + !strcmp(lhs->id.cookie, rhs->id.cookie) && + ceph_addr_equal_no_type(&lhs->info.addr, &rhs->info.addr); +} + +static void free_locker(struct ceph_locker *locker) +{ + if (locker) + ceph_free_lockers(locker, 1); +} + +static struct ceph_locker *get_lock_owner_info(struct rbd_device *rbd_dev) { struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; + struct ceph_locker *lockers; + u32 num_lockers; u8 lock_type; char *lock_tag; int ret; @@ -3926,39 +3942,45 @@ static int get_lock_owner_info(struct rbd_device *rbd_dev, ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, RBD_LOCK_NAME, - &lock_type, &lock_tag, lockers, num_lockers); - if (ret) - return ret; + &lock_type, &lock_tag, &lockers, &num_lockers); + if (ret) { + rbd_warn(rbd_dev, "failed to get header lockers: %d", ret); + return ERR_PTR(ret); + } - if (*num_lockers == 0) { + if (num_lockers == 0) { dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev); 
+ lockers = NULL; goto out; } if (strcmp(lock_tag, RBD_LOCK_TAG)) { rbd_warn(rbd_dev, "locked by external mechanism, tag %s", lock_tag); - ret = -EBUSY; - goto out; + goto err_busy; } if (lock_type == CEPH_CLS_LOCK_SHARED) { rbd_warn(rbd_dev, "shared lock type detected"); - ret = -EBUSY; - goto out; + goto err_busy; } - if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX, + WARN_ON(num_lockers != 1); + if (strncmp(lockers[0].id.cookie, RBD_LOCK_COOKIE_PREFIX, strlen(RBD_LOCK_COOKIE_PREFIX))) { rbd_warn(rbd_dev, "locked by external mechanism, cookie %s", - (*lockers)[0].id.cookie); - ret = -EBUSY; - goto out; + lockers[0].id.cookie); + goto err_busy; } out: kfree(lock_tag); - return ret; + return lockers; + +err_busy: + kfree(lock_tag); + ceph_free_lockers(lockers, num_lockers); + return ERR_PTR(-EBUSY); } static int find_watcher(struct rbd_device *rbd_dev, @@ -3974,13 +3996,19 @@ static int find_watcher(struct rbd_device *rbd_dev, ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, &watchers, &num_watchers); - if (ret) + if (ret) { + rbd_warn(rbd_dev, "failed to get watchers: %d", ret); return ret; + } sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie); for (i = 0; i < num_watchers; i++) { - if (!memcmp(&watchers[i].addr, &locker->info.addr, - sizeof(locker->info.addr)) && + /* + * Ignore addr->type while comparing. This mimics + * entity_addr_t::get_legacy_str() + strcmp(). + */ + if (ceph_addr_equal_no_type(&watchers[i].addr, + &locker->info.addr) && watchers[i].cookie == cookie) { struct rbd_client_id cid = { .gid = le64_to_cpu(watchers[i].name.num), @@ -4008,51 +4036,72 @@ static int find_watcher(struct rbd_device *rbd_dev, static int rbd_try_lock(struct rbd_device *rbd_dev) { struct ceph_client *client = rbd_dev->rbd_client->client; - struct ceph_locker *lockers; - u32 num_lockers; + struct ceph_locker *locker, *refreshed_locker; int ret; for (;;) { + locker = refreshed_locker = NULL; + ret = rbd_lock(rbd_dev); - if (ret != -EBUSY) - return ret; + if (!ret) + goto out; + if (ret != -EBUSY) { + rbd_warn(rbd_dev, "failed to lock header: %d", ret); + goto out; + } /* determine if the current lock holder is still alive */ - ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers); - if (ret) - return ret; - - if (num_lockers == 0) + locker = get_lock_owner_info(rbd_dev); + if (IS_ERR(locker)) { + ret = PTR_ERR(locker); + locker = NULL; + goto out; + } + if (!locker) goto again; - ret = find_watcher(rbd_dev, lockers); + ret = find_watcher(rbd_dev, locker); if (ret) goto out; /* request lock or error */ + refreshed_locker = get_lock_owner_info(rbd_dev); + if (IS_ERR(refreshed_locker)) { + ret = PTR_ERR(refreshed_locker); + refreshed_locker = NULL; + goto out; + } + if (!refreshed_locker || + !locker_equal(locker, refreshed_locker)) + goto again; + rbd_warn(rbd_dev, "breaking header lock owned by %s%llu", - ENTITY_NAME(lockers[0].id.name)); + ENTITY_NAME(locker->id.name)); ret = ceph_monc_blocklist_add(&client->monc, - &lockers[0].info.addr); + &locker->info.addr); if (ret) { - rbd_warn(rbd_dev, "blocklist of %s%llu failed: %d", - ENTITY_NAME(lockers[0].id.name), ret); + rbd_warn(rbd_dev, "failed to blocklist %s%llu: %d", + ENTITY_NAME(locker->id.name), ret); goto out; } ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, RBD_LOCK_NAME, - lockers[0].id.cookie, - &lockers[0].id.name); - if (ret && ret != -ENOENT) + locker->id.cookie, &locker->id.name); + if (ret && ret != -ENOENT) { + rbd_warn(rbd_dev, 
"failed to break header lock: %d", + ret); goto out; + } again: - ceph_free_lockers(lockers, num_lockers); + free_locker(refreshed_locker); + free_locker(locker); } out: - ceph_free_lockers(lockers, num_lockers); + free_locker(refreshed_locker); + free_locker(locker); return ret; } @@ -4102,11 +4151,8 @@ static int rbd_try_acquire_lock(struct rbd_device *rbd_dev) ret = rbd_try_lock(rbd_dev); if (ret < 0) { - rbd_warn(rbd_dev, "failed to lock header: %d", ret); - if (ret == -EBLOCKLISTED) - goto out; - - ret = 1; /* request lock anyway */ + rbd_warn(rbd_dev, "failed to acquire lock: %d", ret); + goto out; } if (ret > 0) { up_write(&rbd_dev->lock_rwsem); @@ -6656,12 +6702,11 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev) cancel_delayed_work_sync(&rbd_dev->lock_dwork); if (!ret) ret = -ETIMEDOUT; - } - if (ret) { - rbd_warn(rbd_dev, "failed to acquire exclusive lock: %ld", ret); - return ret; + rbd_warn(rbd_dev, "failed to acquire lock: %ld", ret); } + if (ret) + return ret; /* * The lock may have been released by now, unless automatic lock diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 2695ece47eb0e242d5999a1832b2f1b780816ad2..49d5375b04f40a6acd728201ef629c6c3f3e896b 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -432,6 +432,9 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0489, 0xe0d9), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0489, 0xe0f5), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x13d3, 0x3568), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile index 397e35392bff8340b7205577f1a14a0269a75129..16c47a0616ae4798f567005215b2114ed3abd580 100644 --- a/drivers/bus/Makefile +++ b/drivers/bus/Makefile @@ -38,4 +38,4 @@ obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o obj-$(CONFIG_DA8XX_MSTPRI) += da8xx-mstpri.o # MHI -obj-$(CONFIG_MHI_BUS) += mhi/ +obj-y += mhi/ diff --git a/drivers/bus/mhi/Kconfig b/drivers/bus/mhi/Kconfig index e841c1097fb4d8b41ae62930640bf0d8bd12e608..4748df7f9cd5835f5cd0fdce4f3bd95a3e1921db 100644 --- a/drivers/bus/mhi/Kconfig +++ b/drivers/bus/mhi/Kconfig @@ -2,21 +2,7 @@ # # MHI bus # -# Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. +# Copyright (c) 2021, Linaro Ltd. # -config MHI_BUS - tristate "Modem Host Interface (MHI) bus" - help - Bus driver for MHI protocol. Modem Host Interface (MHI) is a - communication protocol used by the host processors to control - and communicate with modem devices over a high speed peripheral - bus or shared memory. - -config MHI_BUS_DEBUG - bool "Debugfs support for the MHI bus" - depends on MHI_BUS && DEBUG_FS - help - Enable debugfs support for use with the MHI transport. Allows - reading and/or modifying some values within the MHI controller - for debug and test purposes. 
+source "drivers/bus/mhi/host/Kconfig" diff --git a/drivers/bus/mhi/Makefile b/drivers/bus/mhi/Makefile index 19e6443b72df4645ade41a232a4f3a40b6bf7b9d..5f5708a249f54eec3a00387ab78845d95dcca21f 100644 --- a/drivers/bus/mhi/Makefile +++ b/drivers/bus/mhi/Makefile @@ -1,2 +1,2 @@ -# core layer -obj-y += core/ +# Host MHI stack +obj-y += host/ diff --git a/drivers/bus/mhi/host/Kconfig b/drivers/bus/mhi/host/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..da5cd0c9fc620ab595e742c422f1a22a2a84c7b9 --- /dev/null +++ b/drivers/bus/mhi/host/Kconfig @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# MHI bus +# +# Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. +# + +config MHI_BUS + tristate "Modem Host Interface (MHI) bus" + help + Bus driver for MHI protocol. Modem Host Interface (MHI) is a + communication protocol used by the host processors to control + and communicate with modem devices over a high speed peripheral + bus or shared memory. + +config MHI_BUS_DEBUG + bool "Debugfs support for the MHI bus" + depends on MHI_BUS && DEBUG_FS + help + Enable debugfs support for use with the MHI transport. Allows + reading and/or modifying some values within the MHI controller + for debug and test purposes. + +config MHI_BUS_PCI_GENERIC + tristate "MHI PCI controller driver" + depends on MHI_BUS + depends on PCI + help + This driver provides MHI PCI controller driver for devices such as + Qualcomm SDX55 based PCIe modems. + diff --git a/drivers/bus/mhi/core/Makefile b/drivers/bus/mhi/host/Makefile similarity index 54% rename from drivers/bus/mhi/core/Makefile rename to drivers/bus/mhi/host/Makefile index c3feb4130aa371f5d4209cec6c4119748d4ba8e9..859c2f38451c669b3d3014c374b2b957c99a1cfe 100644 --- a/drivers/bus/mhi/core/Makefile +++ b/drivers/bus/mhi/host/Makefile @@ -1,4 +1,6 @@ obj-$(CONFIG_MHI_BUS) += mhi.o - mhi-y := init.o main.o pm.o boot.o mhi-$(CONFIG_MHI_BUS_DEBUG) += debugfs.o + +obj-$(CONFIG_MHI_BUS_PCI_GENERIC) += mhi_pci_generic.o +mhi_pci_generic-y += pci_generic.o diff --git a/drivers/bus/mhi/core/boot.c b/drivers/bus/mhi/host/boot.c similarity index 100% rename from drivers/bus/mhi/core/boot.c rename to drivers/bus/mhi/host/boot.c diff --git a/drivers/bus/mhi/core/debugfs.c b/drivers/bus/mhi/host/debugfs.c similarity index 100% rename from drivers/bus/mhi/core/debugfs.c rename to drivers/bus/mhi/host/debugfs.c diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/host/init.c similarity index 98% rename from drivers/bus/mhi/core/init.c rename to drivers/bus/mhi/host/init.c index 0d0386f67ffe2a44d650c54be641d0b1e27a02e4..2cc48f96afdbc5476e437443921e5386bcb6c2a9 100644 --- a/drivers/bus/mhi/core/init.c +++ b/drivers/bus/mhi/host/init.c @@ -498,6 +498,12 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl) return -EIO; } + if (val >= mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB)) { + dev_err(dev, "CHDB offset: 0x%x is out of range: 0x%zx\n", + val, mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB)); + return -ERANGE; + } + /* Setup wake db */ mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB); mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0); @@ -517,6 +523,12 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl) return -EIO; } + if (val >= mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings)) { + dev_err(dev, "ERDB offset: 0x%x is out of range: 0x%zx\n", + val, mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings)); + return -ERANGE; + } + /* Setup event db address for each ev_ring */ mhi_event = mhi_cntrl->mhi_event; for (i = 0; i 
< mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) { diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/host/internal.h similarity index 100% rename from drivers/bus/mhi/core/internal.h rename to drivers/bus/mhi/host/internal.h diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/host/main.c similarity index 100% rename from drivers/bus/mhi/core/main.c rename to drivers/bus/mhi/host/main.c diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c new file mode 100644 index 0000000000000000000000000000000000000000..e3df838c3c80e21d9b9df4fa770efeb98281a3ef --- /dev/null +++ b/drivers/bus/mhi/host/pci_generic.c @@ -0,0 +1,345 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * MHI PCI driver - MHI over PCI controller driver + * + * This module is a generic driver for registering MHI-over-PCI devices, + * such as PCIe QCOM modems. + * + * Copyright (C) 2020 Linaro Ltd + */ + +#include +#include +#include +#include + +#define MHI_PCI_DEFAULT_BAR_NUM 0 + +/** + * struct mhi_pci_dev_info - MHI PCI device specific information + * @config: MHI controller configuration + * @name: name of the PCI module + * @fw: firmware path (if any) + * @edl: emergency download mode firmware path (if any) + * @bar_num: PCI base address register to use for MHI MMIO register space + * @dma_data_width: DMA transfer word size (32 or 64 bits) + */ +struct mhi_pci_dev_info { + const struct mhi_controller_config *config; + const char *name; + const char *fw; + const char *edl; + unsigned int bar_num; + unsigned int dma_data_width; +}; + +#define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \ + { \ + .num = ch_num, \ + .name = ch_name, \ + .num_elements = el_count, \ + .event_ring = ev_ring, \ + .dir = DMA_TO_DEVICE, \ + .ee_mask = BIT(MHI_EE_AMSS), \ + .pollcfg = 0, \ + .doorbell = MHI_DB_BRST_DISABLE, \ + .lpm_notify = false, \ + .offload_channel = false, \ + .doorbell_mode_switch = false, \ + } \ + +#define MHI_CHANNEL_CONFIG_DL(ch_num, ch_name, el_count, ev_ring) \ + { \ + .num = ch_num, \ + .name = ch_name, \ + .num_elements = el_count, \ + .event_ring = ev_ring, \ + .dir = DMA_FROM_DEVICE, \ + .ee_mask = BIT(MHI_EE_AMSS), \ + .pollcfg = 0, \ + .doorbell = MHI_DB_BRST_DISABLE, \ + .lpm_notify = false, \ + .offload_channel = false, \ + .doorbell_mode_switch = false, \ + } + +#define MHI_EVENT_CONFIG_CTRL(ev_ring) \ + { \ + .num_elements = 64, \ + .irq_moderation_ms = 0, \ + .irq = (ev_ring) + 1, \ + .priority = 1, \ + .mode = MHI_DB_BRST_DISABLE, \ + .data_type = MHI_ER_CTRL, \ + .hardware_event = false, \ + .client_managed = false, \ + .offload_channel = false, \ + } + +#define MHI_EVENT_CONFIG_DATA(ev_ring) \ + { \ + .num_elements = 128, \ + .irq_moderation_ms = 5, \ + .irq = (ev_ring) + 1, \ + .priority = 1, \ + .mode = MHI_DB_BRST_DISABLE, \ + .data_type = MHI_ER_DATA, \ + .hardware_event = false, \ + .client_managed = false, \ + .offload_channel = false, \ + } + +#define MHI_EVENT_CONFIG_HW_DATA(ev_ring, ch_num) \ + { \ + .num_elements = 128, \ + .irq_moderation_ms = 5, \ + .irq = (ev_ring) + 1, \ + .priority = 1, \ + .mode = MHI_DB_BRST_DISABLE, \ + .data_type = MHI_ER_DATA, \ + .hardware_event = true, \ + .client_managed = false, \ + .offload_channel = false, \ + .channel = ch_num, \ + } + +static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = { + MHI_CHANNEL_CONFIG_UL(12, "MBIM", 4, 0), + MHI_CHANNEL_CONFIG_DL(13, "MBIM", 4, 0), + MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0), + MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0), + 
MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0), + MHI_CHANNEL_CONFIG_DL(21, "IPCR", 8, 0), + MHI_CHANNEL_CONFIG_UL(100, "IP_HW0", 128, 1), + MHI_CHANNEL_CONFIG_DL(101, "IP_HW0", 128, 2), +}; + +static const struct mhi_event_config modem_qcom_v1_mhi_events[] = { + /* first ring is control+data ring */ + MHI_EVENT_CONFIG_CTRL(0), + /* Hardware channels request dedicated hardware event rings */ + MHI_EVENT_CONFIG_HW_DATA(1, 100), + MHI_EVENT_CONFIG_HW_DATA(2, 101) +}; + +static const struct mhi_controller_config modem_qcom_v1_mhiv_config = { + .max_channels = 128, + .timeout_ms = 5000, + .num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels), + .ch_cfg = modem_qcom_v1_mhi_channels, + .num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events), + .event_cfg = modem_qcom_v1_mhi_events, +}; + +static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = { + .name = "qcom-sdx55m", + .fw = "qcom/sdx55m/sbl1.mbn", + .edl = "qcom/sdx55m/edl.mbn", + .config = &modem_qcom_v1_mhiv_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32 +}; + +static const struct pci_device_id mhi_pci_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306), + .driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info }, + { } +}; +MODULE_DEVICE_TABLE(pci, mhi_pci_id_table); + +static int mhi_pci_read_reg(struct mhi_controller *mhi_cntrl, + void __iomem *addr, u32 *out) +{ + *out = readl(addr); + return 0; +} + +static void mhi_pci_write_reg(struct mhi_controller *mhi_cntrl, + void __iomem *addr, u32 val) +{ + writel(val, addr); +} + +static void mhi_pci_status_cb(struct mhi_controller *mhi_cntrl, + enum mhi_callback cb) +{ + /* Nothing to do for now */ +} + +static int mhi_pci_claim(struct mhi_controller *mhi_cntrl, + unsigned int bar_num, u64 dma_mask) +{ + struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev); + int err; + + err = pci_assign_resource(pdev, bar_num); + if (err) + return err; + + err = pcim_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "failed to enable pci device: %d\n", err); + return err; + } + + err = pcim_iomap_regions(pdev, 1 << bar_num, pci_name(pdev)); + if (err) { + dev_err(&pdev->dev, "failed to map pci region: %d\n", err); + return err; + } + mhi_cntrl->regs = pcim_iomap_table(pdev)[bar_num]; + + err = pci_set_dma_mask(pdev, dma_mask); + if (err) { + dev_err(&pdev->dev, "Cannot set proper DMA mask\n"); + return err; + } + + err = pci_set_consistent_dma_mask(pdev, dma_mask); + if (err) { + dev_err(&pdev->dev, "set consistent dma mask failed\n"); + return err; + } + + pci_set_master(pdev); + + return 0; +} + +static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl, + const struct mhi_controller_config *mhi_cntrl_config) +{ + struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev); + int nr_vectors, i; + int *irq; + + /* + * Alloc one MSI vector for BHI + one vector per event ring, ideally... + * No explicit pci_free_irq_vectors required, done by pcim_release. + */ + mhi_cntrl->nr_irqs = 1 + mhi_cntrl_config->num_events; + + nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSI); + if (nr_vectors < 0) { + dev_err(&pdev->dev, "Error allocating MSI vectors %d\n", + nr_vectors); + return nr_vectors; + } + + if (nr_vectors < mhi_cntrl->nr_irqs) { + dev_warn(&pdev->dev, "Not enough MSI vectors (%d/%d), use shared MSI\n", + nr_vectors, mhi_cntrl_config->num_events); + } + + irq = devm_kcalloc(&pdev->dev, mhi_cntrl->nr_irqs, sizeof(int), GFP_KERNEL); + if (!irq) + return -ENOMEM; + + for (i = 0; i < mhi_cntrl->nr_irqs; i++) { + int vector = i >= nr_vectors ? 
(nr_vectors - 1) : i; + + irq[i] = pci_irq_vector(pdev, vector); + } + + mhi_cntrl->irq = irq; + + return 0; +} + +static int mhi_pci_runtime_get(struct mhi_controller *mhi_cntrl) +{ + /* no PM for now */ + return 0; +} + +static void mhi_pci_runtime_put(struct mhi_controller *mhi_cntrl) +{ + /* no PM for now */ +} + +static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + const struct mhi_pci_dev_info *info = (struct mhi_pci_dev_info *) id->driver_data; + const struct mhi_controller_config *mhi_cntrl_config; + struct mhi_controller *mhi_cntrl; + int err; + + dev_dbg(&pdev->dev, "MHI PCI device found: %s\n", info->name); + + mhi_cntrl = mhi_alloc_controller(); + if (!mhi_cntrl) + return -ENOMEM; + + mhi_cntrl_config = info->config; + mhi_cntrl->cntrl_dev = &pdev->dev; + mhi_cntrl->iova_start = 0; + mhi_cntrl->iova_stop = DMA_BIT_MASK(info->dma_data_width); + mhi_cntrl->fw_image = info->fw; + mhi_cntrl->edl_image = info->edl; + + mhi_cntrl->read_reg = mhi_pci_read_reg; + mhi_cntrl->write_reg = mhi_pci_write_reg; + mhi_cntrl->status_cb = mhi_pci_status_cb; + mhi_cntrl->runtime_get = mhi_pci_runtime_get; + mhi_cntrl->runtime_put = mhi_pci_runtime_put; + + err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width)); + if (err) + goto err_release; + + err = mhi_pci_get_irqs(mhi_cntrl, mhi_cntrl_config); + if (err) + goto err_release; + + pci_set_drvdata(pdev, mhi_cntrl); + + err = mhi_register_controller(mhi_cntrl, mhi_cntrl_config); + if (err) + goto err_release; + + /* MHI bus does not power up the controller by default */ + err = mhi_prepare_for_power_up(mhi_cntrl); + if (err) { + dev_err(&pdev->dev, "failed to prepare MHI controller\n"); + goto err_unregister; + } + + err = mhi_sync_power_up(mhi_cntrl); + if (err) { + dev_err(&pdev->dev, "failed to power up MHI controller\n"); + goto err_unprepare; + } + + return 0; + +err_unprepare: + mhi_unprepare_after_power_down(mhi_cntrl); +err_unregister: + mhi_unregister_controller(mhi_cntrl); +err_release: + mhi_free_controller(mhi_cntrl); + + return err; +} + +static void mhi_pci_remove(struct pci_dev *pdev) +{ + struct mhi_controller *mhi_cntrl = pci_get_drvdata(pdev); + + mhi_power_down(mhi_cntrl, true); + mhi_unprepare_after_power_down(mhi_cntrl); + mhi_unregister_controller(mhi_cntrl); + mhi_free_controller(mhi_cntrl); +} + +static struct pci_driver mhi_pci_driver = { + .name = "mhi-pci-generic", + .id_table = mhi_pci_id_table, + .probe = mhi_pci_probe, + .remove = mhi_pci_remove +}; +module_pci_driver(mhi_pci_driver); + +MODULE_AUTHOR("Loic Poulain "); +MODULE_DESCRIPTION("Modem Host Interface (MHI) PCI controller driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/host/pm.c similarity index 100% rename from drivers/bus/mhi/core/pm.c rename to drivers/bus/mhi/host/pm.c diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 4b1641fe30dba412e317bc0fce5ce3f2b984f334..fcfe4d16cc1495bb40972fe36c18a0bd88fcc8f5 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -2078,6 +2078,8 @@ static int sysc_reset(struct sysc *ddata) sysc_val = sysc_read_sysconfig(ddata); sysc_val |= sysc_mask; sysc_write(ddata, sysc_offset, sysc_val); + /* Flush posted write */ + sysc_val = sysc_read_sysconfig(ddata); } if (ddata->cfg.srst_udelay) diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c index 4fb4fd4b06bdade6729ef0f15f861bc0cd262930..737aa70e2cb3d0998f2c11f534895ad1034f749e 100644 --- a/drivers/clk/clk-devres.c +++ b/drivers/clk/clk-devres.c 
@@ -205,18 +205,19 @@ EXPORT_SYMBOL(devm_clk_put); struct clk *devm_get_clk_from_child(struct device *dev, struct device_node *np, const char *con_id) { - struct clk **ptr, *clk; + struct devm_clk_state *state; + struct clk *clk; - ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL); - if (!ptr) + state = devres_alloc(devm_clk_release, sizeof(*state), GFP_KERNEL); + if (!state) return ERR_PTR(-ENOMEM); clk = of_clk_get_by_name(np, con_id); if (!IS_ERR(clk)) { - *ptr = clk; - devres_add(dev, ptr); + state->clk = clk; + devres_add(dev, state); } else { - devres_free(ptr); + devres_free(state); } return clk; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 8445bb7ae06ab867fd288bd2ec3b0e1c5a9b9da0..3b4724d60868f5c9898ab3459c596e568bdc0716 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2155,6 +2155,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, amdgpu_vm_bo_base_init(&bo_va->base, vm, bo); bo_va->ref_count = 1; + bo_va->last_pt_update = dma_fence_get_stub(); INIT_LIST_HEAD(&bo_va->valids); INIT_LIST_HEAD(&bo_va->invalids); @@ -2867,7 +2868,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, vm->update_funcs = &amdgpu_vm_cpu_funcs; else vm->update_funcs = &amdgpu_vm_sdma_funcs; - vm->last_update = NULL; + + vm->last_update = dma_fence_get_stub(); vm->last_unlocked = dma_fence_get_stub(); mutex_init(&vm->eviction_lock); @@ -3042,7 +3044,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, vm->update_funcs = &amdgpu_vm_sdma_funcs; } dma_fence_put(vm->last_update); - vm->last_update = NULL; + vm->last_update = dma_fence_get_stub(); vm->is_compute_context = true; if (vm->pasid) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 71a85c5306ed0c98f01190895d2e73431eba03bd..1c669f115dd8088c1148542459ed86ef2914a63c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -3282,7 +3282,9 @@ void dcn10_wait_for_mpcc_disconnect( if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) { struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst); - res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst); + if (pipe_ctx->stream_res.tg && + pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg)) + res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst); pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false; hubp->funcs->set_blank(hubp, true); } diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 7b69f81444ebd71336acba84e6008e1f7415066b..e40321d7989815128a1d27ee769749ef401f51e3 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -1010,21 +1010,21 @@ static const struct panel_desc auo_g104sn02 = { }, }; -static const struct drm_display_mode auo_g121ean01_mode = { - .clock = 66700, - .hdisplay = 1280, - .hsync_start = 1280 + 58, - .hsync_end = 1280 + 58 + 8, - .htotal = 1280 + 58 + 8 + 70, - .vdisplay = 800, - .vsync_start = 800 + 6, - .vsync_end = 800 + 6 + 4, - .vtotal = 800 + 6 + 4 + 10, +static const struct display_timing auo_g121ean01_timing = { + .pixelclock = { 60000000, 74400000, 90000000 }, + .hactive = { 1280, 1280, 1280 }, + .hfront_porch = { 20, 50, 100 }, + .hback_porch = { 20, 50, 100 }, + .hsync_len = { 30, 100, 
200 }, + .vactive = { 800, 800, 800 }, + .vfront_porch = { 2, 10, 25 }, + .vback_porch = { 2, 10, 25 }, + .vsync_len = { 4, 18, 50 }, }; static const struct panel_desc auo_g121ean01 = { - .modes = &auo_g121ean01_mode, - .num_modes = 1, + .timings = &auo_g121ean01_timing, + .num_timings = 1, .bpc = 8, .size = { .width = 261, diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index a78b60b62caf21889eb4b93fcfb024489daa7199..87a57e5588a288c31ce7e7132ff39a574227b391 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -271,7 +271,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) { struct drm_radeon_cs *cs = data; uint64_t *chunk_array_ptr; - unsigned size, i; + u64 size; + unsigned i; u32 ring = RADEON_CS_RING_GFX; s32 priority = 0; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index ad208a5f4ebe52e1f14c982bd0c75a2aa3e5fef0..0a79c57c7db6401f6b45ec4ccc3369a5831ca641 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -1606,4 +1606,17 @@ static inline void vmw_mmio_write(u32 value, u32 *addr) { WRITE_ONCE(*addr, value); } + +static inline bool vmw_shadertype_is_valid(enum vmw_sm_type shader_model, + u32 shader_type) +{ + SVGA3dShaderType max_allowed = SVGA3D_SHADERTYPE_PREDX_MAX; + + if (shader_model >= VMW_SM_5) + max_allowed = SVGA3D_SHADERTYPE_MAX; + else if (shader_model >= VMW_SM_4) + max_allowed = SVGA3D_SHADERTYPE_DX10_MAX; + return shader_type >= SVGA3D_SHADERTYPE_MIN && shader_type < max_allowed; +} + #endif diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 739cbc77d8867cdb98fb4555467096c8eca4c6ea..4c6c2e5abf95e96ab16a02c27feb534ce5f4df6e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -1998,7 +1998,7 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv, cmd = container_of(header, typeof(*cmd), header); - if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) { + if (!vmw_shadertype_is_valid(VMW_SM_LEGACY, cmd->body.type)) { VMW_DEBUG_USER("Illegal shader type %u.\n", (unsigned int) cmd->body.type); return -EINVAL; @@ -2120,8 +2120,6 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer); - SVGA3dShaderType max_shader_num = has_sm5_context(dev_priv) ? 
- SVGA3D_NUM_SHADERTYPE : SVGA3D_NUM_SHADERTYPE_DX10; struct vmw_resource *res = NULL; struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); @@ -2138,6 +2136,14 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv, if (unlikely(ret != 0)) return ret; + if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type) || + cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) { + VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n", + (unsigned int) cmd->body.type, + (unsigned int) cmd->body.slot); + return -EINVAL; + } + binding.bi.ctx = ctx_node->ctx; binding.bi.res = res; binding.bi.bt = vmw_ctx_binding_cb; @@ -2146,14 +2152,6 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv, binding.size = cmd->body.sizeInBytes; binding.slot = cmd->body.slot; - if (binding.shader_slot >= max_shader_num || - binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) { - VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n", - (unsigned int) cmd->body.type, - (unsigned int) binding.slot); - return -EINVAL; - } - vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, binding.slot); @@ -2174,15 +2172,13 @@ static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv, { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) = container_of(header, typeof(*cmd), header); - SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ? - SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX; u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dShaderResourceViewId); if ((u64) cmd->body.startView + (u64) num_sr_view > (u64) SVGA3D_DX_MAX_SRVIEWS || - cmd->body.type >= max_allowed) { + !vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) { VMW_DEBUG_USER("Invalid shader binding.\n"); return -EINVAL; } @@ -2206,8 +2202,6 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader); - SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ? 
- SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX; struct vmw_resource *res = NULL; struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); struct vmw_ctx_bindinfo_shader binding; @@ -2218,8 +2212,7 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv, cmd = container_of(header, typeof(*cmd), header); - if (cmd->body.type >= max_allowed || - cmd->body.type < SVGA3D_SHADERTYPE_MIN) { + if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) { VMW_DEBUG_USER("Illegal shader type %u.\n", (unsigned int) cmd->body.type); return -EINVAL; diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 2b658d820b800f06f646bc7b82762f0b2cfe1f26..6712d99ad80da80e5499dd9e0f5608c13b35cd45 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -582,6 +582,7 @@ #define USB_DEVICE_ID_UGCI_FIGHTING 0x0030 #define USB_VENDOR_ID_HP 0x03f0 +#define USB_PRODUCT_ID_HP_ELITE_PRESENTER_MOUSE_464A 0x464a #define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A 0x0a4a #define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A 0x0b4a #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 9f1fcbea19eb724bf93dc6ec0ff5e569fc6bc01c..4229e5de067454fa949c6f35e2ad2e7ce6398463 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -96,6 +96,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A293), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A), HID_QUIRK_ALWAYS_POLL }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_ELITE_PRESENTER_MOUSE_464A), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL }, diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c index 35baca2f62c4e068f8884837676e2fc157728576..a524d2cd114214ae5333ef52acfa7d77dfc39418 100644 --- a/drivers/i2c/busses/i2c-bcm-iproc.c +++ b/drivers/i2c/busses/i2c-bcm-iproc.c @@ -242,13 +242,14 @@ static inline u32 iproc_i2c_rd_reg(struct bcm_iproc_i2c_dev *iproc_i2c, u32 offset) { u32 val; + unsigned long flags; if (iproc_i2c->idm_base) { - spin_lock(&iproc_i2c->idm_lock); + spin_lock_irqsave(&iproc_i2c->idm_lock, flags); writel(iproc_i2c->ape_addr_mask, iproc_i2c->idm_base + IDM_CTRL_DIRECT_OFFSET); val = readl(iproc_i2c->base + offset); - spin_unlock(&iproc_i2c->idm_lock); + spin_unlock_irqrestore(&iproc_i2c->idm_lock, flags); } else { val = readl(iproc_i2c->base + offset); } @@ -259,12 +260,14 @@ static inline u32 iproc_i2c_rd_reg(struct bcm_iproc_i2c_dev *iproc_i2c, static inline void iproc_i2c_wr_reg(struct bcm_iproc_i2c_dev *iproc_i2c, u32 offset, u32 val) { + unsigned long flags; + if (iproc_i2c->idm_base) { - spin_lock(&iproc_i2c->idm_lock); + spin_lock_irqsave(&iproc_i2c->idm_lock, flags); writel(iproc_i2c->ape_addr_mask, iproc_i2c->idm_base + IDM_CTRL_DIRECT_OFFSET); writel(val, iproc_i2c->base + offset); - spin_unlock(&iproc_i2c->idm_lock); + spin_unlock_irqrestore(&iproc_i2c->idm_lock, flags); } else { 
writel(val, iproc_i2c->base + offset); } diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index 2871cf2ee8b44a8b9e69d64d57073c32cf25d3a7..106080b25e81ce0c10498e5e0209031e412d20dc 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -432,8 +432,19 @@ i2c_dw_read(struct dw_i2c_dev *dev) regmap_read(dev->map, DW_IC_DATA_CMD, &tmp); /* Ensure length byte is a valid value */ - if (flags & I2C_M_RECV_LEN && - tmp <= I2C_SMBUS_BLOCK_MAX && tmp > 0) { + if (flags & I2C_M_RECV_LEN) { + /* + * if IC_EMPTYFIFO_HOLD_MASTER_EN is set, which cannot be + * detected from the registers, the controller can be + * disabled if the STOP bit is set. But it is only set + * after receiving block data response length in + * I2C_FUNC_SMBUS_BLOCK_DATA case. That needs to read + * another byte with STOP bit set when the block data + * response length is invalid to complete the transaction. + */ + if (!tmp || tmp > I2C_SMBUS_BLOCK_MAX) + tmp = 1; + len = i2c_dw_recv_len(dev, tmp); } *buf++ = tmp; diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig index 267553386c710191abbebe520843025209e09672..2ed303aa7de3cb79d40e7e75c882a6b5c0c84a28 100644 --- a/drivers/iio/Kconfig +++ b/drivers/iio/Kconfig @@ -70,6 +70,7 @@ config IIO_TRIGGERED_EVENT source "drivers/iio/accel/Kconfig" source "drivers/iio/adc/Kconfig" +source "drivers/iio/addac/Kconfig" source "drivers/iio/afe/Kconfig" source "drivers/iio/amplifiers/Kconfig" source "drivers/iio/chemical/Kconfig" diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile index 1712011c0f4a1696e5a455fb2a6d45945927d156..d6690e449ccec539e861d3da542c26fbb292eea0 100644 --- a/drivers/iio/Makefile +++ b/drivers/iio/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_IIO_TRIGGERED_EVENT) += industrialio-triggered-event.o obj-y += accel/ obj-y += adc/ +obj-y += addac/ obj-y += afe/ obj-y += amplifiers/ obj-y += buffer/ diff --git a/drivers/iio/adc/stx104.c b/drivers/iio/adc/stx104.c index 55bd2dc514e9367a57c441ecc7e8f4bafffc5340..b658a75d4e3a86d3466e7ea4561d98153f6a4b24 100644 --- a/drivers/iio/adc/stx104.c +++ b/drivers/iio/adc/stx104.c @@ -15,7 +15,9 @@ #include #include #include +#include #include +#include #define STX104_OUT_CHAN(chan) { \ .type = IIO_VOLTAGE, \ @@ -44,14 +46,38 @@ static unsigned int num_stx104; module_param_hw_array(base, uint, ioport, &num_stx104, 0); MODULE_PARM_DESC(base, "Apex Embedded Systems STX104 base addresses"); +/** + * struct stx104_reg - device register structure + * @ssr_ad: Software Strobe Register and ADC Data + * @achan: ADC Channel + * @dio: Digital I/O + * @dac: DAC Channels + * @cir_asr: Clear Interrupts and ADC Status + * @acr: ADC Control + * @pccr_fsh: Pacer Clock Control and FIFO Status MSB + * @acfg: ADC Configuration + */ +struct stx104_reg { + u16 ssr_ad; + u8 achan; + u8 dio; + u16 dac[2]; + u8 cir_asr; + u8 acr; + u8 pccr_fsh; + u8 acfg; +}; + /** * struct stx104_iio - IIO device private data structure + * @lock: synchronization lock to prevent I/O race conditions * @chan_out_states: channels' output states - * @base: base port address of the IIO device + * @reg: I/O address offset for the device registers */ struct stx104_iio { + struct mutex lock; unsigned int chan_out_states[STX104_NUM_OUT_CHAN]; - unsigned int base; + struct stx104_reg __iomem *reg; }; /** @@ -64,7 +90,7 @@ struct stx104_iio { struct stx104_gpio { struct gpio_chip chip; spinlock_t lock; - unsigned int base; + u8 __iomem *base; unsigned int out_state; }; @@ -72,6 +98,7 @@ 
static int stx104_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long mask) { struct stx104_iio *const priv = iio_priv(indio_dev); + struct stx104_reg __iomem *const reg = priv->reg; unsigned int adc_config; int adbu; int gain; @@ -79,7 +106,7 @@ static int stx104_read_raw(struct iio_dev *indio_dev, switch (mask) { case IIO_CHAN_INFO_HARDWAREGAIN: /* get gain configuration */ - adc_config = inb(priv->base + 11); + adc_config = ioread8(®->acfg); gain = adc_config & 0x3; *val = 1 << gain; @@ -90,25 +117,31 @@ static int stx104_read_raw(struct iio_dev *indio_dev, return IIO_VAL_INT; } + mutex_lock(&priv->lock); + /* select ADC channel */ - outb(chan->channel | (chan->channel << 4), priv->base + 2); + iowrite8(chan->channel | (chan->channel << 4), ®->achan); + + /* trigger ADC sample capture by writing to the 8-bit + * Software Strobe Register and wait for completion + */ + iowrite8(0, ®->ssr_ad); + while (ioread8(®->cir_asr) & BIT(7)); - /* trigger ADC sample capture and wait for completion */ - outb(0, priv->base); - while (inb(priv->base + 8) & BIT(7)); + *val = ioread16(®->ssr_ad); - *val = inw(priv->base); + mutex_unlock(&priv->lock); return IIO_VAL_INT; case IIO_CHAN_INFO_OFFSET: /* get ADC bipolar/unipolar configuration */ - adc_config = inb(priv->base + 11); + adc_config = ioread8(®->acfg); adbu = !(adc_config & BIT(2)); *val = -32768 * adbu; return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: /* get ADC bipolar/unipolar and gain configuration */ - adc_config = inb(priv->base + 11); + adc_config = ioread8(®->acfg); adbu = !(adc_config & BIT(2)); gain = adc_config & 0x3; @@ -130,16 +163,16 @@ static int stx104_write_raw(struct iio_dev *indio_dev, /* Only four gain states (x1, x2, x4, x8) */ switch (val) { case 1: - outb(0, priv->base + 11); + iowrite8(0, &priv->reg->acfg); break; case 2: - outb(1, priv->base + 11); + iowrite8(1, &priv->reg->acfg); break; case 4: - outb(2, priv->base + 11); + iowrite8(2, &priv->reg->acfg); break; case 8: - outb(3, priv->base + 11); + iowrite8(3, &priv->reg->acfg); break; default: return -EINVAL; @@ -152,9 +185,12 @@ static int stx104_write_raw(struct iio_dev *indio_dev, if ((unsigned int)val > 65535) return -EINVAL; + mutex_lock(&priv->lock); + priv->chan_out_states[chan->channel] = val; - outw(val, priv->base + 4 + 2 * chan->channel); + iowrite16(val, &priv->reg->dac[chan->channel]); + mutex_unlock(&priv->lock); return 0; } return -EINVAL; @@ -222,7 +258,7 @@ static int stx104_gpio_get(struct gpio_chip *chip, unsigned int offset) if (offset >= 4) return -EINVAL; - return !!(inb(stx104gpio->base) & BIT(offset)); + return !!(ioread8(stx104gpio->base) & BIT(offset)); } static int stx104_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask, @@ -230,7 +266,7 @@ static int stx104_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask, { struct stx104_gpio *const stx104gpio = gpiochip_get_data(chip); - *bits = inb(stx104gpio->base); + *bits = ioread8(stx104gpio->base); return 0; } @@ -252,7 +288,7 @@ static void stx104_gpio_set(struct gpio_chip *chip, unsigned int offset, else stx104gpio->out_state &= ~mask; - outb(stx104gpio->out_state, stx104gpio->base); + iowrite8(stx104gpio->out_state, stx104gpio->base); spin_unlock_irqrestore(&stx104gpio->lock, flags); } @@ -279,7 +315,7 @@ static void stx104_gpio_set_multiple(struct gpio_chip *chip, stx104gpio->out_state &= ~*mask; stx104gpio->out_state |= *mask & *bits; - outb(stx104gpio->out_state, stx104gpio->base); + iowrite8(stx104gpio->out_state, 
stx104gpio->base); spin_unlock_irqrestore(&stx104gpio->lock, flags); } @@ -306,11 +342,16 @@ static int stx104_probe(struct device *dev, unsigned int id) return -EBUSY; } + priv = iio_priv(indio_dev); + priv->reg = devm_ioport_map(dev, base[id], STX104_EXTENT); + if (!priv->reg) + return -ENOMEM; + indio_dev->info = &stx104_info; indio_dev->modes = INDIO_DIRECT_MODE; /* determine if differential inputs */ - if (inb(base[id] + 8) & BIT(5)) { + if (ioread8(&priv->reg->cir_asr) & BIT(5)) { indio_dev->num_channels = ARRAY_SIZE(stx104_channels_diff); indio_dev->channels = stx104_channels_diff; } else { @@ -320,18 +361,17 @@ static int stx104_probe(struct device *dev, unsigned int id) indio_dev->name = dev_name(dev); - priv = iio_priv(indio_dev); - priv->base = base[id]; + mutex_init(&priv->lock); /* configure device for software trigger operation */ - outb(0, base[id] + 9); + iowrite8(0, &priv->reg->acr); /* initialize gain setting to x1 */ - outb(0, base[id] + 11); + iowrite8(0, &priv->reg->acfg); /* initialize DAC output to 0V */ - outw(0, base[id] + 4); - outw(0, base[id] + 6); + iowrite16(0, &priv->reg->dac[0]); + iowrite16(0, &priv->reg->dac[1]); stx104gpio->chip.label = dev_name(dev); stx104gpio->chip.parent = dev; @@ -346,7 +386,7 @@ static int stx104_probe(struct device *dev, unsigned int id) stx104gpio->chip.get_multiple = stx104_gpio_get_multiple; stx104gpio->chip.set = stx104_gpio_set; stx104gpio->chip.set_multiple = stx104_gpio_set_multiple; - stx104gpio->base = base[id] + 3; + stx104gpio->base = &priv->reg->dio; stx104gpio->out_state = 0x0; spin_lock_init(&stx104gpio->lock); diff --git a/drivers/iio/addac/Kconfig b/drivers/iio/addac/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..2e64d7755d5ea3b99305226b692bdbb7cfb3450a --- /dev/null +++ b/drivers/iio/addac/Kconfig @@ -0,0 +1,8 @@ +# +# ADC DAC drivers +# +# When adding new entries keep the list in alphabetical order + +menu "Analog to digital and digital to analog converters" + +endmenu diff --git a/drivers/iio/addac/Makefile b/drivers/iio/addac/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..b888b9ee12da0c03fe1026f2ba1609b32f0f19b7 --- /dev/null +++ b/drivers/iio/addac/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for industrial I/O ADDAC drivers +# + +# When adding new entries keep the list in alphabetical order diff --git a/drivers/infiniband/hw/mlx5/qpc.c b/drivers/infiniband/hw/mlx5/qpc.c index c683d7000168dbce0d36c8aadb806a58d3c8e28d..9a306da7f9496a43f333fbc1763a3e0ee5bd3ab5 100644 --- a/drivers/infiniband/hw/mlx5/qpc.c +++ b/drivers/infiniband/hw/mlx5/qpc.c @@ -297,8 +297,7 @@ int mlx5_core_destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp) MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP); MLX5_SET(destroy_qp_in, in, qpn, qp->qpn); MLX5_SET(destroy_qp_in, in, uid, qp->uid); - mlx5_cmd_exec_in(dev->mdev, destroy_qp, in); - return 0; + return mlx5_cmd_exec_in(dev->mdev, destroy_qp, in); } int mlx5_core_set_delay_drop(struct mlx5_ib_dev *dev, @@ -542,14 +541,14 @@ int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn) return mlx5_cmd_exec_in(dev->mdev, dealloc_xrcd, in); } -static void destroy_rq_tracked(struct mlx5_ib_dev *dev, u32 rqn, u16 uid) +static int destroy_rq_tracked(struct mlx5_ib_dev *dev, u32 rqn, u16 uid) { u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {}; MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ); MLX5_SET(destroy_rq_in, in, rqn, rqn); MLX5_SET(destroy_rq_in, in, uid, uid); - 
mlx5_cmd_exec_in(dev->mdev, destroy_rq, in); + return mlx5_cmd_exec_in(dev->mdev, destroy_rq, in); } int mlx5_core_create_rq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen, @@ -580,8 +579,7 @@ int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev, struct mlx5_core_qp *rq) { destroy_resource_common(dev, rq); - destroy_rq_tracked(dev, rq->qpn, rq->uid); - return 0; + return destroy_rq_tracked(dev, rq->qpn, rq->uid); } static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 uid) diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index 8ada91bdbe4d014c7f2108e3509f510d18826332..fc25b900cef71dd3a8dc2ffe87c6a2e2faf97916 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -48,7 +48,7 @@ void __iomem *mips_gic_base; static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks); -static DEFINE_SPINLOCK(gic_lock); +static DEFINE_RAW_SPINLOCK(gic_lock); static struct irq_domain *gic_irq_domain; static int gic_shared_intrs; static unsigned int gic_cpu_pin; @@ -209,7 +209,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type) irq = GIC_HWIRQ_TO_SHARED(d->hwirq); - spin_lock_irqsave(&gic_lock, flags); + raw_spin_lock_irqsave(&gic_lock, flags); switch (type & IRQ_TYPE_SENSE_MASK) { case IRQ_TYPE_EDGE_FALLING: pol = GIC_POL_FALLING_EDGE; @@ -249,7 +249,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type) else irq_set_chip_handler_name_locked(d, &gic_level_irq_controller, handle_level_irq, NULL); - spin_unlock_irqrestore(&gic_lock, flags); + raw_spin_unlock_irqrestore(&gic_lock, flags); return 0; } @@ -267,7 +267,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, return -EINVAL; /* Assumption : cpumask refers to a single CPU */ - spin_lock_irqsave(&gic_lock, flags); + raw_spin_lock_irqsave(&gic_lock, flags); /* Re-route this IRQ */ write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu))); @@ -278,7 +278,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, set_bit(irq, per_cpu_ptr(pcpu_masks, cpu)); irq_data_update_effective_affinity(d, cpumask_of(cpu)); - spin_unlock_irqrestore(&gic_lock, flags); + raw_spin_unlock_irqrestore(&gic_lock, flags); return IRQ_SET_MASK_OK; } @@ -356,12 +356,12 @@ static void gic_mask_local_irq_all_vpes(struct irq_data *d) cd = irq_data_get_irq_chip_data(d); cd->mask = false; - spin_lock_irqsave(&gic_lock, flags); + raw_spin_lock_irqsave(&gic_lock, flags); for_each_online_cpu(cpu) { write_gic_vl_other(mips_cm_vp_id(cpu)); write_gic_vo_rmask(BIT(intr)); } - spin_unlock_irqrestore(&gic_lock, flags); + raw_spin_unlock_irqrestore(&gic_lock, flags); } static void gic_unmask_local_irq_all_vpes(struct irq_data *d) @@ -374,32 +374,43 @@ static void gic_unmask_local_irq_all_vpes(struct irq_data *d) cd = irq_data_get_irq_chip_data(d); cd->mask = true; - spin_lock_irqsave(&gic_lock, flags); + raw_spin_lock_irqsave(&gic_lock, flags); for_each_online_cpu(cpu) { write_gic_vl_other(mips_cm_vp_id(cpu)); write_gic_vo_smask(BIT(intr)); } - spin_unlock_irqrestore(&gic_lock, flags); + raw_spin_unlock_irqrestore(&gic_lock, flags); } -static void gic_all_vpes_irq_cpu_online(struct irq_data *d) +static void gic_all_vpes_irq_cpu_online(void) { - struct gic_all_vpes_chip_data *cd; - unsigned int intr; + static const unsigned int local_intrs[] = { + GIC_LOCAL_INT_TIMER, + GIC_LOCAL_INT_PERFCTR, + GIC_LOCAL_INT_FDC, + }; + unsigned long flags; + int i; - intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); - cd = irq_data_get_irq_chip_data(d); + 
raw_spin_lock_irqsave(&gic_lock, flags); - write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map); - if (cd->mask) - write_gic_vl_smask(BIT(intr)); + for (i = 0; i < ARRAY_SIZE(local_intrs); i++) { + unsigned int intr = local_intrs[i]; + struct gic_all_vpes_chip_data *cd; + + cd = &gic_all_vpes_chip_data[intr]; + write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map); + if (cd->mask) + write_gic_vl_smask(BIT(intr)); + } + + raw_spin_unlock_irqrestore(&gic_lock, flags); } static struct irq_chip gic_all_vpes_local_irq_controller = { .name = "MIPS GIC Local", .irq_mask = gic_mask_local_irq_all_vpes, .irq_unmask = gic_unmask_local_irq_all_vpes, - .irq_cpu_online = gic_all_vpes_irq_cpu_online, }; static void __gic_irq_dispatch(void) @@ -423,11 +434,11 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq, data = irq_get_irq_data(virq); - spin_lock_irqsave(&gic_lock, flags); + raw_spin_lock_irqsave(&gic_lock, flags); write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin); write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu))); irq_data_update_effective_affinity(data, cpumask_of(cpu)); - spin_unlock_irqrestore(&gic_lock, flags); + raw_spin_unlock_irqrestore(&gic_lock, flags); return 0; } @@ -480,6 +491,10 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq, intr = GIC_HWIRQ_TO_LOCAL(hwirq); map = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin; + /* + * If adding support for more per-cpu interrupts, keep the the + * array in gic_all_vpes_irq_cpu_online() in sync. + */ switch (intr) { case GIC_LOCAL_INT_TIMER: /* CONFIG_MIPS_CMP workaround (see __gic_init) */ @@ -518,12 +533,12 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq, if (!gic_local_irq_is_routable(intr)) return -EPERM; - spin_lock_irqsave(&gic_lock, flags); + raw_spin_lock_irqsave(&gic_lock, flags); for_each_online_cpu(cpu) { write_gic_vl_other(mips_cm_vp_id(cpu)); write_gic_vo_map(mips_gic_vx_map_reg(intr), map); } - spin_unlock_irqrestore(&gic_lock, flags); + raw_spin_unlock_irqrestore(&gic_lock, flags); return 0; } @@ -710,8 +725,8 @@ static int gic_cpu_startup(unsigned int cpu) /* Clear all local IRQ masks (ie. disable all local interrupts) */ write_gic_vl_rmask(~0); - /* Invoke irq_cpu_online callbacks to enable desired interrupts */ - irq_cpu_online(); + /* Enable desired interrupts */ + gic_all_vpes_irq_cpu_online(); return 0; } diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 7599a122c956306864ede2b436655d555438ef2d..1667ac14060982fa667697724d25cde2d9e5eecb 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -31,11 +31,11 @@ #define DEFAULT_BUFFER_SECTORS 128 #define DEFAULT_JOURNAL_WATERMARK 50 #define DEFAULT_SYNC_MSEC 10000 -#define DEFAULT_MAX_JOURNAL_SECTORS 131072 +#define DEFAULT_MAX_JOURNAL_SECTORS (IS_ENABLED(CONFIG_64BIT) ? 131072 : 8192) #define MIN_LOG2_INTERLEAVE_SECTORS 3 #define MAX_LOG2_INTERLEAVE_SECTORS 31 #define METADATA_WORKQUEUE_MAX_ACTIVE 16 -#define RECALC_SECTORS 8192 +#define RECALC_SECTORS (IS_ENABLED(CONFIG_64BIT) ? 
32768 : 2048) #define RECALC_WRITE_SUPER 16 #define BITMAP_BLOCK_SIZE 4096 /* don't change it */ #define BITMAP_FLUSH_INTERVAL (10 * HZ) diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c index 21de1431cfcb5091d82ab7be9f55ed50c6388ed8..2c2be43a3e9ea2685f2c4a4b9b83b6e207ad79af 100644 --- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c +++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c @@ -729,6 +729,8 @@ static int vb2ops_venc_queue_setup(struct vb2_queue *vq, return -EINVAL; if (*nplanes) { + if (*nplanes != q_data->fmt->num_planes) + return -EINVAL; for (i = 0; i < *nplanes; i++) if (sizes[i] < q_data->sizeimage[i]) return -EINVAL; diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c index c62eb212cca92d7fed0ccd30e5b986c891bd9d03..e7c4b0dd588a9eb762ee287e5f2252af37a0a04f 100644 --- a/drivers/media/platform/mtk-vpu/mtk_vpu.c +++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c @@ -539,15 +539,17 @@ static int load_requested_vpu(struct mtk_vpu *vpu, int vpu_load_firmware(struct platform_device *pdev) { struct mtk_vpu *vpu; - struct device *dev = &pdev->dev; + struct device *dev; struct vpu_run *run; int ret; if (!pdev) { - dev_err(dev, "VPU platform device is invalid\n"); + pr_err("VPU platform device is invalid\n"); return -EINVAL; } + dev = &pdev->dev; + vpu = platform_get_drvdata(pdev); run = &vpu->run; diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 599b7317b59a50cb077f3a2de93a27aef8ef9935..d81baf750aebbac9f9e53593f3ada2881418589d 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -1980,14 +1980,14 @@ static void mmc_blk_mq_poll_completion(struct mmc_queue *mq, mmc_blk_urgent_bkops(mq, mqrq); } -static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req) +static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, enum mmc_issue_type issue_type) { unsigned long flags; bool put_card; spin_lock_irqsave(&mq->lock, flags); - mq->in_flight[mmc_issue_type(mq, req)] -= 1; + mq->in_flight[issue_type] -= 1; put_card = (mmc_tot_in_flight(mq) == 0); @@ -1999,6 +1999,7 @@ static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req) static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req) { + enum mmc_issue_type issue_type = mmc_issue_type(mq, req); struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); struct mmc_request *mrq = &mqrq->brq.mrq; struct mmc_host *host = mq->card->host; @@ -2014,7 +2015,7 @@ static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req) else if (likely(!blk_should_fake_timeout(req->q))) blk_mq_complete_request(req); - mmc_blk_mq_dec_in_flight(mq, req); + mmc_blk_mq_dec_in_flight(mq, issue_type); } void mmc_blk_mq_recovery(struct mmc_queue *mq) diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 03e2f965a96a89aa6e4d5667221c68a1e8f74db1..1f46694b2e531d33cd138040c374fe0899d56331 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -513,6 +513,32 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) EXPORT_SYMBOL(mmc_alloc_host); +static void devm_mmc_host_release(struct device *dev, void *res) +{ + mmc_free_host(*(struct mmc_host **)res); +} + +struct mmc_host *devm_mmc_alloc_host(struct device *dev, int extra) +{ + struct mmc_host **dr, *host; + + dr = devres_alloc(devm_mmc_host_release, sizeof(*dr), GFP_KERNEL); + if (!dr) + return ERR_PTR(-ENOMEM); + + host = mmc_alloc_host(extra, dev); + if 
(IS_ERR(host)) { + devres_free(dr); + return host; + } + + *dr = host; + devres_add(dev, dr); + + return host; +} +EXPORT_SYMBOL(devm_mmc_alloc_host); + static int mmc_validate_host_caps(struct mmc_host *host) { if (host->caps & MMC_CAP_SDIO_IRQ && !host->ops->enable_sdio_irq) { diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c index 8c2361e662774ad45bca7dc68d7ee8d64a1dc36b..985079943be76497560954207378e44ea826d7f6 100644 --- a/drivers/mmc/host/bcm2835.c +++ b/drivers/mmc/host/bcm2835.c @@ -1413,8 +1413,8 @@ static int bcm2835_probe(struct platform_device *pdev) host->max_clk = clk_get_rate(clk); host->irq = platform_get_irq(pdev, 0); - if (host->irq <= 0) { - ret = -EINVAL; + if (host->irq < 0) { + ret = host->irq; goto err; } diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index e89bd6f4b317c3099322eba01652ef532c9e2fa0..1992eea8b777efc012d7879b3f3cd29cd1c446eb 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -1122,7 +1122,7 @@ static int meson_mmc_probe(struct platform_device *pdev) struct mmc_host *mmc; int ret; - mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev); + mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(struct meson_host)); if (!mmc) return -ENOMEM; host = mmc_priv(mmc); @@ -1138,46 +1138,33 @@ static int meson_mmc_probe(struct platform_device *pdev) host->vqmmc_enabled = false; ret = mmc_regulator_get_supply(mmc); if (ret) - goto free_host; + return ret; ret = mmc_of_parse(mmc); - if (ret) { - if (ret != -EPROBE_DEFER) - dev_warn(&pdev->dev, "error parsing DT: %d\n", ret); - goto free_host; - } + if (ret) + return dev_err_probe(&pdev->dev, ret, "error parsing DT\n"); host->data = (struct meson_mmc_data *) of_device_get_match_data(&pdev->dev); - if (!host->data) { - ret = -EINVAL; - goto free_host; - } + if (!host->data) + return -EINVAL; ret = device_reset_optional(&pdev->dev); - if (ret) { - dev_err_probe(&pdev->dev, ret, "device reset failed\n"); - goto free_host; - } + if (ret) + return dev_err_probe(&pdev->dev, ret, "device reset failed\n"); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); host->regs = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(host->regs)) { - ret = PTR_ERR(host->regs); - goto free_host; - } + if (IS_ERR(host->regs)) + return PTR_ERR(host->regs); host->irq = platform_get_irq(pdev, 0); - if (host->irq <= 0) { - ret = -EINVAL; - goto free_host; - } + if (host->irq < 0) + return host->irq; host->pinctrl = devm_pinctrl_get(&pdev->dev); - if (IS_ERR(host->pinctrl)) { - ret = PTR_ERR(host->pinctrl); - goto free_host; - } + if (IS_ERR(host->pinctrl)) + return PTR_ERR(host->pinctrl); host->pins_clk_gate = pinctrl_lookup_state(host->pinctrl, "clk-gate"); @@ -1188,14 +1175,12 @@ static int meson_mmc_probe(struct platform_device *pdev) } host->core_clk = devm_clk_get(&pdev->dev, "core"); - if (IS_ERR(host->core_clk)) { - ret = PTR_ERR(host->core_clk); - goto free_host; - } + if (IS_ERR(host->core_clk)) + return PTR_ERR(host->core_clk); ret = clk_prepare_enable(host->core_clk); if (ret) - goto free_host; + return ret; ret = meson_mmc_clk_init(host); if (ret) @@ -1290,8 +1275,6 @@ static int meson_mmc_probe(struct platform_device *pdev) clk_disable_unprepare(host->mmc_clk); err_core_clk: clk_disable_unprepare(host->core_clk); -free_host: - mmc_free_host(mmc); return ret; } @@ -1315,7 +1298,6 @@ static int meson_mmc_remove(struct platform_device *pdev) clk_disable_unprepare(host->mmc_clk); clk_disable_unprepare(host->core_clk); - mmc_free_host(host->mmc); 
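/*
 * [Editorial aside -- illustration only, not part of the stable patch above.]
 * The meson-gx error-path cleanup works because the new devm_mmc_alloc_host()
 * ties the host's lifetime to the device via devres: a release callback is
 * registered with devres_add() and runs automatically when the driver
 * detaches, so the explicit mmc_free_host() calls can go away. A minimal
 * sketch of that devres pattern, using a hypothetical "foo" object:
 */
#include <linux/device.h>
#include <linux/slab.h>

struct foo { int dummy; };

static void devm_foo_release(struct device *dev, void *res)
{
	/* runs automatically when the bound device goes away */
	kfree(*(struct foo **)res);
}

static struct foo *devm_foo_alloc(struct device *dev)
{
	struct foo **dr, *foo;

	dr = devres_alloc(devm_foo_release, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return NULL;

	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	if (!foo) {
		devres_free(dr);	/* not registered yet: plain free */
		return NULL;
	}

	*dr = foo;
	devres_add(dev, dr);		/* released on driver detach */
	return foo;
}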
return 0; } diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c index 6c4f43e112826d6123bc58e85fa3285f5e407e26..7ede74bf37230c58f6a16e04c713f1b194d5c384 100644 --- a/drivers/mmc/host/sdhci_f_sdh30.c +++ b/drivers/mmc/host/sdhci_f_sdh30.c @@ -26,9 +26,16 @@ struct f_sdhost_priv { bool enable_cmd_dat_delay; }; +static void *sdhci_f_sdhost_priv(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + + return sdhci_pltfm_priv(pltfm_host); +} + static void sdhci_f_sdh30_soft_voltage_switch(struct sdhci_host *host) { - struct f_sdhost_priv *priv = sdhci_priv(host); + struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host); u32 ctrl = 0; usleep_range(2500, 3000); @@ -61,7 +68,7 @@ static unsigned int sdhci_f_sdh30_get_min_clock(struct sdhci_host *host) static void sdhci_f_sdh30_reset(struct sdhci_host *host, u8 mask) { - struct f_sdhost_priv *priv = sdhci_priv(host); + struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host); u32 ctl; if (sdhci_readw(host, SDHCI_CLOCK_CONTROL) == 0) @@ -85,30 +92,32 @@ static const struct sdhci_ops sdhci_f_sdh30_ops = { .set_uhs_signaling = sdhci_set_uhs_signaling, }; +static const struct sdhci_pltfm_data sdhci_f_sdh30_pltfm_data = { + .ops = &sdhci_f_sdh30_ops, + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC + | SDHCI_QUIRK_INVERTED_WRITE_PROTECT, + .quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE + | SDHCI_QUIRK2_TUNING_WORK_AROUND, +}; + static int sdhci_f_sdh30_probe(struct platform_device *pdev) { struct sdhci_host *host; struct device *dev = &pdev->dev; - int irq, ctrl = 0, ret = 0; + int ctrl = 0, ret = 0; struct f_sdhost_priv *priv; + struct sdhci_pltfm_host *pltfm_host; u32 reg = 0; - irq = platform_get_irq(pdev, 0); - if (irq < 0) - return irq; - - host = sdhci_alloc_host(dev, sizeof(struct f_sdhost_priv)); + host = sdhci_pltfm_init(pdev, &sdhci_f_sdh30_pltfm_data, + sizeof(struct f_sdhost_priv)); if (IS_ERR(host)) return PTR_ERR(host); - priv = sdhci_priv(host); + pltfm_host = sdhci_priv(host); + priv = sdhci_pltfm_priv(pltfm_host); priv->dev = dev; - host->quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | - SDHCI_QUIRK_INVERTED_WRITE_PROTECT; - host->quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE | - SDHCI_QUIRK2_TUNING_WORK_AROUND; - priv->enable_cmd_dat_delay = device_property_read_bool(dev, "fujitsu,cmd-dat-delay-select"); @@ -116,18 +125,6 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev) if (ret) goto err; - platform_set_drvdata(pdev, host); - - host->hw_name = "f_sdh30"; - host->ops = &sdhci_f_sdh30_ops; - host->irq = irq; - - host->ioaddr = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(host->ioaddr)) { - ret = PTR_ERR(host->ioaddr); - goto err; - } - if (dev_of_node(dev)) { sdhci_get_of_property(pdev); @@ -182,23 +179,22 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev) err_clk: clk_disable_unprepare(priv->clk_iface); err: - sdhci_free_host(host); + sdhci_pltfm_free(pdev); + return ret; } static int sdhci_f_sdh30_remove(struct platform_device *pdev) { struct sdhci_host *host = platform_get_drvdata(pdev); - struct f_sdhost_priv *priv = sdhci_priv(host); - - sdhci_remove_host(host, readl(host->ioaddr + SDHCI_INT_STATUS) == - 0xffffffff); + struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host); + struct clk *clk_iface = priv->clk_iface; + struct clk *clk = priv->clk; - clk_disable_unprepare(priv->clk_iface); - clk_disable_unprepare(priv->clk); + sdhci_pltfm_unregister(pdev); - sdhci_free_host(host); - platform_set_drvdata(pdev, NULL); + clk_disable_unprepare(clk_iface); + 
clk_disable_unprepare(clk); return 0; } diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index 9215069c61560abde3f2b388dff2849e4071c7de..b834fde3f9eda77b6b5c04f995b823d71ef0fd66 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -1317,8 +1317,8 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, return ret; host->irq = platform_get_irq(pdev, 0); - if (host->irq <= 0) { - ret = -EINVAL; + if (host->irq < 0) { + ret = host->irq; goto error_disable_mmc; } diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c index f3090216e0dcc8c471589860453259971e8bb7da..6db08070b628d9354b3675e89c5e6270c20a9d8e 100644 --- a/drivers/mmc/host/wbsd.c +++ b/drivers/mmc/host/wbsd.c @@ -1710,8 +1710,6 @@ static int wbsd_init(struct device *dev, int base, int irq, int dma, wbsd_release_resources(host); wbsd_free_mmc(dev); - - mmc_free_host(mmc); return ret; } diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 152f76f869278df8bd53c6683515bdeccb5844e5..64ba465741a7896ef77827ff6a930671e0183977 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -656,10 +656,10 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond) return NULL; arp = (struct arp_pkt *)skb_network_header(skb); - /* Don't modify or load balance ARPs that do not originate locally - * (e.g.,arrive via a bridge). + /* Don't modify or load balance ARPs that do not originate + * from the bond itself or a VLAN directly above the bond. */ - if (!bond_slave_has_mac_rx(bond, arp->mac_src)) + if (!bond_slave_has_mac_rcu(bond, arp->mac_src)) return NULL; if (arp->op_code == htons(ARPOP_REPLY)) { diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c index 282c53ef76d233535938091043c3d52072c36e9b..1bfede407270d369348d508314c71e043149bada 100644 --- a/drivers/net/can/vxcan.c +++ b/drivers/net/can/vxcan.c @@ -179,12 +179,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev, nla_peer = data[VXCAN_INFO_PEER]; ifmp = nla_data(nla_peer); - err = rtnl_nla_parse_ifla(peer_tb, - nla_data(nla_peer) + - sizeof(struct ifinfomsg), - nla_len(nla_peer) - - sizeof(struct ifinfomsg), - NULL); + err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack); if (err < 0) return err; diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 8474cf8da86002f5edcb04b50404a15e381b5fde..7f8c55cee982821ead2213382d82cda2aa1f287d 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2310,6 +2310,14 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip) /* If there is a GPIO connected to the reset pin, toggle it */ if (gpiod) { + /* If the switch has just been reset and not yet completed + * loading EEPROM, the reset may interrupt the I2C transaction + * mid-byte, causing the first EEPROM read after the reset + * from the wrong location resulting in the switch booting + * to wrong mode and inoperable. 
+ */ + mv88e6xxx_g1_wait_eeprom_done(chip); + gpiod_set_value_cansleep(gpiod, 1); usleep_range(10000, 20000); gpiod_set_value_cansleep(gpiod, 0); diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index ab8ee933163543030575e7de431fd74c5cf71624..a4f6143e66fe9adce442582b19947ecd9a1d7025 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1448,7 +1448,7 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac) int err; phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); - if (!phy_dev || IS_ERR(phy_dev)) { + if (IS_ERR(phy_dev)) { dev_err(bgmac->dev, "Failed to register fixed PHY device\n"); return -ENODEV; } diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 99aba64f03c2f3711a33a7c59ca948a3892960b6..2b0538f2af6399825d135c68e629fb2b4b748fe8 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -568,7 +568,7 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) }; phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); - if (!phydev || IS_ERR(phydev)) { + if (IS_ERR(phydev)) { dev_err(kdev, "failed to register fixed PHY device\n"); return -ENODEV; } diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index c3ec9ceed833ed4e6d128c86bbf09aa476de91b5..d80f155574c6767f55b5b34ecff50312d2962699 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -196,7 +196,7 @@ static inline void ibmveth_flush_buffer(void *addr, unsigned long length) unsigned long offset; for (offset = 0; offset < length; offset += SMP_CACHE_BYTES) - asm("dcbfl %0,%1" :: "b" (addr), "r" (offset)); + asm("dcbf %0,%1,1" :: "b" (addr), "r" (offset)); } /* replenish the buffers for a pool. note that we don't need to diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 7164f4ad8120293f8d7b820e23edd34914894459..6b1996451a4bd6785f037c024e931faee7e0be8d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -210,11 +210,11 @@ static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset, * @hw: pointer to the HW structure. * @module_pointer: module pointer location in words from the NVM beginning * @offset: offset in words from module start - * @words: number of words to write - * @data: buffer with words to write to the Shadow RAM + * @words: number of words to read + * @data: buffer with words to read to the Shadow RAM * @last_command: tells the AdminQ that this is the last command * - * Writes a 16 bit words buffer to the Shadow RAM using the admin command. + * Reads a 16 bit words buffer to the Shadow RAM using the admin command. 
**/ static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer, u32 offset, @@ -234,18 +234,18 @@ static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, */ if ((offset + words) > hw->nvm.sr_size) i40e_debug(hw, I40E_DEBUG_NVM, - "NVM write error: offset %d beyond Shadow RAM limit %d\n", + "NVM read error: offset %d beyond Shadow RAM limit %d\n", (offset + words), hw->nvm.sr_size); else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS) - /* We can write only up to 4KB (one sector), in one AQ write */ + /* We can read only up to 4KB (one sector), in one AQ write */ i40e_debug(hw, I40E_DEBUG_NVM, - "NVM write fail error: tried to write %d words, limit is %d.\n", + "NVM read fail error: tried to read %d words, limit is %d.\n", words, I40E_SR_SECTOR_SIZE_IN_WORDS); else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS) != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS)) - /* A single write cannot spread over two sectors */ + /* A single read cannot spread over two sectors */ i40e_debug(hw, I40E_DEBUG_NVM, - "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n", + "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n", offset, words); else ret_code = i40e_aq_read_nvm(hw, module_pointer, diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 1929847b8c404d19ec57207b16d5cb2665056327..59df4c9bd8f909ae16ad9ed1b6e518c9203e8edf 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -353,7 +353,8 @@ int ice_setup_rx_ctx(struct ice_ring *ring) /* Receive Packet Data Buffer Size. * The Packet Data Buffer Size is defined in 128 byte units. */ - rlan_ctx.dbuf = ring->rx_buf_len >> ICE_RLAN_CTX_DBUF_S; + rlan_ctx.dbuf = DIV_ROUND_UP(ring->rx_buf_len, + BIT_ULL(ICE_RLAN_CTX_DBUF_S)); /* use 32 byte descriptors */ rlan_ctx.dsize = 1; diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 86a576201f5ff9e4fa3bf0252ea9852ffb8ca0bd..0dbbb32905fa4e53adbbf730c18ef7cf57d9c4d0 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -1262,18 +1262,6 @@ void igb_ptp_init(struct igb_adapter *adapter) return; } - spin_lock_init(&adapter->tmreg_lock); - INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work); - - if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK) - INIT_DELAYED_WORK(&adapter->ptp_overflow_work, - igb_ptp_overflow_check); - - adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; - adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; - - igb_ptp_reset(adapter); - adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, &adapter->pdev->dev); if (IS_ERR(adapter->ptp_clock)) { @@ -1283,6 +1271,18 @@ void igb_ptp_init(struct igb_adapter *adapter) dev_info(&adapter->pdev->dev, "added PHC on %s\n", adapter->netdev->name); adapter->ptp_flags |= IGB_PTP_ENABLED; + + spin_lock_init(&adapter->tmreg_lock); + INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work); + + if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK) + INIT_DELAYED_WORK(&adapter->ptp_overflow_work, + igb_ptp_overflow_check); + + adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + + igb_ptp_reset(adapter); } } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 449f5224d1aeb6baa57d5759f4b04bd10570e518..e549b09c347a769400f987aeb3f96d8e6d21de83 100644 --- 
a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -2876,9 +2876,10 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, if (link < 0) return NIX_AF_ERR_RX_LINK_INVALID; - nix_find_link_frs(rvu, req, pcifunc); linkcfg: + nix_find_link_frs(rvu, req, pcifunc); + cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link)); cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16); if (req->update_minlen) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index 44a434b1178b540cd338cdb449c56b1f388ca325..80dee8c692495acd4e87578d6293d000c9629a9c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -89,7 +89,8 @@ static u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev, static u64 read_internal_timer(const struct cyclecounter *cc) { - struct mlx5_clock *clock = container_of(cc, struct mlx5_clock, cycles); + struct mlx5_timer *timer = container_of(cc, struct mlx5_timer, cycles); + struct mlx5_clock *clock = container_of(timer, struct mlx5_clock, timer); struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock); @@ -100,6 +101,7 @@ static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev) { struct mlx5_ib_clock_info *clock_info = mdev->clock_info; struct mlx5_clock *clock = &mdev->clock; + struct mlx5_timer *timer; u32 sign; if (!clock_info) @@ -109,10 +111,11 @@ static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev) smp_store_mb(clock_info->sign, sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING); - clock_info->cycles = clock->tc.cycle_last; - clock_info->mult = clock->cycles.mult; - clock_info->nsec = clock->tc.nsec; - clock_info->frac = clock->tc.frac; + timer = &clock->timer; + clock_info->cycles = timer->tc.cycle_last; + clock_info->mult = timer->cycles.mult; + clock_info->nsec = timer->tc.nsec; + clock_info->frac = timer->tc.frac; smp_store_release(&clock_info->sign, sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2); @@ -151,28 +154,37 @@ static void mlx5_timestamp_overflow(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); struct mlx5_core_dev *mdev; + struct mlx5_timer *timer; struct mlx5_clock *clock; unsigned long flags; - clock = container_of(dwork, struct mlx5_clock, overflow_work); + timer = container_of(dwork, struct mlx5_timer, overflow_work); + clock = container_of(timer, struct mlx5_clock, timer); mdev = container_of(clock, struct mlx5_core_dev, clock); + + if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) + goto out; + write_seqlock_irqsave(&clock->lock, flags); - timecounter_read(&clock->tc); + timecounter_read(&timer->tc); mlx5_update_clock_info_page(mdev); write_sequnlock_irqrestore(&clock->lock, flags); - schedule_delayed_work(&clock->overflow_work, clock->overflow_period); + +out: + schedule_delayed_work(&timer->overflow_work, timer->overflow_period); } static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts) { struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info); + struct mlx5_timer *timer = &clock->timer; u64 ns = timespec64_to_ns(ts); struct mlx5_core_dev *mdev; unsigned long flags; mdev = container_of(clock, struct mlx5_core_dev, clock); write_seqlock_irqsave(&clock->lock, flags); - timecounter_init(&clock->tc, &clock->cycles, ns); + timecounter_init(&timer->tc, &timer->cycles, ns); mlx5_update_clock_info_page(mdev); 
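/*
 * [Editorial aside -- illustration only, not part of the patch.] Throughout
 * these mlx5 clock hunks the timecounter that now lives in struct mlx5_timer
 * is still guarded by the clock seqlock: writers take write_seqlock_irqsave(),
 * while readers (see mlx5_timecounter_cyc2time() further down) loop on
 * read_seqbegin()/read_seqretry(). A minimal, self-contained sketch of that
 * reader/writer pattern with a hypothetical shared value:
 */
#include <linux/seqlock.h>
#include <linux/types.h>

static DEFINE_SEQLOCK(example_lock);
static u64 example_ns;

static void example_writer(u64 ns)
{
	unsigned long flags;

	write_seqlock_irqsave(&example_lock, flags);
	example_ns = ns;		/* writers are fully serialized */
	write_sequnlock_irqrestore(&example_lock, flags);
}

static u64 example_reader(void)
{
	unsigned int seq;
	u64 ns;

	do {
		seq = read_seqbegin(&example_lock);
		ns = example_ns;	/* retried if a writer raced us */
	} while (read_seqretry(&example_lock, seq));

	return ns;
}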
write_sequnlock_irqrestore(&clock->lock, flags); @@ -183,6 +195,7 @@ static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts, struct ptp_system_timestamp *sts) { struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info); + struct mlx5_timer *timer = &clock->timer; struct mlx5_core_dev *mdev; unsigned long flags; u64 cycles, ns; @@ -190,7 +203,7 @@ static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts, mdev = container_of(clock, struct mlx5_core_dev, clock); write_seqlock_irqsave(&clock->lock, flags); cycles = mlx5_read_internal_timer(mdev, sts); - ns = timecounter_cyc2time(&clock->tc, cycles); + ns = timecounter_cyc2time(&timer->tc, cycles); write_sequnlock_irqrestore(&clock->lock, flags); *ts = ns_to_timespec64(ns); @@ -201,12 +214,13 @@ static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts, static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) { struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info); + struct mlx5_timer *timer = &clock->timer; struct mlx5_core_dev *mdev; unsigned long flags; mdev = container_of(clock, struct mlx5_core_dev, clock); write_seqlock_irqsave(&clock->lock, flags); - timecounter_adjtime(&clock->tc, delta); + timecounter_adjtime(&timer->tc, delta); mlx5_update_clock_info_page(mdev); write_sequnlock_irqrestore(&clock->lock, flags); @@ -216,27 +230,27 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta) { struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info); + struct mlx5_timer *timer = &clock->timer; struct mlx5_core_dev *mdev; unsigned long flags; int neg_adj = 0; u32 diff; u64 adj; - if (delta < 0) { neg_adj = 1; delta = -delta; } - adj = clock->nominal_c_mult; + adj = timer->nominal_c_mult; adj *= delta; diff = div_u64(adj, 1000000000ULL); mdev = container_of(clock, struct mlx5_core_dev, clock); write_seqlock_irqsave(&clock->lock, flags); - timecounter_read(&clock->tc); - clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff : - clock->nominal_c_mult + diff; + timecounter_read(&timer->tc); + timer->cycles.mult = neg_adj ? 
timer->nominal_c_mult - diff : + timer->nominal_c_mult + diff; mlx5_update_clock_info_page(mdev); write_sequnlock_irqrestore(&clock->lock, flags); @@ -313,6 +327,7 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp, container_of(ptp, struct mlx5_clock, ptp_info); struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock); + struct mlx5_timer *timer = &clock->timer; u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; u64 nsec_now, nsec_delta, time_stamp = 0; u64 cycles_now, cycles_delta; @@ -355,10 +370,10 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp, ns = timespec64_to_ns(&ts); cycles_now = mlx5_read_internal_timer(mdev, NULL); write_seqlock_irqsave(&clock->lock, flags); - nsec_now = timecounter_cyc2time(&clock->tc, cycles_now); + nsec_now = timecounter_cyc2time(&timer->tc, cycles_now); nsec_delta = ns - nsec_now; - cycles_delta = div64_u64(nsec_delta << clock->cycles.shift, - clock->cycles.mult); + cycles_delta = div64_u64(nsec_delta << timer->cycles.shift, + timer->cycles.mult); write_sequnlock_irqrestore(&clock->lock, flags); time_stamp = cycles_now + cycles_delta; field_select = MLX5_MTPPS_FS_PIN_MODE | @@ -541,6 +556,7 @@ static int mlx5_pps_event(struct notifier_block *nb, unsigned long type, void *data) { struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb); + struct mlx5_timer *timer = &clock->timer; struct ptp_clock_event ptp_event; u64 cycles_now, cycles_delta; u64 nsec_now, nsec_delta, ns; @@ -575,10 +591,10 @@ static int mlx5_pps_event(struct notifier_block *nb, ts.tv_nsec = 0; ns = timespec64_to_ns(&ts); write_seqlock_irqsave(&clock->lock, flags); - nsec_now = timecounter_cyc2time(&clock->tc, cycles_now); + nsec_now = timecounter_cyc2time(&timer->tc, cycles_now); nsec_delta = ns - nsec_now; - cycles_delta = div64_u64(nsec_delta << clock->cycles.shift, - clock->cycles.mult); + cycles_delta = div64_u64(nsec_delta << timer->cycles.shift, + timer->cycles.mult); clock->pps_info.start[pin] = cycles_now + cycles_delta; write_sequnlock_irqrestore(&clock->lock, flags); schedule_work(&clock->pps_info.out_work); @@ -591,29 +607,32 @@ static int mlx5_pps_event(struct notifier_block *nb, return NOTIFY_OK; } -void mlx5_init_clock(struct mlx5_core_dev *mdev) +static void mlx5_timecounter_init(struct mlx5_core_dev *mdev) { struct mlx5_clock *clock = &mdev->clock; - u64 overflow_cycles; - u64 ns; - u64 frac = 0; + struct mlx5_timer *timer = &clock->timer; u32 dev_freq; dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz); - if (!dev_freq) { - mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n"); - return; - } - seqlock_init(&clock->lock); - clock->cycles.read = read_internal_timer; - clock->cycles.shift = MLX5_CYCLES_SHIFT; - clock->cycles.mult = clocksource_khz2mult(dev_freq, - clock->cycles.shift); - clock->nominal_c_mult = clock->cycles.mult; - clock->cycles.mask = CLOCKSOURCE_MASK(41); - - timecounter_init(&clock->tc, &clock->cycles, + timer->cycles.read = read_internal_timer; + timer->cycles.shift = MLX5_CYCLES_SHIFT; + timer->cycles.mult = clocksource_khz2mult(dev_freq, + timer->cycles.shift); + timer->nominal_c_mult = timer->cycles.mult; + timer->cycles.mask = CLOCKSOURCE_MASK(41); + + timecounter_init(&timer->tc, &timer->cycles, ktime_to_ns(ktime_get_real())); +} + +static void mlx5_init_overflow_period(struct mlx5_clock *clock) +{ + struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock); + struct mlx5_ib_clock_info *clock_info = mdev->clock_info; + struct mlx5_timer *timer = 
&clock->timer; + u64 overflow_cycles; + u64 frac = 0; + u64 ns; /* Calculate period in seconds to call the overflow watchdog - to make * sure counter is checked at least twice every wrap around. @@ -622,32 +641,63 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev) * multiplied by clock multiplier where the result doesn't exceed * 64bits. */ - overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult); - overflow_cycles = min(overflow_cycles, div_u64(clock->cycles.mask, 3)); + overflow_cycles = div64_u64(~0ULL >> 1, timer->cycles.mult); + overflow_cycles = min(overflow_cycles, div_u64(timer->cycles.mask, 3)); - ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles, + ns = cyclecounter_cyc2ns(&timer->cycles, overflow_cycles, frac, &frac); do_div(ns, NSEC_PER_SEC / HZ); - clock->overflow_period = ns; + timer->overflow_period = ns; - mdev->clock_info = - (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL); - if (mdev->clock_info) { - mdev->clock_info->nsec = clock->tc.nsec; - mdev->clock_info->cycles = clock->tc.cycle_last; - mdev->clock_info->mask = clock->cycles.mask; - mdev->clock_info->mult = clock->nominal_c_mult; - mdev->clock_info->shift = clock->cycles.shift; - mdev->clock_info->frac = clock->tc.frac; - mdev->clock_info->overflow_period = clock->overflow_period; + INIT_DELAYED_WORK(&timer->overflow_work, mlx5_timestamp_overflow); + if (timer->overflow_period) + schedule_delayed_work(&timer->overflow_work, 0); + else + mlx5_core_warn(mdev, + "invalid overflow period, overflow_work is not scheduled\n"); + + if (clock_info) + clock_info->overflow_period = timer->overflow_period; +} + +static void mlx5_init_clock_info(struct mlx5_core_dev *mdev) +{ + struct mlx5_clock *clock = &mdev->clock; + struct mlx5_ib_clock_info *info; + struct mlx5_timer *timer; + + mdev->clock_info = (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL); + if (!mdev->clock_info) { + mlx5_core_warn(mdev, "Failed to allocate IB clock info page\n"); + return; } + info = mdev->clock_info; + timer = &clock->timer; + + info->nsec = timer->tc.nsec; + info->cycles = timer->tc.cycle_last; + info->mask = timer->cycles.mask; + info->mult = timer->nominal_c_mult; + info->shift = timer->cycles.shift; + info->frac = timer->tc.frac; +} + +void mlx5_init_clock(struct mlx5_core_dev *mdev) +{ + struct mlx5_clock *clock = &mdev->clock; + + if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) { + mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n"); + return; + } + + seqlock_init(&clock->lock); + + mlx5_timecounter_init(mdev); + mlx5_init_clock_info(mdev); + mlx5_init_overflow_period(clock); INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out); - INIT_DELAYED_WORK(&clock->overflow_work, mlx5_timestamp_overflow); - if (clock->overflow_period) - schedule_delayed_work(&clock->overflow_work, 0); - else - mlx5_core_warn(mdev, "invalid overflow period, overflow_work is not scheduled\n"); /* Configure the PHC */ clock->ptp_info = mlx5_ptp_clock_info; @@ -684,7 +734,7 @@ void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) } cancel_work_sync(&clock->pps_info.out_work); - cancel_delayed_work_sync(&clock->overflow_work); + cancel_delayed_work_sync(&clock->timer.overflow_work); if (mdev->clock_info) { free_page((unsigned long)mdev->clock_info); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h index 31600924bdc367824b2ed9ae199dbc05d17177d7..6e8804ebc773ba7fdcc09f5918321808a61958fa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h @@ -45,12 +45,13 @@ static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev) static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock, u64 timestamp) { + struct mlx5_timer *timer = &clock->timer; unsigned int seq; u64 nsec; do { seq = read_seqbegin(&clock->lock); - nsec = timecounter_cyc2time(&clock->tc, timestamp); + nsec = timecounter_cyc2time(&timer->tc, timestamp); } while (read_seqretry(&clock->lock, seq)); return ns_to_ktime(nsec); diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 60b7d93bb834e6d48fab41723eeda55c5f116770..93be7dd571fc5c33853a31912f30b14e6588786e 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -745,7 +745,8 @@ static int ipvlan_device_event(struct notifier_block *unused, write_pnet(&port->pnet, newnet); - ipvlan_migrate_l3s_hook(oldnet, newnet); + if (port->mode == IPVLAN_MODE_L3S) + ipvlan_migrate_l3s_hook(oldnet, newnet); break; } case NETDEV_UNREGISTER: diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 4fdb970e34823d14b6193518c3978f001d6c28bc..2ad15b1d7ffd7cba14bd03c527a38c9d99d66b47 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -159,6 +159,19 @@ static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr) return sa; } +static struct macsec_rx_sa *macsec_active_rxsa_get(struct macsec_rx_sc *rx_sc) +{ + struct macsec_rx_sa *sa = NULL; + int an; + + for (an = 0; an < MACSEC_NUM_AN; an++) { + sa = macsec_rxsa_get(rx_sc->sa[an]); + if (sa) + break; + } + return sa; +} + static void free_rx_sc_rcu(struct rcu_head *head) { struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head); @@ -497,18 +510,28 @@ static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev) skb->protocol = eth_hdr(skb)->h_proto; } +static unsigned int macsec_msdu_len(struct sk_buff *skb) +{ + struct macsec_dev *macsec = macsec_priv(skb->dev); + struct macsec_secy *secy = &macsec->secy; + bool sci_present = macsec_skb_cb(skb)->has_sci; + + return skb->len - macsec_hdr_len(sci_present) - secy->icv_len; +} + static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc, struct macsec_tx_sa *tx_sa) { + unsigned int msdu_len = macsec_msdu_len(skb); struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats); u64_stats_update_begin(&txsc_stats->syncp); if (tx_sc->encrypt) { - txsc_stats->stats.OutOctetsEncrypted += skb->len; + txsc_stats->stats.OutOctetsEncrypted += msdu_len; txsc_stats->stats.OutPktsEncrypted++; this_cpu_inc(tx_sa->stats->OutPktsEncrypted); } else { - txsc_stats->stats.OutOctetsProtected += skb->len; + txsc_stats->stats.OutOctetsProtected += msdu_len; txsc_stats->stats.OutPktsProtected++; this_cpu_inc(tx_sa->stats->OutPktsProtected); } @@ -538,9 +561,10 @@ static void macsec_encrypt_done(struct crypto_async_request *base, int err) aead_request_free(macsec_skb_cb(skb)->req); rcu_read_lock_bh(); - macsec_encrypt_finish(skb, dev); macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa); - len = skb->len; + /* packet is encrypted/protected so tx_bytes must be calculated */ + len = macsec_msdu_len(skb) + 2 * ETH_ALEN; + macsec_encrypt_finish(skb, dev); ret = dev_queue_xmit(skb); count_tx(dev, ret, len); rcu_read_unlock_bh(); @@ -699,6 +723,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb, macsec_skb_cb(skb)->req = req; macsec_skb_cb(skb)->tx_sa = tx_sa; + macsec_skb_cb(skb)->has_sci = sci_present; 
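	/*
	 * Cache has_sci in the skb control block: the asynchronous completion
	 * path (macsec_encrypt_done) recomputes the MSDU length through
	 * macsec_msdu_len(), i.e. skb->len minus the SecTAG (whose size depends
	 * on whether the SCI is carried) and the ICV, so the OutOctets counters
	 * and tx_bytes reflect the user payload rather than the full frame.
	 */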
aead_request_set_callback(req, 0, macsec_encrypt_done, skb); dev_hold(skb->dev); @@ -740,15 +765,17 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsLate++; u64_stats_update_end(&rxsc_stats->syncp); + DEV_STATS_INC(secy->netdev, rx_dropped); return false; } if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) { + unsigned int msdu_len = macsec_msdu_len(skb); u64_stats_update_begin(&rxsc_stats->syncp); if (hdr->tci_an & MACSEC_TCI_E) - rxsc_stats->stats.InOctetsDecrypted += skb->len; + rxsc_stats->stats.InOctetsDecrypted += msdu_len; else - rxsc_stats->stats.InOctetsValidated += skb->len; + rxsc_stats->stats.InOctetsValidated += msdu_len; u64_stats_update_end(&rxsc_stats->syncp); } @@ -761,6 +788,8 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsNotValid++; u64_stats_update_end(&rxsc_stats->syncp); + this_cpu_inc(rx_sa->stats->InPktsNotValid); + DEV_STATS_INC(secy->netdev, rx_errors); return false; } @@ -853,9 +882,9 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err) macsec_finalize_skb(skb, macsec->secy.icv_len, macsec_extra_len(macsec_skb_cb(skb)->has_sci)); + len = skb->len; macsec_reset_skb(skb, macsec->secy.netdev); - len = skb->len; if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS) count_rx(dev, len); @@ -1046,6 +1075,7 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb) u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.InPktsNoTag++; u64_stats_update_end(&secy_stats->syncp); + DEV_STATS_INC(macsec->secy.netdev, rx_dropped); continue; } @@ -1155,6 +1185,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.InPktsBadTag++; u64_stats_update_end(&secy_stats->syncp); + DEV_STATS_INC(secy->netdev, rx_errors); goto drop_nosa; } @@ -1165,11 +1196,15 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) /* If validateFrames is Strict or the C bit in the * SecTAG is set, discard */ + struct macsec_rx_sa *active_rx_sa = macsec_active_rxsa_get(rx_sc); if (hdr->tci_an & MACSEC_TCI_C || secy->validate_frames == MACSEC_VALIDATE_STRICT) { u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsNotUsingSA++; u64_stats_update_end(&rxsc_stats->syncp); + DEV_STATS_INC(secy->netdev, rx_errors); + if (active_rx_sa) + this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA); goto drop_nosa; } @@ -1179,6 +1214,8 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsUnusedSA++; u64_stats_update_end(&rxsc_stats->syncp); + if (active_rx_sa) + this_cpu_inc(active_rx_sa->stats->InPktsUnusedSA); goto deliver; } @@ -1199,6 +1236,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsLate++; u64_stats_update_end(&rxsc_stats->syncp); + DEV_STATS_INC(macsec->secy.netdev, rx_dropped); goto drop; } } @@ -1227,6 +1265,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) deliver: macsec_finalize_skb(skb, secy->icv_len, macsec_extra_len(macsec_skb_cb(skb)->has_sci)); + len = skb->len; macsec_reset_skb(skb, secy->netdev); if (rx_sa) @@ -1234,12 +1273,11 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) 
macsec_rxsc_put(rx_sc); skb_orphan(skb); - len = skb->len; ret = gro_cells_receive(&macsec->gro_cells, skb); if (ret == NET_RX_SUCCESS) count_rx(dev, len); else - macsec->secy.netdev->stats.rx_dropped++; + DEV_STATS_INC(macsec->secy.netdev, rx_dropped); rcu_read_unlock(); @@ -1276,6 +1314,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.InPktsNoSCI++; u64_stats_update_end(&secy_stats->syncp); + DEV_STATS_INC(macsec->secy.netdev, rx_errors); continue; } @@ -1294,7 +1333,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) secy_stats->stats.InPktsUnknownSCI++; u64_stats_update_end(&secy_stats->syncp); } else { - macsec->secy.netdev->stats.rx_dropped++; + DEV_STATS_INC(macsec->secy.netdev, rx_dropped); } } @@ -3403,21 +3442,21 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, if (!secy->operational) { kfree_skb(skb); - dev->stats.tx_dropped++; + DEV_STATS_INC(dev, tx_dropped); return NETDEV_TX_OK; } + len = skb->len; skb = macsec_encrypt(skb, dev); if (IS_ERR(skb)) { if (PTR_ERR(skb) != -EINPROGRESS) - dev->stats.tx_dropped++; + DEV_STATS_INC(dev, tx_dropped); return NETDEV_TX_OK; } macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa); macsec_encrypt_finish(skb, dev); - len = skb->len; ret = dev_queue_xmit(skb); count_tx(dev, ret, len); return ret; @@ -3646,8 +3685,9 @@ static void macsec_get_stats64(struct net_device *dev, dev_fetch_sw_netstats(s, dev->tstats); - s->rx_dropped = dev->stats.rx_dropped; - s->tx_dropped = dev->stats.tx_dropped; + s->rx_dropped = atomic_long_read(&dev->stats.__rx_dropped); + s->tx_dropped = atomic_long_read(&dev->stats.__tx_dropped); + s->rx_errors = atomic_long_read(&dev->stats.__rx_errors); } static int macsec_get_iflink(const struct net_device *dev) diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 0cde17bd743f3cb3798f5c282cbd3e7fcae630e9..a664faa8f01f66cafdc70492412eddd5d2f5eb14 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -404,6 +404,17 @@ static int bcm54xx_resume(struct phy_device *phydev) return bcm54xx_config_init(phydev); } +static int bcm54810_read_mmd(struct phy_device *phydev, int devnum, u16 regnum) +{ + return -EOPNOTSUPP; +} + +static int bcm54810_write_mmd(struct phy_device *phydev, int devnum, u16 regnum, + u16 val) +{ + return -EOPNOTSUPP; +} + static int bcm54811_config_init(struct phy_device *phydev) { int err, reg; @@ -841,6 +852,8 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM54810", /* PHY_GBIT_FEATURES */ + .read_mmd = bcm54810_read_mmd, + .write_mmd = bcm54810_write_mmd, .config_init = bcm54xx_config_init, .config_aneg = bcm5481_config_aneg, .ack_interrupt = bcm_phy_ack_intr, diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 36c7eae776d4465cd7c887211531939ab1956a6b..721b536ce886192b0bc48c1288ae0cb6888d4ab0 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -2195,7 +2195,9 @@ static void team_setup(struct net_device *dev) dev->hw_features = TEAM_VLAN_FEATURES | NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER; + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_STAG_FILTER; dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4; dev->features |= dev->hw_features; diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 5e8a9dc1f84df50214155f1d4609bcba715008d3..222fbd8016e59465f865d97e682cc22e05032959 100644 --- 
a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -1310,10 +1310,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, nla_peer = data[VETH_INFO_PEER]; ifmp = nla_data(nla_peer); - err = rtnl_nla_parse_ifla(peer_tb, - nla_data(nla_peer) + sizeof(struct ifinfomsg), - nla_len(nla_peer) - sizeof(struct ifinfomsg), - NULL); + err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack); if (err < 0) return err; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 165149bcf0b1a3a3cfd128980d039aaa5fe775c9..2fd5d2b7a209242d3225cc5714ff15119f19f6bf 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -3220,8 +3220,6 @@ static int virtnet_probe(struct virtio_device *vdev) } } - _virtnet_set_queues(vi, vi->curr_queue_pairs); - /* serialize netdev register + virtio_device_ready() with ndo_open() */ rtnl_lock(); @@ -3234,6 +3232,8 @@ static int virtnet_probe(struct virtio_device *vdev) virtio_device_ready(vdev); + _virtnet_set_queues(vi, vi->curr_queue_pairs); + rtnl_unlock(); err = virtnet_cpu_notif_add(vi); diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c index fe64430b438a1238222d5faef131722bbf85f889..be26346085faf43c7f4d23e6e85ea2fe985f05f0 100644 --- a/drivers/of/dynamic.c +++ b/drivers/of/dynamic.c @@ -63,15 +63,14 @@ int of_reconfig_notifier_unregister(struct notifier_block *nb) } EXPORT_SYMBOL_GPL(of_reconfig_notifier_unregister); -#ifdef DEBUG -const char *action_names[] = { +static const char *action_names[] = { + [0] = "INVALID", [OF_RECONFIG_ATTACH_NODE] = "ATTACH_NODE", [OF_RECONFIG_DETACH_NODE] = "DETACH_NODE", [OF_RECONFIG_ADD_PROPERTY] = "ADD_PROPERTY", [OF_RECONFIG_REMOVE_PROPERTY] = "REMOVE_PROPERTY", [OF_RECONFIG_UPDATE_PROPERTY] = "UPDATE_PROPERTY", }; -#endif int of_reconfig_notify(unsigned long action, struct of_reconfig_data *p) { @@ -589,21 +588,9 @@ static int __of_changeset_entry_apply(struct of_changeset_entry *ce) } ret = __of_add_property(ce->np, ce->prop); - if (ret) { - pr_err("changeset: add_property failed @%pOF/%s\n", - ce->np, - ce->prop->name); - break; - } break; case OF_RECONFIG_REMOVE_PROPERTY: ret = __of_remove_property(ce->np, ce->prop); - if (ret) { - pr_err("changeset: remove_property failed @%pOF/%s\n", - ce->np, - ce->prop->name); - break; - } break; case OF_RECONFIG_UPDATE_PROPERTY: @@ -617,20 +604,17 @@ static int __of_changeset_entry_apply(struct of_changeset_entry *ce) } ret = __of_update_property(ce->np, ce->prop, &old_prop); - if (ret) { - pr_err("changeset: update_property failed @%pOF/%s\n", - ce->np, - ce->prop->name); - break; - } break; default: ret = -EINVAL; } raw_spin_unlock_irqrestore(&devtree_lock, flags); - if (ret) + if (ret) { + pr_err("changeset: apply failed: %-15s %pOF:%s\n", + action_names[ce->action], ce->np, ce->prop->name); return ret; + } switch (ce->action) { case OF_RECONFIG_ATTACH_NODE: @@ -913,6 +897,9 @@ int of_changeset_action(struct of_changeset *ocs, unsigned long action, if (!ce) return -ENOMEM; + if (WARN_ON(action >= ARRAY_SIZE(action_names))) + return -EINVAL; + /* get a reference to the node */ ce->action = action; ce->np = of_node_get(np); diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c index 1222f5749bc67ae7fe0b63a42326681bcd008455..a215777df96c7d65cb8e2aa0d540ac65adfa1a0b 100644 --- a/drivers/pci/controller/dwc/pcie-tegra194.c +++ b/drivers/pci/controller/dwc/pcie-tegra194.c @@ -239,6 +239,7 @@ #define EP_STATE_ENABLED 1 static const unsigned int pcie_gen_freq[] = { + GEN1_CORE_CLK_FREQ, /* 
PCI_EXP_LNKSTA_CLS == 0; undefined */ GEN1_CORE_CLK_FREQ, GEN2_CORE_CLK_FREQ, GEN3_CORE_CLK_FREQ, @@ -470,7 +471,11 @@ static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg) speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) & PCI_EXP_LNKSTA_CLS; - clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]); + + if (speed >= ARRAY_SIZE(pcie_gen_freq)) + speed = 0; + + clk_set_rate(pcie->core_clk, pcie_gen_freq[speed]); /* If EP doesn't advertise L1SS, just return */ val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub); @@ -973,7 +978,11 @@ static int tegra_pcie_dw_host_init(struct pcie_port *pp) speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) & PCI_EXP_LNKSTA_CLS; - clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]); + + if (speed >= ARRAY_SIZE(pcie_gen_freq)) + speed = 0; + + clk_set_rate(pcie->core_clk, pcie_gen_freq[speed]); tegra_pcie_enable_interrupts(pp); diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index f031302ad4019b5c2d783cb713102e5214bcd578..0a37967b0a939d13106294ab43390b37bff238d7 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -503,12 +503,15 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge) if (pass && dev->subordinate) { check_hotplug_bridge(slot, dev); pcibios_resource_survey_bus(dev->subordinate); - __pci_bus_size_bridges(dev->subordinate, - &add_list); + if (pci_is_root_bus(bus)) + __pci_bus_size_bridges(dev->subordinate, &add_list); } } } - __pci_bus_assign_resources(bus, &add_list, NULL); + if (pci_is_root_bus(bus)) + __pci_bus_assign_resources(bus, &add_list, NULL); + else + pci_assign_unassigned_bridge_resources(bus->self); } acpiphp_sanitize_bus(bus); diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c index 69a6e9a5d6d269be87c84ea1611443c8e3ea657f..6e90927e65769972b2afe0cc0a8479cf3c64b447 100644 --- a/drivers/pcmcia/rsrc_nonstatic.c +++ b/drivers/pcmcia/rsrc_nonstatic.c @@ -1053,6 +1053,8 @@ static void nonstatic_release_resource_db(struct pcmcia_socket *s) q = p->next; kfree(p); } + + kfree(data); } diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c index 3242ff63986fdf4072044113c3a35de50a662d2f..37e1994c1f8149263819f0550838843edbfb0bf4 100644 --- a/drivers/scsi/53c700.c +++ b/drivers/scsi/53c700.c @@ -1600,7 +1600,7 @@ NCR_700_intr(int irq, void *dev_id) printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG))); #endif resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch; - } else if(dsp >= to32bit(&slot->pSG[0].ins) && + } else if (slot && dsp >= to32bit(&slot->pSG[0].ins) && dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) { int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff; int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List); diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index b33cb1172f31d6a56651a503c23e733ac5f12440..63c9368bafcf1be5a367f1f51a3b0c6e23bbe1a1 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -31,6 +31,7 @@ static void qedf_remove(struct pci_dev *pdev); static void qedf_shutdown(struct pci_dev *pdev); static void qedf_schedule_recovery_handler(void *dev); static void qedf_recovery_handler(struct work_struct *work); +static int qedf_suspend(struct pci_dev *pdev, pm_message_t state); /* * Driver 
module parameters. @@ -3272,6 +3273,7 @@ static struct pci_driver qedf_pci_driver = { .probe = qedf_probe, .remove = qedf_remove, .shutdown = qedf_shutdown, + .suspend = qedf_suspend, }; static int __qedf_probe(struct pci_dev *pdev, int mode) @@ -3986,6 +3988,22 @@ static void qedf_shutdown(struct pci_dev *pdev) __qedf_remove(pdev, QEDF_MODE_NORMAL); } +static int qedf_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct qedf_ctx *qedf; + + if (!pdev) { + QEDF_ERR(NULL, "pdev is NULL.\n"); + return -ENODEV; + } + + qedf = pci_get_drvdata(pdev); + + QEDF_ERR(&qedf->dbg_ctx, "%s: Device does not support suspend operation\n", __func__); + + return -EPERM; +} + /* * Recovery handler code */ diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index 7df0106f132eed88e3b133f42df5313034018bc8..cc2152c56d35580176a510b2915f6e2a017f62f0 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -69,6 +69,7 @@ static struct nvm_iscsi_block *qedi_get_nvram_block(struct qedi_ctx *qedi); static void qedi_recovery_handler(struct work_struct *work); static void qedi_schedule_hw_err_handler(void *dev, enum qed_hw_err_type err_type); +static int qedi_suspend(struct pci_dev *pdev, pm_message_t state); static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle) { @@ -2517,6 +2518,22 @@ static void qedi_shutdown(struct pci_dev *pdev) __qedi_remove(pdev, QEDI_MODE_SHUTDOWN); } +static int qedi_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct qedi_ctx *qedi; + + if (!pdev) { + QEDI_ERR(NULL, "pdev is NULL.\n"); + return -ENODEV; + } + + qedi = pci_get_drvdata(pdev); + + QEDI_ERR(&qedi->dbg_ctx, "%s: Device does not support suspend operation\n", __func__); + + return -EPERM; +} + static int __qedi_probe(struct pci_dev *pdev, int mode) { struct qedi_ctx *qedi; @@ -2875,6 +2892,7 @@ static struct pci_driver qedi_pci_driver = { .remove = qedi_remove, .shutdown = qedi_shutdown, .err_handler = &qedi_err_handler, + .suspend = qedi_suspend, }; static int __init qedi_init(void) diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c index 898a0bdf8df6702666f865dbe9371851973f8068..711252e52d8e1f08d65c0801d4c53f70182d0134 100644 --- a/drivers/scsi/raid_class.c +++ b/drivers/scsi/raid_class.c @@ -248,6 +248,7 @@ int raid_component_add(struct raid_template *r,struct device *raid_dev, return 0; err_out: + put_device(&rc->dev); list_del(&rc->node); rd->component_count--; put_device(component_dev); diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c index d6982d3557396b02fd89f5a664450a8a50a37719..94603e64cc6bf32cae7cc776ce1ac3a9d12960a7 100644 --- a/drivers/scsi/scsi_proc.c +++ b/drivers/scsi/scsi_proc.c @@ -311,7 +311,7 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf, size_t length, loff_t *ppos) { int host, channel, id, lun; - char *buffer, *p; + char *buffer, *end, *p; int err; if (!buf || length > PAGE_SIZE) @@ -326,10 +326,14 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf, goto out; err = -EINVAL; - if (length < PAGE_SIZE) - buffer[length] = '\0'; - else if (buffer[PAGE_SIZE-1]) - goto out; + if (length < PAGE_SIZE) { + end = buffer + length; + *end = '\0'; + } else { + end = buffer + PAGE_SIZE - 1; + if (*end) + goto out; + } /* * Usage: echo "scsi add-single-device 0 1 2 3" >/proc/scsi/scsi @@ -338,10 +342,10 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf, if (!strncmp("scsi add-single-device", buffer, 22)) { p = buffer + 23; - 
host = simple_strtoul(p, &p, 0); - channel = simple_strtoul(p + 1, &p, 0); - id = simple_strtoul(p + 1, &p, 0); - lun = simple_strtoul(p + 1, &p, 0); + host = (p < end) ? simple_strtoul(p, &p, 0) : 0; + channel = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; + id = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; + lun = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; err = scsi_add_single_device(host, channel, id, lun); @@ -352,10 +356,10 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf, } else if (!strncmp("scsi remove-single-device", buffer, 25)) { p = buffer + 26; - host = simple_strtoul(p, &p, 0); - channel = simple_strtoul(p + 1, &p, 0); - id = simple_strtoul(p + 1, &p, 0); - lun = simple_strtoul(p + 1, &p, 0); + host = (p < end) ? simple_strtoul(p, &p, 0) : 0; + channel = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; + id = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; + lun = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; err = scsi_remove_single_device(host, channel, id, lun); } diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c index 7cf871323b2c49c819b7cb8a2c2ae985e377f01a..c445853c623e2e1db3a6cdb19aeef3f7063769ee 100644 --- a/drivers/scsi/snic/snic_disc.c +++ b/drivers/scsi/snic/snic_disc.c @@ -317,6 +317,7 @@ snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid) "Snic Tgt: device_add, with err = %d\n", ret); + put_device(&tgt->dev); put_device(&snic->shost->shost_gendev); spin_lock_irqsave(snic->shost->host_lock, flags); list_del(&tgt->list); diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 70b4868fe2f7da42a71e8aed2c6f9f599fdb4a24..45d8549623442b5327fbcb92fc16155d71b4d670 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1641,10 +1641,6 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd) */ static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd) { -#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) - if (scmnd->device->host->transportt == fc_transport_template) - return fc_eh_timed_out(scmnd); -#endif return BLK_EH_RESET_TIMER; } diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 432a438929e6484beae3e02f74ffa0cbc5413823..7499954c9aa769dd4e5e9da59ba00587b8d2c61e 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -3231,6 +3231,7 @@ void serial8250_init_port(struct uart_8250_port *up) struct uart_port *port = &up->port; spin_lock_init(&port->lock); + port->pm = NULL; port->ops = &serial8250_pops; port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE); diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index a4cf00756681a9923ffa267808f674737d3230c7..227fb2d32046504011041ae9cc593641cebffff9 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -1062,8 +1062,8 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport) unsigned long sr = lpuart32_read(&sport->port, UARTSTAT); if (sr & (UARTSTAT_PE | UARTSTAT_FE)) { - /* Read DR to clear the error flags */ - lpuart32_read(&sport->port, UARTDATA); + /* Clear the error flags */ + lpuart32_write(&sport->port, sr, UARTSTAT); if (sr & UARTSTAT_PE) sport->port.icount.parity++; diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c index e3a8b6c71aa1ddd5b71b49d59a5584ef0564a33b..210c1d61508253d8d168036cf9b2c91e0e5e361b 100644 --- a/drivers/usb/cdns3/gadget.c +++ b/drivers/usb/cdns3/gadget.c @@ -2041,7 +2041,7 @@ int 
cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable) u8 mult = 0; int ret; - buffering = CDNS3_EP_BUF_SIZE - 1; + buffering = priv_dev->ep_buf_size - 1; cdns3_configure_dmult(priv_dev, priv_ep); @@ -2060,7 +2060,7 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable) break; default: ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_ISOC); - mult = CDNS3_EP_ISO_HS_MULT - 1; + mult = priv_dev->ep_iso_burst - 1; buffering = mult + 1; } @@ -2076,14 +2076,14 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable) mult = 0; max_packet_size = 1024; if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) { - maxburst = CDNS3_EP_ISO_SS_BURST - 1; + maxburst = priv_dev->ep_iso_burst - 1; buffering = (mult + 1) * (maxburst + 1); if (priv_ep->interval > 1) buffering++; } else { - maxburst = CDNS3_EP_BUF_SIZE - 1; + maxburst = priv_dev->ep_buf_size - 1; } break; default: @@ -2098,6 +2098,23 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable) else priv_ep->trb_burst_size = 16; + /* + * In versions preceding DEV_VER_V2, for example, iMX8QM, there exist bugs + * in the DMA. These bugs occur when the trb_burst_size exceeds 16 and the + * address is not aligned to 128 Bytes (which is a product of the 64-bit AXI + * and AXI maximum burst length of 16 or 0xF+1, dma_axi_ctrl0[3:0]). This + * results in data corruption when it crosses the 4K border. The corruption + * specifically occurs from the position (4K - (address & 0x7F)) to 4K. + * + * So force trb_burst_size to 16 on such platforms. + */ + if (priv_dev->dev_ver < DEV_VER_V2) + priv_ep->trb_burst_size = 16; + + mult = min_t(u8, mult, EP_CFG_MULT_MAX); + buffering = min_t(u8, buffering, EP_CFG_BUFFERING_MAX); + maxburst = min_t(u8, maxburst, EP_CFG_MAXBURST_MAX); + /* onchip buffer is only allocated before configuration */ if (!priv_dev->hw_configured_flag) { ret = cdns3_ep_onchip_buffer_reserve(priv_dev, buffering + 1, @@ -2971,6 +2988,40 @@ static int cdns3_gadget_udc_stop(struct usb_gadget *gadget) return 0; } +/** + * cdns3_gadget_check_config - ensure cdns3 can support the USB configuration + * @gadget: pointer to the USB gadget + * + * Used to record the maximum number of endpoints being used in a USB composite + * device (across all configurations). This is to be used in the calculation + * of the TXFIFO sizes when resizing internal memory for individual endpoints. + * It will help ensure that the resizing logic reserves enough space for at + * least one max packet. 
+ */ +static int cdns3_gadget_check_config(struct usb_gadget *gadget) +{ + struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); + struct usb_ep *ep; + int n_in = 0; + int total; + + list_for_each_entry(ep, &gadget->ep_list, ep_list) { + if (ep->claimed && (ep->address & USB_DIR_IN)) + n_in++; + } + + /* 2KB are reserved for EP0, 1KB for out*/ + total = 2 + n_in + 1; + + if (total > priv_dev->onchip_buffers) + return -ENOMEM; + + priv_dev->ep_buf_size = priv_dev->ep_iso_burst = + (priv_dev->onchip_buffers - 2) / (n_in + 1); + + return 0; +} + static const struct usb_gadget_ops cdns3_gadget_ops = { .get_frame = cdns3_gadget_get_frame, .wakeup = cdns3_gadget_wakeup, @@ -2979,6 +3030,7 @@ static const struct usb_gadget_ops cdns3_gadget_ops = { .udc_start = cdns3_gadget_udc_start, .udc_stop = cdns3_gadget_udc_stop, .match_ep = cdns3_gadget_match_ep, + .check_config = cdns3_gadget_check_config, }; static void cdns3_free_all_eps(struct cdns3_device *priv_dev) diff --git a/drivers/usb/cdns3/gadget.h b/drivers/usb/cdns3/gadget.h index 21fa461c518ec269258012ff5ba6e35e79a18906..32825477edd3eae3bedcb18a4cc4a0f3c5959ceb 100644 --- a/drivers/usb/cdns3/gadget.h +++ b/drivers/usb/cdns3/gadget.h @@ -561,15 +561,18 @@ struct cdns3_usb_regs { /* Max burst size (used only in SS mode). */ #define EP_CFG_MAXBURST_MASK GENMASK(11, 8) #define EP_CFG_MAXBURST(p) (((p) << 8) & EP_CFG_MAXBURST_MASK) +#define EP_CFG_MAXBURST_MAX 15 /* ISO max burst. */ #define EP_CFG_MULT_MASK GENMASK(15, 14) #define EP_CFG_MULT(p) (((p) << 14) & EP_CFG_MULT_MASK) +#define EP_CFG_MULT_MAX 2 /* ISO max burst. */ #define EP_CFG_MAXPKTSIZE_MASK GENMASK(26, 16) #define EP_CFG_MAXPKTSIZE(p) (((p) << 16) & EP_CFG_MAXPKTSIZE_MASK) /* Max number of buffered packets. */ #define EP_CFG_BUFFERING_MASK GENMASK(31, 27) #define EP_CFG_BUFFERING(p) (((p) << 27) & EP_CFG_BUFFERING_MASK) +#define EP_CFG_BUFFERING_MAX 15 /* EP_CMD - bitmasks */ /* Endpoint reset. 
*/ @@ -1093,9 +1096,6 @@ struct cdns3_trb { #define CDNS3_ENDPOINTS_MAX_COUNT 32 #define CDNS3_EP_ZLP_BUF_SIZE 1024 -#define CDNS3_EP_BUF_SIZE 4 /* KB */ -#define CDNS3_EP_ISO_HS_MULT 3 -#define CDNS3_EP_ISO_SS_BURST 3 #define CDNS3_MAX_NUM_DESCMISS_BUF 32 #define CDNS3_DESCMIS_BUF_SIZE 2048 /* Bytes */ #define CDNS3_WA2_NUM_BUFFERS 128 @@ -1330,6 +1330,9 @@ struct cdns3_device { /*in KB */ u16 onchip_buffers; u16 onchip_used_size; + + u16 ep_buf_size; + u16 ep_iso_burst; }; void cdns3_set_register_bit(void __iomem *ptr, u32 mask); diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c index f7984559428442296e68dadd258f55043fa8286c..4d47fe89864d9dce00e64ead38bb7eb953d23bd9 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.c +++ b/drivers/usb/chipidea/ci_hdrc_imx.c @@ -70,6 +70,10 @@ static const struct ci_hdrc_imx_platform_flag imx7ulp_usb_data = { CI_HDRC_PMQOS, }; +static const struct ci_hdrc_imx_platform_flag imx8ulp_usb_data = { + .flags = CI_HDRC_SUPPORTS_RUNTIME_PM, +}; + static const struct of_device_id ci_hdrc_imx_dt_ids[] = { { .compatible = "fsl,imx23-usb", .data = &imx23_usb_data}, { .compatible = "fsl,imx28-usb", .data = &imx28_usb_data}, @@ -80,6 +84,7 @@ static const struct of_device_id ci_hdrc_imx_dt_ids[] = { { .compatible = "fsl,imx6ul-usb", .data = &imx6ul_usb_data}, { .compatible = "fsl,imx7d-usb", .data = &imx7d_usb_data}, { .compatible = "fsl,imx7ulp-usb", .data = &imx7ulp_usb_data}, + { .compatible = "fsl,imx8ulp-usb", .data = &imx8ulp_usb_data}, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, ci_hdrc_imx_dt_ids); diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c index 425b29168b4d0be122b3e38cc0b8971635c14c69..9b1d5c11dc340bf7270b3908c296e6458dcce89c 100644 --- a/drivers/usb/chipidea/usbmisc_imx.c +++ b/drivers/usb/chipidea/usbmisc_imx.c @@ -135,7 +135,7 @@ #define TXVREFTUNE0_MASK (0xf << 20) #define MX6_USB_OTG_WAKEUP_BITS (MX6_BM_WAKEUP_ENABLE | MX6_BM_VBUS_WAKEUP | \ - MX6_BM_ID_WAKEUP) + MX6_BM_ID_WAKEUP | MX6SX_BM_DPDM_WAKEUP_EN) struct usbmisc_ops { /* It's called once when probe a usb device */ diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c index e2b38e482a7ee86a385e3175b32d39a1d332c6c0..9652b5ab289c8f2cdc435ea9b5733969537da632 100644 --- a/drivers/usb/dwc3/dwc3-qcom.c +++ b/drivers/usb/dwc3/dwc3-qcom.c @@ -306,7 +306,16 @@ static void dwc3_qcom_interconnect_exit(struct dwc3_qcom *qcom) /* Only usable in contexts where the role can not change. */ static bool dwc3_qcom_is_host(struct dwc3_qcom *qcom) { - struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3); + struct dwc3 *dwc; + + /* + * FIXME: Fix this layering violation. + */ + dwc = platform_get_drvdata(qcom->dwc3); + + /* Core driver may not have probed yet. 
*/ + if (!dwc) + return false; return dwc->xhci; } diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c index 3b5a6430e241886fad554a105e0890aff1e21425..a717b53847a8ef43e4ae3a1c7c39a85b50e0e0b5 100644 --- a/drivers/usb/gadget/function/u_serial.c +++ b/drivers/usb/gadget/function/u_serial.c @@ -917,8 +917,11 @@ static void __gs_console_push(struct gs_console *cons) } req->length = size; + + spin_unlock_irq(&cons->lock); if (usb_ep_queue(ep, req, GFP_ATOMIC)) req->length = 0; + spin_lock_irq(&cons->lock); } static void gs_console_work(struct work_struct *work) diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index 3a3b5a03dda75a297588642cdb5e9889af387922..14d9d1ee16fc41fbdfbea8515db01ec6507f3e98 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -1004,6 +1004,25 @@ int usb_gadget_ep_match_desc(struct usb_gadget *gadget, } EXPORT_SYMBOL_GPL(usb_gadget_ep_match_desc); +/** + * usb_gadget_check_config - checks if the UDC can support the binded + * configuration + * @gadget: controller to check the USB configuration + * + * Ensure that a UDC is able to support the requested resources by a + * configuration, and that there are no resource limitations, such as + * internal memory allocated to all requested endpoints. + * + * Returns zero on success, else a negative errno. + */ +int usb_gadget_check_config(struct usb_gadget *gadget) +{ + if (gadget->ops->check_config) + return gadget->ops->check_config(gadget); + return 0; +} +EXPORT_SYMBOL_GPL(usb_gadget_check_config); + /* ------------------------------------------------------------------------- */ static void usb_gadget_state_work(struct work_struct *work) diff --git a/drivers/video/fbdev/core/sysimgblt.c b/drivers/video/fbdev/core/sysimgblt.c index a4d05b1b17d7da6dfdebcf6c7df3f538cbb982e6..665ef7a0a2495bde34d6306d268cdb41353f9061 100644 --- a/drivers/video/fbdev/core/sysimgblt.c +++ b/drivers/video/fbdev/core/sysimgblt.c @@ -188,23 +188,29 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p, { u32 fgx = fgcolor, bgx = bgcolor, bpp = p->var.bits_per_pixel; u32 ppw = 32/bpp, spitch = (image->width + 7)/8; - u32 bit_mask, end_mask, eorx, shift; - const char *s = image->data, *src; + u32 bit_mask, eorx, shift; + const u8 *s = image->data, *src; u32 *dst; - const u32 *tab = NULL; + const u32 *tab; + size_t tablen; + u32 colortab[16]; int i, j, k; switch (bpp) { case 8: tab = fb_be_math(p) ? cfb_tab8_be : cfb_tab8_le; + tablen = 16; break; case 16: tab = fb_be_math(p) ? cfb_tab16_be : cfb_tab16_le; + tablen = 4; break; case 32: - default: tab = cfb_tab32; + tablen = 2; break; + default: + return; } for (i = ppw-1; i--; ) { @@ -218,20 +224,62 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p, eorx = fgx ^ bgx; k = image->width/ppw; + for (i = 0; i < tablen; ++i) + colortab[i] = (tab[i] & eorx) ^ bgx; + for (i = image->height; i--; ) { dst = dst1; shift = 8; src = s; - for (j = k; j--; ) { + /* + * Manually unroll the per-line copying loop for better + * performance. This works until we processed the last + * completely filled source byte (inclusive). 
+ */ + switch (ppw) { + case 4: /* 8 bpp */ + for (j = k; j >= 2; j -= 2, ++src) { + *dst++ = colortab[(*src >> 4) & bit_mask]; + *dst++ = colortab[(*src >> 0) & bit_mask]; + } + break; + case 2: /* 16 bpp */ + for (j = k; j >= 4; j -= 4, ++src) { + *dst++ = colortab[(*src >> 6) & bit_mask]; + *dst++ = colortab[(*src >> 4) & bit_mask]; + *dst++ = colortab[(*src >> 2) & bit_mask]; + *dst++ = colortab[(*src >> 0) & bit_mask]; + } + break; + case 1: /* 32 bpp */ + for (j = k; j >= 8; j -= 8, ++src) { + *dst++ = colortab[(*src >> 7) & bit_mask]; + *dst++ = colortab[(*src >> 6) & bit_mask]; + *dst++ = colortab[(*src >> 5) & bit_mask]; + *dst++ = colortab[(*src >> 4) & bit_mask]; + *dst++ = colortab[(*src >> 3) & bit_mask]; + *dst++ = colortab[(*src >> 2) & bit_mask]; + *dst++ = colortab[(*src >> 1) & bit_mask]; + *dst++ = colortab[(*src >> 0) & bit_mask]; + } + break; + } + + /* + * For image widths that are not a multiple of 8, there + * are trailing pixels left on the current line. Print + * them as well. + */ + for (; j--; ) { shift -= ppw; - end_mask = tab[(*src >> shift) & bit_mask]; - *dst++ = (end_mask & eorx) ^ bgx; + *dst++ = colortab[(*src >> shift) & bit_mask]; if (!shift) { shift = 8; - src++; + ++src; } } + dst1 += p->fix.line_length; s += spitch; } diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c index 061a105afb8655df6dc33bf41ca87e232d5dbb55..27c3ee5df8defe04e1228f41b6b57beac6780aaa 100644 --- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c +++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c @@ -518,7 +518,9 @@ static int mmphw_probe(struct platform_device *pdev) ret = -ENOENT; goto failed; } - clk_prepare_enable(ctrl->clk); + ret = clk_prepare_enable(ctrl->clk); + if (ret) + goto failed; /* init global regs */ ctrl_set_default(ctrl); diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index e8ef0c66e558fddaba669d683dfd77a40f025d4e..136f90dbad831afeed98e43c586b957b77b79a09 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -571,11 +571,9 @@ static void virtio_mmio_release_dev(struct device *_d) { struct virtio_device *vdev = container_of(_d, struct virtio_device, dev); - struct virtio_mmio_device *vm_dev = - container_of(vdev, struct virtio_mmio_device, vdev); - struct platform_device *pdev = vm_dev->pdev; + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); - devm_kfree(&pdev->dev, vm_dev); + kfree(vm_dev); } /* Platform device */ @@ -586,7 +584,7 @@ static int virtio_mmio_probe(struct platform_device *pdev) unsigned long magic; int rc; - vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL); + vm_dev = kzalloc(sizeof(*vm_dev), GFP_KERNEL); if (!vm_dev) return -ENOMEM; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 83dca79ff042c54f449b54a594216aa98edc9b05..b798586263ebb351c09a4b83953f1c90ea9ae323 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -4460,8 +4460,7 @@ int btrfs_cancel_balance(struct btrfs_fs_info *fs_info) } } - BUG_ON(fs_info->balance_ctl || - test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); + ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); atomic_dec(&fs_info->balance_cancel_req); mutex_unlock(&fs_info->balance_mutex); return 0; diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 5fe85dc0e2651e2c0d8981968c82a71e3aaf9ac3..a56738244f3a99e83b791a1fc6df7da0f109881d 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -4580,9 +4580,9 @@ static int cifs_readpage_worker(struct file *file, struct page *page, io_error: kunmap(page); - 
unlock_page(page); read_complete: + unlock_page(page); return rc; } diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index dde9afb6747bac4407fb28f29297276a1720a07c..51ab06308bc73a9b1fcb1e14ce3b57acd5f62e87 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -1856,7 +1856,7 @@ static void del_timeout(struct dlm_lkb *lkb) void dlm_scan_timeout(struct dlm_ls *ls) { struct dlm_rsb *r; - struct dlm_lkb *lkb; + struct dlm_lkb *lkb = NULL, *iter; int do_cancel, do_warn; s64 wait_us; @@ -1867,27 +1867,28 @@ void dlm_scan_timeout(struct dlm_ls *ls) do_cancel = 0; do_warn = 0; mutex_lock(&ls->ls_timeout_mutex); - list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) { + list_for_each_entry(iter, &ls->ls_timeout, lkb_time_list) { wait_us = ktime_to_us(ktime_sub(ktime_get(), - lkb->lkb_timestamp)); + iter->lkb_timestamp)); - if ((lkb->lkb_exflags & DLM_LKF_TIMEOUT) && - wait_us >= (lkb->lkb_timeout_cs * 10000)) + if ((iter->lkb_exflags & DLM_LKF_TIMEOUT) && + wait_us >= (iter->lkb_timeout_cs * 10000)) do_cancel = 1; - if ((lkb->lkb_flags & DLM_IFL_WATCH_TIMEWARN) && + if ((iter->lkb_flags & DLM_IFL_WATCH_TIMEWARN) && wait_us >= dlm_config.ci_timewarn_cs * 10000) do_warn = 1; if (!do_cancel && !do_warn) continue; - hold_lkb(lkb); + hold_lkb(iter); + lkb = iter; break; } mutex_unlock(&ls->ls_timeout_mutex); - if (!do_cancel && !do_warn) + if (!lkb) break; r = lkb->lkb_resource; @@ -5241,21 +5242,18 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls) static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls) { - struct dlm_lkb *lkb; - int found = 0; + struct dlm_lkb *lkb = NULL, *iter; mutex_lock(&ls->ls_waiters_mutex); - list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) { - if (lkb->lkb_flags & DLM_IFL_RESEND) { - hold_lkb(lkb); - found = 1; + list_for_each_entry(iter, &ls->ls_waiters, lkb_wait_reply) { + if (iter->lkb_flags & DLM_IFL_RESEND) { + hold_lkb(iter); + lkb = iter; break; } } mutex_unlock(&ls->ls_waiters_mutex); - if (!found) - lkb = NULL; return lkb; } @@ -5914,37 +5912,36 @@ int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, int mode, uint32_t flags, void *name, unsigned int namelen, unsigned long timeout_cs, uint32_t *lkid) { - struct dlm_lkb *lkb; + struct dlm_lkb *lkb = NULL, *iter; struct dlm_user_args *ua; int found_other_mode = 0; - int found = 0; int rv = 0; mutex_lock(&ls->ls_orphans_mutex); - list_for_each_entry(lkb, &ls->ls_orphans, lkb_ownqueue) { - if (lkb->lkb_resource->res_length != namelen) + list_for_each_entry(iter, &ls->ls_orphans, lkb_ownqueue) { + if (iter->lkb_resource->res_length != namelen) continue; - if (memcmp(lkb->lkb_resource->res_name, name, namelen)) + if (memcmp(iter->lkb_resource->res_name, name, namelen)) continue; - if (lkb->lkb_grmode != mode) { + if (iter->lkb_grmode != mode) { found_other_mode = 1; continue; } - found = 1; - list_del_init(&lkb->lkb_ownqueue); - lkb->lkb_flags &= ~DLM_IFL_ORPHAN; - *lkid = lkb->lkb_id; + lkb = iter; + list_del_init(&iter->lkb_ownqueue); + iter->lkb_flags &= ~DLM_IFL_ORPHAN; + *lkid = iter->lkb_id; break; } mutex_unlock(&ls->ls_orphans_mutex); - if (!found && found_other_mode) { + if (!lkb && found_other_mode) { rv = -EAGAIN; goto out; } - if (!found) { + if (!lkb) { rv = -ENOENT; goto out; } diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c index f3482e936cc2500f27d7d74c4b1c83db80eb403a..28735e8c5e2069bf497412b78350caa8076eb110 100644 --- a/fs/dlm/plock.c +++ b/fs/dlm/plock.c @@ -80,8 +80,7 @@ static void send_op(struct plock_op *op) abandoned waiter. 
So, we have to insert the unlock-close when the lock call is interrupted. */ -static void do_unlock_close(struct dlm_ls *ls, u64 number, - struct file *file, struct file_lock *fl) +static void do_unlock_close(const struct dlm_plock_info *info) { struct plock_op *op; @@ -90,15 +89,12 @@ static void do_unlock_close(struct dlm_ls *ls, u64 number, return; op->info.optype = DLM_PLOCK_OP_UNLOCK; - op->info.pid = fl->fl_pid; - op->info.fsid = ls->ls_global_id; - op->info.number = number; + op->info.pid = info->pid; + op->info.fsid = info->fsid; + op->info.number = info->number; op->info.start = 0; op->info.end = OFFSET_MAX; - if (fl->fl_lmops && fl->fl_lmops->lm_grant) - op->info.owner = (__u64) fl->fl_pid; - else - op->info.owner = (__u64)(long) fl->fl_owner; + op->info.owner = info->owner; op->info.flags |= DLM_PLOCK_FL_CLOSE; send_op(op); @@ -161,13 +157,14 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file, rv = wait_event_killable(recv_wq, (op->done != 0)); if (rv == -ERESTARTSYS) { - log_debug(ls, "%s: wait killed %llx", __func__, - (unsigned long long)number); spin_lock(&ops_lock); list_del(&op->list); spin_unlock(&ops_lock); + log_debug(ls, "%s: wait interrupted %x %llx pid %d", + __func__, ls->ls_global_id, + (unsigned long long)number, op->info.pid); dlm_release_plock_op(op); - do_unlock_close(ls, number, file, fl); + do_unlock_close(&op->info); goto out; } @@ -408,7 +405,7 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count, if (op->info.flags & DLM_PLOCK_FL_CLOSE) list_del(&op->list); else - list_move(&op->list, &recv_list); + list_move_tail(&op->list, &recv_list); memcpy(&info, &op->info, sizeof(info)); } spin_unlock(&ops_lock); @@ -433,9 +430,9 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count, static ssize_t dev_write(struct file *file, const char __user *u, size_t count, loff_t *ppos) { + struct plock_op *op = NULL, *iter; struct dlm_plock_info info; - struct plock_op *op; - int found = 0, do_callback = 0; + int do_callback = 0; if (count != sizeof(info)) return -EINVAL; @@ -446,31 +443,63 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count, if (check_version(&info)) return -EINVAL; + /* + * The results for waiting ops (SETLKW) can be returned in any + * order, so match all fields to find the op. The results for + * non-waiting ops are returned in the order that they were sent + * to userspace, so match the result with the first non-waiting op. + */ spin_lock(&ops_lock); - list_for_each_entry(op, &recv_list, list) { - if (op->info.fsid == info.fsid && - op->info.number == info.number && - op->info.owner == info.owner) { - list_del_init(&op->list); - memcpy(&op->info, &info, sizeof(info)); - if (op->data) - do_callback = 1; - else - op->done = 1; - found = 1; - break; + if (info.wait) { + list_for_each_entry(iter, &recv_list, list) { + if (iter->info.fsid == info.fsid && + iter->info.number == info.number && + iter->info.owner == info.owner && + iter->info.pid == info.pid && + iter->info.start == info.start && + iter->info.end == info.end && + iter->info.ex == info.ex && + iter->info.wait) { + op = iter; + break; + } + } + } else { + list_for_each_entry(iter, &recv_list, list) { + if (!iter->info.wait) { + op = iter; + break; + } } } + + if (op) { + /* Sanity check that op and info match. 
*/ + if (info.wait) + WARN_ON(op->info.optype != DLM_PLOCK_OP_LOCK); + else + WARN_ON(op->info.fsid != info.fsid || + op->info.number != info.number || + op->info.owner != info.owner || + op->info.optype != info.optype); + + list_del_init(&op->list); + memcpy(&op->info, &info, sizeof(info)); + if (op->data) + do_callback = 1; + else + op->done = 1; + } spin_unlock(&ops_lock); - if (found) { + if (op) { if (do_callback) dlm_plock_callback(op); else wake_up(&recv_wq); } else - log_print("dev_write no op %x %llx", info.fsid, - (unsigned long long)info.number); + log_print("%s: no op %x %llx", __func__, + info.fsid, (unsigned long long)info.number); return count; } diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c index 8928e99dfd47d0ac033e064b23570423850537c4..df18f38a02734087ce628c609ecf222881f55fb1 100644 --- a/fs/dlm/recover.c +++ b/fs/dlm/recover.c @@ -732,10 +732,9 @@ void dlm_recovered_lock(struct dlm_rsb *r) static void recover_lvb(struct dlm_rsb *r) { - struct dlm_lkb *lkb, *high_lkb = NULL; + struct dlm_lkb *big_lkb = NULL, *iter, *high_lkb = NULL; uint32_t high_seq = 0; int lock_lvb_exists = 0; - int big_lock_exists = 0; int lvblen = r->res_ls->ls_lvblen; if (!rsb_flag(r, RSB_NEW_MASTER2) && @@ -751,37 +750,37 @@ static void recover_lvb(struct dlm_rsb *r) /* we are the new master, so figure out if VALNOTVALID should be set, and set the rsb lvb from the best lkb available. */ - list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) { - if (!(lkb->lkb_exflags & DLM_LKF_VALBLK)) + list_for_each_entry(iter, &r->res_grantqueue, lkb_statequeue) { + if (!(iter->lkb_exflags & DLM_LKF_VALBLK)) continue; lock_lvb_exists = 1; - if (lkb->lkb_grmode > DLM_LOCK_CR) { - big_lock_exists = 1; + if (iter->lkb_grmode > DLM_LOCK_CR) { + big_lkb = iter; goto setflag; } - if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) { - high_lkb = lkb; - high_seq = lkb->lkb_lvbseq; + if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) { + high_lkb = iter; + high_seq = iter->lkb_lvbseq; } } - list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) { - if (!(lkb->lkb_exflags & DLM_LKF_VALBLK)) + list_for_each_entry(iter, &r->res_convertqueue, lkb_statequeue) { + if (!(iter->lkb_exflags & DLM_LKF_VALBLK)) continue; lock_lvb_exists = 1; - if (lkb->lkb_grmode > DLM_LOCK_CR) { - big_lock_exists = 1; + if (iter->lkb_grmode > DLM_LOCK_CR) { + big_lkb = iter; goto setflag; } - if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) { - high_lkb = lkb; - high_seq = lkb->lkb_lvbseq; + if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) { + high_lkb = iter; + high_seq = iter->lkb_lvbseq; } } @@ -790,7 +789,7 @@ static void recover_lvb(struct dlm_rsb *r) goto out; /* lvb is invalidated if only NL/CR locks remain */ - if (!big_lock_exists) + if (!big_lkb) rsb_set_flag(r, RSB_VALNOTVALID); if (!r->res_lvbptr) { @@ -799,9 +798,9 @@ static void recover_lvb(struct dlm_rsb *r) goto out; } - if (big_lock_exists) { - r->res_lvbseq = lkb->lkb_lvbseq; - memcpy(r->res_lvbptr, lkb->lkb_lvbptr, lvblen); + if (big_lkb) { + r->res_lvbseq = big_lkb->lkb_lvbseq; + memcpy(r->res_lvbptr, big_lkb->lkb_lvbptr, lvblen); } else if (high_lkb) { r->res_lvbseq = high_lkb->lkb_lvbseq; memcpy(r->res_lvbptr, high_lkb->lkb_lvbptr, lvblen); diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index e01b6a2d12d30642f0fecf6b4363a16cdbce10f1..b61de8dab51a0abf54690560b7fc239afc35f122 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -1017,7 +1017,14 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root) { struct gfs2_sbd *sdp = 
root->d_sb->s_fs_info; struct gfs2_args *args = &sdp->sd_args; - int val; + unsigned int logd_secs, statfs_slow, statfs_quantum, quota_quantum; + + spin_lock(&sdp->sd_tune.gt_spin); + logd_secs = sdp->sd_tune.gt_logd_secs; + quota_quantum = sdp->sd_tune.gt_quota_quantum; + statfs_quantum = sdp->sd_tune.gt_statfs_quantum; + statfs_slow = sdp->sd_tune.gt_statfs_slow; + spin_unlock(&sdp->sd_tune.gt_spin); if (is_ancestor(root, sdp->sd_master_dir)) seq_puts(s, ",meta"); @@ -1072,17 +1079,14 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root) } if (args->ar_discard) seq_puts(s, ",discard"); - val = sdp->sd_tune.gt_logd_secs; - if (val != 30) - seq_printf(s, ",commit=%d", val); - val = sdp->sd_tune.gt_statfs_quantum; - if (val != 30) - seq_printf(s, ",statfs_quantum=%d", val); - else if (sdp->sd_tune.gt_statfs_slow) + if (logd_secs != 30) + seq_printf(s, ",commit=%d", logd_secs); + if (statfs_quantum != 30) + seq_printf(s, ",statfs_quantum=%d", statfs_quantum); + else if (statfs_slow) seq_puts(s, ",statfs_quantum=0"); - val = sdp->sd_tune.gt_quota_quantum; - if (val != 60) - seq_printf(s, ",quota_quantum=%d", val); + if (quota_quantum != 60) + seq_printf(s, ",quota_quantum=%d", quota_quantum); if (args->ar_statfs_percent) seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent); if (args->ar_errors != GFS2_ERRORS_DEFAULT) { diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c index bd9af2be352fc0cd8a7daac0a6b85bf234f4ef58..cef3303d949955cfc012c7af74534d76fde57722 100644 --- a/fs/jfs/jfs_dmap.c +++ b/fs/jfs/jfs_dmap.c @@ -2027,6 +2027,9 @@ dbAllocDmapLev(struct bmap * bmp, if (dbFindLeaf((dmtree_t *) & dp->tree, l2nb, &leafidx)) return -ENOSPC; + if (leafidx < 0) + return -EIO; + /* determine the block number within the file system corresponding * to the leaf at which free space was found. 
*/ diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c index c8ce7f1bc59424ba9f3db4ac4946d7ff04734a25..6f6a5b9203d3f308353b309f777dc416f1e7b7bf 100644 --- a/fs/jfs/jfs_txnmgr.c +++ b/fs/jfs/jfs_txnmgr.c @@ -354,6 +354,11 @@ tid_t txBegin(struct super_block *sb, int flag) jfs_info("txBegin: flag = 0x%x", flag); log = JFS_SBI(sb)->log; + if (!log) { + jfs_error(sb, "read-only filesystem\n"); + return 0; + } + TXN_LOCK(); INCREMENT(TxStat.txBegin); diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c index 7a55d14cc1af0da7fe3fb06eac4fa2c5f117f34c..f155ad6650bd4e0d0decd2c4bc4b508b72cfca32 100644 --- a/fs/jfs/namei.c +++ b/fs/jfs/namei.c @@ -798,6 +798,11 @@ static int jfs_link(struct dentry *old_dentry, if (rc) goto out; + if (isReadOnly(ip)) { + jfs_error(ip->i_sb, "read-only filesystem\n"); + return -EROFS; + } + tid = txBegin(ip->i_sb, 0); mutex_lock_nested(&JFS_IP(dir)->commit_mutex, COMMIT_MUTEX_PARENT); diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index c220810c61d145a4b6e3f113aa0a27781b01a5c3..fbc7304bed56b59d52b8f40e304ac9d4e1b872e2 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -509,20 +509,26 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter, return result; } -static void -nfs_direct_join_group(struct list_head *list, struct inode *inode) +static void nfs_direct_join_group(struct list_head *list, struct inode *inode) { - struct nfs_page *req, *next; + struct nfs_page *req, *subreq; list_for_each_entry(req, list, wb_list) { - if (req->wb_head != req || req->wb_this_page == req) + if (req->wb_head != req) continue; - for (next = req->wb_this_page; - next != req->wb_head; - next = next->wb_this_page) { - nfs_list_remove_request(next); - nfs_release_request(next); - } + subreq = req->wb_this_page; + if (subreq == req) + continue; + do { + /* + * Remove subrequests from this list before freeing + * them in the call to nfs_join_page_group(). 
+ */ + if (!list_empty(&subreq->wb_list)) { + nfs_list_remove_request(subreq); + nfs_release_request(subreq); + } + } while ((subreq = subreq->wb_this_page) != req); nfs_join_page_group(req, inode); } } diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index b9567cc8698ed661e58802486e99b1af9a50dc2c..c34df51a8f2b73cfec74d1168ce21333fb034f64 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -5864,9 +5864,8 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu out_ok: ret = res.acl_len; out_free: - for (i = 0; i < npages; i++) - if (pages[i]) - __free_page(pages[i]); + while (--i >= 0) + __free_page(pages[i]); if (res.acl_scratch) __free_page(res.acl_scratch); kfree(pages); @@ -7047,8 +7046,15 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata) } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid)) goto out_restart; break; - case -NFS4ERR_BAD_STATEID: case -NFS4ERR_OLD_STATEID: + if (data->arg.new_lock_owner != 0 && + nfs4_refresh_open_old_stateid(&data->arg.open_stateid, + lsp->ls_state)) + goto out_restart; + if (nfs4_refresh_lock_old_stateid(&data->arg.lock_stateid, lsp)) + goto out_restart; + fallthrough; + case -NFS4ERR_BAD_STATEID: case -NFS4ERR_STALE_STATEID: case -NFS4ERR_EXPIRED: if (data->arg.new_lock_owner != 0) { diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 1c1b231b2ab332f9ba0be79ac96ee1b01ea5a6f0..b045be7394a08dff3f34d0f6384ce897c649a634 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -1145,9 +1145,9 @@ static void revoke_delegation(struct nfs4_delegation *dp) WARN_ON(!list_empty(&dp->dl_recall_lru)); if (clp->cl_minorversion) { + spin_lock(&clp->cl_lock); dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID; refcount_inc(&dp->dl_stid.sc_count); - spin_lock(&clp->cl_lock); list_add(&dp->dl_recall_lru, &clp->cl_revoked); spin_unlock(&clp->cl_lock); } diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h index b208eba5d0b64d388def17af4445c34711b91808..b58a0140d78d5e3938d0e20f61fd4042a8db7cce 100644 --- a/fs/overlayfs/ovl_entry.h +++ b/fs/overlayfs/ovl_entry.h @@ -30,6 +30,7 @@ struct ovl_sb { }; struct ovl_layer { + /* ovl_free_fs() relies on @mnt being the first member! */ struct vfsmount *mnt; /* Trap in ovl inode cache */ struct inode *trap; @@ -40,6 +41,14 @@ struct ovl_layer { int fsid; }; +/* + * ovl_free_fs() relies on @mnt being the first member when unmounting + * the private mounts created for each layer. Let's check both the + * offset and type. 
+ */ +static_assert(offsetof(struct ovl_layer, mnt) == 0); +static_assert(__same_type(typeof_member(struct ovl_layer, mnt), struct vfsmount *)); + struct ovl_path { const struct ovl_layer *layer; struct dentry *dentry; diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index ad255f8ab5c5548a332a64d0ba6ee8d3866601f1..8d0cd68fc90a4c52d4f08a819d69e8b2ab962358 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -557,7 +557,7 @@ static void invalidate_dquots(struct super_block *sb, int type) continue; /* Wait for dquot users */ if (atomic_read(&dquot->dq_count)) { - dqgrab(dquot); + atomic_inc(&dquot->dq_count); spin_unlock(&dq_list_lock); /* * Once dqput() wakes us up, we know it's time to free @@ -2415,7 +2415,8 @@ int dquot_load_quota_sb(struct super_block *sb, int type, int format_id, error = add_dquot_ref(sb, type); if (error) - dquot_disable(sb, type, flags); + dquot_disable(sb, type, + DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); return error; out_fmt: diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c index 622569007b530bf848b98ddb7f58f45a7c442edb..2142cbd1dde243c1738ec389decd13ca9ce05158 100644 --- a/fs/udf/unicode.c +++ b/fs/udf/unicode.c @@ -247,7 +247,7 @@ static int udf_name_from_CS0(struct super_block *sb, } if (translate) { - if (str_o_len <= 2 && str_o[0] == '.' && + if (str_o_len > 0 && str_o_len <= 2 && str_o[0] == '.' && (str_o_len == 1 || str_o[1] == '.')) needsCRC = 1; if (needsCRC) { diff --git a/include/dt-bindings/iio/addac/adi,ad74413r.h b/include/dt-bindings/iio/addac/adi,ad74413r.h new file mode 100644 index 0000000000000000000000000000000000000000..204f92bbd79f26a5c68b000feb0370043a1519f0 --- /dev/null +++ b/include/dt-bindings/iio/addac/adi,ad74413r.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _DT_BINDINGS_ADI_AD74413R_H +#define _DT_BINDINGS_ADI_AD74413R_H + +#define CH_FUNC_HIGH_IMPEDANCE 0x0 +#define CH_FUNC_VOLTAGE_OUTPUT 0x1 +#define CH_FUNC_CURRENT_OUTPUT 0x2 +#define CH_FUNC_VOLTAGE_INPUT 0x3 +#define CH_FUNC_CURRENT_INPUT_EXT_POWER 0x4 +#define CH_FUNC_CURRENT_INPUT_LOOP_POWER 0x5 +#define CH_FUNC_RESISTANCE_INPUT 0x6 +#define CH_FUNC_DIGITAL_INPUT_LOGIC 0x7 +#define CH_FUNC_DIGITAL_INPUT_LOOP_POWER 0x8 +#define CH_FUNC_CURRENT_INPUT_EXT_POWER_HART 0x9 +#define CH_FUNC_CURRENT_INPUT_LOOP_POWER_HART 0xA + +#define CH_FUNC_MIN CH_FUNC_HIGH_IMPEDANCE +#define CH_FUNC_MAX CH_FUNC_CURRENT_INPUT_LOOP_POWER_HART + +#endif /* _DT_BINDINGS_ADI_AD74413R_H */ diff --git a/include/linux/ceph/msgr.h b/include/linux/ceph/msgr.h index 9e50aede46c8327bec9309b2ad21eb1ecff2c711..7bde0af29a8148f878cae93d90af648fe62a3a81 100644 --- a/include/linux/ceph/msgr.h +++ b/include/linux/ceph/msgr.h @@ -61,11 +61,18 @@ extern const char *ceph_entity_type_name(int type); * entity_addr -- network address */ struct ceph_entity_addr { - __le32 type; + __le32 type; /* CEPH_ENTITY_ADDR_TYPE_* */ __le32 nonce; /* unique id for process (e.g. 
pid) */ struct sockaddr_storage in_addr; } __attribute__ ((packed)); +static inline bool ceph_addr_equal_no_type(const struct ceph_entity_addr *lhs, + const struct ceph_entity_addr *rhs) +{ + return !memcmp(&lhs->in_addr, &rhs->in_addr, sizeof(lhs->in_addr)) && + lhs->nonce == rhs->nonce; +} + struct ceph_entity_inst { struct ceph_entity_name name; struct ceph_entity_addr addr; diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h index 2c8860e406bd8cae9ebcaa350f14e4801ecbf6ee..0417360a6db9b0d65a3e52f8572951daa96987f3 100644 --- a/include/linux/iopoll.h +++ b/include/linux/iopoll.h @@ -53,6 +53,7 @@ } \ if (__sleep_us) \ usleep_range((__sleep_us >> 2) + 1, __sleep_us); \ + cpu_relax(); \ } \ (cond) ? 0 : -ETIMEDOUT; \ }) @@ -95,6 +96,7 @@ } \ if (__delay_us) \ udelay(__delay_us); \ + cpu_relax(); \ } \ (cond) ? 0 : -ETIMEDOUT; \ }) diff --git a/include/linux/mhi.h b/include/linux/mhi.h index d4841e5a5f458531123b635f1f134e8520fb1b8f..5d9f8c6f3d40f68efadb44c8e587b41b3ce0323f 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -303,6 +303,7 @@ struct mhi_controller_config { * @rddm_size: RAM dump size that host should allocate for debugging purpose * @sbl_size: SBL image size downloaded through BHIe (optional) * @seg_len: BHIe vector size (optional) + * @reg_len: Length of the MHI MMIO region (required) * @fbc_image: Points to firmware image buffer * @rddm_image: Points to RAM dump buffer * @mhi_chan: Points to the channel configuration table @@ -383,6 +384,7 @@ struct mhi_controller { size_t rddm_size; size_t sbl_size; size_t seg_len; + size_t reg_len; struct image_info *fbc_image; struct image_info *rddm_image; struct mhi_chan *mhi_chan; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index ae88362216a4e1a32078dbddaf9a1410e4c0072d..4f95b98215d81a1e1c00e3236a3b0a360df96b51 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -644,18 +644,22 @@ struct mlx5_pps { u8 enabled; }; -struct mlx5_clock { - struct mlx5_nb pps_nb; - seqlock_t lock; +struct mlx5_timer { struct cyclecounter cycles; struct timecounter tc; - struct hwtstamp_config hwtstamp_config; u32 nominal_c_mult; unsigned long overflow_period; struct delayed_work overflow_work; +}; + +struct mlx5_clock { + struct mlx5_nb pps_nb; + seqlock_t lock; + struct hwtstamp_config hwtstamp_config; struct ptp_clock *ptp; struct ptp_clock_info ptp_info; struct mlx5_pps pps_info; + struct mlx5_timer timer; }; struct mlx5_dm; diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 40d7e98fc9902318fe86fd01dec1700067785b5d..fb294cbb9081ddd63504b4ed6ee0b31eb3ce88f4 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -477,6 +477,7 @@ struct mmc_host { struct device_node; struct mmc_host *mmc_alloc_host(int extra, struct device *); +struct mmc_host *devm_mmc_alloc_host(struct device *dev, int extra); int mmc_add_host(struct mmc_host *); void mmc_remove_host(struct mmc_host *); void mmc_free_host(struct mmc_host *); diff --git a/include/linux/objtool.h b/include/linux/objtool.h index 662f19374bd982b6e732e79ba62c427427920252..d17fa7f4001d79214a98bb2f18652fec0902e72e 100644 --- a/include/linux/objtool.h +++ b/include/linux/objtool.h @@ -71,6 +71,23 @@ struct unwind_hint { static void __used __section(".discard.func_stack_frame_non_standard") \ *__func_stack_frame_non_standard_##func = func +/* + * STACK_FRAME_NON_STANDARD_FP() is a frame-pointer-specific function ignore + * for the case where a function is intentionally missing frame pointer setup, + * but 
otherwise needs objtool/ORC coverage when frame pointers are disabled. + */ +#ifdef CONFIG_FRAME_POINTER +#define STACK_FRAME_NON_STANDARD_FP(func) STACK_FRAME_NON_STANDARD(func) +#else +#define STACK_FRAME_NON_STANDARD_FP(func) +#endif + +#define ANNOTATE_NOENDBR \ + "986: \n\t" \ + ".pushsection .discard.noendbr\n\t" \ + _ASM_PTR " 986b\n\t" \ + ".popsection\n\t" + #else /* __ASSEMBLY__ */ /* @@ -117,6 +134,13 @@ struct unwind_hint { .popsection .endm +.macro ANNOTATE_NOENDBR +.Lhere_\@: + .pushsection .discard.noendbr + .quad .Lhere_\@ + .popsection +.endm + #endif /* __ASSEMBLY__ */ #else /* !CONFIG_STACK_VALIDATION */ @@ -126,10 +150,14 @@ struct unwind_hint { #define UNWIND_HINT(sp_reg, sp_offset, type, end) \ "\n\t" #define STACK_FRAME_NON_STANDARD(func) +#define STACK_FRAME_NON_STANDARD_FP(func) +#define ANNOTATE_NOENDBR #else #define ANNOTATE_INTRA_FUNCTION_CALL .macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 end=0 .endm +.macro ANNOTATE_NOENDBR +.endm #endif #endif /* CONFIG_STACK_VALIDATION */ diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index e7351d64f11fac7e8806d8a4cfdd3e9cbf811624..11df3d5b40c6b85f11f8eb49585d7ef2b10cce20 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -326,6 +326,7 @@ struct usb_gadget_ops { struct usb_ep *(*match_ep)(struct usb_gadget *, struct usb_endpoint_descriptor *, struct usb_ss_ep_comp_descriptor *); + int (*check_config)(struct usb_gadget *gadget); }; /** @@ -596,6 +597,7 @@ int usb_gadget_connect(struct usb_gadget *gadget); int usb_gadget_disconnect(struct usb_gadget *gadget); int usb_gadget_deactivate(struct usb_gadget *gadget); int usb_gadget_activate(struct usb_gadget *gadget); +int usb_gadget_check_config(struct usb_gadget *gadget); #else static inline int usb_gadget_frame_number(struct usb_gadget *gadget) { return 0; } @@ -619,6 +621,8 @@ static inline int usb_gadget_deactivate(struct usb_gadget *gadget) { return 0; } static inline int usb_gadget_activate(struct usb_gadget *gadget) { return 0; } +static inline int usb_gadget_check_config(struct usb_gadget *gadget) +{ return 0; } #endif /* CONFIG_USB_GADGET */ /*-------------------------------------------------------------------------*/ diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index a960de68ac69ee12939a95729e36513312e41a68..6047058d67037b21ff51cfb509b1b6e8032ae174 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -148,6 +148,10 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, if (gso_type & SKB_GSO_UDP) nh_off -= thlen; + /* Kernel has a special handling for GSO_BY_FRAGS. */ + if (gso_size == GSO_BY_FRAGS) + return -EINVAL; + /* Too small packets are not really GSO ones. 
*/ if (skb->len - nh_off > gso_size) { shinfo->gso_size = gso_size; diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h index 5a91b548ecc0cc8dd7eeb97ad4346d23e546af65..8d52c4506762d8ea4f04fb42c53a6292c633415b 100644 --- a/include/media/v4l2-mem2mem.h +++ b/include/media/v4l2-mem2mem.h @@ -588,7 +588,14 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, static inline unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx) { - return m2m_ctx->out_q_ctx.num_rdy; + unsigned int num_buf_rdy; + unsigned long flags; + + spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); + num_buf_rdy = m2m_ctx->out_q_ctx.num_rdy; + spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); + + return num_buf_rdy; } /** @@ -600,7 +607,14 @@ unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx) static inline unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx) { - return m2m_ctx->cap_q_ctx.num_rdy; + unsigned int num_buf_rdy; + unsigned long flags; + + spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags); + num_buf_rdy = m2m_ctx->cap_q_ctx.num_rdy; + spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags); + + return num_buf_rdy; } /** diff --git a/include/net/bonding.h b/include/net/bonding.h index a248caff969f5334be9d42a4f50752e6be2f9b7d..82d128c0fe6df28d6c7fa7942ef2640a9e741d77 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -698,37 +698,14 @@ static inline struct slave *bond_slave_has_mac(struct bonding *bond, } /* Caller must hold rcu_read_lock() for read */ -static inline struct slave *bond_slave_has_mac_rcu(struct bonding *bond, - const u8 *mac) +static inline bool bond_slave_has_mac_rcu(struct bonding *bond, const u8 *mac) { struct list_head *iter; struct slave *tmp; - bond_for_each_slave_rcu(bond, tmp, iter) - if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr)) - return tmp; - - return NULL; -} - -/* Caller must hold rcu_read_lock() for read */ -static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac) -{ - struct list_head *iter; - struct slave *tmp; - struct netdev_hw_addr *ha; - bond_for_each_slave_rcu(bond, tmp, iter) if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr)) return true; - - if (netdev_uc_empty(bond->dev)) - return false; - - netdev_for_each_uc_addr(ha, bond->dev) - if (ether_addr_equal_64bits(mac, ha->addr)) - return true; - return false; } diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index 4da61c950e931a424d60c6af5b7400e80d6bb904..5c2a73bbfabeefa4c00aa059c133c356bac77b5d 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -166,8 +166,8 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname, int rtnl_delete_link(struct net_device *dev); int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm); -int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len, - struct netlink_ext_ack *exterr); +int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer, + struct netlink_ext_ack *exterr); struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid); #define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind) diff --git a/include/net/sock.h b/include/net/sock.h index 50e7e82f2265f4d37372fcbbb5d14b3cd95178bb..c985cf5394dabf3ca3e7b9a70b68f35d0a748f0d 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1241,6 +1241,7 @@ struct proto { /* * Pressure flag: try to collapse. 
* Technical note: it is used by multiple contexts non atomically. + * Make sure to use READ_ONCE()/WRITE_ONCE() for all reads/writes. * All the __sk_mem_schedule() is of this nature: accounting * is strict, actions are advisory and have some latency. */ @@ -1354,6 +1355,12 @@ static inline bool sk_has_memory_pressure(const struct sock *sk) return sk->sk_prot->memory_pressure != NULL; } +static inline bool sk_under_global_memory_pressure(const struct sock *sk) +{ + return sk->sk_prot->memory_pressure && + !!READ_ONCE(*sk->sk_prot->memory_pressure); +} + static inline bool sk_under_memory_pressure(const struct sock *sk) { if (!sk->sk_prot->memory_pressure) @@ -1363,7 +1370,7 @@ static inline bool sk_under_memory_pressure(const struct sock *sk) mem_cgroup_under_socket_pressure(sk->sk_memcg)) return true; - return !!*sk->sk_prot->memory_pressure; + return !!READ_ONCE(*sk->sk_prot->memory_pressure); } static inline long @@ -1417,7 +1424,7 @@ proto_memory_pressure(struct proto *prot) { if (!prot->memory_pressure) return false; - return !!*prot->memory_pressure; + return !!READ_ONCE(*prot->memory_pressure); } diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c index 905c3fa005f107031cdeb7648cb211ecb5e566f6..5bff061993102b597258971ecc8bfd9617b4e16b 100644 --- a/kernel/dma/remap.c +++ b/kernel/dma/remap.c @@ -43,13 +43,13 @@ void *dma_common_contiguous_remap(struct page *page, size_t size, void *vaddr; int i; - pages = kmalloc_array(count, sizeof(struct page *), GFP_KERNEL); + pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL); if (!pages) return NULL; for (i = 0; i < count; i++) pages[i] = nth_page(page, i); vaddr = vmap(pages, count, VM_DMA_COHERENT, prot); - kfree(pages); + kvfree(pages); return vaddr; } diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 3b8c53264441ed4a461f13b215cb456c79d33735..f8126fa0630e2a68e2212809f07fd49453607a5c 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -541,6 +541,7 @@ struct trace_buffer { unsigned flags; int cpus; atomic_t record_disabled; + atomic_t resizing; cpumask_var_t cpumask; struct lock_class_key *reader_lock_key; @@ -2041,7 +2042,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, /* prevent another thread from changing buffer sizes */ mutex_lock(&buffer->mutex); - + atomic_inc(&buffer->resizing); if (cpu_id == RING_BUFFER_ALL_CPUS) { /* @@ -2184,6 +2185,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, atomic_dec(&buffer->record_disabled); } + atomic_dec(&buffer->resizing); mutex_unlock(&buffer->mutex); return 0; @@ -2204,6 +2206,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, } } out_err_unlock: + atomic_dec(&buffer->resizing); mutex_unlock(&buffer->mutex); return err; } @@ -5253,6 +5256,15 @@ int ring_buffer_swap_cpu(struct trace_buffer *buffer_a, if (local_read(&cpu_buffer_b->committing)) goto out_dec; + /* + * When resize is in progress, we cannot swap it because + * it will mess the state of the cpu buffer. 
+ */ + if (atomic_read(&buffer_a->resizing)) + goto out_dec; + if (atomic_read(&buffer_b->resizing)) + goto out_dec; + buffer_a->buffers[cpu] = cpu_buffer_b; buffer_b->buffers[cpu] = cpu_buffer_a; diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 7e99319bd5365026b3a0c983b6a8e2d89be8782c..597487a7f1bfb9359d8417b6cdf8288e32bd9697 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1882,9 +1882,10 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) * place on this CPU. We fail to record, but we reset * the max trace buffer (no one writes directly to it) * and flag that it failed. + * Another reason is resize is in progress. */ trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_, - "Failed to swap buffers due to commit in progress\n"); + "Failed to swap buffers due to commit or resize in progress\n"); } WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); @@ -3781,8 +3782,15 @@ static void *s_start(struct seq_file *m, loff_t *pos) * will point to the same string as current_trace->name. */ mutex_lock(&trace_types_lock); - if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) + if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) { + /* Close iter->trace before switching to the new current tracer */ + if (iter->trace->close) + iter->trace->close(iter); *iter->trace = *tr->current_trace; + /* Reopen the new current tracer */ + if (iter->trace->open) + iter->trace->open(iter); + } mutex_unlock(&trace_types_lock); #ifdef CONFIG_TRACER_MAX_TRACE @@ -4842,11 +4850,17 @@ int tracing_set_cpumask(struct trace_array *tr, !cpumask_test_cpu(cpu, tracing_cpumask_new)) { atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled); ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu); +#ifdef CONFIG_TRACER_MAX_TRACE + ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu); +#endif } if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) && cpumask_test_cpu(cpu, tracing_cpumask_new)) { atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled); ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu); +#ifdef CONFIG_TRACER_MAX_TRACE + ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu); +#endif } } arch_spin_unlock(&tr->max_lock); diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index ee4571b624bcb264d671e0732bb60784d0b2525a..619a60944bb6d8ad3ad44a23e57f44f2f7411b6e 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c @@ -228,7 +228,8 @@ static void irqsoff_trace_open(struct trace_iterator *iter) { if (is_graph(iter->tr)) graph_trace_open(iter); - + else + iter->private = NULL; } static void irqsoff_trace_close(struct trace_iterator *iter) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 41dd17390c732bdfda00c1c39d04b4ae17d68efc..b882c6519b035b08832e13d450848c76ee57fb25 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1332,9 +1332,10 @@ probe_mem_read(void *dest, void *src, size_t size) /* Note that we don't verify it, since the code does not come from user space */ static int -process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest, +process_fetch_insn(struct fetch_insn *code, void *rec, void *dest, void *base) { + struct pt_regs *regs = rec; unsigned long val; retry: diff --git a/kernel/trace/trace_probe_tmpl.h b/kernel/trace/trace_probe_tmpl.h index 29348874ebde724c4820b774ed836bfb23da1cc8..cf14a37dff8c8dd3650a7c5cc5b556bc4aeeb879 100644 --- 
a/kernel/trace/trace_probe_tmpl.h +++ b/kernel/trace/trace_probe_tmpl.h @@ -54,7 +54,7 @@ fetch_apply_bitfield(struct fetch_insn *code, void *buf) * If dest is NULL, don't store result and return required dynamic data size. */ static int -process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, +process_fetch_insn(struct fetch_insn *code, void *rec, void *dest, void *base); static nokprobe_inline int fetch_store_strlen(unsigned long addr); static nokprobe_inline int @@ -190,7 +190,7 @@ __get_data_size(struct trace_probe *tp, struct pt_regs *regs) /* Store the value of each argument */ static nokprobe_inline void -store_trace_args(void *data, struct trace_probe *tp, struct pt_regs *regs, +store_trace_args(void *data, struct trace_probe *tp, void *rec, int header_size, int maxlen) { struct probe_arg *arg; @@ -205,12 +205,14 @@ store_trace_args(void *data, struct trace_probe *tp, struct pt_regs *regs, /* Point the dynamic data area if needed */ if (unlikely(arg->dynamic)) *dl = make_data_loc(maxlen, dyndata - base); - ret = process_fetch_insn(arg->code, regs, dl, base); - if (unlikely(ret < 0 && arg->dynamic)) { - *dl = make_data_loc(0, dyndata - base); - } else { - dyndata += ret; - maxlen -= ret; + ret = process_fetch_insn(arg->code, rec, dl, base); + if (arg->dynamic) { + if (unlikely(ret < 0)) { + *dl = make_data_loc(0, dyndata - base); + } else { + dyndata += ret; + maxlen -= ret; + } } } } diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 97b10bb31a1f096f4d46d311d559cbf2320f30b2..037e1e863b17f660b0052aa3315be30f55f0e2a1 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -171,6 +171,8 @@ static void wakeup_trace_open(struct trace_iterator *iter) { if (is_graph(iter->tr)) graph_trace_open(iter); + else + iter->private = NULL; } static void wakeup_trace_close(struct trace_iterator *iter) diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 9900d4e3808cc1d47370124916627cc51685064c..f6c47361c154ef44bd7fc6f9918ca9d9daffdac3 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -217,9 +217,10 @@ static unsigned long translate_user_vaddr(unsigned long file_offset) /* Note that we don't verify it, since the code does not come from user space */ static int -process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest, +process_fetch_insn(struct fetch_insn *code, void *rec, void *dest, void *base) { + struct pt_regs *regs = rec; unsigned long val; /* 1st stage: get value from context */ diff --git a/lib/clz_ctz.c b/lib/clz_ctz.c index 0d3a686b5ba29a91332a1bff14b4a2f70c0742e4..fb8c0c5c2bd277c2e14f8a45fbd4bf2efd744af3 100644 --- a/lib/clz_ctz.c +++ b/lib/clz_ctz.c @@ -28,36 +28,16 @@ int __weak __clzsi2(int val) } EXPORT_SYMBOL(__clzsi2); -int __weak __clzdi2(long val); -int __weak __ctzdi2(long val); -#if BITS_PER_LONG == 32 - -int __weak __clzdi2(long val) +int __weak __clzdi2(u64 val); +int __weak __clzdi2(u64 val) { - return 32 - fls((int)val); + return 64 - fls64(val); } EXPORT_SYMBOL(__clzdi2); -int __weak __ctzdi2(long val) +int __weak __ctzdi2(u64 val); +int __weak __ctzdi2(u64 val) { - return __ffs((u32)val); + return __ffs64(val); } EXPORT_SYMBOL(__ctzdi2); - -#elif BITS_PER_LONG == 64 - -int __weak __clzdi2(long val) -{ - return 64 - fls64((u64)val); -} -EXPORT_SYMBOL(__clzdi2); - -int __weak __ctzdi2(long val) -{ - return __ffs64((u64)val); -} -EXPORT_SYMBOL(__ctzdi2); - -#else -#error BITS_PER_LONG not 32 or 64 -#endif diff --git 
a/lib/radix-tree.c b/lib/radix-tree.c index 3a4da11b804d9aaaa148258d3dde8c087c9f237c..cbc69152523636015d6110379167c0784de7eb9b 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -1133,7 +1133,6 @@ static void set_iter_tags(struct radix_tree_iter *iter, void __rcu **radix_tree_iter_resume(void __rcu **slot, struct radix_tree_iter *iter) { - slot++; iter->index = __radix_tree_iter_add(iter, 1); iter->next_index = iter->index; iter->tags = 0; diff --git a/mm/vmalloc.c b/mm/vmalloc.c index f988014288e65cbffef94560b545879bce8940c7..6a1525cd8f589a0d674363b8f9fc1d1fd2a5a3e2 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -2453,6 +2453,10 @@ void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot) free_vm_area(area); return NULL; } + + flush_cache_vmap((unsigned long)area->addr, + (unsigned long)area->addr + count * PAGE_SIZE); + return area->addr; } EXPORT_SYMBOL_GPL(vmap_pfn); diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c index 79a7dfc32e76c511c5b5d8716cbebf64f6228d9d..83586f1dd8d76e01e2b19655a1c03cd2dccd93ab 100644 --- a/net/batman-adv/bat_v_elp.c +++ b/net/batman-adv/bat_v_elp.c @@ -509,7 +509,7 @@ int batadv_v_elp_packet_recv(struct sk_buff *skb, struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); struct batadv_elp_packet *elp_packet; struct batadv_hard_iface *primary_if; - struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb); + struct ethhdr *ethhdr; bool res; int ret = NET_RX_DROP; @@ -517,6 +517,7 @@ int batadv_v_elp_packet_recv(struct sk_buff *skb, if (!res) goto free_skb; + ethhdr = eth_hdr(skb); if (batadv_is_my_mac(bat_priv, ethhdr->h_source)) goto free_skb; diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c index 8c1148fc73d776c076bdb35896f5536d7378578d..c451694fdb42f98dcd1bec54b2f57d72502ae261 100644 --- a/net/batman-adv/bat_v_ogm.c +++ b/net/batman-adv/bat_v_ogm.c @@ -123,8 +123,10 @@ static void batadv_v_ogm_send_to_if(struct sk_buff *skb, { struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); - if (hard_iface->if_status != BATADV_IF_ACTIVE) + if (hard_iface->if_status != BATADV_IF_ACTIVE) { + kfree_skb(skb); return; + } batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_TX); batadv_add_counter(bat_priv, BATADV_CNT_MGMT_TX_BYTES, @@ -998,7 +1000,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb, { struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); struct batadv_ogm2_packet *ogm_packet; - struct ethhdr *ethhdr = eth_hdr(skb); + struct ethhdr *ethhdr; int ogm_offset; u8 *packet_pos; int ret = NET_RX_DROP; @@ -1012,6 +1014,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb, if (!batadv_check_management_packet(skb, if_incoming, BATADV_OGM2_HLEN)) goto free_skb; + ethhdr = eth_hdr(skb); if (batadv_is_my_mac(bat_priv, ethhdr->h_source)) goto free_skb; diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index fe0898a9b4e82963652da8a32615ea2030e620f9..fe79bfc6d2dd17cce735757be9353789132911fe 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -632,7 +632,19 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface) */ void batadv_update_min_mtu(struct net_device *soft_iface) { - soft_iface->mtu = batadv_hardif_min_mtu(soft_iface); + struct batadv_priv *bat_priv = netdev_priv(soft_iface); + int limit_mtu; + int mtu; + + mtu = batadv_hardif_min_mtu(soft_iface); + + if (bat_priv->mtu_set_by_user) + limit_mtu = bat_priv->mtu_set_by_user; + else + limit_mtu = ETH_DATA_LEN; + + mtu = min(mtu, limit_mtu); + 
dev_set_mtu(soft_iface, mtu); /* Check if the local translate table should be cleaned up to match a * new (and smaller) MTU. diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c index 121459704b0693e7597e90c7389034f66a2d13ea..931bc3b5c6df068d3b226eaee80cebc864f258b3 100644 --- a/net/batman-adv/netlink.c +++ b/net/batman-adv/netlink.c @@ -496,7 +496,10 @@ static int batadv_netlink_set_mesh(struct sk_buff *skb, struct genl_info *info) attr = info->attrs[BATADV_ATTR_FRAGMENTATION_ENABLED]; atomic_set(&bat_priv->fragmentation, !!nla_get_u8(attr)); + + rtnl_lock(); batadv_update_min_mtu(bat_priv->soft_iface); + rtnl_unlock(); } if (info->attrs[BATADV_ATTR_GW_BANDWIDTH_DOWN]) { diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 8f7c778255fba74f51e9f84ad094c5beb492b0f9..7ac16d7b94a2f702231043fedbedd4e824371f73 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -156,11 +156,14 @@ static int batadv_interface_set_mac_addr(struct net_device *dev, void *p) static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu) { + struct batadv_priv *bat_priv = netdev_priv(dev); + /* check ranges */ if (new_mtu < 68 || new_mtu > batadv_hardif_min_mtu(dev)) return -EINVAL; dev->mtu = new_mtu; + bat_priv->mtu_set_by_user = new_mtu; return 0; } diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 5f990a2061072b8f7e024efacf9460250e079e5e..9e8ebac9b7e7ec5b66b7d12ce8f06facf741f7ed 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -775,7 +775,6 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr, if (roamed_back) { batadv_tt_global_free(bat_priv, tt_global, "Roaming canceled"); - tt_global = NULL; } else { /* The global entry has to be marked as ROAMING and * has to be kept for consistency purpose diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 965336a3b89d554364d40b45bbee8d25e6e3a714..7d47fe7534c184f36f396167bc3896120e9da6f0 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -1566,6 +1566,12 @@ struct batadv_priv { /** @soft_iface: net device which holds this struct as private data */ struct net_device *soft_iface; + /** + * @mtu_set_by_user: MTU was set once by user + * protected by rtnl_lock + */ + int mtu_set_by_user; + /** * @bat_counters: mesh internal traffic statistic counters (see * batadv_counters) diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 568f0f072b3df0cde0fc5be17647b58ab2ac3ee5..7b40e4737a2bbeea7a49b3c556cc16cd8d36a352 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -6370,9 +6370,14 @@ static inline int l2cap_le_command_rej(struct l2cap_conn *conn, if (!chan) goto done; + chan = l2cap_chan_hold_unless_zero(chan); + if (!chan) + goto done; + l2cap_chan_lock(chan); l2cap_chan_del(chan, ECONNREFUSED); l2cap_chan_unlock(chan); + l2cap_chan_put(chan); done: mutex_unlock(&conn->chan_lock); diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c index c4cf2529d08ba9d76e8a55df411eafc9d3e67ae4..ef5c174102d5e3114e54d31c8c76d43c42d77978 100644 --- a/net/ceph/mon_client.c +++ b/net/ceph/mon_client.c @@ -96,9 +96,11 @@ int ceph_monmap_contains(struct ceph_monmap *m, struct ceph_entity_addr *addr) { int i; - for (i = 0; i < m->num_mon; i++) - if (memcmp(addr, &m->mon_inst[i].addr, sizeof(*addr)) == 0) + for (i = 0; i < m->num_mon; i++) { + if (ceph_addr_equal_no_type(addr, &m->mon_inst[i].addr)) return 1; + } + return 0; } diff 
--git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index ce37a052b9c32e356c48e72519b85cb0a44547a0..021dcfdae28359b1b694e7f3297fbbfe2e71cb0a 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2161,13 +2161,27 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) return err; } -int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len, - struct netlink_ext_ack *exterr) +int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer, + struct netlink_ext_ack *exterr) { - return nla_parse_deprecated(tb, IFLA_MAX, head, len, ifla_policy, + const struct ifinfomsg *ifmp; + const struct nlattr *attrs; + size_t len; + + ifmp = nla_data(nla_peer); + attrs = nla_data(nla_peer) + sizeof(struct ifinfomsg); + len = nla_len(nla_peer) - sizeof(struct ifinfomsg); + + if (ifmp->ifi_index < 0) { + NL_SET_ERR_MSG_ATTR(exterr, nla_peer, + "ifindex can't be negative"); + return -EINVAL; + } + + return nla_parse_deprecated(tb, IFLA_MAX, attrs, len, ifla_policy, exterr); } -EXPORT_SYMBOL(rtnl_nla_parse_ifla); +EXPORT_SYMBOL(rtnl_nla_parse_ifinfomsg); struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]) { @@ -3258,6 +3272,7 @@ static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, struct ifinfomsg *ifm; char ifname[IFNAMSIZ]; struct nlattr **data; + bool link_specified; int err; #ifdef CONFIG_MODULES @@ -3278,12 +3293,19 @@ static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, ifname[0] = '\0'; ifm = nlmsg_data(nlh); - if (ifm->ifi_index > 0) + if (ifm->ifi_index > 0) { + link_specified = true; dev = __dev_get_by_index(net, ifm->ifi_index); - else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) + } else if (ifm->ifi_index < 0) { + NL_SET_ERR_MSG(extack, "ifindex can't be negative"); + return -EINVAL; + } else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) { + link_specified = true; dev = rtnl_dev_get(net, NULL, tb[IFLA_ALT_IFNAME], ifname); - else + } else { + link_specified = false; dev = NULL; + } master_dev = NULL; m_ops = NULL; @@ -3386,7 +3408,12 @@ static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, } if (!(nlh->nlmsg_flags & NLM_F_CREATE)) { - if (ifm->ifi_index == 0 && tb[IFLA_GROUP]) + /* No dev found and NLM_F_CREATE not set. 
Requested dev does not exist, + * or it's for a group + */ + if (link_specified) + return -ENODEV; + if (tb[IFLA_GROUP]) return rtnl_group_changelink(skb, net, nla_get_u32(tb[IFLA_GROUP]), ifm, extack, tb); diff --git a/net/core/sock.c b/net/core/sock.c index 4f2892d3f933b0f027f5741c96a83d603682e555..365e67a765f2329946dae258190201b4754d1197 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2723,7 +2723,7 @@ void __sk_mem_reduce_allocated(struct sock *sk, int amount) if (mem_cgroup_sockets_enabled && sk->sk_memcg) mem_cgroup_uncharge_skmem(sk->sk_memcg, amount); - if (sk_under_memory_pressure(sk) && + if (sk_under_global_memory_pressure(sk) && (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0))) sk_leave_memory_pressure(sk); } diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 3293c1e3aa5d5dfdec5ca187ed1ff837a1ba736e..c647035a36d1891b0df1e634f3fe6c121589a021 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -324,11 +324,15 @@ EXPORT_SYMBOL_GPL(dccp_disconnect); __poll_t dccp_poll(struct file *file, struct socket *sock, poll_table *wait) { - __poll_t mask; struct sock *sk = sock->sk; + __poll_t mask; + u8 shutdown; + int state; sock_poll_wait(file, sock, wait); - if (sk->sk_state == DCCP_LISTEN) + + state = inet_sk_state_load(sk); + if (state == DCCP_LISTEN) return inet_csk_listen_poll(sk); /* Socket is not locked. We are protected from async events @@ -337,20 +341,21 @@ __poll_t dccp_poll(struct file *file, struct socket *sock, */ mask = 0; - if (sk->sk_err) + if (READ_ONCE(sk->sk_err)) mask = EPOLLERR; + shutdown = READ_ONCE(sk->sk_shutdown); - if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED) + if (shutdown == SHUTDOWN_MASK || state == DCCP_CLOSED) mask |= EPOLLHUP; - if (sk->sk_shutdown & RCV_SHUTDOWN) + if (shutdown & RCV_SHUTDOWN) mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; /* Connected? 
*/ - if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) { + if ((1 << state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) { if (atomic_read(&sk->sk_rmem_alloc) > 0) mask |= EPOLLIN | EPOLLRDNORM; - if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { + if (!(shutdown & SEND_SHUTDOWN)) { if (sk_stream_is_writeable(sk)) { mask |= EPOLLOUT | EPOLLWRNORM; } else { /* send SIGIO later */ @@ -368,7 +373,6 @@ __poll_t dccp_poll(struct file *file, struct socket *sock, } return mask; } - EXPORT_SYMBOL_GPL(dccp_poll); int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg) diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index c040acbf122b322cd0d62bc85741e08c790ef2bb..62bbdcf8ca953dc6ebe1c34416c895f2248cdd20 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -287,12 +287,12 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) switch (skb->protocol) { case htons(ETH_P_IP): - xfrm_decode_session(skb, &fl, AF_INET); memset(IPCB(skb), 0, sizeof(*IPCB(skb))); + xfrm_decode_session(skb, &fl, AF_INET); break; case htons(ETH_P_IPV6): - xfrm_decode_session(skb, &fl, AF_INET6); memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); + xfrm_decode_session(skb, &fl, AF_INET6); break; default: goto tx_err; diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index b2f885c621734322a40d22aaa3b031895d3815e5..0cbf65ad7142a797a8310bb9ac3e9ba07000241b 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -590,7 +590,9 @@ void tcp_retransmit_timer(struct sock *sk) tcp_stream_is_thin(tp) && icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) { icsk->icsk_backoff = 0; - icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX); + icsk->icsk_rto = clamp(__tcp_set_rto(tp), + tcp_rto_min(sk), + TCP_RTO_MAX); } else { /* Use normal (exponential) backoff */ icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 287c9d53f74a784db803ab712125af0ec6c204ba..3cc1331111f9913782980e8cc08203a960c2c69d 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -569,12 +569,12 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) vti6_addr_conflict(t, ipv6_hdr(skb))) goto tx_err; - xfrm_decode_session(skb, &fl, AF_INET6); memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); + xfrm_decode_session(skb, &fl, AF_INET6); break; case htons(ETH_P_IP): - xfrm_decode_session(skb, &fl, AF_INET); memset(IPCB(skb), 0, sizeof(*IPCB(skb))); + xfrm_decode_session(skb, &fl, AF_INET); break; default: goto tx_err; diff --git a/net/key/af_key.c b/net/key/af_key.c index fff2bd5f03e37f02951fd0c3826343f3508e4f25..f42854973ba8d7f025cd70809aab584d102e833f 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -1852,9 +1852,9 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms if (ext_hdrs[SADB_X_EXT_FILTER - 1]) { struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1]; - if ((xfilter->sadb_x_filter_splen >= + if ((xfilter->sadb_x_filter_splen > (sizeof(xfrm_address_t) << 3)) || - (xfilter->sadb_x_filter_dplen >= + (xfilter->sadb_x_filter_dplen > (sizeof(xfrm_address_t) << 3))) { mutex_unlock(&pfk->dump_lock); return -EINVAL; diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 29ec3ef63edc7a6b59b37bd0eb5e95b25e32bf57..d0b64c36471d553a7bc5e9f095e8ba7a85c2c4cc 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -1802,6 +1802,7 @@ static int proc_do_sync_threshold(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { + struct netns_ipvs *ipvs 
= table->extra2; int *valp = table->data; int val[2]; int rc; @@ -1811,6 +1812,7 @@ proc_do_sync_threshold(struct ctl_table *table, int write, .mode = table->mode, }; + mutex_lock(&ipvs->sync_mutex); memcpy(val, valp, sizeof(val)); rc = proc_dointvec(&tmp, write, buffer, lenp, ppos); if (write) { @@ -1820,6 +1822,7 @@ proc_do_sync_threshold(struct ctl_table *table, int write, else memcpy(valp, val, sizeof(val)); } + mutex_unlock(&ipvs->sync_mutex); return rc; } @@ -4077,6 +4080,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs) ipvs->sysctl_sync_threshold[0] = DEFAULT_SYNC_THRESHOLD; ipvs->sysctl_sync_threshold[1] = DEFAULT_SYNC_PERIOD; tbl[idx].data = &ipvs->sysctl_sync_threshold; + tbl[idx].extra2 = ipvs; tbl[idx++].maxlen = sizeof(ipvs->sysctl_sync_threshold); ipvs->sysctl_sync_refresh_period = DEFAULT_SYNC_REFRESH_PERIOD; tbl[idx++].data = &ipvs->sysctl_sync_refresh_period; diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index cec4b16170a0bc7503f7438e50e6259d6abfb853..21cbaf6dac331823006d7883594c08247c6a8aee 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -49,8 +49,8 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = { [SCTP_CONNTRACK_COOKIE_WAIT] = 3 SECS, [SCTP_CONNTRACK_COOKIE_ECHOED] = 3 SECS, [SCTP_CONNTRACK_ESTABLISHED] = 210 SECS, - [SCTP_CONNTRACK_SHUTDOWN_SENT] = 300 SECS / 1000, - [SCTP_CONNTRACK_SHUTDOWN_RECD] = 300 SECS / 1000, + [SCTP_CONNTRACK_SHUTDOWN_SENT] = 3 SECS, + [SCTP_CONNTRACK_SHUTDOWN_RECD] = 3 SECS, [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = 3 SECS, [SCTP_CONNTRACK_HEARTBEAT_SENT] = 30 SECS, }; @@ -105,7 +105,7 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = { { /* ORIGINAL */ /* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */ -/* init */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW}, +/* init */ {sCL, sCL, sCW, sCE, sES, sCL, sCL, sSA, sCW}, /* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL}, /* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL}, /* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL}, diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index 8d47782b778f12a3cba6844511a1ab0f1096c735..408b7f5faa5e597f4624b48ee1065fabd77239f0 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c @@ -138,6 +138,9 @@ static int nft_dynset_init(const struct nft_ctx *ctx, if (IS_ERR(set)) return PTR_ERR(set); + if (set->flags & NFT_SET_OBJECT) + return -EOPNOTSUPP; + if (set->ops->update == NULL) return -EOPNOTSUPP; diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c index ff4a76a77f0bc8aeae29568757207f2390748cf2..4865a7c0ea5e4e66e75f0f25375406e3f3d0049e 100644 --- a/net/netfilter/nft_set_pipapo.c +++ b/net/netfilter/nft_set_pipapo.c @@ -901,12 +901,14 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f) static int pipapo_insert(struct nft_pipapo_field *f, const uint8_t *k, int mask_bits) { - int rule = f->rules++, group, ret, bit_offset = 0; + int rule = f->rules, group, ret, bit_offset = 0; - ret = pipapo_resize(f, f->rules - 1, f->rules); + ret = pipapo_resize(f, f->rules, f->rules + 1); if (ret) return ret; + f->rules++; + for (group = 0; group < f->groups; group++) { int i, v; u8 mask; @@ -1051,7 +1053,9 @@ static int pipapo_expand(struct nft_pipapo_field *f, step++; if (step >= len) { if (!masks) { - pipapo_insert(f, base, 0); + err = pipapo_insert(f, base, 0); + if (err < 0) + return err; masks = 1; } goto out; @@ 
-1234,6 +1238,9 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set, else ret = pipapo_expand(f, start, end, f->groups * f->bb); + if (ret < 0) + return ret; + if (f->bsize > bsize_max) bsize_max = f->bsize; diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index fb50e3f3283f988d1ad8225356d2c5ed33b0ba86..5c2d230790db9b07e81c2a8fadc7d22d4fe0e3f7 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1513,10 +1513,28 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, return 0; } +static bool req_create_or_replace(struct nlmsghdr *n) +{ + return (n->nlmsg_flags & NLM_F_CREATE && + n->nlmsg_flags & NLM_F_REPLACE); +} + +static bool req_create_exclusive(struct nlmsghdr *n) +{ + return (n->nlmsg_flags & NLM_F_CREATE && + n->nlmsg_flags & NLM_F_EXCL); +} + +static bool req_change(struct nlmsghdr *n) +{ + return (!(n->nlmsg_flags & NLM_F_CREATE) && + !(n->nlmsg_flags & NLM_F_REPLACE) && + !(n->nlmsg_flags & NLM_F_EXCL)); +} + /* * Create/change qdisc. */ - static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, struct netlink_ext_ack *extack) { @@ -1613,27 +1631,35 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, * * We know, that some child q is already * attached to this parent and have choice: - * either to change it or to create/graft new one. + * 1) change it or 2) create/graft new one. + * If the requested qdisc kind is different + * than the existing one, then we choose graft. + * If they are the same then this is "change" + * operation - just let it fallthrough.. * * 1. We are allowed to create/graft only - * if CREATE and REPLACE flags are set. + * if the request is explicitly stating + * "please create if it doesn't exist". * - * 2. If EXCL is set, requestor wanted to say, - * that qdisc tcm_handle is not expected + * 2. If the request is to exclusive create + * then the qdisc tcm_handle is not expected * to exist, so that we choose create/graft too. * * 3. The last case is when no flags are set. + * This will happen when for example tc + * utility issues a "change" command. * Alas, it is sort of hole in API, we * cannot decide what to do unambiguously. - * For now we select create/graft, if - * user gave KIND, which does not match existing. + * For now we select create/graft. */ - if ((n->nlmsg_flags & NLM_F_CREATE) && - (n->nlmsg_flags & NLM_F_REPLACE) && - ((n->nlmsg_flags & NLM_F_EXCL) || - (tca[TCA_KIND] && - nla_strcmp(tca[TCA_KIND], q->ops->id)))) - goto create_n_graft; + if (tca[TCA_KIND] && + nla_strcmp(tca[TCA_KIND], q->ops->id)) { + if (req_create_or_replace(n) || + req_create_exclusive(n)) + goto create_n_graft; + else if (req_change(n)) + goto create_n_graft2; + } } } } else { @@ -1667,6 +1693,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag"); return -ENOENT; } +create_n_graft2: if (clid == TC_H_INGRESS) { if (dev_ingress_queue(dev)) { q = qdisc_create(dev, dev_ingress_queue(dev), p, diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index be42b1196786bc48672d7929afbbbe6bf923b0ea..08aaa6efc62c86033eeecbcacbc01f85ecd6da0e 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -773,12 +773,10 @@ static void dist_free(struct disttable *d) * signed 16 bit values. 
*/ -static int get_dist_table(struct Qdisc *sch, struct disttable **tbl, - const struct nlattr *attr) +static int get_dist_table(struct disttable **tbl, const struct nlattr *attr) { size_t n = nla_len(attr)/sizeof(__s16); const __s16 *data = nla_data(attr); - spinlock_t *root_lock; struct disttable *d; int i; @@ -793,13 +791,7 @@ static int get_dist_table(struct Qdisc *sch, struct disttable **tbl, for (i = 0; i < n; i++) d->table[i] = data[i]; - root_lock = qdisc_root_sleeping_lock(sch); - - spin_lock_bh(root_lock); - swap(*tbl, d); - spin_unlock_bh(root_lock); - - dist_free(d); + *tbl = d; return 0; } @@ -956,6 +948,8 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt, { struct netem_sched_data *q = qdisc_priv(sch); struct nlattr *tb[TCA_NETEM_MAX + 1]; + struct disttable *delay_dist = NULL; + struct disttable *slot_dist = NULL; struct tc_netem_qopt *qopt; struct clgstate old_clg; int old_loss_model = CLG_RANDOM; @@ -969,6 +963,18 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt, if (ret < 0) return ret; + if (tb[TCA_NETEM_DELAY_DIST]) { + ret = get_dist_table(&delay_dist, tb[TCA_NETEM_DELAY_DIST]); + if (ret) + goto table_free; + } + + if (tb[TCA_NETEM_SLOT_DIST]) { + ret = get_dist_table(&slot_dist, tb[TCA_NETEM_SLOT_DIST]); + if (ret) + goto table_free; + } + sch_tree_lock(sch); /* backup q->clg and q->loss_model */ old_clg = q->clg; @@ -978,26 +984,17 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt, ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]); if (ret) { q->loss_model = old_loss_model; + q->clg = old_clg; goto unlock; } } else { q->loss_model = CLG_RANDOM; } - if (tb[TCA_NETEM_DELAY_DIST]) { - ret = get_dist_table(sch, &q->delay_dist, - tb[TCA_NETEM_DELAY_DIST]); - if (ret) - goto get_table_failure; - } - - if (tb[TCA_NETEM_SLOT_DIST]) { - ret = get_dist_table(sch, &q->slot_dist, - tb[TCA_NETEM_SLOT_DIST]); - if (ret) - goto get_table_failure; - } - + if (delay_dist) + swap(q->delay_dist, delay_dist); + if (slot_dist) + swap(q->slot_dist, slot_dist); sch->limit = qopt->limit; q->latency = PSCHED_TICKS2NS(qopt->latency); @@ -1047,17 +1044,11 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt, unlock: sch_tree_unlock(sch); - return ret; -get_table_failure: - /* recover clg and loss_model, in case of - * q->clg and q->loss_model were modified - * in get_loss_clg() - */ - q->clg = old_clg; - q->loss_model = old_loss_model; - - goto unlock; +table_free: + dist_free(delay_dist); + dist_free(slot_dist); + return ret; } static int netem_init(struct Qdisc *sch, struct nlattr *opt, diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 534364bb871a32e97bcddddaa2dd83aece429573..fa4d31b507f297aca7bb71ec4e7d1828caf8dc71 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -97,7 +97,7 @@ struct percpu_counter sctp_sockets_allocated; static void sctp_enter_memory_pressure(struct sock *sk) { - sctp_memory_pressure = 1; + WRITE_ONCE(sctp_memory_pressure, 1); } diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 338b06de86d163501f7eca1363641cc0bd11b2a4..d015576f3081af95fe05eddca2883a0401ede2b5 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -972,9 +972,6 @@ struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt, if (!rep->rr_rdmabuf) goto out_free; - if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf)) - goto out_free_regbuf; - xdr_buf_init(&rep->rr_hdrbuf, rdmab_data(rep->rr_rdmabuf), rdmab_length(rep->rr_rdmabuf)); rep->rr_cqe.done = rpcrdma_wc_receive; @@ -987,8 
+984,6 @@ struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt, list_add(&rep->rr_all, &r_xprt->rx_buf.rb_all_reps); return rep; -out_free_regbuf: - rpcrdma_regbuf_free(rep->rr_rdmabuf); out_free: kfree(rep); out: @@ -1425,6 +1420,10 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp) rep = rpcrdma_rep_create(r_xprt, temp); if (!rep) break; + if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf)) { + rpcrdma_rep_put(buf, rep); + break; + } trace_xprtrdma_post_recv(rep); rep->rr_recv_wr.next = wr; diff --git a/net/xfrm/xfrm_compat.c b/net/xfrm/xfrm_compat.c index 8cbf45a8bcdc24767c2b31584472f769827fd30b..655fe4ff862122738eb4ab8089ed3a1e45f4cdf1 100644 --- a/net/xfrm/xfrm_compat.c +++ b/net/xfrm/xfrm_compat.c @@ -108,7 +108,7 @@ static const struct nla_policy compat_policy[XFRMA_MAX+1] = { [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) }, [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) }, [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) }, - [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) }, + [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_user_sec_ctx) }, [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) }, [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) }, [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 }, diff --git a/net/xfrm/xfrm_interface_core.c b/net/xfrm/xfrm_interface_core.c index e4f21a6924153d24f9d3d3e5eeaf2cbedd316234..4eeec336757545ec20a290215132e2d013e998b4 100644 --- a/net/xfrm/xfrm_interface_core.c +++ b/net/xfrm/xfrm_interface_core.c @@ -403,8 +403,8 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev) switch (skb->protocol) { case htons(ETH_P_IPV6): - xfrm_decode_session(skb, &fl, AF_INET6); memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); + xfrm_decode_session(skb, &fl, AF_INET6); if (!dst) { fl.u.ip6.flowi6_oif = dev->ifindex; fl.u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC; @@ -418,8 +418,8 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev) } break; case htons(ETH_P_IP): - xfrm_decode_session(skb, &fl, AF_INET); memset(IPCB(skb), 0, sizeof(*IPCB(skb))); + xfrm_decode_session(skb, &fl, AF_INET); if (!dst) { struct rtable *rt; diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index fb1fbb5dc5c3649aba42f8f010a1ab4cd8ff9e4f..8fce2e93bb3b3601baca9128e08728d0fdda62ae 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -2737,7 +2737,7 @@ const struct nla_policy xfrma_policy[XFRMA_MAX+1] = { [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) }, [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) }, [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) }, - [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) }, + [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_user_sec_ctx) }, [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) }, [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) }, [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 }, @@ -2757,6 +2757,7 @@ const struct nla_policy xfrma_policy[XFRMA_MAX+1] = { [XFRMA_SET_MARK] = { .type = NLA_U32 }, [XFRMA_SET_MARK_MASK] = { .type = NLA_U32 }, [XFRMA_IF_ID] = { .type = NLA_U32 }, + [XFRMA_MTIMER_THRESH] = { .type = NLA_U32 }, }; EXPORT_SYMBOL_GPL(xfrma_policy); diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig index 755af0b29e75566166d4eee250657c1521b113a5..0a5ae1e8da47ab6211e8624ed9e6619f5111ce4d 100644 --- a/security/integrity/ima/Kconfig +++ b/security/integrity/ima/Kconfig @@ -8,7 +8,7 @@ config IMA select CRYPTO_HMAC select 
CRYPTO_SHA1 select CRYPTO_HASH_INFO - select TCG_TPM if HAS_IOMEM && !UML + select TCG_TPM if HAS_IOMEM select TCG_TIS if TCG_TPM && X86 select TCG_CRB if TCG_TPM && ACPI select TCG_IBMVTPM if TCG_TPM && PPC_PSERIES diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c index 6a04de21343f8771c619b780f9526736417ec3a5..82cfeab162174191d1de82ea55e88d4aa67caae0 100644 --- a/security/selinux/ss/policydb.c +++ b/security/selinux/ss/policydb.c @@ -2011,6 +2011,7 @@ static int filename_trans_read_helper(struct policydb *p, void *fp) if (!datum) goto out; + datum->next = NULL; *dst = datum; /* ebitmap_read() will at least init the bitmap */ @@ -2023,7 +2024,6 @@ static int filename_trans_read_helper(struct policydb *p, void *fp) goto out; datum->otype = le32_to_cpu(buf[0]); - datum->next = NULL; dst = &datum->next; } diff --git a/sound/core/pcm_memory.c b/sound/core/pcm_memory.c index 191883842a35d43b68976776c65125e3d0ca6dd7..3e60a337bbef125809cac2ed8208bd26312c847f 100644 --- a/sound/core/pcm_memory.c +++ b/sound/core/pcm_memory.c @@ -31,20 +31,51 @@ static unsigned long max_alloc_per_card = 32UL * 1024UL * 1024UL; module_param(max_alloc_per_card, ulong, 0644); MODULE_PARM_DESC(max_alloc_per_card, "Max total allocation bytes per card."); +static void __update_allocated_size(struct snd_card *card, ssize_t bytes) +{ + card->total_pcm_alloc_bytes += bytes; +} + +static void update_allocated_size(struct snd_card *card, ssize_t bytes) +{ + mutex_lock(&card->memory_mutex); + __update_allocated_size(card, bytes); + mutex_unlock(&card->memory_mutex); +} + +static void decrease_allocated_size(struct snd_card *card, size_t bytes) +{ + mutex_lock(&card->memory_mutex); + WARN_ON(card->total_pcm_alloc_bytes < bytes); + __update_allocated_size(card, -(ssize_t)bytes); + mutex_unlock(&card->memory_mutex); +} + static int do_alloc_pages(struct snd_card *card, int type, struct device *dev, size_t size, struct snd_dma_buffer *dmab) { int err; + /* check and reserve the requested size */ + mutex_lock(&card->memory_mutex); if (max_alloc_per_card && - card->total_pcm_alloc_bytes + size > max_alloc_per_card) + card->total_pcm_alloc_bytes + size > max_alloc_per_card) { + mutex_unlock(&card->memory_mutex); return -ENOMEM; + } + __update_allocated_size(card, size); + mutex_unlock(&card->memory_mutex); err = snd_dma_alloc_pages(type, dev, size, dmab); if (!err) { - mutex_lock(&card->memory_mutex); - card->total_pcm_alloc_bytes += dmab->bytes; - mutex_unlock(&card->memory_mutex); + /* the actual allocation size might be bigger than requested, + * and we need to correct the account + */ + if (dmab->bytes != size) + update_allocated_size(card, dmab->bytes - size); + } else { + /* take back on allocation failure */ + decrease_allocated_size(card, size); } return err; } @@ -53,10 +84,7 @@ static void do_free_pages(struct snd_card *card, struct snd_dma_buffer *dmab) { if (!dmab->area) return; - mutex_lock(&card->memory_mutex); - WARN_ON(card->total_pcm_alloc_bytes < dmab->bytes); - card->total_pcm_alloc_bytes -= dmab->bytes; - mutex_unlock(&card->memory_mutex); + decrease_allocated_size(card, dmab->bytes); snd_dma_free_pages(dmab); dmab->area = NULL; } diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c index d75f31eb9d78f04ef48d8c379b4950c7888ebee9..bf35acca5ea0eaf46c1d5b1a8546ae1f0ff02a03 100644 --- a/sound/hda/hdac_regmap.c +++ b/sound/hda/hdac_regmap.c @@ -597,10 +597,9 @@ EXPORT_SYMBOL_GPL(snd_hdac_regmap_update_raw_once); */ void snd_hdac_regmap_sync(struct hdac_device *codec) { - if 
(codec->regmap) { - mutex_lock(&codec->regmap_lock); + mutex_lock(&codec->regmap_lock); + if (codec->regmap) regcache_sync(codec->regmap); - mutex_unlock(&codec->regmap_lock); - } + mutex_unlock(&codec->regmap_lock); } EXPORT_SYMBOL_GPL(snd_hdac_regmap_sync); diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c index 4e76ed0e91d5da0a531fb9e6f78dee82d62c24a5..e17b93b25d2ff3d45d3e3c12b4477110e08ea227 100644 --- a/sound/pci/emu10k1/emufx.c +++ b/sound/pci/emu10k1/emufx.c @@ -1560,14 +1560,8 @@ A_OP(icode, &ptr, iMAC0, A_GPR(var), A_GPR(var), A_GPR(vol), A_EXTIN(input)) gpr += 2; /* Master volume (will be renamed later) */ - A_OP(icode, &ptr, iMAC0, A_GPR(playback+0+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+0+SND_EMU10K1_PLAYBACK_CHANNELS)); - A_OP(icode, &ptr, iMAC0, A_GPR(playback+1+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+1+SND_EMU10K1_PLAYBACK_CHANNELS)); - A_OP(icode, &ptr, iMAC0, A_GPR(playback+2+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+2+SND_EMU10K1_PLAYBACK_CHANNELS)); - A_OP(icode, &ptr, iMAC0, A_GPR(playback+3+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+3+SND_EMU10K1_PLAYBACK_CHANNELS)); - A_OP(icode, &ptr, iMAC0, A_GPR(playback+4+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+4+SND_EMU10K1_PLAYBACK_CHANNELS)); - A_OP(icode, &ptr, iMAC0, A_GPR(playback+5+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+5+SND_EMU10K1_PLAYBACK_CHANNELS)); - A_OP(icode, &ptr, iMAC0, A_GPR(playback+6+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+6+SND_EMU10K1_PLAYBACK_CHANNELS)); - A_OP(icode, &ptr, iMAC0, A_GPR(playback+7+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+7+SND_EMU10K1_PLAYBACK_CHANNELS)); + for (z = 0; z < 8; z++) + A_OP(icode, &ptr, iMAC0, A_GPR(playback+z+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+z+SND_EMU10K1_PLAYBACK_CHANNELS)); snd_emu10k1_init_mono_control(&controls[nctl++], "Wave Master Playback Volume", gpr, 0); gpr += 2; @@ -1651,102 +1645,14 @@ A_OP(icode, &ptr, iMAC0, A_GPR(var), A_GPR(var), A_GPR(vol), A_EXTIN(input)) dev_dbg(emu->card->dev, "emufx.c: gpr=0x%x, tmp=0x%x\n", gpr, tmp); */ - /* For the EMU1010: How to get 32bit values from the DSP. High 16bits into L, low 16bits into R. */ - /* A_P16VIN(0) is delayed by one sample, - * so all other A_P16VIN channels will need to also be delayed - */ - /* Left ADC in. 
1 of 2 */ snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_P16VIN(0x0), A_FXBUS2(0) ); - /* Right ADC in 1 of 2 */ - gpr_map[gpr++] = 0x00000000; - /* Delaying by one sample: instead of copying the input - * value A_P16VIN to output A_FXBUS2 as in the first channel, - * we use an auxiliary register, delaying the value by one - * sample - */ - snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(2) ); - A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x1), A_C_00000000, A_C_00000000); - gpr_map[gpr++] = 0x00000000; - snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(4) ); - A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x2), A_C_00000000, A_C_00000000); - gpr_map[gpr++] = 0x00000000; - snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(6) ); - A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x3), A_C_00000000, A_C_00000000); - /* For 96kHz mode */ - /* Left ADC in. 2 of 2 */ - gpr_map[gpr++] = 0x00000000; - snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0x8) ); - A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x4), A_C_00000000, A_C_00000000); - /* Right ADC in 2 of 2 */ - gpr_map[gpr++] = 0x00000000; - snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0xa) ); - A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x5), A_C_00000000, A_C_00000000); - gpr_map[gpr++] = 0x00000000; - snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0xc) ); - A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x6), A_C_00000000, A_C_00000000); - gpr_map[gpr++] = 0x00000000; - snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0xe) ); - A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x7), A_C_00000000, A_C_00000000); - /* Pavel Hofman - we still have voices, A_FXBUS2s, and - * A_P16VINs available - - * let's add 8 more capture channels - total of 16 - */ - gpr_map[gpr++] = 0x00000000; - snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp, - bit_shifter16, - A_GPR(gpr - 1), - A_FXBUS2(0x10)); - A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x8), - A_C_00000000, A_C_00000000); - gpr_map[gpr++] = 0x00000000; - snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp, - bit_shifter16, - A_GPR(gpr - 1), - A_FXBUS2(0x12)); - A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x9), - A_C_00000000, A_C_00000000); - gpr_map[gpr++] = 0x00000000; - snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp, - bit_shifter16, - A_GPR(gpr - 1), - A_FXBUS2(0x14)); - A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xa), - A_C_00000000, A_C_00000000); - gpr_map[gpr++] = 0x00000000; - snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp, - bit_shifter16, - A_GPR(gpr - 1), - A_FXBUS2(0x16)); - A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xb), - A_C_00000000, A_C_00000000); - gpr_map[gpr++] = 0x00000000; - snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp, - bit_shifter16, - A_GPR(gpr - 1), - A_FXBUS2(0x18)); - A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xc), - A_C_00000000, A_C_00000000); - gpr_map[gpr++] = 0x00000000; - snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp, - bit_shifter16, - A_GPR(gpr - 1), - A_FXBUS2(0x1a)); - A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xd), - 
A_C_00000000, A_C_00000000); - gpr_map[gpr++] = 0x00000000; - snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp, - bit_shifter16, - A_GPR(gpr - 1), - A_FXBUS2(0x1c)); - A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xe), - A_C_00000000, A_C_00000000); - gpr_map[gpr++] = 0x00000000; - snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp, - bit_shifter16, - A_GPR(gpr - 1), - A_FXBUS2(0x1e)); - A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xf), - A_C_00000000, A_C_00000000); + /* A_P16VIN(0) is delayed by one sample, so all other A_P16VIN channels + * will need to also be delayed; we use an auxiliary register for that. */ + for (z = 1; z < 0x10; z++) { + snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr), A_FXBUS2(z * 2) ); + A_OP(icode, &ptr, iACC3, A_GPR(gpr), A_P16VIN(z), A_C_00000000, A_C_00000000); + gpr_map[gpr++] = 0x00000000; + } } #if 0 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index db8593d794315f728404e744cfb2e61f276e648d..adfab80b8189d357d01863a6fa20251385223590 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -10006,6 +10006,7 @@ static int patch_alc269(struct hda_codec *codec) spec = codec->spec; spec->gen.shared_mic_vref_pin = 0x18; codec->power_save_node = 0; + spec->en_3kpull_low = true; #ifdef CONFIG_PM codec->patch_ops.suspend = alc269_suspend; @@ -10088,14 +10089,16 @@ static int patch_alc269(struct hda_codec *codec) spec->shutup = alc256_shutup; spec->init_hook = alc256_init; spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */ - if (codec->bus->pci->vendor == PCI_VENDOR_ID_AMD) - spec->en_3kpull_low = true; + if (codec->core.vendor_id == 0x10ec0236 && + codec->bus->pci->vendor != PCI_VENDOR_ID_AMD) + spec->en_3kpull_low = false; break; case 0x10ec0257: spec->codec_variant = ALC269_TYPE_ALC257; spec->shutup = alc256_shutup; spec->init_hook = alc256_init; spec->gen.mixer_nid = 0; + spec->en_3kpull_low = false; break; case 0x10ec0215: case 0x10ec0245: @@ -10719,6 +10722,7 @@ enum { ALC897_FIXUP_HP_HSMIC_VERB, ALC897_FIXUP_LENOVO_HEADSET_MODE, ALC897_FIXUP_HEADSET_MIC_PIN2, + ALC897_FIXUP_UNIS_H3C_X500S, }; static const struct hda_fixup alc662_fixups[] = { @@ -11158,6 +11162,13 @@ static const struct hda_fixup alc662_fixups[] = { .chained = true, .chain_id = ALC897_FIXUP_LENOVO_HEADSET_MODE }, + [ALC897_FIXUP_UNIS_H3C_X500S] = { + .type = HDA_FIXUP_VERBS, + .v.verbs = (const struct hda_verb[]) { + { 0x14, AC_VERB_SET_EAPD_BTLENABLE, 0 }, + {} + }, + }, }; static const struct snd_pci_quirk alc662_fixup_tbl[] = { @@ -11319,6 +11330,7 @@ static const struct hda_model_fixup alc662_fixup_models[] = { {.id = ALC662_FIXUP_USI_HEADSET_MODE, .name = "usi-headset"}, {.id = ALC662_FIXUP_LENOVO_MULTI_CODECS, .name = "dual-codecs"}, {.id = ALC669_FIXUP_ACER_ASPIRE_ETHOS, .name = "aspire-ethos"}, + {.id = ALC897_FIXUP_UNIS_H3C_X500S, .name = "unis-h3c-x500s"}, {} }; diff --git a/sound/soc/codecs/rt5665.c b/sound/soc/codecs/rt5665.c index 8a915cdce0fe9949556af24e30dde9579b83dbcb..8b73c2d7f1f10f3dadd596a63ab012b88f070951 100644 --- a/sound/soc/codecs/rt5665.c +++ b/sound/soc/codecs/rt5665.c @@ -4472,6 +4472,8 @@ static void rt5665_remove(struct snd_soc_component *component) struct rt5665_priv *rt5665 = snd_soc_component_get_drvdata(component); regmap_write(rt5665->regmap, RT5665_RESET, 0); + + regulator_bulk_disable(ARRAY_SIZE(rt5665->supplies), rt5665->supplies); } #ifdef CONFIG_PM diff --git a/sound/soc/intel/boards/sof_sdw.c 
b/sound/soc/intel/boards/sof_sdw.c index f5d8f7951cfc3e49a224f42a4a32ae31b2512f03..cbbb50ddc7954bde8eeb4ea9fc9ce9eea9135b29 100644 --- a/sound/soc/intel/boards/sof_sdw.c +++ b/sound/soc/intel/boards/sof_sdw.c @@ -199,6 +199,31 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = { SOF_RT715_DAI_ID_FIX | SOF_SDW_PCH_DMIC), }, + { + .callback = sof_sdw_quirk_cb, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "Meteor Lake Client Platform"), + }, + .driver_data = (void *)(RT711_JD2_100K), + }, + { + .callback = sof_sdw_quirk_cb, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Google"), + DMI_MATCH(DMI_PRODUCT_NAME, "Rex"), + }, + .driver_data = (void *)(SOF_SDW_PCH_DMIC), + }, + /* LunarLake devices */ + { + .callback = sof_sdw_quirk_cb, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "Lunar Lake Client Platform"), + }, + .driver_data = (void *)(RT711_JD2_100K), + }, {} }; diff --git a/sound/soc/meson/axg-tdm-formatter.c b/sound/soc/meson/axg-tdm-formatter.c index cab7fa2851aa848a08aa8ef3804adecc12a2f5a6..4834cfd163c03b8418460e54cb122f61b2a86085 100644 --- a/sound/soc/meson/axg-tdm-formatter.c +++ b/sound/soc/meson/axg-tdm-formatter.c @@ -30,27 +30,32 @@ int axg_tdm_formatter_set_channel_masks(struct regmap *map, struct axg_tdm_stream *ts, unsigned int offset) { - unsigned int val, ch = ts->channels; - unsigned long mask; - int i, j; + unsigned int ch = ts->channels; + u32 val[AXG_TDM_NUM_LANES]; + int i, j, k; + + /* + * We need to mimick the slot distribution used by the HW to keep the + * channel placement consistent regardless of the number of channel + * in the stream. This is why the odd algorithm below is used. + */ + memset(val, 0, sizeof(*val) * AXG_TDM_NUM_LANES); /* * Distribute the channels of the stream over the available slots - * of each TDM lane + * of each TDM lane. We need to go over the 32 slots ... */ - for (i = 0; i < AXG_TDM_NUM_LANES; i++) { - val = 0; - mask = ts->mask[i]; - - for (j = find_first_bit(&mask, 32); - (j < 32) && ch; - j = find_next_bit(&mask, 32, j + 1)) { - val |= 1 << j; - ch -= 1; + for (i = 0; (i < 32) && ch; i += 2) { + /* ... of all the lanes ... */ + for (j = 0; j < AXG_TDM_NUM_LANES; j++) { + /* ... then distribute the channels in pairs */ + for (k = 0; k < 2; k++) { + if ((BIT(i + k) & ts->mask[j]) && ch) { + val[j] |= BIT(i + k); + ch -= 1; + } + } } - - regmap_write(map, offset, val); - offset += regmap_get_reg_stride(map); } /* @@ -63,6 +68,11 @@ int axg_tdm_formatter_set_channel_masks(struct regmap *map, return -EINVAL; } + for (i = 0; i < AXG_TDM_NUM_LANES; i++) { + regmap_write(map, offset, val[i]); + offset += regmap_get_reg_stride(map); + } + return 0; } EXPORT_SYMBOL_GPL(axg_tdm_formatter_set_channel_masks); diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index ec74aead0844c0eb5f2e80c865888f6aa7da1803..97fe2fadcafb3e565fd147eb25471bc6e2398a1b 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -3797,6 +3797,35 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"), } } }, +{ + /* Advanced modes of the Mythware XA001AU. 
+ * For the standard mode, Mythware XA001AU has ID ffad:a001 + */ + USB_DEVICE_VENDOR_SPEC(0xffad, 0xa001), + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { + .vendor_name = "Mythware", + .product_name = "XA001AU", + .ifnum = QUIRK_ANY_INTERFACE, + .type = QUIRK_COMPOSITE, + .data = (const struct snd_usb_audio_quirk[]) { + { + .ifnum = 0, + .type = QUIRK_IGNORE_INTERFACE, + }, + { + .ifnum = 1, + .type = QUIRK_AUDIO_STANDARD_INTERFACE, + }, + { + .ifnum = 2, + .type = QUIRK_AUDIO_STANDARD_INTERFACE, + }, + { + .ifnum = -1 + } + } + } +}, #undef USB_DEVICE_VENDOR_SPEC #undef USB_AUDIO_DEVICE diff --git a/tools/include/linux/objtool.h b/tools/include/linux/objtool.h index 662f19374bd982b6e732e79ba62c427427920252..d17fa7f4001d79214a98bb2f18652fec0902e72e 100644 --- a/tools/include/linux/objtool.h +++ b/tools/include/linux/objtool.h @@ -71,6 +71,23 @@ struct unwind_hint { static void __used __section(".discard.func_stack_frame_non_standard") \ *__func_stack_frame_non_standard_##func = func +/* + * STACK_FRAME_NON_STANDARD_FP() is a frame-pointer-specific function ignore + * for the case where a function is intentionally missing frame pointer setup, + * but otherwise needs objtool/ORC coverage when frame pointers are disabled. + */ +#ifdef CONFIG_FRAME_POINTER +#define STACK_FRAME_NON_STANDARD_FP(func) STACK_FRAME_NON_STANDARD(func) +#else +#define STACK_FRAME_NON_STANDARD_FP(func) +#endif + +#define ANNOTATE_NOENDBR \ + "986: \n\t" \ + ".pushsection .discard.noendbr\n\t" \ + _ASM_PTR " 986b\n\t" \ + ".popsection\n\t" + #else /* __ASSEMBLY__ */ /* @@ -117,6 +134,13 @@ struct unwind_hint { .popsection .endm +.macro ANNOTATE_NOENDBR +.Lhere_\@: + .pushsection .discard.noendbr + .quad .Lhere_\@ + .popsection +.endm + #endif /* __ASSEMBLY__ */ #else /* !CONFIG_STACK_VALIDATION */ @@ -126,10 +150,14 @@ struct unwind_hint { #define UNWIND_HINT(sp_reg, sp_offset, type, end) \ "\n\t" #define STACK_FRAME_NON_STANDARD(func) +#define STACK_FRAME_NON_STANDARD_FP(func) +#define ANNOTATE_NOENDBR #else #define ANNOTATE_INTRA_FUNCTION_CALL .macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 end=0 .endm +.macro ANNOTATE_NOENDBR +.endm #endif #endif /* CONFIG_STACK_VALIDATION */ diff --git a/tools/objtool/arch.h b/tools/objtool/arch.h index 580ce18575857349f42624403cb13de666a9ed88..75840291b3934f919f013e08626b140d5ff79473 100644 --- a/tools/objtool/arch.h +++ b/tools/objtool/arch.h @@ -90,6 +90,7 @@ int arch_decode_hint_reg(u8 sp_reg, int *base); bool arch_is_retpoline(struct symbol *sym); bool arch_is_rethunk(struct symbol *sym); +bool arch_is_embedded_insn(struct symbol *sym); int arch_rewrite_retpolines(struct objtool_file *file); diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c index 5b915ebb61163a50a82be5ae06e45feb8d640afb..9d2af67672e7cacbf6f1955bb1984d8c92ed81fa 100644 --- a/tools/objtool/arch/x86/decode.c +++ b/tools/objtool/arch/x86/decode.c @@ -652,8 +652,11 @@ bool arch_is_retpoline(struct symbol *sym) bool arch_is_rethunk(struct symbol *sym) { - return !strcmp(sym->name, "__x86_return_thunk") || - !strcmp(sym->name, "srso_untrain_ret") || - !strcmp(sym->name, "srso_safe_ret") || - !strcmp(sym->name, "__ret"); + return !strcmp(sym->name, "__x86_return_thunk"); +} + +bool arch_is_embedded_insn(struct symbol *sym) +{ + return !strcmp(sym->name, "retbleed_return_thunk") || + !strcmp(sym->name, "srso_safe_ret"); } diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 
9a0a54194636c490900764933a9d6d55de9d17d8..bd24951faa094ec27aba6afa0f9317dd99afb2c4 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -369,7 +369,7 @@ static int decode_instructions(struct objtool_file *file) if (!strcmp(sec->name, ".noinstr.text") || !strcmp(sec->name, ".entry.text") || - !strncmp(sec->name, ".text.__x86.", 12)) + !strncmp(sec->name, ".text..__x86.", 13)) sec->noinstr = true; for (offset = 0; offset < sec->len; offset += insn->len) { @@ -946,16 +946,33 @@ static int add_ignore_alternatives(struct objtool_file *file) return 0; } +/* + * Symbols that replace INSN_CALL_DYNAMIC, every (tail) call to such a symbol + * will be added to the .retpoline_sites section. + */ __weak bool arch_is_retpoline(struct symbol *sym) { return false; } +/* + * Symbols that replace INSN_RETURN, every (tail) call to such a symbol + * will be added to the .return_sites section. + */ __weak bool arch_is_rethunk(struct symbol *sym) { return false; } +/* + * Symbols that are embedded inside other instructions, because sometimes crazy + * code exists. These are mostly ignored for validation purposes. + */ +__weak bool arch_is_embedded_insn(struct symbol *sym) +{ + return false; +} + #define NEGATIVE_RELOC ((void *)-1L) static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn) @@ -1165,14 +1182,14 @@ static int add_jump_destinations(struct objtool_file *file) continue; /* - * This is a special case for zen_untrain_ret(). + * This is a special case for retbleed_untrain_ret(). * It jumps to __x86_return_thunk(), but objtool * can't find the thunk's starting RET * instruction, because the RET is also in the * middle of another instruction. Objtool only * knows about the outer instruction. */ - if (sym && sym->return_thunk) { + if (sym && sym->embedded_insn) { add_return_call(file, insn, false); continue; } @@ -1971,6 +1988,9 @@ static int classify_symbols(struct objtool_file *file) if (arch_is_rethunk(func)) func->return_thunk = true; + if (arch_is_embedded_insn(func)) + func->embedded_insn = true; + if (!strcmp(func->name, "__fentry__")) func->fentry = true; @@ -2079,12 +2099,17 @@ static int decode_sections(struct objtool_file *file) return 0; } -static bool is_fentry_call(struct instruction *insn) +static bool is_special_call(struct instruction *insn) { - if (insn->type == INSN_CALL && - insn->call_dest && - insn->call_dest->fentry) - return true; + if (insn->type == INSN_CALL) { + struct symbol *dest = insn->call_dest; + + if (!dest) + return false; + + if (dest->fentry) + return true; + } return false; } @@ -2958,7 +2983,7 @@ static int validate_branch(struct objtool_file *file, struct symbol *func, if (ret) return ret; - if (!no_fp && func && !is_fentry_call(insn) && + if (!no_fp && func && !is_special_call(insn) && !has_valid_stack_frame(&state)) { WARN_FUNC("call without frame pointer save/setup", sec, insn->offset); diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h index a1863eb35fbbcdfe1babceabdd6ab280f61cd9e8..19446d9112443912a22e91a29deb90ffe607458c 100644 --- a/tools/objtool/elf.h +++ b/tools/objtool/elf.h @@ -61,6 +61,7 @@ struct symbol { u8 return_thunk : 1; u8 fentry : 1; u8 kcov : 1; + u8 embedded_insn : 1; }; struct reloc { diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh index 472bd023e2a5fb591727dee854641adacb5d87fe..b501b366367f775a156d1c8604dae182acbfef83 100755 --- a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh +++ 
b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh @@ -72,7 +72,8 @@ test_span_gre_ttl() RET=0 - mirror_install $swp1 ingress $tundev "matchall $tcflags" + mirror_install $swp1 ingress $tundev \ + "prot ip flower $tcflags ip_prot icmp" tc filter add dev $h3 ingress pref 77 prot $prot \ flower ip_ttl 50 action pass
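The axg-tdm-formatter hunk above replaces the old "fill lane 0 first" loop with a pair-wise
distribution: slot pairs are walked first and lanes second, so the first two channels of a
stream always land on the same slots regardless of how many channels follow. Below is a
minimal stand-alone user-space sketch of that scheme, not the driver code itself; the lane
count of 4 matches AXG_TDM_NUM_LANES in the kernel sources, but the example slot masks and
channel count are made up purely for illustration.

/* sketch of the pair-wise channel-to-slot distribution used by the fix above */
#include <stdio.h>
#include <stdint.h>

#define NUM_LANES 4	/* AXG_TDM_NUM_LANES is 4 in the kernel sources */

static void distribute(uint32_t lane_mask[NUM_LANES],
			const uint32_t slot_mask[NUM_LANES],
			unsigned int channels)
{
	/* Walk the 32 possible slots two at a time ... */
	for (int i = 0; i < 32 && channels; i += 2) {
		/* ... across every lane ... */
		for (int j = 0; j < NUM_LANES; j++) {
			/* ... handing out at most one slot pair per lane per pass */
			for (int k = 0; k < 2; k++) {
				if ((slot_mask[j] & (1u << (i + k))) && channels) {
					lane_mask[j] |= 1u << (i + k);
					channels--;
				}
			}
		}
	}
}

int main(void)
{
	/* illustrative setup: four slots enabled on every lane, four-channel stream */
	const uint32_t slots[NUM_LANES] = { 0xf, 0xf, 0xf, 0xf };
	uint32_t lanes[NUM_LANES] = { 0 };

	distribute(lanes, slots, 4);
	for (int j = 0; j < NUM_LANES; j++)
		printf("lane %d mask: 0x%08x\n", j, lanes[j]);
	return 0;
}

With this input the first channel pair ends up on lane 0, slots 0/1 and the second pair on
lane 1, slots 0/1 (lane masks 0x3, 0x3, 0x0, 0x0), whereas the previous per-lane fill would
have placed all four channels on lane 0. Spreading the pairs across lanes is what keeps the
placement of the leading channels identical whether the stream carries 2, 4 or 8 channels.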