diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst index 4f3206495217cff152fd01b601923949a701277c..10a26d44ef4a9782d778268768d84a1ebe588e0b 100644 --- a/Documentation/arm64/silicon-errata.rst +++ b/Documentation/arm64/silicon-errata.rst @@ -149,6 +149,9 @@ stable kernels. +----------------+-----------------+-----------------+-----------------------------+ | Hisilicon | Hip08 SMMU PMCG | #162001800 | N/A | +----------------+-----------------+-----------------+-----------------------------+ +| Hisilicon | Hip08 SMMU PMCG | #162001900 | N/A | +| | Hip09 SMMU PMCG | | | ++----------------+-----------------+-----------------+-----------------------------+ +----------------+-----------------+-----------------+-----------------------------+ | Qualcomm Tech. | Kryo/Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 | +----------------+-----------------+-----------------+-----------------------------+ diff --git a/Makefile b/Makefile index 0081cf02e163ad085fb87183d529b1f98cc15470..a6e9b9e7fd20ea64dd482b0ebee7afbd7814ef2a 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 5 PATCHLEVEL = 10 -SUBLEVEL = 196 +SUBLEVEL = 197 EXTRAVERSION = NAME = Dare mighty things diff --git a/README.OpenSource b/README.OpenSource index 657f957bdbb06b7f88d2665aadd27378c689b41e..d47af0554a07dacd872486cc60ca7c38c0490d14 100644 --- a/README.OpenSource +++ b/README.OpenSource @@ -3,7 +3,7 @@ "Name": "linux-5.10", "License": "GPL-2.0+", "License File": "COPYING", - "Version Number": "5.10.196", + "Version Number": "5.10.197", "Owner": "liuyu82@huawei.com", "Upstream URL": "https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/log/?h=linux-5.10.y", "Description": "linux kernel 5.10" diff --git a/arch/arm/boot/dts/am335x-guardian.dts b/arch/arm/boot/dts/am335x-guardian.dts index 1918766c1f809deec278f890c345ed87bfd97aa0..9594276acf9d2ef6778a2be5c3b86d049789b682 100644 --- a/arch/arm/boot/dts/am335x-guardian.dts +++ b/arch/arm/boot/dts/am335x-guardian.dts @@ -100,11 +100,12 @@ panel-info { }; - pwm7: dmtimer-pwm { + guardian_beeper: pwm-7 { compatible = "ti,omap-dmtimer-pwm"; + #pwm-cells = <3>; ti,timers = <&timer7>; pinctrl-names = "default"; - pinctrl-0 = <&dmtimer7_pins>; + pinctrl-0 = <&guardian_beeper_pins>; ti,clock-source = <0x01>; }; @@ -343,9 +344,9 @@ AM33XX_IOPAD(0x9b4, PIN_OUTPUT_PULLDOWN | MUX_MODE3) >; }; - dmtimer7_pins: pinmux_dmtimer7_pins { + guardian_beeper_pins: pinmux_dmtimer7_pins { pinctrl-single,pins = < - AM33XX_IOPAD(0x968, PIN_OUTPUT | MUX_MODE5) + AM33XX_IOPAD(0x968, PIN_OUTPUT | MUX_MODE5) /* (E18) timer7 */ >; }; diff --git a/arch/arm/boot/dts/am3517-evm.dts b/arch/arm/boot/dts/am3517-evm.dts index c8b80f156ec981bac2de6c8440deb878179ec7aa..9cc1ae36c4204b278e9b2c45f2f9e8dbf8625894 100644 --- a/arch/arm/boot/dts/am3517-evm.dts +++ b/arch/arm/boot/dts/am3517-evm.dts @@ -150,7 +150,7 @@ bl: backlight { enable-gpios = <&gpio6 22 GPIO_ACTIVE_HIGH>; /* gpio_182 */ }; - pwm11: dmtimer-pwm@11 { + pwm11: pwm-11 { compatible = "ti,omap-dmtimer-pwm"; pinctrl-names = "default"; pinctrl-0 = <&pwm_pins>; diff --git a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi index 533a47bc4a5316ddb0927359d746cd1b166dd161..1386a5e63eff7810834497771c306bb39852f28c 100644 --- a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi +++ b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi @@ -59,7 +59,7 @@ led2 { }; }; - pwm10: dmtimer-pwm { + pwm10: pwm-10 { compatible = 
"ti,omap-dmtimer-pwm"; pinctrl-names = "default"; pinctrl-0 = <&pwm_pins>; diff --git a/arch/arm/boot/dts/motorola-mapphone-common.dtsi b/arch/arm/boot/dts/motorola-mapphone-common.dtsi index 5f8f77cfbe59f7b1a020d3d7b2eda2accb511942..8cb26b924d3ca4cfae9d2256aeab0f533dbfc887 100644 --- a/arch/arm/boot/dts/motorola-mapphone-common.dtsi +++ b/arch/arm/boot/dts/motorola-mapphone-common.dtsi @@ -156,7 +156,7 @@ soundcard { dais = <&mcbsp2_port>, <&mcbsp3_port>; }; - pwm8: dmtimer-pwm-8 { + pwm8: pwm-8 { pinctrl-names = "default"; pinctrl-0 = <&vibrator_direction_pin>; @@ -166,7 +166,7 @@ pwm8: dmtimer-pwm-8 { ti,clock-source = <0x01>; }; - pwm9: dmtimer-pwm-9 { + pwm9: pwm-9 { pinctrl-names = "default"; pinctrl-0 = <&vibrator_enable_pin>; @@ -192,6 +192,29 @@ backlight: backlight { }; }; +&cpu_thermal { + polling-delay = <10000>; /* milliseconds */ +}; + +&cpu_alert0 { + temperature = <80000>; /* millicelsius */ +}; + +&cpu0 { + /* + * Note that the 1.2GiHz mode is enabled for all SoC variants for + * the Motorola Android Linux v3.0.8 based kernel. + */ + operating-points = < + /* kHz uV */ + 300000 1025000 + 600000 1200000 + 800000 1313000 + 1008000 1375000 + 1200000 1375000 + >; +}; + &dss { status = "okay"; }; @@ -384,7 +407,7 @@ &mmc3 { #address-cells = <1>; #size-cells = <0>; wlcore: wlcore@2 { - compatible = "ti,wl1285", "ti,wl1283"; + compatible = "ti,wl1285"; reg = <2>; /* gpio_100 with gpmc_wait2 pad as wakeirq */ interrupts-extended = <&gpio4 4 IRQ_TYPE_LEVEL_HIGH>, @@ -716,12 +739,12 @@ &rng_target { /* Configure pwm clock source for timers 8 & 9 */ &timer8 { assigned-clocks = <&abe_clkctrl OMAP4_TIMER8_CLKCTRL 24>; - assigned-clock-parents = <&sys_clkin_ck>; + assigned-clock-parents = <&sys_32k_ck>; }; &timer9 { assigned-clocks = <&l4_per_clkctrl OMAP4_TIMER9_CLKCTRL 24>; - assigned-clock-parents = <&sys_clkin_ck>; + assigned-clock-parents = <&sys_32k_ck>; }; /* diff --git a/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi b/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi index ded7e8fec9ebae76ec82c66b264bb231ccc422c2..9cf52650f07312894694037b90706b1e51d140e5 100644 --- a/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi +++ b/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi @@ -8,9 +8,9 @@ / { vddvario: regulator-vddvario { - compatible = "regulator-fixed"; - regulator-name = "vddvario"; - regulator-always-on; + compatible = "regulator-fixed"; + regulator-name = "vddvario"; + regulator-always-on; }; vdd33a: regulator-vdd33a { diff --git a/arch/arm/boot/dts/omap-gpmc-smsc9221.dtsi b/arch/arm/boot/dts/omap-gpmc-smsc9221.dtsi index e7534fe9c53cf960b216d06d27d69f17316a23ca..bc8961f3690f0679ad4112fa05738c38df20135d 100644 --- a/arch/arm/boot/dts/omap-gpmc-smsc9221.dtsi +++ b/arch/arm/boot/dts/omap-gpmc-smsc9221.dtsi @@ -12,9 +12,9 @@ / { vddvario: regulator-vddvario { - compatible = "regulator-fixed"; - regulator-name = "vddvario"; - regulator-always-on; + compatible = "regulator-fixed"; + regulator-name = "vddvario"; + regulator-always-on; }; vdd33a: regulator-vdd33a { diff --git a/arch/arm/boot/dts/omap3-cm-t3517.dts b/arch/arm/boot/dts/omap3-cm-t3517.dts index 3b8349094baa60308e937eb72bed012df56845ab..f25c0a84a190ca36bb2d021440b74ddac6ec15e5 100644 --- a/arch/arm/boot/dts/omap3-cm-t3517.dts +++ b/arch/arm/boot/dts/omap3-cm-t3517.dts @@ -11,12 +11,12 @@ / { model = "CompuLab CM-T3517"; compatible = "compulab,omap3-cm-t3517", "ti,am3517", "ti,omap3"; - vmmc: regulator-vmmc { - compatible = "regulator-fixed"; - regulator-name = "vmmc"; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - 
}; + vmmc: regulator-vmmc { + compatible = "regulator-fixed"; + regulator-name = "vmmc"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + }; wl12xx_vmmc2: wl12xx_vmmc2 { compatible = "regulator-fixed"; diff --git a/arch/arm/boot/dts/omap3-cpu-thermal.dtsi b/arch/arm/boot/dts/omap3-cpu-thermal.dtsi index 1ed837859374514c4b3e48659a7ed5841d6c0bd2..51e6c2d42be2899665d18065112a17aacc7c3e02 100644 --- a/arch/arm/boot/dts/omap3-cpu-thermal.dtsi +++ b/arch/arm/boot/dts/omap3-cpu-thermal.dtsi @@ -15,8 +15,7 @@ cpu_thermal: cpu_thermal { polling-delay = <1000>; /* milliseconds */ coefficients = <0 20000>; - /* sensor ID */ - thermal-sensors = <&bandgap 0>; + thermal-sensors = <&bandgap>; cpu_trips: trips { cpu_alert0: cpu_alert { diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi index e61e5ddbf2027e0efd06b820bf954212f2eeaebd..68e56b50652a9331f58912cb3304bdef5d5571c8 100644 --- a/arch/arm/boot/dts/omap3-gta04.dtsi +++ b/arch/arm/boot/dts/omap3-gta04.dtsi @@ -147,7 +147,7 @@ backlight: backlight { pinctrl-0 = <&backlight_pins>; }; - pwm11: dmtimer-pwm { + pwm11: pwm-11 { compatible = "ti,omap-dmtimer-pwm"; ti,timers = <&timer11>; #pwm-cells = <3>; @@ -332,7 +332,7 @@ OMAP3_CORE1_IOPAD(0x2106, PIN_OUTPUT | MUX_MODE0) /* dss_data21.dss_data21 */ OMAP3_CORE1_IOPAD(0x2108, PIN_OUTPUT | MUX_MODE0) /* dss_data22.dss_data22 */ OMAP3_CORE1_IOPAD(0x210a, PIN_OUTPUT | MUX_MODE0) /* dss_data23.dss_data23 */ >; - }; + }; gps_pins: pinmux_gps_pins { pinctrl-single,pins = < @@ -866,8 +866,8 @@ &mcbsp4 { /* GSM voice PCM */ }; &hdqw1w { - pinctrl-names = "default"; - pinctrl-0 = <&hdq_pins>; + pinctrl-names = "default"; + pinctrl-0 = <&hdq_pins>; }; /* image signal processor within OMAP3 SoC */ diff --git a/arch/arm/boot/dts/omap3-ldp.dts b/arch/arm/boot/dts/omap3-ldp.dts index 9c6a9272459045af7448e383627cb235d0de39c0..b898e2f6f41dc02a5d1803c381f140390d1af98a 100644 --- a/arch/arm/boot/dts/omap3-ldp.dts +++ b/arch/arm/boot/dts/omap3-ldp.dts @@ -301,5 +301,5 @@ &usb_otg_hs { &vaux1 { /* Needed for ads7846 */ - regulator-name = "vcc"; + regulator-name = "vcc"; }; diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index d40c3d2c4914e400cf1365397632dc3d59ebf0f8..7dafd69b7d35918198085c34ee87345927840f32 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts @@ -156,7 +156,7 @@ battery: n900-battery { io-channel-names = "temp", "bsi", "vbat"; }; - pwm9: dmtimer-pwm { + pwm9: pwm-9 { compatible = "ti,omap-dmtimer-pwm"; #pwm-cells = <3>; ti,timers = <&timer9>; @@ -236,27 +236,27 @@ gpmc_pins: pinmux_gpmc_pins { pinctrl-single,pins = < /* address lines */ - OMAP3_CORE1_IOPAD(0x207a, PIN_OUTPUT | MUX_MODE0) /* gpmc_a1.gpmc_a1 */ - OMAP3_CORE1_IOPAD(0x207c, PIN_OUTPUT | MUX_MODE0) /* gpmc_a2.gpmc_a2 */ - OMAP3_CORE1_IOPAD(0x207e, PIN_OUTPUT | MUX_MODE0) /* gpmc_a3.gpmc_a3 */ + OMAP3_CORE1_IOPAD(0x207a, PIN_OUTPUT | MUX_MODE0) /* gpmc_a1.gpmc_a1 */ + OMAP3_CORE1_IOPAD(0x207c, PIN_OUTPUT | MUX_MODE0) /* gpmc_a2.gpmc_a2 */ + OMAP3_CORE1_IOPAD(0x207e, PIN_OUTPUT | MUX_MODE0) /* gpmc_a3.gpmc_a3 */ /* data lines, gpmc_d0..d7 not muxable according to TRM */ - OMAP3_CORE1_IOPAD(0x209e, PIN_INPUT | MUX_MODE0) /* gpmc_d8.gpmc_d8 */ - OMAP3_CORE1_IOPAD(0x20a0, PIN_INPUT | MUX_MODE0) /* gpmc_d9.gpmc_d9 */ - OMAP3_CORE1_IOPAD(0x20a2, PIN_INPUT | MUX_MODE0) /* gpmc_d10.gpmc_d10 */ - OMAP3_CORE1_IOPAD(0x20a4, PIN_INPUT | MUX_MODE0) /* gpmc_d11.gpmc_d11 */ - OMAP3_CORE1_IOPAD(0x20a6, PIN_INPUT | MUX_MODE0) /* 
gpmc_d12.gpmc_d12 */ - OMAP3_CORE1_IOPAD(0x20a8, PIN_INPUT | MUX_MODE0) /* gpmc_d13.gpmc_d13 */ - OMAP3_CORE1_IOPAD(0x20aa, PIN_INPUT | MUX_MODE0) /* gpmc_d14.gpmc_d14 */ - OMAP3_CORE1_IOPAD(0x20ac, PIN_INPUT | MUX_MODE0) /* gpmc_d15.gpmc_d15 */ + OMAP3_CORE1_IOPAD(0x209e, PIN_INPUT | MUX_MODE0) /* gpmc_d8.gpmc_d8 */ + OMAP3_CORE1_IOPAD(0x20a0, PIN_INPUT | MUX_MODE0) /* gpmc_d9.gpmc_d9 */ + OMAP3_CORE1_IOPAD(0x20a2, PIN_INPUT | MUX_MODE0) /* gpmc_d10.gpmc_d10 */ + OMAP3_CORE1_IOPAD(0x20a4, PIN_INPUT | MUX_MODE0) /* gpmc_d11.gpmc_d11 */ + OMAP3_CORE1_IOPAD(0x20a6, PIN_INPUT | MUX_MODE0) /* gpmc_d12.gpmc_d12 */ + OMAP3_CORE1_IOPAD(0x20a8, PIN_INPUT | MUX_MODE0) /* gpmc_d13.gpmc_d13 */ + OMAP3_CORE1_IOPAD(0x20aa, PIN_INPUT | MUX_MODE0) /* gpmc_d14.gpmc_d14 */ + OMAP3_CORE1_IOPAD(0x20ac, PIN_INPUT | MUX_MODE0) /* gpmc_d15.gpmc_d15 */ /* * gpmc_ncs0, gpmc_nadv_ale, gpmc_noe, gpmc_nwe, gpmc_wait0 not muxable * according to TRM. OneNAND seems to require PIN_INPUT on clock. */ - OMAP3_CORE1_IOPAD(0x20b0, PIN_OUTPUT | MUX_MODE0) /* gpmc_ncs1.gpmc_ncs1 */ - OMAP3_CORE1_IOPAD(0x20be, PIN_INPUT | MUX_MODE0) /* gpmc_clk.gpmc_clk */ - >; + OMAP3_CORE1_IOPAD(0x20b0, PIN_OUTPUT | MUX_MODE0) /* gpmc_ncs1.gpmc_ncs1 */ + OMAP3_CORE1_IOPAD(0x20be, PIN_INPUT | MUX_MODE0) /* gpmc_clk.gpmc_clk */ + >; }; i2c1_pins: pinmux_i2c1_pins { @@ -738,12 +738,12 @@ tpa6130a2: tpa6130a2@60 { si4713: si4713@63 { compatible = "silabs,si4713"; - reg = <0x63>; + reg = <0x63>; - interrupts-extended = <&gpio2 21 IRQ_TYPE_EDGE_FALLING>; /* 53 */ - reset-gpios = <&gpio6 3 GPIO_ACTIVE_HIGH>; /* 163 */ - vio-supply = <&vio>; - vdd-supply = <&vaux1>; + interrupts-extended = <&gpio2 21 IRQ_TYPE_EDGE_FALLING>; /* 53 */ + reset-gpios = <&gpio6 3 GPIO_ACTIVE_HIGH>; /* 163 */ + vio-supply = <&vio>; + vdd-supply = <&vaux1>; }; bq24150a: bq24150a@6b { diff --git a/arch/arm/boot/dts/omap3-zoom3.dts b/arch/arm/boot/dts/omap3-zoom3.dts index 0482676d18306e78282f4c89d1984bb60a383588..ce58b1f208e817d05cffd8affaf445a64a36ed44 100644 --- a/arch/arm/boot/dts/omap3-zoom3.dts +++ b/arch/arm/boot/dts/omap3-zoom3.dts @@ -23,9 +23,9 @@ memory@80000000 { }; vddvario: regulator-vddvario { - compatible = "regulator-fixed"; - regulator-name = "vddvario"; - regulator-always-on; + compatible = "regulator-fixed"; + regulator-name = "vddvario"; + regulator-always-on; }; vdd33a: regulator-vdd33a { @@ -84,28 +84,28 @@ OMAP3_CORE1_IOPAD(0x21d0, PIN_INPUT_PULLUP | MUX_MODE3) /* mcspi1_cs1.sdmmc3_cmd uart1_pins: pinmux_uart1_pins { pinctrl-single,pins = < - OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT | MUX_MODE0) /* uart1_cts.uart1_cts */ - OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE0) /* uart1_rts.uart1_rts */ - OMAP3_CORE1_IOPAD(0x2182, WAKEUP_EN | PIN_INPUT | MUX_MODE0) /* uart1_rx.uart1_rx */ - OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE0) /* uart1_tx.uart1_tx */ + OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT | MUX_MODE0) /* uart1_cts.uart1_cts */ + OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE0) /* uart1_rts.uart1_rts */ + OMAP3_CORE1_IOPAD(0x2182, WAKEUP_EN | PIN_INPUT | MUX_MODE0) /* uart1_rx.uart1_rx */ + OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE0) /* uart1_tx.uart1_tx */ >; }; uart2_pins: pinmux_uart2_pins { pinctrl-single,pins = < - OMAP3_CORE1_IOPAD(0x2174, PIN_INPUT_PULLUP | MUX_MODE0) /* uart2_cts.uart2_cts */ - OMAP3_CORE1_IOPAD(0x2176, PIN_OUTPUT | MUX_MODE0) /* uart2_rts.uart2_rts */ - OMAP3_CORE1_IOPAD(0x217a, PIN_INPUT | MUX_MODE0) /* uart2_rx.uart2_rx */ - OMAP3_CORE1_IOPAD(0x2178, PIN_OUTPUT | MUX_MODE0) /* uart2_tx.uart2_tx */ + 
OMAP3_CORE1_IOPAD(0x2174, PIN_INPUT_PULLUP | MUX_MODE0) /* uart2_cts.uart2_cts */ + OMAP3_CORE1_IOPAD(0x2176, PIN_OUTPUT | MUX_MODE0) /* uart2_rts.uart2_rts */ + OMAP3_CORE1_IOPAD(0x217a, PIN_INPUT | MUX_MODE0) /* uart2_rx.uart2_rx */ + OMAP3_CORE1_IOPAD(0x2178, PIN_OUTPUT | MUX_MODE0) /* uart2_tx.uart2_tx */ >; }; uart3_pins: pinmux_uart3_pins { pinctrl-single,pins = < - OMAP3_CORE1_IOPAD(0x219a, PIN_INPUT_PULLDOWN | MUX_MODE0) /* uart3_cts_rctx.uart3_cts_rctx */ - OMAP3_CORE1_IOPAD(0x219c, PIN_OUTPUT | MUX_MODE0) /* uart3_rts_sd.uart3_rts_sd */ - OMAP3_CORE1_IOPAD(0x219e, PIN_INPUT | MUX_MODE0) /* uart3_rx_irrx.uart3_rx_irrx */ - OMAP3_CORE1_IOPAD(0x21a0, PIN_OUTPUT | MUX_MODE0) /* uart3_tx_irtx.uart3_tx_irtx */ + OMAP3_CORE1_IOPAD(0x219a, PIN_INPUT_PULLDOWN | MUX_MODE0) /* uart3_cts_rctx.uart3_cts_rctx */ + OMAP3_CORE1_IOPAD(0x219c, PIN_OUTPUT | MUX_MODE0) /* uart3_rts_sd.uart3_rts_sd */ + OMAP3_CORE1_IOPAD(0x219e, PIN_INPUT | MUX_MODE0) /* uart3_rx_irrx.uart3_rx_irrx */ + OMAP3_CORE1_IOPAD(0x21a0, PIN_OUTPUT | MUX_MODE0) /* uart3_tx_irtx.uart3_tx_irtx */ >; }; @@ -205,22 +205,22 @@ wlcore: wlcore@2 { }; &uart1 { - pinctrl-names = "default"; - pinctrl-0 = <&uart1_pins>; + pinctrl-names = "default"; + pinctrl-0 = <&uart1_pins>; }; &uart2 { - pinctrl-names = "default"; - pinctrl-0 = <&uart2_pins>; + pinctrl-names = "default"; + pinctrl-0 = <&uart2_pins>; }; &uart3 { - pinctrl-names = "default"; - pinctrl-0 = <&uart3_pins>; + pinctrl-names = "default"; + pinctrl-0 = <&uart3_pins>; }; &uart4 { - status = "disabled"; + status = "disabled"; }; &usb_otg_hs { diff --git a/arch/arm/boot/dts/omap4-cpu-thermal.dtsi b/arch/arm/boot/dts/omap4-cpu-thermal.dtsi index 03d054b2bf9af5244e20c4eb35266c8d103449bb..4b3afe29806290a2d1ca1f7907330d452e4ada86 100644 --- a/arch/arm/boot/dts/omap4-cpu-thermal.dtsi +++ b/arch/arm/boot/dts/omap4-cpu-thermal.dtsi @@ -15,21 +15,24 @@ cpu_thermal: cpu_thermal { polling-delay-passive = <250>; /* milliseconds */ polling-delay = <1000>; /* milliseconds */ - /* sensor ID */ - thermal-sensors = <&bandgap 0>; + /* + * See 44xx files for single sensor addressing, omap5 and dra7 need + * also sensor ID for addressing. 
+ */ + thermal-sensors = <&bandgap 0>; cpu_trips: trips { - cpu_alert0: cpu_alert { - temperature = <100000>; /* millicelsius */ - hysteresis = <2000>; /* millicelsius */ - type = "passive"; - }; - cpu_crit: cpu_crit { - temperature = <125000>; /* millicelsius */ - hysteresis = <2000>; /* millicelsius */ - type = "critical"; - }; - }; + cpu_alert0: cpu_alert { + temperature = <100000>; /* millicelsius */ + hysteresis = <2000>; /* millicelsius */ + type = "passive"; + }; + cpu_crit: cpu_crit { + temperature = <125000>; /* millicelsius */ + hysteresis = <2000>; /* millicelsius */ + type = "critical"; + }; + }; cpu_cooling_maps: cooling-maps { map0 { diff --git a/arch/arm/boot/dts/omap443x.dtsi b/arch/arm/boot/dts/omap443x.dtsi index dd8ef58cbaed41a66ed6177080da2a472c19904a..cce39dce1428a655ef5d6905a6e0a0a9d02b7546 100644 --- a/arch/arm/boot/dts/omap443x.dtsi +++ b/arch/arm/boot/dts/omap443x.dtsi @@ -72,6 +72,7 @@ abb_iva: regulator-abb-iva { }; &cpu_thermal { + thermal-sensors = <&bandgap>; coefficients = <0 20000>; }; diff --git a/arch/arm/boot/dts/omap4460.dtsi b/arch/arm/boot/dts/omap4460.dtsi index 2d3e54901b6ea4f20b2afcc38ec2665afd8ca776..d62e2bacca18171d31b047bd7a117153f9bcb949 100644 --- a/arch/arm/boot/dts/omap4460.dtsi +++ b/arch/arm/boot/dts/omap4460.dtsi @@ -89,6 +89,7 @@ abb_iva: regulator-abb-iva { }; &cpu_thermal { + thermal-sensors = <&bandgap>; coefficients = <348 (-9301)>; }; diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts index e62ea8b6d53fd76dd802e3478d10867af17d30a3..af288d63a26a464b456e5f353a0464d61715022e 100644 --- a/arch/arm/boot/dts/omap5-cm-t54.dts +++ b/arch/arm/boot/dts/omap5-cm-t54.dts @@ -84,36 +84,36 @@ led1 { }; lcd0: display { - compatible = "startek,startek-kd050c", "panel-dpi"; - label = "lcd"; - - pinctrl-names = "default"; - pinctrl-0 = <&lcd_pins>; - - enable-gpios = <&gpio8 3 GPIO_ACTIVE_HIGH>; - - panel-timing { - clock-frequency = <33000000>; - hactive = <800>; - vactive = <480>; - hfront-porch = <40>; - hback-porch = <40>; - hsync-len = <43>; - vback-porch = <29>; - vfront-porch = <13>; - vsync-len = <3>; - hsync-active = <0>; - vsync-active = <0>; - de-active = <1>; - pixelclk-active = <1>; - }; - - port { - lcd_in: endpoint { - remote-endpoint = <&dpi_lcd_out>; - }; - }; - }; + compatible = "startek,startek-kd050c", "panel-dpi"; + label = "lcd"; + + pinctrl-names = "default"; + pinctrl-0 = <&lcd_pins>; + + enable-gpios = <&gpio8 3 GPIO_ACTIVE_HIGH>; + + panel-timing { + clock-frequency = <33000000>; + hactive = <800>; + vactive = <480>; + hfront-porch = <40>; + hback-porch = <40>; + hsync-len = <43>; + vback-porch = <29>; + vfront-porch = <13>; + vsync-len = <3>; + hsync-active = <0>; + vsync-active = <0>; + de-active = <1>; + pixelclk-active = <1>; + }; + + port { + lcd_in: endpoint { + remote-endpoint = <&dpi_lcd_out>; + }; + }; + }; hdmi0: connector0 { compatible = "hdmi-connector"; @@ -644,8 +644,8 @@ &usbhsehci { }; &usb3 { - extcon = <&extcon_usb3>; - vbus-supply = <&smps10_out1_reg>; + extcon = <&extcon_usb3>; + vbus-supply = <&smps10_out1_reg>; }; &cpu0 { diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index b1423fb130ea4a04d5192ec61beb8453b29b04af..8f1fa7aac31fbe2cf131e25a30187c3c4bc6b6c7 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -626,7 +626,7 @@ int hw_breakpoint_arch_parse(struct perf_event *bp, hw->address &= ~alignment_mask; hw->ctrl.len <<= offset; - if (is_default_overflow_handler(bp)) { + if 
(uses_default_overflow_handler(bp)) { /* * Mismatch breakpoints are required for single-stepping * breakpoints. @@ -798,7 +798,7 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr, * Otherwise, insert a temporary mismatch breakpoint so that * we can single-step over the watchpoint trigger. */ - if (!is_default_overflow_handler(wp)) + if (!uses_default_overflow_handler(wp)) continue; step: enable_single_step(wp, instruction_pointer(regs)); @@ -811,7 +811,7 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr, info->trigger = addr; pr_debug("watchpoint fired: address = 0x%x\n", info->trigger); perf_bp_event(wp, regs); - if (is_default_overflow_handler(wp)) + if (uses_default_overflow_handler(wp)) enable_single_step(wp, instruction_pointer(regs)); } @@ -886,7 +886,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs) info->trigger = addr; pr_debug("breakpoint fired: address = 0x%x\n", addr); perf_bp_event(bp, regs); - if (is_default_overflow_handler(bp)) + if (uses_default_overflow_handler(bp)) enable_single_step(bp, addr); goto unlock; } diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index 712e97c03e54c287e4f6e9ee0e5d02ee3665a3cf..e5a0c38f1b5ee98b07e78decb70348544a0b696a 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c @@ -654,7 +654,7 @@ static int breakpoint_handler(unsigned long unused, unsigned int esr, perf_bp_event(bp, regs); /* Do we need to handle the stepping? */ - if (is_default_overflow_handler(bp)) + if (uses_default_overflow_handler(bp)) step = 1; unlock: rcu_read_unlock(); @@ -733,7 +733,7 @@ static u64 get_distance_from_watchpoint(unsigned long addr, u64 val, static int watchpoint_report(struct perf_event *wp, unsigned long addr, struct pt_regs *regs) { - int step = is_default_overflow_handler(wp); + int step = uses_default_overflow_handler(wp); struct arch_hw_breakpoint *info = counter_arch_bp(wp); info->trigger = addr; diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c index 50de86eb8784c79cdb50ae3d277c164df9563c1d..3183df60ad33775ec601f9488961fe67cd910773 100644 --- a/arch/mips/alchemy/devboards/db1000.c +++ b/arch/mips/alchemy/devboards/db1000.c @@ -164,6 +164,7 @@ static struct platform_device db1x00_audio_dev = { /******************************************************************************/ +#ifdef CONFIG_MMC_AU1X static irqreturn_t db1100_mmc_cd(int irq, void *ptr) { mmc_detect_change(ptr, msecs_to_jiffies(500)); @@ -369,6 +370,7 @@ static struct platform_device db1100_mmc1_dev = { .num_resources = ARRAY_SIZE(au1100_mmc1_res), .resource = au1100_mmc1_res, }; +#endif /* CONFIG_MMC_AU1X */ /******************************************************************************/ @@ -432,8 +434,10 @@ static struct platform_device *db1x00_devs[] = { static struct platform_device *db1100_devs[] = { &au1100_lcd_device, +#ifdef CONFIG_MMC_AU1X &db1100_mmc0_dev, &db1100_mmc1_dev, +#endif }; int __init db1000_dev_setup(void) diff --git a/arch/mips/alchemy/devboards/db1200.c b/arch/mips/alchemy/devboards/db1200.c index b70e2cf8a27bc0a5be507c05257a74a23daa27f3..414f92eacb5e5c6a94aea7a247ac7e7757c62c5b 100644 --- a/arch/mips/alchemy/devboards/db1200.c +++ b/arch/mips/alchemy/devboards/db1200.c @@ -326,6 +326,7 @@ static struct platform_device db1200_ide_dev = { /**********************************************************************/ +#ifdef CONFIG_MMC_AU1X /* SD carddetects: they're supposed to be edge-triggered, 
but ack * doesn't seem to work (CPLD Rev 2). Instead, the screaming one * is disabled and its counterpart enabled. The 200ms timeout is @@ -584,6 +585,7 @@ static struct platform_device pb1200_mmc1_dev = { .num_resources = ARRAY_SIZE(au1200_mmc1_res), .resource = au1200_mmc1_res, }; +#endif /* CONFIG_MMC_AU1X */ /**********************************************************************/ @@ -751,7 +753,9 @@ static struct platform_device db1200_audiodma_dev = { static struct platform_device *db1200_devs[] __initdata = { NULL, /* PSC0, selected by S6.8 */ &db1200_ide_dev, +#ifdef CONFIG_MMC_AU1X &db1200_mmc0_dev, +#endif &au1200_lcd_dev, &db1200_eth_dev, &db1200_nand_dev, @@ -762,7 +766,9 @@ static struct platform_device *db1200_devs[] __initdata = { }; static struct platform_device *pb1200_devs[] __initdata = { +#ifdef CONFIG_MMC_AU1X &pb1200_mmc1_dev, +#endif }; /* Some peripheral base addresses differ on the PB1200 */ diff --git a/arch/mips/alchemy/devboards/db1300.c b/arch/mips/alchemy/devboards/db1300.c index ca71e5ed51abd0507030af07d1948464ce403831..c965d00074818767f8ddc90fb2634e1c7504ea9f 100644 --- a/arch/mips/alchemy/devboards/db1300.c +++ b/arch/mips/alchemy/devboards/db1300.c @@ -450,6 +450,7 @@ static struct platform_device db1300_ide_dev = { /**********************************************************************/ +#ifdef CONFIG_MMC_AU1X static irqreturn_t db1300_mmc_cd(int irq, void *ptr) { disable_irq_nosync(irq); @@ -632,6 +633,7 @@ static struct platform_device db1300_sd0_dev = { .resource = au1300_sd0_res, .num_resources = ARRAY_SIZE(au1300_sd0_res), }; +#endif /* CONFIG_MMC_AU1X */ /**********************************************************************/ @@ -776,8 +778,10 @@ static struct platform_device *db1300_dev[] __initdata = { &db1300_5waysw_dev, &db1300_nand_dev, &db1300_ide_dev, +#ifdef CONFIG_MMC_AU1X &db1300_sd0_dev, &db1300_sd1_dev, +#endif &db1300_lcd_dev, &db1300_ac97_dev, &db1300_i2s_dev, diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c index 1cd2351d241e83c5c008c53afd585e66b8bcd029..61a08747b1641d66d1e27a9fea5931ca3a2bb59b 100644 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c @@ -1410,7 +1410,7 @@ static int h_24x7_event_init(struct perf_event *event) } domain = event_get_domain(event); - if (domain >= HV_PERF_DOMAIN_MAX) { + if (domain == 0 || domain >= HV_PERF_DOMAIN_MAX) { pr_devel("invalid domain %d\n", domain); return -EINVAL; } diff --git a/arch/powerpc/platforms/pseries/ibmebus.c b/arch/powerpc/platforms/pseries/ibmebus.c index 8c6e509f696756e5a877af8a4e9161263bcc1671..c3cc010e9cc459a5daf1918073b92e2b44e728ce 100644 --- a/arch/powerpc/platforms/pseries/ibmebus.c +++ b/arch/powerpc/platforms/pseries/ibmebus.c @@ -451,6 +451,7 @@ static int __init ibmebus_bus_init(void) if (err) { printk(KERN_WARNING "%s: device_register returned %i\n", __func__, err); + put_device(&ibmebus_bus_device); bus_unregister(&ibmebus_bus_type); return err; diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c index 39b2eded7bc2b633c8fafa2bce50ed788b054a83..f4a2e6d373b29a283c95f242d7acdeb346022562 100644 --- a/arch/x86/boot/compressed/ident_map_64.c +++ b/arch/x86/boot/compressed/ident_map_64.c @@ -67,6 +67,14 @@ static void *alloc_pgt_page(void *context) return NULL; } + /* Consumed more tables than expected? 
*/ + if (pages->pgt_buf_offset == BOOT_PGT_SIZE_WARN) { + debug_putstr("pgt_buf running low in " __FILE__ "\n"); + debug_putstr("Need to raise BOOT_PGT_SIZE?\n"); + debug_putaddr(pages->pgt_buf_offset); + debug_putaddr(pages->pgt_buf_size); + } + entry = pages->pgt_buf + pages->pgt_buf_offset; pages->pgt_buf_offset += PAGE_SIZE; diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h index 9191280d9ea3160d8d92db5e688fc34712b90bf8..215d37f7dde8a993295160eaa6e8f012beb05783 100644 --- a/arch/x86/include/asm/boot.h +++ b/arch/x86/include/asm/boot.h @@ -40,23 +40,40 @@ #ifdef CONFIG_X86_64 # define BOOT_STACK_SIZE 0x4000 +/* + * Used by decompressor's startup_32() to allocate page tables for identity + * mapping of the 4G of RAM in 4-level paging mode: + * - 1 level4 table; + * - 1 level3 table; + * - 4 level2 table that maps everything with 2M pages; + * + * The additional level5 table needed for 5-level paging is allocated from + * trampoline_32bit memory. + */ # define BOOT_INIT_PGT_SIZE (6*4096) -# ifdef CONFIG_RANDOMIZE_BASE + /* - * Assuming all cross the 512GB boundary: - * 1 page for level4 - * (2+2)*4 pages for kernel, param, cmd_line, and randomized kernel - * 2 pages for first 2M (video RAM: CONFIG_X86_VERBOSE_BOOTUP). - * Total is 19 pages. + * Total number of page tables kernel_add_identity_map() can allocate, + * including page tables consumed by startup_32(). + * + * Worst-case scenario: + * - 5-level paging needs 1 level5 table; + * - KASLR needs to map kernel, boot_params, cmdline and randomized kernel, + * assuming all of them cross 256T boundary: + * + 4*2 level4 table; + * + 4*2 level3 table; + * + 4*2 level2 table; + * - X86_VERBOSE_BOOTUP needs to map the first 2M (video RAM): + * + 1 level4 table; + * + 1 level3 table; + * + 1 level2 table; + * Total: 28 tables + * + * Add 4 spare table in case decompressor touches anything beyond what is + * accounted above. Warn if it happens. */ -# ifdef CONFIG_X86_VERBOSE_BOOTUP -# define BOOT_PGT_SIZE (19*4096) -# else /* !CONFIG_X86_VERBOSE_BOOTUP */ -# define BOOT_PGT_SIZE (17*4096) -# endif -# else /* !CONFIG_RANDOMIZE_BASE */ -# define BOOT_PGT_SIZE BOOT_INIT_PGT_SIZE -# endif +# define BOOT_PGT_SIZE_WARN (28*4096) +# define BOOT_PGT_SIZE (32*4096) #else /* !CONFIG_X86_64 */ # define BOOT_STACK_SIZE 0x1000 diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 4d11a50089b27d74a8ea9ec5ed58fef854f39dfd..ec3ddb9a456ba2b659eeb5d34938e2b7b75e1a77 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -2344,7 +2344,7 @@ static void __init srso_select_mitigation(void) switch (srso_cmd) { case SRSO_CMD_OFF: - return; + goto pred_cmd; case SRSO_CMD_MICROCODE: if (has_microcode) { @@ -2622,7 +2622,7 @@ static ssize_t srso_show_state(char *buf) return sysfs_emit(buf, "%s%s\n", srso_strings[srso_mitigation], - (cpu_has_ibpb_brtype_microcode() ? "" : ", no microcode")); + boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) ? "" : ", no microcode"); } static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, diff --git a/crypto/lrw.c b/crypto/lrw.c index bcf09fbc750af33d8c91a018ed218efec27d870e..80d9076e42e0be7f6fed7677082e32eec0da0028 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -357,10 +357,10 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb) * cipher name. 
*/ if (!strncmp(cipher_name, "ecb(", 4)) { - unsigned len; + int len; - len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name)); - if (len < 2 || len >= sizeof(ecb_name)) + len = strscpy(ecb_name, cipher_name + 4, sizeof(ecb_name)); + if (len < 2) goto err_free_inst; if (ecb_name[len - 1] != ')') diff --git a/crypto/xts.c b/crypto/xts.c index c6a105dba38b946ecf8e26a6ff884d21b9ba2d60..74dc199d5486707399f64777b13be14376589a50 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -395,10 +395,10 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb) * cipher name. */ if (!strncmp(cipher_name, "ecb(", 4)) { - unsigned len; + int len; - len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name)); - if (len < 2 || len >= sizeof(ctx->name)) + len = strscpy(ctx->name, cipher_name + 4, sizeof(ctx->name)); + if (len < 2) goto err_free_inst; if (ctx->name[len - 1] != ')') diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c index 28af49263ebfae5808a04e569de988d483aab2b5..62957cba30f616c202a90c52c36d1fbfae37b847 100644 --- a/drivers/acpi/acpica/psopcode.c +++ b/drivers/acpi/acpica/psopcode.c @@ -603,7 +603,7 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = { /* 7E */ ACPI_OP("Timer", ARGP_TIMER_OP, ARGI_TIMER_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_0A_0T_1R, - AML_FLAGS_EXEC_0A_0T_1R), + AML_FLAGS_EXEC_0A_0T_1R | AML_NO_OPERAND_RESOLVE), /* ACPI 5.0 opcodes */ diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 50ed949dc14494538084767a5050d296e0e0463e..554943be2698478adbab58915a1fa6e4b17572d1 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -1474,7 +1474,10 @@ static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res, static struct acpi_platform_list pmcg_plat_info[] __initdata = { /* HiSilicon Hip08 Platform */ {"HISI ", "HIP08 ", 0, ACPI_SIG_IORT, greater_than_or_equal, - "Erratum #162001800", IORT_SMMU_V3_PMCG_HISI_HIP08}, + "Erratum #162001800, Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP08}, + /* HiSilicon Hip09 Platform */ + {"HISI ", "HIP09 ", 0, ACPI_SIG_IORT, greater_than_or_equal, + "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09}, { } }; diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index b02d381e7848307cfa553f6b9fe80c1ba5440ca9..a5cb9e1d48bcc6bbf82505b8721f7505830130a2 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -307,6 +307,15 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_BOARD_NAME, "Lenovo IdeaPad S405"), }, }, + { + /* https://bugzilla.suse.com/show_bug.cgi?id=1208724 */ + .callback = video_detect_force_native, + /* Lenovo Ideapad Z470 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Z470"), + }, + }, { /* https://bugzilla.redhat.com/show_bug.cgi?id=1187004 */ .callback = video_detect_force_native, @@ -348,6 +357,24 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "iMac11,3"), }, }, + { + /* https://gitlab.freedesktop.org/drm/amd/-/issues/1838 */ + .callback = video_detect_force_native, + /* Apple iMac12,1 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "iMac12,1"), + }, + }, + { + /* https://gitlab.freedesktop.org/drm/amd/-/issues/2753 */ + .callback = video_detect_force_native, + /* Apple iMac12,2 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "iMac12,2"), + }, + }, { /* 
https://bugzilla.redhat.com/show_bug.cgi?id=1217249 */ .callback = video_detect_force_native, diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 7ca9fa9a75e24db03832a0d64bb79404242bef5b..4297a8d69dbf7ef09aa05e3a7adf68714acd6a33 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -50,7 +50,8 @@ enum board_ids { /* board IDs by feature in alphabetical order */ board_ahci, board_ahci_ign_iferr, - board_ahci_mobile, + board_ahci_low_power, + board_ahci_no_debounce_delay, board_ahci_nomsi, board_ahci_noncq, board_ahci_nosntf, @@ -135,13 +136,20 @@ static const struct ata_port_info ahci_port_info[] = { .udma_mask = ATA_UDMA6, .port_ops = &ahci_ops, }, - [board_ahci_mobile] = { + [board_ahci_low_power] = { AHCI_HFLAGS (AHCI_HFLAG_IS_MOBILE), .flags = AHCI_FLAG_COMMON, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &ahci_ops, }, + [board_ahci_no_debounce_delay] = { + .flags = AHCI_FLAG_COMMON, + .link_flags = ATA_LFLAG_NO_DEBOUNCE_DELAY, + .pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, + .port_ops = &ahci_ops, + }, [board_ahci_nomsi] = { AHCI_HFLAGS (AHCI_HFLAG_NO_MSI), .flags = AHCI_FLAG_COMMON, @@ -268,13 +276,13 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */ { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */ { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */ - { PCI_VDEVICE(INTEL, 0x2929), board_ahci_mobile }, /* ICH9M */ - { PCI_VDEVICE(INTEL, 0x292a), board_ahci_mobile }, /* ICH9M */ - { PCI_VDEVICE(INTEL, 0x292b), board_ahci_mobile }, /* ICH9M */ - { PCI_VDEVICE(INTEL, 0x292c), board_ahci_mobile }, /* ICH9M */ - { PCI_VDEVICE(INTEL, 0x292f), board_ahci_mobile }, /* ICH9M */ + { PCI_VDEVICE(INTEL, 0x2929), board_ahci_low_power }, /* ICH9M */ + { PCI_VDEVICE(INTEL, 0x292a), board_ahci_low_power }, /* ICH9M */ + { PCI_VDEVICE(INTEL, 0x292b), board_ahci_low_power }, /* ICH9M */ + { PCI_VDEVICE(INTEL, 0x292c), board_ahci_low_power }, /* ICH9M */ + { PCI_VDEVICE(INTEL, 0x292f), board_ahci_low_power }, /* ICH9M */ { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */ - { PCI_VDEVICE(INTEL, 0x294e), board_ahci_mobile }, /* ICH9M */ + { PCI_VDEVICE(INTEL, 0x294e), board_ahci_low_power }, /* ICH9M */ { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */ { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */ { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */ @@ -284,9 +292,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */ { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */ { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */ - { PCI_VDEVICE(INTEL, 0x3b29), board_ahci_mobile }, /* PCH M AHCI */ + { PCI_VDEVICE(INTEL, 0x3b29), board_ahci_low_power }, /* PCH M AHCI */ { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */ - { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci_mobile }, /* PCH M RAID */ + { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci_low_power }, /* PCH M RAID */ { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */ { PCI_VDEVICE(INTEL, 0x19b0), board_ahci_pcs7 }, /* DNV AHCI */ { PCI_VDEVICE(INTEL, 0x19b1), board_ahci_pcs7 }, /* DNV AHCI */ @@ -309,9 +317,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x19cE), board_ahci_pcs7 }, /* DNV AHCI */ { PCI_VDEVICE(INTEL, 0x19cF), board_ahci_pcs7 }, /* DNV AHCI */ { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */ - { PCI_VDEVICE(INTEL, 0x1c03), board_ahci_mobile }, /* CPT M AHCI */ + { PCI_VDEVICE(INTEL, 0x1c03), 
board_ahci_low_power }, /* CPT M AHCI */ { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */ - { PCI_VDEVICE(INTEL, 0x1c05), board_ahci_mobile }, /* CPT M RAID */ + { PCI_VDEVICE(INTEL, 0x1c05), board_ahci_low_power }, /* CPT M RAID */ { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */ { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */ { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */ @@ -320,29 +328,29 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */ { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */ { PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */ - { PCI_VDEVICE(INTEL, 0x1e03), board_ahci_mobile }, /* Panther M AHCI */ + { PCI_VDEVICE(INTEL, 0x1e03), board_ahci_low_power }, /* Panther M AHCI */ { PCI_VDEVICE(INTEL, 0x1e04), board_ahci }, /* Panther Point RAID */ { PCI_VDEVICE(INTEL, 0x1e05), board_ahci }, /* Panther Point RAID */ { PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */ - { PCI_VDEVICE(INTEL, 0x1e07), board_ahci_mobile }, /* Panther M RAID */ + { PCI_VDEVICE(INTEL, 0x1e07), board_ahci_low_power }, /* Panther M RAID */ { PCI_VDEVICE(INTEL, 0x1e0e), board_ahci }, /* Panther Point RAID */ { PCI_VDEVICE(INTEL, 0x8c02), board_ahci }, /* Lynx Point AHCI */ - { PCI_VDEVICE(INTEL, 0x8c03), board_ahci_mobile }, /* Lynx M AHCI */ + { PCI_VDEVICE(INTEL, 0x8c03), board_ahci_low_power }, /* Lynx M AHCI */ { PCI_VDEVICE(INTEL, 0x8c04), board_ahci }, /* Lynx Point RAID */ - { PCI_VDEVICE(INTEL, 0x8c05), board_ahci_mobile }, /* Lynx M RAID */ + { PCI_VDEVICE(INTEL, 0x8c05), board_ahci_low_power }, /* Lynx M RAID */ { PCI_VDEVICE(INTEL, 0x8c06), board_ahci }, /* Lynx Point RAID */ - { PCI_VDEVICE(INTEL, 0x8c07), board_ahci_mobile }, /* Lynx M RAID */ + { PCI_VDEVICE(INTEL, 0x8c07), board_ahci_low_power }, /* Lynx M RAID */ { PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */ - { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci_mobile }, /* Lynx M RAID */ - { PCI_VDEVICE(INTEL, 0x9c02), board_ahci_mobile }, /* Lynx LP AHCI */ - { PCI_VDEVICE(INTEL, 0x9c03), board_ahci_mobile }, /* Lynx LP AHCI */ - { PCI_VDEVICE(INTEL, 0x9c04), board_ahci_mobile }, /* Lynx LP RAID */ - { PCI_VDEVICE(INTEL, 0x9c05), board_ahci_mobile }, /* Lynx LP RAID */ - { PCI_VDEVICE(INTEL, 0x9c06), board_ahci_mobile }, /* Lynx LP RAID */ - { PCI_VDEVICE(INTEL, 0x9c07), board_ahci_mobile }, /* Lynx LP RAID */ - { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci_mobile }, /* Lynx LP RAID */ - { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci_mobile }, /* Lynx LP RAID */ - { PCI_VDEVICE(INTEL, 0x9dd3), board_ahci_mobile }, /* Cannon Lake PCH-LP AHCI */ + { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci_low_power }, /* Lynx M RAID */ + { PCI_VDEVICE(INTEL, 0x9c02), board_ahci_low_power }, /* Lynx LP AHCI */ + { PCI_VDEVICE(INTEL, 0x9c03), board_ahci_low_power }, /* Lynx LP AHCI */ + { PCI_VDEVICE(INTEL, 0x9c04), board_ahci_low_power }, /* Lynx LP RAID */ + { PCI_VDEVICE(INTEL, 0x9c05), board_ahci_low_power }, /* Lynx LP RAID */ + { PCI_VDEVICE(INTEL, 0x9c06), board_ahci_low_power }, /* Lynx LP RAID */ + { PCI_VDEVICE(INTEL, 0x9c07), board_ahci_low_power }, /* Lynx LP RAID */ + { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci_low_power }, /* Lynx LP RAID */ + { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci_low_power }, /* Lynx LP RAID */ + { PCI_VDEVICE(INTEL, 0x9dd3), board_ahci_low_power }, /* Cannon Lake PCH-LP AHCI */ { PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */ { PCI_VDEVICE(INTEL, 0x1f23), 
board_ahci }, /* Avoton AHCI */ { PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */ @@ -374,26 +382,26 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */ { PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */ { PCI_VDEVICE(INTEL, 0x23a3), board_ahci }, /* Coleto Creek AHCI */ - { PCI_VDEVICE(INTEL, 0x9c83), board_ahci_mobile }, /* Wildcat LP AHCI */ - { PCI_VDEVICE(INTEL, 0x9c85), board_ahci_mobile }, /* Wildcat LP RAID */ - { PCI_VDEVICE(INTEL, 0x9c87), board_ahci_mobile }, /* Wildcat LP RAID */ - { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci_mobile }, /* Wildcat LP RAID */ + { PCI_VDEVICE(INTEL, 0x9c83), board_ahci_low_power }, /* Wildcat LP AHCI */ + { PCI_VDEVICE(INTEL, 0x9c85), board_ahci_low_power }, /* Wildcat LP RAID */ + { PCI_VDEVICE(INTEL, 0x9c87), board_ahci_low_power }, /* Wildcat LP RAID */ + { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci_low_power }, /* Wildcat LP RAID */ { PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */ - { PCI_VDEVICE(INTEL, 0x8c83), board_ahci_mobile }, /* 9 Series M AHCI */ + { PCI_VDEVICE(INTEL, 0x8c83), board_ahci_low_power }, /* 9 Series M AHCI */ { PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */ - { PCI_VDEVICE(INTEL, 0x8c85), board_ahci_mobile }, /* 9 Series M RAID */ + { PCI_VDEVICE(INTEL, 0x8c85), board_ahci_low_power }, /* 9 Series M RAID */ { PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */ - { PCI_VDEVICE(INTEL, 0x8c87), board_ahci_mobile }, /* 9 Series M RAID */ + { PCI_VDEVICE(INTEL, 0x8c87), board_ahci_low_power }, /* 9 Series M RAID */ { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */ - { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci_mobile }, /* 9 Series M RAID */ - { PCI_VDEVICE(INTEL, 0x9d03), board_ahci_mobile }, /* Sunrise LP AHCI */ - { PCI_VDEVICE(INTEL, 0x9d05), board_ahci_mobile }, /* Sunrise LP RAID */ - { PCI_VDEVICE(INTEL, 0x9d07), board_ahci_mobile }, /* Sunrise LP RAID */ + { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci_low_power }, /* 9 Series M RAID */ + { PCI_VDEVICE(INTEL, 0x9d03), board_ahci_low_power }, /* Sunrise LP AHCI */ + { PCI_VDEVICE(INTEL, 0x9d05), board_ahci_low_power }, /* Sunrise LP RAID */ + { PCI_VDEVICE(INTEL, 0x9d07), board_ahci_low_power }, /* Sunrise LP RAID */ { PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */ - { PCI_VDEVICE(INTEL, 0xa103), board_ahci_mobile }, /* Sunrise M AHCI */ + { PCI_VDEVICE(INTEL, 0xa103), board_ahci_low_power }, /* Sunrise M AHCI */ { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */ { PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */ - { PCI_VDEVICE(INTEL, 0xa107), board_ahci_mobile }, /* Sunrise M RAID */ + { PCI_VDEVICE(INTEL, 0xa107), board_ahci_low_power }, /* Sunrise M RAID */ { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */ { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/ @@ -410,13 +418,15 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0xa356), board_ahci }, /* Cannon Lake PCH-H RAID */ { PCI_VDEVICE(INTEL, 0x06d7), board_ahci }, /* Comet Lake-H RAID */ { PCI_VDEVICE(INTEL, 0xa386), board_ahci }, /* Comet Lake PCH-V RAID */ - { PCI_VDEVICE(INTEL, 0x0f22), board_ahci_mobile }, /* Bay Trail AHCI */ - { PCI_VDEVICE(INTEL, 0x0f23), board_ahci_mobile }, /* Bay Trail AHCI */ - { PCI_VDEVICE(INTEL, 0x22a3), board_ahci_mobile }, /* Cherry Tr. 
AHCI */ - { PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_mobile }, /* ApolloLake AHCI */ - { PCI_VDEVICE(INTEL, 0x34d3), board_ahci_mobile }, /* Ice Lake LP AHCI */ - { PCI_VDEVICE(INTEL, 0x02d3), board_ahci_mobile }, /* Comet Lake PCH-U AHCI */ - { PCI_VDEVICE(INTEL, 0x02d7), board_ahci_mobile }, /* Comet Lake PCH RAID */ + { PCI_VDEVICE(INTEL, 0x0f22), board_ahci_low_power }, /* Bay Trail AHCI */ + { PCI_VDEVICE(INTEL, 0x0f23), board_ahci_low_power }, /* Bay Trail AHCI */ + { PCI_VDEVICE(INTEL, 0x22a3), board_ahci_low_power }, /* Cherry Tr. AHCI */ + { PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_low_power }, /* ApolloLake AHCI */ + { PCI_VDEVICE(INTEL, 0x34d3), board_ahci_low_power }, /* Ice Lake LP AHCI */ + { PCI_VDEVICE(INTEL, 0x02d3), board_ahci_low_power }, /* Comet Lake PCH-U AHCI */ + { PCI_VDEVICE(INTEL, 0x02d7), board_ahci_low_power }, /* Comet Lake PCH RAID */ + /* Elkhart Lake IDs 0x4b60 & 0x4b62 https://sata-io.org/product/8803 not tested yet */ + { PCI_VDEVICE(INTEL, 0x4b63), board_ahci_low_power }, /* Elkhart Lake AHCI */ /* JMicron 360/1/3/5/6, match class to avoid IDE function */ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, @@ -442,8 +452,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { board_ahci_al }, /* AMD */ { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */ + { PCI_VDEVICE(AMD, 0x7801), board_ahci_no_debounce_delay }, /* AMD Hudson-2 (AHCI mode) */ { PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */ - { PCI_VDEVICE(AMD, 0x7901), board_ahci_mobile }, /* AMD Green Sardine */ + { PCI_VDEVICE(AMD, 0x7901), board_ahci_low_power }, /* AMD Green Sardine */ /* AMD is using RAID class only for ahci controllers */ { PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci }, @@ -703,7 +714,7 @@ static void ahci_pci_init_controller(struct ata_host *host) /* clear port IRQ */ tmp = readl(port_mmio + PORT_IRQ_STAT); - VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp); + dev_dbg(&pdev->dev, "PORT_IRQ_STAT 0x%x\n", tmp); if (tmp) writel(tmp, port_mmio + PORT_IRQ_STAT); } @@ -1495,7 +1506,6 @@ static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance) u32 irq_stat, irq_masked; unsigned int handled = 1; - VPRINTK("ENTER\n"); hpriv = host->private_data; mmio = hpriv->mmio; irq_stat = readl(mmio + HOST_IRQ_STAT); @@ -1512,7 +1522,6 @@ static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance) irq_stat = readl(mmio + HOST_IRQ_STAT); spin_unlock(&host->lock); } while (irq_stat); - VPRINTK("EXIT\n"); return IRQ_RETVAL(handled); } @@ -1882,6 +1891,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) else dev_info(&pdev->dev, "SSS flag set, parallel bus scan disabled\n"); + if (!(hpriv->cap & HOST_CAP_PART)) + host->flags |= ATA_HOST_NO_PART; + + if (!(hpriv->cap & HOST_CAP_SSC)) + host->flags |= ATA_HOST_NO_SSC; + + if (!(hpriv->cap2 & HOST_CAP2_SDS)) + host->flags |= ATA_HOST_NO_DEVSLP; + if (pi.flags & ATA_FLAG_EM) ahci_reset_em(host); diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c index 5b32df5d33adcf6033a530d21f2bf08da4c700b3..2e4252545fd2712b198fc4b7e7b03459bae6a35b 100644 --- a/drivers/ata/ahci_brcm.c +++ b/drivers/ata/ahci_brcm.c @@ -332,7 +332,7 @@ static struct ata_port_operations ahci_brcm_platform_ops = { static const struct ata_port_info ahci_brcm_port_info = { .flags = AHCI_FLAG_COMMON | ATA_FLAG_NO_DIPM, - .link_flags = ATA_LFLAG_NO_DB_DELAY, + .link_flags = ATA_LFLAG_NO_DEBOUNCE_DELAY, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, 
.port_ops = &ahci_brcm_platform_ops, diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c index 16246c843365ebd632a985f0a232183c497fb1dc..e0f0577ac191c5b2639f0030ca8018f4c8d5c838 100644 --- a/drivers/ata/ahci_xgene.c +++ b/drivers/ata/ahci_xgene.c @@ -588,8 +588,6 @@ static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance) void __iomem *mmio; u32 irq_stat, irq_masked; - VPRINTK("ENTER\n"); - hpriv = host->private_data; mmio = hpriv->mmio; @@ -612,8 +610,6 @@ static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance) spin_unlock(&host->lock); - VPRINTK("EXIT\n"); - return IRQ_RETVAL(rc); } diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index fec2e9754aed20325d7b9ade7f97230a56a2d3eb..e188850f65ff2b5f022c5a060add27c6d9bbf994 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -1199,6 +1199,26 @@ static ssize_t ahci_activity_show(struct ata_device *dev, char *buf) return sprintf(buf, "%d\n", emp->blink_policy); } +static void ahci_port_clear_pending_irq(struct ata_port *ap) +{ + struct ahci_host_priv *hpriv = ap->host->private_data; + void __iomem *port_mmio = ahci_port_base(ap); + u32 tmp; + + /* clear SError */ + tmp = readl(port_mmio + PORT_SCR_ERR); + dev_dbg(ap->host->dev, "PORT_SCR_ERR 0x%x\n", tmp); + writel(tmp, port_mmio + PORT_SCR_ERR); + + /* clear port IRQ */ + tmp = readl(port_mmio + PORT_IRQ_STAT); + dev_dbg(ap->host->dev, "PORT_IRQ_STAT 0x%x\n", tmp); + if (tmp) + writel(tmp, port_mmio + PORT_IRQ_STAT); + + writel(1 << ap->port_no, hpriv->mmio + HOST_IRQ_STAT); +} + static void ahci_port_init(struct device *dev, struct ata_port *ap, int port_no, void __iomem *mmio, void __iomem *port_mmio) @@ -1213,18 +1233,7 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap, if (rc) dev_warn(dev, "%s (%d)\n", emsg, rc); - /* clear SError */ - tmp = readl(port_mmio + PORT_SCR_ERR); - VPRINTK("PORT_SCR_ERR 0x%x\n", tmp); - writel(tmp, port_mmio + PORT_SCR_ERR); - - /* clear port IRQ */ - tmp = readl(port_mmio + PORT_IRQ_STAT); - VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp); - if (tmp) - writel(tmp, port_mmio + PORT_IRQ_STAT); - - writel(1 << port_no, mmio + HOST_IRQ_STAT); + ahci_port_clear_pending_irq(ap); /* mark esata ports */ tmp = readl(port_mmio + PORT_CMD); @@ -1251,10 +1260,10 @@ void ahci_init_controller(struct ata_host *host) } tmp = readl(mmio + HOST_CTL); - VPRINTK("HOST_CTL 0x%x\n", tmp); + dev_dbg(host->dev, "HOST_CTL 0x%x\n", tmp); writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL); tmp = readl(mmio + HOST_CTL); - VPRINTK("HOST_CTL 0x%x\n", tmp); + dev_dbg(host->dev, "HOST_CTL 0x%x\n", tmp); } EXPORT_SYMBOL_GPL(ahci_init_controller); @@ -1554,6 +1563,8 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class, tf.command = ATA_BUSY; ata_tf_to_fis(&tf, 0, 0, d2h_fis); + ahci_port_clear_pending_irq(ap); + rc = sata_link_hardreset(link, timing, deadline, online, ahci_check_ready); @@ -1905,8 +1916,6 @@ static irqreturn_t ahci_multi_irqs_intr_hard(int irq, void *dev_instance) void __iomem *port_mmio = ahci_port_base(ap); u32 status; - VPRINTK("ENTER\n"); - status = readl(port_mmio + PORT_IRQ_STAT); writel(status, port_mmio + PORT_IRQ_STAT); @@ -1914,8 +1923,6 @@ static irqreturn_t ahci_multi_irqs_intr_hard(int irq, void *dev_instance) ahci_handle_port_interrupt(ap, port_mmio, status); spin_unlock(ap->lock); - VPRINTK("EXIT\n"); - return IRQ_HANDLED; } @@ -1932,9 +1939,7 @@ u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked) ap = host->ports[i]; if (ap) { ahci_port_intr(ap); - VPRINTK("port %u\n", i); } 
else { - VPRINTK("port %u (no irq)\n", i); if (ata_ratelimit()) dev_warn(host->dev, "interrupt on disabled port %u\n", i); @@ -1955,8 +1960,6 @@ static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance) void __iomem *mmio; u32 irq_stat, irq_masked; - VPRINTK("ENTER\n"); - hpriv = host->private_data; mmio = hpriv->mmio; @@ -1984,8 +1987,6 @@ static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance) spin_unlock(&host->lock); - VPRINTK("EXIT\n"); - return IRQ_RETVAL(rc); } diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c index c16423e445255d48494b2a2381f71d34650535e7..45656067c547a8b506631be099f3bfb02f29d1ff 100644 --- a/drivers/ata/libata-sata.c +++ b/drivers/ata/libata-sata.c @@ -317,7 +317,7 @@ int sata_link_resume(struct ata_link *link, const unsigned long *params, * immediately after resuming. Delay 200ms before * debouncing. */ - if (!(link->flags & ATA_LFLAG_NO_DB_DELAY)) + if (!(link->flags & ATA_LFLAG_NO_DEBOUNCE_DELAY)) ata_msleep(link->ap, 200); /* is SControl restored correctly? */ @@ -394,10 +394,23 @@ int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy, case ATA_LPM_MED_POWER_WITH_DIPM: case ATA_LPM_MIN_POWER_WITH_PARTIAL: case ATA_LPM_MIN_POWER: - if (ata_link_nr_enabled(link) > 0) - /* no restrictions on LPM transitions */ + if (ata_link_nr_enabled(link) > 0) { + /* assume no restrictions on LPM transitions */ scontrol &= ~(0x7 << 8); - else { + + /* + * If the controller does not support partial, slumber, + * or devsleep, then disallow these transitions. + */ + if (link->ap->host->flags & ATA_HOST_NO_PART) + scontrol |= (0x1 << 8); + + if (link->ap->host->flags & ATA_HOST_NO_SSC) + scontrol |= (0x2 << 8); + + if (link->ap->host->flags & ATA_HOST_NO_DEVSLP) + scontrol |= (0x4 << 8); + } else { /* empty port, power off */ scontrol &= ~0xf; scontrol |= (0x1 << 2); diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index c8e0f8cb9aa326f9bab955d1beee3d5f9b3d923b..ef8c7bfd79a8f7469627c2c8afae08c0228e2023 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -38,6 +38,7 @@ enum sysc_soc { SOC_2420, SOC_2430, SOC_3430, + SOC_AM35, SOC_3630, SOC_4430, SOC_4460, @@ -1113,6 +1114,11 @@ static int sysc_enable_module(struct device *dev) if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_SIDLE_ACT)) { best_mode = SYSC_IDLE_NO; + + /* Clear WAKEUP */ + if (regbits->enwkup_shift >= 0 && + ddata->cfg.sysc_val & BIT(regbits->enwkup_shift)) + reg &= ~BIT(regbits->enwkup_shift); } else { best_mode = fls(ddata->cfg.sidlemodes) - 1; if (best_mode > SYSC_IDLE_MASK) { @@ -1233,6 +1239,13 @@ static int sysc_disable_module(struct device *dev) } } + if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_SIDLE_ACT) { + /* Set WAKEUP */ + if (regbits->enwkup_shift >= 0 && + ddata->cfg.sysc_val & BIT(regbits->enwkup_shift)) + reg |= BIT(regbits->enwkup_shift); + } + reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift); reg |= best_mode << regbits->sidle_shift; if (regbits->autoidle_shift >= 0 && @@ -1496,14 +1509,16 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x38, -ENODEV, 0x00000000, 0xffffffff, SYSC_QUIRK_LEGACY_IDLE), SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff, - SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE), + SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE), SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff, - SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE), + SYSC_QUIRK_SWSUP_SIDLE_ACT | 
SYSC_QUIRK_LEGACY_IDLE), /* Uarts on omap4 and later */ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff, - SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE), + SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE), SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff, - SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE), + SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE), + SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47424e03, 0xffffffff, + SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE), /* Quirks that need to be set based on the module address */ SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff, @@ -1816,7 +1831,7 @@ static void sysc_pre_reset_quirk_dss(struct sysc *ddata) dev_warn(ddata->dev, "%s: timed out %08x !+ %08x\n", __func__, val, irq_mask); - if (sysc_soc->soc == SOC_3430) { + if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35) { /* Clear DSS_SDI_CONTROL */ sysc_write(ddata, 0x44, 0); @@ -2083,8 +2098,7 @@ static int sysc_reset(struct sysc *ddata) } if (ddata->cfg.srst_udelay) - usleep_range(ddata->cfg.srst_udelay, - ddata->cfg.srst_udelay * 2); + fsleep(ddata->cfg.srst_udelay); if (ddata->post_reset_quirk) ddata->post_reset_quirk(ddata); @@ -2958,6 +2972,7 @@ static void ti_sysc_idle(struct work_struct *work) static const struct soc_device_attribute sysc_soc_match[] = { SOC_FLAG("OMAP242*", SOC_2420), SOC_FLAG("OMAP243*", SOC_2430), + SOC_FLAG("AM35*", SOC_AM35), SOC_FLAG("OMAP3[45]*", SOC_3430), SOC_FLAG("OMAP3[67]*", SOC_3630), SOC_FLAG("OMAP443*", SOC_4430), @@ -3145,7 +3160,7 @@ static int sysc_check_active_timer(struct sysc *ddata) * can be dropped if we stop supporting old beagleboard revisions * A to B4 at some point. */ - if (sysc_soc->soc == SOC_3430) + if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35) error = -ENXIO; else error = -EBUSY; diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index d7c440ac465f3329865ff0b999f5322cb85d4587..b3452259d6e0b6e82c5bc723c36d622d1c059d75 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -469,10 +469,17 @@ static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len) int rc; u32 ordinal; unsigned long dur; - - rc = tpm_tis_send_data(chip, buf, len); - if (rc < 0) - return rc; + unsigned int try; + + for (try = 0; try < TPM_RETRY; try++) { + rc = tpm_tis_send_data(chip, buf, len); + if (rc >= 0) + /* Data transfer done successfully */ + break; + else if (rc != -EIO) + /* Data transfer failed, not recoverable */ + return rc; + } /* go and do it */ rc = tpm_tis_write8(priv, TPM_STS(priv->locality), TPM_STS_GO); diff --git a/drivers/clk/tegra/clk-bpmp.c b/drivers/clk/tegra/clk-bpmp.c index a66263b6490d39dcb77fd4e220c14c71cccf81e5..00845044c98efa7d2933b4cfb17500632b9a574b 100644 --- a/drivers/clk/tegra/clk-bpmp.c +++ b/drivers/clk/tegra/clk-bpmp.c @@ -159,7 +159,7 @@ static unsigned long tegra_bpmp_clk_recalc_rate(struct clk_hw *hw, err = tegra_bpmp_clk_transfer(clk->bpmp, &msg); if (err < 0) - return err; + return 0; return response.rate; } diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c index 866201cf5f65ed4899ebe37f00dd185b5580a760..4a9dcaad4a6cbb63954a43e0e4d16dadf145eb6b 100644 --- a/drivers/gpio/gpio-tb10x.c +++ b/drivers/gpio/gpio-tb10x.c @@ -195,7 +195,7 @@ static int tb10x_gpio_probe(struct platform_device *pdev) handle_edge_irq, IRQ_NOREQUEST, IRQ_NOPROBE, IRQ_GC_INIT_MASK_CACHE); if (ret) - return ret; + goto err_remove_domain; gc = 
tb10x_gpio->domain->gc->gc[0]; gc->reg_base = tb10x_gpio->base; @@ -209,6 +209,10 @@ static int tb10x_gpio_probe(struct platform_device *pdev) } return 0; + +err_remove_domain: + irq_domain_remove(tb10x_gpio->domain); + return ret; } static int tb10x_gpio_remove(struct platform_device *pdev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index e25c3387bcf87711e6b0ef3d7c8bdc0fbf918410..7f2adac82e3a671f334239aa58339a70e0c60c06 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -45,7 +45,6 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, struct drm_gem_object *gobj; struct amdgpu_bo *bo; unsigned long size; - int r; gobj = drm_gem_object_lookup(p->filp, data->handle); if (gobj == NULL) @@ -60,23 +59,14 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, drm_gem_object_put(gobj); size = amdgpu_bo_size(bo); - if (size != PAGE_SIZE || (data->offset + 8) > size) { - r = -EINVAL; - goto error_unref; - } + if (size != PAGE_SIZE || data->offset > (size - 8)) + return -EINVAL; - if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { - r = -EINVAL; - goto error_unref; - } + if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) + return -EINVAL; *offset = data->offset; - return 0; - -error_unref: - amdgpu_bo_unref(&bo); - return r; } static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 0bdc83d8994632b23e9a3ec23caedcfdb39bdf40..652ddec188385e1273d4e18356529ba789c164fd 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -6971,6 +6971,13 @@ static void handle_cursor_update(struct drm_plane *plane, attributes.rotation_angle = 0; attributes.attribute_flags.value = 0; + /* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM + * legacy gamma setup. 
+ */ + if (crtc_state->cm_is_degamma_srgb && + adev->dm.dc->caps.color.dpp.gamma_corr) + attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1; + attributes.pitch = attributes.width; if (crtc_state->stream) { diff --git a/drivers/gpu/drm/bridge/tc358762.c b/drivers/gpu/drm/bridge/tc358762.c index 1bfdfc6affafe0b85496e8b7d55770dcd6551312..21c57d34356870a5d1329d911861e74be7aca0c8 100644 --- a/drivers/gpu/drm/bridge/tc358762.c +++ b/drivers/gpu/drm/bridge/tc358762.c @@ -224,7 +224,7 @@ static int tc358762_probe(struct mipi_dsi_device *dsi) dsi->lanes = 1; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | - MIPI_DSI_MODE_LPM; + MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_VIDEO_HSE; ret = tc358762_parse_dt(ctx); if (ret < 0) diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 1c03485676efa33e04376a42480a4a2e1a9aec88..de9fadccf22e509a3680748d56b5aadaa90e701d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -39,13 +39,12 @@ static void exynos_drm_crtc_atomic_disable(struct drm_crtc *crtc, if (exynos_crtc->ops->atomic_disable) exynos_crtc->ops->atomic_disable(exynos_crtc); + spin_lock_irq(&crtc->dev->event_lock); if (crtc->state->event && !crtc->state->active) { - spin_lock_irq(&crtc->dev->event_lock); drm_crtc_send_vblank_event(crtc, crtc->state->event); - spin_unlock_irq(&crtc->dev->event_lock); - crtc->state->event = NULL; } + spin_unlock_irq(&crtc->dev->event_lock); } static int exynos_crtc_atomic_check(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c index 0f5d1e598d75fc5532096c9c2791a4ec2349edc5..1656f3ee0b19347abcbe2c1bda14cfd910b314be 100644 --- a/drivers/gpu/drm/tiny/gm12u320.c +++ b/drivers/gpu/drm/tiny/gm12u320.c @@ -67,10 +67,10 @@ MODULE_PARM_DESC(eco_mode, "Turn on Eco mode (less bright, more silent)"); #define READ_STATUS_SIZE 13 #define MISC_VALUE_SIZE 4 -#define CMD_TIMEOUT msecs_to_jiffies(200) -#define DATA_TIMEOUT msecs_to_jiffies(1000) -#define IDLE_TIMEOUT msecs_to_jiffies(2000) -#define FIRST_FRAME_TIMEOUT msecs_to_jiffies(2000) +#define CMD_TIMEOUT 200 +#define DATA_TIMEOUT 1000 +#define IDLE_TIMEOUT 2000 +#define FIRST_FRAME_TIMEOUT 2000 #define MISC_REQ_GET_SET_ECO_A 0xff #define MISC_REQ_GET_SET_ECO_B 0x35 @@ -399,7 +399,7 @@ static void gm12u320_fb_update_work(struct work_struct *work) * switches back to showing its logo. */ queue_delayed_work(system_long_wq, &gm12u320->fb_update.work, - IDLE_TIMEOUT); + msecs_to_jiffies(IDLE_TIMEOUT)); return; err: diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c index 724bf30600d609539a455648ed3fb24d42431c2c..dac46bc2fafc82bfce37abe72b85bbd11ff73a3c 100644 --- a/drivers/i2c/busses/i2c-aspeed.c +++ b/drivers/i2c/busses/i2c-aspeed.c @@ -693,13 +693,16 @@ static int aspeed_i2c_master_xfer(struct i2c_adapter *adap, if (time_left == 0) { /* - * If timed out and bus is still busy in a multi master - * environment, attempt recovery at here. + * In a multi-master setup, if a timeout occurs, attempt + * recovery. But if the bus is idle, we still need to reset the + * i2c controller to clear the remaining interrupts. 
*/ if (bus->multi_master && (readl(bus->base + ASPEED_I2C_CMD_REG) & ASPEED_I2CD_BUS_BUSY_STS)) aspeed_i2c_recover_bus(bus); + else + aspeed_i2c_reset(bus); /* * If timed out and the state is still pending, drop the pending diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c index f7a7405d4350a10718b7948d546ca31fc11dabdb..8e8688e8de0fbf3979f9f0baa348f21e71002d93 100644 --- a/drivers/i2c/muxes/i2c-demux-pinctrl.c +++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c @@ -243,6 +243,10 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev) props[i].name = devm_kstrdup(&pdev->dev, "status", GFP_KERNEL); props[i].value = devm_kstrdup(&pdev->dev, "ok", GFP_KERNEL); + if (!props[i].name || !props[i].value) { + err = -ENOMEM; + goto err_rollback; + } props[i].length = 3; of_changeset_init(&priv->chan[i].chgset); diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-acpipnpio.h similarity index 99% rename from drivers/input/serio/i8042-x86ia64io.h rename to drivers/input/serio/i8042-acpipnpio.h index 9dcdf21c50bdcd09dc0f610b4414e984f6d8350b..1bd5898abb97dcc12a435d2841b2f1e11e865d2f 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-acpipnpio.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -#ifndef _I8042_X86IA64IO_H -#define _I8042_X86IA64IO_H +#ifndef _I8042_ACPIPNPIO_H +#define _I8042_ACPIPNPIO_H #ifdef CONFIG_X86 @@ -1184,6 +1184,13 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) }, + /* See comment on TUXEDO InfinityBook S17 Gen6 / Clevo NS70MU above */ + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "PD5x_7xPNP_PNR_PNN_PNT"), + }, + .driver_data = (void *)(SERIO_QUIRK_NOAUX) + }, { .matches = { DMI_MATCH(DMI_BOARD_NAME, "X170SM"), @@ -1587,4 +1594,4 @@ static inline void i8042_platform_exit(void) i8042_pnp_exit(); } -#endif /* _I8042_X86IA64IO_H */ +#endif /* _I8042_ACPIPNPIO_H */ diff --git a/drivers/input/serio/i8042.h b/drivers/input/serio/i8042.h index 55381783dc82ddc4ada059d6ce6c7219e46c240d..bf2592fa9a783d17d32e4456b6e454738e5e42d3 100644 --- a/drivers/input/serio/i8042.h +++ b/drivers/input/serio/i8042.h @@ -20,7 +20,7 @@ #elif defined(CONFIG_SPARC) #include "i8042-sparcio.h" #elif defined(CONFIG_X86) || defined(CONFIG_IA64) -#include "i8042-x86ia64io.h" +#include "i8042-acpipnpio.h" #else #include "i8042-io.h" #endif diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 6b5cc3f59fb3924c4e3caf072e2c18433a0df0fa..3619db7e382a0ad31eafb02ffc06ccc793e17cee 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1793,6 +1793,9 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev) int number = rdev->raid_disk; struct raid1_info *p = conf->mirrors + number; + if (unlikely(number >= conf->raid_disks)) + goto abort; + if (rdev != p->rdev) p = conf->mirrors + conf->raid_disks + number; diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c index a380e0920a21fc6677fd5e4879da90da17e8f76b..86e3bb59037129fac97bc836dc134819a18bcd72 100644 --- a/drivers/media/pci/cx23885/cx23885-video.c +++ b/drivers/media/pci/cx23885/cx23885-video.c @@ -412,7 +412,7 @@ static int buffer_prepare(struct vb2_buffer *vb) dev->height >> 1); break; default: - BUG(); + return -EINVAL; /* should not happen */ } dprintk(2, "[%p/%d] buffer_init - %dx%d %dbpp 0x%08x - dma=0x%08lx\n", buf, 
buf->vb.vb2_buf.index, diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c index d6838c8ebd7e859f0c818d79a0a1a1e633032f40..f8dca47904766dbb9bf082c25f8e846454a12ea9 100644 --- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c +++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c @@ -355,7 +355,7 @@ static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q) void __iomem *const base = cio2->base; u8 lanes, csi2bus = q->csi2.port; u8 sensor_vc = SENSOR_VIR_CH_DFLT; - struct cio2_csi2_timing timing; + struct cio2_csi2_timing timing = { 0 }; int i, r; fmt = cio2_find_format(NULL, &q->subdev_fmt.code); diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c index 62d11c6e41d60d2b18b068260160d2e4afc51189..5f7ac2807e5f47c78d32410e464cf38c1c4e485f 100644 --- a/drivers/media/platform/qcom/venus/core.c +++ b/drivers/media/platform/qcom/venus/core.c @@ -21,6 +21,7 @@ #include "core.h" #include "firmware.h" #include "pm_helpers.h" +#include "hfi_venus_io.h" static void venus_event_notify(struct venus_core *core, u32 event) { @@ -210,6 +211,15 @@ static int venus_enumerate_codecs(struct venus_core *core, u32 type) return ret; } +static void venus_assign_register_offsets(struct venus_core *core) +{ + core->vbif_base = core->base + VBIF_BASE; + core->cpu_base = core->base + CPU_BASE; + core->cpu_cs_base = core->base + CPU_CS_BASE; + core->cpu_ic_base = core->base + CPU_IC_BASE; + core->wrapper_base = core->base + WRAPPER_BASE; +} + static int venus_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -276,6 +286,8 @@ static int venus_probe(struct platform_device *pdev) if (ret) goto err_core_put; + venus_assign_register_offsets(core); + ret = v4l2_device_register(dev, &core->v4l2_dev); if (ret) goto err_core_deinit; diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h index aebd4c664bfa1cda2babbfb369fc0078e5a04b66..75d0068033276dcf086e86c6b66959f6329b4222 100644 --- a/drivers/media/platform/qcom/venus/core.h +++ b/drivers/media/platform/qcom/venus/core.h @@ -119,6 +119,11 @@ struct venus_caps { * struct venus_core - holds core parameters valid for all instances * * @base: IO memory base address + * @vbif_base IO memory vbif base address + * @cpu_base IO memory cpu base address + * @cpu_cs_base IO memory cpu_cs base address + * @cpu_ic_base IO memory cpu_ic base address + * @wrapper_base IO memory wrapper base address * @irq: Venus irq * @clks: an array of struct clk pointers * @vcodec0_clks: an array of vcodec0 struct clk pointers @@ -152,6 +157,11 @@ struct venus_caps { */ struct venus_core { void __iomem *base; + void __iomem *vbif_base; + void __iomem *cpu_base; + void __iomem *cpu_cs_base; + void __iomem *cpu_ic_base; + void __iomem *wrapper_base; int irq; struct clk *clks[VIDC_CLKS_NUM_MAX]; struct clk *vcodec0_clks[VIDC_VCODEC_CLKS_NUM_MAX]; @@ -416,6 +426,7 @@ struct venus_inst { #define IS_V1(core) ((core)->res->hfi_version == HFI_VERSION_1XX) #define IS_V3(core) ((core)->res->hfi_version == HFI_VERSION_3XX) #define IS_V4(core) ((core)->res->hfi_version == HFI_VERSION_4XX) +#define IS_V6(core) ((core)->res->hfi_version == HFI_VERSION_6XX) #define ctrl_to_inst(ctrl) \ container_of((ctrl)->handler, struct venus_inst, ctrl_handler) diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c index 1db64a854b88ba63bb24c0ad661f81687b5d5f76..67b9138a7c5fb2b1f96b4140abe383e9e5263799 100644 --- 
a/drivers/media/platform/qcom/venus/firmware.c +++ b/drivers/media/platform/qcom/venus/firmware.c @@ -27,19 +27,19 @@ static void venus_reset_cpu(struct venus_core *core) { u32 fw_size = core->fw.mapped_mem_size; - void __iomem *base = core->base; + void __iomem *wrapper_base = core->wrapper_base; - writel(0, base + WRAPPER_FW_START_ADDR); - writel(fw_size, base + WRAPPER_FW_END_ADDR); - writel(0, base + WRAPPER_CPA_START_ADDR); - writel(fw_size, base + WRAPPER_CPA_END_ADDR); - writel(fw_size, base + WRAPPER_NONPIX_START_ADDR); - writel(fw_size, base + WRAPPER_NONPIX_END_ADDR); - writel(0x0, base + WRAPPER_CPU_CGC_DIS); - writel(0x0, base + WRAPPER_CPU_CLOCK_CONFIG); + writel(0, wrapper_base + WRAPPER_FW_START_ADDR); + writel(fw_size, wrapper_base + WRAPPER_FW_END_ADDR); + writel(0, wrapper_base + WRAPPER_CPA_START_ADDR); + writel(fw_size, wrapper_base + WRAPPER_CPA_END_ADDR); + writel(fw_size, wrapper_base + WRAPPER_NONPIX_START_ADDR); + writel(fw_size, wrapper_base + WRAPPER_NONPIX_END_ADDR); + writel(0x0, wrapper_base + WRAPPER_CPU_CGC_DIS); + writel(0x0, wrapper_base + WRAPPER_CPU_CLOCK_CONFIG); /* Bring ARM9 out of reset */ - writel(0, base + WRAPPER_A9SS_SW_RESET); + writel(0, wrapper_base + WRAPPER_A9SS_SW_RESET); } int venus_set_hw_state(struct venus_core *core, bool resume) @@ -56,7 +56,7 @@ int venus_set_hw_state(struct venus_core *core, bool resume) if (resume) venus_reset_cpu(core); else - writel(1, core->base + WRAPPER_A9SS_SW_RESET); + writel(1, core->wrapper_base + WRAPPER_A9SS_SW_RESET); return 0; } @@ -159,12 +159,12 @@ static int venus_shutdown_no_tz(struct venus_core *core) size_t unmapped; u32 reg; struct device *dev = core->fw.dev; - void __iomem *base = core->base; + void __iomem *wrapper_base = core->wrapper_base; /* Assert the reset to ARM9 */ - reg = readl_relaxed(base + WRAPPER_A9SS_SW_RESET); + reg = readl_relaxed(wrapper_base + WRAPPER_A9SS_SW_RESET); reg |= WRAPPER_A9SS_SW_RESET_BIT; - writel_relaxed(reg, base + WRAPPER_A9SS_SW_RESET); + writel_relaxed(reg, wrapper_base + WRAPPER_A9SS_SW_RESET); /* Make sure reset is asserted before the mapping is removed */ mb(); diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c index 4be4a75ddcb6e463f1dc600c8b9b763ec30c2712..9d939f63d16f426b8995889df103a4d6a990225d 100644 --- a/drivers/media/platform/qcom/venus/hfi_venus.c +++ b/drivers/media/platform/qcom/venus/hfi_venus.c @@ -345,16 +345,6 @@ static void venus_free(struct venus_hfi_device *hdev, struct mem_desc *mem) dma_free_attrs(dev, mem->size, mem->kva, mem->da, mem->attrs); } -static void venus_writel(struct venus_hfi_device *hdev, u32 reg, u32 value) -{ - writel(value, hdev->core->base + reg); -} - -static u32 venus_readl(struct venus_hfi_device *hdev, u32 reg) -{ - return readl(hdev->core->base + reg); -} - static void venus_set_registers(struct venus_hfi_device *hdev) { const struct venus_resources *res = hdev->core->res; @@ -363,12 +353,14 @@ static void venus_set_registers(struct venus_hfi_device *hdev) unsigned int i; for (i = 0; i < count; i++) - venus_writel(hdev, tbl[i].reg, tbl[i].value); + writel(tbl[i].value, hdev->core->base + tbl[i].reg); } static void venus_soft_int(struct venus_hfi_device *hdev) { - venus_writel(hdev, CPU_IC_SOFTINT, BIT(CPU_IC_SOFTINT_H2A_SHIFT)); + void __iomem *cpu_ic_base = hdev->core->cpu_ic_base; + + writel(BIT(CPU_IC_SOFTINT_H2A_SHIFT), cpu_ic_base + CPU_IC_SOFTINT); } static int venus_iface_cmdq_write_nolock(struct venus_hfi_device *hdev, @@ -439,16 +431,25 @@ static int 
venus_boot_core(struct venus_hfi_device *hdev) { struct device *dev = hdev->core->dev; static const unsigned int max_tries = 100; - u32 ctrl_status = 0; + u32 ctrl_status = 0, mask_val; unsigned int count = 0; + void __iomem *cpu_cs_base = hdev->core->cpu_cs_base; + void __iomem *wrapper_base = hdev->core->wrapper_base; int ret = 0; - venus_writel(hdev, VIDC_CTRL_INIT, BIT(VIDC_CTRL_INIT_CTRL_SHIFT)); - venus_writel(hdev, WRAPPER_INTR_MASK, WRAPPER_INTR_MASK_A2HVCODEC_MASK); - venus_writel(hdev, CPU_CS_SCIACMDARG3, 1); + if (IS_V6(hdev->core)) { + mask_val = readl(wrapper_base + WRAPPER_INTR_MASK); + mask_val &= ~(WRAPPER_INTR_MASK_A2HWD_BASK_V6 | + WRAPPER_INTR_MASK_A2HCPU_MASK); + } else { + mask_val = WRAPPER_INTR_MASK_A2HVCODEC_MASK; + } + writel(mask_val, wrapper_base + WRAPPER_INTR_MASK); + writel(1, cpu_cs_base + CPU_CS_SCIACMDARG3); + writel(BIT(VIDC_CTRL_INIT_CTRL_SHIFT), cpu_cs_base + VIDC_CTRL_INIT); while (!ctrl_status && count < max_tries) { - ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0); + ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0); if ((ctrl_status & CPU_CS_SCIACMDARG0_ERROR_STATUS_MASK) == 4) { dev_err(dev, "invalid setting for UC_REGION\n"); ret = -EINVAL; @@ -462,15 +463,20 @@ static int venus_boot_core(struct venus_hfi_device *hdev) if (count >= max_tries) ret = -ETIMEDOUT; + if (IS_V6(hdev->core)) + writel(0x0, cpu_cs_base + CPU_CS_X2RPMH_V6); + return ret; } static u32 venus_hwversion(struct venus_hfi_device *hdev) { struct device *dev = hdev->core->dev; - u32 ver = venus_readl(hdev, WRAPPER_HW_VERSION); + void __iomem *wrapper_base = hdev->core->wrapper_base; + u32 ver; u32 major, minor, step; + ver = readl(wrapper_base + WRAPPER_HW_VERSION); major = ver & WRAPPER_HW_VERSION_MAJOR_VERSION_MASK; major = major >> WRAPPER_HW_VERSION_MAJOR_VERSION_SHIFT; minor = ver & WRAPPER_HW_VERSION_MINOR_VERSION_MASK; @@ -485,6 +491,7 @@ static u32 venus_hwversion(struct venus_hfi_device *hdev) static int venus_run(struct venus_hfi_device *hdev) { struct device *dev = hdev->core->dev; + void __iomem *cpu_cs_base = hdev->core->cpu_cs_base; int ret; /* @@ -493,12 +500,12 @@ static int venus_run(struct venus_hfi_device *hdev) */ venus_set_registers(hdev); - venus_writel(hdev, UC_REGION_ADDR, hdev->ifaceq_table.da); - venus_writel(hdev, UC_REGION_SIZE, SHARED_QSIZE); - venus_writel(hdev, CPU_CS_SCIACMDARG2, hdev->ifaceq_table.da); - venus_writel(hdev, CPU_CS_SCIACMDARG1, 0x01); + writel(hdev->ifaceq_table.da, cpu_cs_base + UC_REGION_ADDR); + writel(SHARED_QSIZE, cpu_cs_base + UC_REGION_SIZE); + writel(hdev->ifaceq_table.da, cpu_cs_base + CPU_CS_SCIACMDARG2); + writel(0x01, cpu_cs_base + CPU_CS_SCIACMDARG1); if (hdev->sfr.da) - venus_writel(hdev, SFR_ADDR, hdev->sfr.da); + writel(hdev->sfr.da, cpu_cs_base + SFR_ADDR); ret = venus_boot_core(hdev); if (ret) { @@ -513,17 +520,18 @@ static int venus_run(struct venus_hfi_device *hdev) static int venus_halt_axi(struct venus_hfi_device *hdev) { - void __iomem *base = hdev->core->base; + void __iomem *wrapper_base = hdev->core->wrapper_base; + void __iomem *vbif_base = hdev->core->vbif_base; struct device *dev = hdev->core->dev; u32 val; int ret; if (IS_V4(hdev->core)) { - val = venus_readl(hdev, WRAPPER_CPU_AXI_HALT); + val = readl(wrapper_base + WRAPPER_CPU_AXI_HALT); val |= WRAPPER_CPU_AXI_HALT_HALT; - venus_writel(hdev, WRAPPER_CPU_AXI_HALT, val); + writel(val, wrapper_base + WRAPPER_CPU_AXI_HALT); - ret = readl_poll_timeout(base + WRAPPER_CPU_AXI_HALT_STATUS, + ret = readl_poll_timeout(wrapper_base + 
WRAPPER_CPU_AXI_HALT_STATUS, val, val & WRAPPER_CPU_AXI_HALT_STATUS_IDLE, POLL_INTERVAL_US, @@ -537,12 +545,12 @@ static int venus_halt_axi(struct venus_hfi_device *hdev) } /* Halt AXI and AXI IMEM VBIF Access */ - val = venus_readl(hdev, VBIF_AXI_HALT_CTRL0); + val = readl(vbif_base + VBIF_AXI_HALT_CTRL0); val |= VBIF_AXI_HALT_CTRL0_HALT_REQ; - venus_writel(hdev, VBIF_AXI_HALT_CTRL0, val); + writel(val, vbif_base + VBIF_AXI_HALT_CTRL0); /* Request for AXI bus port halt */ - ret = readl_poll_timeout(base + VBIF_AXI_HALT_CTRL1, val, + ret = readl_poll_timeout(vbif_base + VBIF_AXI_HALT_CTRL1, val, val & VBIF_AXI_HALT_CTRL1_HALT_ACK, POLL_INTERVAL_US, VBIF_AXI_HALT_ACK_TIMEOUT_US); @@ -1035,19 +1043,21 @@ static irqreturn_t venus_isr(struct venus_core *core) { struct venus_hfi_device *hdev = to_hfi_priv(core); u32 status; + void __iomem *cpu_cs_base = hdev->core->cpu_cs_base; + void __iomem *wrapper_base = hdev->core->wrapper_base; if (!hdev) return IRQ_NONE; - status = venus_readl(hdev, WRAPPER_INTR_STATUS); + status = readl(wrapper_base + WRAPPER_INTR_STATUS); if (status & WRAPPER_INTR_STATUS_A2H_MASK || status & WRAPPER_INTR_STATUS_A2HWD_MASK || status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK) hdev->irq_status = status; - venus_writel(hdev, CPU_CS_A2HSOFTINTCLR, 1); - venus_writel(hdev, WRAPPER_INTR_CLEAR, status); + writel(1, cpu_cs_base + CPU_CS_A2HSOFTINTCLR); + writel(status, wrapper_base + WRAPPER_INTR_CLEAR); return IRQ_WAKE_THREAD; } @@ -1380,6 +1390,7 @@ static int venus_suspend_1xx(struct venus_core *core) { struct venus_hfi_device *hdev = to_hfi_priv(core); struct device *dev = core->dev; + void __iomem *cpu_cs_base = hdev->core->cpu_cs_base; u32 ctrl_status; int ret; @@ -1414,7 +1425,7 @@ static int venus_suspend_1xx(struct venus_core *core) return -EINVAL; } - ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0); + ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0); if (!(ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)) { mutex_unlock(&hdev->lock); return -EINVAL; @@ -1435,10 +1446,12 @@ static int venus_suspend_1xx(struct venus_core *core) static bool venus_cpu_and_video_core_idle(struct venus_hfi_device *hdev) { + void __iomem *wrapper_base = hdev->core->wrapper_base; + void __iomem *cpu_cs_base = hdev->core->cpu_cs_base; u32 ctrl_status, cpu_status; - cpu_status = venus_readl(hdev, WRAPPER_CPU_STATUS); - ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0); + cpu_status = readl(wrapper_base + WRAPPER_CPU_STATUS); + ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0); if (cpu_status & WRAPPER_CPU_STATUS_WFI && ctrl_status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK) @@ -1449,10 +1462,12 @@ static bool venus_cpu_and_video_core_idle(struct venus_hfi_device *hdev) static bool venus_cpu_idle_and_pc_ready(struct venus_hfi_device *hdev) { + void __iomem *wrapper_base = hdev->core->wrapper_base; + void __iomem *cpu_cs_base = hdev->core->cpu_cs_base; u32 ctrl_status, cpu_status; - cpu_status = venus_readl(hdev, WRAPPER_CPU_STATUS); - ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0); + cpu_status = readl(wrapper_base + WRAPPER_CPU_STATUS); + ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0); if (cpu_status & WRAPPER_CPU_STATUS_WFI && ctrl_status & CPU_CS_SCIACMDARG0_PC_READY) @@ -1465,6 +1480,7 @@ static int venus_suspend_3xx(struct venus_core *core) { struct venus_hfi_device *hdev = to_hfi_priv(core); struct device *dev = core->dev; + void __iomem *cpu_cs_base = hdev->core->cpu_cs_base; u32 ctrl_status; bool val; int ret; @@ -1481,7 +1497,7 @@ static int venus_suspend_3xx(struct 
venus_core *core) return -EINVAL; } - ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0); + ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0); if (ctrl_status & CPU_CS_SCIACMDARG0_PC_READY) goto power_off; diff --git a/drivers/media/platform/qcom/venus/hfi_venus_io.h b/drivers/media/platform/qcom/venus/hfi_venus_io.h index 3b52f98478db05c036cd87d7ce8655a8cc872b90..9cad15eac9e800a8608c0dfc690a831a8e4acfce 100644 --- a/drivers/media/platform/qcom/venus/hfi_venus_io.h +++ b/drivers/media/platform/qcom/venus/hfi_venus_io.h @@ -8,27 +8,28 @@ #define VBIF_BASE 0x80000 -#define VBIF_AXI_HALT_CTRL0 (VBIF_BASE + 0x208) -#define VBIF_AXI_HALT_CTRL1 (VBIF_BASE + 0x20c) +#define VBIF_AXI_HALT_CTRL0 0x208 +#define VBIF_AXI_HALT_CTRL1 0x20c #define VBIF_AXI_HALT_CTRL0_HALT_REQ BIT(0) #define VBIF_AXI_HALT_CTRL1_HALT_ACK BIT(0) #define VBIF_AXI_HALT_ACK_TIMEOUT_US 500000 #define CPU_BASE 0xc0000 + #define CPU_CS_BASE (CPU_BASE + 0x12000) #define CPU_IC_BASE (CPU_BASE + 0x1f000) -#define CPU_CS_A2HSOFTINTCLR (CPU_CS_BASE + 0x1c) +#define CPU_CS_A2HSOFTINTCLR 0x1c -#define VIDC_CTRL_INIT (CPU_CS_BASE + 0x48) +#define VIDC_CTRL_INIT 0x48 #define VIDC_CTRL_INIT_RESERVED_BITS31_1_MASK 0xfffffffe #define VIDC_CTRL_INIT_RESERVED_BITS31_1_SHIFT 1 #define VIDC_CTRL_INIT_CTRL_MASK 0x1 #define VIDC_CTRL_INIT_CTRL_SHIFT 0 /* HFI control status */ -#define CPU_CS_SCIACMDARG0 (CPU_CS_BASE + 0x4c) +#define CPU_CS_SCIACMDARG0 0x4c #define CPU_CS_SCIACMDARG0_MASK 0xff #define CPU_CS_SCIACMDARG0_SHIFT 0x0 #define CPU_CS_SCIACMDARG0_ERROR_STATUS_MASK 0xfe @@ -39,42 +40,55 @@ #define CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK BIT(30) /* HFI queue table info */ -#define CPU_CS_SCIACMDARG1 (CPU_CS_BASE + 0x50) +#define CPU_CS_SCIACMDARG1 0x50 /* HFI queue table address */ -#define CPU_CS_SCIACMDARG2 (CPU_CS_BASE + 0x54) +#define CPU_CS_SCIACMDARG2 0x54 /* Venus cpu */ -#define CPU_CS_SCIACMDARG3 (CPU_CS_BASE + 0x58) - -#define SFR_ADDR (CPU_CS_BASE + 0x5c) -#define MMAP_ADDR (CPU_CS_BASE + 0x60) -#define UC_REGION_ADDR (CPU_CS_BASE + 0x64) -#define UC_REGION_SIZE (CPU_CS_BASE + 0x68) - -#define CPU_IC_SOFTINT (CPU_IC_BASE + 0x18) +#define CPU_CS_SCIACMDARG3 0x58 + +#define SFR_ADDR 0x5c +#define MMAP_ADDR 0x60 +#define UC_REGION_ADDR 0x64 +#define UC_REGION_SIZE 0x68 + +#define CPU_CS_H2XSOFTINTEN_V6 0x148 + +#define CPU_CS_X2RPMH_V6 0x168 +#define CPU_CS_X2RPMH_MASK0_BMSK_V6 0x1 +#define CPU_CS_X2RPMH_MASK0_SHFT_V6 0x0 +#define CPU_CS_X2RPMH_MASK1_BMSK_V6 0x2 +#define CPU_CS_X2RPMH_MASK1_SHFT_V6 0x1 +#define CPU_CS_X2RPMH_SWOVERRIDE_BMSK_V6 0x4 +#define CPU_CS_X2RPMH_SWOVERRIDE_SHFT_V6 0x3 + +/* Relative to CPU_IC_BASE */ +#define CPU_IC_SOFTINT 0x18 +#define CPU_IC_SOFTINT_V6 0x150 #define CPU_IC_SOFTINT_H2A_MASK 0x8000 #define CPU_IC_SOFTINT_H2A_SHIFT 0xf +#define CPU_IC_SOFTINT_H2A_SHIFT_V6 0x0 /* Venus wrapper */ #define WRAPPER_BASE 0x000e0000 -#define WRAPPER_HW_VERSION (WRAPPER_BASE + 0x00) +#define WRAPPER_HW_VERSION 0x00 #define WRAPPER_HW_VERSION_MAJOR_VERSION_MASK 0x78000000 #define WRAPPER_HW_VERSION_MAJOR_VERSION_SHIFT 28 #define WRAPPER_HW_VERSION_MINOR_VERSION_MASK 0xfff0000 #define WRAPPER_HW_VERSION_MINOR_VERSION_SHIFT 16 #define WRAPPER_HW_VERSION_STEP_VERSION_MASK 0xffff -#define WRAPPER_CLOCK_CONFIG (WRAPPER_BASE + 0x04) +#define WRAPPER_CLOCK_CONFIG 0x04 -#define WRAPPER_INTR_STATUS (WRAPPER_BASE + 0x0c) +#define WRAPPER_INTR_STATUS 0x0c #define WRAPPER_INTR_STATUS_A2HWD_MASK 0x10 #define WRAPPER_INTR_STATUS_A2HWD_SHIFT 0x4 #define WRAPPER_INTR_STATUS_A2H_MASK 0x4 #define WRAPPER_INTR_STATUS_A2H_SHIFT 
0x2 -#define WRAPPER_INTR_MASK (WRAPPER_BASE + 0x10) +#define WRAPPER_INTR_MASK 0x10 #define WRAPPER_INTR_MASK_A2HWD_BASK 0x10 #define WRAPPER_INTR_MASK_A2HWD_SHIFT 0x4 #define WRAPPER_INTR_MASK_A2HVCODEC_MASK 0x8 @@ -82,41 +96,59 @@ #define WRAPPER_INTR_MASK_A2HCPU_MASK 0x4 #define WRAPPER_INTR_MASK_A2HCPU_SHIFT 0x2 -#define WRAPPER_INTR_CLEAR (WRAPPER_BASE + 0x14) +#define WRAPPER_INTR_STATUS_A2HWD_MASK_V6 0x8 +#define WRAPPER_INTR_MASK_A2HWD_BASK_V6 0x8 + +#define WRAPPER_INTR_CLEAR 0x14 #define WRAPPER_INTR_CLEAR_A2HWD_MASK 0x10 #define WRAPPER_INTR_CLEAR_A2HWD_SHIFT 0x4 #define WRAPPER_INTR_CLEAR_A2H_MASK 0x4 #define WRAPPER_INTR_CLEAR_A2H_SHIFT 0x2 -#define WRAPPER_POWER_STATUS (WRAPPER_BASE + 0x44) -#define WRAPPER_VDEC_VCODEC_POWER_CONTROL (WRAPPER_BASE + 0x48) -#define WRAPPER_VENC_VCODEC_POWER_CONTROL (WRAPPER_BASE + 0x4c) -#define WRAPPER_VDEC_VENC_AHB_BRIDGE_SYNC_RESET (WRAPPER_BASE + 0x64) +#define WRAPPER_POWER_STATUS 0x44 +#define WRAPPER_VDEC_VCODEC_POWER_CONTROL 0x48 +#define WRAPPER_VENC_VCODEC_POWER_CONTROL 0x4c +#define WRAPPER_DEBUG_BRIDGE_LPI_CONTROL_V6 0x54 +#define WRAPPER_DEBUG_BRIDGE_LPI_STATUS_V6 0x58 +#define WRAPPER_VDEC_VENC_AHB_BRIDGE_SYNC_RESET 0x64 -#define WRAPPER_CPU_CLOCK_CONFIG (WRAPPER_BASE + 0x2000) -#define WRAPPER_CPU_AXI_HALT (WRAPPER_BASE + 0x2008) +#define WRAPPER_CPU_CLOCK_CONFIG 0x2000 +#define WRAPPER_CPU_AXI_HALT 0x2008 #define WRAPPER_CPU_AXI_HALT_HALT BIT(16) -#define WRAPPER_CPU_AXI_HALT_STATUS (WRAPPER_BASE + 0x200c) +#define WRAPPER_CPU_AXI_HALT_STATUS 0x200c #define WRAPPER_CPU_AXI_HALT_STATUS_IDLE BIT(24) -#define WRAPPER_CPU_CGC_DIS (WRAPPER_BASE + 0x2010) -#define WRAPPER_CPU_STATUS (WRAPPER_BASE + 0x2014) +#define WRAPPER_CPU_CGC_DIS 0x2010 +#define WRAPPER_CPU_STATUS 0x2014 #define WRAPPER_CPU_STATUS_WFI BIT(0) -#define WRAPPER_SW_RESET (WRAPPER_BASE + 0x3000) -#define WRAPPER_CPA_START_ADDR (WRAPPER_BASE + 0x1020) -#define WRAPPER_CPA_END_ADDR (WRAPPER_BASE + 0x1024) -#define WRAPPER_FW_START_ADDR (WRAPPER_BASE + 0x1028) -#define WRAPPER_FW_END_ADDR (WRAPPER_BASE + 0x102C) -#define WRAPPER_NONPIX_START_ADDR (WRAPPER_BASE + 0x1030) -#define WRAPPER_NONPIX_END_ADDR (WRAPPER_BASE + 0x1034) -#define WRAPPER_A9SS_SW_RESET (WRAPPER_BASE + 0x3000) +#define WRAPPER_SW_RESET 0x3000 +#define WRAPPER_CPA_START_ADDR 0x1020 +#define WRAPPER_CPA_END_ADDR 0x1024 +#define WRAPPER_FW_START_ADDR 0x1028 +#define WRAPPER_FW_END_ADDR 0x102C +#define WRAPPER_NONPIX_START_ADDR 0x1030 +#define WRAPPER_NONPIX_END_ADDR 0x1034 +#define WRAPPER_A9SS_SW_RESET 0x3000 #define WRAPPER_A9SS_SW_RESET_BIT BIT(4) /* Venus 4xx */ -#define WRAPPER_VCODEC0_MMCC_POWER_STATUS (WRAPPER_BASE + 0x90) -#define WRAPPER_VCODEC0_MMCC_POWER_CONTROL (WRAPPER_BASE + 0x94) +#define WRAPPER_VCODEC0_MMCC_POWER_STATUS 0x90 +#define WRAPPER_VCODEC0_MMCC_POWER_CONTROL 0x94 + +#define WRAPPER_VCODEC1_MMCC_POWER_STATUS 0x110 +#define WRAPPER_VCODEC1_MMCC_POWER_CONTROL 0x114 + +/* Venus 6xx */ +#define WRAPPER_CORE_POWER_STATUS_V6 0x80 +#define WRAPPER_CORE_POWER_CONTROL_V6 0x84 + +/* Wrapper TZ 6xx */ +#define WRAPPER_TZ_BASE_V6 0x000c0000 +#define WRAPPER_TZ_CPU_STATUS_V6 0x10 -#define WRAPPER_VCODEC1_MMCC_POWER_STATUS (WRAPPER_BASE + 0x110) -#define WRAPPER_VCODEC1_MMCC_POWER_CONTROL (WRAPPER_BASE + 0x114) +/* Venus AON */ +#define AON_BASE_V6 0x000e0000 +#define AON_WRAPPER_MVP_NOC_LPI_CONTROL 0x00 +#define AON_WRAPPER_MVP_NOC_LPI_STATUS 0x04 #endif diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c index 
f7de02352f1b0b123cca254c7c4240b0e08df1cf..6bf9c5c319de70f47f8edbecea6b049912cc0130 100644 --- a/drivers/media/platform/qcom/venus/pm_helpers.c +++ b/drivers/media/platform/qcom/venus/pm_helpers.c @@ -304,9 +304,9 @@ vcodec_control_v3(struct venus_core *core, u32 session_type, bool enable) void __iomem *ctrl; if (session_type == VIDC_SESSION_TYPE_DEC) - ctrl = core->base + WRAPPER_VDEC_VCODEC_POWER_CONTROL; + ctrl = core->wrapper_base + WRAPPER_VDEC_VCODEC_POWER_CONTROL; else - ctrl = core->base + WRAPPER_VENC_VCODEC_POWER_CONTROL; + ctrl = core->wrapper_base + WRAPPER_VENC_VCODEC_POWER_CONTROL; if (enable) writel(0, ctrl); @@ -381,11 +381,11 @@ static int vcodec_control_v4(struct venus_core *core, u32 coreid, bool enable) int ret; if (coreid == VIDC_CORE_ID_1) { - ctrl = core->base + WRAPPER_VCODEC0_MMCC_POWER_CONTROL; - stat = core->base + WRAPPER_VCODEC0_MMCC_POWER_STATUS; + ctrl = core->wrapper_base + WRAPPER_VCODEC0_MMCC_POWER_CONTROL; + stat = core->wrapper_base + WRAPPER_VCODEC0_MMCC_POWER_STATUS; } else { - ctrl = core->base + WRAPPER_VCODEC1_MMCC_POWER_CONTROL; - stat = core->base + WRAPPER_VCODEC1_MMCC_POWER_STATUS; + ctrl = core->wrapper_base + WRAPPER_VCODEC1_MMCC_POWER_CONTROL; + stat = core->wrapper_base + WRAPPER_VCODEC1_MMCC_POWER_STATUS; } if (enable) { diff --git a/drivers/media/tuners/qt1010.c b/drivers/media/tuners/qt1010.c index 60931367b82ca7abc52dbd9994a5b6e24ea5fcdf..48fc79cd402733aec6d3fdef8a99eb2327cd9eee 100644 --- a/drivers/media/tuners/qt1010.c +++ b/drivers/media/tuners/qt1010.c @@ -345,11 +345,12 @@ static int qt1010_init(struct dvb_frontend *fe) else valptr = &tmpval; - BUG_ON(i >= ARRAY_SIZE(i2c_data) - 1); - - err = qt1010_init_meas1(priv, i2c_data[i+1].reg, - i2c_data[i].reg, - i2c_data[i].val, valptr); + if (i >= ARRAY_SIZE(i2c_data) - 1) + err = -EIO; + else + err = qt1010_init_meas1(priv, i2c_data[i + 1].reg, + i2c_data[i].reg, + i2c_data[i].val, valptr); i++; break; } diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c index b1f69c11c83952b07cb902acf3296a7292acba3a..8cbaab9a608448a0353c280f6be7a958c3c0f643 100644 --- a/drivers/media/usb/dvb-usb-v2/af9035.c +++ b/drivers/media/usb/dvb-usb-v2/af9035.c @@ -269,6 +269,7 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap, struct dvb_usb_device *d = i2c_get_adapdata(adap); struct state *state = d_to_priv(d); int ret; + u32 reg; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; @@ -321,8 +322,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap, ret = -EOPNOTSUPP; } else if ((msg[0].addr == state->af9033_i2c_addr[0]) || (msg[0].addr == state->af9033_i2c_addr[1])) { + if (msg[0].len < 3 || msg[1].len < 1) + return -EOPNOTSUPP; /* demod access via firmware interface */ - u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 | + reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 | msg[0].buf[2]; if (msg[0].addr == state->af9033_i2c_addr[1]) @@ -380,17 +383,16 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap, ret = -EOPNOTSUPP; } else if ((msg[0].addr == state->af9033_i2c_addr[0]) || (msg[0].addr == state->af9033_i2c_addr[1])) { + if (msg[0].len < 3) + return -EOPNOTSUPP; /* demod access via firmware interface */ - u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 | + reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 | msg[0].buf[2]; if (msg[0].addr == state->af9033_i2c_addr[1]) reg |= 0x100000; - ret = (msg[0].len >= 3) ? 
af9035_wr_regs(d, reg, - &msg[0].buf[3], - msg[0].len - 3) - : -EOPNOTSUPP; + ret = af9035_wr_regs(d, reg, &msg[0].buf[3], msg[0].len - 3); } else { /* I2C write */ u8 buf[MAX_XFER_SIZE]; diff --git a/drivers/media/usb/dvb-usb-v2/anysee.c b/drivers/media/usb/dvb-usb-v2/anysee.c index 89a1b204b90c3c2ba46d78995b85fc2b47bce455..3dacf3914d75ba41f37365359e8d371eebf1c2cc 100644 --- a/drivers/media/usb/dvb-usb-v2/anysee.c +++ b/drivers/media/usb/dvb-usb-v2/anysee.c @@ -202,7 +202,7 @@ static int anysee_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, while (i < num) { if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) { - if (msg[i].len > 2 || msg[i+1].len > 60) { + if (msg[i].len != 2 || msg[i + 1].len > 60) { ret = -EOPNOTSUPP; break; } diff --git a/drivers/media/usb/dvb-usb-v2/az6007.c b/drivers/media/usb/dvb-usb-v2/az6007.c index 7524c90f5da61d4fe01b638cfd77047f4f0bb476..6cbfe75791c2123f19693888db45f054c150fe56 100644 --- a/drivers/media/usb/dvb-usb-v2/az6007.c +++ b/drivers/media/usb/dvb-usb-v2/az6007.c @@ -788,6 +788,10 @@ static int az6007_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], if (az6007_xfer_debug) printk(KERN_DEBUG "az6007: I2C W addr=0x%x len=%d\n", addr, msgs[i].len); + if (msgs[i].len < 1) { + ret = -EIO; + goto err; + } req = AZ6007_I2C_WR; index = msgs[i].buf[0]; value = addr | (1 << 8); @@ -802,6 +806,10 @@ static int az6007_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], if (az6007_xfer_debug) printk(KERN_DEBUG "az6007: I2C R addr=0x%x len=%d\n", addr, msgs[i].len); + if (msgs[i].len < 1) { + ret = -EIO; + goto err; + } req = AZ6007_I2C_RD; index = msgs[i].buf[0]; value = addr; diff --git a/drivers/media/usb/dvb-usb-v2/gl861.c b/drivers/media/usb/dvb-usb-v2/gl861.c index 0c434259c36f19034247f90d16151692db4eb356..c71e7b93476dec5c76a692f0b7cefc2b02e8858f 100644 --- a/drivers/media/usb/dvb-usb-v2/gl861.c +++ b/drivers/media/usb/dvb-usb-v2/gl861.c @@ -120,7 +120,7 @@ static int gl861_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], } else if (num == 2 && !(msg[0].flags & I2C_M_RD) && (msg[1].flags & I2C_M_RD)) { /* I2C write + read */ - if (msg[0].len > 1 || msg[1].len > sizeof(ctx->buf)) { + if (msg[0].len != 1 || msg[1].len > sizeof(ctx->buf)) { ret = -EOPNOTSUPP; goto err; } diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c index b6a2436d16e97434a886eb10a822931c6df0d7b7..9af54fcbed1decbfca6686e9dbcdf6252438a2c2 100644 --- a/drivers/media/usb/dvb-usb/af9005.c +++ b/drivers/media/usb/dvb-usb/af9005.c @@ -422,6 +422,10 @@ static int af9005_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], if (ret == 0) ret = 2; } else { + if (msg[0].len < 2) { + ret = -EOPNOTSUPP; + goto unlock; + } /* write one or more registers */ reg = msg[0].buf[0]; addr = msg[0].addr; @@ -431,6 +435,7 @@ static int af9005_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], ret = 1; } +unlock: mutex_unlock(&d->i2c_mutex); return ret; } diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c index 3c4ac998d040f399c2c3989c9ee4422e3192eb20..2290f132a82c81110c61edd12aacbded8ad5e473 100644 --- a/drivers/media/usb/dvb-usb/dw2102.c +++ b/drivers/media/usb/dvb-usb/dw2102.c @@ -128,6 +128,10 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], switch (num) { case 2: + if (msg[0].len < 1) { + num = -EOPNOTSUPP; + break; + } /* read stv0299 register */ value = msg[0].buf[0];/* register */ for (i = 0; i < msg[1].len; i++) { @@ -139,6 +143,10 @@ static int 
dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], case 1: switch (msg[0].addr) { case 0x68: + if (msg[0].len < 2) { + num = -EOPNOTSUPP; + break; + } /* write to stv0299 register */ buf6[0] = 0x2a; buf6[1] = msg[0].buf[0]; @@ -148,6 +156,10 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], break; case 0x60: if (msg[0].flags == 0) { + if (msg[0].len < 4) { + num = -EOPNOTSUPP; + break; + } /* write to tuner pll */ buf6[0] = 0x2c; buf6[1] = 5; @@ -159,6 +171,10 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], dw210x_op_rw(d->udev, 0xb2, 0, 0, buf6, 7, DW210X_WRITE_MSG); } else { + if (msg[0].len < 1) { + num = -EOPNOTSUPP; + break; + } /* read from tuner */ dw210x_op_rw(d->udev, 0xb5, 0, 0, buf6, 1, DW210X_READ_MSG); @@ -166,12 +182,20 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], } break; case (DW2102_RC_QUERY): + if (msg[0].len < 2) { + num = -EOPNOTSUPP; + break; + } dw210x_op_rw(d->udev, 0xb8, 0, 0, buf6, 2, DW210X_READ_MSG); msg[0].buf[0] = buf6[0]; msg[0].buf[1] = buf6[1]; break; case (DW2102_VOLTAGE_CTRL): + if (msg[0].len < 1) { + num = -EOPNOTSUPP; + break; + } buf6[0] = 0x30; buf6[1] = msg[0].buf[0]; dw210x_op_rw(d->udev, 0xb2, 0, 0, diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index a49b8fe2a0982059c272b64b96b4ae4c731d5153..95abd421d0d24671d32cafb7a79b50e58873701f 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -1010,11 +1010,9 @@ int renesas_sdhi_probe(struct platform_device *pdev, host->ops.start_signal_voltage_switch = renesas_sdhi_start_signal_voltage_switch; host->sdcard_irq_setbit_mask = TMIO_STAT_ALWAYS_SET_27; - - if (of_data && of_data->scc_offset) { - priv->scc_ctl = host->ctl + of_data->scc_offset; - host->reset = renesas_sdhi_reset; - } + host->reset = renesas_sdhi_reset; + } else { + host->sdcard_irq_mask_all = TMIO_MASK_ALL; } /* Orginally registers were 16 bit apart, could be 32 or 64 nowadays */ @@ -1070,10 +1068,6 @@ int renesas_sdhi_probe(struct platform_device *pdev, quirks->hs400_calib_table + 1); } - ret = tmio_mmc_host_probe(host); - if (ret < 0) - goto edisclk; - /* Enable tuning iff we have an SCC and a supported mode */ if (of_data && of_data->scc_offset && (host->mmc->caps & MMC_CAP_UHS_SDR104 || @@ -1098,6 +1092,7 @@ int renesas_sdhi_probe(struct platform_device *pdev, if (!hit) dev_warn(&host->pdev->dev, "Unknown clock rate for tuning\n"); + priv->scc_ctl = host->ctl + of_data->scc_offset; host->check_retune = renesas_sdhi_check_scc_error; host->ops.execute_tuning = renesas_sdhi_execute_tuning; host->ops.prepare_hs400_tuning = renesas_sdhi_prepare_hs400_tuning; @@ -1105,6 +1100,8 @@ int renesas_sdhi_probe(struct platform_device *pdev, host->ops.hs400_complete = renesas_sdhi_hs400_complete; } + sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask_all); + num_irqs = platform_irq_count(pdev); if (num_irqs < 0) { ret = num_irqs; @@ -1130,6 +1127,10 @@ int renesas_sdhi_probe(struct platform_device *pdev, goto eirq; } + ret = tmio_mmc_host_probe(host); + if (ret < 0) + goto edisclk; + dev_info(&pdev->dev, "%s base at %pa, max clock rate %u MHz\n", mmc_hostname(host->mmc), &res->start, host->mmc->f_max / 1000000); diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 70f388f83485c2b55da3491e0ecd24cb59905c54..b030f657e253423f2d9c437b74c6b1751397d330 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ 
b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -166,8 +166,8 @@ #define ESDHC_FLAG_HS400 BIT(9) /* * The IP has errata ERR010450 - * uSDHC: Due to the I/O timing limit, for SDR mode, SD card clock can't - * exceed 150MHz, for DDR mode, SD card clock can't exceed 45MHz. + * uSDHC: At 1.8V due to the I/O timing limit, for SDR mode, SD card + * clock can't exceed 150MHz, for DDR mode, SD card clock can't exceed 45MHz. */ #define ESDHC_FLAG_ERR010450 BIT(10) /* The IP supports HS400ES mode */ @@ -873,7 +873,8 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host, | ESDHC_CLOCK_MASK); sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); - if (imx_data->socdata->flags & ESDHC_FLAG_ERR010450) { + if ((imx_data->socdata->flags & ESDHC_FLAG_ERR010450) && + (!(host->quirks2 & SDHCI_QUIRK2_NO_1_8_V))) { unsigned int max_clock; max_clock = imx_data->is_ddr ? 45000000 : 150000000; diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index 9546e542619cbc29b60fb35ec0267fb0bb14f9a8..d6ed5e1f8386e4591068c89de1ae0e3774f8e020 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h @@ -161,6 +161,7 @@ struct tmio_mmc_host { u32 sdio_irq_mask; unsigned int clk_cache; u32 sdcard_irq_setbit_mask; + u32 sdcard_irq_mask_all; spinlock_t lock; /* protect host private data */ unsigned long last_req_ts; diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index ac4e7874a3f13557da45c858c4ae8bda344ad559..abf36acb2641f1010b2d023bd28dcbc9e873b538 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -1158,7 +1158,9 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host) tmio_mmc_reset(_host); _host->sdcard_irq_mask = sd_ctrl_read16_and_16_as_32(_host, CTL_IRQ_MASK); - tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL); + if (!_host->sdcard_irq_mask_all) + _host->sdcard_irq_mask_all = TMIO_MASK_ALL; + tmio_mmc_disable_mmc_irqs(_host, _host->sdcard_irq_mask_all); if (_host->native_hotplug) tmio_mmc_enable_mmc_irqs(_host, @@ -1212,7 +1214,7 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host) cancel_work_sync(&host->done); cancel_delayed_work_sync(&host->delayed_reset_work); tmio_mmc_release_dma(host); - tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL); + tmio_mmc_disable_mmc_irqs(host, host->sdcard_irq_mask_all); if (host->native_hotplug) pm_runtime_put_noidle(&pdev->dev); @@ -1242,7 +1244,7 @@ int tmio_mmc_host_runtime_suspend(struct device *dev) { struct tmio_mmc_host *host = dev_get_drvdata(dev); - tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL); + tmio_mmc_disable_mmc_irqs(host, host->sdcard_irq_mask_all); if (host->clk_cache) host->set_clock(host, 0); diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index e170c545fec50205ee6dcac29d539ef4adc30cac..11d706ff30dd016ae88d106a556eb4dc3f24ef19 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -207,6 +208,8 @@ enum { struct brcmnand_host; +static DEFINE_STATIC_KEY_FALSE(brcmnand_soc_has_ops_key); + struct brcmnand_controller { struct device *dev; struct nand_controller controller; @@ -265,6 +268,7 @@ struct brcmnand_controller { const unsigned int *page_sizes; unsigned int page_size_shift; unsigned int max_oob; + u32 ecc_level_shift; u32 features; /* for low-power standby/resume only */ @@ -589,15 +593,53 @@ enum { INTFC_CTLR_READY = BIT(31), }; 
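The brcmnand changes in this hunk and the ones that follow route every controller register access through nand_readreg()/nand_writereg(), which divert to SoC-provided read_reg/write_reg callbacks when brcmnand_soc_has_ops() is true; a static key keeps the extra branch patched out on the common memory-mapped path. The following is a minimal, self-contained sketch of that dispatch pattern only, not code from the patch; the example_* names and the simplified types are hypothetical.

/*
 * Illustrative sketch, not part of the patch: gate a non-MMIO register
 * path behind a static key so the common MMIO case carries no live branch
 * until the key is enabled (e.g. at probe time when the SoC supplies ops).
 */
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/io.h>

static DEFINE_STATIC_KEY_FALSE(example_soc_has_ops_key);

struct example_soc_ops {
	u32 (*read_reg)(void *ctx, u32 offs);
	void (*write_reg)(void *ctx, u32 val, u32 offs);
};

struct example_ctrl {
	void __iomem *base;
	const struct example_soc_ops *ops;
	void *ctx;
};

static u32 example_readreg(struct example_ctrl *ctrl, u32 offs)
{
	/* Branch is patched out entirely until the key is enabled. */
	if (static_branch_unlikely(&example_soc_has_ops_key))
		return ctrl->ops->read_reg(ctrl->ctx, offs);
	return readl(ctrl->base + offs);
}

static void example_writereg(struct example_ctrl *ctrl, u32 offs, u32 val)
{
	if (static_branch_unlikely(&example_soc_has_ops_key))
		ctrl->ops->write_reg(ctrl->ctx, val, offs);
	else
		writel(val, ctrl->base + offs);
}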
+/*********************************************************************** + * NAND ACC CONTROL bitfield + * + * Some bits have remained constant throughout hardware revision, while + * others have shifted around. + ***********************************************************************/ + +/* Constant for all versions (where supported) */ +enum { + /* See BRCMNAND_HAS_CACHE_MODE */ + ACC_CONTROL_CACHE_MODE = BIT(22), + + /* See BRCMNAND_HAS_PREFETCH */ + ACC_CONTROL_PREFETCH = BIT(23), + + ACC_CONTROL_PAGE_HIT = BIT(24), + ACC_CONTROL_WR_PREEMPT = BIT(25), + ACC_CONTROL_PARTIAL_PAGE = BIT(26), + ACC_CONTROL_RD_ERASED = BIT(27), + ACC_CONTROL_FAST_PGM_RDIN = BIT(28), + ACC_CONTROL_WR_ECC = BIT(30), + ACC_CONTROL_RD_ECC = BIT(31), +}; + +#define ACC_CONTROL_ECC_SHIFT 16 +/* Only for v7.2 */ +#define ACC_CONTROL_ECC_EXT_SHIFT 13 + +static inline bool brcmnand_non_mmio_ops(struct brcmnand_controller *ctrl) +{ + return static_branch_unlikely(&brcmnand_soc_has_ops_key); +} + static inline u32 nand_readreg(struct brcmnand_controller *ctrl, u32 offs) { + if (brcmnand_non_mmio_ops(ctrl)) + return brcmnand_soc_read(ctrl->soc, offs); return brcmnand_readl(ctrl->nand_base + offs); } static inline void nand_writereg(struct brcmnand_controller *ctrl, u32 offs, u32 val) { - brcmnand_writel(val, ctrl->nand_base + offs); + if (brcmnand_non_mmio_ops(ctrl)) + brcmnand_soc_write(ctrl->soc, val, offs); + else + brcmnand_writel(val, ctrl->nand_base + offs); } static int brcmnand_revision_init(struct brcmnand_controller *ctrl) @@ -716,6 +758,12 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl) else if (of_property_read_bool(ctrl->dev->of_node, "brcm,nand-has-wp")) ctrl->features |= BRCMNAND_HAS_WP; + /* v7.2 has different ecc level shift in the acc register */ + if (ctrl->nand_version == 0x0702) + ctrl->ecc_level_shift = ACC_CONTROL_ECC_EXT_SHIFT; + else + ctrl->ecc_level_shift = ACC_CONTROL_ECC_SHIFT; + return 0; } @@ -763,13 +811,18 @@ static inline void brcmnand_rmw_reg(struct brcmnand_controller *ctrl, static inline u32 brcmnand_read_fc(struct brcmnand_controller *ctrl, int word) { + if (brcmnand_non_mmio_ops(ctrl)) + return brcmnand_soc_read(ctrl->soc, BRCMNAND_NON_MMIO_FC_ADDR); return __raw_readl(ctrl->nand_fc + word * 4); } static inline void brcmnand_write_fc(struct brcmnand_controller *ctrl, int word, u32 val) { - __raw_writel(val, ctrl->nand_fc + word * 4); + if (brcmnand_non_mmio_ops(ctrl)) + brcmnand_soc_write(ctrl->soc, val, BRCMNAND_NON_MMIO_FC_ADDR); + else + __raw_writel(val, ctrl->nand_fc + word * 4); } static inline void edu_writel(struct brcmnand_controller *ctrl, @@ -899,30 +952,6 @@ static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl) return 0; } -/*********************************************************************** - * NAND ACC CONTROL bitfield - * - * Some bits have remained constant throughout hardware revision, while - * others have shifted around. 
- ***********************************************************************/ - -/* Constant for all versions (where supported) */ -enum { - /* See BRCMNAND_HAS_CACHE_MODE */ - ACC_CONTROL_CACHE_MODE = BIT(22), - - /* See BRCMNAND_HAS_PREFETCH */ - ACC_CONTROL_PREFETCH = BIT(23), - - ACC_CONTROL_PAGE_HIT = BIT(24), - ACC_CONTROL_WR_PREEMPT = BIT(25), - ACC_CONTROL_PARTIAL_PAGE = BIT(26), - ACC_CONTROL_RD_ERASED = BIT(27), - ACC_CONTROL_FAST_PGM_RDIN = BIT(28), - ACC_CONTROL_WR_ECC = BIT(30), - ACC_CONTROL_RD_ECC = BIT(31), -}; - static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl) { if (ctrl->nand_version == 0x0702) @@ -935,18 +964,15 @@ static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl) return GENMASK(4, 0); } -#define NAND_ACC_CONTROL_ECC_SHIFT 16 -#define NAND_ACC_CONTROL_ECC_EXT_SHIFT 13 - static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl) { u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f; - mask <<= NAND_ACC_CONTROL_ECC_SHIFT; + mask <<= ACC_CONTROL_ECC_SHIFT; /* v7.2 includes additional ECC levels */ - if (ctrl->nand_version >= 0x0702) - mask |= 0x7 << NAND_ACC_CONTROL_ECC_EXT_SHIFT; + if (ctrl->nand_version == 0x0702) + mask |= 0x7 << ACC_CONTROL_ECC_EXT_SHIFT; return mask; } @@ -960,8 +986,8 @@ static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en) if (en) { acc_control |= ecc_flags; /* enable RD/WR ECC */ - acc_control |= host->hwcfg.ecc_level - << NAND_ACC_CONTROL_ECC_SHIFT; + acc_control &= ~brcmnand_ecc_level_mask(ctrl); + acc_control |= host->hwcfg.ecc_level << ctrl->ecc_level_shift; } else { acc_control &= ~ecc_flags; /* disable RD/WR ECC */ acc_control &= ~brcmnand_ecc_level_mask(ctrl); @@ -2515,7 +2541,7 @@ static int brcmnand_set_cfg(struct brcmnand_host *host, tmp &= ~brcmnand_ecc_level_mask(ctrl); tmp &= ~brcmnand_spare_area_mask(ctrl); if (ctrl->nand_version >= 0x0302) { - tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT; + tmp |= cfg->ecc_level << ctrl->ecc_level_shift; tmp |= cfg->spare_area_size; } nand_writereg(ctrl, acc_control_offs, tmp); @@ -2985,6 +3011,12 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc) dev_set_drvdata(dev, ctrl); ctrl->dev = dev; + /* Enable the static key if the soc provides I/O operations indicating + * that a non-memory mapped IO access path must be used + */ + if (brcmnand_soc_has_ops(ctrl->soc)) + static_branch_enable(&brcmnand_soc_has_ops_key); + init_completion(&ctrl->done); init_completion(&ctrl->dma_done); init_completion(&ctrl->edu_done); diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.h b/drivers/mtd/nand/raw/brcmnand/brcmnand.h index eb498fbe505ece3b591e45179f842f870756be21..f1f93d85f50d27f93efb35d4a85eaba9501d8ad4 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.h +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.h @@ -11,12 +11,25 @@ struct platform_device; struct dev_pm_ops; +struct brcmnand_io_ops; + +/* Special register offset constant to intercept a non-MMIO access + * to the flash cache register space. This is intentionally large + * not to overlap with an existing offset. 
+ */ +#define BRCMNAND_NON_MMIO_FC_ADDR 0xffffffff struct brcmnand_soc { bool (*ctlrdy_ack)(struct brcmnand_soc *soc); void (*ctlrdy_set_enabled)(struct brcmnand_soc *soc, bool en); void (*prepare_data_bus)(struct brcmnand_soc *soc, bool prepare, bool is_param); + const struct brcmnand_io_ops *ops; +}; + +struct brcmnand_io_ops { + u32 (*read_reg)(struct brcmnand_soc *soc, u32 offset); + void (*write_reg)(struct brcmnand_soc *soc, u32 val, u32 offset); }; static inline void brcmnand_soc_data_bus_prepare(struct brcmnand_soc *soc, @@ -58,6 +71,22 @@ static inline void brcmnand_writel(u32 val, void __iomem *addr) writel_relaxed(val, addr); } +static inline bool brcmnand_soc_has_ops(struct brcmnand_soc *soc) +{ + return soc && soc->ops && soc->ops->read_reg && soc->ops->write_reg; +} + +static inline u32 brcmnand_soc_read(struct brcmnand_soc *soc, u32 offset) +{ + return soc->ops->read_reg(soc, offset); +} + +static inline void brcmnand_soc_write(struct brcmnand_soc *soc, u32 val, + u32 offset) +{ + soc->ops->write_reg(soc, val, offset); +} + int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc); int brcmnand_remove(struct platform_device *pdev); diff --git a/drivers/net/ethernet/atheros/alx/ethtool.c b/drivers/net/ethernet/atheros/alx/ethtool.c index 2f4eabf652e800ff9a2722a43acac174f8ddc427..51e5aa2c74b3410d9e155533a71d0f52f1223149 100644 --- a/drivers/net/ethernet/atheros/alx/ethtool.c +++ b/drivers/net/ethernet/atheros/alx/ethtool.c @@ -281,9 +281,8 @@ static void alx_get_ethtool_stats(struct net_device *netdev, spin_lock(&alx->stats_lock); alx_update_hw_stats(hw); - BUILD_BUG_ON(sizeof(hw->stats) - offsetof(struct alx_hw_stats, rx_ok) < - ALX_NUM_STATS * sizeof(u64)); - memcpy(data, &hw->stats.rx_ok, ALX_NUM_STATS * sizeof(u64)); + BUILD_BUG_ON(sizeof(hw->stats) != ALX_NUM_STATS * sizeof(u64)); + memcpy(data, &hw->stats, sizeof(hw->stats)); spin_unlock(&alx->stats_lock); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index d8366351cf14afee0e1dbe98f8c2d21663f6cf8b..c67a108c2c07f5a55870c4379fc2a1b811bc9a20 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2404,6 +2404,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) struct rx_cmp_ext *rxcmp1; u32 cp_cons, tmp_raw_cons; u32 raw_cons = cpr->cp_raw_cons; + bool flush_xdp = false; u32 rx_pkts = 0; u8 event = 0; @@ -2438,6 +2439,8 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) rx_pkts++; else if (rc == -EBUSY) /* partial completion */ break; + if (event & BNXT_REDIRECT_EVENT) + flush_xdp = true; } else if (unlikely(TX_CMP_TYPE(txcmp) == CMPL_BASE_TYPE_HWRM_DONE)) { bnxt_hwrm_handler(bp, txcmp); @@ -2457,6 +2460,8 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) if (event & BNXT_AGG_EVENT) bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); + if (flush_xdp) + xdp_do_flush(); if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) { napi_complete_done(napi, rx_pkts); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 47f8f66cf7ecdc538ca287631e2251f055fb6832..deba485ced1bd601d0e6104508aa02ecefb9120b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -3125,8 +3125,13 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) static void hclge_clear_event_cause(struct hclge_dev 
*hdev, u32 event_type, u32 regclr) { +#define HCLGE_IMP_RESET_DELAY 5 + switch (event_type) { case HCLGE_VECTOR0_EVENT_RST: + if (regclr == BIT(HCLGE_VECTOR0_IMPRESET_INT_B)) + mdelay(HCLGE_IMP_RESET_DELAY); + hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr); break; case HCLGE_VECTOR0_EVENT_MBX: @@ -7850,7 +7855,7 @@ static void hclge_update_overflow_flags(struct hclge_vport *vport, if (mac_type == HCLGE_MAC_ADDR_UC) { if (is_all_added) vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE; - else + else if (hclge_is_umv_space_full(vport, true)) vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE; } else { if (is_all_added) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index bb2a79b70c3ae15d03f048ff04d6edeab22db7d0..dfaa34f2473ab947de2d5d8475e3976e75bbb04d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -4332,9 +4332,6 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, /* duplicate request, so just return success */ goto error_pvid; - i40e_vc_reset_vf(vf, true); - /* During reset the VF got a new VSI, so refresh a pointer. */ - vsi = pf->vsi[vf->lan_vsi_idx]; /* Locked once because multiple functions below iterate list */ spin_lock_bh(&vsi->mac_filter_hash_lock); @@ -4420,6 +4417,10 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, */ vf->port_vlan_id = le16_to_cpu(vsi->info.pvid); + i40e_vc_reset_vf(vf, true); + /* During reset the VF got a new VSI, so refresh a pointer. */ + vsi = pf->vsi[vf->lan_vsi_idx]; + ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni); if (ret) { dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n"); diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 721b536ce886192b0bc48c1288ae0cb6888d4ab0..97a77dabed64ceab65ac6939ef702a4efb02efbb 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -2122,7 +2122,12 @@ static const struct ethtool_ops team_ethtool_ops = { static void team_setup_by_port(struct net_device *dev, struct net_device *port_dev) { - dev->header_ops = port_dev->header_ops; + struct team *team = netdev_priv(dev); + + if (port_dev->type == ARPHRD_ETHER) + dev->header_ops = team->header_ops_cache; + else + dev->header_ops = port_dev->header_ops; dev->type = port_dev->type; dev->hard_header_len = port_dev->hard_header_len; dev->needed_headroom = port_dev->needed_headroom; @@ -2169,8 +2174,11 @@ static int team_dev_type_check_change(struct net_device *dev, static void team_setup(struct net_device *dev) { + struct team *team = netdev_priv(dev); + ether_setup(dev); dev->max_mtu = ETH_MAX_MTU; + team->header_ops_cache = dev->header_ops; dev->netdev_ops = &team_netdev_ops; dev->ethtool_ops = &team_ethtool_ops; diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c index cdefb8e2daf1437e98309afdd01ca218a7de554e..05fb76a4e144efb30c1c360f62b5b6df9a808635 100644 --- a/drivers/net/wireless/ath/ath9k/ahb.c +++ b/drivers/net/wireless/ath/ath9k/ahb.c @@ -136,8 +136,8 @@ static int ath_ahb_probe(struct platform_device *pdev) ah = sc->sc_ah; ath9k_hw_name(ah, hw_name, sizeof(hw_name)); - wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n", - hw_name, (unsigned long)mem, irq); + wiphy_info(hw->wiphy, "%s mem=0x%p, irq=%d\n", + hw_name, mem, irq); return 0; diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h index 
fd6aa49adadfee3594a8afd105222100e48eee92..9b00e77a6fc3c5b3a01e291e627c46ec7e4ebadc 100644 --- a/drivers/net/wireless/ath/ath9k/mac.h +++ b/drivers/net/wireless/ath/ath9k/mac.h @@ -113,8 +113,10 @@ struct ath_tx_status { u8 qid; u16 desc_id; u8 tid; - u32 ba_low; - u32 ba_high; + struct_group(ba, + u32 ba_low; + u32 ba_high; + ); u32 evm0; u32 evm1; u32 evm2; diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index cff9af3af38d5cba428ed102a97f7cba0ce20e55..4f90c304d12141ed2eeb8eed0de40bc220780a58 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -994,8 +994,8 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) sc->sc_ah->msi_reg = 0; ath9k_hw_name(sc->sc_ah, hw_name, sizeof(hw_name)); - wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n", - hw_name, (unsigned long)sc->mem, pdev->irq); + wiphy_info(hw->wiphy, "%s mem=0x%p, irq=%d\n", + hw_name, sc->mem, pdev->irq); return 0; diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 6555abf02f18bd9c06443e2b1e1791a1d8cdaac8..84c68aefc171a71cf551d9ed11664f4d03877242 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -421,7 +421,7 @@ static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf, isaggr = bf_isaggr(bf); if (isaggr) { seq_st = ts->ts_seqnum; - memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3); + memcpy(ba, &ts->ba, WME_BA_BMP_SIZE >> 3); } while (bf) { @@ -504,7 +504,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, if (isaggr && txok) { if (ts->ts_flags & ATH9K_TX_BA) { seq_st = ts->ts_seqnum; - memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3); + memcpy(ba, &ts->ba, WME_BA_BMP_SIZE >> 3); } else { /* * AR5416 can become deaf/mute when BA diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index cc830c795b33ca2dc18bce2663bde6d15eb32c90..5b2de4f3fa0bd20bb88d77831faa5a649a923167 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -666,7 +666,7 @@ static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb) struct wil_tid_crypto_rx *c = mc ? &s->group_crypto_rx : &s->tid_crypto_rx[tid]; struct wil_tid_crypto_rx_single *cc = &c->key_id[key_id]; - const u8 *pn = (u8 *)&d->mac.pn_15_0; + const u8 *pn = (u8 *)&d->mac.pn; if (!cc->key_set) { wil_err_ratelimited(wil, diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h index 1f4c8ec75be8785d60a1f6acfcbed7c36ceb8683..0f6f6b62bfc9ae3cd577929601ce14645b57b318 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.h +++ b/drivers/net/wireless/ath/wil6210/txrx.h @@ -343,8 +343,10 @@ struct vring_rx_mac { u32 d0; u32 d1; u16 w4; - u16 pn_15_0; - u32 pn_47_16; + struct_group_attr(pn, __packed, + u16 pn_15_0; + u32 pn_47_16; + ); } __packed; /* Rx descriptor - DMA part diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c index 8ca2ce51c83ef89a75ccbcf85706f6458407d6b9..b23c05f16320b85934105e30f5ba1954b72b75d3 100644 --- a/drivers/net/wireless/ath/wil6210/txrx_edma.c +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c @@ -548,7 +548,7 @@ static int wil_rx_crypto_check_edma(struct wil6210_priv *wil, s = &wil->sta[cid]; c = mc ? 
&s->group_crypto_rx : &s->tid_crypto_rx[tid]; cc = &c->key_id[key_id]; - pn = (u8 *)&st->ext.pn_15_0; + pn = (u8 *)&st->ext.pn; if (!cc->key_set) { wil_err_ratelimited(wil, diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.h b/drivers/net/wireless/ath/wil6210/txrx_edma.h index c736f7413a35f870b79d46b1e89e404c4c1b546c..ee90e225bb050c28c3dce639ca7920082d136a29 100644 --- a/drivers/net/wireless/ath/wil6210/txrx_edma.h +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.h @@ -330,8 +330,10 @@ struct wil_rx_status_extension { u32 d0; u32 d1; __le16 seq_num; /* only lower 12 bits */ - u16 pn_15_0; - u32 pn_47_16; + struct_group_attr(pn, __packed, + u16 pn_15_0; + u32 pn_47_16; + ); } __packed; struct wil_rx_status_extended { diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 255286b2324e2a78fbc4117a90b78f5bb507f260..0d41f172a1dc265ad6350eb41350e33118d39404 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -3619,14 +3619,15 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2, frame_data_len = nla_len(info->attrs[HWSIM_ATTR_FRAME]); frame_data = (void *)nla_data(info->attrs[HWSIM_ATTR_FRAME]); + if (frame_data_len < sizeof(struct ieee80211_hdr_3addr) || + frame_data_len > IEEE80211_MAX_DATA_LEN) + goto err; + /* Allocate new skb here */ skb = alloc_skb(frame_data_len, GFP_KERNEL); if (skb == NULL) goto err; - if (frame_data_len > IEEE80211_MAX_DATA_LEN) - goto err; - /* Copy the data */ skb_put_data(skb, frame_data, frame_data_len); diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c index 97bb87c3676bb769f5b0712784fdec9debc5c1b8..6c60621b6cccb581ea045051fbcb67506ae90c6f 100644 --- a/drivers/net/wireless/marvell/mwifiex/tdls.c +++ b/drivers/net/wireless/marvell/mwifiex/tdls.c @@ -735,6 +735,7 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv, int ret; u16 capab; struct ieee80211_ht_cap *ht_cap; + unsigned int extra; u8 radio, *pos; capab = priv->curr_bss_params.bss_descriptor.cap_info_bitmap; @@ -753,7 +754,10 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv, switch (action_code) { case WLAN_PUB_ACTION_TDLS_DISCOVER_RES: - skb_put(skb, sizeof(mgmt->u.action.u.tdls_discover_resp) + 1); + /* See the layout of 'struct ieee80211_mgmt'. 
*/ + extra = sizeof(mgmt->u.action.u.tdls_discover_resp) + + sizeof(mgmt->u.action.category); + skb_put(skb, extra); mgmt->u.action.category = WLAN_CATEGORY_PUBLIC; mgmt->u.action.u.tdls_discover_resp.action_code = WLAN_PUB_ACTION_TDLS_DISCOVER_RES; @@ -762,8 +766,7 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv, mgmt->u.action.u.tdls_discover_resp.capability = cpu_to_le16(capab); /* move back for addr4 */ - memmove(pos + ETH_ALEN, &mgmt->u.action.category, - sizeof(mgmt->u.action.u.tdls_discover_resp)); + memmove(pos + ETH_ALEN, &mgmt->u.action, extra); /* init address 4 */ eth_broadcast_addr(pos); diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c index f5a33dbe7acb98ac222c70be97cb783ccdd392dc..6ebe72b86266195caf2b89a3a7a8c6ad3bb103c5 100644 --- a/drivers/perf/arm_smmuv3_pmu.c +++ b/drivers/perf/arm_smmuv3_pmu.c @@ -95,6 +95,7 @@ #define SMMU_PMCG_PA_SHIFT 12 #define SMMU_PMCG_EVCNTR_RDONLY BIT(0) +#define SMMU_PMCG_HARDEN_DISABLE BIT(1) static int cpuhp_state_num; @@ -138,6 +139,20 @@ static inline void smmu_pmu_enable(struct pmu *pmu) writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR); } +static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu, + struct perf_event *event, int idx); + +static inline void smmu_pmu_enable_quirk_hip08_09(struct pmu *pmu) +{ + struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu); + unsigned int idx; + + for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters) + smmu_pmu_apply_event_filter(smmu_pmu, smmu_pmu->events[idx], idx); + + smmu_pmu_enable(pmu); +} + static inline void smmu_pmu_disable(struct pmu *pmu) { struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu); @@ -146,6 +161,22 @@ static inline void smmu_pmu_disable(struct pmu *pmu) writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL); } +static inline void smmu_pmu_disable_quirk_hip08_09(struct pmu *pmu) +{ + struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu); + unsigned int idx; + + /* + * The global disable of the PMU sometimes fails to stop the counting. + * Harden this by writing an invalid event type to each used counter + * to forcibly stop counting. + */ + for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters) + writel(0xffff, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx)); + + smmu_pmu_disable(pmu); +} + static inline void smmu_pmu_counter_set_value(struct smmu_pmu *smmu_pmu, u32 idx, u64 value) { @@ -719,7 +750,10 @@ static void smmu_pmu_get_acpi_options(struct smmu_pmu *smmu_pmu) switch (model) { case IORT_SMMU_V3_PMCG_HISI_HIP08: /* HiSilicon Erratum 162001800 */ - smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY; + smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY | SMMU_PMCG_HARDEN_DISABLE; + break; + case IORT_SMMU_V3_PMCG_HISI_HIP09: + smmu_pmu->options |= SMMU_PMCG_HARDEN_DISABLE; break; } @@ -806,6 +840,16 @@ static int smmu_pmu_probe(struct platform_device *pdev) smmu_pmu_get_acpi_options(smmu_pmu); + /* + * For platforms that suffer from this quirk, the PMU disable sometimes fails to + * stop the counters. This leads to inaccurate or erroneous counting. + * Forcibly disable the counters with this quirk handler. 
+ */ + if (smmu_pmu->options & SMMU_PMCG_HARDEN_DISABLE) { + smmu_pmu->pmu.pmu_enable = smmu_pmu_enable_quirk_hip08_09; + smmu_pmu->pmu.pmu_disable = smmu_pmu_disable_quirk_hip08_09; + } + /* Pick one CPU to be the preferred one to use */ smmu_pmu->on_cpu = raw_smp_processor_id(); WARN_ON(irq_set_affinity_hint(smmu_pmu->irq, diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c index bdeb888c0fea4d029b854891da7e5440d4536d29..84ed8286946308f6f0d2ad12706e52718745d2e4 100644 --- a/drivers/platform/x86/intel_scu_ipc.c +++ b/drivers/platform/x86/intel_scu_ipc.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -232,19 +233,15 @@ static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset) /* Wait till scu status is busy */ static inline int busy_loop(struct intel_scu_ipc_dev *scu) { - unsigned long end = jiffies + IPC_TIMEOUT; - - do { - u32 status; - - status = ipc_read_status(scu); - if (!(status & IPC_STATUS_BUSY)) - return (status & IPC_STATUS_ERR) ? -EIO : 0; + u8 status; + int err; - usleep_range(50, 100); - } while (time_before(jiffies, end)); + err = readx_poll_timeout(ipc_read_status, scu, status, !(status & IPC_STATUS_BUSY), + 100, jiffies_to_usecs(IPC_TIMEOUT)); + if (err) + return err; - return -ETIMEDOUT; + return (status & IPC_STATUS_ERR) ? -EIO : 0; } /* Wait till ipc ioc interrupt is received or timeout in 10 HZ */ @@ -252,10 +249,12 @@ static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu) { int status; - if (!wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT)) - return -ETIMEDOUT; + wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT); status = ipc_read_status(scu); + if (status & IPC_STATUS_BUSY) + return -ETIMEDOUT; + if (status & IPC_STATUS_ERR) return -EIO; @@ -267,6 +266,24 @@ static int intel_scu_ipc_check_status(struct intel_scu_ipc_dev *scu) return scu->irq > 0 ? 
ipc_wait_for_interrupt(scu) : busy_loop(scu); } +static struct intel_scu_ipc_dev *intel_scu_ipc_get(struct intel_scu_ipc_dev *scu) +{ + u8 status; + + if (!scu) + scu = ipcdev; + if (!scu) + return ERR_PTR(-ENODEV); + + status = ipc_read_status(scu); + if (status & IPC_STATUS_BUSY) { + dev_dbg(&scu->dev, "device is busy\n"); + return ERR_PTR(-EBUSY); + } + + return scu; +} + /* Read/Write power control(PMIC in Langwell, MSIC in PenWell) registers */ static int pwr_reg_rdwr(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data, u32 count, u32 op, u32 id) @@ -280,11 +297,10 @@ static int pwr_reg_rdwr(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data, memset(cbuf, 0, sizeof(cbuf)); mutex_lock(&ipclock); - if (!scu) - scu = ipcdev; - if (!scu) { + scu = intel_scu_ipc_get(scu); + if (IS_ERR(scu)) { mutex_unlock(&ipclock); - return -ENODEV; + return PTR_ERR(scu); } for (nc = 0; nc < count; nc++, offset += 2) { @@ -439,13 +455,12 @@ int intel_scu_ipc_dev_simple_command(struct intel_scu_ipc_dev *scu, int cmd, int err; mutex_lock(&ipclock); - if (!scu) - scu = ipcdev; - if (!scu) { + scu = intel_scu_ipc_get(scu); + if (IS_ERR(scu)) { mutex_unlock(&ipclock); - return -ENODEV; + return PTR_ERR(scu); } - scu = ipcdev; + cmdval = sub << 12 | cmd; ipc_command(scu, cmdval); err = intel_scu_ipc_check_status(scu); @@ -485,11 +500,10 @@ int intel_scu_ipc_dev_command_with_size(struct intel_scu_ipc_dev *scu, int cmd, return -EINVAL; mutex_lock(&ipclock); - if (!scu) - scu = ipcdev; - if (!scu) { + scu = intel_scu_ipc_get(scu); + if (IS_ERR(scu)) { mutex_unlock(&ipclock); - return -ENODEV; + return PTR_ERR(scu); } memcpy(inbuf, in, inlen); diff --git a/drivers/power/supply/ucs1002_power.c b/drivers/power/supply/ucs1002_power.c index ef673ec3db568a16c4ff99278cde344fe209e45b..332cb50d9fb4fae5281c2f56493644677fd1b50b 100644 --- a/drivers/power/supply/ucs1002_power.c +++ b/drivers/power/supply/ucs1002_power.c @@ -384,7 +384,8 @@ static int ucs1002_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_USB_TYPE: return ucs1002_get_usb_type(info, val); case POWER_SUPPLY_PROP_HEALTH: - return val->intval = info->health; + val->intval = info->health; + return 0; case POWER_SUPPLY_PROP_PRESENT: val->intval = info->present; return 0; diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 2b77cbbcdccb69261c47662fe62dc0fa87ef0f07..f91eee01ce95ee9cabf74099eb107efd6fc2a53f 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -5909,7 +5909,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) phba->hba_debugfs_root, phba, &lpfc_debugfs_op_multixripools); - if (!phba->debug_multixri_pools) { + if (IS_ERR(phba->debug_multixri_pools)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "0527 Cannot create debugfs multixripools\n"); goto debug_failed; @@ -5921,7 +5921,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) debugfs_create_file(name, 0644, phba->hba_debugfs_root, phba, &lpfc_debugfs_ras_log); - if (!phba->debug_ras_log) { + if (IS_ERR(phba->debug_ras_log)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "6148 Cannot create debugfs" " ras_log\n"); @@ -5942,7 +5942,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) debugfs_create_file(name, S_IFREG | 0644, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_lockstat); - if (!phba->debug_lockstat) { + if (IS_ERR(phba->debug_lockstat)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "4610 Can't create debugfs lockstat\n"); goto debug_failed; @@ -6171,7 +6171,7 @@ lpfc_debugfs_initialize(struct lpfc_vport 
*vport) debugfs_create_file(name, 0644, vport->vport_debugfs_root, vport, &lpfc_debugfs_op_scsistat); - if (!vport->debug_scsistat) { + if (IS_ERR(vport->debug_scsistat)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "4611 Cannot create debugfs scsistat\n"); goto debug_failed; @@ -6182,7 +6182,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) debugfs_create_file(name, 0644, vport->vport_debugfs_root, vport, &lpfc_debugfs_op_ioktime); - if (!vport->debug_ioktime) { + if (IS_ERR(vport->debug_ioktime)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "0815 Cannot create debugfs ioktime\n"); goto debug_failed; diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 2d5b1d5978664eb153afb45ece887eb1e8e148f5..f78cb87e46fa4a710c08f999abbdf3275c027cb0 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -2327,7 +2327,7 @@ struct megasas_instance { u32 support_morethan256jbod; /* FW support for more than 256 PD/JBOD */ bool use_seqnum_jbod_fp; /* Added for PD sequence */ bool smp_affinity_enable; - spinlock_t crashdump_lock; + struct mutex crashdump_lock; struct megasas_register_set __iomem *reg_set; u32 __iomem *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY]; diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 45fbf12be40bf67c63fe65c20a6c0162c8038c2c..f05f524aea76b255314e7f4851b0d47be312dd24 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -3239,14 +3239,13 @@ fw_crash_buffer_store(struct device *cdev, struct megasas_instance *instance = (struct megasas_instance *) shost->hostdata; int val = 0; - unsigned long flags; if (kstrtoint(buf, 0, &val) != 0) return -EINVAL; - spin_lock_irqsave(&instance->crashdump_lock, flags); + mutex_lock(&instance->crashdump_lock); instance->fw_crash_buffer_offset = val; - spin_unlock_irqrestore(&instance->crashdump_lock, flags); + mutex_unlock(&instance->crashdump_lock); return strlen(buf); } @@ -3261,24 +3260,23 @@ fw_crash_buffer_show(struct device *cdev, unsigned long dmachunk = CRASH_DMA_BUF_SIZE; unsigned long chunk_left_bytes; unsigned long src_addr; - unsigned long flags; u32 buff_offset; - spin_lock_irqsave(&instance->crashdump_lock, flags); + mutex_lock(&instance->crashdump_lock); buff_offset = instance->fw_crash_buffer_offset; if (!instance->crash_dump_buf || !((instance->fw_crash_state == AVAILABLE) || (instance->fw_crash_state == COPYING))) { dev_err(&instance->pdev->dev, "Firmware crash dump is not available\n"); - spin_unlock_irqrestore(&instance->crashdump_lock, flags); + mutex_unlock(&instance->crashdump_lock); return -EINVAL; } if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) { dev_err(&instance->pdev->dev, "Firmware crash dump offset is out of range\n"); - spin_unlock_irqrestore(&instance->crashdump_lock, flags); + mutex_unlock(&instance->crashdump_lock); return 0; } @@ -3290,7 +3288,7 @@ fw_crash_buffer_show(struct device *cdev, src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] + (buff_offset % dmachunk); memcpy(buf, (void *)src_addr, size); - spin_unlock_irqrestore(&instance->crashdump_lock, flags); + mutex_unlock(&instance->crashdump_lock); return size; } @@ -3315,7 +3313,6 @@ fw_crash_state_store(struct device *cdev, struct megasas_instance *instance = (struct megasas_instance *) shost->hostdata; int val = 0; - unsigned long flags; if (kstrtoint(buf, 0, &val) != 0) return -EINVAL; @@ -3329,9 +3326,9 @@ 
fw_crash_state_store(struct device *cdev, instance->fw_crash_state = val; if ((val == COPIED) || (val == COPY_ERROR)) { - spin_lock_irqsave(&instance->crashdump_lock, flags); + mutex_lock(&instance->crashdump_lock); megasas_free_host_crash_buffer(instance); - spin_unlock_irqrestore(&instance->crashdump_lock, flags); + mutex_unlock(&instance->crashdump_lock); if (val == COPY_ERROR) dev_info(&instance->pdev->dev, "application failed to " "copy Firmware crash dump\n"); @@ -7364,7 +7361,7 @@ static inline void megasas_init_ctrl_params(struct megasas_instance *instance) init_waitqueue_head(&instance->int_cmd_wait_q); init_waitqueue_head(&instance->abort_cmd_wait_q); - spin_lock_init(&instance->crashdump_lock); + mutex_init(&instance->crashdump_lock); spin_lock_init(&instance->mfi_pool_lock); spin_lock_init(&instance->hba_lock); spin_lock_init(&instance->stream_lock); diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 01eb2ade207092d365e5a9d7318bd345ada41ea1..f40db6f40b1dc876a136a22675ca6e099ef715ac 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -255,7 +255,6 @@ static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id) return ret; } -static u32 pm8001_setup_irq(struct pm8001_hba_info *pm8001_ha); static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha); /** @@ -275,13 +274,6 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha, pm8001_dbg(pm8001_ha, INIT, "pm8001_alloc: PHY:%x\n", pm8001_ha->chip->n_phy); - /* Setup Interrupt */ - rc = pm8001_setup_irq(pm8001_ha); - if (rc) { - pm8001_dbg(pm8001_ha, FAIL, - "pm8001_setup_irq failed [ret: %d]\n", rc); - goto err_out; - } /* Request Interrupt */ rc = pm8001_request_irq(pm8001_ha); if (rc) @@ -990,47 +982,38 @@ static u32 pm8001_request_msix(struct pm8001_hba_info *pm8001_ha) } #endif -static u32 pm8001_setup_irq(struct pm8001_hba_info *pm8001_ha) -{ - struct pci_dev *pdev; - - pdev = pm8001_ha->pdev; - -#ifdef PM8001_USE_MSIX - if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) - return pm8001_setup_msix(pm8001_ha); - pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n"); -#endif - return 0; -} - /** * pm8001_request_irq - register interrupt * @pm8001_ha: our ha struct. 
*/ static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha) { - struct pci_dev *pdev; + struct pci_dev *pdev = pm8001_ha->pdev; +#ifdef PM8001_USE_MSIX int rc; - pdev = pm8001_ha->pdev; + if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) { + rc = pm8001_setup_msix(pm8001_ha); + if (rc) { + pm8001_dbg(pm8001_ha, FAIL, + "pm8001_setup_irq failed [ret: %d]\n", rc); + return rc; + } -#ifdef PM8001_USE_MSIX - if (pdev->msix_cap && pci_msi_enabled()) - return pm8001_request_msix(pm8001_ha); - else { - pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n"); - goto intx; + if (pdev->msix_cap && pci_msi_enabled()) + return pm8001_request_msix(pm8001_ha); } + + pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n"); #endif -intx: /* initialize the INT-X interrupt */ pm8001_ha->irq_vector[0].irq_id = 0; pm8001_ha->irq_vector[0].drv_inst = pm8001_ha; - rc = request_irq(pdev->irq, pm8001_interrupt_handler_intx, IRQF_SHARED, - pm8001_ha->name, SHOST_TO_SAS_HA(pm8001_ha->shost)); - return rc; + + return request_irq(pdev->irq, pm8001_interrupt_handler_intx, + IRQF_SHARED, pm8001_ha->name, + SHOST_TO_SAS_HA(pm8001_ha->shost)); } /** diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index d5ebcf7d70ff09a1536ff961b6d1e70424b441f7..7d778bf3fd7227bee1b33f075eab0cb8b13f5d04 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c @@ -116,7 +116,7 @@ qla2x00_dfs_create_rport(scsi_qla_host_t *vha, struct fc_port *fp) sprintf(wwn, "pn-%016llx", wwn_to_u64(fp->port_name)); fp->dfs_rport_dir = debugfs_create_dir(wwn, vha->dfs_rport_root); - if (!fp->dfs_rport_dir) + if (IS_ERR(fp->dfs_rport_dir)) return; if (NVME_TARGET(vha->hw, fp)) debugfs_create_file("dev_loss_tmo", 0600, fp->dfs_rport_dir, @@ -571,14 +571,14 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha) if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) { ha->tgt.dfs_naqp = debugfs_create_file("naqp", 0400, ha->dfs_dir, vha, &dfs_naqp_ops); - if (!ha->tgt.dfs_naqp) { + if (IS_ERR(ha->tgt.dfs_naqp)) { ql_log(ql_log_warn, vha, 0xd011, "Unable to create debugFS naqp node.\n"); goto out; } } vha->dfs_rport_root = debugfs_create_dir("rports", ha->dfs_dir); - if (!vha->dfs_rport_root) { + if (IS_ERR(vha->dfs_rport_root)) { ql_log(ql_log_warn, vha, 0xd012, "Unable to create debugFS rports node.\n"); goto out; diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index 0fa1d57b26fa8546277a5ff0e5719b34033dae2f..3cd671bbb9a41c4cc8fa13550f3cd7d82b3eace9 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -508,102 +508,102 @@ static ssize_t lio_target_nacl_info_show(struct config_item *item, char *page) spin_lock_bh(&se_nacl->nacl_sess_lock); se_sess = se_nacl->nacl_sess; if (!se_sess) { - rb += sprintf(page+rb, "No active iSCSI Session for Initiator" + rb += sysfs_emit_at(page, rb, "No active iSCSI Session for Initiator" " Endpoint: %s\n", se_nacl->initiatorname); } else { sess = se_sess->fabric_sess_ptr; - rb += sprintf(page+rb, "InitiatorName: %s\n", + rb += sysfs_emit_at(page, rb, "InitiatorName: %s\n", sess->sess_ops->InitiatorName); - rb += sprintf(page+rb, "InitiatorAlias: %s\n", + rb += sysfs_emit_at(page, rb, "InitiatorAlias: %s\n", sess->sess_ops->InitiatorAlias); - rb += sprintf(page+rb, + rb += sysfs_emit_at(page, rb, "LIO Session ID: %u ISID: 0x%6ph TSIH: %hu ", sess->sid, sess->isid, sess->tsih); - rb += sprintf(page+rb, "SessionType: %s\n", + rb += sysfs_emit_at(page, rb, "SessionType: %s\n", 
(sess->sess_ops->SessionType) ? "Discovery" : "Normal"); - rb += sprintf(page+rb, "Session State: "); + rb += sysfs_emit_at(page, rb, "Session State: "); switch (sess->session_state) { case TARG_SESS_STATE_FREE: - rb += sprintf(page+rb, "TARG_SESS_FREE\n"); + rb += sysfs_emit_at(page, rb, "TARG_SESS_FREE\n"); break; case TARG_SESS_STATE_ACTIVE: - rb += sprintf(page+rb, "TARG_SESS_STATE_ACTIVE\n"); + rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_ACTIVE\n"); break; case TARG_SESS_STATE_LOGGED_IN: - rb += sprintf(page+rb, "TARG_SESS_STATE_LOGGED_IN\n"); + rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_LOGGED_IN\n"); break; case TARG_SESS_STATE_FAILED: - rb += sprintf(page+rb, "TARG_SESS_STATE_FAILED\n"); + rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_FAILED\n"); break; case TARG_SESS_STATE_IN_CONTINUE: - rb += sprintf(page+rb, "TARG_SESS_STATE_IN_CONTINUE\n"); + rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_IN_CONTINUE\n"); break; default: - rb += sprintf(page+rb, "ERROR: Unknown Session" + rb += sysfs_emit_at(page, rb, "ERROR: Unknown Session" " State!\n"); break; } - rb += sprintf(page+rb, "---------------------[iSCSI Session" + rb += sysfs_emit_at(page, rb, "---------------------[iSCSI Session" " Values]-----------------------\n"); - rb += sprintf(page+rb, " CmdSN/WR : CmdSN/WC : ExpCmdSN" + rb += sysfs_emit_at(page, rb, " CmdSN/WR : CmdSN/WC : ExpCmdSN" " : MaxCmdSN : ITT : TTT\n"); max_cmd_sn = (u32) atomic_read(&sess->max_cmd_sn); - rb += sprintf(page+rb, " 0x%08x 0x%08x 0x%08x 0x%08x" + rb += sysfs_emit_at(page, rb, " 0x%08x 0x%08x 0x%08x 0x%08x" " 0x%08x 0x%08x\n", sess->cmdsn_window, (max_cmd_sn - sess->exp_cmd_sn) + 1, sess->exp_cmd_sn, max_cmd_sn, sess->init_task_tag, sess->targ_xfer_tag); - rb += sprintf(page+rb, "----------------------[iSCSI" + rb += sysfs_emit_at(page, rb, "----------------------[iSCSI" " Connections]-------------------------\n"); spin_lock(&sess->conn_lock); list_for_each_entry(conn, &sess->sess_conn_list, conn_list) { - rb += sprintf(page+rb, "CID: %hu Connection" + rb += sysfs_emit_at(page, rb, "CID: %hu Connection" " State: ", conn->cid); switch (conn->conn_state) { case TARG_CONN_STATE_FREE: - rb += sprintf(page+rb, + rb += sysfs_emit_at(page, rb, "TARG_CONN_STATE_FREE\n"); break; case TARG_CONN_STATE_XPT_UP: - rb += sprintf(page+rb, + rb += sysfs_emit_at(page, rb, "TARG_CONN_STATE_XPT_UP\n"); break; case TARG_CONN_STATE_IN_LOGIN: - rb += sprintf(page+rb, + rb += sysfs_emit_at(page, rb, "TARG_CONN_STATE_IN_LOGIN\n"); break; case TARG_CONN_STATE_LOGGED_IN: - rb += sprintf(page+rb, + rb += sysfs_emit_at(page, rb, "TARG_CONN_STATE_LOGGED_IN\n"); break; case TARG_CONN_STATE_IN_LOGOUT: - rb += sprintf(page+rb, + rb += sysfs_emit_at(page, rb, "TARG_CONN_STATE_IN_LOGOUT\n"); break; case TARG_CONN_STATE_LOGOUT_REQUESTED: - rb += sprintf(page+rb, + rb += sysfs_emit_at(page, rb, "TARG_CONN_STATE_LOGOUT_REQUESTED\n"); break; case TARG_CONN_STATE_CLEANUP_WAIT: - rb += sprintf(page+rb, + rb += sysfs_emit_at(page, rb, "TARG_CONN_STATE_CLEANUP_WAIT\n"); break; default: - rb += sprintf(page+rb, + rb += sysfs_emit_at(page, rb, "ERROR: Unknown Connection State!\n"); break; } - rb += sprintf(page+rb, " Address %pISc %s", &conn->login_sockaddr, + rb += sysfs_emit_at(page, rb, " Address %pISc %s", &conn->login_sockaddr, (conn->network_transport == ISCSI_TCP) ? 
"TCP" : "SCTP"); - rb += sprintf(page+rb, " StatSN: 0x%08x\n", + rb += sysfs_emit_at(page, rb, " StatSN: 0x%08x\n", conn->stat_sn); } spin_unlock(&sess->conn_lock); diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c index 4df47d02b34b40581649310a40e8a91e098a83ee..f727222469b60959f8f3435bcc53d2be980bc49f 100644 --- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c +++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c @@ -1263,19 +1263,14 @@ static void cpm_uart_console_write(struct console *co, const char *s, { struct uart_cpm_port *pinfo = &cpm_uart_ports[co->index]; unsigned long flags; - int nolock = oops_in_progress; - if (unlikely(nolock)) { + if (unlikely(oops_in_progress)) { local_irq_save(flags); - } else { - spin_lock_irqsave(&pinfo->port.lock, flags); - } - - cpm_uart_early_write(pinfo, s, count, true); - - if (unlikely(nolock)) { + cpm_uart_early_write(pinfo, s, count, true); local_irq_restore(flags); } else { + spin_lock_irqsave(&pinfo->port.lock, flags); + cpm_uart_early_write(pinfo, s, count, true); spin_unlock_irqrestore(&pinfo->port.lock, flags); } } diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c index fa66449b39075625e539210b4e0beeb9b0f93dbc..618b0a38c8855ac69f43b25efcbe8252e1cff699 100644 --- a/drivers/usb/gadget/udc/fsl_qe_udc.c +++ b/drivers/usb/gadget/udc/fsl_qe_udc.c @@ -1950,9 +1950,13 @@ static void ch9getstatus(struct qe_udc *udc, u8 request_type, u16 value, } else if ((request_type & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) { /* Get endpoint status */ int pipe = index & USB_ENDPOINT_NUMBER_MASK; - struct qe_ep *target_ep = &udc->eps[pipe]; + struct qe_ep *target_ep; u16 usep; + if (pipe >= USB_MAX_ENDPOINTS) + goto stall; + target_ep = &udc->eps[pipe]; + /* stall if endpoint doesn't exist */ if (!target_ep->ep.desc) goto stall; diff --git a/fs/attr.c b/fs/attr.c index 326a0db3296d7e95df7c4c5da7358241bbeda220..fefcdc7780286ba6f481fb1cc852a42a6f89e29f 100644 --- a/fs/attr.c +++ b/fs/attr.c @@ -309,9 +309,25 @@ int notify_change(struct dentry * dentry, struct iattr * attr, struct inode **de } if ((ia_valid & ATTR_MODE)) { - umode_t amode = attr->ia_mode; + /* + * Don't allow changing the mode of symlinks: + * + * (1) The vfs doesn't take the mode of symlinks into account + * during permission checking. + * (2) This has never worked correctly. Most major filesystems + * did return EOPNOTSUPP due to interactions with POSIX ACLs + * but did still updated the mode of the symlink. + * This inconsistency led system call wrapper providers such + * as libc to block changing the mode of symlinks with + * EOPNOTSUPP already. + * (3) To even do this in the first place one would have to use + * specific file descriptors and quite some effort. 
+ */ + if (S_ISLNK(inode->i_mode)) + return -EOPNOTSUPP; + /* Flag setting protected by i_mutex */ - if (is_sxid(amode)) + if (is_sxid(attr->ia_mode)) inode->i_flags &= ~S_NOSEC; } diff --git a/fs/autofs/waitq.c b/fs/autofs/waitq.c index 5ced859dac5396b47b4bf78fc1599e501cc8a135..dd479198310e82283385d3d862675757cd2293be 100644 --- a/fs/autofs/waitq.c +++ b/fs/autofs/waitq.c @@ -32,8 +32,9 @@ void autofs_catatonic_mode(struct autofs_sb_info *sbi) wq->status = -ENOENT; /* Magic is gone - report failure */ kfree(wq->name.name); wq->name.name = NULL; - wq->wait_ctr--; wake_up_interruptible(&wq->queue); + if (!--wq->wait_ctr) + kfree(wq); wq = nwq; } fput(sbi->pipe); /* Close the pipe */ diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index bcc6848bb6d6a9d2f53f04cba44758cd5c2c3267..67831868ef0de6d6162f18bd190c3b2209680e83 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -529,8 +529,6 @@ struct btrfs_swapfile_pin { int bg_extent_count; }; -bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr); - enum { BTRFS_FS_BARRIER, BTRFS_FS_CLOSING_START, diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 04422d929c232f79a9c6edf9bf555f3045ce8670..bcffe7886530ab29e2e01abd9e69660df611cbb3 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -1173,20 +1173,33 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr) ret = __btrfs_commit_inode_delayed_items(trans, path, curr_node); if (ret) { - btrfs_release_delayed_node(curr_node); - curr_node = NULL; btrfs_abort_transaction(trans, ret); break; } prev_node = curr_node; curr_node = btrfs_next_delayed_node(curr_node); + /* + * See the comment below about releasing path before releasing + * node. If the commit of delayed items was successful the path + * should always be released, but in case of an error, it may + * point to locked extent buffers (a leaf at the very least). + */ + ASSERT(path->nodes[0] == NULL); btrfs_release_delayed_node(prev_node); } + /* + * Release the path to avoid a potential deadlock and lockdep splat when + * releasing the delayed node, as that requires taking the delayed node's + * mutex. If another task starts running delayed items before we take + * the mutex, it will first lock the mutex and then it may try to lock + * the same btree path (leaf). 
+ */ + btrfs_free_path(path); + if (curr_node) btrfs_release_delayed_node(curr_node); - btrfs_free_path(path); trans->block_rsv = block_rsv; return ret; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index bf0f71413826956e442113ce5a2504a1cd6f0caf..019f0925fa73c55223b6ac78467f533f146e66fc 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2512,13 +2512,11 @@ static int validate_super(struct btrfs_fs_info *fs_info, ret = -EINVAL; } - if (btrfs_fs_incompat(fs_info, METADATA_UUID) && - memcmp(fs_info->fs_devices->metadata_uuid, - fs_info->super_copy->metadata_uuid, BTRFS_FSID_SIZE)) { + if (memcmp(fs_info->fs_devices->metadata_uuid, btrfs_sb_fsid_ptr(sb), + BTRFS_FSID_SIZE) != 0) { btrfs_err(fs_info, "superblock metadata_uuid doesn't match metadata uuid of fs_devices: %pU != %pU", - fs_info->super_copy->metadata_uuid, - fs_info->fs_devices->metadata_uuid); + btrfs_sb_fsid_ptr(sb), fs_info->fs_devices->metadata_uuid); ret = -EINVAL; } diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 9cdf50e2484e1ef73722895e1f464cdc0f02966d..4d2f25ebe3048c64e55416fcc3e35b5b3143d520 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -857,6 +857,11 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans, err = -ENOENT; goto out; } else if (WARN_ON(ret)) { + btrfs_print_leaf(path->nodes[0]); + btrfs_err(fs_info, +"extent item not found for insert, bytenr %llu num_bytes %llu parent %llu root_objectid %llu owner %llu offset %llu", + bytenr, num_bytes, parent, root_objectid, owner, + offset); err = -EIO; goto out; } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 0e266772beaefb43cae6fc999d6b0c7d37bc1481..685a375bb6af5f4576f9d3e1e42bf1f94880d148 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -5634,8 +5634,14 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dstv, char *dst = (char *)dstv; unsigned long i = start >> PAGE_SHIFT; - if (check_eb_range(eb, start, len)) + if (check_eb_range(eb, start, len)) { + /* + * Invalid range hit, reset the memory, so callers won't get + * some random garbage for their uninitialzed memory. + */ + memset(dstv, 0, len); return; + } offset = offset_in_page(start); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 63bf68e0b00612033dee7e85fa4f7c3be5cf1ec8..930126b094add390c7dadd01af279a31e76a908d 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2550,6 +2550,13 @@ static int btrfs_search_path_in_tree_user(struct inode *inode, goto out_put; } + /* + * We don't need the path anymore, so release it and + * avoid deadlocks and lockdep warnings in case + * btrfs_iget() needs to lookup the inode from its root + * btree and lock the same leaf. + */ + btrfs_release_path(path); temp_inode = btrfs_iget(sb, key2.objectid, root); if (IS_ERR(temp_inode)) { ret = PTR_ERR(temp_inode); @@ -2569,7 +2576,6 @@ static int btrfs_search_path_in_tree_user(struct inode *inode, goto out_put; } - btrfs_release_path(path); key.objectid = key.offset; key.offset = (u64)-1; dirid = key.objectid; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index b798586263ebb351c09a4b83953f1c90ea9ae323..86c50e0570a5ebf57a9abf46b83b26ebd0e24688 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -718,6 +718,14 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices, return -EINVAL; } +u8 *btrfs_sb_fsid_ptr(struct btrfs_super_block *sb) +{ + bool has_metadata_uuid = (btrfs_super_incompat_flags(sb) & + BTRFS_FEATURE_INCOMPAT_METADATA_UUID); + + return has_metadata_uuid ? 
sb->metadata_uuid : sb->fsid; +} + /* * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices * being created with a disk that has already completed its fsid change. Such diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index f2177263748e8ab16d108e60d9f82931bc960a5c..b2046e92b91438502f88ef76e55379f6c90322c2 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -580,4 +580,7 @@ int btrfs_bg_type_to_factor(u64 flags); const char *btrfs_bg_type_to_raid_name(u64 flags); int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info); +bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr); +u8 *btrfs_sb_fsid_ptr(struct btrfs_super_block *sb); + #endif diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c index 841fa6d9d744b7712f0ad89dbc423a1f5dc2ae28..f1dc11dab0d889317a5dd8181afea9fb483ea447 100644 --- a/fs/ext2/xattr.c +++ b/fs/ext2/xattr.c @@ -694,10 +694,10 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh, /* We need to allocate a new block */ ext2_fsblk_t goal = ext2_group_first_block_no(sb, EXT2_I(inode)->i_block_group); - int block = ext2_new_block(inode, goal, &error); + ext2_fsblk_t block = ext2_new_block(inode, goal, &error); if (error) goto cleanup; - ea_idebug(inode, "creating block %d", block); + ea_idebug(inode, "creating block %lu", block); new_bh = sb_getblk(sb, block); if (unlikely(!new_bh)) { diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 2326449795e6f0e755f32858a4dd750c28ababa0..e3bb368a15590c39fe0f1fd2a35f47e1cb76d29d 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1580,7 +1580,7 @@ struct ext4_sb_info { struct task_struct *s_mmp_tsk; /* record the last minlen when FITRIM is called. */ - atomic_t s_last_trim_minblks; + unsigned long s_last_trim_minblks; /* Reference to checksum algorithm driver via cryptoapi */ struct crypto_shash *s_chksum_driver; diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index ac3feb415dd3f18380df59c280dfdd2b5fcf5f96..cf2018419634925eb1748404aa31878081036c24 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -16,6 +16,7 @@ #include #include #include +#include #include /* @@ -5882,19 +5883,19 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, * @sb: super block for the file system * @start: starting block of the free extent in the alloc. group * @count: number of blocks to TRIM - * @group: alloc. group we are working with * @e4b: ext4 buddy for the group * * Trim "count" blocks starting at "start" in the "group". To assure that no * one will allocate those blocks, mark it as used in buddy bitmap. This must * be called with under the group lock. 
*/ -static int ext4_trim_extent(struct super_block *sb, int start, int count, - ext4_group_t group, struct ext4_buddy *e4b) +static int ext4_trim_extent(struct super_block *sb, + int start, int count, struct ext4_buddy *e4b) __releases(bitlock) __acquires(bitlock) { struct ext4_free_extent ex; + ext4_group_t group = e4b->bd_group; int ret = 0; trace_ext4_trim_extent(sb, group, start, count); @@ -5917,6 +5918,71 @@ __acquires(bitlock) return ret; } +static ext4_grpblk_t ext4_last_grp_cluster(struct super_block *sb, + ext4_group_t grp) +{ + if (grp < ext4_get_groups_count(sb)) + return EXT4_CLUSTERS_PER_GROUP(sb) - 1; + return (ext4_blocks_count(EXT4_SB(sb)->s_es) - + ext4_group_first_block_no(sb, grp) - 1) >> + EXT4_CLUSTER_BITS(sb); +} + +static bool ext4_trim_interrupted(void) +{ + return fatal_signal_pending(current) || freezing(current); +} + +static int ext4_try_to_trim_range(struct super_block *sb, + struct ext4_buddy *e4b, ext4_grpblk_t start, + ext4_grpblk_t max, ext4_grpblk_t minblocks) +{ + ext4_grpblk_t next, count, free_count; + bool set_trimmed = false; + void *bitmap; + + bitmap = e4b->bd_bitmap; + if (start == 0 && max >= ext4_last_grp_cluster(sb, e4b->bd_group)) + set_trimmed = true; + start = max(e4b->bd_info->bb_first_free, start); + count = 0; + free_count = 0; + + while (start <= max) { + start = mb_find_next_zero_bit(bitmap, max + 1, start); + if (start > max) + break; + next = mb_find_next_bit(bitmap, max + 1, start); + + if ((next - start) >= minblocks) { + int ret = ext4_trim_extent(sb, start, next - start, e4b); + + if (ret && ret != -EOPNOTSUPP) + return count; + count += next - start; + } + free_count += next - start; + start = next + 1; + + if (ext4_trim_interrupted()) + return count; + + if (need_resched()) { + ext4_unlock_group(sb, e4b->bd_group); + cond_resched(); + ext4_lock_group(sb, e4b->bd_group); + } + + if ((e4b->bd_info->bb_free - free_count) < minblocks) + break; + } + + if (set_trimmed) + EXT4_MB_GRP_SET_TRIMMED(e4b->bd_info); + + return count; +} + /** * ext4_trim_all_free -- function to trim all free space in alloc. group * @sb: super block for file system @@ -5940,10 +6006,8 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group, ext4_grpblk_t start, ext4_grpblk_t max, ext4_grpblk_t minblocks) { - void *bitmap; - ext4_grpblk_t next, count = 0, free_count = 0; struct ext4_buddy e4b; - int ret = 0; + int ret; trace_ext4_trim_all_free(sb, group, start, max); @@ -5953,58 +6017,20 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group, ret, group); return ret; } - bitmap = e4b.bd_bitmap; ext4_lock_group(sb, group); - if (EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) && - minblocks >= atomic_read(&EXT4_SB(sb)->s_last_trim_minblks)) - goto out; - start = (e4b.bd_info->bb_first_free > start) ? 
- e4b.bd_info->bb_first_free : start; - - while (start <= max) { - start = mb_find_next_zero_bit(bitmap, max + 1, start); - if (start > max) - break; - next = mb_find_next_bit(bitmap, max + 1, start); - - if ((next - start) >= minblocks) { - ret = ext4_trim_extent(sb, start, - next - start, group, &e4b); - if (ret && ret != -EOPNOTSUPP) - break; - ret = 0; - count += next - start; - } - free_count += next - start; - start = next + 1; - - if (fatal_signal_pending(current)) { - count = -ERESTARTSYS; - break; - } - - if (need_resched()) { - ext4_unlock_group(sb, group); - cond_resched(); - ext4_lock_group(sb, group); - } - - if ((e4b.bd_info->bb_free - free_count) < minblocks) - break; - } + if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) || + minblocks < EXT4_SB(sb)->s_last_trim_minblks) + ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks); + else + ret = 0; - if (!ret) { - ret = count; - EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info); - } -out: ext4_unlock_group(sb, group); ext4_mb_unload_buddy(&e4b); ext4_debug("trimmed %d blocks in the group %d\n", - count, group); + ret, group); return ret; } @@ -6049,7 +6075,7 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) if (minlen > EXT4_CLUSTERS_PER_GROUP(sb)) goto out; } - if (end >= max_blks) + if (end >= max_blks - 1) end = max_blks - 1; if (end <= first_data_blk) goto out; @@ -6066,6 +6092,8 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) end = EXT4_CLUSTERS_PER_GROUP(sb) - 1; for (group = first_group; group <= last_group; group++) { + if (ext4_trim_interrupted()) + break; grp = ext4_get_group_info(sb, group); if (!grp) continue; @@ -6084,10 +6112,9 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) */ if (group == last_group) end = last_cluster; - if (grp->bb_free >= minlen) { cnt = ext4_trim_all_free(sb, group, first_cluster, - end, minlen); + end, minlen); if (cnt < 0) { ret = cnt; break; @@ -6103,7 +6130,7 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) } if (!ret) - atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen); + EXT4_SB(sb)->s_last_trim_minblks = minlen; out: range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits; @@ -6132,8 +6159,7 @@ ext4_mballoc_query_range( ext4_lock_group(sb, group); - start = (e4b.bd_info->bb_first_free > start) ? 
- e4b.bd_info->bb_first_free : start; + start = max(e4b.bd_info->bb_first_free, start); if (end >= EXT4_CLUSTERS_PER_GROUP(sb)) end = EXT4_CLUSTERS_PER_GROUP(sb) - 1; diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 4644d7bb93d5f879472843fbb59131c465dfc0e5..2145f9940f3006d897e0698f6b98889d2c7b2302 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -339,17 +339,17 @@ static struct ext4_dir_entry_tail *get_dirent_tail(struct inode *inode, struct buffer_head *bh) { struct ext4_dir_entry_tail *t; + int blocksize = EXT4_BLOCK_SIZE(inode->i_sb); #ifdef PARANOID struct ext4_dir_entry *d, *top; d = (struct ext4_dir_entry *)bh->b_data; top = (struct ext4_dir_entry *)(bh->b_data + - (EXT4_BLOCK_SIZE(inode->i_sb) - - sizeof(struct ext4_dir_entry_tail))); - while (d < top && d->rec_len) + (blocksize - sizeof(struct ext4_dir_entry_tail))); + while (d < top && ext4_rec_len_from_disk(d->rec_len, blocksize)) d = (struct ext4_dir_entry *)(((void *)d) + - le16_to_cpu(d->rec_len)); + ext4_rec_len_from_disk(d->rec_len, blocksize)); if (d != top) return NULL; @@ -360,7 +360,8 @@ static struct ext4_dir_entry_tail *get_dirent_tail(struct inode *inode, #endif if (t->det_reserved_zero1 || - le16_to_cpu(t->det_rec_len) != sizeof(struct ext4_dir_entry_tail) || + (ext4_rec_len_from_disk(t->det_rec_len, blocksize) != + sizeof(struct ext4_dir_entry_tail)) || t->det_reserved_zero2 || t->det_reserved_ft != EXT4_FT_DIR_CSUM) return NULL; @@ -441,13 +442,14 @@ static struct dx_countlimit *get_dx_countlimit(struct inode *inode, struct ext4_dir_entry *dp; struct dx_root_info *root; int count_offset; + int blocksize = EXT4_BLOCK_SIZE(inode->i_sb); + unsigned int rlen = ext4_rec_len_from_disk(dirent->rec_len, blocksize); - if (le16_to_cpu(dirent->rec_len) == EXT4_BLOCK_SIZE(inode->i_sb)) + if (rlen == blocksize) count_offset = 8; - else if (le16_to_cpu(dirent->rec_len) == 12) { + else if (rlen == 12) { dp = (struct ext4_dir_entry *)(((void *)dirent) + 12); - if (le16_to_cpu(dp->rec_len) != - EXT4_BLOCK_SIZE(inode->i_sb) - 12) + if (ext4_rec_len_from_disk(dp->rec_len, blocksize) != blocksize - 12) return NULL; root = (struct dx_root_info *)(((void *)dp + 12)); if (root->reserved_zero || @@ -1256,6 +1258,7 @@ static int dx_make_map(struct inode *dir, struct buffer_head *bh, unsigned int buflen = bh->b_size; char *base = bh->b_data; struct dx_hash_info h = *hinfo; + int blocksize = EXT4_BLOCK_SIZE(dir->i_sb); if (ext4_has_metadata_csum(dir->i_sb)) buflen -= sizeof(struct ext4_dir_entry_tail); @@ -1269,11 +1272,12 @@ static int dx_make_map(struct inode *dir, struct buffer_head *bh, map_tail--; map_tail->hash = h.hash; map_tail->offs = ((char *) de - base)>>2; - map_tail->size = le16_to_cpu(de->rec_len); + map_tail->size = ext4_rec_len_from_disk(de->rec_len, + blocksize); count++; cond_resched(); } - de = ext4_next_entry(de, dir->i_sb->s_blocksize); + de = ext4_next_entry(de, blocksize); } return count; } diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c index cef3303d949955cfc012c7af74534d76fde57722..a9c078fc2302a0512c9b3f4d8d7b52a1ee233d38 100644 --- a/fs/jfs/jfs_dmap.c +++ b/fs/jfs/jfs_dmap.c @@ -269,6 +269,7 @@ int dbUnmount(struct inode *ipbmap, int mounterror) /* free the memory for the in-memory bmap. 
*/ kfree(bmp); + JFS_SBI(ipbmap->i_sb)->bmap = NULL; return (0); } diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c index 937ca07b58b1d30eaf600832070c9a7406bd9c2e..67c67604b8c85eaf154b0db61715439aee2c15fe 100644 --- a/fs/jfs/jfs_imap.c +++ b/fs/jfs/jfs_imap.c @@ -195,6 +195,7 @@ int diUnmount(struct inode *ipimap, int mounterror) * free in-memory control structure */ kfree(imap); + JFS_IP(ipimap)->i_imap = NULL; return (0); } diff --git a/fs/locks.c b/fs/locks.c index f0acf5eba0390bc509717e7fc54951bc8c6f9be9..4e4b36c330f96e7859379b9d44750120729c5278 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -1339,6 +1339,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request, out: spin_unlock(&ctx->flc_lock); percpu_up_read(&file_rwsem); + trace_posix_lock_inode(inode, request, error); /* * Free any unused locks. */ @@ -1347,7 +1348,6 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request, if (new_fl2) locks_free_lock(new_fl2); locks_dispose_list(&dispose); - trace_posix_lock_inode(inode, request, error); return error; } diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 018af6ec97b4002eb2d5a1bd8da5e63b07005bea..5d86ffa72ceab2c2a82a686319021b8c44fd6351 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -525,7 +525,9 @@ static void nfs_direct_add_page_head(struct list_head *list, kref_get(&head->wb_kref); } -static void nfs_direct_join_group(struct list_head *list, struct inode *inode) +static void nfs_direct_join_group(struct list_head *list, + struct nfs_commit_info *cinfo, + struct inode *inode) { struct nfs_page *req, *subreq; @@ -547,7 +549,7 @@ static void nfs_direct_join_group(struct list_head *list, struct inode *inode) nfs_release_request(subreq); } } while ((subreq = subreq->wb_this_page) != req); - nfs_join_page_group(req, inode); + nfs_join_page_group(req, cinfo, inode); } } @@ -573,7 +575,7 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq) nfs_init_cinfo_from_dreq(&cinfo, dreq); nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo); - nfs_direct_join_group(&reqs, dreq->inode); + nfs_direct_join_group(&reqs, &cinfo, dreq->inode); dreq->count = 0; dreq->max_count = 0; diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index a8a02081942d2d8e1fa082a1a45b2cb8477a98ac..e4f2820ba5a596eb81f09770a965f5770ddd7e9a 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -1240,6 +1240,7 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg, case -EPFNOSUPPORT: case -EPROTONOSUPPORT: case -EOPNOTSUPP: + case -EINVAL: case -ECONNREFUSED: case -ECONNRESET: case -EHOSTDOWN: diff --git a/fs/nfs/write.c b/fs/nfs/write.c index dc08a0c02f09517b5dcc58f7abadba8c549f8cda..d3cd099ffb6e17702dd35a21cc317a2c70243e45 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -58,7 +58,8 @@ static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops; static const struct nfs_commit_completion_ops nfs_commit_completion_ops; static const struct nfs_rw_ops nfs_rw_write_ops; static void nfs_inode_remove_request(struct nfs_page *req); -static void nfs_clear_request_commit(struct nfs_page *req); +static void nfs_clear_request_commit(struct nfs_commit_info *cinfo, + struct nfs_page *req); static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo, struct inode *inode); static struct nfs_page * @@ -500,8 +501,8 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list, * the (former) group. 
All subrequests are removed from any write or commit * lists, unlinked from the group and destroyed. */ -void -nfs_join_page_group(struct nfs_page *head, struct inode *inode) +void nfs_join_page_group(struct nfs_page *head, struct nfs_commit_info *cinfo, + struct inode *inode) { struct nfs_page *subreq; struct nfs_page *destroy_list = NULL; @@ -531,7 +532,7 @@ nfs_join_page_group(struct nfs_page *head, struct inode *inode) * Commit list removal accounting is done after locks are dropped */ subreq = head; do { - nfs_clear_request_commit(subreq); + nfs_clear_request_commit(cinfo, subreq); subreq = subreq->wb_this_page; } while (subreq != head); @@ -565,8 +566,10 @@ nfs_lock_and_join_requests(struct page *page) { struct inode *inode = page_file_mapping(page)->host; struct nfs_page *head; + struct nfs_commit_info cinfo; int ret; + nfs_init_cinfo_from_inode(&cinfo, inode); /* * A reference is taken only on the head request which acts as a * reference to the whole page group - the group will not be destroyed @@ -583,7 +586,7 @@ nfs_lock_and_join_requests(struct page *page) return ERR_PTR(ret); } - nfs_join_page_group(head, inode); + nfs_join_page_group(head, &cinfo, inode); return head; } @@ -944,18 +947,16 @@ nfs_clear_page_commit(struct page *page) } /* Called holding the request lock on @req */ -static void -nfs_clear_request_commit(struct nfs_page *req) +static void nfs_clear_request_commit(struct nfs_commit_info *cinfo, + struct nfs_page *req) { if (test_bit(PG_CLEAN, &req->wb_flags)) { struct nfs_open_context *ctx = nfs_req_openctx(req); struct inode *inode = d_inode(ctx->dentry); - struct nfs_commit_info cinfo; - nfs_init_cinfo_from_inode(&cinfo, inode); mutex_lock(&NFS_I(inode)->commit_mutex); - if (!pnfs_clear_request_commit(req, &cinfo)) { - nfs_request_remove_commit_list(req, &cinfo); + if (!pnfs_clear_request_commit(req, cinfo)) { + nfs_request_remove_commit_list(req, cinfo); } mutex_unlock(&NFS_I(inode)->commit_mutex); nfs_clear_page_commit(req->wb_page); diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 3c651cbcf89713b740a1e27f8fcf060e0f132e24..e84996c3867c7adc92b71580adf36173da113a43 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -881,8 +881,8 @@ nfsd4_rename(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, rename->rn_tname, rename->rn_tnamelen); if (status) return status; - set_change_info(&rename->rn_sinfo, &cstate->current_fh); - set_change_info(&rename->rn_tinfo, &cstate->save_fh); + set_change_info(&rename->rn_sinfo, &cstate->save_fh); + set_change_info(&rename->rn_tinfo, &cstate->current_fh); return nfs_ok; } diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c index 0e734c8b4dfa202c87d9adba602ef11374d9eaf0..244e4258ce16ff69c266c9c247dd139efc643da6 100644 --- a/fs/overlayfs/file.c +++ b/fs/overlayfs/file.c @@ -19,7 +19,6 @@ struct ovl_aio_req { struct kiocb iocb; refcount_t ref; struct kiocb *orig_iocb; - struct fd fd; }; static struct kmem_cache *ovl_aio_request_cachep; @@ -261,7 +260,7 @@ static rwf_t ovl_iocb_to_rwf(int ifl) static inline void ovl_aio_put(struct ovl_aio_req *aio_req) { if (refcount_dec_and_test(&aio_req->ref)) { - fdput(aio_req->fd); + fput(aio_req->iocb.ki_filp); kmem_cache_free(ovl_aio_request_cachep, aio_req); } } @@ -327,10 +326,9 @@ static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter) if (!aio_req) goto out; - aio_req->fd = real; real.flags = 0; aio_req->orig_iocb = iocb; - kiocb_clone(&aio_req->iocb, iocb, real.file); + kiocb_clone(&aio_req->iocb, iocb, get_file(real.file)); 
aio_req->iocb.ki_complete = ovl_aio_rw_complete; refcount_set(&aio_req->ref, 2); ret = vfs_iocb_iter_read(real.file, &aio_req->iocb, iter); @@ -399,10 +397,9 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter) /* Pacify lockdep, same trick as done in aio_write() */ __sb_writers_release(file_inode(real.file)->i_sb, SB_FREEZE_WRITE); - aio_req->fd = real; real.flags = 0; aio_req->orig_iocb = iocb; - kiocb_clone(&aio_req->iocb, iocb, real.file); + kiocb_clone(&aio_req->iocb, iocb, get_file(real.file)); aio_req->iocb.ki_flags = ifl; aio_req->iocb.ki_complete = ovl_aio_rw_complete; refcount_set(&aio_req->ref, 2); diff --git a/fs/proc/base.c b/fs/proc/base.c index d8e5fb88acdf9777815d476cc8371f58e6b97b72..6ca5df69b92efd8e4b22536075aa7234609cbaed 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -2072,7 +2072,7 @@ void proc_pid_evict_inode(struct proc_inode *ei) put_pid(pid); } -struct inode *proc_pid_make_inode(struct super_block * sb, +struct inode *proc_pid_make_inode(struct super_block *sb, struct task_struct *task, umode_t mode) { struct inode * inode; @@ -2101,11 +2101,6 @@ struct inode *proc_pid_make_inode(struct super_block * sb, /* Let the pid remember us for quick removal */ ei->pid = pid; - if (S_ISDIR(mode)) { - spin_lock(&pid->lock); - hlist_add_head_rcu(&ei->sibling_inodes, &pid->inodes); - spin_unlock(&pid->lock); - } task_dump_owner(task, 0, &inode->i_uid, &inode->i_gid); security_task_to_inode(task, inode); @@ -2118,6 +2113,39 @@ struct inode *proc_pid_make_inode(struct super_block * sb, return NULL; } +/* + * Generating an inode and adding it into @pid->inodes, so that task will + * invalidate inode's dentry before being released. + * + * This helper is used for creating dir-type entries under '/proc' and + * '/proc/<pid>/task'. Other entries(eg. fd, stat) under '/proc/<pid>' + * can be released by invalidating '/proc/<pid>' dentry. + * In theory, dentries under '/proc/<pid>/task' can also be released by + * invalidating '/proc/<pid>' dentry, we reserve it to handle single + * thread exiting situation: Any one of threads should invalidate its + * '/proc/<pid>/task/<tid>' dentry before released. 
+ */ +static struct inode *proc_pid_make_base_inode(struct super_block *sb, + struct task_struct *task, umode_t mode) +{ + struct inode *inode; + struct proc_inode *ei; + struct pid *pid; + + inode = proc_pid_make_inode(sb, task, mode); + if (!inode) + return NULL; + + /* Let proc_flush_pid find this directory inode */ + ei = PROC_I(inode); + pid = ei->pid; + spin_lock(&pid->lock); + hlist_add_head_rcu(&ei->sibling_inodes, &pid->inodes); + spin_unlock(&pid->lock); + + return inode; +} + int pid_getattr(const struct path *path, struct kstat *stat, u32 request_mask, unsigned int query_flags) { @@ -3560,7 +3588,8 @@ static struct dentry *proc_pid_instantiate(struct dentry * dentry, { struct inode *inode; - inode = proc_pid_make_inode(dentry->d_sb, task, S_IFDIR | S_IRUGO | S_IXUGO); + inode = proc_pid_make_base_inode(dentry->d_sb, task, + S_IFDIR | S_IRUGO | S_IXUGO); if (!inode) return ERR_PTR(-ENOENT); @@ -3868,7 +3897,8 @@ static struct dentry *proc_task_instantiate(struct dentry *dentry, struct task_struct *task, const void *ptr) { struct inode *inode; - inode = proc_pid_make_inode(dentry->d_sb, task, S_IFDIR | S_IRUGO | S_IXUGO); + inode = proc_pid_make_base_inode(dentry->d_sb, task, + S_IFDIR | S_IRUGO | S_IXUGO); if (!inode) return ERR_PTR(-ENOENT); diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c index 4b70571368526694cfe52097ea7b07b98ad81bd1..7d78984af4349334c494b8172bab20866b910a35 100644 --- a/fs/tracefs/inode.c +++ b/fs/tracefs/inode.c @@ -554,6 +554,9 @@ static struct dentry *__create_dir(const char *name, struct dentry *parent, */ struct dentry *tracefs_create_dir(const char *name, struct dentry *parent) { + if (security_locked_down(LOCKDOWN_TRACEFS)) + return NULL; + return __create_dir(name, parent, &simple_dir_inode_operations); } diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h index 1a12baa58e409b05ea715704cb8e27ff6b95c195..136dba94c646fcaf3868dd63eeb55102c0f91f2a 100644 --- a/include/linux/acpi_iort.h +++ b/include/linux/acpi_iort.h @@ -21,6 +21,7 @@ */ #define IORT_SMMU_V3_PMCG_GENERIC 0x00000000 /* Generic SMMUv3 PMCG */ #define IORT_SMMU_V3_PMCG_HISI_HIP08 0x00000001 /* HiSilicon HIP08 PMCG */ +#define IORT_SMMU_V3_PMCG_HISI_HIP09 0x00000002 /* HiSilicon HIP09 PMCG */ int iort_register_domain_token(int trans_id, phys_addr_t base, struct fwnode_handle *fw_node); diff --git a/include/linux/if_team.h b/include/linux/if_team.h index 5dd1657947b75c1b0a3f2fe13ec90b89aaa02882..762c77d13e7dd24cc327bd7a4fcdcdf3a451c64d 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@ -189,6 +189,8 @@ struct team { struct net_device *dev; /* associated netdevice */ struct team_pcpu_stats __percpu *pcpu_stats; + const struct header_ops *header_ops_cache; + struct mutex lock; /* used for overall locking, e.g. 
port lists write */ /* diff --git a/include/linux/libata.h b/include/linux/libata.h index d3600fc7f7c4de89e5b7265142a4957d51785bbd..2de6b4a613944e45bca371078ce106d982703caf 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -187,7 +187,7 @@ enum { ATA_LFLAG_NO_LPM = (1 << 8), /* disable LPM on this link */ ATA_LFLAG_RST_ONCE = (1 << 9), /* limit recovery to one reset */ ATA_LFLAG_CHANGED = (1 << 10), /* LPM state changed on this link */ - ATA_LFLAG_NO_DB_DELAY = (1 << 11), /* no debounce delay on link resume */ + ATA_LFLAG_NO_DEBOUNCE_DELAY = (1 << 11), /* no debounce delay on link resume */ /* struct ata_port flags */ ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */ @@ -260,6 +260,10 @@ enum { ATA_HOST_PARALLEL_SCAN = (1 << 2), /* Ports on this host can be scanned in parallel */ ATA_HOST_IGNORE_ATA = (1 << 3), /* Ignore ATA devices on this host. */ + ATA_HOST_NO_PART = (1 << 4), /* Host does not support partial */ + ATA_HOST_NO_SSC = (1 << 5), /* Host does not support slumber */ + ATA_HOST_NO_DEVSLP = (1 << 6), /* Host does not support devslp */ + /* bits 24:31 of host->flags are reserved for LLD specific flags */ /* various lengths of time */ diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index f0373a6cb5fb60ed732c74c9eb76a95a7330eedd..40aa09a21f75d876becd84e08a8bf8c84250cd1c 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -145,7 +145,9 @@ extern void nfs_unlock_request(struct nfs_page *req); extern void nfs_unlock_and_release_request(struct nfs_page *); extern struct nfs_page *nfs_page_group_lock_head(struct nfs_page *req); extern int nfs_page_group_lock_subrequests(struct nfs_page *head); -extern void nfs_join_page_group(struct nfs_page *head, struct inode *inode); +extern void nfs_join_page_group(struct nfs_page *head, + struct nfs_commit_info *cinfo, + struct inode *inode); extern int nfs_page_group_lock(struct nfs_page *); extern void nfs_page_group_unlock(struct nfs_page *); extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int); diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index f3b7c7bf059624f126d7277b6c162cfdabcaa8a8..3b038a2cc007130746b38771279de6950fd13617 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1072,15 +1072,31 @@ extern int perf_event_output(struct perf_event *event, struct pt_regs *regs); static inline bool -is_default_overflow_handler(struct perf_event *event) +__is_default_overflow_handler(perf_overflow_handler_t overflow_handler) { - if (likely(event->overflow_handler == perf_event_output_forward)) + if (likely(overflow_handler == perf_event_output_forward)) return true; - if (unlikely(event->overflow_handler == perf_event_output_backward)) + if (unlikely(overflow_handler == perf_event_output_backward)) return true; return false; } +#define is_default_overflow_handler(event) \ + __is_default_overflow_handler((event)->overflow_handler) + +#ifdef CONFIG_BPF_SYSCALL +static inline bool uses_default_overflow_handler(struct perf_event *event) +{ + if (likely(is_default_overflow_handler(event))) + return true; + + return __is_default_overflow_handler(event->orig_overflow_handler); +} +#else +#define uses_default_overflow_handler(event) \ + is_default_overflow_handler(event) +#endif + extern void perf_event_header__init_id(struct perf_event_header *header, struct perf_sample_data *data, diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index 
e8304e929e2835c14fcee7bf4be373a52aa7fbb4..de21a45a4ee7d4f99332a213359c81b7cac51227 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -110,10 +110,36 @@ static inline struct task_struct *get_task_struct(struct task_struct *t) } extern void __put_task_struct(struct task_struct *t); +extern void __put_task_struct_rcu_cb(struct rcu_head *rhp); static inline void put_task_struct(struct task_struct *t) { - if (refcount_dec_and_test(&t->usage)) + if (!refcount_dec_and_test(&t->usage)) + return; + + /* + * under PREEMPT_RT, we can't call put_task_struct + * in atomic context because it will indirectly + * acquire sleeping locks. + * + * call_rcu() will schedule delayed_put_task_struct_rcu() + * to be called in process context. + * + * __put_task_struct() is called when + * refcount_dec_and_test(&t->usage) succeeds. + * + * This means that it can't "conflict" with + * put_task_struct_rcu_user() which abuses ->rcu the same + * way; rcu_users has a reference so task->usage can't be + * zero after rcu_users 1 -> 0 transition. + * + * delayed_free_task() also uses ->rcu, but it is only called + * when it fails to fork a process. Therefore, there is no + * way it can conflict with put_task_struct(). + */ + if (IS_ENABLED(CONFIG_PREEMPT_RT) && !preemptible()) + call_rcu(&t->rcu, __put_task_struct_rcu_cb); + else __put_task_struct(t); } diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index fb89b05066f43d3c44f56d2502fae272a7e239e1..0928a60b8f82577dbcb750c5e5aab45c23f58ff3 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -307,10 +307,10 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu __seqprop_case((s), mutex, prop), \ __seqprop_case((s), ww_mutex, prop)) -#define __seqcount_ptr(s) __seqprop(s, ptr) -#define __seqcount_sequence(s) __seqprop(s, sequence) -#define __seqcount_lock_preemptible(s) __seqprop(s, preemptible) -#define __seqcount_assert_lock_held(s) __seqprop(s, assert) +#define seqprop_ptr(s) __seqprop(s, ptr) +#define seqprop_sequence(s) __seqprop(s, sequence) +#define seqprop_preemptible(s) __seqprop(s, preemptible) +#define seqprop_assert(s) __seqprop(s, assert) /** * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier @@ -330,7 +330,7 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu ({ \ unsigned __seq; \ \ - while ((__seq = __seqcount_sequence(s)) & 1) \ + while ((__seq = seqprop_sequence(s)) & 1) \ cpu_relax(); \ \ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \ @@ -359,7 +359,7 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu */ #define read_seqcount_begin(s) \ ({ \ - seqcount_lockdep_reader_access(__seqcount_ptr(s)); \ + seqcount_lockdep_reader_access(seqprop_ptr(s)); \ raw_read_seqcount_begin(s); \ }) @@ -376,7 +376,7 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu */ #define raw_read_seqcount(s) \ ({ \ - unsigned __seq = __seqcount_sequence(s); \ + unsigned __seq = seqprop_sequence(s); \ \ smp_rmb(); \ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \ @@ -425,9 +425,9 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu * Return: true if a read section retry is required, else false */ #define __read_seqcount_retry(s, start) \ - __read_seqcount_t_retry(__seqcount_ptr(s), start) + do___read_seqcount_retry(seqprop_ptr(s), start) -static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start) +static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start) 
{ kcsan_atomic_next(0); return unlikely(READ_ONCE(s->sequence) != start); @@ -445,12 +445,12 @@ static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start) * Return: true if a read section retry is required, else false */ #define read_seqcount_retry(s, start) \ - read_seqcount_t_retry(__seqcount_ptr(s), start) + do_read_seqcount_retry(seqprop_ptr(s), start) -static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start) +static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start) { smp_rmb(); - return __read_seqcount_t_retry(s, start); + return do___read_seqcount_retry(s, start); } /** @@ -459,13 +459,13 @@ static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start) */ #define raw_write_seqcount_begin(s) \ do { \ - if (__seqcount_lock_preemptible(s)) \ + if (seqprop_preemptible(s)) \ preempt_disable(); \ \ - raw_write_seqcount_t_begin(__seqcount_ptr(s)); \ + do_raw_write_seqcount_begin(seqprop_ptr(s)); \ } while (0) -static inline void raw_write_seqcount_t_begin(seqcount_t *s) +static inline void do_raw_write_seqcount_begin(seqcount_t *s) { kcsan_nestable_atomic_begin(); s->sequence++; @@ -478,13 +478,13 @@ static inline void raw_write_seqcount_t_begin(seqcount_t *s) */ #define raw_write_seqcount_end(s) \ do { \ - raw_write_seqcount_t_end(__seqcount_ptr(s)); \ + do_raw_write_seqcount_end(seqprop_ptr(s)); \ \ - if (__seqcount_lock_preemptible(s)) \ + if (seqprop_preemptible(s)) \ preempt_enable(); \ } while (0) -static inline void raw_write_seqcount_t_end(seqcount_t *s) +static inline void do_raw_write_seqcount_end(seqcount_t *s) { smp_wmb(); s->sequence++; @@ -501,18 +501,18 @@ static inline void raw_write_seqcount_t_end(seqcount_t *s) */ #define write_seqcount_begin_nested(s, subclass) \ do { \ - __seqcount_assert_lock_held(s); \ + seqprop_assert(s); \ \ - if (__seqcount_lock_preemptible(s)) \ + if (seqprop_preemptible(s)) \ preempt_disable(); \ \ - write_seqcount_t_begin_nested(__seqcount_ptr(s), subclass); \ + do_write_seqcount_begin_nested(seqprop_ptr(s), subclass); \ } while (0) -static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass) +static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass) { - raw_write_seqcount_t_begin(s); seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_); + do_raw_write_seqcount_begin(s); } /** @@ -528,17 +528,17 @@ static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass) */ #define write_seqcount_begin(s) \ do { \ - __seqcount_assert_lock_held(s); \ + seqprop_assert(s); \ \ - if (__seqcount_lock_preemptible(s)) \ + if (seqprop_preemptible(s)) \ preempt_disable(); \ \ - write_seqcount_t_begin(__seqcount_ptr(s)); \ + do_write_seqcount_begin(seqprop_ptr(s)); \ } while (0) -static inline void write_seqcount_t_begin(seqcount_t *s) +static inline void do_write_seqcount_begin(seqcount_t *s) { - write_seqcount_t_begin_nested(s, 0); + do_write_seqcount_begin_nested(s, 0); } /** @@ -549,16 +549,16 @@ static inline void write_seqcount_t_begin(seqcount_t *s) */ #define write_seqcount_end(s) \ do { \ - write_seqcount_t_end(__seqcount_ptr(s)); \ + do_write_seqcount_end(seqprop_ptr(s)); \ \ - if (__seqcount_lock_preemptible(s)) \ + if (seqprop_preemptible(s)) \ preempt_enable(); \ } while (0) -static inline void write_seqcount_t_end(seqcount_t *s) +static inline void do_write_seqcount_end(seqcount_t *s) { seqcount_release(&s->dep_map, _RET_IP_); - raw_write_seqcount_t_end(s); + do_raw_write_seqcount_end(s); } /** @@ -603,9 +603,9 @@ static 
inline void write_seqcount_t_end(seqcount_t *s) * } */ #define raw_write_seqcount_barrier(s) \ - raw_write_seqcount_t_barrier(__seqcount_ptr(s)) + do_raw_write_seqcount_barrier(seqprop_ptr(s)) -static inline void raw_write_seqcount_t_barrier(seqcount_t *s) +static inline void do_raw_write_seqcount_barrier(seqcount_t *s) { kcsan_nestable_atomic_begin(); s->sequence++; @@ -623,9 +623,9 @@ static inline void raw_write_seqcount_t_barrier(seqcount_t *s) * will complete successfully and see data older than this. */ #define write_seqcount_invalidate(s) \ - write_seqcount_t_invalidate(__seqcount_ptr(s)) + do_write_seqcount_invalidate(seqprop_ptr(s)) -static inline void write_seqcount_t_invalidate(seqcount_t *s) +static inline void do_write_seqcount_invalidate(seqcount_t *s) { smp_wmb(); kcsan_nestable_atomic_begin(); @@ -862,9 +862,9 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) } /* - * For all seqlock_t write side functions, use write_seqcount_*t*_begin() - * instead of the generic write_seqcount_begin(). This way, no redundant - * lockdep_assert_held() checks are added. + * For all seqlock_t write side functions, use the internal + * do_write_seqcount_begin() instead of generic write_seqcount_begin(). + * This way, no redundant lockdep_assert_held() checks are added. */ /** @@ -883,7 +883,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) static inline void write_seqlock(seqlock_t *sl) { spin_lock(&sl->lock); - write_seqcount_t_begin(&sl->seqcount.seqcount); + do_write_seqcount_begin(&sl->seqcount.seqcount); } /** @@ -895,7 +895,7 @@ static inline void write_seqlock(seqlock_t *sl) */ static inline void write_sequnlock(seqlock_t *sl) { - write_seqcount_t_end(&sl->seqcount.seqcount); + do_write_seqcount_end(&sl->seqcount.seqcount); spin_unlock(&sl->lock); } @@ -909,7 +909,7 @@ static inline void write_sequnlock(seqlock_t *sl) static inline void write_seqlock_bh(seqlock_t *sl) { spin_lock_bh(&sl->lock); - write_seqcount_t_begin(&sl->seqcount.seqcount); + do_write_seqcount_begin(&sl->seqcount.seqcount); } /** @@ -922,7 +922,7 @@ static inline void write_seqlock_bh(seqlock_t *sl) */ static inline void write_sequnlock_bh(seqlock_t *sl) { - write_seqcount_t_end(&sl->seqcount.seqcount); + do_write_seqcount_end(&sl->seqcount.seqcount); spin_unlock_bh(&sl->lock); } @@ -936,7 +936,7 @@ static inline void write_sequnlock_bh(seqlock_t *sl) static inline void write_seqlock_irq(seqlock_t *sl) { spin_lock_irq(&sl->lock); - write_seqcount_t_begin(&sl->seqcount.seqcount); + do_write_seqcount_begin(&sl->seqcount.seqcount); } /** @@ -948,7 +948,7 @@ static inline void write_seqlock_irq(seqlock_t *sl) */ static inline void write_sequnlock_irq(seqlock_t *sl) { - write_seqcount_t_end(&sl->seqcount.seqcount); + do_write_seqcount_end(&sl->seqcount.seqcount); spin_unlock_irq(&sl->lock); } @@ -957,7 +957,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl) unsigned long flags; spin_lock_irqsave(&sl->lock, flags); - write_seqcount_t_begin(&sl->seqcount.seqcount); + do_write_seqcount_begin(&sl->seqcount.seqcount); return flags; } @@ -986,7 +986,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl) static inline void write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags) { - write_seqcount_t_end(&sl->seqcount.seqcount); + do_write_seqcount_end(&sl->seqcount.seqcount); spin_unlock_irqrestore(&sl->lock, flags); } diff --git a/include/net/netfilter/ipv4/nf_reject.h b/include/net/netfilter/ipv4/nf_reject.h index 
40e0e0623f46130a4646ab027f85579244e98745..d8207a82d761aeb36efa710db1d51f65b81606d8 100644 --- a/include/net/netfilter/ipv4/nf_reject.h +++ b/include/net/netfilter/ipv4/nf_reject.h @@ -8,8 +8,8 @@ #include void nf_send_unreach(struct sk_buff *skb_in, int code, int hook); -void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook); - +void nf_send_reset(struct net *net, struct sock *, struct sk_buff *oldskb, + int hook); const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb, struct tcphdr *_oth, int hook); struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb, diff --git a/include/net/netfilter/ipv6/nf_reject.h b/include/net/netfilter/ipv6/nf_reject.h index 4a3ef9ebdf6f6e3f1fe9412afd5f7fe072b99176..86e87bc2c51676f90e9bea4dcfe4fe5b4338f1a9 100644 --- a/include/net/netfilter/ipv6/nf_reject.h +++ b/include/net/netfilter/ipv6/nf_reject.h @@ -7,9 +7,8 @@ void nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code, unsigned int hooknum); - -void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook); - +void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb, + int hook); const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb, struct tcphdr *otcph, unsigned int *otcplen, int hook); diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index eec29dd6681cabf091e1573098074bb4ce56d324..152cd46915d6d2c26804fd0fb6e8209dae88d3b0 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -28,6 +28,16 @@ struct nft_pktinfo { struct xt_action_param xt; }; +static inline struct sock *nft_sk(const struct nft_pktinfo *pkt) +{ + return pkt->xt.state->sk; +} + +static inline unsigned int nft_thoff(const struct nft_pktinfo *pkt) +{ + return pkt->xt.thoff; +} + static inline struct net *nft_net(const struct nft_pktinfo *pkt) { return pkt->xt.state->net; @@ -373,7 +383,8 @@ struct nft_set_ops { const struct nft_set *set, const struct nft_set_elem *elem, unsigned int flags); - + void (*commit)(const struct nft_set *set); + void (*abort)(const struct nft_set *set); u64 (*privsize)(const struct nlattr * const nla[], const struct nft_set_desc *desc); bool (*estimate)(const struct nft_set_desc *desc, @@ -406,6 +417,7 @@ struct nft_set_type { * * @list: table set list node * @bindings: list of set bindings + * @refs: internal refcounting for async set destruction * @table: table this set belongs to * @net: netnamespace this set belongs to * @name: name of the set @@ -435,6 +447,7 @@ struct nft_set_type { struct nft_set { struct list_head list; struct list_head bindings; + refcount_t refs; struct nft_table *table; possible_net_t net; char *name; @@ -454,9 +467,11 @@ struct nft_set { u16 udlen; unsigned char *udata; struct nft_expr *expr; + struct list_head pending_update; /* runtime data below here */ const struct nft_set_ops *ops ____cacheline_aligned; - u16 flags:14, + u16 flags:13, + dead:1, genmask:2; u8 klen; u8 dlen; @@ -474,6 +489,11 @@ static inline void *nft_set_priv(const struct nft_set *set) return (void *)set->data; } +static inline bool nft_set_gc_is_pending(const struct nft_set *s) +{ + return refcount_read(&s->refs) != 1; +} + static inline struct nft_set *nft_set_container_of(const void *priv) { return (void *)priv - offsetof(struct nft_set, data); @@ -690,62 +710,6 @@ void nft_set_elem_destroy(const struct nft_set *set, void *elem, void nf_tables_set_elem_destroy(const struct nft_ctx *ctx, const struct nft_set *set, void *elem); -/** - * struct 
nft_set_gc_batch_head - nf_tables set garbage collection batch - * - * @rcu: rcu head - * @set: set the elements belong to - * @cnt: count of elements - */ -struct nft_set_gc_batch_head { - struct rcu_head rcu; - const struct nft_set *set; - unsigned int cnt; -}; - -#define NFT_SET_GC_BATCH_SIZE ((PAGE_SIZE - \ - sizeof(struct nft_set_gc_batch_head)) / \ - sizeof(void *)) - -/** - * struct nft_set_gc_batch - nf_tables set garbage collection batch - * - * @head: GC batch head - * @elems: garbage collection elements - */ -struct nft_set_gc_batch { - struct nft_set_gc_batch_head head; - void *elems[NFT_SET_GC_BATCH_SIZE]; -}; - -struct nft_set_gc_batch *nft_set_gc_batch_alloc(const struct nft_set *set, - gfp_t gfp); -void nft_set_gc_batch_release(struct rcu_head *rcu); - -static inline void nft_set_gc_batch_complete(struct nft_set_gc_batch *gcb) -{ - if (gcb != NULL) - call_rcu(&gcb->head.rcu, nft_set_gc_batch_release); -} - -static inline struct nft_set_gc_batch * -nft_set_gc_batch_check(const struct nft_set *set, struct nft_set_gc_batch *gcb, - gfp_t gfp) -{ - if (gcb != NULL) { - if (gcb->head.cnt + 1 < ARRAY_SIZE(gcb->elems)) - return gcb; - nft_set_gc_batch_complete(gcb); - } - return nft_set_gc_batch_alloc(set, gfp); -} - -static inline void nft_set_gc_batch_add(struct nft_set_gc_batch *gcb, - void *elem) -{ - gcb->elems[gcb->head.cnt++] = elem; -} - struct nft_expr_ops; /** * struct nft_expr_type - nf_tables expression type @@ -1413,39 +1377,30 @@ static inline void nft_set_elem_change_active(const struct net *net, #endif /* IS_ENABLED(CONFIG_NF_TABLES) */ -/* - * We use a free bit in the genmask field to indicate the element - * is busy, meaning it is currently being processed either by - * the netlink API or GC. - * - * Even though the genmask is only a single byte wide, this works - * because the extension structure if fully constant once initialized, - * so there are no non-atomic write accesses unless it is already - * marked busy. 
- */ -#define NFT_SET_ELEM_BUSY_MASK (1 << 2) +#define NFT_SET_ELEM_DEAD_MASK (1 << 2) #if defined(__LITTLE_ENDIAN_BITFIELD) -#define NFT_SET_ELEM_BUSY_BIT 2 +#define NFT_SET_ELEM_DEAD_BIT 2 #elif defined(__BIG_ENDIAN_BITFIELD) -#define NFT_SET_ELEM_BUSY_BIT (BITS_PER_LONG - BITS_PER_BYTE + 2) +#define NFT_SET_ELEM_DEAD_BIT (BITS_PER_LONG - BITS_PER_BYTE + 2) #else #error #endif -static inline int nft_set_elem_mark_busy(struct nft_set_ext *ext) +static inline void nft_set_elem_dead(struct nft_set_ext *ext) { unsigned long *word = (unsigned long *)ext; BUILD_BUG_ON(offsetof(struct nft_set_ext, genmask) != 0); - return test_and_set_bit(NFT_SET_ELEM_BUSY_BIT, word); + set_bit(NFT_SET_ELEM_DEAD_BIT, word); } -static inline void nft_set_elem_clear_busy(struct nft_set_ext *ext) +static inline int nft_set_elem_is_dead(const struct nft_set_ext *ext) { unsigned long *word = (unsigned long *)ext; - clear_bit(NFT_SET_ELEM_BUSY_BIT, word); + BUILD_BUG_ON(offsetof(struct nft_set_ext, genmask) != 0); + return test_bit(NFT_SET_ELEM_DEAD_BIT, word); } /** @@ -1573,6 +1528,35 @@ struct nft_trans_flowtable { #define nft_trans_flowtable_flags(trans) \ (((struct nft_trans_flowtable *)trans->data)->flags) +#define NFT_TRANS_GC_BATCHCOUNT 256 + +struct nft_trans_gc { + struct list_head list; + struct net *net; + struct nft_set *set; + u32 seq; + u16 count; + void *priv[NFT_TRANS_GC_BATCHCOUNT]; + struct rcu_head rcu; +}; + +struct nft_trans_gc *nft_trans_gc_alloc(struct nft_set *set, + unsigned int gc_seq, gfp_t gfp); +void nft_trans_gc_destroy(struct nft_trans_gc *trans); + +struct nft_trans_gc *nft_trans_gc_queue_async(struct nft_trans_gc *gc, + unsigned int gc_seq, gfp_t gfp); +void nft_trans_gc_queue_async_done(struct nft_trans_gc *gc); + +struct nft_trans_gc *nft_trans_gc_queue_sync(struct nft_trans_gc *gc, gfp_t gfp); +void nft_trans_gc_queue_sync_done(struct nft_trans_gc *trans); + +void nft_trans_gc_elem_add(struct nft_trans_gc *gc, void *priv); + +void nft_setelem_data_deactivate(const struct net *net, + const struct nft_set *set, + struct nft_set_elem *elem); + int __init nft_chain_filter_init(void); void nft_chain_filter_fini(void); @@ -1593,6 +1577,7 @@ struct nftables_pernet { struct mutex commit_mutex; unsigned int base_seq; u8 validate_state; + unsigned int gc_seq; }; #endif /* _NET_NF_TABLES_H */ diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 98272cb5f6178fb83a11390f1c840a42886ddc9b..1d8dd58f83a501cc96db1cc58b84f73b504a3bab 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -797,11 +797,13 @@ enum nft_exthdr_flags { * @NFT_EXTHDR_OP_IPV6: match against ipv6 extension headers * @NFT_EXTHDR_OP_TCP: match against tcp options * @NFT_EXTHDR_OP_IPV4: match against ipv4 options + * @NFT_EXTHDR_OP_SCTP: match against sctp chunks */ enum nft_exthdr_op { NFT_EXTHDR_OP_IPV6, NFT_EXTHDR_OP_TCPOPT, NFT_EXTHDR_OP_IPV4, + NFT_EXTHDR_OP_SCTP, __NFT_EXTHDR_OP_MAX }; #define NFT_EXTHDR_OP_MAX (__NFT_EXTHDR_OP_MAX - 1) diff --git a/include/uapi/linux/netfilter_bridge/ebtables.h b/include/uapi/linux/netfilter_bridge/ebtables.h index a494cf43a7552e0caf53bd178de2ea674ae88db9..b0caad82b6937baea3c8055c81ae9c7d4f7533d2 100644 --- a/include/uapi/linux/netfilter_bridge/ebtables.h +++ b/include/uapi/linux/netfilter_bridge/ebtables.h @@ -182,12 +182,14 @@ struct ebt_entry { unsigned char sourcemsk[ETH_ALEN]; unsigned char destmac[ETH_ALEN]; unsigned char destmsk[ETH_ALEN]; - /* sizeof ebt_entry + matches */ - 
unsigned int watchers_offset; - /* sizeof ebt_entry + matches + watchers */ - unsigned int target_offset; - /* sizeof ebt_entry + matches + watchers + target */ - unsigned int next_offset; + __struct_group(/* no tag */, offsets, /* no attrs */, + /* sizeof ebt_entry + matches */ + unsigned int watchers_offset; + /* sizeof ebt_entry + matches + watchers */ + unsigned int target_offset; + /* sizeof ebt_entry + matches + watchers + target */ + unsigned int next_offset; + ); unsigned char elems[0] __attribute__ ((aligned (__alignof__(struct ebt_replace)))); }; diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c index 0ee2347ba510de8d839933facafc5a9a7bba4623..a047a2053d41a0084187ff5070932caa2902271a 100644 --- a/kernel/bpf/queue_stack_maps.c +++ b/kernel/bpf/queue_stack_maps.c @@ -111,7 +111,12 @@ static int __queue_map_get(struct bpf_map *map, void *value, bool delete) int err = 0; void *ptr; - raw_spin_lock_irqsave(&qs->lock, flags); + if (in_nmi()) { + if (!raw_spin_trylock_irqsave(&qs->lock, flags)) + return -EBUSY; + } else { + raw_spin_lock_irqsave(&qs->lock, flags); + } if (queue_stack_map_is_empty(qs)) { memset(value, 0, qs->map.value_size); @@ -141,7 +146,12 @@ static int __stack_map_get(struct bpf_map *map, void *value, bool delete) void *ptr; u32 index; - raw_spin_lock_irqsave(&qs->lock, flags); + if (in_nmi()) { + if (!raw_spin_trylock_irqsave(&qs->lock, flags)) + return -EBUSY; + } else { + raw_spin_lock_irqsave(&qs->lock, flags); + } if (queue_stack_map_is_empty(qs)) { memset(value, 0, qs->map.value_size); @@ -206,7 +216,12 @@ static int queue_stack_map_push_elem(struct bpf_map *map, void *value, if (flags & BPF_NOEXIST || flags > BPF_EXIST) return -EINVAL; - raw_spin_lock_irqsave(&qs->lock, irq_flags); + if (in_nmi()) { + if (!raw_spin_trylock_irqsave(&qs->lock, irq_flags)) + return -EBUSY; + } else { + raw_spin_lock_irqsave(&qs->lock, irq_flags); + } if (queue_stack_map_is_full(qs)) { if (!replace) { diff --git a/kernel/fork.c b/kernel/fork.c index cd6a83d1abce565de04f50ee925d10839bb4e143..70ca6cca1eb65f975be2d3bd72090926ebce4f33 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -755,6 +755,14 @@ void __put_task_struct(struct task_struct *tsk) } EXPORT_SYMBOL_GPL(__put_task_struct); +void __put_task_struct_rcu_cb(struct rcu_head *rhp) +{ + struct task_struct *task = container_of(rhp, struct task_struct, rcu); + + __put_task_struct(task); +} +EXPORT_SYMBOL_GPL(__put_task_struct_rcu_cb); + void __init __weak arch_task_cache_init(void) { } /* diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c index 6c05365ed80fc219a68149a5f5cd78f68b711e52..3b9783eda67969544d773ba2162f93407b1b46db 100644 --- a/kernel/rcu/rcuscale.c +++ b/kernel/rcu/rcuscale.c @@ -372,7 +372,7 @@ rcu_scale_writer(void *arg) sched_set_fifo_low(current); if (holdoff) - schedule_timeout_uninterruptible(holdoff * HZ); + schedule_timeout_idle(holdoff * HZ); /* * Wait until rcu_end_inkernel_boot() is called for normal GP tests diff --git a/kernel/scftorture.c b/kernel/scftorture.c index 060ee0b1569a0cff23c3f3e367c051c4f4d5c903..be86207a2ab6848e8f9f580b61c5cf8ce10f047a 100644 --- a/kernel/scftorture.c +++ b/kernel/scftorture.c @@ -158,7 +158,8 @@ static void scf_torture_stats_print(void) scfs.n_all_wait += scf_stats_p[i].n_all_wait; } if (atomic_read(&n_errs) || atomic_read(&n_mb_in_errs) || - atomic_read(&n_mb_out_errs) || atomic_read(&n_alloc_errs)) + atomic_read(&n_mb_out_errs) || + (!IS_ENABLED(CONFIG_KASAN) && atomic_read(&n_alloc_errs))) bangstr = "!!! 
"; pr_alert("%s %sscf_invoked_count %s: %lld single: %lld/%lld single_ofl: %lld/%lld many: %lld/%lld all: %lld/%lld ", SCFTORT_FLAG, bangstr, isdone ? "VER" : "ver", invoked_count, @@ -306,7 +307,8 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra preempt_disable(); if (scfsp->scfs_prim == SCF_PRIM_SINGLE || scfsp->scfs_wait) { scfcp = kmalloc(sizeof(*scfcp), GFP_ATOMIC); - if (WARN_ON_ONCE(!scfcp)) { + if (!scfcp) { + WARN_ON_ONCE(!IS_ENABLED(CONFIG_KASAN)); atomic_inc(&n_alloc_errs); } else { scfcp->scfc_cpu = -1; diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index d3e0ae1168069c26db6a8415405e16449d2f0dab..196eec0423ff2e1d31c6bdd4ab5ecf919469f273 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -4559,6 +4559,33 @@ int tracing_open_generic_tr(struct inode *inode, struct file *filp) return 0; } +/* + * The private pointer of the inode is the trace_event_file. + * Update the tr ref count associated to it. + */ +int tracing_open_file_tr(struct inode *inode, struct file *filp) +{ + struct trace_event_file *file = inode->i_private; + int ret; + + ret = tracing_check_open_get_tr(file->tr); + if (ret) + return ret; + + filp->private_data = inode->i_private; + + return 0; +} + +int tracing_release_file_tr(struct inode *inode, struct file *filp) +{ + struct trace_event_file *file = inode->i_private; + + trace_array_put(file->tr); + + return 0; +} + static int tracing_release(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; @@ -7324,10 +7351,11 @@ static const struct file_operations tracing_max_lat_fops = { #endif static const struct file_operations set_tracer_fops = { - .open = tracing_open_generic, + .open = tracing_open_generic_tr, .read = tracing_set_trace_read, .write = tracing_set_trace_write, .llseek = generic_file_llseek, + .release = tracing_release_generic_tr, }; static const struct file_operations tracing_pipe_fops = { @@ -8366,12 +8394,33 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, return cnt; } +static int tracing_open_options(struct inode *inode, struct file *filp) +{ + struct trace_option_dentry *topt = inode->i_private; + int ret; + + ret = tracing_check_open_get_tr(topt->tr); + if (ret) + return ret; + + filp->private_data = inode->i_private; + return 0; +} + +static int tracing_release_options(struct inode *inode, struct file *file) +{ + struct trace_option_dentry *topt = file->private_data; + + trace_array_put(topt->tr); + return 0; +} static const struct file_operations trace_options_fops = { - .open = tracing_open_generic, + .open = tracing_open_options, .read = trace_options_read, .write = trace_options_write, .llseek = generic_file_llseek, + .release = tracing_release_options, }; /* diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index dfde855dafda7a6c291a4e12fd4113e68f2cb47d..7fa00b83dfa4b9195c26f65bcaf1db51caf00f5d 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -730,6 +730,8 @@ void tracing_reset_all_online_cpus(void); void tracing_reset_all_online_cpus_unlocked(void); int tracing_open_generic(struct inode *inode, struct file *filp); int tracing_open_generic_tr(struct inode *inode, struct file *filp); +int tracing_open_file_tr(struct inode *inode, struct file *filp); +int tracing_release_file_tr(struct inode *inode, struct file *filp); bool tracing_is_disabled(void); bool tracer_tracing_is_on(struct trace_array *tr); void tracer_tracing_on(struct trace_array *tr); diff --git a/kernel/trace/trace_events.c 
b/kernel/trace/trace_events.c index a46d34d840f69af5bdf8e68dda79f3cc7d0fb798..321cfda1b333889f383daa7b512387cf26c85152 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -1855,9 +1855,10 @@ static const struct file_operations ftrace_set_event_notrace_pid_fops = { }; static const struct file_operations ftrace_enable_fops = { - .open = tracing_open_generic, + .open = tracing_open_file_tr, .read = event_enable_read, .write = event_enable_write, + .release = tracing_release_file_tr, .llseek = default_llseek, }; @@ -1874,9 +1875,10 @@ static const struct file_operations ftrace_event_id_fops = { }; static const struct file_operations ftrace_event_filter_fops = { - .open = tracing_open_generic, + .open = tracing_open_file_tr, .read = event_filter_read, .write = event_filter_write, + .release = tracing_release_file_tr, .llseek = default_llseek, }; diff --git a/kernel/trace/trace_events_inject.c b/kernel/trace/trace_events_inject.c index 22bcf7c51d1ee5cf9407aed3a2f47531780335a0..149c7dc6a4473a0b881340946363bd9f05ff40d8 100644 --- a/kernel/trace/trace_events_inject.c +++ b/kernel/trace/trace_events_inject.c @@ -323,7 +323,8 @@ event_inject_read(struct file *file, char __user *buf, size_t size, } const struct file_operations event_inject_fops = { - .open = tracing_open_generic, + .open = tracing_open_file_tr, .read = event_inject_read, .write = event_inject_write, + .release = tracing_release_file_tr, }; diff --git a/lib/kobject.c b/lib/kobject.c index ea53b30cf4837969326dac3282e1fd364edbd3a9..cd3e1a98eff9e43dac67afc8bfa27172b37b5548 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -874,6 +874,11 @@ int kset_register(struct kset *k) if (!k) return -EINVAL; + if (!k->kobj.ktype) { + pr_err("must have a ktype to be initialized properly!\n"); + return -EINVAL; + } + kset_init(k); err = kobject_add_internal(&k->kobj); if (err) diff --git a/lib/mpi/mpi-cmp.c b/lib/mpi/mpi-cmp.c index c4cfa3ff058189c41eb7b7a5e84bb8e2ec92fa7d..0835b6213235e575492308f321ee48a2374bef48 100644 --- a/lib/mpi/mpi-cmp.c +++ b/lib/mpi/mpi-cmp.c @@ -25,8 +25,12 @@ int mpi_cmp_ui(MPI u, unsigned long v) mpi_limb_t limb = v; mpi_normalize(u); - if (!u->nlimbs && !limb) - return 0; + if (u->nlimbs == 0) { + if (v == 0) + return 0; + else + return -1; + } if (u->sign) return -1; if (u->nlimbs > 1) diff --git a/mm/filemap.c b/mm/filemap.c index 3a983bc1a71c98d50920ddcff72a1075c8428bff..3b0d8c6dd58704823de8485934cba7a6f61eb28b 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2203,6 +2203,9 @@ ssize_t generic_file_buffered_read(struct kiocb *iocb, if (unlikely(*ppos >= inode->i_sb->s_maxbytes)) return 0; + if (unlikely(!iov_iter_count(iter))) + return 0; + iov_iter_truncate(iter, inode->i_sb->s_maxbytes); index = *ppos >> PAGE_SHIFT; diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 4610f3a13966fa4f1379545c8fd1a2cf2f3ec273..f2ef75c7ccc68af658f4d2a18172420d0386b7c1 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -118,7 +118,7 @@ static int deliver_clone(const struct net_bridge_port *prev, skb = skb_clone(skb, GFP_ATOMIC); if (!skb) { - dev->stats.tx_dropped++; + DEV_STATS_INC(dev, tx_dropped); return -ENOMEM; } @@ -255,7 +255,7 @@ static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb, skb = skb_copy(skb, GFP_ATOMIC); if (!skb) { - dev->stats.tx_dropped++; + DEV_STATS_INC(dev, tx_dropped); return; } diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index bf5bf148091f909a088cd243486045393418793a..52dd0708fd143d58ac6ecc82d1225dbc529eaea4 
100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -145,12 +145,12 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb if ((mdst && mdst->host_joined) || br_multicast_is_router(br)) { local_rcv = true; - br->dev->stats.multicast++; + DEV_STATS_INC(br->dev, multicast); } mcast_hit = true; } else { local_rcv = true; - br->dev->stats.multicast++; + DEV_STATS_INC(br->dev, multicast); } break; case BR_PKT_UNICAST: diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 8335b7e4bcf6f5e12c5b61a53162bb3c1cd314a7..bab14186f9ad5ebbeed226370ea3e22de416e785 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -2001,8 +2001,7 @@ static int size_entry_mwt(const struct ebt_entry *entry, const unsigned char *ba return ret; offsets[0] = sizeof(struct ebt_entry); /* matches come first */ - memcpy(&offsets[1], &entry->watchers_offset, - sizeof(offsets) - sizeof(offsets[0])); + memcpy(&offsets[1], &entry->offsets, sizeof(entry->offsets)); if (state->buf_kern_start) { buf_start = state->buf_kern_start + state->buf_kern_offset; diff --git a/net/core/devlink.c b/net/core/devlink.c index 00c6944ed6342e2b430438b35493d4cb054e8de0..38666dde893409ebe93c36f9a3bb307843c93fcc 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -3620,7 +3620,7 @@ static int devlink_param_get(struct devlink *devlink, const struct devlink_param *param, struct devlink_param_gset_ctx *ctx) { - if (!param->get || devlink->reload_failed) + if (!param->get) return -EOPNOTSUPP; return param->get(devlink, param->id, ctx); } @@ -3629,7 +3629,7 @@ static int devlink_param_set(struct devlink *devlink, const struct devlink_param *param, struct devlink_param_gset_ctx *ctx) { - if (!param->set || devlink->reload_failed) + if (!param->set) return -EOPNOTSUPP; return param->set(devlink, param->id, ctx); } diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 398dc3e47d0c80b76fb76648d7059ba4008c6fa5..f2a0a4e6dd74840d8947c066f432cda739b4ddf2 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -243,13 +243,8 @@ static int dccp_v4_err(struct sk_buff *skb, u32 info) int err; struct net *net = dev_net(skb->dev); - /* For the first __dccp_basic_hdr_len() check, we only need dh->dccph_x, - * which is in byte 7 of the dccp header. - * Our caller (icmp_socket_deliver()) already pulled 8 bytes for us. - * - * Later on, we want to access the sequence number fields, which are - * beyond 8 bytes, so we have to pskb_may_pull() ourselves. - */ + if (!pskb_may_pull(skb, offset + sizeof(*dh))) + return -EINVAL; dh = (struct dccp_hdr *)(skb->data + offset); if (!pskb_may_pull(skb, offset + __dccp_basic_hdr_len(dh))) return -EINVAL; diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index bfe11e96af7c904fa1c3d444749b524561f94bdd..6d6bbd43a1419131893dc6f791b8ba550f585fe7 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -76,13 +76,8 @@ static int dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, __u64 seq; struct net *net = dev_net(skb->dev); - /* For the first __dccp_basic_hdr_len() check, we only need dh->dccph_x, - * which is in byte 7 of the dccp header. - * Our caller (icmpv6_notify()) already pulled 8 bytes for us. - * - * Later on, we want to access the sequence number fields, which are - * beyond 8 bytes, so we have to pskb_may_pull() ourselves. 
- */ + if (!pskb_may_pull(skb, offset + sizeof(*dh))) + return -EINVAL; dh = (struct dccp_hdr *)(skb->data + offset); if (!pskb_may_pull(skb, offset + __dccp_basic_hdr_len(dh))) return -EINVAL; diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c index e16b98ee6266effae80b0edad5f214898075567a..4b88407347629455cd0ecfc70de7fdb79cf38559 100644 --- a/net/ipv4/netfilter/ipt_REJECT.c +++ b/net/ipv4/netfilter/ipt_REJECT.c @@ -56,7 +56,8 @@ reject_tg(struct sk_buff *skb, const struct xt_action_param *par) nf_send_unreach(skb, ICMP_PKT_FILTERED, hook); break; case IPT_TCP_RESET: - nf_send_reset(xt_net(par), skb, hook); + nf_send_reset(xt_net(par), par->state->sk, skb, hook); + break; case IPT_ICMP_ECHOREPLY: /* Doesn't happen. */ break; diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c index 93b07739807b270854e663b16ac103b159009d72..efe14a6a5d9b8aa4206810b3c2f3f9058ac08910 100644 --- a/net/ipv4/netfilter/nf_reject_ipv4.c +++ b/net/ipv4/netfilter/nf_reject_ipv4.c @@ -112,7 +112,8 @@ static int nf_reject_fill_skb_dst(struct sk_buff *skb_in) } /* Send RST reply */ -void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook) +void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb, + int hook) { struct net_device *br_indev __maybe_unused; struct sk_buff *nskb; @@ -144,8 +145,7 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook) niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP, ip4_dst_hoplimit(skb_dst(nskb))); nf_reject_ip_tcphdr_put(nskb, oldskb, oth); - - if (ip_route_me_harder(net, nskb->sk, nskb, RTN_UNSPEC)) + if (ip_route_me_harder(net, sk, nskb, RTN_UNSPEC)) goto free_nskb; niph = ip_hdr(nskb); diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/ipv4/netfilter/nft_reject_ipv4.c index e408f813f5d802a2cde2ab33e5a45033ce2ca2f2..55fc23a8f7a70611ff39af92dd1e2586f6f00aeb 100644 --- a/net/ipv4/netfilter/nft_reject_ipv4.c +++ b/net/ipv4/netfilter/nft_reject_ipv4.c @@ -27,7 +27,8 @@ static void nft_reject_ipv4_eval(const struct nft_expr *expr, nf_send_unreach(pkt->skb, priv->icmp_code, nft_hook(pkt)); break; case NFT_REJECT_TCP_RST: - nf_send_reset(nft_net(pkt), pkt->skb, nft_hook(pkt)); + nf_send_reset(nft_net(pkt), nft_sk(pkt), pkt->skb, + nft_hook(pkt)); break; default: break; diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c index 3ac5485049f09b3ba30a19b7296809236b6c18a8..a35019d2e480ce917711fd6a1aa6f2fe899094b5 100644 --- a/net/ipv6/netfilter/ip6t_REJECT.c +++ b/net/ipv6/netfilter/ip6t_REJECT.c @@ -61,7 +61,7 @@ reject_tg6(struct sk_buff *skb, const struct xt_action_param *par) /* Do nothing */ break; case IP6T_TCP_RESET: - nf_send_reset6(net, skb, xt_hooknum(par)); + nf_send_reset6(net, par->state->sk, skb, xt_hooknum(par)); break; case IP6T_ICMP6_POLICY_FAIL: nf_send_unreach6(net, skb, ICMPV6_POLICY_FAIL, xt_hooknum(par)); diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c index bf95513736c927e847b5d663be26dd3c669b3ad6..832d9f9cd10addeff5ef4556429d5beca815abf6 100644 --- a/net/ipv6/netfilter/nf_reject_ipv6.c +++ b/net/ipv6/netfilter/nf_reject_ipv6.c @@ -141,7 +141,8 @@ static int nf_reject6_fill_skb_dst(struct sk_buff *skb_in) return 0; } -void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) +void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb, + int hook) { struct net_device *br_indev __maybe_unused; struct sk_buff *nskb; @@ -233,7 +234,7 @@ void nf_send_reset6(struct 
net *net, struct sk_buff *oldskb, int hook) dev_queue_xmit(nskb); } else #endif - ip6_local_out(net, nskb->sk, nskb); + ip6_local_out(net, sk, nskb); } EXPORT_SYMBOL_GPL(nf_send_reset6); diff --git a/net/ipv6/netfilter/nft_reject_ipv6.c b/net/ipv6/netfilter/nft_reject_ipv6.c index c1098a1968e1e894632c3e6bd9e659e4ffacc717..ed69c768797ec6f9f0a0c582d25908c298cc18da 100644 --- a/net/ipv6/netfilter/nft_reject_ipv6.c +++ b/net/ipv6/netfilter/nft_reject_ipv6.c @@ -28,7 +28,8 @@ static void nft_reject_ipv6_eval(const struct nft_expr *expr, nft_hook(pkt)); break; case NFT_REJECT_TCP_RST: - nf_send_reset6(nft_net(pkt), pkt->skb, nft_hook(pkt)); + nf_send_reset6(nft_net(pkt), nft_sk(pkt), pkt->skb, + nft_hook(pkt)); break; default: break; diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index 55ac0cc12657c47bd6c2ef0bb95830cb83fcd321..26613e3731d027359af424b53874a8e5140b5af3 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c @@ -682,6 +682,14 @@ __ip_set_put(struct ip_set *set) /* set->ref can be swapped out by ip_set_swap, netlink events (like dump) need * a separate reference counter */ +static void +__ip_set_get_netlink(struct ip_set *set) +{ + write_lock_bh(&ip_set_ref_lock); + set->ref_netlink++; + write_unlock_bh(&ip_set_ref_lock); +} + static void __ip_set_put_netlink(struct ip_set *set) { @@ -1705,11 +1713,11 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set, do { if (retried) { - __ip_set_get(set); + __ip_set_get_netlink(set); nfnl_unlock(NFNL_SUBSYS_IPSET); cond_resched(); nfnl_lock(NFNL_SUBSYS_IPSET); - __ip_set_put(set); + __ip_set_put_netlink(set); } ip_set_lock(set); diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 984855f9eea21a7cdfcd607caf1f1951da6dcf27..2bfdca23185e3069a4c6406d352b48205595d417 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -32,7 +32,9 @@ static LIST_HEAD(nf_tables_expressions); static LIST_HEAD(nf_tables_objects); static LIST_HEAD(nf_tables_flowtables); static LIST_HEAD(nf_tables_destroy_list); +static LIST_HEAD(nf_tables_gc_list); static DEFINE_SPINLOCK(nf_tables_destroy_list_lock); +static DEFINE_SPINLOCK(nf_tables_gc_list_lock); static u64 table_handle; enum { @@ -124,6 +126,9 @@ static void nft_validate_state_update(struct net *net, u8 new_validate_state) static void nf_tables_trans_destroy_work(struct work_struct *w); static DECLARE_WORK(trans_destroy_work, nf_tables_trans_destroy_work); +static void nft_trans_gc_work(struct work_struct *work); +static DECLARE_WORK(trans_gc_work, nft_trans_gc_work); + static void nft_ctx_init(struct nft_ctx *ctx, struct net *net, const struct sk_buff *skb, @@ -298,12 +303,18 @@ static int nft_netdev_register_hooks(struct net *net, } static void nft_netdev_unregister_hooks(struct net *net, - struct list_head *hook_list) + struct list_head *hook_list, + bool release_netdev) { - struct nft_hook *hook; + struct nft_hook *hook, *next; - list_for_each_entry(hook, hook_list, list) + list_for_each_entry_safe(hook, next, hook_list, list) { nf_unregister_net_hook(net, &hook->ops); + if (release_netdev) { + list_del(&hook->list); + kfree_rcu(hook, rcu); + } + } } static int nf_tables_register_hook(struct net *net, @@ -329,9 +340,10 @@ static int nf_tables_register_hook(struct net *net, return nf_register_net_hook(net, &basechain->ops); } -static void nf_tables_unregister_hook(struct net *net, - const struct nft_table *table, - struct nft_chain *chain) +static void 
__nf_tables_unregister_hook(struct net *net, + const struct nft_table *table, + struct nft_chain *chain, + bool release_netdev) { struct nft_base_chain *basechain; const struct nf_hook_ops *ops; @@ -346,11 +358,19 @@ static void nf_tables_unregister_hook(struct net *net, return basechain->type->ops_unregister(net, ops); if (nft_base_chain_netdev(table->family, basechain->ops.hooknum)) - nft_netdev_unregister_hooks(net, &basechain->hook_list); + nft_netdev_unregister_hooks(net, &basechain->hook_list, + release_netdev); else nf_unregister_net_hook(net, &basechain->ops); } +static void nf_tables_unregister_hook(struct net *net, + const struct nft_table *table, + struct nft_chain *chain) +{ + return __nf_tables_unregister_hook(net, table, chain, false); +} + static void nft_trans_commit_list_add_tail(struct net *net, struct nft_trans *trans) { struct nftables_pernet *nft_net; @@ -559,10 +579,6 @@ static int nft_trans_set_add(const struct nft_ctx *ctx, int msg_type, return 0; } -static void nft_setelem_data_deactivate(const struct net *net, - const struct nft_set *set, - struct nft_set_elem *elem); - static int nft_mapelem_deactivate(const struct nft_ctx *ctx, struct nft_set *set, const struct nft_set_iter *iter, @@ -1293,8 +1309,7 @@ static int nft_flush_table(struct nft_ctx *ctx) if (!nft_is_active_next(ctx->net, set)) continue; - if (nft_set_is_anonymous(set) && - !list_empty(&set->bindings)) + if (nft_set_is_anonymous(set)) continue; err = nft_delset(ctx, set); @@ -4501,6 +4516,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk, } INIT_LIST_HEAD(&set->bindings); + refcount_set(&set->refs, 1); set->table = table; write_pnet(&set->net, net); set->ops = ops; @@ -4536,6 +4552,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk, } set->handle = nf_tables_alloc_handle(table); + INIT_LIST_HEAD(&set->pending_update); err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set); if (err < 0) @@ -4560,6 +4577,14 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk, return err; } +static void nft_set_put(struct nft_set *set) +{ + if (refcount_dec_and_test(&set->refs)) { + kfree(set->name); + kvfree(set); + } +} + static void nft_set_destroy(const struct nft_ctx *ctx, struct nft_set *set) { if (WARN_ON(set->use > 0)) @@ -4569,8 +4594,7 @@ static void nft_set_destroy(const struct nft_ctx *ctx, struct nft_set *set) nft_expr_destroy(ctx, set->expr); set->ops->destroy(ctx, set); - kfree(set->name); - kvfree(set); + nft_set_put(set); } static int nf_tables_delset(struct net *net, struct sock *nlsk, @@ -4955,8 +4979,12 @@ static int nf_tables_dump_setelem(const struct nft_ctx *ctx, const struct nft_set_iter *iter, struct nft_set_elem *elem) { + const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv); struct nft_set_dump_args *args; + if (nft_set_elem_expired(ext)) + return 0; + args = container_of(iter, struct nft_set_dump_args, iter); return nf_tables_fill_setelem(args->skb, set, elem); } @@ -5650,7 +5678,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, goto err_elem_expr; } - ext->genmask = nft_genmask_cur(ctx->net) | NFT_SET_ELEM_BUSY_MASK; + ext->genmask = nft_genmask_cur(ctx->net); + err = set->ops->insert(ctx->net, set, &elem, &ext2); if (err) { if (err == -EEXIST) { @@ -5790,9 +5819,9 @@ static void nft_setelem_data_activate(const struct net *net, nft_use_inc_restore(&(*nft_set_ext_obj(ext))->use); } -static void nft_setelem_data_deactivate(const struct net *net, - const struct nft_set *set, - struct nft_set_elem *elem) +void 
nft_setelem_data_deactivate(const struct net *net, + const struct nft_set *set, + struct nft_set_elem *elem) { const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv); @@ -5934,8 +5963,10 @@ static int nf_tables_delsetelem(struct net *net, struct sock *nlsk, if (IS_ERR(set)) return PTR_ERR(set); - if (!list_empty(&set->bindings) && - (set->flags & (NFT_SET_CONSTANT | NFT_SET_ANONYMOUS))) + if (nft_set_is_anonymous(set)) + return -EOPNOTSUPP; + + if (!list_empty(&set->bindings) && (set->flags & NFT_SET_CONSTANT)) return -EBUSY; if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) { @@ -5958,29 +5989,6 @@ static int nf_tables_delsetelem(struct net *net, struct sock *nlsk, return err; } -void nft_set_gc_batch_release(struct rcu_head *rcu) -{ - struct nft_set_gc_batch *gcb; - unsigned int i; - - gcb = container_of(rcu, struct nft_set_gc_batch, head.rcu); - for (i = 0; i < gcb->head.cnt; i++) - nft_set_elem_destroy(gcb->head.set, gcb->elems[i], true); - kfree(gcb); -} - -struct nft_set_gc_batch *nft_set_gc_batch_alloc(const struct nft_set *set, - gfp_t gfp) -{ - struct nft_set_gc_batch *gcb; - - gcb = kzalloc(sizeof(*gcb), gfp); - if (gcb == NULL) - return gcb; - gcb->head.set = set; - return gcb; -} - /* * Stateful objects */ @@ -6856,13 +6864,25 @@ static void nft_unregister_flowtable_hook(struct net *net, FLOW_BLOCK_UNBIND); } -static void nft_unregister_flowtable_net_hooks(struct net *net, - struct list_head *hook_list) +static void __nft_unregister_flowtable_net_hooks(struct net *net, + struct list_head *hook_list, + bool release_netdev) { - struct nft_hook *hook; + struct nft_hook *hook, *next; - list_for_each_entry(hook, hook_list, list) + list_for_each_entry_safe(hook, next, hook_list, list) { nf_unregister_net_hook(net, &hook->ops); + if (release_netdev) { + list_del(&hook->list); + kfree_rcu(hook, rcu); + } + } +} + +static void nft_unregister_flowtable_net_hooks(struct net *net, + struct list_head *hook_list) +{ + __nft_unregister_flowtable_net_hooks(net, hook_list, false); } static int nft_register_flowtable_net_hooks(struct net *net, @@ -8024,6 +8044,190 @@ void nft_chain_del(struct nft_chain *chain) list_del_rcu(&chain->list); } +static void nft_trans_gc_setelem_remove(struct nft_ctx *ctx, + struct nft_trans_gc *trans) +{ + void **priv = trans->priv; + unsigned int i; + + for (i = 0; i < trans->count; i++) { + struct nft_set_elem elem = { + .priv = priv[i], + }; + + nft_setelem_data_deactivate(ctx->net, trans->set, &elem); + trans->set->ops->remove(trans->net, trans->set, &elem); + } +} + +void nft_trans_gc_destroy(struct nft_trans_gc *trans) +{ + nft_set_put(trans->set); + put_net(trans->net); + kfree(trans); +} + +static void nft_trans_gc_trans_free(struct rcu_head *rcu) +{ + struct nft_set_elem elem = {}; + struct nft_trans_gc *trans; + struct nft_ctx ctx = {}; + unsigned int i; + + trans = container_of(rcu, struct nft_trans_gc, rcu); + ctx.net = read_pnet(&trans->set->net); + + for (i = 0; i < trans->count; i++) { + elem.priv = trans->priv[i]; + atomic_dec(&trans->set->nelems); + + nf_tables_set_elem_destroy(&ctx, trans->set, elem.priv); + } + + nft_trans_gc_destroy(trans); +} + +static bool nft_trans_gc_work_done(struct nft_trans_gc *trans) +{ + struct nftables_pernet *nft_net; + struct nft_ctx ctx = {}; + + nft_net = net_generic(trans->net, nf_tables_net_id); + + mutex_lock(&nft_net->commit_mutex); + + /* Check for race with transaction, otherwise this batch refers to + * stale objects that might not be there anymore. 
Skip transaction if + * set has been destroyed from control plane transaction in case gc + * worker loses race. + */ + if (READ_ONCE(nft_net->gc_seq) != trans->seq || trans->set->dead) { + mutex_unlock(&nft_net->commit_mutex); + return false; + } + + ctx.net = trans->net; + ctx.table = trans->set->table; + + nft_trans_gc_setelem_remove(&ctx, trans); + mutex_unlock(&nft_net->commit_mutex); + + return true; +} + +static void nft_trans_gc_work(struct work_struct *work) +{ + struct nft_trans_gc *trans, *next; + LIST_HEAD(trans_gc_list); + + spin_lock(&nf_tables_gc_list_lock); + list_splice_init(&nf_tables_gc_list, &trans_gc_list); + spin_unlock(&nf_tables_gc_list_lock); + + list_for_each_entry_safe(trans, next, &trans_gc_list, list) { + list_del(&trans->list); + if (!nft_trans_gc_work_done(trans)) { + nft_trans_gc_destroy(trans); + continue; + } + call_rcu(&trans->rcu, nft_trans_gc_trans_free); + } +} + +struct nft_trans_gc *nft_trans_gc_alloc(struct nft_set *set, + unsigned int gc_seq, gfp_t gfp) +{ + struct net *net = read_pnet(&set->net); + struct nft_trans_gc *trans; + + trans = kzalloc(sizeof(*trans), gfp); + if (!trans) + return NULL; + + trans->net = maybe_get_net(net); + if (!trans->net) { + kfree(trans); + return NULL; + } + + refcount_inc(&set->refs); + trans->set = set; + trans->seq = gc_seq; + + return trans; +} + +void nft_trans_gc_elem_add(struct nft_trans_gc *trans, void *priv) +{ + trans->priv[trans->count++] = priv; +} + +static void nft_trans_gc_queue_work(struct nft_trans_gc *trans) +{ + spin_lock(&nf_tables_gc_list_lock); + list_add_tail(&trans->list, &nf_tables_gc_list); + spin_unlock(&nf_tables_gc_list_lock); + + schedule_work(&trans_gc_work); +} + +static int nft_trans_gc_space(struct nft_trans_gc *trans) +{ + return NFT_TRANS_GC_BATCHCOUNT - trans->count; +} + +struct nft_trans_gc *nft_trans_gc_queue_async(struct nft_trans_gc *gc, + unsigned int gc_seq, gfp_t gfp) +{ + struct nft_set *set; + + if (nft_trans_gc_space(gc)) + return gc; + + set = gc->set; + nft_trans_gc_queue_work(gc); + + return nft_trans_gc_alloc(set, gc_seq, gfp); +} + +void nft_trans_gc_queue_async_done(struct nft_trans_gc *trans) +{ + if (trans->count == 0) { + nft_trans_gc_destroy(trans); + return; + } + + nft_trans_gc_queue_work(trans); +} + +struct nft_trans_gc *nft_trans_gc_queue_sync(struct nft_trans_gc *gc, gfp_t gfp) +{ + struct nft_set *set; + + if (WARN_ON_ONCE(!lockdep_commit_lock_is_held(gc->net))) + return NULL; + + if (nft_trans_gc_space(gc)) + return gc; + + set = gc->set; + call_rcu(&gc->rcu, nft_trans_gc_trans_free); + + return nft_trans_gc_alloc(set, 0, gfp); +} + +void nft_trans_gc_queue_sync_done(struct nft_trans_gc *trans) +{ + WARN_ON_ONCE(!lockdep_commit_lock_is_held(trans->net)); + + if (trans->count == 0) { + nft_trans_gc_destroy(trans); + return; + } + + call_rcu(&trans->rcu, nft_trans_gc_trans_free); +} + static void nf_tables_module_autoload_cleanup(struct net *net) { struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id); @@ -8168,13 +8372,45 @@ static void nf_tables_commit_audit_log(struct list_head *adl, u32 generation) } } +static void nft_set_commit_update(struct list_head *set_update_list) +{ + struct nft_set *set, *next; + + list_for_each_entry_safe(set, next, set_update_list, pending_update) { + list_del_init(&set->pending_update); + + if (!set->ops->commit) + continue; + + set->ops->commit(set); + } +} + +static unsigned int nft_gc_seq_begin(struct nftables_pernet *nft_net) +{ + unsigned int gc_seq; + + /* Bump gc counter, it becomes odd, this is the 
busy mark. */ + gc_seq = READ_ONCE(nft_net->gc_seq); + WRITE_ONCE(nft_net->gc_seq, ++gc_seq); + + return gc_seq; +} + +static void nft_gc_seq_end(struct nftables_pernet *nft_net, unsigned int gc_seq) +{ + WRITE_ONCE(nft_net->gc_seq, ++gc_seq); +} + static int nf_tables_commit(struct net *net, struct sk_buff *skb) { struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id); struct nft_trans *trans, *next; + LIST_HEAD(set_update_list); struct nft_trans_elem *te; struct nft_chain *chain; struct nft_table *table; + unsigned int gc_seq; LIST_HEAD(adl); int err; @@ -8247,6 +8483,8 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) while (++nft_net->base_seq == 0) ; + gc_seq = nft_gc_seq_begin(nft_net); + /* step 3. Start new generation, rules_gen_X now in use. */ net->nft.gencursor = nft_gencursor_next(net); @@ -8326,6 +8564,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) nft_trans_destroy(trans); break; case NFT_MSG_DELSET: + nft_trans_set(trans)->dead = 1; list_del_rcu(&nft_trans_set(trans)->list); nf_tables_set_notify(&trans->ctx, nft_trans_set(trans), NFT_MSG_DELSET, GFP_KERNEL); @@ -8337,6 +8576,11 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) nf_tables_setelem_notify(&trans->ctx, te->set, &te->elem, NFT_MSG_NEWSETELEM, 0); + if (te->set->ops->commit && + list_empty(&te->set->pending_update)) { + list_add_tail(&te->set->pending_update, + &set_update_list); + } nft_trans_destroy(trans); break; case NFT_MSG_DELSETELEM: @@ -8348,6 +8592,11 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) te->set->ops->remove(net, te->set, &te->elem); atomic_dec(&te->set->nelems); te->set->ndeact--; + if (te->set->ops->commit && + list_empty(&te->set->pending_update)) { + list_add_tail(&te->set->pending_update, + &set_update_list); + } break; case NFT_MSG_NEWOBJ: if (nft_trans_obj_update(trans)) { @@ -8408,9 +8657,13 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) } } + nft_set_commit_update(&set_update_list); + nft_commit_notify(net, NETLINK_CB(skb).portid); nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN); nf_tables_commit_audit_log(&adl, nft_net->base_seq); + + nft_gc_seq_end(nft_net, gc_seq); nf_tables_commit_release(net); return 0; @@ -8464,10 +8717,25 @@ static void nf_tables_abort_release(struct nft_trans *trans) kfree(trans); } +static void nft_set_abort_update(struct list_head *set_update_list) +{ + struct nft_set *set, *next; + + list_for_each_entry_safe(set, next, set_update_list, pending_update) { + list_del_init(&set->pending_update); + + if (!set->ops->abort) + continue; + + set->ops->abort(set); + } +} + static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) { struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id); struct nft_trans *trans, *next; + LIST_HEAD(set_update_list); struct nft_trans_elem *te; if (action == NFNL_ABORT_VALIDATE && @@ -8556,6 +8824,12 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) te = (struct nft_trans_elem *)trans->data; te->set->ops->remove(net, te->set, &te->elem); atomic_dec(&te->set->nelems); + + if (te->set->ops->abort && + list_empty(&te->set->pending_update)) { + list_add_tail(&te->set->pending_update, + &set_update_list); + } break; case NFT_MSG_DELSETELEM: te = (struct nft_trans_elem *)trans->data; @@ -8564,6 +8838,11 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) te->set->ops->activate(net, te->set, &te->elem); te->set->ndeact--; + if 
(te->set->ops->abort && + list_empty(&te->set->pending_update)) { + list_add_tail(&te->set->pending_update, + &set_update_list); + } nft_trans_destroy(trans); break; case NFT_MSG_NEWOBJ: @@ -8604,6 +8883,8 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) } } + nft_set_abort_update(&set_update_list); + synchronize_rcu(); list_for_each_entry_safe_reverse(trans, next, @@ -8624,7 +8905,12 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb, enum nfnl_abort_action action) { struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id); - int ret = __nf_tables_abort(net, action); + unsigned int gc_seq; + int ret; + + gc_seq = nft_gc_seq_begin(nft_net); + ret = __nf_tables_abort(net, action); + nft_gc_seq_end(nft_net, gc_seq); mutex_unlock(&nft_net->commit_mutex); @@ -9230,16 +9516,25 @@ int __nft_release_basechain(struct nft_ctx *ctx) } EXPORT_SYMBOL_GPL(__nft_release_basechain); +static void __nft_release_hook(struct net *net, struct nft_table *table) +{ + struct nft_flowtable *flowtable; + struct nft_chain *chain; + + list_for_each_entry(chain, &table->chains, list) + __nf_tables_unregister_hook(net, table, chain, true); + list_for_each_entry(flowtable, &table->flowtables, list) + __nft_unregister_flowtable_net_hooks(net, &flowtable->hook_list, + true); +} + static void __nft_release_hooks(struct net *net) { struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id); struct nft_table *table; - struct nft_chain *chain; - list_for_each_entry(table, &nft_net->tables, list) { - list_for_each_entry(chain, &table->chains, list) - nf_tables_unregister_hook(net, table, chain); - } + list_for_each_entry(table, &nft_net->tables, list) + __nft_release_hook(net, table); } static void __nft_release_table(struct net *net, struct nft_table *table) @@ -9316,6 +9611,7 @@ static int __net_init nf_tables_init_net(struct net *net) mutex_init(&nft_net->commit_mutex); nft_net->base_seq = 1; nft_net->validate_state = NFT_VALIDATE_SKIP; + nft_net->gc_seq = 0; return 0; } @@ -9332,21 +9628,34 @@ static void __net_exit nf_tables_pre_exit_net(struct net *net) static void __net_exit nf_tables_exit_net(struct net *net) { struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id); + unsigned int gc_seq; mutex_lock(&nft_net->commit_mutex); + + gc_seq = nft_gc_seq_begin(nft_net); + if (!list_empty(&nft_net->commit_list)) __nf_tables_abort(net, NFNL_ABORT_NONE); __nft_release_tables(net); + + nft_gc_seq_end(nft_net, gc_seq); + mutex_unlock(&nft_net->commit_mutex); WARN_ON_ONCE(!list_empty(&nft_net->tables)); WARN_ON_ONCE(!list_empty(&nft_net->module_list)); WARN_ON_ONCE(!list_empty(&nft_net->notify_list)); } +static void nf_tables_exit_batch(struct list_head *net_exit_list) +{ + flush_work(&trans_gc_work); +} + static struct pernet_operations nf_tables_net_ops = { .init = nf_tables_init_net, .pre_exit = nf_tables_pre_exit_net, .exit = nf_tables_exit_net, + .exit_batch = nf_tables_exit_batch, .id = &nf_tables_net_id, .size = sizeof(struct nftables_pernet), }; @@ -9411,6 +9720,7 @@ static void __exit nf_tables_module_exit(void) nft_chain_filter_fini(); nft_chain_route_fini(); unregister_pernet_subsys(&nf_tables_net_ops); + cancel_work_sync(&trans_gc_work); cancel_work_sync(&trans_destroy_work); rcu_barrier(); rhltable_destroy(&nft_objname_ht); diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c index 9dc18429ed875e3164b3a219cd9634adb0b6e217..b0d711d498c66cd048b2aec102b91012d81d8d85 100644 --- a/net/netfilter/nf_tables_core.c +++ 
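The commit, abort and netns-exit paths in the hunks above bracket every ruleset transaction with nft_gc_seq_begin()/nft_gc_seq_end(): the per-netns counter is bumped to an odd value while the transaction is in flight and back to an even value when it completes, so an asynchronous GC worker that snapshotted the counter before walking a set can tell that the ruleset may have changed underneath it and must retry. A minimal user-space sketch of that busy-mark pattern follows; names are invented, and the kernel relies on commit_mutex plus READ_ONCE/WRITE_ONCE rather than C11 atomics.

/* Illustrative only: odd-while-busy sequence counter as used by the hunks
 * above; not the kernel implementation.
 */
#include <stdatomic.h>
#include <stdbool.h>

static _Atomic unsigned int gc_seq;     /* even: idle, odd: transaction in flight */

static unsigned int txn_begin(void)
{
	return atomic_fetch_add(&gc_seq, 1) + 1;   /* counter becomes odd */
}

static void txn_end(void)
{
	atomic_fetch_add(&gc_seq, 1);              /* counter becomes even again */
}

/* GC worker: snapshot before walking the set, re-check before committing a
 * batch. A transaction that ran (or is still running) since the snapshot
 * leaves the counter changed or odd, so the stale batch is discarded and
 * the scan is retried later.
 */
static bool gc_batch_still_valid(unsigned int snapshot)
{
	return atomic_load(&gc_seq) == snapshot && !(snapshot & 1);
}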
b/net/netfilter/nf_tables_core.c @@ -125,7 +125,7 @@ static bool nft_payload_fast_eval(const struct nft_expr *expr, else { if (!pkt->tprot_set) return false; - ptr = skb_network_header(skb) + pkt->xt.thoff; + ptr = skb_network_header(skb) + nft_thoff(pkt); } ptr += priv->offset; diff --git a/net/netfilter/nf_tables_trace.c b/net/netfilter/nf_tables_trace.c index 0cf3278007ba50d3cf306f4e0dce1a00ac995444..e4fe2f0780eb6fc284f702334ab5be47f24f0033 100644 --- a/net/netfilter/nf_tables_trace.c +++ b/net/netfilter/nf_tables_trace.c @@ -113,17 +113,17 @@ static int nf_trace_fill_pkt_info(struct sk_buff *nlskb, int off = skb_network_offset(skb); unsigned int len, nh_end; - nh_end = pkt->tprot_set ? pkt->xt.thoff : skb->len; + nh_end = pkt->tprot_set ? nft_thoff(pkt) : skb->len; len = min_t(unsigned int, nh_end - skb_network_offset(skb), NFT_TRACETYPE_NETWORK_HSIZE); if (trace_fill_header(nlskb, NFTA_TRACE_NETWORK_HEADER, skb, off, len)) return -1; if (pkt->tprot_set) { - len = min_t(unsigned int, skb->len - pkt->xt.thoff, + len = min_t(unsigned int, skb->len - nft_thoff(pkt), NFT_TRACETYPE_TRANSPORT_HSIZE); if (trace_fill_header(nlskb, NFTA_TRACE_TRANSPORT_HEADER, skb, - pkt->xt.thoff, len)) + nft_thoff(pkt), len)) return -1; } diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c index ca268293cfa12248c3f90ee0ccfbe424520e6bee..2431fed183fff280a0fec4aee4b3d703ef87d651 100644 --- a/net/netfilter/nft_exthdr.c +++ b/net/netfilter/nft_exthdr.c @@ -10,8 +10,10 @@ #include #include #include +#include #include #include +#include #include struct nft_exthdr { @@ -174,7 +176,7 @@ nft_tcp_header_pointer(const struct nft_pktinfo *pkt, if (!pkt->tprot_set || pkt->tprot != IPPROTO_TCP) return NULL; - tcph = skb_header_pointer(pkt->skb, pkt->xt.thoff, sizeof(*tcph), buffer); + tcph = skb_header_pointer(pkt->skb, nft_thoff(pkt), sizeof(*tcph), buffer); if (!tcph) return NULL; @@ -182,7 +184,7 @@ nft_tcp_header_pointer(const struct nft_pktinfo *pkt, if (*tcphdr_len < sizeof(*tcph) || *tcphdr_len > len) return NULL; - return skb_header_pointer(pkt->skb, pkt->xt.thoff, *tcphdr_len, buffer); + return skb_header_pointer(pkt->skb, nft_thoff(pkt), *tcphdr_len, buffer); } static void nft_exthdr_tcp_eval(const struct nft_expr *expr, @@ -241,9 +243,14 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr, tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len); if (!tcph) - return; + goto err; + + if (skb_ensure_writable(pkt->skb, nft_thoff(pkt) + tcphdr_len)) + goto err; + tcph = (struct tcphdr *)(pkt->skb->data + nft_thoff(pkt)); opt = (u8 *)tcph; + for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) { union { __be16 v16; @@ -256,16 +263,7 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr, continue; if (i + optl > tcphdr_len || priv->len + priv->offset > optl) - return; - - if (skb_ensure_writable(pkt->skb, - pkt->xt.thoff + i + priv->len)) - return; - - tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, - &tcphdr_len); - if (!tcph) - return; + goto err; offset = i + priv->offset; @@ -308,6 +306,103 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr, return; } + return; +err: + regs->verdict.code = NFT_BREAK; +} + +static void nft_exthdr_tcp_strip_eval(const struct nft_expr *expr, + struct nft_regs *regs, + const struct nft_pktinfo *pkt) +{ + u8 buff[sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE]; + struct nft_exthdr *priv = nft_expr_priv(expr); + unsigned int i, tcphdr_len, optl; + struct tcphdr *tcph; + u8 *opt; + + tcph = 
nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len); + if (!tcph) + goto err; + + if (skb_ensure_writable(pkt->skb, nft_thoff(pkt) + tcphdr_len)) + goto drop; + + tcph = (struct tcphdr *)(pkt->skb->data + nft_thoff(pkt)); + opt = (u8 *)tcph; + + for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) { + unsigned int j; + + optl = optlen(opt, i); + if (priv->type != opt[i]) + continue; + + if (i + optl > tcphdr_len) + goto drop; + + for (j = 0; j < optl; ++j) { + u16 n = TCPOPT_NOP; + u16 o = opt[i+j]; + + if ((i + j) % 2 == 0) { + o <<= 8; + n <<= 8; + } + inet_proto_csum_replace2(&tcph->check, pkt->skb, htons(o), + htons(n), false); + } + memset(opt + i, TCPOPT_NOP, optl); + return; + } + + /* option not found, continue. This allows to do multiple + * option removals per rule. + */ + return; +err: + regs->verdict.code = NFT_BREAK; + return; +drop: + /* can't remove, no choice but to drop */ + regs->verdict.code = NF_DROP; +} + +static void nft_exthdr_sctp_eval(const struct nft_expr *expr, + struct nft_regs *regs, + const struct nft_pktinfo *pkt) +{ + unsigned int offset = nft_thoff(pkt) + sizeof(struct sctphdr); + struct nft_exthdr *priv = nft_expr_priv(expr); + u32 *dest = ®s->data[priv->dreg]; + const struct sctp_chunkhdr *sch; + struct sctp_chunkhdr _sch; + + do { + sch = skb_header_pointer(pkt->skb, offset, sizeof(_sch), &_sch); + if (!sch || !sch->length) + break; + + if (sch->type == priv->type) { + if (priv->flags & NFT_EXTHDR_F_PRESENT) { + nft_reg_store8(dest, true); + return; + } + if (priv->offset + priv->len > ntohs(sch->length) || + offset + ntohs(sch->length) > pkt->skb->len) + break; + + dest[priv->len / NFT_REG32_SIZE] = 0; + memcpy(dest, (char *)sch + priv->offset, priv->len); + return; + } + offset += SCTP_PAD4(ntohs(sch->length)); + } while (offset < pkt->skb->len); + + if (priv->flags & NFT_EXTHDR_F_PRESENT) + nft_reg_store8(dest, false); + else + regs->verdict.code = NFT_BREAK; } static const struct nla_policy nft_exthdr_policy[NFTA_EXTHDR_MAX + 1] = { @@ -417,6 +512,28 @@ static int nft_exthdr_tcp_set_init(const struct nft_ctx *ctx, priv->len); } +static int nft_exthdr_tcp_strip_init(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nlattr * const tb[]) +{ + struct nft_exthdr *priv = nft_expr_priv(expr); + + if (tb[NFTA_EXTHDR_SREG] || + tb[NFTA_EXTHDR_DREG] || + tb[NFTA_EXTHDR_FLAGS] || + tb[NFTA_EXTHDR_OFFSET] || + tb[NFTA_EXTHDR_LEN]) + return -EINVAL; + + if (!tb[NFTA_EXTHDR_TYPE]) + return -EINVAL; + + priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]); + priv->op = NFT_EXTHDR_OP_TCPOPT; + + return 0; +} + static int nft_exthdr_ipv4_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) @@ -477,6 +594,13 @@ static int nft_exthdr_dump_set(struct sk_buff *skb, const struct nft_expr *expr) return nft_exthdr_dump_common(skb, priv); } +static int nft_exthdr_dump_strip(struct sk_buff *skb, const struct nft_expr *expr) +{ + const struct nft_exthdr *priv = nft_expr_priv(expr); + + return nft_exthdr_dump_common(skb, priv); +} + static const struct nft_expr_ops nft_exthdr_ipv6_ops = { .type = &nft_exthdr_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)), @@ -509,6 +633,22 @@ static const struct nft_expr_ops nft_exthdr_tcp_set_ops = { .dump = nft_exthdr_dump_set, }; +static const struct nft_expr_ops nft_exthdr_tcp_strip_ops = { + .type = &nft_exthdr_type, + .size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)), + .eval = nft_exthdr_tcp_strip_eval, + .init = nft_exthdr_tcp_strip_init, + .dump = 
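nft_exthdr_tcp_strip_eval() above blanks the matched TCP option with TCPOPT_NOP bytes and keeps the TCP checksum valid incrementally: each replaced byte is paired into its 16-bit word (the (i + j) % 2 test decides whether it is the high or low octet of that word) and inet_proto_csum_replace2() folds the old/new word difference into tcph->check. The arithmetic behind that helper is the RFC 1624 incremental update; the stand-alone sketch below shows only that arithmetic (hypothetical helper name, words taken in network byte order), not the kernel helper, which additionally handles CHECKSUM_COMPLETE skbs.

/* HC' = ~(~HC + ~m + m'), RFC 1624 eqn. 3 — sketch only. */
#include <stdint.h>

static uint16_t csum_replace_word(uint16_t check, uint16_t old_word, uint16_t new_word)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~old_word;        /* remove the old word (ones' complement) */
	sum += new_word;                   /* add the new word */

	while (sum >> 16)                  /* fold carries back into 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);

	return (uint16_t)~sum;
}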
nft_exthdr_dump_strip, +}; + +static const struct nft_expr_ops nft_exthdr_sctp_ops = { + .type = &nft_exthdr_type, + .size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)), + .eval = nft_exthdr_sctp_eval, + .init = nft_exthdr_init, + .dump = nft_exthdr_dump, +}; + static const struct nft_expr_ops * nft_exthdr_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[]) @@ -528,7 +668,7 @@ nft_exthdr_select_ops(const struct nft_ctx *ctx, return &nft_exthdr_tcp_set_ops; if (tb[NFTA_EXTHDR_DREG]) return &nft_exthdr_tcp_ops; - break; + return &nft_exthdr_tcp_strip_ops; case NFT_EXTHDR_OP_IPV6: if (tb[NFTA_EXTHDR_DREG]) return &nft_exthdr_ipv6_ops; @@ -539,6 +679,10 @@ nft_exthdr_select_ops(const struct nft_ctx *ctx, return &nft_exthdr_ipv4_ops; } break; + case NFT_EXTHDR_OP_SCTP: + if (tb[NFTA_EXTHDR_DREG]) + return &nft_exthdr_sctp_ops; + break; } return ERR_PTR(-EOPNOTSUPP); diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c index d868eade601763432e97bc6d7f918d2ccbedca83..a44340dd3ce649aedc600d9cbfd3e254460b7546 100644 --- a/net/netfilter/nft_flow_offload.c +++ b/net/netfilter/nft_flow_offload.c @@ -90,7 +90,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr, switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) { case IPPROTO_TCP: - tcph = skb_header_pointer(pkt->skb, pkt->xt.thoff, + tcph = skb_header_pointer(pkt->skb, nft_thoff(pkt), sizeof(_tcph), &_tcph); if (unlikely(!tcph || tcph->fin || tcph->rst)) goto out; diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c index 74c220eeec1a87daccf9eb21595da899eb48ee5d..b2b63c3653d495b2a4c022fd132c1a068cea6786 100644 --- a/net/netfilter/nft_payload.c +++ b/net/netfilter/nft_payload.c @@ -110,7 +110,7 @@ void nft_payload_eval(const struct nft_expr *expr, case NFT_PAYLOAD_TRANSPORT_HEADER: if (!pkt->tprot_set) goto err; - offset = pkt->xt.thoff; + offset = nft_thoff(pkt); break; default: BUG(); @@ -510,7 +510,7 @@ static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt, *l4csum_offset = offsetof(struct tcphdr, check); break; case IPPROTO_UDP: - if (!nft_payload_udp_checksum(skb, pkt->xt.thoff)) + if (!nft_payload_udp_checksum(skb, nft_thoff(pkt))) return -1; fallthrough; case IPPROTO_UDPLITE: @@ -523,7 +523,7 @@ static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt, return -1; } - *l4csum_offset += pkt->xt.thoff; + *l4csum_offset += nft_thoff(pkt); return 0; } @@ -615,7 +615,7 @@ static void nft_payload_set_eval(const struct nft_expr *expr, case NFT_PAYLOAD_TRANSPORT_HEADER: if (!pkt->tprot_set) goto err; - offset = pkt->xt.thoff; + offset = nft_thoff(pkt); break; default: BUG(); @@ -646,7 +646,7 @@ static void nft_payload_set_eval(const struct nft_expr *expr, if (priv->csum_type == NFT_PAYLOAD_CSUM_SCTP && pkt->tprot == IPPROTO_SCTP && skb->ip_summed != CHECKSUM_PARTIAL) { - if (nft_payload_csum_sctp(skb, pkt->xt.thoff)) + if (nft_payload_csum_sctp(skb, nft_thoff(pkt))) goto err; } diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c index cf8f2646e93c6e0fbe4ca8e8df66c8b90c40dad1..c00b94a1668245e13e6ceb72a16f86bf2c11b17c 100644 --- a/net/netfilter/nft_reject_inet.c +++ b/net/netfilter/nft_reject_inet.c @@ -28,7 +28,8 @@ static void nft_reject_inet_eval(const struct nft_expr *expr, nft_hook(pkt)); break; case NFT_REJECT_TCP_RST: - nf_send_reset(nft_net(pkt), pkt->skb, nft_hook(pkt)); + nf_send_reset(nft_net(pkt), nft_sk(pkt), + pkt->skb, nft_hook(pkt)); break; case NFT_REJECT_ICMPX_UNREACH: nf_send_unreach(pkt->skb, @@ 
-44,7 +45,8 @@ static void nft_reject_inet_eval(const struct nft_expr *expr, priv->icmp_code, nft_hook(pkt)); break; case NFT_REJECT_TCP_RST: - nf_send_reset6(nft_net(pkt), pkt->skb, nft_hook(pkt)); + nf_send_reset6(nft_net(pkt), nft_sk(pkt), + pkt->skb, nft_hook(pkt)); break; case NFT_REJECT_ICMPX_UNREACH: nf_send_unreach6(nft_net(pkt), pkt->skb, diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c index 51d3e6f0934a9361c8ff32a4a29a4e1f4d9ca014..f0a9ad1c4ea44230198dee53f61ce430461743d7 100644 --- a/net/netfilter/nft_set_hash.c +++ b/net/netfilter/nft_set_hash.c @@ -17,6 +17,9 @@ #include #include #include +#include + +extern unsigned int nf_tables_net_id; /* We target a hash table size of 4, element hint is 75% of final size */ #define NFT_RHASH_ELEMENT_HINT 3 @@ -59,6 +62,8 @@ static inline int nft_rhash_cmp(struct rhashtable_compare_arg *arg, if (memcmp(nft_set_ext_key(&he->ext), x->key, x->set->klen)) return 1; + if (nft_set_elem_is_dead(&he->ext)) + return 1; if (nft_set_elem_expired(&he->ext)) return 1; if (!nft_set_elem_active(&he->ext, x->genmask)) @@ -187,7 +192,6 @@ static void nft_rhash_activate(const struct net *net, const struct nft_set *set, struct nft_rhash_elem *he = elem->priv; nft_set_elem_change_active(net, set, &he->ext); - nft_set_elem_clear_busy(&he->ext); } static bool nft_rhash_flush(const struct net *net, @@ -195,12 +199,9 @@ static bool nft_rhash_flush(const struct net *net, { struct nft_rhash_elem *he = priv; - if (!nft_set_elem_mark_busy(&he->ext) || - !nft_is_active(net, &he->ext)) { - nft_set_elem_change_active(net, set, &he->ext); - return true; - } - return false; + nft_set_elem_change_active(net, set, &he->ext); + + return true; } static void *nft_rhash_deactivate(const struct net *net, @@ -217,9 +218,8 @@ static void *nft_rhash_deactivate(const struct net *net, rcu_read_lock(); he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params); - if (he != NULL && - !nft_rhash_flush(net, set, he)) - he = NULL; + if (he) + nft_set_elem_change_active(net, set, &he->ext); rcu_read_unlock(); @@ -251,7 +251,9 @@ static bool nft_rhash_delete(const struct nft_set *set, if (he == NULL) return false; - return rhashtable_remove_fast(&priv->ht, &he->node, nft_rhash_params) == 0; + nft_set_elem_dead(&he->ext); + + return true; } static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set, @@ -277,8 +279,6 @@ static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set, if (iter->count < iter->skip) goto cont; - if (nft_set_elem_expired(&he->ext)) - goto cont; if (!nft_set_elem_active(&he->ext, iter->genmask)) goto cont; @@ -297,49 +297,75 @@ static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set, static void nft_rhash_gc(struct work_struct *work) { + struct nftables_pernet *nft_net; struct nft_set *set; struct nft_rhash_elem *he; struct nft_rhash *priv; - struct nft_set_gc_batch *gcb = NULL; struct rhashtable_iter hti; + struct nft_trans_gc *gc; + struct net *net; + u32 gc_seq; priv = container_of(work, struct nft_rhash, gc_work.work); set = nft_set_container_of(priv); + net = read_pnet(&set->net); + nft_net = net_generic(net, nf_tables_net_id); + gc_seq = READ_ONCE(nft_net->gc_seq); + + if (nft_set_gc_is_pending(set)) + goto done; + + gc = nft_trans_gc_alloc(set, gc_seq, GFP_KERNEL); + if (!gc) + goto done; rhashtable_walk_enter(&priv->ht, &hti); rhashtable_walk_start(&hti); while ((he = rhashtable_walk_next(&hti))) { if (IS_ERR(he)) { - if (PTR_ERR(he) != -EAGAIN) - break; - continue; + 
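In the rhashtable backend above, removal becomes two-phase: nft_rhash_delete() and the GC worker only set NFT_SET_ELEM_DEAD_BIT, nft_rhash_cmp() makes dead entries fail lookup, and the actual unlink and free happen later from the transaction GC after an RCU grace period. A compact sketch of that logical-then-physical removal is below, with invented names and a plain list; the real code defers the free with call_rcu() so lockless readers never touch freed memory.

/* Two-phase removal sketch; illustrative only. */
#include <stdbool.h>
#include <stdlib.h>

struct elem {
	struct elem *next;
	int key;
	bool dead;                     /* stands in for NFT_SET_ELEM_DEAD_BIT */
};

static struct elem *lookup(struct elem *head, int key)
{
	for (struct elem *e = head; e; e = e->next)
		if (e->key == key && !e->dead)   /* dead entries no longer match */
			return e;
	return NULL;
}

static void logical_delete(struct elem *e)
{
	e->dead = true;                /* element stays linked for now */
}

/* GC pass: unlink and free everything previously marked dead. */
static void reap(struct elem **head)
{
	while (*head) {
		struct elem *e = *head;
		if (e->dead) {
			*head = e->next;
			free(e);       /* kernel: queued, freed after RCU grace period */
		} else {
			head = &e->next;
		}
	}
}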
nft_trans_gc_destroy(gc); + gc = NULL; + goto try_later; } + /* Ruleset has been updated, try later. */ + if (READ_ONCE(nft_net->gc_seq) != gc_seq) { + nft_trans_gc_destroy(gc); + gc = NULL; + goto try_later; + } + + if (nft_set_elem_is_dead(&he->ext)) + goto dead_elem; + if (nft_set_ext_exists(&he->ext, NFT_SET_EXT_EXPR)) { struct nft_expr *expr = nft_set_ext_expr(&he->ext); if (expr->ops->gc && expr->ops->gc(read_pnet(&set->net), expr)) - goto gc; + goto needs_gc_run; } + if (!nft_set_elem_expired(&he->ext)) continue; -gc: - if (nft_set_elem_mark_busy(&he->ext)) - continue; - - gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC); - if (gcb == NULL) - break; - rhashtable_remove_fast(&priv->ht, &he->node, nft_rhash_params); - atomic_dec(&set->nelems); - nft_set_gc_batch_add(gcb, he); +needs_gc_run: + nft_set_elem_dead(&he->ext); +dead_elem: + gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC); + if (!gc) + goto try_later; + + nft_trans_gc_elem_add(gc, he); } + +try_later: rhashtable_walk_stop(&hti); rhashtable_walk_exit(&hti); - nft_set_gc_batch_complete(gcb); + if (gc) + nft_trans_gc_queue_async_done(gc); +done: queue_delayed_work(system_power_efficient_wq, &priv->gc_work, nft_set_gc_interval(set)); } @@ -374,7 +400,7 @@ static int nft_rhash_init(const struct nft_set *set, return err; INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rhash_gc); - if (set->flags & NFT_SET_TIMEOUT) + if (set->flags & (NFT_SET_TIMEOUT | NFT_SET_EVAL)) nft_rhash_gc_init(set); return 0; @@ -402,7 +428,6 @@ static void nft_rhash_destroy(const struct nft_ctx *ctx, }; cancel_delayed_work_sync(&priv->gc_work); - rcu_barrier(); rhashtable_free_and_destroy(&priv->ht, nft_rhash_elem_destroy, (void *)&rhash_ctx); } diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c index 4865a7c0ea5e4e66e75f0f25375406e3f3d0049e..e9478cdffefee48054087e8f5675ab33e046badc 100644 --- a/net/netfilter/nft_set_pipapo.c +++ b/net/netfilter/nft_set_pipapo.c @@ -566,8 +566,9 @@ static struct nft_pipapo_elem *pipapo_get(const struct net *net, goto out; if (last) { - if (nft_set_elem_expired(&f->mt[b].e->ext) || - (genmask && + if (nft_set_elem_expired(&f->mt[b].e->ext)) + goto next_match; + if ((genmask && !nft_set_elem_active(&f->mt[b].e->ext, genmask))) goto next_match; @@ -1536,15 +1537,32 @@ static void pipapo_drop(struct nft_pipapo_match *m, } } +static void nft_pipapo_gc_deactivate(struct net *net, struct nft_set *set, + struct nft_pipapo_elem *e) +{ + struct nft_set_elem elem = { + .priv = e, + }; + + nft_setelem_data_deactivate(net, set, &elem); +} + /** * pipapo_gc() - Drop expired entries from set, destroy start and end elements - * @set: nftables API set representation + * @_set: nftables API set representation * @m: Matching data */ -static void pipapo_gc(const struct nft_set *set, struct nft_pipapo_match *m) +static void pipapo_gc(const struct nft_set *_set, struct nft_pipapo_match *m) { + struct nft_set *set = (struct nft_set *) _set; struct nft_pipapo *priv = nft_set_priv(set); + struct net *net = read_pnet(&set->net); int rules_f0, first_rule = 0; + struct nft_trans_gc *gc; + + gc = nft_trans_gc_alloc(set, 0, GFP_KERNEL); + if (!gc) + return; while ((rules_f0 = pipapo_rules_same_key(m->f, first_rule))) { union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS]; @@ -1569,13 +1587,19 @@ static void pipapo_gc(const struct nft_set *set, struct nft_pipapo_match *m) f--; i--; e = f->mt[rulemap[i].to].e; - if (nft_set_elem_expired(&e->ext) && - !nft_set_elem_mark_busy(&e->ext)) { + /* synchronous gc never fails, 
there is no need to set on + * NFT_SET_ELEM_DEAD_BIT. + */ + if (nft_set_elem_expired(&e->ext)) { priv->dirty = true; - pipapo_drop(m, rulemap); - rcu_barrier(); - nft_set_elem_destroy(set, e, true); + gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC); + if (!gc) + return; + + nft_pipapo_gc_deactivate(net, set, e); + pipapo_drop(m, rulemap); + nft_trans_gc_elem_add(gc, e); /* And check again current first rule, which is now the * first we haven't checked. @@ -1585,7 +1609,10 @@ static void pipapo_gc(const struct nft_set *set, struct nft_pipapo_match *m) } } - priv->last_gc = jiffies; + if (gc) { + nft_trans_gc_queue_sync_done(gc); + priv->last_gc = jiffies; + } } /** @@ -1603,17 +1630,10 @@ static void pipapo_free_fields(struct nft_pipapo_match *m) } } -/** - * pipapo_reclaim_match - RCU callback to free fields from old matching data - * @rcu: RCU head - */ -static void pipapo_reclaim_match(struct rcu_head *rcu) +static void pipapo_free_match(struct nft_pipapo_match *m) { - struct nft_pipapo_match *m; int i; - m = container_of(rcu, struct nft_pipapo_match, rcu); - for_each_possible_cpu(i) kfree(*per_cpu_ptr(m->scratch, i)); @@ -1628,7 +1648,19 @@ static void pipapo_reclaim_match(struct rcu_head *rcu) } /** - * pipapo_commit() - Replace lookup data with current working copy + * pipapo_reclaim_match - RCU callback to free fields from old matching data + * @rcu: RCU head + */ +static void pipapo_reclaim_match(struct rcu_head *rcu) +{ + struct nft_pipapo_match *m; + + m = container_of(rcu, struct nft_pipapo_match, rcu); + pipapo_free_match(m); +} + +/** + * nft_pipapo_commit() - Replace lookup data with current working copy * @set: nftables API set representation * * While at it, check if we should perform garbage collection on the working @@ -1638,7 +1670,7 @@ static void pipapo_reclaim_match(struct rcu_head *rcu) * We also need to create a new working copy for subsequent insertions and * deletions. */ -static void pipapo_commit(const struct nft_set *set) +static void nft_pipapo_commit(const struct nft_set *set) { struct nft_pipapo *priv = nft_set_priv(set); struct nft_pipapo_match *new_clone, *old; @@ -1663,6 +1695,26 @@ static void pipapo_commit(const struct nft_set *set) priv->clone = new_clone; } +static void nft_pipapo_abort(const struct nft_set *set) +{ + struct nft_pipapo *priv = nft_set_priv(set); + struct nft_pipapo_match *new_clone, *m; + + if (!priv->dirty) + return; + + m = rcu_dereference(priv->match); + + new_clone = pipapo_clone(m); + if (IS_ERR(new_clone)) + return; + + priv->dirty = false; + + pipapo_free_match(priv->clone); + priv->clone = new_clone; +} + /** * nft_pipapo_activate() - Mark element reference as active given key, commit * @net: Network namespace @@ -1670,8 +1722,7 @@ static void pipapo_commit(const struct nft_set *set) * @elem: nftables API element representation containing key data * * On insertion, elements are added to a copy of the matching data currently - * in use for lookups, and not directly inserted into current lookup data, so - * we'll take care of that by calling pipapo_commit() here. Both + * in use for lookups, and not directly inserted into current lookup data. Both * nft_pipapo_insert() and nft_pipapo_activate() are called once for each * element, hence we can't purpose either one as a real commit operation. 
*/ @@ -1679,16 +1730,9 @@ static void nft_pipapo_activate(const struct net *net, const struct nft_set *set, const struct nft_set_elem *elem) { - struct nft_pipapo_elem *e; - - e = pipapo_get(net, set, (const u8 *)elem->key.val.data, 0); - if (IS_ERR(e)) - return; + struct nft_pipapo_elem *e = elem->priv; nft_set_elem_change_active(net, set, &e->ext); - nft_set_elem_clear_busy(&e->ext); - - pipapo_commit(set); } /** @@ -1900,10 +1944,6 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set, data = (const u8 *)nft_set_ext_key(&e->ext); - e = pipapo_get(net, set, data, 0); - if (IS_ERR(e)) - return; - while ((rules_f0 = pipapo_rules_same_key(m->f, first_rule))) { union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS]; const u8 *match_start, *match_end; @@ -1938,7 +1978,6 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set, if (i == m->field_count) { priv->dirty = true; pipapo_drop(m, rulemap); - pipapo_commit(set); return; } @@ -2245,6 +2284,8 @@ const struct nft_set_type nft_set_pipapo_type = { .init = nft_pipapo_init, .destroy = nft_pipapo_destroy, .gc_init = nft_pipapo_gc_init, + .commit = nft_pipapo_commit, + .abort = nft_pipapo_abort, .elemsize = offsetof(struct nft_pipapo_elem, ext), }, }; @@ -2267,6 +2308,8 @@ const struct nft_set_type nft_set_pipapo_avx2_type = { .init = nft_pipapo_init, .destroy = nft_pipapo_destroy, .gc_init = nft_pipapo_gc_init, + .commit = nft_pipapo_commit, + .abort = nft_pipapo_abort, .elemsize = offsetof(struct nft_pipapo_elem, ext), }, }; diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index eae760adae4d579a9c35a554a8cb9a75ee68e066..cc32e19b4041a96a181adba17d5aa838a31bd084 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c @@ -14,6 +14,9 @@ #include #include #include +#include + +extern unsigned int nf_tables_net_id; struct nft_rbtree { struct rb_root root; @@ -46,6 +49,12 @@ static int nft_rbtree_cmp(const struct nft_set *set, set->klen); } +static bool nft_rbtree_elem_expired(const struct nft_rbtree_elem *rbe) +{ + return nft_set_elem_expired(&rbe->ext) || + nft_set_elem_is_dead(&rbe->ext); +} + static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set, const u32 *key, const struct nft_set_ext **ext, unsigned int seq) @@ -80,7 +89,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set continue; } - if (nft_set_elem_expired(&rbe->ext)) + if (nft_rbtree_elem_expired(rbe)) return false; if (nft_rbtree_interval_end(rbe)) { @@ -98,7 +107,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set if (set->flags & NFT_SET_INTERVAL && interval != NULL && nft_set_elem_active(&interval->ext, genmask) && - !nft_set_elem_expired(&interval->ext) && + !nft_rbtree_elem_expired(interval) && nft_rbtree_interval_start(interval)) { *ext = &interval->ext; return true; @@ -214,6 +223,18 @@ static void *nft_rbtree_get(const struct net *net, const struct nft_set *set, return rbe; } +static void nft_rbtree_gc_remove(struct net *net, struct nft_set *set, + struct nft_rbtree *priv, + struct nft_rbtree_elem *rbe) +{ + struct nft_set_elem elem = { + .priv = rbe, + }; + + nft_setelem_data_deactivate(net, set, &elem); + rb_erase(&rbe->node, &priv->root); +} + static int nft_rbtree_gc_elem(const struct nft_set *__set, struct nft_rbtree *priv, struct nft_rbtree_elem *rbe, @@ -221,11 +242,12 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set, { struct nft_set *set = (struct nft_set 
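The pipapo backend above keeps a private working copy (priv->clone) that insertions and deletions mutate; the new set->ops->commit hook publishes it with rcu_assign_pointer() and reclaims the previous lookup copy after a grace period, while ->abort throws the dirty clone away and re-clones the still-live match. Below is a stripped-down sketch of that publish/rollback shape, with invented names, a plain atomic pointer instead of RCU, and error handling omitted; an update path would set p->dirty before calling either hook.

/* Publish/rollback sketch; the kernel frees the old copy from an RCU
 * callback rather than immediately.
 */
#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

struct match { int nelems; /* ... lookup data ... */ };

struct set_priv {
	_Atomic(struct match *) live;  /* what the datapath reads */
	struct match *clone;           /* what control-plane updates mutate */
	int dirty;
};

static struct match *clone_of(const struct match *m)
{
	struct match *c = malloc(sizeof(*c));

	if (c)
		memcpy(c, m, sizeof(*c));
	return c;
}

static void commit_txn(struct set_priv *p)   /* ->commit: publish the clone */
{
	struct match *old = atomic_exchange(&p->live, p->clone);

	p->clone = clone_of(p->clone);           /* fresh working copy for next txn */
	p->dirty = 0;
	free(old);                               /* kernel: call_rcu() instead */
}

static void abort_txn(struct set_priv *p)    /* ->abort: drop the dirty clone */
{
	if (!p->dirty)
		return;
	free(p->clone);
	p->clone = clone_of(atomic_load(&p->live));
	p->dirty = 0;
}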
*)__set; struct rb_node *prev = rb_prev(&rbe->node); + struct net *net = read_pnet(&set->net); struct nft_rbtree_elem *rbe_prev; - struct nft_set_gc_batch *gcb; + struct nft_trans_gc *gc; - gcb = nft_set_gc_batch_check(set, NULL, GFP_ATOMIC); - if (!gcb) + gc = nft_trans_gc_alloc(set, 0, GFP_ATOMIC); + if (!gc) return -ENOMEM; /* search for end interval coming before this element. @@ -243,17 +265,28 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set, if (prev) { rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node); + nft_rbtree_gc_remove(net, set, priv, rbe_prev); - rb_erase(&rbe_prev->node, &priv->root); - atomic_dec(&set->nelems); - nft_set_gc_batch_add(gcb, rbe_prev); + /* There is always room in this trans gc for this element, + * memory allocation never actually happens, hence, the warning + * splat in such case. No need to set NFT_SET_ELEM_DEAD_BIT, + * this is synchronous gc which never fails. + */ + gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC); + if (WARN_ON_ONCE(!gc)) + return -ENOMEM; + + nft_trans_gc_elem_add(gc, rbe_prev); } - rb_erase(&rbe->node, &priv->root); - atomic_dec(&set->nelems); + nft_rbtree_gc_remove(net, set, priv, rbe); + gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC); + if (WARN_ON_ONCE(!gc)) + return -ENOMEM; - nft_set_gc_batch_add(gcb, rbe); - nft_set_gc_batch_complete(gcb); + nft_trans_gc_elem_add(gc, rbe); + + nft_trans_gc_queue_sync_done(gc); return 0; } @@ -281,6 +314,7 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL; struct rb_node *node, *next, *parent, **p, *first = NULL; struct nft_rbtree *priv = nft_set_priv(set); + u8 cur_genmask = nft_genmask_cur(net); u8 genmask = nft_genmask_next(net); int d, err; @@ -326,8 +360,11 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, if (!nft_set_elem_active(&rbe->ext, genmask)) continue; - /* perform garbage collection to avoid bogus overlap reports. */ - if (nft_set_elem_expired(&rbe->ext)) { + /* perform garbage collection to avoid bogus overlap reports + * but skip new elements in this transaction. 
+ */ + if (nft_set_elem_expired(&rbe->ext) && + nft_set_elem_active(&rbe->ext, cur_genmask)) { err = nft_rbtree_gc_elem(set, priv, rbe, genmask); if (err < 0) return err; @@ -481,7 +518,6 @@ static void nft_rbtree_activate(const struct net *net, struct nft_rbtree_elem *rbe = elem->priv; nft_set_elem_change_active(net, set, &rbe->ext); - nft_set_elem_clear_busy(&rbe->ext); } static bool nft_rbtree_flush(const struct net *net, @@ -489,12 +525,9 @@ static bool nft_rbtree_flush(const struct net *net, { struct nft_rbtree_elem *rbe = priv; - if (!nft_set_elem_mark_busy(&rbe->ext) || - !nft_is_active(net, &rbe->ext)) { - nft_set_elem_change_active(net, set, &rbe->ext); - return true; - } - return false; + nft_set_elem_change_active(net, set, &rbe->ext); + + return true; } static void *nft_rbtree_deactivate(const struct net *net, @@ -551,8 +584,6 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx, if (iter->count < iter->skip) goto cont; - if (nft_set_elem_expired(&rbe->ext)) - goto cont; if (!nft_set_elem_active(&rbe->ext, iter->genmask)) goto cont; @@ -571,26 +602,42 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx, static void nft_rbtree_gc(struct work_struct *work) { - struct nft_rbtree_elem *rbe, *rbe_end = NULL, *rbe_prev = NULL; - struct nft_set_gc_batch *gcb = NULL; + struct nft_rbtree_elem *rbe, *rbe_end = NULL; + struct nftables_pernet *nft_net; struct nft_rbtree *priv; + struct nft_trans_gc *gc; struct rb_node *node; struct nft_set *set; + unsigned int gc_seq; struct net *net; - u8 genmask; priv = container_of(work, struct nft_rbtree, gc_work.work); set = nft_set_container_of(priv); net = read_pnet(&set->net); - genmask = nft_genmask_cur(net); + nft_net = net_generic(net, nf_tables_net_id); + gc_seq = READ_ONCE(nft_net->gc_seq); - write_lock_bh(&priv->lock); - write_seqcount_begin(&priv->count); + if (nft_set_gc_is_pending(set)) + goto done; + + gc = nft_trans_gc_alloc(set, gc_seq, GFP_KERNEL); + if (!gc) + goto done; + + read_lock_bh(&priv->lock); for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) { + + /* Ruleset has been updated, try later. 
*/ + if (READ_ONCE(nft_net->gc_seq) != gc_seq) { + nft_trans_gc_destroy(gc); + gc = NULL; + goto try_later; + } + rbe = rb_entry(node, struct nft_rbtree_elem, node); - if (!nft_set_elem_active(&rbe->ext, genmask)) - continue; + if (nft_set_elem_is_dead(&rbe->ext)) + goto dead_elem; /* elements are reversed in the rbtree for historical reasons, * from highest to lowest value, that is why end element is @@ -603,40 +650,32 @@ static void nft_rbtree_gc(struct work_struct *work) if (!nft_set_elem_expired(&rbe->ext)) continue; - if (nft_set_elem_mark_busy(&rbe->ext)) { - rbe_end = NULL; + nft_set_elem_dead(&rbe->ext); + + if (!rbe_end) continue; - } - if (rbe_prev) { - rb_erase(&rbe_prev->node, &priv->root); - rbe_prev = NULL; - } - gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC); - if (!gcb) - break; + nft_set_elem_dead(&rbe_end->ext); - atomic_dec(&set->nelems); - nft_set_gc_batch_add(gcb, rbe); - rbe_prev = rbe; + gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC); + if (!gc) + goto try_later; - if (rbe_end) { - atomic_dec(&set->nelems); - nft_set_gc_batch_add(gcb, rbe_end); - rb_erase(&rbe_end->node, &priv->root); - rbe_end = NULL; - } - node = rb_next(node); - if (!node) - break; - } - if (rbe_prev) - rb_erase(&rbe_prev->node, &priv->root); - write_seqcount_end(&priv->count); - write_unlock_bh(&priv->lock); + nft_trans_gc_elem_add(gc, rbe_end); + rbe_end = NULL; +dead_elem: + gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC); + if (!gc) + goto try_later; - nft_set_gc_batch_complete(gcb); + nft_trans_gc_elem_add(gc, rbe); + } +try_later: + read_unlock_bh(&priv->lock); + if (gc) + nft_trans_gc_queue_async_done(gc); +done: queue_delayed_work(system_power_efficient_wq, &priv->gc_work, nft_set_gc_interval(set)); } diff --git a/net/netfilter/nft_synproxy.c b/net/netfilter/nft_synproxy.c index 59c4dfaf2ea1fc97cade542fae9901fa89742217..1133e06f3c40eb4cd4b1c13ed6a7e4dd6734f3c5 100644 --- a/net/netfilter/nft_synproxy.c +++ b/net/netfilter/nft_synproxy.c @@ -109,7 +109,7 @@ static void nft_synproxy_do_eval(const struct nft_synproxy *priv, { struct synproxy_options opts = {}; struct sk_buff *skb = pkt->skb; - int thoff = pkt->xt.thoff; + int thoff = nft_thoff(pkt); const struct tcphdr *tcp; struct tcphdr _tcph; @@ -123,7 +123,7 @@ static void nft_synproxy_do_eval(const struct nft_synproxy *priv, return; } - tcp = skb_header_pointer(skb, pkt->xt.thoff, + tcp = skb_header_pointer(skb, thoff, sizeof(struct tcphdr), &_tcph); if (!tcp) { diff --git a/net/netfilter/nft_tproxy.c b/net/netfilter/nft_tproxy.c index c49d318f8e6ed649357f973ffbd15a7baaf23e34..f8d277e05ef4ff7359d8ffff69d9e17091598a82 100644 --- a/net/netfilter/nft_tproxy.c +++ b/net/netfilter/nft_tproxy.c @@ -88,9 +88,9 @@ static void nft_tproxy_eval_v6(const struct nft_expr *expr, const struct nft_tproxy *priv = nft_expr_priv(expr); struct sk_buff *skb = pkt->skb; const struct ipv6hdr *iph = ipv6_hdr(skb); - struct in6_addr taddr; - int thoff = pkt->xt.thoff; + int thoff = nft_thoff(pkt); struct udphdr _hdr, *hp; + struct in6_addr taddr; __be16 tport = 0; struct sock *sk; int l4proto; diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c index 5f741e51b4baa46f825b7a676969cde3159b2c21..bb38124a5d3dba5df4c18810201ea1c558d01226 100644 --- a/net/rds/rdma_transport.c +++ b/net/rds/rdma_transport.c @@ -86,10 +86,12 @@ static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id, break; case RDMA_CM_EVENT_ADDR_RESOLVED: - rdma_set_service_type(cm_id, conn->c_tos); - /* XXX do we need to clean up if this fails? 
*/ - ret = rdma_resolve_route(cm_id, + if (conn) { + rdma_set_service_type(cm_id, conn->c_tos); + /* XXX do we need to clean up if this fails? */ + ret = rdma_resolve_route(cm_id, RDS_RDMA_RESOLVE_TIMEOUT_MS); + } break; case RDMA_CM_EVENT_ROUTE_RESOLVED: diff --git a/net/sched/Kconfig b/net/sched/Kconfig index 697522371914c504bcdaf188515fd524758b5021..2046c16b29f093deac97a6995f0b76c3a6904c97 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -548,34 +548,6 @@ config CLS_U32_MARK help Say Y here to be able to use netfilter marks as u32 key. -config NET_CLS_RSVP - tristate "IPv4 Resource Reservation Protocol (RSVP)" - select NET_CLS - help - The Resource Reservation Protocol (RSVP) permits end systems to - request a minimum and maximum data flow rate for a connection; this - is important for real time data such as streaming sound or video. - - Say Y here if you want to be able to classify outgoing packets based - on their RSVP requests. - - To compile this code as a module, choose M here: the - module will be called cls_rsvp. - -config NET_CLS_RSVP6 - tristate "IPv6 Resource Reservation Protocol (RSVP6)" - select NET_CLS - help - The Resource Reservation Protocol (RSVP) permits end systems to - request a minimum and maximum data flow rate for a connection; this - is important for real time data such as streaming sound or video. - - Say Y here if you want to be able to classify outgoing packets based - on their RSVP requests and you are using the IPv6 protocol. - - To compile this code as a module, choose M here: the - module will be called cls_rsvp6. - config NET_CLS_FLOW tristate "Flow classifier" select NET_CLS diff --git a/net/sched/Makefile b/net/sched/Makefile index 4311fdb211197ec073f70f2adbbe26024e38c16c..df2bcd785f7d1d542631e47171b16fcd59fc43f8 100644 --- a/net/sched/Makefile +++ b/net/sched/Makefile @@ -68,8 +68,6 @@ obj-$(CONFIG_NET_SCH_TAPRIO) += sch_taprio.o obj-$(CONFIG_NET_CLS_U32) += cls_u32.o obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o obj-$(CONFIG_NET_CLS_FW) += cls_fw.o -obj-$(CONFIG_NET_CLS_RSVP) += cls_rsvp.o -obj-$(CONFIG_NET_CLS_RSVP6) += cls_rsvp6.o obj-$(CONFIG_NET_CLS_BASIC) += cls_basic.o obj-$(CONFIG_NET_CLS_FLOW) += cls_flow.o obj-$(CONFIG_NET_CLS_CGROUP) += cls_cgroup.o diff --git a/net/sched/cls_rsvp.c b/net/sched/cls_rsvp.c deleted file mode 100644 index de1c1d4da59779ed0c81ab9ec88458225684adaf..0000000000000000000000000000000000000000 --- a/net/sched/cls_rsvp.c +++ /dev/null @@ -1,24 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * net/sched/cls_rsvp.c Special RSVP packet classifier for IPv4. - * - * Authors: Alexey Kuznetsov, - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define RSVP_DST_LEN 1 -#define RSVP_ID "rsvp" -#define RSVP_OPS cls_rsvp_ops - -#include "cls_rsvp.h" -MODULE_LICENSE("GPL"); diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h deleted file mode 100644 index d36949d9382c41dda54d12269fe1e8ff9c7e397f..0000000000000000000000000000000000000000 --- a/net/sched/cls_rsvp.h +++ /dev/null @@ -1,777 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * net/sched/cls_rsvp.h Template file for RSVPv[46] classifiers. - * - * Authors: Alexey Kuznetsov, - */ - -/* - Comparing to general packet classification problem, - RSVP needs only sevaral relatively simple rules: - - * (dst, protocol) are always specified, - so that we are able to hash them. - * src may be exact, or may be wildcard, so that - we can keep a hash table plus one wildcard entry. 
- * source port (or flow label) is important only if src is given. - - IMPLEMENTATION. - - We use a two level hash table: The top level is keyed by - destination address and protocol ID, every bucket contains a list - of "rsvp sessions", identified by destination address, protocol and - DPI(="Destination Port ID"): triple (key, mask, offset). - - Every bucket has a smaller hash table keyed by source address - (cf. RSVP flowspec) and one wildcard entry for wildcard reservations. - Every bucket is again a list of "RSVP flows", selected by - source address and SPI(="Source Port ID" here rather than - "security parameter index"): triple (key, mask, offset). - - - NOTE 1. All the packets with IPv6 extension headers (but AH and ESP) - and all fragmented packets go to the best-effort traffic class. - - - NOTE 2. Two "port id"'s seems to be redundant, rfc2207 requires - only one "Generalized Port Identifier". So that for classic - ah, esp (and udp,tcp) both *pi should coincide or one of them - should be wildcard. - - At first sight, this redundancy is just a waste of CPU - resources. But DPI and SPI add the possibility to assign different - priorities to GPIs. Look also at note 4 about tunnels below. - - - NOTE 3. One complication is the case of tunneled packets. - We implement it as following: if the first lookup - matches a special session with "tunnelhdr" value not zero, - flowid doesn't contain the true flow ID, but the tunnel ID (1...255). - In this case, we pull tunnelhdr bytes and restart lookup - with tunnel ID added to the list of keys. Simple and stupid 8)8) - It's enough for PIMREG and IPIP. - - - NOTE 4. Two GPIs make it possible to parse even GRE packets. - F.e. DPI can select ETH_P_IP (and necessary flags to make - tunnelhdr correct) in GRE protocol field and SPI matches - GRE key. Is it not nice? 8)8) - - - Well, as result, despite its simplicity, we get a pretty - powerful classification engine. 
*/ - - -struct rsvp_head { - u32 tmap[256/32]; - u32 hgenerator; - u8 tgenerator; - struct rsvp_session __rcu *ht[256]; - struct rcu_head rcu; -}; - -struct rsvp_session { - struct rsvp_session __rcu *next; - __be32 dst[RSVP_DST_LEN]; - struct tc_rsvp_gpi dpi; - u8 protocol; - u8 tunnelid; - /* 16 (src,sport) hash slots, and one wildcard source slot */ - struct rsvp_filter __rcu *ht[16 + 1]; - struct rcu_head rcu; -}; - - -struct rsvp_filter { - struct rsvp_filter __rcu *next; - __be32 src[RSVP_DST_LEN]; - struct tc_rsvp_gpi spi; - u8 tunnelhdr; - - struct tcf_result res; - struct tcf_exts exts; - - u32 handle; - struct rsvp_session *sess; - struct rcu_work rwork; -}; - -static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid) -{ - unsigned int h = (__force __u32)dst[RSVP_DST_LEN - 1]; - - h ^= h>>16; - h ^= h>>8; - return (h ^ protocol ^ tunnelid) & 0xFF; -} - -static inline unsigned int hash_src(__be32 *src) -{ - unsigned int h = (__force __u32)src[RSVP_DST_LEN-1]; - - h ^= h>>16; - h ^= h>>8; - h ^= h>>4; - return h & 0xF; -} - -#define RSVP_APPLY_RESULT() \ -{ \ - int r = tcf_exts_exec(skb, &f->exts, res); \ - if (r < 0) \ - continue; \ - else if (r > 0) \ - return r; \ -} - -static int rsvp_classify(struct sk_buff *skb, const struct tcf_proto *tp, - struct tcf_result *res) -{ - struct rsvp_head *head = rcu_dereference_bh(tp->root); - struct rsvp_session *s; - struct rsvp_filter *f; - unsigned int h1, h2; - __be32 *dst, *src; - u8 protocol; - u8 tunnelid = 0; - u8 *xprt; -#if RSVP_DST_LEN == 4 - struct ipv6hdr *nhptr; - - if (!pskb_network_may_pull(skb, sizeof(*nhptr))) - return -1; - nhptr = ipv6_hdr(skb); -#else - struct iphdr *nhptr; - - if (!pskb_network_may_pull(skb, sizeof(*nhptr))) - return -1; - nhptr = ip_hdr(skb); -#endif -restart: - -#if RSVP_DST_LEN == 4 - src = &nhptr->saddr.s6_addr32[0]; - dst = &nhptr->daddr.s6_addr32[0]; - protocol = nhptr->nexthdr; - xprt = ((u8 *)nhptr) + sizeof(struct ipv6hdr); -#else - src = &nhptr->saddr; - dst = &nhptr->daddr; - protocol = nhptr->protocol; - xprt = ((u8 *)nhptr) + (nhptr->ihl<<2); - if (ip_is_fragment(nhptr)) - return -1; -#endif - - h1 = hash_dst(dst, protocol, tunnelid); - h2 = hash_src(src); - - for (s = rcu_dereference_bh(head->ht[h1]); s; - s = rcu_dereference_bh(s->next)) { - if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN - 1] && - protocol == s->protocol && - !(s->dpi.mask & - (*(u32 *)(xprt + s->dpi.offset) ^ s->dpi.key)) && -#if RSVP_DST_LEN == 4 - dst[0] == s->dst[0] && - dst[1] == s->dst[1] && - dst[2] == s->dst[2] && -#endif - tunnelid == s->tunnelid) { - - for (f = rcu_dereference_bh(s->ht[h2]); f; - f = rcu_dereference_bh(f->next)) { - if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN - 1] && - !(f->spi.mask & (*(u32 *)(xprt + f->spi.offset) ^ f->spi.key)) -#if RSVP_DST_LEN == 4 - && - src[0] == f->src[0] && - src[1] == f->src[1] && - src[2] == f->src[2] -#endif - ) { - *res = f->res; - RSVP_APPLY_RESULT(); - -matched: - if (f->tunnelhdr == 0) - return 0; - - tunnelid = f->res.classid; - nhptr = (void *)(xprt + f->tunnelhdr - sizeof(*nhptr)); - goto restart; - } - } - - /* And wildcard bucket... 
*/ - for (f = rcu_dereference_bh(s->ht[16]); f; - f = rcu_dereference_bh(f->next)) { - *res = f->res; - RSVP_APPLY_RESULT(); - goto matched; - } - return -1; - } - } - return -1; -} - -static void rsvp_replace(struct tcf_proto *tp, struct rsvp_filter *n, u32 h) -{ - struct rsvp_head *head = rtnl_dereference(tp->root); - struct rsvp_session *s; - struct rsvp_filter __rcu **ins; - struct rsvp_filter *pins; - unsigned int h1 = h & 0xFF; - unsigned int h2 = (h >> 8) & 0xFF; - - for (s = rtnl_dereference(head->ht[h1]); s; - s = rtnl_dereference(s->next)) { - for (ins = &s->ht[h2], pins = rtnl_dereference(*ins); ; - ins = &pins->next, pins = rtnl_dereference(*ins)) { - if (pins->handle == h) { - RCU_INIT_POINTER(n->next, pins->next); - rcu_assign_pointer(*ins, n); - return; - } - } - } - - /* Something went wrong if we are trying to replace a non-existant - * node. Mind as well halt instead of silently failing. - */ - BUG_ON(1); -} - -static void *rsvp_get(struct tcf_proto *tp, u32 handle) -{ - struct rsvp_head *head = rtnl_dereference(tp->root); - struct rsvp_session *s; - struct rsvp_filter *f; - unsigned int h1 = handle & 0xFF; - unsigned int h2 = (handle >> 8) & 0xFF; - - if (h2 > 16) - return NULL; - - for (s = rtnl_dereference(head->ht[h1]); s; - s = rtnl_dereference(s->next)) { - for (f = rtnl_dereference(s->ht[h2]); f; - f = rtnl_dereference(f->next)) { - if (f->handle == handle) - return f; - } - } - return NULL; -} - -static int rsvp_init(struct tcf_proto *tp) -{ - struct rsvp_head *data; - - data = kzalloc(sizeof(struct rsvp_head), GFP_KERNEL); - if (data) { - rcu_assign_pointer(tp->root, data); - return 0; - } - return -ENOBUFS; -} - -static void __rsvp_delete_filter(struct rsvp_filter *f) -{ - tcf_exts_destroy(&f->exts); - tcf_exts_put_net(&f->exts); - kfree(f); -} - -static void rsvp_delete_filter_work(struct work_struct *work) -{ - struct rsvp_filter *f = container_of(to_rcu_work(work), - struct rsvp_filter, - rwork); - rtnl_lock(); - __rsvp_delete_filter(f); - rtnl_unlock(); -} - -static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f) -{ - tcf_unbind_filter(tp, &f->res); - /* all classifiers are required to call tcf_exts_destroy() after rcu - * grace period, since converted-to-rcu actions are relying on that - * in cleanup() callback - */ - if (tcf_exts_get_net(&f->exts)) - tcf_queue_work(&f->rwork, rsvp_delete_filter_work); - else - __rsvp_delete_filter(f); -} - -static void rsvp_destroy(struct tcf_proto *tp, bool rtnl_held, - struct netlink_ext_ack *extack) -{ - struct rsvp_head *data = rtnl_dereference(tp->root); - int h1, h2; - - if (data == NULL) - return; - - for (h1 = 0; h1 < 256; h1++) { - struct rsvp_session *s; - - while ((s = rtnl_dereference(data->ht[h1])) != NULL) { - RCU_INIT_POINTER(data->ht[h1], s->next); - - for (h2 = 0; h2 <= 16; h2++) { - struct rsvp_filter *f; - - while ((f = rtnl_dereference(s->ht[h2])) != NULL) { - rcu_assign_pointer(s->ht[h2], f->next); - rsvp_delete_filter(tp, f); - } - } - kfree_rcu(s, rcu); - } - } - kfree_rcu(data, rcu); -} - -static int rsvp_delete(struct tcf_proto *tp, void *arg, bool *last, - bool rtnl_held, struct netlink_ext_ack *extack) -{ - struct rsvp_head *head = rtnl_dereference(tp->root); - struct rsvp_filter *nfp, *f = arg; - struct rsvp_filter __rcu **fp; - unsigned int h = f->handle; - struct rsvp_session __rcu **sp; - struct rsvp_session *nsp, *s = f->sess; - int i, h1; - - fp = &s->ht[(h >> 8) & 0xFF]; - for (nfp = rtnl_dereference(*fp); nfp; - fp = &nfp->next, nfp = rtnl_dereference(*fp)) { - if 
(nfp == f) { - RCU_INIT_POINTER(*fp, f->next); - rsvp_delete_filter(tp, f); - - /* Strip tree */ - - for (i = 0; i <= 16; i++) - if (s->ht[i]) - goto out; - - /* OK, session has no flows */ - sp = &head->ht[h & 0xFF]; - for (nsp = rtnl_dereference(*sp); nsp; - sp = &nsp->next, nsp = rtnl_dereference(*sp)) { - if (nsp == s) { - RCU_INIT_POINTER(*sp, s->next); - kfree_rcu(s, rcu); - goto out; - } - } - - break; - } - } - -out: - *last = true; - for (h1 = 0; h1 < 256; h1++) { - if (rcu_access_pointer(head->ht[h1])) { - *last = false; - break; - } - } - - return 0; -} - -static unsigned int gen_handle(struct tcf_proto *tp, unsigned salt) -{ - struct rsvp_head *data = rtnl_dereference(tp->root); - int i = 0xFFFF; - - while (i-- > 0) { - u32 h; - - if ((data->hgenerator += 0x10000) == 0) - data->hgenerator = 0x10000; - h = data->hgenerator|salt; - if (!rsvp_get(tp, h)) - return h; - } - return 0; -} - -static int tunnel_bts(struct rsvp_head *data) -{ - int n = data->tgenerator >> 5; - u32 b = 1 << (data->tgenerator & 0x1F); - - if (data->tmap[n] & b) - return 0; - data->tmap[n] |= b; - return 1; -} - -static void tunnel_recycle(struct rsvp_head *data) -{ - struct rsvp_session __rcu **sht = data->ht; - u32 tmap[256/32]; - int h1, h2; - - memset(tmap, 0, sizeof(tmap)); - - for (h1 = 0; h1 < 256; h1++) { - struct rsvp_session *s; - for (s = rtnl_dereference(sht[h1]); s; - s = rtnl_dereference(s->next)) { - for (h2 = 0; h2 <= 16; h2++) { - struct rsvp_filter *f; - - for (f = rtnl_dereference(s->ht[h2]); f; - f = rtnl_dereference(f->next)) { - if (f->tunnelhdr == 0) - continue; - data->tgenerator = f->res.classid; - tunnel_bts(data); - } - } - } - } - - memcpy(data->tmap, tmap, sizeof(tmap)); -} - -static u32 gen_tunnel(struct rsvp_head *data) -{ - int i, k; - - for (k = 0; k < 2; k++) { - for (i = 255; i > 0; i--) { - if (++data->tgenerator == 0) - data->tgenerator = 1; - if (tunnel_bts(data)) - return data->tgenerator; - } - tunnel_recycle(data); - } - return 0; -} - -static const struct nla_policy rsvp_policy[TCA_RSVP_MAX + 1] = { - [TCA_RSVP_CLASSID] = { .type = NLA_U32 }, - [TCA_RSVP_DST] = { .len = RSVP_DST_LEN * sizeof(u32) }, - [TCA_RSVP_SRC] = { .len = RSVP_DST_LEN * sizeof(u32) }, - [TCA_RSVP_PINFO] = { .len = sizeof(struct tc_rsvp_pinfo) }, -}; - -static int rsvp_change(struct net *net, struct sk_buff *in_skb, - struct tcf_proto *tp, unsigned long base, - u32 handle, - struct nlattr **tca, - void **arg, bool ovr, bool rtnl_held, - struct netlink_ext_ack *extack) -{ - struct rsvp_head *data = rtnl_dereference(tp->root); - struct rsvp_filter *f, *nfp; - struct rsvp_filter __rcu **fp; - struct rsvp_session *nsp, *s; - struct rsvp_session __rcu **sp; - struct tc_rsvp_pinfo *pinfo = NULL; - struct nlattr *opt = tca[TCA_OPTIONS]; - struct nlattr *tb[TCA_RSVP_MAX + 1]; - struct tcf_exts e; - unsigned int h1, h2; - __be32 *dst; - int err; - - if (opt == NULL) - return handle ? 
-EINVAL : 0; - - err = nla_parse_nested_deprecated(tb, TCA_RSVP_MAX, opt, rsvp_policy, - NULL); - if (err < 0) - return err; - - err = tcf_exts_init(&e, net, TCA_RSVP_ACT, TCA_RSVP_POLICE); - if (err < 0) - return err; - err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr, true, - extack); - if (err < 0) - goto errout2; - - f = *arg; - if (f) { - /* Node exists: adjust only classid */ - struct rsvp_filter *n; - - if (f->handle != handle && handle) - goto errout2; - - n = kmemdup(f, sizeof(*f), GFP_KERNEL); - if (!n) { - err = -ENOMEM; - goto errout2; - } - - err = tcf_exts_init(&n->exts, net, TCA_RSVP_ACT, - TCA_RSVP_POLICE); - if (err < 0) { - kfree(n); - goto errout2; - } - - if (tb[TCA_RSVP_CLASSID]) { - n->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]); - tcf_bind_filter(tp, &n->res, base); - } - - tcf_exts_change(&n->exts, &e); - rsvp_replace(tp, n, handle); - return 0; - } - - /* Now more serious part... */ - err = -EINVAL; - if (handle) - goto errout2; - if (tb[TCA_RSVP_DST] == NULL) - goto errout2; - - err = -ENOBUFS; - f = kzalloc(sizeof(struct rsvp_filter), GFP_KERNEL); - if (f == NULL) - goto errout2; - - err = tcf_exts_init(&f->exts, net, TCA_RSVP_ACT, TCA_RSVP_POLICE); - if (err < 0) - goto errout; - h2 = 16; - if (tb[TCA_RSVP_SRC]) { - memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src)); - h2 = hash_src(f->src); - } - if (tb[TCA_RSVP_PINFO]) { - pinfo = nla_data(tb[TCA_RSVP_PINFO]); - f->spi = pinfo->spi; - f->tunnelhdr = pinfo->tunnelhdr; - } - if (tb[TCA_RSVP_CLASSID]) - f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]); - - dst = nla_data(tb[TCA_RSVP_DST]); - h1 = hash_dst(dst, pinfo ? pinfo->protocol : 0, pinfo ? pinfo->tunnelid : 0); - - err = -ENOMEM; - if ((f->handle = gen_handle(tp, h1 | (h2<<8))) == 0) - goto errout; - - if (f->tunnelhdr) { - err = -EINVAL; - if (f->res.classid > 255) - goto errout; - - err = -ENOMEM; - if (f->res.classid == 0 && - (f->res.classid = gen_tunnel(data)) == 0) - goto errout; - } - - for (sp = &data->ht[h1]; - (s = rtnl_dereference(*sp)) != NULL; - sp = &s->next) { - if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] && - pinfo && pinfo->protocol == s->protocol && - memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 && -#if RSVP_DST_LEN == 4 - dst[0] == s->dst[0] && - dst[1] == s->dst[1] && - dst[2] == s->dst[2] && -#endif - pinfo->tunnelid == s->tunnelid) { - -insert: - /* OK, we found appropriate session */ - - fp = &s->ht[h2]; - - f->sess = s; - if (f->tunnelhdr == 0) - tcf_bind_filter(tp, &f->res, base); - - tcf_exts_change(&f->exts, &e); - - fp = &s->ht[h2]; - for (nfp = rtnl_dereference(*fp); nfp; - fp = &nfp->next, nfp = rtnl_dereference(*fp)) { - __u32 mask = nfp->spi.mask & f->spi.mask; - - if (mask != f->spi.mask) - break; - } - RCU_INIT_POINTER(f->next, nfp); - rcu_assign_pointer(*fp, f); - - *arg = f; - return 0; - } - } - - /* No session found. Create new one. 
*/ - - err = -ENOBUFS; - s = kzalloc(sizeof(struct rsvp_session), GFP_KERNEL); - if (s == NULL) - goto errout; - memcpy(s->dst, dst, sizeof(s->dst)); - - if (pinfo) { - s->dpi = pinfo->dpi; - s->protocol = pinfo->protocol; - s->tunnelid = pinfo->tunnelid; - } - sp = &data->ht[h1]; - for (nsp = rtnl_dereference(*sp); nsp; - sp = &nsp->next, nsp = rtnl_dereference(*sp)) { - if ((nsp->dpi.mask & s->dpi.mask) != s->dpi.mask) - break; - } - RCU_INIT_POINTER(s->next, nsp); - rcu_assign_pointer(*sp, s); - - goto insert; - -errout: - tcf_exts_destroy(&f->exts); - kfree(f); -errout2: - tcf_exts_destroy(&e); - return err; -} - -static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg, - bool rtnl_held) -{ - struct rsvp_head *head = rtnl_dereference(tp->root); - unsigned int h, h1; - - if (arg->stop) - return; - - for (h = 0; h < 256; h++) { - struct rsvp_session *s; - - for (s = rtnl_dereference(head->ht[h]); s; - s = rtnl_dereference(s->next)) { - for (h1 = 0; h1 <= 16; h1++) { - struct rsvp_filter *f; - - for (f = rtnl_dereference(s->ht[h1]); f; - f = rtnl_dereference(f->next)) { - if (arg->count < arg->skip) { - arg->count++; - continue; - } - if (arg->fn(tp, f, arg) < 0) { - arg->stop = 1; - return; - } - arg->count++; - } - } - } - } -} - -static int rsvp_dump(struct net *net, struct tcf_proto *tp, void *fh, - struct sk_buff *skb, struct tcmsg *t, bool rtnl_held) -{ - struct rsvp_filter *f = fh; - struct rsvp_session *s; - struct nlattr *nest; - struct tc_rsvp_pinfo pinfo; - - if (f == NULL) - return skb->len; - s = f->sess; - - t->tcm_handle = f->handle; - - nest = nla_nest_start_noflag(skb, TCA_OPTIONS); - if (nest == NULL) - goto nla_put_failure; - - if (nla_put(skb, TCA_RSVP_DST, sizeof(s->dst), &s->dst)) - goto nla_put_failure; - pinfo.dpi = s->dpi; - pinfo.spi = f->spi; - pinfo.protocol = s->protocol; - pinfo.tunnelid = s->tunnelid; - pinfo.tunnelhdr = f->tunnelhdr; - pinfo.pad = 0; - if (nla_put(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo)) - goto nla_put_failure; - if (f->res.classid && - nla_put_u32(skb, TCA_RSVP_CLASSID, f->res.classid)) - goto nla_put_failure; - if (((f->handle >> 8) & 0xFF) != 16 && - nla_put(skb, TCA_RSVP_SRC, sizeof(f->src), f->src)) - goto nla_put_failure; - - if (tcf_exts_dump(skb, &f->exts) < 0) - goto nla_put_failure; - - nla_nest_end(skb, nest); - - if (tcf_exts_dump_stats(skb, &f->exts) < 0) - goto nla_put_failure; - return skb->len; - -nla_put_failure: - nla_nest_cancel(skb, nest); - return -1; -} - -static void rsvp_bind_class(void *fh, u32 classid, unsigned long cl, void *q, - unsigned long base) -{ - struct rsvp_filter *f = fh; - - if (f && f->res.classid == classid) { - if (cl) - __tcf_bind_filter(q, &f->res, base); - else - __tcf_unbind_filter(q, &f->res); - } -} - -static struct tcf_proto_ops RSVP_OPS __read_mostly = { - .kind = RSVP_ID, - .classify = rsvp_classify, - .init = rsvp_init, - .destroy = rsvp_destroy, - .get = rsvp_get, - .change = rsvp_change, - .delete = rsvp_delete, - .walk = rsvp_walk, - .dump = rsvp_dump, - .bind_class = rsvp_bind_class, - .owner = THIS_MODULE, -}; - -static int __init init_rsvp(void) -{ - return register_tcf_proto_ops(&RSVP_OPS); -} - -static void __exit exit_rsvp(void) -{ - unregister_tcf_proto_ops(&RSVP_OPS); -} - -module_init(init_rsvp) -module_exit(exit_rsvp) diff --git a/net/sched/cls_rsvp6.c b/net/sched/cls_rsvp6.c deleted file mode 100644 index 64078846000ef51c49e45b1c2feb8b923e9e5d8f..0000000000000000000000000000000000000000 --- a/net/sched/cls_rsvp6.c +++ /dev/null @@ -1,24 +0,0 @@ -// 
-/*
- * net/sched/cls_rsvp6.c	Special RSVP packet classifier for IPv6.
- *
- * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ipv6.h>
-#include <linux/skbuff.h>
-#include <net/act_api.h>
-#include <net/pkt_cls.h>
-#include <net/netlink.h>
-
-#define RSVP_DST_LEN	4
-#define RSVP_ID "rsvp6"
-#define RSVP_OPS cls_rsvp6_ops
-
-#include "cls_rsvp.h"
-MODULE_LICENSE("GPL");
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index e1ce0f261f0be15a86ce45f3038b42f50c689888..e9a3fca4aedc08d84e596e8ea5f1e1c22a57e53d 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -2630,6 +2630,7 @@ rpc_decode_header(struct rpc_task *task, struct xdr_stream *xdr)
 	case rpc_autherr_rejectedverf:
 	case rpcsec_gsserr_credproblem:
 	case rpcsec_gsserr_ctxproblem:
+		rpcauth_invalcred(task);
 		if (!task->tk_cred_retry)
 			break;
 		task->tk_cred_retry--;
diff --git a/samples/hw_breakpoint/data_breakpoint.c b/samples/hw_breakpoint/data_breakpoint.c
index 418c46fe5ffc3974c660f85008a864b4edc5da59..b99322f188e5930a4f4b6868a42e54a0c95ee49c 100644
--- a/samples/hw_breakpoint/data_breakpoint.c
+++ b/samples/hw_breakpoint/data_breakpoint.c
@@ -70,7 +70,9 @@ static int __init hw_break_module_init(void)
 static void __exit hw_break_module_exit(void)
 {
 	unregister_wide_hw_breakpoint(sample_hbp);
-	symbol_put(ksym_name);
+#ifdef CONFIG_MODULE_UNLOAD
+	__symbol_put(ksym_name);
+#endif
 	printk(KERN_INFO "HW Breakpoint for %s write uninstalled\n", ksym_name);
 }
diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
index f96e70c85f84af6eb85a86bba948a6d3581130a1..801c89a3a1b6ffd0fcc09ece116e99bf8779b570 100644
--- a/sound/hda/intel-dsp-config.c
+++ b/sound/hda/intel-dsp-config.c
@@ -368,6 +368,14 @@ static const struct config_entry config_table[] = {
 	},
 #endif
 
+/* Lunar Lake */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_LUNARLAKE)
+	/* Lunarlake-P */
+	{
+		.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+		.device = PCI_DEVICE_ID_INTEL_HDA_LNL_P,
+	},
+#endif
 };
 
 static const struct config_entry *snd_intel_dsp_find_config
diff --git a/sound/soc/fsl/imx-audmix.c b/sound/soc/fsl/imx-audmix.c
index 77d8234c7ac49aecdcc9fa5dc37b5111372987c1..bb2aab1d2389e346a9dd941688b331c5db5c47b9 100644
--- a/sound/soc/fsl/imx-audmix.c
+++ b/sound/soc/fsl/imx-audmix.c
@@ -322,7 +322,7 @@ static int imx_audmix_probe(struct platform_device *pdev)
 	if (IS_ERR(priv->cpu_mclk)) {
 		ret = PTR_ERR(priv->cpu_mclk);
 		dev_err(&cpu_pdev->dev, "failed to get DAI mclk1: %d\n", ret);
-		return -EINVAL;
+		return ret;
 	}
 
 	priv->audmix_pdev = audmix_pdev;
diff --git a/sound/soc/meson/axg-spdifin.c b/sound/soc/meson/axg-spdifin.c
index d0d09f945b489bc03550793a57b888f19a952e7d..7aaded1fc376b44d01fa99875b07c6e0dd9334f1 100644
--- a/sound/soc/meson/axg-spdifin.c
+++ b/sound/soc/meson/axg-spdifin.c
@@ -112,34 +112,6 @@ static int axg_spdifin_prepare(struct snd_pcm_substream *substream,
 	return 0;
 }
 
-static int axg_spdifin_startup(struct snd_pcm_substream *substream,
-			       struct snd_soc_dai *dai)
-{
-	struct axg_spdifin *priv = snd_soc_dai_get_drvdata(dai);
-	int ret;
-
-	ret = clk_prepare_enable(priv->refclk);
-	if (ret) {
-		dev_err(dai->dev,
-			"failed to enable spdifin reference clock\n");
-		return ret;
-	}
-
-	regmap_update_bits(priv->map, SPDIFIN_CTRL0, SPDIFIN_CTRL0_EN,
-			   SPDIFIN_CTRL0_EN);
-
-	return 0;
-}
-
-static void axg_spdifin_shutdown(struct snd_pcm_substream *substream,
-				 struct snd_soc_dai *dai)
-{
-	struct axg_spdifin *priv = snd_soc_dai_get_drvdata(dai);
-
-	regmap_update_bits(priv->map, SPDIFIN_CTRL0, SPDIFIN_CTRL0_EN, 0);
-	clk_disable_unprepare(priv->refclk);
-}
-
 static void axg_spdifin_write_mode_param(struct regmap *map, int mode,
 					 unsigned int val,
 					 unsigned int num_per_reg,
@@ -251,25 +223,38 @@ static int axg_spdifin_dai_probe(struct snd_soc_dai *dai)
 	ret = axg_spdifin_sample_mode_config(dai, priv);
 	if (ret) {
 		dev_err(dai->dev, "mode configuration failed\n");
-		clk_disable_unprepare(priv->pclk);
-		return ret;
+		goto pclk_err;
 	}
 
+	ret = clk_prepare_enable(priv->refclk);
+	if (ret) {
+		dev_err(dai->dev,
+			"failed to enable spdifin reference clock\n");
+		goto pclk_err;
+	}
+
+	regmap_update_bits(priv->map, SPDIFIN_CTRL0, SPDIFIN_CTRL0_EN,
+			   SPDIFIN_CTRL0_EN);
+
 	return 0;
+
+pclk_err:
+	clk_disable_unprepare(priv->pclk);
+	return ret;
 }
 
 static int axg_spdifin_dai_remove(struct snd_soc_dai *dai)
 {
 	struct axg_spdifin *priv = snd_soc_dai_get_drvdata(dai);
 
+	regmap_update_bits(priv->map, SPDIFIN_CTRL0, SPDIFIN_CTRL0_EN, 0);
+	clk_disable_unprepare(priv->refclk);
 	clk_disable_unprepare(priv->pclk);
 	return 0;
 }
 
 static const struct snd_soc_dai_ops axg_spdifin_ops = {
 	.prepare = axg_spdifin_prepare,
-	.startup = axg_spdifin_startup,
-	.shutdown = axg_spdifin_shutdown,
 };
 
 static int axg_spdifin_iec958_info(struct snd_kcontrol *kcontrol,
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 3e7706c251e9eafa479b4e13ba7a0055c27da259..89905b4e93091e82aca7947371eefd844df12445 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -824,33 +824,36 @@ else
   endif
 endif
 
-ifeq ($(feature-libbfd), 1)
-  EXTLIBS += -lbfd -lopcodes
-else
-  # we are on a system that requires -liberty and (maybe) -lz
-  # to link against -lbfd; test each case individually here
-
-  # call all detections now so we get correct
-  # status in VF output
-  $(call feature_check,libbfd-liberty)
-  $(call feature_check,libbfd-liberty-z)
-  ifeq ($(feature-libbfd-liberty), 1)
-    EXTLIBS += -lbfd -lopcodes -liberty
-    FEATURE_CHECK_LDFLAGS-disassembler-four-args += -liberty -ldl
+ifndef NO_LIBBFD
+  ifeq ($(feature-libbfd), 1)
+    EXTLIBS += -lbfd -lopcodes
   else
-    ifeq ($(feature-libbfd-liberty-z), 1)
-      EXTLIBS += -lbfd -lopcodes -liberty -lz
-      FEATURE_CHECK_LDFLAGS-disassembler-four-args += -liberty -lz -ldl
+    # we are on a system that requires -liberty and (maybe) -lz
+    # to link against -lbfd; test each case individually here
+
+    # call all detections now so we get correct
+    # status in VF output
+    $(call feature_check,libbfd-liberty)
+    $(call feature_check,libbfd-liberty-z)
+
+    ifeq ($(feature-libbfd-liberty), 1)
+      EXTLIBS += -lbfd -lopcodes -liberty
+      FEATURE_CHECK_LDFLAGS-disassembler-four-args += -liberty -ldl
+    else
+      ifeq ($(feature-libbfd-liberty-z), 1)
+        EXTLIBS += -lbfd -lopcodes -liberty -lz
+        FEATURE_CHECK_LDFLAGS-disassembler-four-args += -liberty -lz -ldl
+      endif
     endif
+    $(call feature_check,disassembler-four-args)
   endif
-  $(call feature_check,disassembler-four-args)
-endif
 
-ifeq ($(feature-libbfd-buildid), 1)
-  CFLAGS += -DHAVE_LIBBFD_BUILDID_SUPPORT
-else
-  msg := $(warning Old version of libbfd/binutils things like PE executable profiling will not be available);
+  ifeq ($(feature-libbfd-buildid), 1)
+    CFLAGS += -DHAVE_LIBBFD_BUILDID_SUPPORT
+  else
+    msg := $(warning Old version of libbfd/binutils things like PE executable profiling will not be available);
+  endif
 endif
 
 ifdef NO_DEMANGLE
diff --git a/tools/perf/pmu-events/Build b/tools/perf/pmu-events/Build
index 215ba30b85343ad1874b1fc52c05fccbd8948bb3..a055dee6a46af77ebe7345934f28eeaee66704ac 100644
--- a/tools/perf/pmu-events/Build
+++ b/tools/perf/pmu-events/Build
@@ -6,10 +6,13 @@ pmu-events-y += pmu-events.o
 JDIR = pmu-events/arch/$(SRCARCH)
 JSON = $(shell [ -d $(JDIR) ] && \
 	find $(JDIR) -name '*.json' -o -name 'mapfile.csv')
+JDIR_TEST = pmu-events/arch/test
+JSON_TEST = $(shell [ -d $(JDIR_TEST) ] && \
+	find $(JDIR_TEST) -name '*.json')
 
 #
 # Locate/process JSON files in pmu-events/arch/
 # directory and create tables in pmu-events.c.
 #
-$(OUTPUT)pmu-events/pmu-events.c: $(JSON) $(JEVENTS)
+$(OUTPUT)pmu-events/pmu-events.c: $(JSON) $(JSON_TEST) $(JEVENTS)
 	$(Q)$(call echo-cmd,gen)$(JEVENTS) $(SRCARCH) pmu-events/arch $(OUTPUT)pmu-events/pmu-events.c $(V)
diff --git a/tools/testing/selftests/ftrace/ftracetest b/tools/testing/selftests/ftrace/ftracetest
index 8ec1922e974eba126080c2c06807da2a626d2e54..55314cd197ab95de529a12b4dcaf8b9c8b917802 100755
--- a/tools/testing/selftests/ftrace/ftracetest
+++ b/tools/testing/selftests/ftrace/ftracetest
@@ -30,6 +30,9 @@ err_ret=1
 # kselftest skip code is 4
 err_skip=4
 
+# umount required
+UMOUNT_DIR=""
+
 # cgroup RT scheduling prevents chrt commands from succeeding, which
 # induces failures in test wakeup tests. Disable for the duration of
 # the tests.
@@ -44,6 +47,9 @@ setup() {
 
 cleanup() {
   echo $sched_rt_runtime_orig > $sched_rt_runtime
+  if [ -n "${UMOUNT_DIR}" ]; then
+    umount ${UMOUNT_DIR} ||:
+  fi
 }
 
 errexit() { # message
@@ -155,11 +161,13 @@ if [ -z "$TRACING_DIR" ]; then
     mount -t tracefs nodev /sys/kernel/tracing ||
       errexit "Failed to mount /sys/kernel/tracing"
     TRACING_DIR="/sys/kernel/tracing"
+    UMOUNT_DIR=${TRACING_DIR}
   # If debugfs exists, then so does /sys/kernel/debug
   elif [ -d "/sys/kernel/debug" ]; then
     mount -t debugfs nodev /sys/kernel/debug ||
      errexit "Failed to mount /sys/kernel/debug"
    TRACING_DIR="/sys/kernel/debug/tracing"
+    UMOUNT_DIR=${TRACING_DIR}
  else
    err_ret=$err_skip
    errexit "debugfs and tracefs are not configured in this kernel"
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index b599f1fa99b55aba5d359163f35a9fc32713e5ee..44a25a9f1f72269e32572512c5182cc40a75d739 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -384,11 +384,12 @@ TEST_F(tls, sendmsg_large)
 		msg.msg_iov = &vec;
 		msg.msg_iovlen = 1;
 
-		EXPECT_EQ(sendmsg(self->cfd, &msg, 0), send_len);
+		EXPECT_EQ(sendmsg(self->fd, &msg, 0), send_len);
 	}
 
-	while (recvs++ < sends)
-		EXPECT_NE(recv(self->fd, mem, send_len, 0), -1);
+	while (recvs++ < sends) {
+		EXPECT_NE(recv(self->cfd, mem, send_len, 0), -1);
+	}
 
 	free(mem);
 }
@@ -416,9 +417,9 @@ TEST_F(tls, sendmsg_multiple)
 	msg.msg_iov = vec;
 	msg.msg_iovlen = iov_len;
 
-	EXPECT_EQ(sendmsg(self->cfd, &msg, 0), total_len);
+	EXPECT_EQ(sendmsg(self->fd, &msg, 0), total_len);
 	buf = malloc(total_len);
-	EXPECT_NE(recv(self->fd, buf, total_len, 0), -1);
+	EXPECT_NE(recv(self->cfd, buf, total_len, 0), -1);
 	for (i = 0; i < iov_len; i++) {
 		EXPECT_EQ(memcmp(test_strs[i], buf + len_cmp, strlen(test_strs[i])),