diff --git a/.mailmap b/.mailmap index 5dd3181219827..e50662536c486 100644 --- a/.mailmap +++ b/.mailmap @@ -538,6 +538,8 @@ Shuah Khan Sibi Sankar Sid Manning Simon Arlott +Simon Horman +Simon Horman Simon Kelley Sricharan Ramabadhran Srinivas Ramana diff --git a/Documentation/ABI/testing/sysfs-class-led-trigger-netdev b/Documentation/ABI/testing/sysfs-class-led-trigger-netdev index 78b62a23b14a0..f6d9d72ce77b7 100644 --- a/Documentation/ABI/testing/sysfs-class-led-trigger-netdev +++ b/Documentation/ABI/testing/sysfs-class-led-trigger-netdev @@ -13,7 +13,7 @@ Description: Specifies the duration of the LED blink in milliseconds. Defaults to 50 ms. - With hw_control ON, the interval value MUST be set to the + When offloaded is true, the interval value MUST be set to the default value and cannot be changed. Trying to set any value in this specific mode will return an EINVAL error. @@ -44,8 +44,8 @@ Description: If set to 1, the LED will blink for the milliseconds specified in interval to signal transmission. - With hw_control ON, the blink interval is controlled by hardware - and won't reflect the value set in interval. + When offloaded is true, the blink interval is controlled by + hardware and won't reflect the value set in interval. What: /sys/class/leds//rx Date: Dec 2017 @@ -59,21 +59,21 @@ Description: If set to 1, the LED will blink for the milliseconds specified in interval to signal reception. - With hw_control ON, the blink interval is controlled by hardware - and won't reflect the value set in interval. + When offloaded is true, the blink interval is controlled by + hardware and won't reflect the value set in interval. -What: /sys/class/leds//hw_control +What: /sys/class/leds//offloaded Date: Jun 2023 KernelVersion: 6.5 Contact: linux-leds@vger.kernel.org Description: - Communicate whether the LED trigger modes are driven by hardware - or software fallback is used. + Communicate whether the LED trigger modes are offloaded to + hardware or whether software fallback is used. If 0, the LED is using software fallback to blink. - If 1, the LED is using hardware control to blink and signal the - requested modes. + If 1, the LED blinking in requested mode is offloaded to + hardware. What: /sys/class/leds//link_10 Date: Jun 2023 diff --git a/Documentation/admin-guide/hw-vuln/srso.rst b/Documentation/admin-guide/hw-vuln/srso.rst index af59a93956621..b6cfb51cb0b46 100644 --- a/Documentation/admin-guide/hw-vuln/srso.rst +++ b/Documentation/admin-guide/hw-vuln/srso.rst @@ -141,8 +141,8 @@ sequence. To ensure the safety of this mitigation, the kernel must ensure that the safe return sequence is itself free from attacker interference. In Zen3 and Zen4, this is accomplished by creating a BTB alias between the -untraining function srso_untrain_ret_alias() and the safe return -function srso_safe_ret_alias() which results in evicting a potentially +untraining function srso_alias_untrain_ret() and the safe return +function srso_alias_safe_ret() which results in evicting a potentially poisoned BTB entry and using that safe one for all function returns. 
In older Zen1 and Zen2, this is accomplished using a reinterpretation diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sa8775p-tlmm.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sa8775p-tlmm.yaml index e608a4f1bcaec..e119a226a4b18 100644 --- a/Documentation/devicetree/bindings/pinctrl/qcom,sa8775p-tlmm.yaml +++ b/Documentation/devicetree/bindings/pinctrl/qcom,sa8775p-tlmm.yaml @@ -87,7 +87,7 @@ $defs: emac0_mdc, emac0_mdio, emac0_ptp_aux, emac0_ptp_pps, emac1_mcg0, emac1_mcg1, emac1_mcg2, emac1_mcg3, emac1_mdc, emac1_mdio, emac1_ptp_aux, emac1_ptp_pps, gcc_gp1, gcc_gp2, gcc_gp3, - gcc_gp4, gcc_gp5, hs0_mi2s, hs1_mi2s, hs2_mi2s, ibi_i3c, + gcc_gp4, gcc_gp5, gpio, hs0_mi2s, hs1_mi2s, hs2_mi2s, ibi_i3c, jitter_bist, mdp0_vsync0, mdp0_vsync1, mdp0_vsync2, mdp0_vsync3, mdp0_vsync4, mdp0_vsync5, mdp0_vsync6, mdp0_vsync7, mdp0_vsync8, mdp1_vsync0, mdp1_vsync1, mdp1_vsync2, mdp1_vsync3, mdp1_vsync4, diff --git a/Documentation/i2c/writing-clients.rst b/Documentation/i2c/writing-clients.rst index b7d3ae7458f86..41ddc10f1ac7b 100644 --- a/Documentation/i2c/writing-clients.rst +++ b/Documentation/i2c/writing-clients.rst @@ -46,7 +46,7 @@ driver model device node, and its I2C address. }, .id_table = foo_idtable, - .probe_new = foo_probe, + .probe = foo_probe, .remove = foo_remove, /* if device autodetection is needed: */ .class = I2C_CLASS_SOMETHING, diff --git a/Documentation/networking/nf_conntrack-sysctl.rst b/Documentation/networking/nf_conntrack-sysctl.rst index 8b1045c3b59e0..c383a394c6656 100644 --- a/Documentation/networking/nf_conntrack-sysctl.rst +++ b/Documentation/networking/nf_conntrack-sysctl.rst @@ -178,10 +178,10 @@ nf_conntrack_sctp_timeout_established - INTEGER (seconds) Default is set to (hb_interval * path_max_retrans + rto_max) nf_conntrack_sctp_timeout_shutdown_sent - INTEGER (seconds) - default 0.3 + default 3 nf_conntrack_sctp_timeout_shutdown_recd - INTEGER (seconds) - default 0.3 + default 3 nf_conntrack_sctp_timeout_shutdown_ack_sent - INTEGER (seconds) default 3 diff --git a/MAINTAINERS b/MAINTAINERS index 4e0f5313102da..8d8d59b17c0eb 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8816,6 +8816,7 @@ R: Michael Walle S: Maintained F: drivers/gpio/gpio-regmap.c F: include/linux/gpio/regmap.h +K: (devm_)?gpio_regmap_(un)?register GPIO SUBSYSTEM M: Linus Walleij @@ -14816,6 +14817,16 @@ F: net/netfilter/xt_CONNSECMARK.c F: net/netfilter/xt_SECMARK.c F: net/netlabel/ +NETWORKING [MACSEC] +M: Sabrina Dubroca +L: netdev@vger.kernel.org +S: Maintained +F: drivers/net/macsec.c +F: include/net/macsec.h +F: include/uapi/linux/if_macsec.h +K: macsec +K: \bmdo_ + NETWORKING [MPTCP] M: Matthieu Baerts M: Mat Martineau @@ -19237,13 +19248,6 @@ F: Documentation/devicetree/bindings/serial/serial.yaml F: drivers/tty/serdev/ F: include/linux/serdev.h -SERIAL DRIVERS -M: Greg Kroah-Hartman -L: linux-serial@vger.kernel.org -S: Maintained -F: Documentation/devicetree/bindings/serial/ -F: drivers/tty/serial/ - SERIAL IR RECEIVER M: Sean Young L: linux-media@vger.kernel.org @@ -21072,6 +21076,39 @@ S: Maintained F: Documentation/devicetree/bindings/sound/davinci-mcasp-audio.yaml F: sound/soc/ti/ +TEXAS INSTRUMENTS AUDIO (ASoC/HDA) DRIVERS +M: Shenghao Ding +M: Kevin Lu +M: Baojun Xu +L: alsa-devel@alsa-project.org (moderated for non-subscribers) +S: Maintained +F: Documentation/devicetree/bindings/sound/tas2552.txt +F: Documentation/devicetree/bindings/sound/tas2562.yaml +F: Documentation/devicetree/bindings/sound/tas2770.yaml +F: 
Documentation/devicetree/bindings/sound/tas27xx.yaml +F: Documentation/devicetree/bindings/sound/ti,pcm1681.txt +F: Documentation/devicetree/bindings/sound/ti,pcm3168a.yaml +F: Documentation/devicetree/bindings/sound/ti,tlv320*.yaml +F: Documentation/devicetree/bindings/sound/tlv320adcx140.yaml +F: Documentation/devicetree/bindings/sound/tlv320aic31xx.txt +F: Documentation/devicetree/bindings/sound/tpa6130a2.txt +F: include/sound/tas2*.h +F: include/sound/tlv320*.h +F: include/sound/tpa6130a2-plat.h +F: sound/pci/hda/tas2781_hda_i2c.c +F: sound/soc/codecs/pcm1681.c +F: sound/soc/codecs/pcm1789*.* +F: sound/soc/codecs/pcm179x*.* +F: sound/soc/codecs/pcm186x*.* +F: sound/soc/codecs/pcm3008.* +F: sound/soc/codecs/pcm3060*.* +F: sound/soc/codecs/pcm3168a*.* +F: sound/soc/codecs/pcm5102a.c +F: sound/soc/codecs/pcm512x*.* +F: sound/soc/codecs/tas2*.* +F: sound/soc/codecs/tlv320*.* +F: sound/soc/codecs/tpa6130a2.* + TEXAS INSTRUMENTS DMA DRIVERS M: Peter Ujfalusi L: dmaengine@vger.kernel.org @@ -21648,20 +21685,16 @@ W: https://github.com/srcres258/linux-doc T: git git://github.com/srcres258/linux-doc.git doc-zh-tw F: Documentation/translations/zh_TW/ -TTY LAYER +TTY LAYER AND SERIAL DRIVERS M: Greg Kroah-Hartman M: Jiri Slaby L: linux-kernel@vger.kernel.org L: linux-serial@vger.kernel.org S: Supported T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty.git +F: Documentation/devicetree/bindings/serial/ F: Documentation/driver-api/serial/ F: drivers/tty/ -F: drivers/tty/serial/serial_base.h -F: drivers/tty/serial/serial_base_bus.c -F: drivers/tty/serial/serial_core.c -F: drivers/tty/serial/serial_ctrl.c -F: drivers/tty/serial/serial_port.c F: include/linux/selection.h F: include/linux/serial.h F: include/linux/serial_core.h @@ -22488,7 +22521,6 @@ L: virtualization@lists.linux-foundation.org S: Maintained F: drivers/block/virtio_blk.c F: drivers/scsi/virtio_scsi.c -F: drivers/vhost/scsi.c F: include/uapi/linux/virtio_blk.h F: include/uapi/linux/virtio_scsi.h @@ -22587,6 +22619,16 @@ F: include/linux/vhost_iotlb.h F: include/uapi/linux/vhost.h F: kernel/vhost_task.c +VIRTIO HOST (VHOST-SCSI) +M: "Michael S. 
Tsirkin" +M: Jason Wang +M: Mike Christie +R: Paolo Bonzini +R: Stefan Hajnoczi +L: virtualization@lists.linux-foundation.org +S: Maintained +F: drivers/vhost/scsi.c + VIRTIO I2C DRIVER M: Conghui Chen M: Viresh Kumar diff --git a/Makefile b/Makefile index 00cfb37a9ab8e..2fdd8b40b7e04 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 6 PATCHLEVEL = 5 SUBLEVEL = 0 -EXTRAVERSION = -rc6 +EXTRAVERSION = NAME = Hurr durr I'ma ninja sloth # *DOCUMENTATION* diff --git a/arch/arm/boot/dts/arm/integratorap.dts b/arch/arm/boot/dts/arm/integratorap.dts index 5b52d75bc6bed..d9927d3181dce 100644 --- a/arch/arm/boot/dts/arm/integratorap.dts +++ b/arch/arm/boot/dts/arm/integratorap.dts @@ -158,7 +158,7 @@ valid-mask = <0x003fffff>; }; - pci: pciv3@62000000 { + pci: pci@62000000 { compatible = "arm,integrator-ap-pci", "v3,v360epc-pci"; device_type = "pci"; #interrupt-cells = <1>; diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-mira.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-mira.dtsi index 1a599c294ab86..1ca4d219609f6 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-mira.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-mira.dtsi @@ -182,7 +182,7 @@ pinctrl-0 = <&pinctrl_rtc_int>; reg = <0x68>; interrupt-parent = <&gpio7>; - interrupts = <8 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <8 IRQ_TYPE_LEVEL_LOW>; status = "disabled"; }; }; diff --git a/arch/arm/boot/dts/nxp/imx/imx6sx.dtsi b/arch/arm/boot/dts/nxp/imx/imx6sx.dtsi index 3a43086665522..a05069d49cb88 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6sx.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6sx.dtsi @@ -863,7 +863,6 @@ reg = <0>; ldb_from_lcdif1: endpoint { - remote-endpoint = <&lcdif1_to_ldb>; }; }; @@ -1010,6 +1009,8 @@ <&clks IMX6SX_CLK_USDHC1>; clock-names = "ipg", "ahb", "per"; bus-width = <4>; + fsl,tuning-start-tap = <20>; + fsl,tuning-step= <2>; status = "disabled"; }; @@ -1022,6 +1023,8 @@ <&clks IMX6SX_CLK_USDHC2>; clock-names = "ipg", "ahb", "per"; bus-width = <4>; + fsl,tuning-start-tap = <20>; + fsl,tuning-step= <2>; status = "disabled"; }; @@ -1034,6 +1037,8 @@ <&clks IMX6SX_CLK_USDHC3>; clock-names = "ipg", "ahb", "per"; bus-width = <4>; + fsl,tuning-start-tap = <20>; + fsl,tuning-step= <2>; status = "disabled"; }; @@ -1309,11 +1314,8 @@ power-domains = <&pd_disp>; status = "disabled"; - ports { - port { - lcdif1_to_ldb: endpoint { - remote-endpoint = <&ldb_from_lcdif1>; - }; + port { + lcdif1_to_ldb: endpoint { }; }; }; diff --git a/arch/arm/boot/dts/nxp/imx/imx7s.dtsi b/arch/arm/boot/dts/nxp/imx/imx7s.dtsi index 54026c2c93fac..6ffb428dc939c 100644 --- a/arch/arm/boot/dts/nxp/imx/imx7s.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx7s.dtsi @@ -1184,6 +1184,8 @@ <&clks IMX7D_USDHC1_ROOT_CLK>; clock-names = "ipg", "ahb", "per"; bus-width = <4>; + fsl,tuning-step = <2>; + fsl,tuning-start-tap = <20>; status = "disabled"; }; @@ -1196,6 +1198,8 @@ <&clks IMX7D_USDHC2_ROOT_CLK>; clock-names = "ipg", "ahb", "per"; bus-width = <4>; + fsl,tuning-step = <2>; + fsl,tuning-start-tap = <20>; status = "disabled"; }; @@ -1208,6 +1212,8 @@ <&clks IMX7D_USDHC3_ROOT_CLK>; clock-names = "ipg", "ahb", "per"; bus-width = <4>; + fsl,tuning-step = <2>; + fsl,tuning-start-tap = <20>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/ti/omap/am335x-bone-common.dtsi b/arch/arm/boot/dts/ti/omap/am335x-bone-common.dtsi index b958607c71dcf..96451c8a815cc 100644 --- a/arch/arm/boot/dts/ti/omap/am335x-bone-common.dtsi +++ b/arch/arm/boot/dts/ti/omap/am335x-bone-common.dtsi @@ -145,6 +145,8 @@ /* MDIO */ AM33XX_PADCONF(AM335X_PIN_MDIO, 
PIN_INPUT_PULLUP | SLEWCTRL_FAST, MUX_MODE0) AM33XX_PADCONF(AM335X_PIN_MDC, PIN_OUTPUT_PULLUP, MUX_MODE0) + /* Added to support GPIO controlled PHY reset */ + AM33XX_PADCONF(AM335X_PIN_UART0_CTSN, PIN_OUTPUT_PULLUP, MUX_MODE7) >; }; @@ -153,6 +155,8 @@ /* MDIO reset value */ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLDOWN, MUX_MODE7) AM33XX_PADCONF(AM335X_PIN_MDC, PIN_INPUT_PULLDOWN, MUX_MODE7) + /* Added to support GPIO controlled PHY reset */ + AM33XX_PADCONF(AM335X_PIN_UART0_CTSN, PIN_INPUT_PULLDOWN, MUX_MODE7) >; }; @@ -215,6 +219,7 @@ baseboard_eeprom: baseboard_eeprom@50 { compatible = "atmel,24c256"; reg = <0x50>; + vcc-supply = <&ldo4_reg>; #address-cells = <1>; #size-cells = <1>; @@ -377,6 +382,10 @@ ethphy0: ethernet-phy@0 { reg = <0>; + /* Support GPIO reset on revision C3 boards */ + reset-gpios = <&gpio1 8 GPIO_ACTIVE_LOW>; + reset-assert-us = <300>; + reset-deassert-us = <6500>; }; }; diff --git a/arch/arm/mach-zynq/pm.c b/arch/arm/mach-zynq/pm.c index 8ba450ab559c8..61ad965ef3aca 100644 --- a/arch/arm/mach-zynq/pm.c +++ b/arch/arm/mach-zynq/pm.c @@ -8,8 +8,8 @@ */ #include +#include #include -#include #include "common.h" /* register offsets */ diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi index d6b36f04f3dc1..1a647d4072ba0 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi @@ -1221,10 +1221,9 @@ compatible = "fsl,imx8mm-mipi-csi2"; reg = <0x32e30000 0x1000>; interrupts = ; - assigned-clocks = <&clk IMX8MM_CLK_CSI1_CORE>, - <&clk IMX8MM_CLK_CSI1_PHY_REF>; - assigned-clock-parents = <&clk IMX8MM_SYS_PLL2_1000M>, - <&clk IMX8MM_SYS_PLL2_1000M>; + assigned-clocks = <&clk IMX8MM_CLK_CSI1_CORE>; + assigned-clock-parents = <&clk IMX8MM_SYS_PLL2_1000M>; + clock-frequency = <333000000>; clocks = <&clk IMX8MM_CLK_DISP_APB_ROOT>, <&clk IMX8MM_CLK_CSI1_ROOT>, diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi index 9869fe7652fca..aa38dd6dc9ba5 100644 --- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi @@ -1175,10 +1175,8 @@ compatible = "fsl,imx8mm-mipi-csi2"; reg = <0x32e30000 0x1000>; interrupts = ; - assigned-clocks = <&clk IMX8MN_CLK_CAMERA_PIXEL>, - <&clk IMX8MN_CLK_CSI1_PHY_REF>; - assigned-clock-parents = <&clk IMX8MN_SYS_PLL2_1000M>, - <&clk IMX8MN_SYS_PLL2_1000M>; + assigned-clocks = <&clk IMX8MN_CLK_CAMERA_PIXEL>; + assigned-clock-parents = <&clk IMX8MN_SYS_PLL2_1000M>; assigned-clock-rates = <333000000>; clock-frequency = <333000000>; clocks = <&clk IMX8MN_CLK_DISP_APB_ROOT>, diff --git a/arch/arm64/boot/dts/freescale/imx93.dtsi b/arch/arm64/boot/dts/freescale/imx93.dtsi index 8643612ace8c7..1d8dd14b65cfa 100644 --- a/arch/arm64/boot/dts/freescale/imx93.dtsi +++ b/arch/arm64/boot/dts/freescale/imx93.dtsi @@ -340,7 +340,7 @@ anatop: anatop@44480000 { compatible = "fsl,imx93-anatop", "syscon"; - reg = <0x44480000 0x10000>; + reg = <0x44480000 0x2000>; }; adc1: adc@44530000 { diff --git a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts index 9022ad726741c..a9e7b832c18ce 100644 --- a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts +++ b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts @@ -121,7 +121,7 @@ }; }; - pm8150l-thermal { + pm8150l-pcb-thermal { polling-delay-passive = <0>; polling-delay = <0>; thermal-sensors = <&pm8150l_adc_tm 1>; diff --git a/arch/arm64/boot/dts/qcom/sa8775p-ride.dts b/arch/arm64/boot/dts/qcom/sa8775p-ride.dts index 
ab767cfa51ff5..26f5a4e0ffed3 100644 --- a/arch/arm64/boot/dts/qcom/sa8775p-ride.dts +++ b/arch/arm64/boot/dts/qcom/sa8775p-ride.dts @@ -153,8 +153,8 @@ vreg_l4c: ldo4 { regulator-name = "vreg_l4c"; - regulator-min-microvolt = <1100000>; - regulator-max-microvolt = <1300000>; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; regulator-initial-mode = ; /* * FIXME: This should have regulator-allow-set-load but diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi index e25dc2bb52a7e..06df931d8cad3 100644 --- a/arch/arm64/boot/dts/qcom/sc7180.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi @@ -3120,8 +3120,8 @@ reg = <0 0x0ae94400 0 0x200>, <0 0x0ae94600 0 0x280>, <0 0x0ae94a00 0 0x1e0>; - reg-names = "dsi0_phy", - "dsi0_phy_lane", + reg-names = "dsi_phy", + "dsi_phy_lane", "dsi_pll"; #clock-cells = <1>; diff --git a/arch/arm64/boot/dts/qcom/sc8180x.dtsi b/arch/arm64/boot/dts/qcom/sc8180x.dtsi index d3ae185356365..be78a933d8eb2 100644 --- a/arch/arm64/boot/dts/qcom/sc8180x.dtsi +++ b/arch/arm64/boot/dts/qcom/sc8180x.dtsi @@ -3561,7 +3561,7 @@ }; osm_l3: interconnect@18321000 { - compatible = "qcom,sc8180x-osm-l3"; + compatible = "qcom,sc8180x-osm-l3", "qcom,osm-l3"; reg = <0 0x18321000 0 0x1400>; clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GPLL0>; diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi index 18c822abdb88f..b46e55bb8bdec 100644 --- a/arch/arm64/boot/dts/qcom/sm8150.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi @@ -56,7 +56,7 @@ qcom,freq-domain = <&cpufreq_hw 0>; operating-points-v2 = <&cpu0_opp_table>; interconnects = <&gem_noc MASTER_AMPSS_M0 0 &mc_virt SLAVE_EBI_CH0 0>, - <&osm_l3 MASTER_OSM_L3_APPS 0 &osm_l3 SLAVE_OSM_L3 0>; + <&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>; power-domains = <&CPU_PD0>; power-domain-names = "psci"; #cooling-cells = <2>; @@ -85,7 +85,7 @@ qcom,freq-domain = <&cpufreq_hw 0>; operating-points-v2 = <&cpu0_opp_table>; interconnects = <&gem_noc MASTER_AMPSS_M0 0 &mc_virt SLAVE_EBI_CH0 0>, - <&osm_l3 MASTER_OSM_L3_APPS 0 &osm_l3 SLAVE_OSM_L3 0>; + <&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>; power-domains = <&CPU_PD1>; power-domain-names = "psci"; #cooling-cells = <2>; @@ -109,7 +109,7 @@ qcom,freq-domain = <&cpufreq_hw 0>; operating-points-v2 = <&cpu0_opp_table>; interconnects = <&gem_noc MASTER_AMPSS_M0 0 &mc_virt SLAVE_EBI_CH0 0>, - <&osm_l3 MASTER_OSM_L3_APPS 0 &osm_l3 SLAVE_OSM_L3 0>; + <&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>; power-domains = <&CPU_PD2>; power-domain-names = "psci"; #cooling-cells = <2>; @@ -133,7 +133,7 @@ qcom,freq-domain = <&cpufreq_hw 0>; operating-points-v2 = <&cpu0_opp_table>; interconnects = <&gem_noc MASTER_AMPSS_M0 0 &mc_virt SLAVE_EBI_CH0 0>, - <&osm_l3 MASTER_OSM_L3_APPS 0 &osm_l3 SLAVE_OSM_L3 0>; + <&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>; power-domains = <&CPU_PD3>; power-domain-names = "psci"; #cooling-cells = <2>; @@ -157,7 +157,7 @@ qcom,freq-domain = <&cpufreq_hw 1>; operating-points-v2 = <&cpu4_opp_table>; interconnects = <&gem_noc MASTER_AMPSS_M0 0 &mc_virt SLAVE_EBI_CH0 0>, - <&osm_l3 MASTER_OSM_L3_APPS 0 &osm_l3 SLAVE_OSM_L3 0>; + <&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>; power-domains = <&CPU_PD4>; power-domain-names = "psci"; #cooling-cells = <2>; @@ -181,7 +181,7 @@ qcom,freq-domain = <&cpufreq_hw 1>; operating-points-v2 = <&cpu4_opp_table>; interconnects = <&gem_noc MASTER_AMPSS_M0 0 &mc_virt SLAVE_EBI_CH0 0>, - <&osm_l3 MASTER_OSM_L3_APPS 0 &osm_l3 SLAVE_OSM_L3 0>; + 
<&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>; power-domains = <&CPU_PD5>; power-domain-names = "psci"; #cooling-cells = <2>; @@ -205,7 +205,7 @@ qcom,freq-domain = <&cpufreq_hw 1>; operating-points-v2 = <&cpu4_opp_table>; interconnects = <&gem_noc MASTER_AMPSS_M0 0 &mc_virt SLAVE_EBI_CH0 0>, - <&osm_l3 MASTER_OSM_L3_APPS 0 &osm_l3 SLAVE_OSM_L3 0>; + <&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>; power-domains = <&CPU_PD6>; power-domain-names = "psci"; #cooling-cells = <2>; @@ -229,7 +229,7 @@ qcom,freq-domain = <&cpufreq_hw 2>; operating-points-v2 = <&cpu7_opp_table>; interconnects = <&gem_noc MASTER_AMPSS_M0 0 &mc_virt SLAVE_EBI_CH0 0>, - <&osm_l3 MASTER_OSM_L3_APPS 0 &osm_l3 SLAVE_OSM_L3 0>; + <&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>; power-domains = <&CPU_PD7>; power-domain-names = "psci"; #cooling-cells = <2>; @@ -4342,7 +4342,7 @@ clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GPLL0>; clock-names = "xo", "alternate"; - #interconnect-cells = <2>; + #interconnect-cells = <1>; }; cpufreq_hw: cpufreq@18323000 { diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi index 83ab6de459bc4..1efa07f2caff4 100644 --- a/arch/arm64/boot/dts/qcom/sm8250.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi @@ -107,7 +107,7 @@ qcom,freq-domain = <&cpufreq_hw 0>; operating-points-v2 = <&cpu0_opp_table>; interconnects = <&gem_noc MASTER_AMPSS_M0 0 &mc_virt SLAVE_EBI_CH0 0>, - <&epss_l3 MASTER_OSM_L3_APPS 0 &epss_l3 SLAVE_OSM_L3 0>; + <&epss_l3 MASTER_OSM_L3_APPS &epss_l3 SLAVE_OSM_L3>; #cooling-cells = <2>; L2_0: l2-cache { compatible = "cache"; @@ -138,7 +138,7 @@ qcom,freq-domain = <&cpufreq_hw 0>; operating-points-v2 = <&cpu0_opp_table>; interconnects = <&gem_noc MASTER_AMPSS_M0 0 &mc_virt SLAVE_EBI_CH0 0>, - <&epss_l3 MASTER_OSM_L3_APPS 0 &epss_l3 SLAVE_OSM_L3 0>; + <&epss_l3 MASTER_OSM_L3_APPS &epss_l3 SLAVE_OSM_L3>; #cooling-cells = <2>; L2_100: l2-cache { compatible = "cache"; @@ -163,7 +163,7 @@ qcom,freq-domain = <&cpufreq_hw 0>; operating-points-v2 = <&cpu0_opp_table>; interconnects = <&gem_noc MASTER_AMPSS_M0 0 &mc_virt SLAVE_EBI_CH0 0>, - <&epss_l3 MASTER_OSM_L3_APPS 0 &epss_l3 SLAVE_OSM_L3 0>; + <&epss_l3 MASTER_OSM_L3_APPS &epss_l3 SLAVE_OSM_L3>; #cooling-cells = <2>; L2_200: l2-cache { compatible = "cache"; @@ -188,7 +188,7 @@ qcom,freq-domain = <&cpufreq_hw 0>; operating-points-v2 = <&cpu0_opp_table>; interconnects = <&gem_noc MASTER_AMPSS_M0 0 &mc_virt SLAVE_EBI_CH0 0>, - <&epss_l3 MASTER_OSM_L3_APPS 0 &epss_l3 SLAVE_OSM_L3 0>; + <&epss_l3 MASTER_OSM_L3_APPS &epss_l3 SLAVE_OSM_L3>; #cooling-cells = <2>; L2_300: l2-cache { compatible = "cache"; @@ -213,7 +213,7 @@ qcom,freq-domain = <&cpufreq_hw 1>; operating-points-v2 = <&cpu4_opp_table>; interconnects = <&gem_noc MASTER_AMPSS_M0 0 &mc_virt SLAVE_EBI_CH0 0>, - <&epss_l3 MASTER_OSM_L3_APPS 0 &epss_l3 SLAVE_OSM_L3 0>; + <&epss_l3 MASTER_OSM_L3_APPS &epss_l3 SLAVE_OSM_L3>; #cooling-cells = <2>; L2_400: l2-cache { compatible = "cache"; @@ -238,7 +238,7 @@ qcom,freq-domain = <&cpufreq_hw 1>; operating-points-v2 = <&cpu4_opp_table>; interconnects = <&gem_noc MASTER_AMPSS_M0 0 &mc_virt SLAVE_EBI_CH0 0>, - <&epss_l3 MASTER_OSM_L3_APPS 0 &epss_l3 SLAVE_OSM_L3 0>; + <&epss_l3 MASTER_OSM_L3_APPS &epss_l3 SLAVE_OSM_L3>; #cooling-cells = <2>; L2_500: l2-cache { compatible = "cache"; @@ -263,7 +263,7 @@ qcom,freq-domain = <&cpufreq_hw 1>; operating-points-v2 = <&cpu4_opp_table>; interconnects = <&gem_noc MASTER_AMPSS_M0 0 &mc_virt SLAVE_EBI_CH0 0>, - <&epss_l3 MASTER_OSM_L3_APPS 0 &epss_l3 SLAVE_OSM_L3 0>; + 
<&epss_l3 MASTER_OSM_L3_APPS &epss_l3 SLAVE_OSM_L3>; #cooling-cells = <2>; L2_600: l2-cache { compatible = "cache"; @@ -288,7 +288,7 @@ qcom,freq-domain = <&cpufreq_hw 2>; operating-points-v2 = <&cpu7_opp_table>; interconnects = <&gem_noc MASTER_AMPSS_M0 0 &mc_virt SLAVE_EBI_CH0 0>, - <&epss_l3 MASTER_OSM_L3_APPS 0 &epss_l3 SLAVE_OSM_L3 0>; + <&epss_l3 MASTER_OSM_L3_APPS &epss_l3 SLAVE_OSM_L3>; #cooling-cells = <2>; L2_700: l2-cache { compatible = "cache"; @@ -5679,7 +5679,7 @@ clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GPLL0>; clock-names = "xo", "alternate"; - #interconnect-cells = <2>; + #interconnect-cells = <1>; }; cpufreq_hw: cpufreq@18591000 { diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi index 88ef478cb5ccb..ec451c616f3e4 100644 --- a/arch/arm64/boot/dts/qcom/sm8350.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi @@ -1744,6 +1744,8 @@ qcom,controlled-remotely; iommus = <&apps_smmu 0x594 0x0011>, <&apps_smmu 0x596 0x0011>; + /* FIXME: Probing BAM DMA causes some abort and system hang */ + status = "fail"; }; crypto: crypto@1dfa000 { @@ -1755,6 +1757,8 @@ <&apps_smmu 0x596 0x0011>; interconnects = <&aggre2_noc MASTER_CRYPTO 0 &mc_virt SLAVE_EBI1 0>; interconnect-names = "memory"; + /* FIXME: dependency BAM DMA is disabled */ + status = "disabled"; }; ipa: ipa@1e40000 { diff --git a/arch/arm64/boot/dts/rockchip/px30.dtsi b/arch/arm64/boot/dts/rockchip/px30.dtsi index 8332c8aaf49be..42ce78beb4134 100644 --- a/arch/arm64/boot/dts/rockchip/px30.dtsi +++ b/arch/arm64/boot/dts/rockchip/px30.dtsi @@ -291,14 +291,14 @@ }; power-domain@PX30_PD_MMC_NAND { reg = ; - clocks = <&cru HCLK_NANDC>, - <&cru HCLK_EMMC>, - <&cru HCLK_SDIO>, - <&cru HCLK_SFC>, - <&cru SCLK_EMMC>, - <&cru SCLK_NANDC>, - <&cru SCLK_SDIO>, - <&cru SCLK_SFC>; + clocks = <&cru HCLK_NANDC>, + <&cru HCLK_EMMC>, + <&cru HCLK_SDIO>, + <&cru HCLK_SFC>, + <&cru SCLK_EMMC>, + <&cru SCLK_NANDC>, + <&cru SCLK_SDIO>, + <&cru SCLK_SFC>; pm_qos = <&qos_emmc>, <&qos_nand>, <&qos_sdio>, <&qos_sfc>; #power-domain-cells = <0>; diff --git a/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts index 7ea48167747c6..9232357f4fec9 100644 --- a/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts +++ b/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts @@ -106,7 +106,6 @@ regulator-name = "vdd_core"; regulator-min-microvolt = <827000>; regulator-max-microvolt = <1340000>; - regulator-init-microvolt = <1015000>; regulator-settling-time-up-us = <250>; regulator-always-on; regulator-boot-on; diff --git a/arch/arm64/boot/dts/rockchip/rk3308-rock-pi-s.dts b/arch/arm64/boot/dts/rockchip/rk3308-rock-pi-s.dts index a71f249ed384e..e9810d2f04071 100644 --- a/arch/arm64/boot/dts/rockchip/rk3308-rock-pi-s.dts +++ b/arch/arm64/boot/dts/rockchip/rk3308-rock-pi-s.dts @@ -105,7 +105,6 @@ regulator-name = "vdd_core"; regulator-min-microvolt = <827000>; regulator-max-microvolt = <1340000>; - regulator-init-microvolt = <1015000>; regulator-settling-time-up-us = <250>; regulator-always-on; regulator-boot-on; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-eaidk-610.dts b/arch/arm64/boot/dts/rockchip/rk3399-eaidk-610.dts index d1f343345f674..6464ef4d113dd 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-eaidk-610.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-eaidk-610.dts @@ -773,7 +773,7 @@ compatible = "brcm,bcm4329-fmac"; reg = <1>; interrupt-parent = <&gpio0>; - interrupts = ; + interrupts = ; interrupt-names = "host-wake"; pinctrl-names = "default"; pinctrl-0 = <&wifi_host_wake_l>; diff 
--git a/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi index b6e082f1f6d97..7c5f441a2219e 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi @@ -375,7 +375,6 @@ vcc_sdio: LDO_REG4 { regulator-always-on; regulator-boot-on; - regulator-init-microvolt = <3000000>; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <3300000>; regulator-name = "vcc_sdio"; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts index 028eb508ae302..8bfd5f88d1ef6 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts @@ -548,9 +548,8 @@ &sdhci { max-frequency = <150000000>; bus-width = <8>; - mmc-hs400-1_8v; + mmc-hs200-1_8v; non-removable; - mmc-hs400-enhanced-strobe; status = "okay"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi index 907071d4fe804..980c4534313a2 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi @@ -45,7 +45,7 @@ sdio_pwrseq: sdio-pwrseq { compatible = "mmc-pwrseq-simple"; clocks = <&rk808 1>; - clock-names = "ext_clock"; + clock-names = "lpo"; pinctrl-names = "default"; pinctrl-0 = <&wifi_enable_h>; reset-gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>; @@ -645,9 +645,9 @@ }; &sdhci { + max-frequency = <150000000>; bus-width = <8>; - mmc-hs400-1_8v; - mmc-hs400-enhanced-strobe; + mmc-hs200-1_8v; non-removable; status = "okay"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4b-plus.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4b-plus.dts index cec3b7b1b9474..8a17c1eaae15e 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4b-plus.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4b-plus.dts @@ -31,7 +31,7 @@ compatible = "brcm,bcm4329-fmac"; reg = <1>; interrupt-parent = <&gpio0>; - interrupts = ; + interrupts = ; interrupt-names = "host-wake"; pinctrl-names = "default"; pinctrl-0 = <&wifi_host_wake_l>; diff --git a/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rgxx3.dtsi b/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rgxx3.dtsi index a2c31d53b45bc..8cbf3d9a4f22e 100644 --- a/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rgxx3.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rgxx3.dtsi @@ -356,7 +356,6 @@ regulator-boot-on; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; - regulator-init-microvolt = <900000>; regulator-ramp-delay = <6001>; regulator-initial-mode = <0x2>; regulator-name = "vdd_logic"; @@ -371,7 +370,6 @@ regulator-boot-on; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; - regulator-init-microvolt = <900000>; regulator-ramp-delay = <6001>; regulator-initial-mode = <0x2>; regulator-name = "vdd_gpu"; @@ -533,7 +531,6 @@ regulator-boot-on; regulator-min-microvolt = <712500>; regulator-max-microvolt = <1390000>; - regulator-init-microvolt = <900000>; regulator-name = "vdd_cpu"; regulator-ramp-delay = <2300>; vin-supply = <&vcc_sys>; diff --git a/arch/arm64/boot/dts/rockchip/rk3566-box-demo.dts b/arch/arm64/boot/dts/rockchip/rk3566-box-demo.dts index 410cd3e5e7bca..0c18406e4c597 100644 --- a/arch/arm64/boot/dts/rockchip/rk3566-box-demo.dts +++ b/arch/arm64/boot/dts/rockchip/rk3566-box-demo.dts @@ -239,7 +239,7 @@ &gmac1 { assigned-clocks = <&cru SCLK_GMAC1_RX_TX>, <&cru SCLK_GMAC1>; - assigned-clock-parents 
= <&cru SCLK_GMAC1_RGMII_SPEED>, <&gmac1_clkin>; + assigned-clock-parents = <&cru SCLK_GMAC1_RGMII_SPEED>, <&gmac1_clkin>; phy-mode = "rgmii"; clock_in_out = "input"; pinctrl-names = "default"; @@ -416,7 +416,7 @@ compatible = "brcm,bcm4329-fmac"; reg = <1>; interrupt-parent = <&gpio2>; - interrupts = ; + interrupts = ; interrupt-names = "host-wake"; pinctrl-names = "default"; pinctrl-0 = <&wifi_host_wake_h>; diff --git a/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts b/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts index ff936b7135797..1c6d83b47cd21 100644 --- a/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts +++ b/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts @@ -218,7 +218,6 @@ regulator-boot-on; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; - regulator-init-microvolt = <900000>; regulator-ramp-delay = <6001>; regulator-initial-mode = <0x2>; @@ -233,7 +232,6 @@ regulator-boot-on; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; - regulator-init-microvolt = <900000>; regulator-ramp-delay = <6001>; regulator-initial-mode = <0x2>; @@ -259,7 +257,6 @@ regulator-boot-on; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; - regulator-init-microvolt = <900000>; regulator-ramp-delay = <6001>; regulator-initial-mode = <0x2>; diff --git a/arch/arm64/boot/dts/rockchip/rk3566-pinenote.dtsi b/arch/arm64/boot/dts/rockchip/rk3566-pinenote.dtsi index 8d61f824c12dc..d899087bf0b55 100644 --- a/arch/arm64/boot/dts/rockchip/rk3566-pinenote.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3566-pinenote.dtsi @@ -264,7 +264,6 @@ regulator-always-on; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; - regulator-init-microvolt = <900000>; regulator-ramp-delay = <6001>; regulator-initial-mode = <0x2>; @@ -278,7 +277,6 @@ regulator-name = "vdd_gpu_npu"; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; - regulator-init-microvolt = <900000>; regulator-ramp-delay = <6001>; regulator-initial-mode = <0x2>; diff --git a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts index 25a8c781f4e75..854d02b46e6fc 100644 --- a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts +++ b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts @@ -366,7 +366,6 @@ regulator-boot-on; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; - regulator-init-microvolt = <900000>; regulator-ramp-delay = <6001>; regulator-initial-mode = <0x2>; regulator-name = "vdd_logic"; @@ -381,7 +380,6 @@ regulator-boot-on; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; - regulator-init-microvolt = <900000>; regulator-ramp-delay = <6001>; regulator-initial-mode = <0x2>; regulator-name = "vdd_gpu"; diff --git a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts index b276eb0810c70..2d92713be2a09 100644 --- a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts +++ b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts @@ -277,7 +277,6 @@ regulator-boot-on; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; - regulator-init-microvolt = <900000>; regulator-ramp-delay = <6001>; regulator-state-mem { @@ -292,7 +291,6 @@ regulator-boot-on; regulator-min-microvolt = <900000>; regulator-max-microvolt = <1350000>; - regulator-init-microvolt = <900000>; regulator-ramp-delay = <6001>; regulator-state-mem { diff --git a/arch/arm64/boot/dts/rockchip/rk3566-radxa-cm3-io.dts 
b/arch/arm64/boot/dts/rockchip/rk3566-radxa-cm3-io.dts index 5e4236af4fcb8..1b1c67d5b1ef3 100644 --- a/arch/arm64/boot/dts/rockchip/rk3566-radxa-cm3-io.dts +++ b/arch/arm64/boot/dts/rockchip/rk3566-radxa-cm3-io.dts @@ -137,8 +137,8 @@ &mdio1 { rgmii_phy1: ethernet-phy@0 { - compatible="ethernet-phy-ieee802.3-c22"; - reg= <0x0>; + compatible = "ethernet-phy-ieee802.3-c22"; + reg = <0x0>; }; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3566-roc-pc.dts b/arch/arm64/boot/dts/rockchip/rk3566-roc-pc.dts index 42889c5900bd5..938092fce1866 100644 --- a/arch/arm64/boot/dts/rockchip/rk3566-roc-pc.dts +++ b/arch/arm64/boot/dts/rockchip/rk3566-roc-pc.dts @@ -278,7 +278,6 @@ regulator-boot-on; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; - regulator-init-microvolt = <900000>; regulator-ramp-delay = <6001>; regulator-state-mem { @@ -291,7 +290,6 @@ regulator-name = "vdd_gpu"; regulator-min-microvolt = <900000>; regulator-max-microvolt = <1350000>; - regulator-init-microvolt = <900000>; regulator-ramp-delay = <6001>; regulator-state-mem { diff --git a/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi b/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi index 31aa2b8efe393..63bae36b8f7e7 100644 --- a/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi @@ -234,7 +234,6 @@ regulator-boot-on; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; - regulator-init-microvolt = <900000>; regulator-ramp-delay = <6001>; regulator-initial-mode = <0x2>; regulator-state-mem { @@ -249,7 +248,6 @@ regulator-boot-on; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; - regulator-init-microvolt = <900000>; regulator-ramp-delay = <6001>; regulator-initial-mode = <0x2>; regulator-state-mem { @@ -272,7 +270,6 @@ regulator-boot-on; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; - regulator-init-microvolt = <900000>; regulator-initial-mode = <0x2>; regulator-name = "vdd_npu"; regulator-state-mem { diff --git a/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts b/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts index ff0bf24cc1a2a..f9127ddfbb7df 100644 --- a/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts +++ b/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts @@ -308,7 +308,6 @@ regulator-name = "vdd_logic"; regulator-always-on; regulator-boot-on; - regulator-init-microvolt = <900000>; regulator-initial-mode = <0x2>; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; @@ -322,7 +321,6 @@ vdd_gpu: DCDC_REG2 { regulator-name = "vdd_gpu"; regulator-always-on; - regulator-init-microvolt = <900000>; regulator-initial-mode = <0x2>; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; @@ -346,7 +344,6 @@ vdd_npu: DCDC_REG4 { regulator-name = "vdd_npu"; - regulator-init-microvolt = <900000>; regulator-initial-mode = <0x2>; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; diff --git a/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts b/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts index 674792567fa6e..19f8fc369b130 100644 --- a/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts +++ b/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts @@ -293,7 +293,6 @@ regulator-name = "vdd_logic"; regulator-always-on; regulator-boot-on; - regulator-init-microvolt = <900000>; regulator-initial-mode = <0x2>; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; @@ -307,7 +306,6 @@ vdd_gpu: DCDC_REG2 { regulator-name = 
"vdd_gpu"; regulator-always-on; - regulator-init-microvolt = <900000>; regulator-initial-mode = <0x2>; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; @@ -331,7 +329,6 @@ vdd_npu: DCDC_REG4 { regulator-name = "vdd_npu"; - regulator-init-microvolt = <900000>; regulator-initial-mode = <0x2>; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; diff --git a/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dtsi b/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dtsi index 25e205632a686..89e84e3a92629 100644 --- a/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dtsi @@ -173,7 +173,6 @@ regulator-name = "vdd_logic"; regulator-always-on; regulator-boot-on; - regulator-init-microvolt = <900000>; regulator-initial-mode = <0x2>; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; @@ -187,7 +186,6 @@ vdd_gpu: DCDC_REG2 { regulator-name = "vdd_gpu"; regulator-always-on; - regulator-init-microvolt = <900000>; regulator-initial-mode = <0x2>; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; @@ -211,7 +209,6 @@ vdd_npu: DCDC_REG4 { regulator-name = "vdd_npu"; - regulator-init-microvolt = <900000>; regulator-initial-mode = <0x2>; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; @@ -330,7 +327,6 @@ vcca1v8_image: LDO_REG9 { regulator-name = "vcca1v8_image"; - regulator-init-microvolt = <950000>; regulator-min-microvolt = <950000>; regulator-max-microvolt = <1800000>; diff --git a/arch/arm64/boot/dts/rockchip/rk3568-lubancat-2.dts b/arch/arm64/boot/dts/rockchip/rk3568-lubancat-2.dts index e653b067aa5d9..a8a4cc190eb32 100644 --- a/arch/arm64/boot/dts/rockchip/rk3568-lubancat-2.dts +++ b/arch/arm64/boot/dts/rockchip/rk3568-lubancat-2.dts @@ -243,7 +243,6 @@ regulator-boot-on; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; - regulator-init-microvolt = <900000>; regulator-ramp-delay = <6001>; regulator-initial-mode = <0x2>; @@ -258,7 +257,6 @@ regulator-boot-on; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; - regulator-init-microvolt = <900000>; regulator-ramp-delay = <6001>; regulator-initial-mode = <0x2>; @@ -284,7 +282,6 @@ regulator-boot-on; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; - regulator-init-microvolt = <900000>; regulator-ramp-delay = <6001>; regulator-initial-mode = <0x2>; diff --git a/arch/arm64/boot/dts/rockchip/rk3568-nanopi-r5s.dtsi b/arch/arm64/boot/dts/rockchip/rk3568-nanopi-r5s.dtsi index 58ba328ea7824..93189f8306400 100644 --- a/arch/arm64/boot/dts/rockchip/rk3568-nanopi-r5s.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3568-nanopi-r5s.dtsi @@ -232,7 +232,6 @@ regulator-name = "vdd_logic"; regulator-always-on; regulator-boot-on; - regulator-init-microvolt = <900000>; regulator-initial-mode = <0x2>; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; @@ -246,7 +245,6 @@ vdd_gpu: DCDC_REG2 { regulator-name = "vdd_gpu"; regulator-always-on; - regulator-init-microvolt = <900000>; regulator-initial-mode = <0x2>; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; @@ -270,7 +268,6 @@ vdd_npu: DCDC_REG4 { regulator-name = "vdd_npu"; - regulator-init-microvolt = <900000>; regulator-initial-mode = <0x2>; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; diff --git a/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts b/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts index 
59ecf868dbd0e..a337f547caf53 100644 --- a/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts +++ b/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts @@ -291,7 +291,6 @@ regulator-name = "vdd_logic"; regulator-always-on; regulator-boot-on; - regulator-init-microvolt = <900000>; regulator-initial-mode = <0x2>; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; @@ -305,7 +304,6 @@ vdd_gpu: DCDC_REG2 { regulator-name = "vdd_gpu"; regulator-always-on; - regulator-init-microvolt = <900000>; regulator-initial-mode = <0x2>; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; @@ -329,7 +327,6 @@ vdd_npu: DCDC_REG4 { regulator-name = "vdd_npu"; - regulator-init-microvolt = <900000>; regulator-initial-mode = <0x2>; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; diff --git a/arch/arm64/boot/dts/rockchip/rk3568-radxa-cm3i.dtsi b/arch/arm64/boot/dts/rockchip/rk3568-radxa-cm3i.dtsi index c50fbdd48680c..45b03dcbbad45 100644 --- a/arch/arm64/boot/dts/rockchip/rk3568-radxa-cm3i.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3568-radxa-cm3i.dtsi @@ -163,7 +163,6 @@ regulator-name = "vdd_logic"; regulator-always-on; regulator-boot-on; - regulator-init-microvolt = <900000>; regulator-initial-mode = <0x2>; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; @@ -177,7 +176,6 @@ vdd_gpu: DCDC_REG2 { regulator-name = "vdd_gpu"; regulator-always-on; - regulator-init-microvolt = <900000>; regulator-initial-mode = <0x2>; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; @@ -201,7 +199,6 @@ vdd_npu: DCDC_REG4 { regulator-name = "vdd_npu"; - regulator-init-microvolt = <900000>; regulator-initial-mode = <0x2>; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; diff --git a/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts b/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts index 917f5b2b8aab8..e05ab11981f55 100644 --- a/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts +++ b/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts @@ -350,7 +350,6 @@ regulator-name = "vdd_logic"; regulator-always-on; regulator-boot-on; - regulator-init-microvolt = <900000>; regulator-initial-mode = <0x2>; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; @@ -364,7 +363,6 @@ vdd_gpu: DCDC_REG2 { regulator-name = "vdd_gpu"; regulator-always-on; - regulator-init-microvolt = <900000>; regulator-initial-mode = <0x2>; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; @@ -388,7 +386,6 @@ vdd_npu: DCDC_REG4 { regulator-name = "vdd_npu"; - regulator-init-microvolt = <900000>; regulator-initial-mode = <0x2>; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; diff --git a/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts b/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts index afda976680bca..51537030f8e39 100644 --- a/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts +++ b/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts @@ -337,7 +337,6 @@ regulator-boot-on; regulator-min-microvolt = <550000>; regulator-max-microvolt = <950000>; - regulator-init-microvolt = <750000>; regulator-ramp-delay = <12500>; regulator-name = "vdd_vdenc_s0"; diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts b/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts index 4d9ed2a027363..1a60a275ddf9b 100644 --- a/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts +++ b/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts @@ -125,19 +125,19 @@ cpu-supply = 
<&vdd_cpu_lit_s0>; }; -&cpu_b0{ +&cpu_b0 { cpu-supply = <&vdd_cpu_big0_s0>; }; -&cpu_b1{ +&cpu_b1 { cpu-supply = <&vdd_cpu_big0_s0>; }; -&cpu_b2{ +&cpu_b2 { cpu-supply = <&vdd_cpu_big1_s0>; }; -&cpu_b3{ +&cpu_b3 { cpu-supply = <&vdd_cpu_big1_s0>; }; diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index 67f2fb781f59e..8df46f186c64b 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -356,7 +356,7 @@ static inline int sme_max_virtualisable_vl(void) return vec_max_virtualisable_vl(ARM64_VEC_SME); } -extern void sme_alloc(struct task_struct *task); +extern void sme_alloc(struct task_struct *task, bool flush); extern unsigned int sme_get_vl(void); extern int sme_set_current_vl(unsigned long arg); extern int sme_get_current_vl(void); @@ -388,7 +388,7 @@ static inline void sme_smstart_sm(void) { } static inline void sme_smstop_sm(void) { } static inline void sme_smstop(void) { } -static inline void sme_alloc(struct task_struct *task) { } +static inline void sme_alloc(struct task_struct *task, bool flush) { } static inline void sme_setup(void) { } static inline unsigned int sme_get_vl(void) { return 0; } static inline int sme_max_vl(void) { return 0; } diff --git a/arch/arm64/include/uapi/asm/bitsperlong.h b/arch/arm64/include/uapi/asm/bitsperlong.h new file mode 100644 index 0000000000000..485d60bee26ca --- /dev/null +++ b/arch/arm64/include/uapi/asm/bitsperlong.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef __ASM_BITSPERLONG_H +#define __ASM_BITSPERLONG_H + +#define __BITS_PER_LONG 64 + +#include + +#endif /* __ASM_BITSPERLONG_H */ diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 75c37b1c55aaf..087c05aa960ea 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -1285,9 +1285,9 @@ void fpsimd_release_task(struct task_struct *dead_task) * the interest of testability and predictability, the architecture * guarantees that when ZA is enabled it will be zeroed. 
*/ -void sme_alloc(struct task_struct *task) +void sme_alloc(struct task_struct *task, bool flush) { - if (task->thread.sme_state) { + if (task->thread.sme_state && flush) { memset(task->thread.sme_state, 0, sme_state_size(task)); return; } @@ -1515,7 +1515,7 @@ void do_sme_acc(unsigned long esr, struct pt_regs *regs) } sve_alloc(current, false); - sme_alloc(current); + sme_alloc(current, true); if (!current->thread.sve_state || !current->thread.sme_state) { force_sig(SIGKILL); return; diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 5b9b4305248b8..187aa2b175b4f 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -881,6 +881,13 @@ static int sve_set_common(struct task_struct *target, break; case ARM64_VEC_SME: target->thread.svcr |= SVCR_SM_MASK; + + /* + * Disable traps and ensure there is SME storage but + * preserve any currently set values in ZA/ZT. + */ + sme_alloc(target, false); + set_tsk_thread_flag(target, TIF_SME); break; default: WARN_ON_ONCE(1); @@ -1100,7 +1107,7 @@ static int za_set(struct task_struct *target, } /* Allocate/reinit ZA storage */ - sme_alloc(target); + sme_alloc(target, true); if (!target->thread.sme_state) { ret = -ENOMEM; goto out; @@ -1170,8 +1177,13 @@ static int zt_set(struct task_struct *target, if (!system_supports_sme2()) return -EINVAL; + /* Ensure SVE storage in case this is first use of SME */ + sve_alloc(target, false); + if (!target->thread.sve_state) + return -ENOMEM; + if (!thread_za_enabled(&target->thread)) { - sme_alloc(target); + sme_alloc(target, true); if (!target->thread.sme_state) return -ENOMEM; } @@ -1179,8 +1191,10 @@ static int zt_set(struct task_struct *target, ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, thread_zt_state(&target->thread), 0, ZT_SIG_REG_BYTES); - if (ret == 0) + if (ret == 0) { target->thread.svcr |= SVCR_ZA_MASK; + set_tsk_thread_flag(target, TIF_SME); + } fpsimd_flush_task_state(target); diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index e304f7ebec2a9..c7ebe744c64e3 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -475,7 +475,7 @@ static int restore_za_context(struct user_ctxs *user) fpsimd_flush_task_state(current); /* From now, fpsimd_thread_switch() won't touch thread.sve_state */ - sme_alloc(current); + sme_alloc(current, true); if (!current->thread.sme_state) { current->thread.svcr &= ~SVCR_ZA_MASK; clear_thread_flag(TIF_SME); diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index e71d5bf2cee0f..465759f6b0edd 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -662,5 +662,3 @@ source "kernel/power/Kconfig" source "drivers/acpi/Kconfig" endmenu - -source "drivers/firmware/Kconfig" diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile index b1e5db51b61ca..ef87bab46754e 100644 --- a/arch/loongarch/Makefile +++ b/arch/loongarch/Makefile @@ -83,8 +83,8 @@ KBUILD_CFLAGS_KERNEL += -fPIE LDFLAGS_vmlinux += -static -pie --no-dynamic-linker -z notext endif -cflags-y += -ffreestanding cflags-y += $(call cc-option, -mno-check-zero-division) +cflags-y += -fno-builtin-memcpy -fno-builtin-memmove -fno-builtin-memset load-y = 0x9000000000200000 bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y) diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm/Kbuild index 6b222f227342b..93783fa24f6e9 100644 --- a/arch/loongarch/include/asm/Kbuild +++ b/arch/loongarch/include/asm/Kbuild @@ -1,6 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 generic-y += dma-contiguous.h -generic-y += 
export.h generic-y += mcs_spinlock.h generic-y += parport.h generic-y += early_ioremap.h diff --git a/arch/loongarch/include/asm/fpu.h b/arch/loongarch/include/asm/fpu.h index b541f6248837e..c2d8962fda00b 100644 --- a/arch/loongarch/include/asm/fpu.h +++ b/arch/loongarch/include/asm/fpu.h @@ -173,16 +173,30 @@ static inline void restore_fp(struct task_struct *tsk) _restore_fp(&tsk->thread.fpu); } -static inline union fpureg *get_fpu_regs(struct task_struct *tsk) +static inline void save_fpu_regs(struct task_struct *tsk) { + unsigned int euen; + if (tsk == current) { preempt_disable(); - if (is_fpu_owner()) + + euen = csr_read32(LOONGARCH_CSR_EUEN); + +#ifdef CONFIG_CPU_HAS_LASX + if (euen & CSR_EUEN_LASXEN) + _save_lasx(¤t->thread.fpu); + else +#endif +#ifdef CONFIG_CPU_HAS_LSX + if (euen & CSR_EUEN_LSXEN) + _save_lsx(¤t->thread.fpu); + else +#endif + if (euen & CSR_EUEN_FPEN) _save_fp(¤t->thread.fpu); + preempt_enable(); } - - return tsk->thread.fpu.fpr; } static inline int is_simd_owner(void) diff --git a/arch/loongarch/include/asm/ptrace.h b/arch/loongarch/include/asm/ptrace.h index 35f0958163acc..f3ddaed9ef7f0 100644 --- a/arch/loongarch/include/asm/ptrace.h +++ b/arch/loongarch/include/asm/ptrace.h @@ -162,7 +162,7 @@ static inline void regs_set_return_value(struct pt_regs *regs, unsigned long val #define instruction_pointer(regs) ((regs)->csr_era) #define profile_pc(regs) instruction_pointer(regs) -extern void die(const char *, struct pt_regs *) __noreturn; +extern void die(const char *str, struct pt_regs *regs); static inline void die_if_kernel(const char *str, struct pt_regs *regs) { diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h index 416b653bccb4e..66ecb480c894a 100644 --- a/arch/loongarch/include/asm/smp.h +++ b/arch/loongarch/include/asm/smp.h @@ -98,8 +98,6 @@ static inline void __cpu_die(unsigned int cpu) { loongson_cpu_die(cpu); } - -extern void __noreturn play_dead(void); #endif #endif /* __ASM_SMP_H */ diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S index f3df5f0a45094..501094a09f5d8 100644 --- a/arch/loongarch/kernel/fpu.S +++ b/arch/loongarch/kernel/fpu.S @@ -6,12 +6,12 @@ * * Copyright (C) 2020-2022 Loongson Technology Corporation Limited */ +#include #include #include #include #include #include -#include #include #include #include diff --git a/arch/loongarch/kernel/hw_breakpoint.c b/arch/loongarch/kernel/hw_breakpoint.c index 021b59c248fac..fc55c4de2a11f 100644 --- a/arch/loongarch/kernel/hw_breakpoint.c +++ b/arch/loongarch/kernel/hw_breakpoint.c @@ -207,8 +207,7 @@ static int hw_breakpoint_control(struct perf_event *bp, write_wb_reg(CSR_CFG_CTRL, i, 0, CTRL_PLV_ENABLE); } else { ctrl = encode_ctrl_reg(info->ctrl); - write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl | CTRL_PLV_ENABLE | - 1 << MWPnCFG3_LoadEn | 1 << MWPnCFG3_StoreEn); + write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl | CTRL_PLV_ENABLE); } enable = csr_read64(LOONGARCH_CSR_CRMD); csr_write64(CSR_CRMD_WE | enable, LOONGARCH_CSR_CRMD); diff --git a/arch/loongarch/kernel/mcount.S b/arch/loongarch/kernel/mcount.S index cb8e5803de4b0..3015896016a0b 100644 --- a/arch/loongarch/kernel/mcount.S +++ b/arch/loongarch/kernel/mcount.S @@ -5,7 +5,7 @@ * Copyright (C) 2022 Loongson Technology Corporation Limited */ -#include +#include #include #include #include diff --git a/arch/loongarch/kernel/mcount_dyn.S b/arch/loongarch/kernel/mcount_dyn.S index e16ab0b98e5a9..482aa553aa2d5 100644 --- a/arch/loongarch/kernel/mcount_dyn.S +++ b/arch/loongarch/kernel/mcount_dyn.S @@ -3,7 +3,6 @@ 
* Copyright (C) 2022 Loongson Technology Corporation Limited */ -#include #include #include #include diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c index 2e04eb07abb6e..4ee1e9d6a65f1 100644 --- a/arch/loongarch/kernel/process.c +++ b/arch/loongarch/kernel/process.c @@ -61,13 +61,6 @@ EXPORT_SYMBOL(__stack_chk_guard); unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE; EXPORT_SYMBOL(boot_option_idle_override); -#ifdef CONFIG_HOTPLUG_CPU -void __noreturn arch_cpu_idle_dead(void) -{ - play_dead(); -} -#endif - asmlinkage void ret_from_fork(void); asmlinkage void ret_from_kernel_thread(void); diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c index a0767c3a0f0a9..f72adbf530c64 100644 --- a/arch/loongarch/kernel/ptrace.c +++ b/arch/loongarch/kernel/ptrace.c @@ -147,6 +147,8 @@ static int fpr_get(struct task_struct *target, { int r; + save_fpu_regs(target); + if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t)) r = gfpr_get(target, &to); else @@ -278,6 +280,8 @@ static int simd_get(struct task_struct *target, { const unsigned int wr_size = NUM_FPU_REGS * regset->size; + save_fpu_regs(target); + if (!tsk_used_math(target)) { /* The task hasn't used FP or LSX, fill with 0xff */ copy_pad_fprs(target, regset, &to, 0); diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c index 8ea1bbcf13a7e..6667b0a90f81d 100644 --- a/arch/loongarch/kernel/smp.c +++ b/arch/loongarch/kernel/smp.c @@ -317,7 +317,7 @@ void loongson_cpu_die(unsigned int cpu) mb(); } -void play_dead(void) +void __noreturn arch_cpu_idle_dead(void) { register uint64_t addr; register void (*init_fn)(void); diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c index 8fb5e7a771450..89699db45cec1 100644 --- a/arch/loongarch/kernel/traps.c +++ b/arch/loongarch/kernel/traps.c @@ -383,16 +383,15 @@ void show_registers(struct pt_regs *regs) static DEFINE_RAW_SPINLOCK(die_lock); -void __noreturn die(const char *str, struct pt_regs *regs) +void die(const char *str, struct pt_regs *regs) { + int ret; static int die_counter; - int sig = SIGSEGV; oops_enter(); - if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr, - SIGSEGV) == NOTIFY_STOP) - sig = 0; + ret = notify_die(DIE_OOPS, str, regs, 0, + current->thread.trap_nr, SIGSEGV); console_verbose(); raw_spin_lock_irq(&die_lock); @@ -405,6 +404,9 @@ void __noreturn die(const char *str, struct pt_regs *regs) oops_exit(); + if (ret == NOTIFY_STOP) + return; + if (regs && kexec_should_crash(current)) crash_kexec(regs); @@ -414,7 +416,7 @@ void __noreturn die(const char *str, struct pt_regs *regs) if (panic_on_oops) panic("Fatal exception"); - make_task_dead(sig); + make_task_dead(SIGSEGV); } static inline void setup_vint_size(unsigned int size) diff --git a/arch/loongarch/lib/clear_user.S b/arch/loongarch/lib/clear_user.S index 9dcf717193874..0790eadce166d 100644 --- a/arch/loongarch/lib/clear_user.S +++ b/arch/loongarch/lib/clear_user.S @@ -3,12 +3,12 @@ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited */ +#include #include #include #include #include #include -#include #include .irp to, 0, 1, 2, 3, 4, 5, 6, 7 diff --git a/arch/loongarch/lib/copy_user.S b/arch/loongarch/lib/copy_user.S index fecd08cad702d..bfe3d2793d000 100644 --- a/arch/loongarch/lib/copy_user.S +++ b/arch/loongarch/lib/copy_user.S @@ -3,12 +3,12 @@ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited */ +#include #include #include #include #include #include -#include #include .irp to, 0, 
1, 2, 3, 4, 5, 6, 7 diff --git a/arch/loongarch/lib/memcpy.S b/arch/loongarch/lib/memcpy.S index 39ce6621c7043..cc30b3b6252f7 100644 --- a/arch/loongarch/lib/memcpy.S +++ b/arch/loongarch/lib/memcpy.S @@ -3,11 +3,11 @@ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited */ +#include #include #include #include #include -#include #include SYM_FUNC_START(memcpy) diff --git a/arch/loongarch/lib/memmove.S b/arch/loongarch/lib/memmove.S index 45b725ba78670..7dc76d1484b6a 100644 --- a/arch/loongarch/lib/memmove.S +++ b/arch/loongarch/lib/memmove.S @@ -3,11 +3,11 @@ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited */ +#include #include #include #include #include -#include #include SYM_FUNC_START(memmove) diff --git a/arch/loongarch/lib/memset.S b/arch/loongarch/lib/memset.S index b39c6194e3ae8..3f20f7996e8ed 100644 --- a/arch/loongarch/lib/memset.S +++ b/arch/loongarch/lib/memset.S @@ -3,11 +3,11 @@ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited */ +#include #include #include #include #include -#include #include .macro fill_to_64 r0 diff --git a/arch/loongarch/lib/unaligned.S b/arch/loongarch/lib/unaligned.S index 9177fd638f072..185f82d85810a 100644 --- a/arch/loongarch/lib/unaligned.S +++ b/arch/loongarch/lib/unaligned.S @@ -9,7 +9,6 @@ #include #include #include -#include #include .L_fixup_handle_unaligned: diff --git a/arch/loongarch/mm/page.S b/arch/loongarch/mm/page.S index 4c874a7af0ad6..7ad76551d3133 100644 --- a/arch/loongarch/mm/page.S +++ b/arch/loongarch/mm/page.S @@ -2,9 +2,9 @@ /* * Copyright (C) 2020-2022 Loongson Technology Corporation Limited */ +#include #include #include -#include #include #include diff --git a/arch/loongarch/mm/tlbex.S b/arch/loongarch/mm/tlbex.S index 4ad78703de6f4..ca17dd3a19153 100644 --- a/arch/loongarch/mm/tlbex.S +++ b/arch/loongarch/mm/tlbex.S @@ -3,7 +3,6 @@ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited */ #include -#include #include #include #include diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index 0e5ebfe8d9d29..ae03b8679696e 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -406,7 +407,7 @@ LDREG 0(\ptp),\pte bb,<,n \pte,_PAGE_PRESENT_BIT,3f b \fault - stw \spc,0(\tmp) + stw \tmp1,0(\tmp) 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) #endif 2: LDREG 0(\ptp),\pte @@ -415,24 +416,22 @@ .endm /* Release page_table_lock without reloading lock address. - Note that the values in the register spc are limited to - NR_SPACE_IDS (262144). Thus, the stw instruction always - stores a nonzero value even when register spc is 64 bits. We use an ordered store to ensure all prior accesses are performed prior to releasing the lock. */ - .macro ptl_unlock0 spc,tmp + .macro ptl_unlock0 spc,tmp,tmp2 #ifdef CONFIG_TLB_PTLOCK -98: or,COND(=) %r0,\spc,%r0 - stw,ma \spc,0(\tmp) +98: ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmp2 + or,COND(=) %r0,\spc,%r0 + stw,ma \tmp2,0(\tmp) 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) #endif .endm /* Release page_table_lock. 
*/ - .macro ptl_unlock1 spc,tmp + .macro ptl_unlock1 spc,tmp,tmp2 #ifdef CONFIG_TLB_PTLOCK 98: get_ptl \tmp - ptl_unlock0 \spc,\tmp + ptl_unlock0 \spc,\tmp,\tmp2 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) #endif .endm @@ -1125,7 +1124,7 @@ dtlb_miss_20w: idtlbt pte,prot - ptl_unlock1 spc,t0 + ptl_unlock1 spc,t0,t1 rfir nop @@ -1151,7 +1150,7 @@ nadtlb_miss_20w: idtlbt pte,prot - ptl_unlock1 spc,t0 + ptl_unlock1 spc,t0,t1 rfir nop @@ -1185,7 +1184,7 @@ dtlb_miss_11: mtsp t1, %sr1 /* Restore sr1 */ - ptl_unlock1 spc,t0 + ptl_unlock1 spc,t0,t1 rfir nop @@ -1218,7 +1217,7 @@ nadtlb_miss_11: mtsp t1, %sr1 /* Restore sr1 */ - ptl_unlock1 spc,t0 + ptl_unlock1 spc,t0,t1 rfir nop @@ -1247,7 +1246,7 @@ dtlb_miss_20: idtlbt pte,prot - ptl_unlock1 spc,t0 + ptl_unlock1 spc,t0,t1 rfir nop @@ -1275,7 +1274,7 @@ nadtlb_miss_20: idtlbt pte,prot - ptl_unlock1 spc,t0 + ptl_unlock1 spc,t0,t1 rfir nop @@ -1320,7 +1319,7 @@ itlb_miss_20w: iitlbt pte,prot - ptl_unlock1 spc,t0 + ptl_unlock1 spc,t0,t1 rfir nop @@ -1344,7 +1343,7 @@ naitlb_miss_20w: iitlbt pte,prot - ptl_unlock1 spc,t0 + ptl_unlock1 spc,t0,t1 rfir nop @@ -1378,7 +1377,7 @@ itlb_miss_11: mtsp t1, %sr1 /* Restore sr1 */ - ptl_unlock1 spc,t0 + ptl_unlock1 spc,t0,t1 rfir nop @@ -1402,7 +1401,7 @@ naitlb_miss_11: mtsp t1, %sr1 /* Restore sr1 */ - ptl_unlock1 spc,t0 + ptl_unlock1 spc,t0,t1 rfir nop @@ -1432,7 +1431,7 @@ itlb_miss_20: iitlbt pte,prot - ptl_unlock1 spc,t0 + ptl_unlock1 spc,t0,t1 rfir nop @@ -1452,7 +1451,7 @@ naitlb_miss_20: iitlbt pte,prot - ptl_unlock1 spc,t0 + ptl_unlock1 spc,t0,t1 rfir nop @@ -1482,7 +1481,7 @@ dbit_trap_20w: idtlbt pte,prot - ptl_unlock0 spc,t0 + ptl_unlock0 spc,t0,t1 rfir nop #else @@ -1508,7 +1507,7 @@ dbit_trap_11: mtsp t1, %sr1 /* Restore sr1 */ - ptl_unlock0 spc,t0 + ptl_unlock0 spc,t0,t1 rfir nop @@ -1528,7 +1527,7 @@ dbit_trap_20: idtlbt pte,prot - ptl_unlock0 spc,t0 + ptl_unlock0 spc,t0,t1 rfir nop #endif diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c index 4caf5e3079eb4..359577ec16801 100644 --- a/arch/powerpc/kernel/rtas_flash.c +++ b/arch/powerpc/kernel/rtas_flash.c @@ -709,9 +709,9 @@ static int __init rtas_flash_init(void) if (!rtas_validate_flash_data.buf) return -ENOMEM; - flash_block_cache = kmem_cache_create("rtas_flash_cache", - RTAS_BLK_SIZE, RTAS_BLK_SIZE, 0, - NULL); + flash_block_cache = kmem_cache_create_usercopy("rtas_flash_cache", + RTAS_BLK_SIZE, RTAS_BLK_SIZE, + 0, 0, RTAS_BLK_SIZE, NULL); if (!flash_block_cache) { printk(KERN_ERR "%s: failed to create block cache\n", __func__); diff --git a/arch/powerpc/mm/book3s64/subpage_prot.c b/arch/powerpc/mm/book3s64/subpage_prot.c index 0dc85556dec55..ec98e526167e5 100644 --- a/arch/powerpc/mm/book3s64/subpage_prot.c +++ b/arch/powerpc/mm/book3s64/subpage_prot.c @@ -145,6 +145,7 @@ static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr, static const struct mm_walk_ops subpage_walk_ops = { .pmd_entry = subpage_walk_pmd_entry, + .walk_lock = PGWALK_WRLOCK_VERIFY, }; static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr, diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 4c07b9189c867..bea7b73e895dd 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -570,24 +570,30 @@ config TOOLCHAIN_HAS_ZIHINTPAUSE config TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI def_bool y # https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=aed44286efa8ae8717a77d94b51ac3614e2ca6dc - depends on AS_IS_GNU && AS_VERSION >= 23800 - help - Newer binutils versions default to ISA spec version 
20191213 which - moves some instructions from the I extension to the Zicsr and Zifencei - extensions. + # https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=98416dbb0a62579d4a7a4a76bab51b5b52fec2cd + depends on AS_IS_GNU && AS_VERSION >= 23600 + help + Binutils-2.38 and GCC-12.1.0 bumped the default ISA spec to the newer + 20191213 version, which moves some instructions from the I extension to + the Zicsr and Zifencei extensions. This requires explicitly specifying + Zicsr and Zifencei when binutils >= 2.38 or GCC >= 12.1.0. Zicsr + and Zifencei are supported in binutils from version 2.36 onwards. + To make life easier, and avoid forcing toolchains that default to a + newer ISA spec to version 2.2, relax the check to binutils >= 2.36. + For clang < 17 or GCC < 11.3.0, for which this is not possible or need + special treatment, this is dealt with in TOOLCHAIN_NEEDS_OLD_ISA_SPEC. config TOOLCHAIN_NEEDS_OLD_ISA_SPEC def_bool y depends on TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI # https://github.com/llvm/llvm-project/commit/22e199e6afb1263c943c0c0d4498694e15bf8a16 - depends on CC_IS_CLANG && CLANG_VERSION < 170000 - help - Certain versions of clang do not support zicsr and zifencei via -march - but newer versions of binutils require it for the reasons noted in the - help text of CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI. This - option causes an older ISA spec compatible with these older versions - of clang to be passed to GAS, which has the same result as passing zicsr - and zifencei to -march. + # https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=d29f5d6ab513c52fd872f532c492e35ae9fd6671 + depends on (CC_IS_CLANG && CLANG_VERSION < 170000) || (CC_IS_GCC && GCC_VERSION < 110300) + help + Certain versions of clang and GCC do not support zicsr and zifencei via + -march. This option causes an older ISA spec compatible with these older + versions of clang and GCC to be passed to GAS, which has the same result + as passing zicsr and zifencei to -march. 
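For readers unfamiliar with the spec change, here is a minimal sketch of what actually breaks (illustrative only, not taken from the patch; the function name and the -march strings are examples): ISA spec 20191213 moves the CSR instructions into Zicsr and fence.i into Zifencei, so with binutils >= 2.38 or GCC >= 12.1.0 code like the following assembles only when the extensions are spelled out, e.g. -march=rv64imac_zicsr_zifencei, whereas toolchains defaulting to the older spec accept plain -march=rv64imac:

	static inline unsigned long sync_icache_and_read_cycle(void)
	{
		unsigned long cycles;

		/* fence.i now lives in the Zifencei extension, not in I */
		__asm__ __volatile__("fence.i" ::: "memory");

		/* CSR accesses (csrr and friends) now live in Zicsr, not in I */
		__asm__ __volatile__("csrr %0, cycle" : "=r"(cycles));

		return cycles;
	}

The same spec change is why the compat vDSO Makefile hunk further below selects its -march string based on CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI.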
config FPU bool "FPU support" diff --git a/arch/riscv/include/asm/insn.h b/arch/riscv/include/asm/insn.h index 4e1505cef8aa4..fce00400c9bcc 100644 --- a/arch/riscv/include/asm/insn.h +++ b/arch/riscv/include/asm/insn.h @@ -110,6 +110,7 @@ #define RVC_INSN_FUNCT4_OPOFF 12 #define RVC_INSN_FUNCT3_MASK GENMASK(15, 13) #define RVC_INSN_FUNCT3_OPOFF 13 +#define RVC_INSN_J_RS1_MASK GENMASK(11, 7) #define RVC_INSN_J_RS2_MASK GENMASK(6, 2) #define RVC_INSN_OPCODE_MASK GENMASK(1, 0) #define RVC_ENCODE_FUNCT3(f_) (RVC_FUNCT3_##f_ << RVC_INSN_FUNCT3_OPOFF) @@ -245,8 +246,6 @@ __RISCV_INSN_FUNCS(c_jal, RVC_MASK_C_JAL, RVC_MATCH_C_JAL) __RISCV_INSN_FUNCS(auipc, RVG_MASK_AUIPC, RVG_MATCH_AUIPC) __RISCV_INSN_FUNCS(jalr, RVG_MASK_JALR, RVG_MATCH_JALR) __RISCV_INSN_FUNCS(jal, RVG_MASK_JAL, RVG_MATCH_JAL) -__RISCV_INSN_FUNCS(c_jr, RVC_MASK_C_JR, RVC_MATCH_C_JR) -__RISCV_INSN_FUNCS(c_jalr, RVC_MASK_C_JALR, RVC_MATCH_C_JALR) __RISCV_INSN_FUNCS(c_j, RVC_MASK_C_J, RVC_MATCH_C_J) __RISCV_INSN_FUNCS(beq, RVG_MASK_BEQ, RVG_MATCH_BEQ) __RISCV_INSN_FUNCS(bne, RVG_MASK_BNE, RVG_MATCH_BNE) @@ -273,6 +272,18 @@ static __always_inline bool riscv_insn_is_branch(u32 code) return (code & RV_INSN_OPCODE_MASK) == RVG_OPCODE_BRANCH; } +static __always_inline bool riscv_insn_is_c_jr(u32 code) +{ + return (code & RVC_MASK_C_JR) == RVC_MATCH_C_JR && + (code & RVC_INSN_J_RS1_MASK) != 0; +} + +static __always_inline bool riscv_insn_is_c_jalr(u32 code) +{ + return (code & RVC_MASK_C_JALR) == RVC_MATCH_C_JALR && + (code & RVC_INSN_J_RS1_MASK) != 0; +} + #define RV_IMM_SIGN(x) (-(((x) >> 31) & 1)) #define RVC_IMM_SIGN(x) (-(((x) >> 12) & 1)) #define RV_X(X, s, mask) (((X) >> (s)) & (mask)) diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h index 3d78930cab513..c5ee07b3df071 100644 --- a/arch/riscv/include/asm/vector.h +++ b/arch/riscv/include/asm/vector.h @@ -70,8 +70,9 @@ static __always_inline void __vstate_csr_save(struct __riscv_v_ext_state *dest) "csrr %1, " __stringify(CSR_VTYPE) "\n\t" "csrr %2, " __stringify(CSR_VL) "\n\t" "csrr %3, " __stringify(CSR_VCSR) "\n\t" + "csrr %4, " __stringify(CSR_VLENB) "\n\t" : "=r" (dest->vstart), "=r" (dest->vtype), "=r" (dest->vl), - "=r" (dest->vcsr) : :); + "=r" (dest->vcsr), "=r" (dest->vlenb) : :); } static __always_inline void __vstate_csr_restore(struct __riscv_v_ext_state *src) diff --git a/arch/riscv/include/uapi/asm/bitsperlong.h b/arch/riscv/include/uapi/asm/bitsperlong.h new file mode 100644 index 0000000000000..7d0b32e3b7017 --- /dev/null +++ b/arch/riscv/include/uapi/asm/bitsperlong.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (C) 2012 ARM Ltd. 
+ * Copyright (C) 2015 Regents of the University of California + */ + +#ifndef _UAPI_ASM_RISCV_BITSPERLONG_H +#define _UAPI_ASM_RISCV_BITSPERLONG_H + +#define __BITS_PER_LONG (__SIZEOF_POINTER__ * 8) + +#include + +#endif /* _UAPI_ASM_RISCV_BITSPERLONG_H */ diff --git a/arch/riscv/include/uapi/asm/ptrace.h b/arch/riscv/include/uapi/asm/ptrace.h index e17c550986a69..283800130614b 100644 --- a/arch/riscv/include/uapi/asm/ptrace.h +++ b/arch/riscv/include/uapi/asm/ptrace.h @@ -97,6 +97,7 @@ struct __riscv_v_ext_state { unsigned long vl; unsigned long vtype; unsigned long vcsr; + unsigned long vlenb; void *datap; /* * In signal handler, datap will be set a correct user stack offset diff --git a/arch/riscv/kernel/compat_vdso/Makefile b/arch/riscv/kernel/compat_vdso/Makefile index 189345773e7e1..b86e5e2c3aea9 100644 --- a/arch/riscv/kernel/compat_vdso/Makefile +++ b/arch/riscv/kernel/compat_vdso/Makefile @@ -11,7 +11,13 @@ compat_vdso-syms += flush_icache COMPAT_CC := $(CC) COMPAT_LD := $(LD) -COMPAT_CC_FLAGS := -march=rv32g -mabi=ilp32 +# binutils 2.35 does not support the zifencei extension, but in the ISA +# spec 20191213, G stands for IMAFD_ZICSR_ZIFENCEI. +ifdef CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI + COMPAT_CC_FLAGS := -march=rv32g -mabi=ilp32 +else + COMPAT_CC_FLAGS := -march=rv32imafd -mabi=ilp32 +endif COMPAT_LD_FLAGS := -melf32lriscv # Disable attributes, as they're useless and break the build. diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c index d0577cc6a0813..a8efa053c4a52 100644 --- a/arch/riscv/kernel/irq.c +++ b/arch/riscv/kernel/irq.c @@ -84,6 +84,9 @@ void do_softirq_own_stack(void) : [sp] "r" (sp) : "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3", "t4", "t5", "t6", +#ifndef CONFIG_FRAME_POINTER + "s0", +#endif "memory"); } else #endif diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c index 1d572cf3140f0..487303e3ef229 100644 --- a/arch/riscv/kernel/ptrace.c +++ b/arch/riscv/kernel/ptrace.c @@ -25,9 +25,6 @@ enum riscv_regset { #ifdef CONFIG_FPU REGSET_F, #endif -#ifdef CONFIG_RISCV_ISA_V - REGSET_V, -#endif }; static int riscv_gpr_get(struct task_struct *target, @@ -84,61 +81,6 @@ static int riscv_fpr_set(struct task_struct *target, } #endif -#ifdef CONFIG_RISCV_ISA_V -static int riscv_vr_get(struct task_struct *target, - const struct user_regset *regset, - struct membuf to) -{ - struct __riscv_v_ext_state *vstate = &target->thread.vstate; - - if (!riscv_v_vstate_query(task_pt_regs(target))) - return -EINVAL; - - /* - * Ensure the vector registers have been saved to the memory before - * copying them to membuf. - */ - if (target == current) - riscv_v_vstate_save(current, task_pt_regs(current)); - - /* Copy vector header from vstate. */ - membuf_write(&to, vstate, offsetof(struct __riscv_v_ext_state, datap)); - membuf_zero(&to, sizeof(vstate->datap)); - - /* Copy all the vector registers from vstate. */ - return membuf_write(&to, vstate->datap, riscv_v_vsize); -} - -static int riscv_vr_set(struct task_struct *target, - const struct user_regset *regset, - unsigned int pos, unsigned int count, - const void *kbuf, const void __user *ubuf) -{ - int ret, size; - struct __riscv_v_ext_state *vstate = &target->thread.vstate; - - if (!riscv_v_vstate_query(task_pt_regs(target))) - return -EINVAL; - - /* Copy rest of the vstate except datap */ - ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vstate, 0, - offsetof(struct __riscv_v_ext_state, datap)); - if (unlikely(ret)) - return ret; - - /* Skip copy datap. 
*/ - size = sizeof(vstate->datap); - count -= size; - ubuf += size; - - /* Copy all the vector registers. */ - pos = 0; - ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vstate->datap, - 0, riscv_v_vsize); - return ret; -} -#endif - static const struct user_regset riscv_user_regset[] = { [REGSET_X] = { .core_note_type = NT_PRSTATUS, @@ -158,17 +100,6 @@ static const struct user_regset riscv_user_regset[] = { .set = riscv_fpr_set, }, #endif -#ifdef CONFIG_RISCV_ISA_V - [REGSET_V] = { - .core_note_type = NT_RISCV_VECTOR, - .align = 16, - .n = ((32 * RISCV_MAX_VLENB) + - sizeof(struct __riscv_v_ext_state)) / sizeof(__u32), - .size = sizeof(__u32), - .regset_get = riscv_vr_get, - .set = riscv_vr_set, - }, -#endif }; static const struct user_regset_view riscv_user_native_view = { diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index f910dfccbf5d2..f798c853bede6 100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -297,7 +297,7 @@ asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs) asmlinkage __visible __trap_section void do_trap_ecall_u(struct pt_regs *regs) { if (user_mode(regs)) { - ulong syscall = regs->a7; + long syscall = regs->a7; regs->epc += 4; regs->orig_a0 = regs->a0; @@ -306,9 +306,9 @@ asmlinkage __visible __trap_section void do_trap_ecall_u(struct pt_regs *regs) syscall = syscall_enter_from_user_mode(regs, syscall); - if (syscall < NR_syscalls) + if (syscall >= 0 && syscall < NR_syscalls) syscall_handler(regs, syscall); - else + else if (syscall != -1) regs->a0 = -ENOSYS; syscall_exit_to_user_mode(regs); @@ -372,6 +372,9 @@ asmlinkage void noinstr do_irq(struct pt_regs *regs) : [sp] "r" (sp), [regs] "r" (regs) : "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3", "t4", "t5", "t6", +#ifndef CONFIG_FRAME_POINTER + "s0", +#endif "memory"); } else #endif diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S index ec486e5369d9b..09b47ebacf2e8 100644 --- a/arch/riscv/lib/uaccess.S +++ b/arch/riscv/lib/uaccess.S @@ -17,8 +17,11 @@ ENTRY(__asm_copy_from_user) li t6, SR_SUM csrs CSR_STATUS, t6 - /* Save for return value */ - mv t5, a2 + /* + * Save the terminal address which will be used to compute the number + * of bytes copied in case of a fixup exception. 
+ */ + add t5, a0, a2 /* * Register allocation for code below: @@ -176,7 +179,7 @@ ENTRY(__asm_copy_from_user) 10: /* Disable access to user memory */ csrc CSR_STATUS, t6 - mv a0, t5 + sub a0, t5, a0 ret ENDPROC(__asm_copy_to_user) ENDPROC(__asm_copy_from_user) @@ -228,7 +231,7 @@ ENTRY(__clear_user) 11: /* Disable access to user memory */ csrc CSR_STATUS, t6 - mv a0, a1 + sub a0, a3, a0 ret ENDPROC(__clear_user) EXPORT_SYMBOL(__clear_user) diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c index ea3d61de065b3..161d0b34c2cb2 100644 --- a/arch/riscv/mm/pageattr.c +++ b/arch/riscv/mm/pageattr.c @@ -102,6 +102,7 @@ static const struct mm_walk_ops pageattr_ops = { .pmd_entry = pageattr_pmd_entry, .pte_entry = pageattr_pte_entry, .pte_hole = pageattr_pte_hole, + .walk_lock = PGWALK_RDLOCK, }; static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask, diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c index 9c8af31be9705..906a7bfc2a787 100644 --- a/arch/s390/mm/gmap.c +++ b/arch/s390/mm/gmap.c @@ -2514,6 +2514,7 @@ static int thp_split_walk_pmd_entry(pmd_t *pmd, unsigned long addr, static const struct mm_walk_ops thp_split_walk_ops = { .pmd_entry = thp_split_walk_pmd_entry, + .walk_lock = PGWALK_WRLOCK_VERIFY, }; static inline void thp_split_mm(struct mm_struct *mm) @@ -2565,6 +2566,7 @@ static int __zap_zero_pages(pmd_t *pmd, unsigned long start, static const struct mm_walk_ops zap_zero_walk_ops = { .pmd_entry = __zap_zero_pages, + .walk_lock = PGWALK_WRLOCK, }; /* @@ -2655,6 +2657,7 @@ static const struct mm_walk_ops enable_skey_walk_ops = { .hugetlb_entry = __s390_enable_skey_hugetlb, .pte_entry = __s390_enable_skey_pte, .pmd_entry = __s390_enable_skey_pmd, + .walk_lock = PGWALK_WRLOCK, }; int s390_enable_skey(void) @@ -2692,6 +2695,7 @@ static int __s390_reset_cmma(pte_t *pte, unsigned long addr, static const struct mm_walk_ops reset_cmma_walk_ops = { .pte_entry = __s390_reset_cmma, + .walk_lock = PGWALK_WRLOCK, }; void s390_reset_cmma(struct mm_struct *mm) @@ -2728,6 +2732,7 @@ static int s390_gather_pages(pte_t *ptep, unsigned long addr, static const struct mm_walk_ops gather_pages_ops = { .pte_entry = s390_gather_pages, + .walk_lock = PGWALK_RDLOCK, }; /* diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h index 117903881fe43..ce8f50192ae3e 100644 --- a/arch/x86/include/asm/entry-common.h +++ b/arch/x86/include/asm/entry-common.h @@ -92,6 +92,7 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, static __always_inline void arch_exit_to_user_mode(void) { mds_user_clear_cpu_buffers(); + amd_clear_divider(); } #define arch_exit_to_user_mode arch_exit_to_user_mode diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 3faf044569a5d..c55cc243592e9 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -272,9 +272,9 @@ .endm #ifdef CONFIG_CPU_UNRET_ENTRY -#define CALL_ZEN_UNTRAIN_RET "call zen_untrain_ret" +#define CALL_UNTRAIN_RET "call entry_untrain_ret" #else -#define CALL_ZEN_UNTRAIN_RET "" +#define CALL_UNTRAIN_RET "" #endif /* @@ -282,7 +282,7 @@ * return thunk isn't mapped into the userspace tables (then again, AMD * typically has NO_MELTDOWN). * - * While zen_untrain_ret() doesn't clobber anything but requires stack, + * While retbleed_untrain_ret() doesn't clobber anything but requires stack, * entry_ibpb() will clobber AX, CX, DX. 
* * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point @@ -293,14 +293,20 @@ defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO) VALIDATE_UNRET_END ALTERNATIVE_3 "", \ - CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \ + CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \ "call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \ __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH #endif +.endm -#ifdef CONFIG_CPU_SRSO - ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \ - "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS +.macro UNTRAIN_RET_VM +#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \ + defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO) + VALIDATE_UNRET_END + ALTERNATIVE_3 "", \ + CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \ + "call entry_ibpb", X86_FEATURE_IBPB_ON_VMEXIT, \ + __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH #endif .endm @@ -309,15 +315,10 @@ defined(CONFIG_CALL_DEPTH_TRACKING) VALIDATE_UNRET_END ALTERNATIVE_3 "", \ - CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \ + CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \ "call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \ __stringify(RESET_CALL_DEPTH_FROM_CALL), X86_FEATURE_CALL_DEPTH #endif - -#ifdef CONFIG_CPU_SRSO - ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \ - "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS -#endif .endm @@ -341,17 +342,24 @@ extern retpoline_thunk_t __x86_indirect_thunk_array[]; extern retpoline_thunk_t __x86_indirect_call_thunk_array[]; extern retpoline_thunk_t __x86_indirect_jump_thunk_array[]; +#ifdef CONFIG_RETHUNK extern void __x86_return_thunk(void); -extern void zen_untrain_ret(void); +#else +static inline void __x86_return_thunk(void) {} +#endif + +extern void retbleed_return_thunk(void); +extern void srso_return_thunk(void); +extern void srso_alias_return_thunk(void); + +extern void retbleed_untrain_ret(void); extern void srso_untrain_ret(void); -extern void srso_untrain_ret_alias(void); +extern void srso_alias_untrain_ret(void); + +extern void entry_untrain_ret(void); extern void entry_ibpb(void); -#ifdef CONFIG_CALL_THUNKS extern void (*x86_return_thunk)(void); -#else -#define x86_return_thunk (&__x86_return_thunk) -#endif #ifdef CONFIG_CALL_DEPTH_TRACKING extern void __x86_return_skl(void); @@ -478,9 +486,6 @@ enum ssb_mitigation { SPEC_STORE_BYPASS_SECCOMP, }; -extern char __indirect_thunk_start[]; -extern char __indirect_thunk_end[]; - static __always_inline void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature) { diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 2dcf3a06af090..099d58d02a262 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -687,10 +687,6 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) #ifdef CONFIG_RETHUNK -#ifdef CONFIG_CALL_THUNKS -void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk; -#endif - /* * Rewrite the compiler generated return thunk tail-calls. 
* diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 70f9d56f93057..7eca6a8abbb1c 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -1329,3 +1329,4 @@ void noinstr amd_clear_divider(void) asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0) :: "a" (0), "d" (0), "r" (1)); } +EXPORT_SYMBOL_GPL(amd_clear_divider); diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index d02f73c5339dd..f081d26616ac1 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -63,6 +63,8 @@ EXPORT_SYMBOL_GPL(x86_pred_cmd); static DEFINE_MUTEX(spec_ctrl_mutex); +void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk; + /* Update SPEC_CTRL MSR and its cached copy unconditionally */ static void update_spec_ctrl(u64 val) { @@ -165,6 +167,11 @@ void __init cpu_select_mitigations(void) md_clear_select_mitigation(); srbds_select_mitigation(); l1d_flush_select_mitigation(); + + /* + * srso_select_mitigation() depends and must run after + * retbleed_select_mitigation(). + */ srso_select_mitigation(); gds_select_mitigation(); } @@ -1035,6 +1042,9 @@ static void __init retbleed_select_mitigation(void) setup_force_cpu_cap(X86_FEATURE_RETHUNK); setup_force_cpu_cap(X86_FEATURE_UNRET); + if (IS_ENABLED(CONFIG_RETHUNK)) + x86_return_thunk = retbleed_return_thunk; + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) pr_err(RETBLEED_UNTRAIN_MSG); @@ -1044,6 +1054,7 @@ static void __init retbleed_select_mitigation(void) case RETBLEED_MITIGATION_IBPB: setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); + setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT); mitigate_smt = true; break; @@ -2417,9 +2428,10 @@ static void __init srso_select_mitigation(void) * Zen1/2 with SMT off aren't vulnerable after the right * IBPB microcode has been applied. */ - if ((boot_cpu_data.x86 < 0x19) && - (!cpu_smt_possible() || (cpu_smt_control == CPU_SMT_DISABLED))) + if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) { setup_force_cpu_cap(X86_FEATURE_SRSO_NO); + return; + } } if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { @@ -2448,11 +2460,15 @@ static void __init srso_select_mitigation(void) * like ftrace, static_call, etc. */ setup_force_cpu_cap(X86_FEATURE_RETHUNK); + setup_force_cpu_cap(X86_FEATURE_UNRET); - if (boot_cpu_data.x86 == 0x19) + if (boot_cpu_data.x86 == 0x19) { setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS); - else + x86_return_thunk = srso_alias_return_thunk; + } else { setup_force_cpu_cap(X86_FEATURE_SRSO); + x86_return_thunk = srso_return_thunk; + } srso_mitigation = SRSO_MITIGATION_SAFE_RET; } else { pr_err("WARNING: kernel not compiled with CPU_SRSO.\n"); @@ -2696,6 +2712,9 @@ static ssize_t retbleed_show_state(char *buf) static ssize_t srso_show_state(char *buf) { + if (boot_cpu_has(X86_FEATURE_SRSO_NO)) + return sysfs_emit(buf, "Mitigation: SMT disabled\n"); + return sysfs_emit(buf, "%s%s\n", srso_strings[srso_mitigation], (cpu_has_ibpb_brtype_microcode() ? "" : ", no microcode")); diff --git a/arch/x86/kernel/fpu/context.h b/arch/x86/kernel/fpu/context.h index af5cbdd9bd29a..f6d856bd50bc5 100644 --- a/arch/x86/kernel/fpu/context.h +++ b/arch/x86/kernel/fpu/context.h @@ -19,8 +19,7 @@ * FPU state for a task MUST let the rest of the kernel know that the * FPU registers are no longer valid for this task. * - * Either one of these invalidation functions is enough. 
Invalidate - * a resource you control: CPU if using the CPU for something else + * Invalidate a resource you control: CPU if using the CPU for something else * (with preemption disabled), FPU for the current task, or a task that * is prevented from running by the current task. */ diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index 1015af1ae562b..98e507cc7d34c 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -679,7 +679,7 @@ static void fpu_reset_fpregs(void) struct fpu *fpu = ¤t->thread.fpu; fpregs_lock(); - fpu__drop(fpu); + __fpu_invalidate_fpregs_state(fpu); /* * This does not change the actual hardware registers. It just * resets the memory image and sets TIF_NEED_FPU_LOAD so a diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 0bab497c94369..1afbc4866b100 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -882,6 +882,13 @@ void __init fpu__init_system_xstate(unsigned int legacy_size) goto out_disable; } + /* + * CPU capabilities initialization runs before FPU init. So + * X86_FEATURE_OSXSAVE is not set. Now that XSAVE is completely + * functional, set the feature bit so depending code works. + */ + setup_force_cpu_cap(X86_FEATURE_OSXSAVE); + print_xstate_offset_size(); pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n", fpu_kernel_cfg.max_features, diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 57b0037d0a996..517821b48391a 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -226,7 +226,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real) } /* Check whether insn is indirect jump */ -static int __insn_is_indirect_jump(struct insn *insn) +static int insn_is_indirect_jump(struct insn *insn) { return ((insn->opcode.bytes[0] == 0xff && (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */ @@ -260,26 +260,6 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len) return (start <= target && target <= start + len); } -static int insn_is_indirect_jump(struct insn *insn) -{ - int ret = __insn_is_indirect_jump(insn); - -#ifdef CONFIG_RETPOLINE - /* - * Jump to x86_indirect_thunk_* is treated as an indirect jump. - * Note that even with CONFIG_RETPOLINE=y, the kernel compiled with - * older gcc may use indirect jump. So we add this check instead of - * replace indirect-jump check. - */ - if (!ret) - ret = insn_jump_into_range(insn, - (unsigned long)__indirect_thunk_start, - (unsigned long)__indirect_thunk_end - - (unsigned long)__indirect_thunk_start); -#endif - return ret; -} - /* Decode whole function to ensure any instructions don't jump into target */ static int can_optimize(unsigned long paddr) { @@ -334,9 +314,21 @@ static int can_optimize(unsigned long paddr) /* Recover address */ insn.kaddr = (void *)addr; insn.next_byte = (void *)(addr + insn.length); - /* Check any instructions don't jump into target */ - if (insn_is_indirect_jump(&insn) || - insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE, + /* + * Check any instructions don't jump into target, indirectly or + * directly. + * + * The indirect case is present to handle a code with jump + * tables. When the kernel uses retpolines, the check should in + * theory additionally look for jumps to indirect thunks. + * However, the kernel built with retpolines or IBT has jump + * tables disabled so the check can be skipped altogether. 
+ */ + if (!IS_ENABLED(CONFIG_RETPOLINE) && + !IS_ENABLED(CONFIG_X86_KERNEL_IBT) && + insn_is_indirect_jump(&insn)) + return 0; + if (insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE, DISP32_SIZE)) return 0; addr += insn.length; diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c index b70670a985978..77a9316da4357 100644 --- a/arch/x86/kernel/static_call.c +++ b/arch/x86/kernel/static_call.c @@ -186,6 +186,19 @@ EXPORT_SYMBOL_GPL(arch_static_call_transform); */ bool __static_call_fixup(void *tramp, u8 op, void *dest) { + unsigned long addr = (unsigned long)tramp; + /* + * Not all .return_sites are a static_call trampoline (most are not). + * Check if the 3 bytes after the return are still kernel text, if not, + * then this definitely is not a trampoline and we need not worry + * further. + * + * This avoids the memcmp() below tripping over pagefaults etc.. + */ + if (((addr >> PAGE_SHIFT) != ((addr + 7) >> PAGE_SHIFT)) && + !kernel_text_address(addr + 7)) + return false; + if (memcmp(tramp+5, tramp_ud, 3)) { /* Not a trampoline site, not our problem. */ return false; diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 1885326a8f659..4a817d20ce3bb 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -206,8 +206,6 @@ DEFINE_IDTENTRY(exc_divide_error) { do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE, FPE_INTDIV, error_get_trap_addr(regs)); - - amd_clear_divider(); } DEFINE_IDTENTRY(exc_overflow) diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index ef06211bae4cc..83d41c2601d7b 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -133,27 +133,25 @@ SECTIONS KPROBES_TEXT SOFTIRQENTRY_TEXT #ifdef CONFIG_RETPOLINE - __indirect_thunk_start = .; - *(.text.__x86.indirect_thunk) - *(.text.__x86.return_thunk) - __indirect_thunk_end = .; + *(.text..__x86.indirect_thunk) + *(.text..__x86.return_thunk) #endif STATIC_CALL_TEXT ALIGN_ENTRY_TEXT_BEGIN #ifdef CONFIG_CPU_SRSO - *(.text.__x86.rethunk_untrain) + *(.text..__x86.rethunk_untrain) #endif ENTRY_TEXT #ifdef CONFIG_CPU_SRSO /* - * See the comment above srso_untrain_ret_alias()'s + * See the comment above srso_alias_untrain_ret()'s * definition. */ - . = srso_untrain_ret_alias | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20); - *(.text.__x86.rethunk_safe) + . = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20); + *(.text..__x86.rethunk_safe) #endif ALIGN_ENTRY_TEXT_END *(.gnu.warning) @@ -523,7 +521,7 @@ INIT_PER_CPU(irq_stack_backing_store); #endif #ifdef CONFIG_RETHUNK -. = ASSERT((__ret & 0x3f) == 0, "__ret not cacheline-aligned"); +. = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned"); . = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned"); #endif @@ -538,8 +536,8 @@ INIT_PER_CPU(irq_stack_backing_store); * Instead do: (A | B) - (A & B) in order to compute the XOR * of the two function addresses: */ -. = ASSERT(((ABSOLUTE(srso_untrain_ret_alias) | srso_safe_ret_alias) - - (ABSOLUTE(srso_untrain_ret_alias) & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)), +. 
= ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) - + (ABSOLUTE(srso_alias_untrain_ret) & srso_alias_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)), "SRSO function pair won't alias"); #endif diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 03e852dedcc1b..d4bfdc607fe7f 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4006,6 +4006,8 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in guest_state_enter_irqoff(); + amd_clear_divider(); + if (sev_es_guest(vcpu->kvm)) __svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted); else diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S index 265452fc9ebe9..ef2ebabb059c8 100644 --- a/arch/x86/kvm/svm/vmenter.S +++ b/arch/x86/kvm/svm/vmenter.S @@ -222,10 +222,7 @@ SYM_FUNC_START(__svm_vcpu_run) * because interrupt handlers won't sanitize 'ret' if the return is * from the kernel. */ - UNTRAIN_RET - - /* SRSO */ - ALTERNATIVE "", "call entry_ibpb", X86_FEATURE_IBPB_ON_VMEXIT + UNTRAIN_RET_VM /* * Clear all general purpose registers except RSP and RAX to prevent @@ -362,7 +359,7 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run) * because interrupt handlers won't sanitize RET if the return is * from the kernel. */ - UNTRAIN_RET + UNTRAIN_RET_VM /* "Pop" @spec_ctrl_intercepted. */ pop %_ASM_BX diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index 2cff585f22f29..cd86aeb5fdd3e 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -13,7 +13,7 @@ #include #include - .section .text.__x86.indirect_thunk + .section .text..__x86.indirect_thunk .macro POLINE reg @@ -133,75 +133,106 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array) #ifdef CONFIG_RETHUNK /* - * srso_untrain_ret_alias() and srso_safe_ret_alias() are placed at + * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at * special addresses: * - * - srso_untrain_ret_alias() is 2M aligned - * - srso_safe_ret_alias() is also in the same 2M page but bits 2, 8, 14 + * - srso_alias_untrain_ret() is 2M aligned + * - srso_alias_safe_ret() is also in the same 2M page but bits 2, 8, 14 * and 20 in its virtual address are set (while those bits in the - * srso_untrain_ret_alias() function are cleared). + * srso_alias_untrain_ret() function are cleared). * * This guarantees that those two addresses will alias in the branch * target buffer of Zen3/4 generations, leading to any potential * poisoned entries at that BTB slot to get evicted. * - * As a result, srso_safe_ret_alias() becomes a safe return. + * As a result, srso_alias_safe_ret() becomes a safe return. */ #ifdef CONFIG_CPU_SRSO - .section .text.__x86.rethunk_untrain + .section .text..__x86.rethunk_untrain -SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) +SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) + UNWIND_HINT_FUNC ANNOTATE_NOENDBR ASM_NOP2 lfence - jmp __x86_return_thunk -SYM_FUNC_END(srso_untrain_ret_alias) -__EXPORT_THUNK(srso_untrain_ret_alias) - - .section .text.__x86.rethunk_safe + jmp srso_alias_return_thunk +SYM_FUNC_END(srso_alias_untrain_ret) +__EXPORT_THUNK(srso_alias_untrain_ret) + + .section .text..__x86.rethunk_safe +#else +/* dummy definition for alternatives */ +SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) + ANNOTATE_UNRET_SAFE + ret + int3 +SYM_FUNC_END(srso_alias_untrain_ret) #endif -/* Needs a definition for the __x86_return_thunk alternative below. 
*/ -SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) -#ifdef CONFIG_CPU_SRSO - add $8, %_ASM_SP +SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE) + lea 8(%_ASM_SP), %_ASM_SP UNWIND_HINT_FUNC -#endif ANNOTATE_UNRET_SAFE ret int3 -SYM_FUNC_END(srso_safe_ret_alias) +SYM_FUNC_END(srso_alias_safe_ret) - .section .text.__x86.return_thunk + .section .text..__x86.return_thunk + +SYM_CODE_START(srso_alias_return_thunk) + UNWIND_HINT_FUNC + ANNOTATE_NOENDBR + call srso_alias_safe_ret + ud2 +SYM_CODE_END(srso_alias_return_thunk) + +/* + * Some generic notes on the untraining sequences: + * + * They are interchangeable when it comes to flushing potentially wrong + * RET predictions from the BTB. + * + * The SRSO Zen1/2 (MOVABS) untraining sequence is longer than the + * Retbleed sequence because the return sequence done there + * (srso_safe_ret()) is longer and the return sequence must fully nest + * (end before) the untraining sequence. Therefore, the untraining + * sequence must fully overlap the return sequence. + * + * Regarding alignment - the instructions which need to be untrained, + * must all start at a cacheline boundary for Zen1/2 generations. That + * is, instruction sequences starting at srso_safe_ret() and + * the respective instruction sequences at retbleed_return_thunk() + * must start at a cacheline boundary. + */ /* * Safety details here pertain to the AMD Zen{1,2} microarchitecture: - * 1) The RET at __x86_return_thunk must be on a 64 byte boundary, for + * 1) The RET at retbleed_return_thunk must be on a 64 byte boundary, for * alignment within the BTB. - * 2) The instruction at zen_untrain_ret must contain, and not + * 2) The instruction at retbleed_untrain_ret must contain, and not * end with, the 0xc3 byte of the RET. * 3) STIBP must be enabled, or SMT disabled, to prevent the sibling thread * from re-poisioning the BTB prediction. */ .align 64 - .skip 64 - (__ret - zen_untrain_ret), 0xcc -SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) + .skip 64 - (retbleed_return_thunk - retbleed_untrain_ret), 0xcc +SYM_START(retbleed_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) ANNOTATE_NOENDBR /* - * As executed from zen_untrain_ret, this is: + * As executed from retbleed_untrain_ret, this is: * * TEST $0xcc, %bl * LFENCE - * JMP __x86_return_thunk + * JMP retbleed_return_thunk * * Executing the TEST instruction has a side effect of evicting any BTB * prediction (potentially attacker controlled) attached to the RET, as - * __x86_return_thunk + 1 isn't an instruction boundary at the moment. + * retbleed_return_thunk + 1 isn't an instruction boundary at the moment. */ .byte 0xf6 /* - * As executed from __x86_return_thunk, this is a plain RET. + * As executed from retbleed_return_thunk, this is a plain RET. * * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8. * @@ -213,13 +244,13 @@ SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) * With SMT enabled and STIBP active, a sibling thread cannot poison * RET's prediction to a type of its choice, but can evict the * prediction due to competitive sharing. If the prediction is - * evicted, __x86_return_thunk will suffer Straight Line Speculation + * evicted, retbleed_return_thunk will suffer Straight Line Speculation * which will be contained safely by the INT3. */ -SYM_INNER_LABEL(__ret, SYM_L_GLOBAL) +SYM_INNER_LABEL(retbleed_return_thunk, SYM_L_GLOBAL) ret int3 -SYM_CODE_END(__ret) +SYM_CODE_END(retbleed_return_thunk) /* * Ensure the TEST decoding / BTB invalidation is complete. 
@@ -230,16 +261,16 @@ SYM_CODE_END(__ret) * Jump back and execute the RET in the middle of the TEST instruction. * INT3 is for SLS protection. */ - jmp __ret + jmp retbleed_return_thunk int3 -SYM_FUNC_END(zen_untrain_ret) -__EXPORT_THUNK(zen_untrain_ret) +SYM_FUNC_END(retbleed_untrain_ret) +__EXPORT_THUNK(retbleed_untrain_ret) /* - * SRSO untraining sequence for Zen1/2, similar to zen_untrain_ret() + * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret() * above. On kernel entry, srso_untrain_ret() is executed which is a * - * movabs $0xccccccc308c48348,%rax + * movabs $0xccccc30824648d48,%rax * * and when the return thunk executes the inner label srso_safe_ret() * later, it is a stack manipulation and a RET which is mispredicted and @@ -251,22 +282,44 @@ SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) ANNOTATE_NOENDBR .byte 0x48, 0xb8 +/* + * This forces the function return instruction to speculate into a trap + * (UD2 in srso_return_thunk() below). This RET will then mispredict + * and execution will continue at the return site read from the top of + * the stack. + */ SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL) - add $8, %_ASM_SP + lea 8(%_ASM_SP), %_ASM_SP ret int3 int3 - int3 + /* end of movabs */ lfence call srso_safe_ret - int3 + ud2 SYM_CODE_END(srso_safe_ret) SYM_FUNC_END(srso_untrain_ret) __EXPORT_THUNK(srso_untrain_ret) -SYM_FUNC_START(__x86_return_thunk) - ALTERNATIVE_2 "jmp __ret", "call srso_safe_ret", X86_FEATURE_SRSO, \ - "call srso_safe_ret_alias", X86_FEATURE_SRSO_ALIAS +SYM_CODE_START(srso_return_thunk) + UNWIND_HINT_FUNC + ANNOTATE_NOENDBR + call srso_safe_ret + ud2 +SYM_CODE_END(srso_return_thunk) + +SYM_FUNC_START(entry_untrain_ret) + ALTERNATIVE_2 "jmp retbleed_untrain_ret", \ + "jmp srso_untrain_ret", X86_FEATURE_SRSO, \ + "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS +SYM_FUNC_END(entry_untrain_ret) +__EXPORT_THUNK(entry_untrain_ret) + +SYM_CODE_START(__x86_return_thunk) + UNWIND_HINT_FUNC + ANNOTATE_NOENDBR + ANNOTATE_UNRET_SAFE + ret int3 SYM_CODE_END(__x86_return_thunk) EXPORT_SYMBOL(__x86_return_thunk) diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index fc49be622e05b..9faafcd10e177 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -136,7 +136,9 @@ static void blkg_free_workfn(struct work_struct *work) blkcg_policy[i]->pd_free_fn(blkg->pd[i]); if (blkg->parent) blkg_put(blkg->parent); + spin_lock_irq(&q->queue_lock); list_del_init(&blkg->q_node); + spin_unlock_irq(&q->queue_lock); mutex_unlock(&q->blkcg_mutex); blk_put_queue(q); diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c index ad9844c5b40cb..e6468eab2681e 100644 --- a/block/blk-crypto-fallback.c +++ b/block/blk-crypto-fallback.c @@ -78,7 +78,7 @@ static struct blk_crypto_fallback_keyslot { struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX]; } *blk_crypto_keyslots; -static struct blk_crypto_profile blk_crypto_fallback_profile; +static struct blk_crypto_profile *blk_crypto_fallback_profile; static struct workqueue_struct *blk_crypto_wq; static mempool_t *blk_crypto_bounce_page_pool; static struct bio_set crypto_bio_split; @@ -292,7 +292,7 @@ static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr) * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for * this bio's algorithm and key. 
*/ - blk_st = blk_crypto_get_keyslot(&blk_crypto_fallback_profile, + blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile, bc->bc_key, &slot); if (blk_st != BLK_STS_OK) { src_bio->bi_status = blk_st; @@ -395,7 +395,7 @@ static void blk_crypto_fallback_decrypt_bio(struct work_struct *work) * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for * this bio's algorithm and key. */ - blk_st = blk_crypto_get_keyslot(&blk_crypto_fallback_profile, + blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile, bc->bc_key, &slot); if (blk_st != BLK_STS_OK) { bio->bi_status = blk_st; @@ -499,7 +499,7 @@ bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr) return false; } - if (!__blk_crypto_cfg_supported(&blk_crypto_fallback_profile, + if (!__blk_crypto_cfg_supported(blk_crypto_fallback_profile, &bc->bc_key->crypto_cfg)) { bio->bi_status = BLK_STS_NOTSUPP; return false; @@ -526,7 +526,7 @@ bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr) int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key) { - return __blk_crypto_evict_key(&blk_crypto_fallback_profile, key); + return __blk_crypto_evict_key(blk_crypto_fallback_profile, key); } static bool blk_crypto_fallback_inited; @@ -534,7 +534,6 @@ static int blk_crypto_fallback_init(void) { int i; int err; - struct blk_crypto_profile *profile = &blk_crypto_fallback_profile; if (blk_crypto_fallback_inited) return 0; @@ -545,18 +544,27 @@ static int blk_crypto_fallback_init(void) if (err) goto out; - err = blk_crypto_profile_init(profile, blk_crypto_num_keyslots); - if (err) + /* Dynamic allocation is needed because of lockdep_register_key(). */ + blk_crypto_fallback_profile = + kzalloc(sizeof(*blk_crypto_fallback_profile), GFP_KERNEL); + if (!blk_crypto_fallback_profile) { + err = -ENOMEM; goto fail_free_bioset; + } + + err = blk_crypto_profile_init(blk_crypto_fallback_profile, + blk_crypto_num_keyslots); + if (err) + goto fail_free_profile; err = -ENOMEM; - profile->ll_ops = blk_crypto_fallback_ll_ops; - profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE; + blk_crypto_fallback_profile->ll_ops = blk_crypto_fallback_ll_ops; + blk_crypto_fallback_profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE; /* All blk-crypto modes have a crypto API fallback. 
*/ for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) - profile->modes_supported[i] = 0xFFFFFFFF; - profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0; + blk_crypto_fallback_profile->modes_supported[i] = 0xFFFFFFFF; + blk_crypto_fallback_profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0; blk_crypto_wq = alloc_workqueue("blk_crypto_wq", WQ_UNBOUND | WQ_HIGHPRI | @@ -597,7 +605,9 @@ static int blk_crypto_fallback_init(void) fail_free_wq: destroy_workqueue(blk_crypto_wq); fail_destroy_profile: - blk_crypto_profile_destroy(profile); + blk_crypto_profile_destroy(blk_crypto_fallback_profile); +fail_free_profile: + kfree(blk_crypto_fallback_profile); fail_free_bioset: bioset_exit(&crypto_bio_split); out: diff --git a/block/blk-mq.c b/block/blk-mq.c index b04ff6f56926f..953f08354c8c3 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -681,6 +681,21 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, } EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx); +static void blk_mq_finish_request(struct request *rq) +{ + struct request_queue *q = rq->q; + + if (rq->rq_flags & RQF_USE_SCHED) { + q->elevator->type->ops.finish_request(rq); + /* + * For postflush request that may need to be + * completed twice, we should clear this flag + * to avoid double finish_request() on the rq. + */ + rq->rq_flags &= ~RQF_USE_SCHED; + } +} + static void __blk_mq_free_request(struct request *rq) { struct request_queue *q = rq->q; @@ -707,9 +722,7 @@ void blk_mq_free_request(struct request *rq) { struct request_queue *q = rq->q; - if ((rq->rq_flags & RQF_USE_SCHED) && - q->elevator->type->ops.finish_request) - q->elevator->type->ops.finish_request(rq); + blk_mq_finish_request(rq); if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq))) laptop_io_completion(q->disk->bdi); @@ -1020,6 +1033,8 @@ inline void __blk_mq_end_request(struct request *rq, blk_status_t error) if (blk_mq_need_time_stamp(rq)) __blk_mq_end_request_acct(rq, ktime_get_ns()); + blk_mq_finish_request(rq); + if (rq->end_io) { rq_qos_done(rq->q, rq); if (rq->end_io(rq, error) == RQ_END_IO_FREE) @@ -1074,6 +1089,8 @@ void blk_mq_end_request_batch(struct io_comp_batch *iob) if (iob->need_ts) __blk_mq_end_request_acct(rq, now); + blk_mq_finish_request(rq); + rq_qos_done(rq->q, rq); /* diff --git a/block/elevator.c b/block/elevator.c index 8400e303fbcbd..5ff093cb3cf8f 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -499,6 +499,9 @@ void elv_unregister_queue(struct request_queue *q) int elv_register(struct elevator_type *e) { + /* finish request is mandatory */ + if (WARN_ON_ONCE(!e->ops.finish_request)) + return -EINVAL; /* insert_requests and dispatch_request are mandatory */ if (WARN_ON_ONCE(!e->ops.insert_requests || !e->ops.dispatch_request)) return -EINVAL; diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 06b15b9f661ca..10efb56d8b481 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -1241,6 +1241,8 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags, return -ENOMEM; } + rsgl->sgl.need_unpin = + iov_iter_extract_will_pin(&msg->msg_iter); rsgl->sgl.sgt.sgl = rsgl->sgl.sgl; rsgl->sgl.sgt.nents = 0; rsgl->sgl.sgt.orig_nents = 0; @@ -1255,8 +1257,6 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags, } sg_mark_end(rsgl->sgl.sgt.sgl + rsgl->sgl.sgt.nents - 1); - rsgl->sgl.need_unpin = - iov_iter_extract_will_pin(&msg->msg_iter); /* chain the new scatterlist with previous one */ if (areq->last_rsgl) diff --git a/drivers/accel/qaic/qaic_control.c b/drivers/accel/qaic/qaic_control.c index 
cfbc92da426fa..388abd40024ba 100644 --- a/drivers/accel/qaic/qaic_control.c +++ b/drivers/accel/qaic/qaic_control.c @@ -392,18 +392,31 @@ static int find_and_map_user_pages(struct qaic_device *qdev, struct qaic_manage_trans_dma_xfer *in_trans, struct ioctl_resources *resources, struct dma_xfer *xfer) { + u64 xfer_start_addr, remaining, end, total; unsigned long need_pages; struct page **page_list; unsigned long nr_pages; struct sg_table *sgt; - u64 xfer_start_addr; int ret; int i; - xfer_start_addr = in_trans->addr + resources->xferred_dma_size; + if (check_add_overflow(in_trans->addr, resources->xferred_dma_size, &xfer_start_addr)) + return -EINVAL; - need_pages = DIV_ROUND_UP(in_trans->size + offset_in_page(xfer_start_addr) - - resources->xferred_dma_size, PAGE_SIZE); + if (in_trans->size < resources->xferred_dma_size) + return -EINVAL; + remaining = in_trans->size - resources->xferred_dma_size; + if (remaining == 0) + return 0; + + if (check_add_overflow(xfer_start_addr, remaining, &end)) + return -EINVAL; + + total = remaining + offset_in_page(xfer_start_addr); + if (total >= SIZE_MAX) + return -EINVAL; + + need_pages = DIV_ROUND_UP(total, PAGE_SIZE); nr_pages = need_pages; @@ -435,7 +448,7 @@ static int find_and_map_user_pages(struct qaic_device *qdev, ret = sg_alloc_table_from_pages(sgt, page_list, nr_pages, offset_in_page(xfer_start_addr), - in_trans->size - resources->xferred_dma_size, GFP_KERNEL); + remaining, GFP_KERNEL); if (ret) { ret = -ENOMEM; goto free_sgt; @@ -566,9 +579,6 @@ static int encode_dma(struct qaic_device *qdev, void *trans, struct wrapper_list QAIC_MANAGE_EXT_MSG_LENGTH) return -ENOMEM; - if (in_trans->addr + in_trans->size < in_trans->addr || !in_trans->size) - return -EINVAL; - xfer = kmalloc(sizeof(*xfer), GFP_KERNEL); if (!xfer) return -ENOMEM; diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c index e9a1cb779b305..6b6d981a71be7 100644 --- a/drivers/accel/qaic/qaic_data.c +++ b/drivers/accel/qaic/qaic_data.c @@ -1021,6 +1021,7 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi bo->dbc = dbc; srcu_read_unlock(&dbc->ch_lock, rcu_id); drm_gem_object_put(obj); + kfree(slice_ent); srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id); srcu_read_unlock(&usr->qddev_lock, usr_rcu_id); diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index a4d9f149b48d7..32cfa3f4efd3d 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -501,9 +501,13 @@ static const struct dmi_system_id maingear_laptop[] = { static const struct dmi_system_id pcspecialist_laptop[] = { { .ident = "PCSpecialist Elimina Pro 16 M", + /* + * Some models have product-name "Elimina Pro 16 M", + * others "GM6BGEQ". Match on board-name to match both. 
+ */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "PCSpecialist"), - DMI_MATCH(DMI_PRODUCT_NAME, "Elimina Pro 16 M"), + DMI_MATCH(DMI_BOARD_NAME, "GM6BGEQ"), }, }, { } diff --git a/drivers/block/rnbd/rnbd-clt-sysfs.c b/drivers/block/rnbd/rnbd-clt-sysfs.c index c36d8b1ceeed7..39887556cf959 100644 --- a/drivers/block/rnbd/rnbd-clt-sysfs.c +++ b/drivers/block/rnbd/rnbd-clt-sysfs.c @@ -25,7 +25,7 @@ static struct device *rnbd_dev; static const struct class rnbd_dev_class = { - .name = "rnbd_client", + .name = "rnbd-client", }; static struct kobject *rnbd_devs_kobj; diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 21fe9854703f9..4cb23b9e06ea4 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -2142,6 +2142,8 @@ static int sysc_reset(struct sysc *ddata) sysc_val = sysc_read_sysconfig(ddata); sysc_val |= sysc_mask; sysc_write(ddata, sysc_offset, sysc_val); + /* Flush posted write */ + sysc_val = sysc_read_sysconfig(ddata); } if (ddata->cfg.srst_udelay) diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c index 4fb4fd4b06bda..737aa70e2cb3d 100644 --- a/drivers/clk/clk-devres.c +++ b/drivers/clk/clk-devres.c @@ -205,18 +205,19 @@ EXPORT_SYMBOL(devm_clk_put); struct clk *devm_get_clk_from_child(struct device *dev, struct device_node *np, const char *con_id) { - struct clk **ptr, *clk; + struct devm_clk_state *state; + struct clk *clk; - ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL); - if (!ptr) + state = devres_alloc(devm_clk_release, sizeof(*state), GFP_KERNEL); + if (!state) return ERR_PTR(-ENOMEM); clk = of_clk_get_by_name(np, con_id); if (!IS_ERR(clk)) { - *ptr = clk; - devres_add(dev, ptr); + state->clk = clk; + devres_add(dev, state); } else { - devres_free(ptr); + devres_free(state); } return clk; diff --git a/drivers/clk/keystone/syscon-clk.c b/drivers/clk/keystone/syscon-clk.c index d33f741194881..935d9a2d8c2b3 100644 --- a/drivers/clk/keystone/syscon-clk.c +++ b/drivers/clk/keystone/syscon-clk.c @@ -151,8 +151,10 @@ static int ti_syscon_gate_clk_probe(struct platform_device *pdev) data[i].name); } - return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, - hw_data); + if (num_clks == 1) + return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, + hw_data->hws[0]); + return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, hw_data); } #define TI_SYSCON_CLK_GATE(_name, _offset, _bit_idx) \ diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index ff9ddbbca3774..68e73775392d9 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -382,8 +382,8 @@ static void kick_trng(struct device *dev, int ent_delay) val = ent_delay; /* min. freq. count, equal to 1/4 of the entropy sample length */ wr_reg32(&r4tst->rtfrqmin, val >> 2); - /* max. freq. 
count, equal to 16 times the entropy sample length */ - wr_reg32(&r4tst->rtfrqmax, val << 4); + /* disable maximum frequency count */ + wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE); } wr_reg32(&r4tst->rtsdctl, (val << RTSDCTL_ENT_DLY_SHIFT) | diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index 63f0aeb66db6b..f0a35277fd844 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c @@ -191,6 +191,7 @@ static const struct dma_fence_ops timeline_fence_ops = { */ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc) { + LIST_HEAD(signalled); struct sync_pt *pt, *next; trace_sync_timeline(obj); @@ -203,21 +204,20 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc) if (!timeline_fence_signaled(&pt->base)) break; - list_del_init(&pt->link); + dma_fence_get(&pt->base); + + list_move_tail(&pt->link, &signalled); rb_erase(&pt->node, &obj->pt_tree); - /* - * A signal callback may release the last reference to this - * fence, causing it to be freed. That operation has to be - * last to avoid a use after free inside this loop, and must - * be after we remove the fence from the timeline in order to - * prevent deadlocking on timeline->lock inside - * timeline_fence_release(). - */ dma_fence_signal_locked(&pt->base); } spin_unlock_irq(&obj->lock); + + list_for_each_entry_safe(pt, next, &signalled, link) { + list_del_init(&pt->link); + dma_fence_put(&pt->base); + } } /** diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c index f1f6f1c329877..533d815725794 100644 --- a/drivers/gpio/gpio-sim.c +++ b/drivers/gpio/gpio-sim.c @@ -291,6 +291,15 @@ static void gpio_sim_mutex_destroy(void *data) mutex_destroy(lock); } +static void gpio_sim_dispose_mappings(void *data) +{ + struct gpio_sim_chip *chip = data; + unsigned int i; + + for (i = 0; i < chip->gc.ngpio; i++) + irq_dispose_mapping(irq_find_mapping(chip->irq_sim, i)); +} + static void gpio_sim_sysfs_remove(void *data) { struct gpio_sim_chip *chip = data; @@ -402,10 +411,14 @@ static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev) if (!chip->pull_map) return -ENOMEM; - chip->irq_sim = devm_irq_domain_create_sim(dev, NULL, num_lines); + chip->irq_sim = devm_irq_domain_create_sim(dev, swnode, num_lines); if (IS_ERR(chip->irq_sim)) return PTR_ERR(chip->irq_sim); + ret = devm_add_action_or_reset(dev, gpio_sim_dispose_mappings, chip); + if (ret) + return ret; + mutex_init(&chip->lock); ret = devm_add_action_or_reset(dev, gpio_sim_mutex_destroy, &chip->lock); diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c index 530dfd19d7b52..50503a4525eb0 100644 --- a/drivers/gpio/gpiolib-sysfs.c +++ b/drivers/gpio/gpiolib-sysfs.c @@ -515,8 +515,9 @@ static ssize_t unexport_store(const struct class *class, * they may be undone on its behalf too. 
*/ if (test_and_clear_bit(FLAG_SYSFS, &desc->flags)) { - status = 0; + gpiod_unexport(desc); gpiod_free(desc); + status = 0; } done: if (status) @@ -781,8 +782,10 @@ void gpiochip_sysfs_unregister(struct gpio_device *gdev) mutex_unlock(&sysfs_lock); /* unregister gpiod class devices owned by sysfs */ - for_each_gpio_desc_with_flag(chip, desc, FLAG_SYSFS) + for_each_gpio_desc_with_flag(chip, desc, FLAG_SYSFS) { + gpiod_unexport(desc); gpiod_free(desc); + } } static int __init gpiolib_sysfs_init(void) diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 251c875b5c34c..76e0c38026c3e 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -2167,12 +2167,18 @@ static bool gpiod_free_commit(struct gpio_desc *desc) void gpiod_free(struct gpio_desc *desc) { - if (desc && desc->gdev && gpiod_free_commit(desc)) { - module_put(desc->gdev->owner); - gpio_device_put(desc->gdev); - } else { + /* + * We must not use VALIDATE_DESC_VOID() as the underlying gdev->chip + * may already be NULL but we still want to put the references. + */ + if (!desc) + return; + + if (!gpiod_free_commit(desc)) WARN_ON(extra_checks); - } + + module_put(desc->gdev->owner); + gpio_device_put(desc->gdev); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 45e9d737e5b81..6238701cde237 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3722,10 +3722,11 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev) { if (amdgpu_mcbp == 1) adev->gfx.mcbp = true; - - if ((adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 0, 0)) && - (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 0, 0)) && - adev->gfx.num_gfx_rings) + else if (amdgpu_mcbp == 0) + adev->gfx.mcbp = false; + else if ((adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 0, 0)) && + (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 0, 0)) && + adev->gfx.num_gfx_rings) adev->gfx.mcbp = true; if (amdgpu_sriov_vf(adev)) @@ -4393,6 +4394,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true); cancel_delayed_work_sync(&adev->delayed_init_work); + flush_delayed_work(&adev->gfx.gfx_off_delay_work); amdgpu_ras_suspend(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index c694b41f6461b..7537f5aa76f0c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -551,6 +551,41 @@ int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev) return 0; } +/** + * amdgpu_fence_need_ring_interrupt_restore - helper function to check whether + * fence driver interrupts need to be restored. + * + * @ring: ring that to be checked + * + * Interrupts for rings that belong to GFX IP don't need to be restored + * when the target power state is s0ix. + * + * Return true if need to restore interrupts, false otherwise. 
+ */ +static bool amdgpu_fence_need_ring_interrupt_restore(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + bool is_gfx_power_domain = false; + + switch (ring->funcs->type) { + case AMDGPU_RING_TYPE_SDMA: + /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */ + if (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0)) + is_gfx_power_domain = true; + break; + case AMDGPU_RING_TYPE_GFX: + case AMDGPU_RING_TYPE_COMPUTE: + case AMDGPU_RING_TYPE_KIQ: + case AMDGPU_RING_TYPE_MES: + is_gfx_power_domain = true; + break; + default: + break; + } + + return !(adev->in_s0ix && is_gfx_power_domain); +} + /** * amdgpu_fence_driver_hw_fini - tear down the fence driver * for all possible rings. @@ -579,7 +614,8 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev) amdgpu_fence_driver_force_completion(ring); if (!drm_dev_is_unplugged(adev_to_drm(adev)) && - ring->fence_drv.irq_src) + ring->fence_drv.irq_src && + amdgpu_fence_need_ring_interrupt_restore(ring)) amdgpu_irq_put(adev, ring->fence_drv.irq_src, ring->fence_drv.irq_type); @@ -655,7 +691,8 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev) continue; /* enable the interrupt */ - if (ring->fence_drv.irq_src) + if (ring->fence_drv.irq_src && + amdgpu_fence_need_ring_interrupt_restore(ring)) amdgpu_irq_get(adev, ring->fence_drv.irq_src, ring->fence_drv.irq_type); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index a33d4bc34cee7..fd81b04559d49 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -692,15 +692,8 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable) if (adev->gfx.gfx_off_req_count == 0 && !adev->gfx.gfx_off_state) { - /* If going to s2idle, no need to wait */ - if (adev->in_s0ix) { - if (!amdgpu_dpm_set_powergating_by_smu(adev, - AMD_IP_BLOCK_TYPE_GFX, true)) - adev->gfx.gfx_off_state = true; - } else { - schedule_delayed_work(&adev->gfx.gfx_off_delay_work, + schedule_delayed_work(&adev->gfx.gfx_off_delay_work, delay); - } } } else { if (adev->gfx.gfx_off_req_count == 0) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c index b779ee4bbaa7b..e1ee1c7117fb9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c @@ -397,7 +397,7 @@ void amdgpu_sw_ring_ib_begin(struct amdgpu_ring *ring) struct amdgpu_ring_mux *mux = &adev->gfx.muxer; WARN_ON(!ring->is_sw_ring); - if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT) { + if (adev->gfx.mcbp && ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT) { if (amdgpu_mcbp_scan(mux) > 0) amdgpu_mcbp_trigger_preempt(mux); return; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c index 9c9cca1294989..565a1fa436d4b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c @@ -239,8 +239,13 @@ static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev) for (i = 1; i < MAX_XCP; i++) { ret = amdgpu_xcp_drm_dev_alloc(&p_ddev); - if (ret) + if (ret == -ENOSPC) { + dev_warn(adev->dev, + "Skip xcp node #%d when out of drm node resource.", i); + return 0; + } else if (ret) { return ret; + } /* Redirect all IOCTLs to the primary device */ adev->xcp_mgr->xcp[i].rdev = p_ddev->render->dev; @@ -328,6 +333,9 @@ int amdgpu_xcp_dev_register(struct amdgpu_device *adev, return 0; for (i = 1; i < MAX_XCP; i++) { + if (!adev->xcp_mgr->xcp[i].ddev) + break; + ret = 
drm_dev_register(adev->xcp_mgr->xcp[i].ddev, ent->driver_data); if (ret) return ret; @@ -345,6 +353,9 @@ void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev) return; for (i = 1; i < MAX_XCP; i++) { + if (!adev->xcp_mgr->xcp[i].ddev) + break; + p_ddev = adev->xcp_mgr->xcp[i].ddev; drm_dev_unplug(p_ddev); p_ddev->render->dev = adev->xcp_mgr->xcp[i].rdev; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 61fc62f3e0034..4a17bb7c7b27d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1965,7 +1965,14 @@ int kfd_topology_add_device(struct kfd_node *gpu) const char *asic_name = amdgpu_asic_name[gpu->adev->asic_type]; gpu_id = kfd_generate_gpu_id(gpu); - pr_debug("Adding new GPU (ID: 0x%x) to topology\n", gpu_id); + if (gpu->xcp && !gpu->xcp->ddev) { + dev_warn(gpu->adev->dev, + "Won't add GPU (ID: 0x%x) to topology since it has no drm node assigned.", + gpu_id); + return 0; + } else { + pr_debug("Adding new GPU (ID: 0x%x) to topology\n", gpu_id); + } /* Check to see if this gpu device exists in the topology_device_list. * If so, assign the gpu to that device, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 4cc8de2627ce9..9f2e24398cd77 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -712,7 +712,7 @@ static const struct dc_debug_options debug_defaults_drv = { .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, - .pipe_split_policy = MPC_SPLIT_DYNAMIC, + .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 0cda3b276f611..f0800c0c5168c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -588,7 +588,9 @@ static int sienna_cichlid_tables_init(struct smu_context *smu) return -ENOMEM; } -static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *smu) +static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *smu, + bool use_metrics_v3, + bool use_metrics_v2) { struct smu_table_context *smu_table= &smu->smu_table; SmuMetricsExternal_t *metrics_ext = @@ -596,13 +598,11 @@ static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *s uint32_t throttler_status = 0; int i; - if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) && - (smu->smc_fw_version >= 0x3A4900)) { + if (use_metrics_v3) { for (i = 0; i < THROTTLER_COUNT; i++) throttler_status |= (metrics_ext->SmuMetrics_V3.ThrottlingPercentage[i] ? 1U << i : 0); - } else if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) && - (smu->smc_fw_version >= 0x3A4300)) { + } else if (use_metrics_v2) { for (i = 0; i < THROTTLER_COUNT; i++) throttler_status |= (metrics_ext->SmuMetrics_V2.ThrottlingPercentage[i] ? 
1U << i : 0); @@ -864,7 +864,7 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu, metrics->TemperatureVrSoc) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; break; case METRICS_THROTTLER_STATUS: - *value = sienna_cichlid_get_throttler_status_locked(smu); + *value = sienna_cichlid_get_throttler_status_locked(smu, use_metrics_v3, use_metrics_v2); break; case METRICS_CURR_FANSPEED: *value = use_metrics_v3 ? metrics_v3->CurrFanSpeed : @@ -4017,7 +4017,7 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu, gpu_metrics->current_dclk1 = use_metrics_v3 ? metrics_v3->CurrClock[PPCLK_DCLK_1] : use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_DCLK_1] : metrics->CurrClock[PPCLK_DCLK_1]; - gpu_metrics->throttle_status = sienna_cichlid_get_throttler_status_locked(smu); + gpu_metrics->throttle_status = sienna_cichlid_get_throttler_status_locked(smu, use_metrics_v3, use_metrics_v2); gpu_metrics->indep_throttle_status = smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status, sienna_cichlid_throttler_map); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index fddcd834bcecc..0fb6be11a0cc7 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -331,6 +331,7 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu) struct smu_13_0_0_powerplay_table *powerplay_table = table_context->power_play_table; struct smu_baco_context *smu_baco = &smu->smu_baco; + PPTable_t *pptable = smu->smu_table.driver_pptable; #if 0 PPTable_t *pptable = smu->smu_table.driver_pptable; const OverDriveLimits_t * const overdrive_upperlimits = @@ -371,6 +372,9 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu) table_context->thermal_controller_type = powerplay_table->thermal_controller_type; + smu->adev->pm.no_fan = + !(pptable->SkuTable.FeaturesToRun[0] & (1 << FEATURE_FAN_CONTROL_BIT)); + return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index fe4ee2daa5d8d..dc6104a04dce6 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -81,9 +81,10 @@ #define EPSILON 1 #define smnPCIE_ESM_CTRL 0x193D0 -#define smnPCIE_LC_LINK_WIDTH_CNTL 0x1ab40288 +#define smnPCIE_LC_LINK_WIDTH_CNTL 0x1a340288 #define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L #define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4 +#define MAX_LINK_WIDTH 6 static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), @@ -708,16 +709,19 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu, *value = SMUQ10_TO_UINT(metrics->SocketPower) << 8; break; case METRICS_TEMPERATURE_HOTSPOT: - *value = SMUQ10_TO_UINT(metrics->MaxSocketTemperature); + *value = SMUQ10_TO_UINT(metrics->MaxSocketTemperature) * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; break; case METRICS_TEMPERATURE_MEM: - *value = SMUQ10_TO_UINT(metrics->MaxHbmTemperature); + *value = SMUQ10_TO_UINT(metrics->MaxHbmTemperature) * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; break; /* This is the max of all VRs and not just SOC VR. * No need to define another data type for the same. 
*/ case METRICS_TEMPERATURE_VRSOC: - *value = SMUQ10_TO_UINT(metrics->MaxVrTemperature); + *value = SMUQ10_TO_UINT(metrics->MaxVrTemperature) * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; break; default: *value = UINT_MAX; @@ -1966,6 +1970,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table struct amdgpu_device *adev = smu->adev; int ret = 0, inst0, xcc0; MetricsTable_t *metrics; + u16 link_width_level; inst0 = adev->sdma.instance[0].aid_id; xcc0 = GET_INST(GC, 0); @@ -2016,8 +2021,12 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table gpu_metrics->throttle_status = 0; if (!(adev->flags & AMD_IS_APU)) { + link_width_level = smu_v13_0_6_get_current_pcie_link_width_level(smu); + if (link_width_level > MAX_LINK_WIDTH) + link_width_level = 0; + gpu_metrics->pcie_link_width = - smu_v13_0_6_get_current_pcie_link_width_level(smu); + DECODE_LANE_WIDTH(link_width_level); gpu_metrics->pcie_link_speed = smu_v13_0_6_get_current_pcie_link_speed(smu); } diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c index 043b8109e64aa..73ec60757dbcb 100644 --- a/drivers/gpu/drm/bridge/samsung-dsim.c +++ b/drivers/gpu/drm/bridge/samsung-dsim.c @@ -1386,6 +1386,18 @@ static void samsung_dsim_disable_irq(struct samsung_dsim *dsi) disable_irq(dsi->irq); } +static void samsung_dsim_set_stop_state(struct samsung_dsim *dsi, bool enable) +{ + u32 reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG); + + if (enable) + reg |= DSIM_FORCE_STOP_STATE; + else + reg &= ~DSIM_FORCE_STOP_STATE; + + samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg); +} + static int samsung_dsim_init(struct samsung_dsim *dsi) { const struct samsung_dsim_driver_data *driver_data = dsi->driver_data; @@ -1445,15 +1457,12 @@ static void samsung_dsim_atomic_enable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct samsung_dsim *dsi = bridge_to_dsi(bridge); - u32 reg; if (samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type)) { samsung_dsim_set_display_mode(dsi); samsung_dsim_set_display_enable(dsi, true); } else { - reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG); - reg &= ~DSIM_FORCE_STOP_STATE; - samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg); + samsung_dsim_set_stop_state(dsi, false); } dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE; @@ -1463,16 +1472,12 @@ static void samsung_dsim_atomic_disable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct samsung_dsim *dsi = bridge_to_dsi(bridge); - u32 reg; if (!(dsi->state & DSIM_STATE_ENABLED)) return; - if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type)) { - reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG); - reg |= DSIM_FORCE_STOP_STATE; - samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg); - } + if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type)) + samsung_dsim_set_stop_state(dsi, true); dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE; } @@ -1775,6 +1780,8 @@ static ssize_t samsung_dsim_host_transfer(struct mipi_dsi_host *host, if (ret) return ret; + samsung_dsim_set_stop_state(dsi, false); + ret = mipi_dsi_create_packet(&xfer.packet, msg); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index e0dbd9140726b..1f470968ed14b 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -3456,6 +3456,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto connector->base.id, connector->name); return NULL; } + if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) { + drm_dbg_kms(dev, 
"[CONNECTOR:%d:%s] Composite sync not supported\n", + connector->base.id, connector->name); + } /* it is incorrect if hsync/vsync width is zero */ if (!hsync_pulse_width || !vsync_pulse_width) { @@ -3502,27 +3506,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto if (info->quirks & EDID_QUIRK_DETAILED_SYNC_PP) { mode->flags |= DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC; } else { - switch (pt->misc & DRM_EDID_PT_SYNC_MASK) { - case DRM_EDID_PT_ANALOG_CSYNC: - case DRM_EDID_PT_BIPOLAR_ANALOG_CSYNC: - drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Analog composite sync!\n", - connector->base.id, connector->name); - mode->flags |= DRM_MODE_FLAG_CSYNC | DRM_MODE_FLAG_NCSYNC; - break; - case DRM_EDID_PT_DIGITAL_CSYNC: - drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Digital composite sync!\n", - connector->base.id, connector->name); - mode->flags |= DRM_MODE_FLAG_CSYNC; - mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ? - DRM_MODE_FLAG_PCSYNC : DRM_MODE_FLAG_NCSYNC; - break; - case DRM_EDID_PT_DIGITAL_SEPARATE_SYNC: - mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ? - DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC; - mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ? - DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC; - break; - } + mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ? + DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC; + mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ? + DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC; } set_size: diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 2fb9bf901a2cc..3f479483d7d80 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -262,6 +262,26 @@ static bool drm_kms_helper_enable_hpd(struct drm_device *dev) } #define DRM_OUTPUT_POLL_PERIOD (10*HZ) +static void reschedule_output_poll_work(struct drm_device *dev) +{ + unsigned long delay = DRM_OUTPUT_POLL_PERIOD; + + if (dev->mode_config.delayed_event) + /* + * FIXME: + * + * Use short (1s) delay to handle the initial delayed event. + * This delay should not be needed, but Optimus/nouveau will + * fail in a mysterious way if the delayed event is handled as + * soon as possible like it is done in + * drm_helper_probe_single_connector_modes() in case the poll + * was enabled before. + */ + delay = HZ; + + schedule_delayed_work(&dev->mode_config.output_poll_work, delay); +} + /** * drm_kms_helper_poll_enable - re-enable output polling. * @dev: drm_device @@ -279,37 +299,41 @@ static bool drm_kms_helper_enable_hpd(struct drm_device *dev) */ void drm_kms_helper_poll_enable(struct drm_device *dev) { - bool poll = false; - unsigned long delay = DRM_OUTPUT_POLL_PERIOD; - if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll || dev->mode_config.poll_running) return; - poll = drm_kms_helper_enable_hpd(dev); - - if (dev->mode_config.delayed_event) { - /* - * FIXME: - * - * Use short (1s) delay to handle the initial delayed event. - * This delay should not be needed, but Optimus/nouveau will - * fail in a mysterious way if the delayed event is handled as - * soon as possible like it is done in - * drm_helper_probe_single_connector_modes() in case the poll - * was enabled before. 
- */ - poll = true; - delay = HZ; - } - - if (poll) - schedule_delayed_work(&dev->mode_config.output_poll_work, delay); + if (drm_kms_helper_enable_hpd(dev) || + dev->mode_config.delayed_event) + reschedule_output_poll_work(dev); dev->mode_config.poll_running = true; } EXPORT_SYMBOL(drm_kms_helper_poll_enable); +/** + * drm_kms_helper_poll_reschedule - reschedule the output polling work + * @dev: drm_device + * + * This function reschedules the output polling work, after polling for a + * connector has been enabled. + * + * Drivers must call this helper after enabling polling for a connector by + * setting %DRM_CONNECTOR_POLL_CONNECT / %DRM_CONNECTOR_POLL_DISCONNECT flags + * in drm_connector::polled. Note that after disabling polling by clearing these + * flags for a connector will stop the output polling work automatically if + * the polling is disabled for all other connectors as well. + * + * The function can be called only after polling has been enabled by calling + * drm_kms_helper_poll_init() / drm_kms_helper_poll_enable(). + */ +void drm_kms_helper_poll_reschedule(struct drm_device *dev) +{ + if (dev->mode_config.poll_running) + reschedule_output_poll_work(dev); +} +EXPORT_SYMBOL(drm_kms_helper_poll_reschedule); + static enum drm_connector_status drm_helper_probe_detect_ctx(struct drm_connector *connector, bool force) { diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c index f0ee9bcf661df..b0c6a2a86f2f3 100644 --- a/drivers/gpu/drm/i915/display/intel_display_device.c +++ b/drivers/gpu/drm/i915/display/intel_display_device.c @@ -662,10 +662,24 @@ static const struct intel_display_device_info xe_lpdp_display = { BIT(TRANSCODER_C) | BIT(TRANSCODER_D), }; +/* + * Separate detection for no display cases to keep the display id array simple. + * + * IVB Q requires subvendor and subdevice matching to differentiate from IVB D + * GT2 server. + */ +static bool has_no_display(struct pci_dev *pdev) +{ + static const struct pci_device_id ids[] = { + INTEL_IVB_Q_IDS(0), + {} + }; + + return pci_match_id(ids, pdev); +} + #undef INTEL_VGA_DEVICE -#undef INTEL_QUANTA_VGA_DEVICE #define INTEL_VGA_DEVICE(id, info) { id, info } -#define INTEL_QUANTA_VGA_DEVICE(info) { 0x16a, info } static const struct { u32 devid; @@ -690,7 +704,6 @@ static const struct { INTEL_IRONLAKE_M_IDS(&ilk_m_display), INTEL_SNB_D_IDS(&snb_display), INTEL_SNB_M_IDS(&snb_display), - INTEL_IVB_Q_IDS(NULL), /* must be first IVB in list */ INTEL_IVB_M_IDS(&ivb_display), INTEL_IVB_D_IDS(&ivb_display), INTEL_HSW_IDS(&hsw_display), @@ -775,6 +788,11 @@ intel_display_device_probe(struct drm_i915_private *i915, bool has_gmdid, if (has_gmdid) return probe_gmdid_display(i915, gmdid_ver, gmdid_rel, gmdid_step); + if (has_no_display(pdev)) { + drm_dbg_kms(&i915->drm, "Device doesn't have display\n"); + return &no_display; + } + for (i = 0; i < ARRAY_SIZE(intel_display_ids); i++) { if (intel_display_ids[i].devid == pdev->device) return intel_display_ids[i].info; diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index 1160fa20433bd..5eac7032bb5a9 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -211,7 +211,7 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv) /* Enable polling and queue hotplug re-enabling. 
*/ if (hpd_disabled) { - drm_kms_helper_poll_enable(&dev_priv->drm); + drm_kms_helper_poll_reschedule(&dev_priv->drm); mod_delayed_work(dev_priv->unordered_wq, &dev_priv->display.hotplug.reenable_work, msecs_to_jiffies(HPD_STORM_REENABLE_DELAY)); @@ -649,7 +649,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work) drm_connector_list_iter_end(&conn_iter); if (enabled) - drm_kms_helper_poll_enable(&dev_priv->drm); + drm_kms_helper_poll_reschedule(&dev_priv->drm); mutex_unlock(&dev_priv->drm.mode_config.mutex); diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index 21f92123c8446..67e3aaf9b4320 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -2752,7 +2752,7 @@ static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void) __drm_atomic_helper_connector_reset(&sdvo_connector->base.base, &conn_state->base.base); - INIT_LIST_HEAD(&sdvo_connector->base.panel.fixed_modes); + intel_panel_init_alloc(&sdvo_connector->base); return sdvo_connector; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c index ee9f83af7cf68..477df260ae3ac 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c @@ -470,12 +470,19 @@ int intel_guc_slpc_set_ignore_eff_freq(struct intel_guc_slpc *slpc, bool val) ret = slpc_set_param(slpc, SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY, val); - if (ret) + if (ret) { guc_probe_error(slpc_to_guc(slpc), "Failed to set efficient freq(%d): %pe\n", val, ERR_PTR(ret)); - else + } else { slpc->ignore_eff_freq = val; + /* Set min to RPn when we disable efficient freq */ + if (val) + ret = slpc_set_param(slpc, + SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ, + slpc->min_freq); + } + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&slpc->lock); return ret; @@ -602,9 +609,8 @@ static int slpc_set_softlimits(struct intel_guc_slpc *slpc) return ret; if (!slpc->min_freq_softlimit) { - ret = intel_guc_slpc_get_min_freq(slpc, &slpc->min_freq_softlimit); - if (unlikely(ret)) - return ret; + /* Min softlimit is initialized to RPn */ + slpc->min_freq_softlimit = slpc->min_freq; slpc_to_gt(slpc)->defaults.min_freq = slpc->min_freq_softlimit; } else { return intel_guc_slpc_set_min_freq(slpc, @@ -755,6 +761,9 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc) return ret; } + /* Set cached value of ignore efficient freq */ + intel_guc_slpc_set_ignore_eff_freq(slpc, slpc->ignore_eff_freq); + /* Revert SLPC min/max to softlimits if necessary */ ret = slpc_set_softlimits(slpc); if (unlikely(ret)) { @@ -765,9 +774,6 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc) /* Set cached media freq ratio mode */ intel_guc_slpc_set_media_ratio_mode(slpc, slpc->media_ratio_mode); - /* Set cached value of ignore efficient freq */ - intel_guc_slpc_set_ignore_eff_freq(slpc, slpc->ignore_eff_freq); - return 0; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c index ddd146265beb4..fa70defcb5b2b 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c @@ -26,6 +26,7 @@ * The kernel driver is only responsible for loading the HuC firmware and * triggering its security authentication. 
This is done differently depending * on the platform: + * * - older platforms (from Gen9 to most Gen12s): the load is performed via DMA * and the authentication via GuC * - DG2: load and authentication are both performed via GSC. @@ -33,6 +34,7 @@ * not-DG2 older platforms), while the authentication is done in 2-steps, * a first auth for clear-media workloads via GuC and a second one for all * workloads via GSC. + * * On platforms where the GuC does the authentication, to correctly do so the * HuC binary must be loaded before the GuC one. * Loading the HuC is optional; however, not using the HuC might negatively diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index 0ad0c5885ec27..7d8671fdf4477 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -443,7 +443,6 @@ static int i915_pcode_init(struct drm_i915_private *i915) static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) { struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); - struct pci_dev *root_pdev; int ret; if (i915_inject_probe_failure(dev_priv)) @@ -557,15 +556,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) intel_bw_init_hw(dev_priv); - /* - * FIXME: Temporary hammer to avoid freezing the machine on our DGFX - * This should be totally removed when we handle the pci states properly - * on runtime PM and on s2idle cases. - */ - root_pdev = pcie_find_root_port(pdev); - if (root_pdev) - pci_d3cold_disable(root_pdev); - return 0; err_opregion: @@ -591,7 +581,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) static void i915_driver_hw_remove(struct drm_i915_private *dev_priv) { struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); - struct pci_dev *root_pdev; i915_perf_fini(dev_priv); @@ -599,10 +588,6 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv) if (pdev->msi_enabled) pci_disable_msi(pdev); - - root_pdev = pcie_find_root_port(pdev); - if (root_pdev) - pci_d3cold_enable(root_pdev); } /** @@ -1517,6 +1502,8 @@ static int intel_runtime_suspend(struct device *kdev) { struct drm_i915_private *dev_priv = kdev_to_i915(kdev); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); + struct pci_dev *root_pdev; struct intel_gt *gt; int ret, i; @@ -1568,6 +1555,15 @@ static int intel_runtime_suspend(struct device *kdev) drm_err(&dev_priv->drm, "Unclaimed access detected prior to suspending\n"); + /* + * FIXME: Temporary hammer to avoid freezing the machine on our DGFX + * This should be totally removed when we handle the pci states properly + * on runtime PM. 
+ */ + root_pdev = pcie_find_root_port(pdev); + if (root_pdev) + pci_d3cold_disable(root_pdev); + rpm->suspended = true; /* @@ -1606,6 +1602,8 @@ static int intel_runtime_resume(struct device *kdev) { struct drm_i915_private *dev_priv = kdev_to_i915(kdev); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); + struct pci_dev *root_pdev; struct intel_gt *gt; int ret, i; @@ -1619,6 +1617,11 @@ static int intel_runtime_resume(struct device *kdev) intel_opregion_notify_adapter(dev_priv, PCI_D0); rpm->suspended = false; + + root_pdev = pcie_find_root_port(pdev); + if (root_pdev) + pci_d3cold_enable(root_pdev); + if (intel_uncore_unclaimed_mmio(&dev_priv->uncore)) drm_dbg(&dev_priv->drm, "Unclaimed access during suspend, bios?\n"); diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index a2e0033e8a260..622f6eb9a8bfd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -1408,8 +1408,7 @@ nouveau_connector_create(struct drm_device *dev, ret = nvif_conn_ctor(&disp->disp, nv_connector->base.name, nv_connector->index, &nv_connector->conn); if (ret) { - kfree(nv_connector); - return ERR_PTR(ret); + goto drm_conn_err; } ret = nvif_conn_event_ctor(&nv_connector->conn, "kmsHotplug", @@ -1426,8 +1425,7 @@ nouveau_connector_create(struct drm_device *dev, if (ret) { nvif_event_dtor(&nv_connector->hpd); nvif_conn_dtor(&nv_connector->conn); - kfree(nv_connector); - return ERR_PTR(ret); + goto drm_conn_err; } } } @@ -1475,4 +1473,9 @@ nouveau_connector_create(struct drm_device *dev, drm_connector_register(connector); return connector; + +drm_conn_err: + drm_connector_cleanup(connector); + kfree(nv_connector); + return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c index 8f4f137a2af6b..213008499caaa 100644 --- a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c +++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c @@ -404,38 +404,30 @@ static int jdi_panel_add(struct jdi_panel *jdi) ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(jdi->supplies), jdi->supplies); - if (ret < 0) { - dev_err(dev, "failed to init regulator, ret=%d\n", ret); - return ret; - } + if (ret < 0) + return dev_err_probe(dev, ret, + "failed to init regulator, ret=%d\n", ret); jdi->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW); if (IS_ERR(jdi->enable_gpio)) { - ret = PTR_ERR(jdi->enable_gpio); - dev_err(dev, "cannot get enable-gpio %d\n", ret); - return ret; + return dev_err_probe(dev, PTR_ERR(jdi->enable_gpio), + "cannot get enable-gpio %d\n", ret); } jdi->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); - if (IS_ERR(jdi->reset_gpio)) { - ret = PTR_ERR(jdi->reset_gpio); - dev_err(dev, "cannot get reset-gpios %d\n", ret); - return ret; - } + if (IS_ERR(jdi->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(jdi->reset_gpio), + "cannot get reset-gpios %d\n", ret); jdi->dcdc_en_gpio = devm_gpiod_get(dev, "dcdc-en", GPIOD_OUT_LOW); - if (IS_ERR(jdi->dcdc_en_gpio)) { - ret = PTR_ERR(jdi->dcdc_en_gpio); - dev_err(dev, "cannot get dcdc-en-gpio %d\n", ret); - return ret; - } + if (IS_ERR(jdi->dcdc_en_gpio)) + return dev_err_probe(dev, PTR_ERR(jdi->dcdc_en_gpio), + "cannot get dcdc-en-gpio %d\n", ret); jdi->backlight = drm_panel_create_dsi_backlight(jdi->dsi); - if (IS_ERR(jdi->backlight)) { - ret = PTR_ERR(jdi->backlight); - dev_err(dev, "failed to register backlight %d\n", ret); - return ret; - 
} + if (IS_ERR(jdi->backlight)) + return dev_err_probe(dev, PTR_ERR(jdi->backlight), + "failed to register backlight %d\n", ret); drm_panel_init(&jdi->base, &jdi->dsi->dev, &jdi_panel_funcs, DRM_MODE_CONNECTOR_DSI); diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index aaba36b3a674b..b38d0e95cd542 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -999,21 +999,21 @@ static const struct panel_desc auo_g104sn02 = { .connector_type = DRM_MODE_CONNECTOR_LVDS, }; -static const struct drm_display_mode auo_g121ean01_mode = { - .clock = 66700, - .hdisplay = 1280, - .hsync_start = 1280 + 58, - .hsync_end = 1280 + 58 + 8, - .htotal = 1280 + 58 + 8 + 70, - .vdisplay = 800, - .vsync_start = 800 + 6, - .vsync_end = 800 + 6 + 4, - .vtotal = 800 + 6 + 4 + 10, +static const struct display_timing auo_g121ean01_timing = { + .pixelclock = { 60000000, 74400000, 90000000 }, + .hactive = { 1280, 1280, 1280 }, + .hfront_porch = { 20, 50, 100 }, + .hback_porch = { 20, 50, 100 }, + .hsync_len = { 30, 100, 200 }, + .vactive = { 800, 800, 800 }, + .vfront_porch = { 2, 10, 25 }, + .vback_porch = { 2, 10, 25 }, + .vsync_len = { 4, 18, 50 }, }; static const struct panel_desc auo_g121ean01 = { - .modes = &auo_g121ean01_mode, - .num_modes = 1, + .timings = &auo_g121ean01_timing, + .num_timings = 1, .bpc = 8, .size = { .width = 261, diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c index 58dfb15a8757f..e78de99e99335 100644 --- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c +++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c @@ -96,7 +96,7 @@ static int panfrost_read_speedbin(struct device *dev) * keep going without it; any other error means that we are * supposed to read the bin value, but we failed doing so. 
*/ - if (ret != -ENOENT) { + if (ret != -ENOENT && ret != -EOPNOTSUPP) { DRM_DEV_ERROR(dev, "Cannot read speed-bin (%d).", ret); return ret; } diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index ea993d7162e8c..307a890fde133 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -310,7 +310,7 @@ int qxl_gem_object_create_with_handle(struct qxl_device *qdev, u32 domain, size_t size, struct qxl_surface *surf, - struct qxl_bo **qobj, + struct drm_gem_object **gobj, uint32_t *handle); void qxl_gem_object_free(struct drm_gem_object *gobj); int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv); diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c index d636ba6854513..17df5c7ccf691 100644 --- a/drivers/gpu/drm/qxl/qxl_dumb.c +++ b/drivers/gpu/drm/qxl/qxl_dumb.c @@ -34,6 +34,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv, { struct qxl_device *qdev = to_qxl(dev); struct qxl_bo *qobj; + struct drm_gem_object *gobj; uint32_t handle; int r; struct qxl_surface surf; @@ -62,11 +63,13 @@ int qxl_mode_dumb_create(struct drm_file *file_priv, r = qxl_gem_object_create_with_handle(qdev, file_priv, QXL_GEM_DOMAIN_CPU, - args->size, &surf, &qobj, + args->size, &surf, &gobj, &handle); if (r) return r; + qobj = gem_to_qxl_bo(gobj); qobj->is_dumb = true; + drm_gem_object_put(gobj); args->pitch = pitch; args->handle = handle; return 0; diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c index a08da0bd9098b..fc5e3763c3595 100644 --- a/drivers/gpu/drm/qxl/qxl_gem.c +++ b/drivers/gpu/drm/qxl/qxl_gem.c @@ -72,32 +72,41 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size, return 0; } +/* + * If the caller passed a valid gobj pointer, it is responsible to call + * drm_gem_object_put() when it no longer needs to acess the object. + * + * If gobj is NULL, it is handled internally. 
+ */ int qxl_gem_object_create_with_handle(struct qxl_device *qdev, struct drm_file *file_priv, u32 domain, size_t size, struct qxl_surface *surf, - struct qxl_bo **qobj, + struct drm_gem_object **gobj, uint32_t *handle) { - struct drm_gem_object *gobj; int r; + struct drm_gem_object *local_gobj; - BUG_ON(!qobj); BUG_ON(!handle); r = qxl_gem_object_create(qdev, size, 0, domain, false, false, surf, - &gobj); + &local_gobj); if (r) return -ENOMEM; - r = drm_gem_handle_create(file_priv, gobj, handle); + r = drm_gem_handle_create(file_priv, local_gobj, handle); if (r) return r; - /* drop reference from allocate - handle holds it now */ - *qobj = gem_to_qxl_bo(gobj); - drm_gem_object_put(gobj); + + if (gobj) + *gobj = local_gobj; + else + /* drop reference from allocate - handle holds it now */ + drm_gem_object_put(local_gobj); + return 0; } diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c index 30f58b21372aa..dd0f834d881ce 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c @@ -38,7 +38,6 @@ int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr struct qxl_device *qdev = to_qxl(dev); struct drm_qxl_alloc *qxl_alloc = data; int ret; - struct qxl_bo *qobj; uint32_t handle; u32 domain = QXL_GEM_DOMAIN_VRAM; @@ -50,7 +49,7 @@ int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr domain, qxl_alloc->size, NULL, - &qobj, &handle); + NULL, &handle); if (ret) { DRM_ERROR("%s: failed to create gem ret=%d\n", __func__, ret); @@ -386,7 +385,6 @@ int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, struct drm_file *fi { struct qxl_device *qdev = to_qxl(dev); struct drm_qxl_alloc_surf *param = data; - struct qxl_bo *qobj; int handle; int ret; int size, actual_stride; @@ -406,7 +404,7 @@ int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, struct drm_file *fi QXL_GEM_DOMAIN_SURFACE, size, &surf, - &qobj, &handle); + NULL, &handle); if (ret) { DRM_ERROR("%s: failed to create gem ret=%d\n", __func__, ret); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index 82094c137855b..c43853597776f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -497,10 +497,9 @@ static int vmw_user_bo_synccpu_release(struct drm_file *filp, if (!(flags & drm_vmw_synccpu_allow_cs)) { atomic_dec(&vmw_bo->cpu_writers); } - ttm_bo_put(&vmw_bo->tbo); + vmw_user_bo_unref(vmw_bo); } - drm_gem_object_put(&vmw_bo->tbo.base); return ret; } @@ -540,8 +539,7 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data, return ret; ret = vmw_user_bo_synccpu_grab(vbo, arg->flags); - vmw_bo_unreference(&vbo); - drm_gem_object_put(&vbo->tbo.base); + vmw_user_bo_unref(vbo); if (unlikely(ret != 0)) { if (ret == -ERESTARTSYS || ret == -EBUSY) return -EBUSY; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h index 50a836e709949..1d433fceed3d8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h @@ -195,6 +195,14 @@ static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf) return buf; } +static inline void vmw_user_bo_unref(struct vmw_bo *vbo) +{ + if (vbo) { + ttm_bo_put(&vbo->tbo); + drm_gem_object_put(&vbo->tbo.base); + } +} + static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj) { return container_of((gobj), struct vmw_bo, tbo.base); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 
3810a9984a7fd..58bfdf203ecae 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -1513,4 +1513,16 @@ static inline bool vmw_has_fences(struct vmw_private *vmw) return (vmw_fifo_caps(vmw) & SVGA_FIFO_CAP_FENCE) != 0; } +static inline bool vmw_shadertype_is_valid(enum vmw_sm_type shader_model, + u32 shader_type) +{ + SVGA3dShaderType max_allowed = SVGA3D_SHADERTYPE_PREDX_MAX; + + if (shader_model >= VMW_SM_5) + max_allowed = SVGA3D_SHADERTYPE_MAX; + else if (shader_model >= VMW_SM_4) + max_allowed = SVGA3D_SHADERTYPE_DX10_MAX; + return shader_type >= SVGA3D_SHADERTYPE_MIN && shader_type < max_allowed; +} + #endif diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 6b9aa2b4ef54a..98e0723ca6f5e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -1164,8 +1164,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, } vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB); ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo); - ttm_bo_put(&vmw_bo->tbo); - drm_gem_object_put(&vmw_bo->tbo.base); + vmw_user_bo_unref(vmw_bo); if (unlikely(ret != 0)) return ret; @@ -1221,8 +1220,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM); ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo); - ttm_bo_put(&vmw_bo->tbo); - drm_gem_object_put(&vmw_bo->tbo.base); + vmw_user_bo_unref(vmw_bo); if (unlikely(ret != 0)) return ret; @@ -1992,7 +1990,7 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv, cmd = container_of(header, typeof(*cmd), header); - if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) { + if (!vmw_shadertype_is_valid(VMW_SM_LEGACY, cmd->body.type)) { VMW_DEBUG_USER("Illegal shader type %u.\n", (unsigned int) cmd->body.type); return -EINVAL; @@ -2115,8 +2113,6 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer); - SVGA3dShaderType max_shader_num = has_sm5_context(dev_priv) ? 
- SVGA3D_NUM_SHADERTYPE : SVGA3D_NUM_SHADERTYPE_DX10; struct vmw_resource *res = NULL; struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); @@ -2133,6 +2129,14 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv, if (unlikely(ret != 0)) return ret; + if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type) || + cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) { + VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n", + (unsigned int) cmd->body.type, + (unsigned int) cmd->body.slot); + return -EINVAL; + } + binding.bi.ctx = ctx_node->ctx; binding.bi.res = res; binding.bi.bt = vmw_ctx_binding_cb; @@ -2141,14 +2145,6 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv, binding.size = cmd->body.sizeInBytes; binding.slot = cmd->body.slot; - if (binding.shader_slot >= max_shader_num || - binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) { - VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n", - (unsigned int) cmd->body.type, - (unsigned int) binding.slot); - return -EINVAL; - } - vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, binding.slot); @@ -2207,15 +2203,13 @@ static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv, { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) = container_of(header, typeof(*cmd), header); - SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ? - SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX; u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dShaderResourceViewId); if ((u64) cmd->body.startView + (u64) num_sr_view > (u64) SVGA3D_DX_MAX_SRVIEWS || - cmd->body.type >= max_allowed) { + !vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) { VMW_DEBUG_USER("Invalid shader binding.\n"); return -EINVAL; } @@ -2239,8 +2233,6 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader); - SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ? 
- SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX; struct vmw_resource *res = NULL; struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); struct vmw_ctx_bindinfo_shader binding; @@ -2251,8 +2243,7 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv, cmd = container_of(header, typeof(*cmd), header); - if (cmd->body.type >= max_allowed || - cmd->body.type < SVGA3D_SHADERTYPE_MIN) { + if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) { VMW_DEBUG_USER("Illegal shader type %u.\n", (unsigned int) cmd->body.type); return -EINVAL; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index b62207be3363e..1489ad73c103f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1665,10 +1665,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, err_out: /* vmw_user_lookup_handle takes one ref so does new_fb */ - if (bo) { - vmw_bo_unreference(&bo); - drm_gem_object_put(&bo->tbo.base); - } + if (bo) + vmw_user_bo_unref(bo); if (surface) vmw_surface_unreference(&surface); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c index 7e112319a23ce..fb85f244c3d02 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c @@ -451,8 +451,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data, ret = vmw_overlay_update_stream(dev_priv, buf, arg, true); - vmw_bo_unreference(&buf); - drm_gem_object_put(&buf->tbo.base); + vmw_user_bo_unref(buf); out_unlock: mutex_unlock(&overlay->mutex); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c index e7226db8b2424..1e81ff2422cf6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c @@ -809,8 +809,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv, shader_type, num_input_sig, num_output_sig, tfile, shader_handle); out_bad_arg: - vmw_bo_unreference(&buffer); - drm_gem_object_put(&buffer->tbo.base); + vmw_user_bo_unref(buffer); return ret; } diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c index 2d8342fdc25de..05c80680dff47 100644 --- a/drivers/i2c/busses/i2c-bcm-iproc.c +++ b/drivers/i2c/busses/i2c-bcm-iproc.c @@ -233,13 +233,14 @@ static inline u32 iproc_i2c_rd_reg(struct bcm_iproc_i2c_dev *iproc_i2c, u32 offset) { u32 val; + unsigned long flags; if (iproc_i2c->idm_base) { - spin_lock(&iproc_i2c->idm_lock); + spin_lock_irqsave(&iproc_i2c->idm_lock, flags); writel(iproc_i2c->ape_addr_mask, iproc_i2c->idm_base + IDM_CTRL_DIRECT_OFFSET); val = readl(iproc_i2c->base + offset); - spin_unlock(&iproc_i2c->idm_lock); + spin_unlock_irqrestore(&iproc_i2c->idm_lock, flags); } else { val = readl(iproc_i2c->base + offset); } @@ -250,12 +251,14 @@ static inline u32 iproc_i2c_rd_reg(struct bcm_iproc_i2c_dev *iproc_i2c, static inline void iproc_i2c_wr_reg(struct bcm_iproc_i2c_dev *iproc_i2c, u32 offset, u32 val) { + unsigned long flags; + if (iproc_i2c->idm_base) { - spin_lock(&iproc_i2c->idm_lock); + spin_lock_irqsave(&iproc_i2c->idm_lock, flags); writel(iproc_i2c->ape_addr_mask, iproc_i2c->idm_base + IDM_CTRL_DIRECT_OFFSET); writel(val, iproc_i2c->base + offset); - spin_unlock(&iproc_i2c->idm_lock); + spin_unlock_irqrestore(&iproc_i2c->idm_lock, flags); } else { writel(val, iproc_i2c->base + offset); } diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c 
index 3bfd7a2232dbd..24bef0025c988 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -588,9 +588,21 @@ i2c_dw_read(struct dw_i2c_dev *dev) u32 flags = msgs[dev->msg_read_idx].flags; regmap_read(dev->map, DW_IC_DATA_CMD, &tmp); + tmp &= DW_IC_DATA_CMD_DAT; /* Ensure length byte is a valid value */ - if (flags & I2C_M_RECV_LEN && - (tmp & DW_IC_DATA_CMD_DAT) <= I2C_SMBUS_BLOCK_MAX && tmp > 0) { + if (flags & I2C_M_RECV_LEN) { + /* + * if IC_EMPTYFIFO_HOLD_MASTER_EN is set, which cannot be + * detected from the registers, the controller can be + * disabled if the STOP bit is set. But it is only set + * after receiving block data response length in + * I2C_FUNC_SMBUS_BLOCK_DATA case. That needs to read + * another byte with STOP bit set when the block data + * response length is invalid to complete the transaction. + */ + if (!tmp || tmp > I2C_SMBUS_BLOCK_MAX) + tmp = 1; + len = i2c_dw_recv_len(dev, tmp); } *buf++ = tmp; diff --git a/drivers/i2c/busses/i2c-hisi.c b/drivers/i2c/busses/i2c-hisi.c index e067671b3ce2e..0980c773cb5b1 100644 --- a/drivers/i2c/busses/i2c-hisi.c +++ b/drivers/i2c/busses/i2c-hisi.c @@ -330,6 +330,14 @@ static irqreturn_t hisi_i2c_irq(int irq, void *context) struct hisi_i2c_controller *ctlr = context; u32 int_stat; + /* + * Don't handle the interrupt if cltr->completion is NULL. We may + * reach here because the interrupt is spurious or the transfer is + * started by another port (e.g. firmware) rather than us. + */ + if (!ctlr->completion) + return IRQ_NONE; + int_stat = readl(ctlr->iobase + HISI_I2C_INT_MSTAT); hisi_i2c_clear_int(ctlr, int_stat); if (!(int_stat & HISI_I2C_INT_ALL)) diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c index c3287c887c6fa..150d923ca7f14 100644 --- a/drivers/i2c/busses/i2c-imx-lpi2c.c +++ b/drivers/i2c/busses/i2c-imx-lpi2c.c @@ -209,6 +209,9 @@ static int lpi2c_imx_config(struct lpi2c_imx_struct *lpi2c_imx) lpi2c_imx_set_mode(lpi2c_imx); clk_rate = clk_get_rate(lpi2c_imx->clks[0].clk); + if (!clk_rate) + return -EINVAL; + if (lpi2c_imx->mode == HS || lpi2c_imx->mode == ULTRA_FAST) filt = 0; else diff --git a/drivers/i2c/busses/i2c-sun6i-p2wi.c b/drivers/i2c/busses/i2c-sun6i-p2wi.c index ad8270cdbd3eb..fa6020dced595 100644 --- a/drivers/i2c/busses/i2c-sun6i-p2wi.c +++ b/drivers/i2c/busses/i2c-sun6i-p2wi.c @@ -250,7 +250,8 @@ static int p2wi_probe(struct platform_device *pdev) p2wi->rstc = devm_reset_control_get_exclusive(dev, NULL); if (IS_ERR(p2wi->rstc)) { - dev_err(dev, "failed to retrieve reset controller: %d\n", ret); + dev_err(dev, "failed to retrieve reset controller: %pe\n", + p2wi->rstc); return PTR_ERR(p2wi->rstc); } diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index bcbbf23aa530c..03fc10b45bd6e 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -442,7 +442,7 @@ static int tegra_i2c_init_dma(struct tegra_i2c_dev *i2c_dev) if (IS_VI(i2c_dev)) return 0; - if (!i2c_dev->hw->has_apb_dma) { + if (i2c_dev->hw->has_apb_dma) { if (!IS_ENABLED(CONFIG_TEGRA20_APB_DMA)) { dev_dbg(i2c_dev->dev, "APB DMA support not enabled\n"); return 0; @@ -460,6 +460,7 @@ static int tegra_i2c_init_dma(struct tegra_i2c_dev *i2c_dev) i2c_dev->dma_chan = dma_request_chan(i2c_dev->dev, "tx"); if (IS_ERR(i2c_dev->dma_chan)) { err = PTR_ERR(i2c_dev->dma_chan); + i2c_dev->dma_chan = NULL; goto err_out; } diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c index 
c9bc5a91ec832..03c58e50cc44f 100644 --- a/drivers/leds/trigger/ledtrig-netdev.c +++ b/drivers/leds/trigger/ledtrig-netdev.c @@ -406,15 +406,15 @@ static ssize_t interval_store(struct device *dev, static DEVICE_ATTR_RW(interval); -static ssize_t hw_control_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t offloaded_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct led_netdev_data *trigger_data = led_trigger_get_drvdata(dev); return sprintf(buf, "%d\n", trigger_data->hw_control); } -static DEVICE_ATTR_RO(hw_control); +static DEVICE_ATTR_RO(offloaded); static struct attribute *netdev_trig_attrs[] = { &dev_attr_device_name.attr, @@ -427,7 +427,7 @@ static struct attribute *netdev_trig_attrs[] = { &dev_attr_rx.attr, &dev_attr_tx.attr, &dev_attr_interval.attr, - &dev_attr_hw_control.attr, + &dev_attr_offloaded.attr, NULL }; ATTRIBUTE_GROUPS(netdev_trig); diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c index 40cb3cb87ba17..60425c99a2b8b 100644 --- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c +++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c @@ -1310,6 +1310,8 @@ static int mtk_jpeg_probe(struct platform_device *pdev) jpeg->dev = &pdev->dev; jpeg->variant = of_device_get_match_data(jpeg->dev); + platform_set_drvdata(pdev, jpeg); + ret = devm_of_platform_populate(&pdev->dev); if (ret) { v4l2_err(&jpeg->v4l2_dev, "Master of platform populate failed."); @@ -1381,8 +1383,6 @@ static int mtk_jpeg_probe(struct platform_device *pdev) jpeg->variant->dev_name, jpeg->vdev->num, VIDEO_MAJOR, jpeg->vdev->minor); - platform_set_drvdata(pdev, jpeg); - pm_runtime_enable(&pdev->dev); return 0; diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c index 9ff439a50f534..315e97a2450eb 100644 --- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c +++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c @@ -821,6 +821,8 @@ static int vb2ops_venc_queue_setup(struct vb2_queue *vq, return -EINVAL; if (*nplanes) { + if (*nplanes != q_data->fmt->num_planes) + return -EINVAL; for (i = 0; i < *nplanes; i++) if (sizes[i] < q_data->sizeimage[i]) return -EINVAL; diff --git a/drivers/media/platform/nxp/imx7-media-csi.c b/drivers/media/platform/nxp/imx7-media-csi.c index 0bd2613b9320f..791bde67f439c 100644 --- a/drivers/media/platform/nxp/imx7-media-csi.c +++ b/drivers/media/platform/nxp/imx7-media-csi.c @@ -9,7 +9,9 @@ #include #include #include +#include #include +#include #include #include #include @@ -1137,8 +1139,9 @@ __imx7_csi_video_try_fmt(struct v4l2_pix_format *pixfmt, * TODO: Implement configurable stride support. 
*/ walign = 8 * 8 / cc->bpp; - v4l_bound_align_image(&pixfmt->width, 1, 0xffff, walign, - &pixfmt->height, 1, 0xffff, 1, 0); + pixfmt->width = clamp(round_up(pixfmt->width, walign), walign, + round_down(65535U, walign)); + pixfmt->height = clamp(pixfmt->height, 1U, 65535U); pixfmt->bytesperline = pixfmt->width * cc->bpp / 8; pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height; diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c index 5ac2a424b13df..f4988f03640ae 100644 --- a/drivers/media/usb/uvc/uvc_v4l2.c +++ b/drivers/media/usb/uvc/uvc_v4l2.c @@ -45,7 +45,7 @@ static int uvc_control_add_xu_mapping(struct uvc_video_chain *chain, map->menu_names = NULL; map->menu_mapping = NULL; - map->menu_mask = BIT_MASK(xmap->menu_count); + map->menu_mask = GENMASK(xmap->menu_count - 1, 0); size = xmap->menu_count * sizeof(*map->menu_mapping); map->menu_mapping = kzalloc(size, GFP_KERNEL); diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index f701efb1fa785..b6f4be25b31b0 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -2097,14 +2097,14 @@ static void mmc_blk_mq_poll_completion(struct mmc_queue *mq, mmc_blk_urgent_bkops(mq, mqrq); } -static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req) +static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, enum mmc_issue_type issue_type) { unsigned long flags; bool put_card; spin_lock_irqsave(&mq->lock, flags); - mq->in_flight[mmc_issue_type(mq, req)] -= 1; + mq->in_flight[issue_type] -= 1; put_card = (mmc_tot_in_flight(mq) == 0); @@ -2117,6 +2117,7 @@ static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req) static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req, bool can_sleep) { + enum mmc_issue_type issue_type = mmc_issue_type(mq, req); struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); struct mmc_request *mrq = &mqrq->brq.mrq; struct mmc_host *host = mq->card->host; @@ -2136,7 +2137,7 @@ static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req, blk_mq_complete_request(req); } - mmc_blk_mq_dec_in_flight(mq, req); + mmc_blk_mq_dec_in_flight(mq, issue_type); } void mmc_blk_mq_recovery(struct mmc_queue *mq) diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c index b01ffb4d09737..3215063bcf868 100644 --- a/drivers/mmc/host/sdhci_f_sdh30.c +++ b/drivers/mmc/host/sdhci_f_sdh30.c @@ -210,13 +210,16 @@ static int sdhci_f_sdh30_remove(struct platform_device *pdev) { struct sdhci_host *host = platform_get_drvdata(pdev); struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host); - - reset_control_assert(priv->rst); - clk_disable_unprepare(priv->clk); - clk_disable_unprepare(priv->clk_iface); + struct clk *clk_iface = priv->clk_iface; + struct reset_control *rst = priv->rst; + struct clk *clk = priv->clk; sdhci_pltfm_unregister(pdev); + reset_control_assert(rst); + clk_disable_unprepare(clk); + clk_disable_unprepare(clk_iface); + return 0; } diff --git a/drivers/mmc/host/sunplus-mmc.c b/drivers/mmc/host/sunplus-mmc.c index db5e0dcdfa7f3..2bdebeb1f8e49 100644 --- a/drivers/mmc/host/sunplus-mmc.c +++ b/drivers/mmc/host/sunplus-mmc.c @@ -863,11 +863,9 @@ static int spmmc_drv_probe(struct platform_device *pdev) struct spmmc_host *host; int ret = 0; - mmc = mmc_alloc_host(sizeof(*host), &pdev->dev); - if (!mmc) { - ret = -ENOMEM; - goto probe_free_host; - } + mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(struct spmmc_host)); + if (!mmc) + return -ENOMEM; host = mmc_priv(mmc); host->mmc = 
mmc; @@ -902,7 +900,7 @@ static int spmmc_drv_probe(struct platform_device *pdev) ret = mmc_of_parse(mmc); if (ret) - goto probe_free_host; + goto clk_disable; mmc->ops = &spmmc_ops; mmc->f_min = SPMMC_MIN_CLK; @@ -911,7 +909,7 @@ static int spmmc_drv_probe(struct platform_device *pdev) ret = mmc_regulator_get_supply(mmc); if (ret) - goto probe_free_host; + goto clk_disable; if (!mmc->ocr_avail) mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; @@ -927,14 +925,17 @@ static int spmmc_drv_probe(struct platform_device *pdev) host->tuning_info.enable_tuning = 1; pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); - mmc_add_host(mmc); + ret = mmc_add_host(mmc); + if (ret) + goto pm_disable; - return ret; + return 0; -probe_free_host: - if (mmc) - mmc_free_host(mmc); +pm_disable: + pm_runtime_disable(&pdev->dev); +clk_disable: + clk_disable_unprepare(host->clk); return ret; } @@ -948,7 +949,6 @@ static int spmmc_drv_remove(struct platform_device *dev) pm_runtime_put_noidle(&dev->dev); pm_runtime_disable(&dev->dev); platform_set_drvdata(dev, NULL); - mmc_free_host(host->mmc); return 0; } diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c index 521af9251f335..bf2a92fba0ed8 100644 --- a/drivers/mmc/host/wbsd.c +++ b/drivers/mmc/host/wbsd.c @@ -1705,8 +1705,6 @@ static int wbsd_init(struct device *dev, int base, int irq, int dma, wbsd_release_resources(host); wbsd_free_mmc(dev); - - mmc_free_host(mmc); return ret; } diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index b9dbad3a8af82..fc5da5d7744da 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -660,10 +660,10 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond) return NULL; arp = (struct arp_pkt *)skb_network_header(skb); - /* Don't modify or load balance ARPs that do not originate locally - * (e.g.,arrive via a bridge). + /* Don't modify or load balance ARPs that do not originate + * from the bond itself or a VLAN directly above the bond. 
*/ - if (!bond_slave_has_mac_rx(bond, arp->mac_src)) + if (!bond_slave_has_mac_rcu(bond, arp->mac_src)) return NULL; dev = ip_dev_find(dev_net(bond->dev), arp->ip_src); diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c index 4068d962203d6..98c669ad51414 100644 --- a/drivers/net/can/vxcan.c +++ b/drivers/net/can/vxcan.c @@ -192,12 +192,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev, nla_peer = data[VXCAN_INFO_PEER]; ifmp = nla_data(nla_peer); - err = rtnl_nla_parse_ifla(peer_tb, - nla_data(nla_peer) + - sizeof(struct ifinfomsg), - nla_len(nla_peer) - - sizeof(struct ifinfomsg), - NULL); + err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack); if (err < 0) return err; diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 38b3c6dda386a..b8bb9f3b3609f 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -1006,6 +1006,10 @@ mt753x_trap_frames(struct mt7530_priv *priv) mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK, MT753X_BPDU_CPU_ONLY); + /* Trap 802.1X PAE frames to the CPU port(s) */ + mt7530_rmw(priv, MT753X_BPC, MT753X_PAE_PORT_FW_MASK, + MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY)); + /* Trap LLDP frames with :0E MAC DA to the CPU port(s) */ mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_PORT_FW_MASK, MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY)); diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index 08045b035e6ab..17e42d30fff4b 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h @@ -66,6 +66,8 @@ enum mt753x_id { /* Registers for BPDU and PAE frame control*/ #define MT753X_BPC 0x24 #define MT753X_BPDU_PORT_FW_MASK GENMASK(2, 0) +#define MT753X_PAE_PORT_FW_MASK GENMASK(18, 16) +#define MT753X_PAE_PORT_FW(x) FIELD_PREP(MT753X_PAE_PORT_FW_MASK, x) /* Register for :03 and :0E MAC DA frame control */ #define MT753X_RGAC2 0x2c diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index c7d51a539451e..7af2f08a62f14 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3034,6 +3034,14 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip) /* If there is a GPIO connected to the reset pin, toggle it */ if (gpiod) { + /* If the switch has just been reset and not yet completed + * loading EEPROM, the reset may interrupt the I2C transaction + * mid-byte, causing the first EEPROM read after the reset + * from the wrong location resulting in the switch booting + * to wrong mode and inoperable. 
+ */ + mv88e6xxx_g1_wait_eeprom_done(chip); + gpiod_set_value_cansleep(gpiod, 1); usleep_range(10000, 20000); gpiod_set_value_cansleep(gpiod, 0); diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 1c113957fcf45..f16daa9b1765f 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -1069,6 +1069,9 @@ static u64 vsc9959_tas_remaining_gate_len_ps(u64 gate_len_ns) if (gate_len_ns == U64_MAX) return U64_MAX; + if (gate_len_ns < VSC9959_TAS_MIN_GATE_LEN_NS) + return 0; + return (gate_len_ns - VSC9959_TAS_MIN_GATE_LEN_NS) * PSEC_PER_NSEC; } diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 392ec09a1d8a6..3e4fb3c3e8342 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -1793,11 +1793,9 @@ static int b44_nway_reset(struct net_device *dev) b44_readphy(bp, MII_BMCR, &bmcr); b44_readphy(bp, MII_BMCR, &bmcr); r = -EINVAL; - if (bmcr & BMCR_ANENABLE) { - b44_writephy(bp, MII_BMCR, - bmcr | BMCR_ANRESTART); - r = 0; - } + if (bmcr & BMCR_ANENABLE) + r = b44_writephy(bp, MII_BMCR, + bmcr | BMCR_ANRESTART); spin_unlock_irq(&bp->lock); return r; diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 10c7c232cc4ec..52ee3751187a2 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1448,7 +1448,7 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac) int err; phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); - if (!phy_dev || IS_ERR(phy_dev)) { + if (IS_ERR(phy_dev)) { dev_err(bgmac->dev, "Failed to register fixed PHY device\n"); return -ENODEV; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 8bcde0a6e011c..e2a4e1088b7f4 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1508,6 +1508,8 @@ struct bnx2x { bool cnic_loaded; struct cnic_eth_dev *(*cnic_probe)(struct net_device *); + bool nic_stopped; + /* Flag that indicates that we can start looking for FCoE L2 queue * completions in the default status block. 
*/ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 6ea5521074d32..e9c1e1bb55806 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -2715,6 +2715,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) bnx2x_add_all_napi(bp); DP(NETIF_MSG_IFUP, "napi added\n"); bnx2x_napi_enable(bp); + bp->nic_stopped = false; if (IS_PF(bp)) { /* set pf load just before approaching the MCP */ @@ -2960,6 +2961,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) load_error1: bnx2x_napi_disable(bp); bnx2x_del_all_napi(bp); + bp->nic_stopped = true; /* clear pf_load status, as it was already set */ if (IS_PF(bp)) @@ -3095,14 +3097,17 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) if (!CHIP_IS_E1x(bp)) bnx2x_pf_disable(bp); - /* Disable HW interrupts, NAPI */ - bnx2x_netif_stop(bp, 1); - /* Delete all NAPI objects */ - bnx2x_del_all_napi(bp); - if (CNIC_LOADED(bp)) - bnx2x_del_all_napi_cnic(bp); - /* Release IRQs */ - bnx2x_free_irq(bp); + if (!bp->nic_stopped) { + /* Disable HW interrupts, NAPI */ + bnx2x_netif_stop(bp, 1); + /* Delete all NAPI objects */ + bnx2x_del_all_napi(bp); + if (CNIC_LOADED(bp)) + bnx2x_del_all_napi_cnic(bp); + /* Release IRQs */ + bnx2x_free_irq(bp); + bp->nic_stopped = true; + } /* Report UNLOAD_DONE to MCP */ bnx2x_send_unload_done(bp, false); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 1e7a6f1d42233..0d8e61c63c7c6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -9474,15 +9474,18 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link) } } - /* Disable HW interrupts, NAPI */ - bnx2x_netif_stop(bp, 1); - /* Delete all NAPI objects */ - bnx2x_del_all_napi(bp); - if (CNIC_LOADED(bp)) - bnx2x_del_all_napi_cnic(bp); + if (!bp->nic_stopped) { + /* Disable HW interrupts, NAPI */ + bnx2x_netif_stop(bp, 1); + /* Delete all NAPI objects */ + bnx2x_del_all_napi(bp); + if (CNIC_LOADED(bp)) + bnx2x_del_all_napi_cnic(bp); - /* Release IRQs */ - bnx2x_free_irq(bp); + /* Release IRQs */ + bnx2x_free_irq(bp); + bp->nic_stopped = true; + } /* Reset the chip, unless PCI function is offline. 
If we reach this * point following a PCI error handling, it means device is really @@ -14238,13 +14241,16 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev) } bnx2x_drain_tx_queues(bp); bnx2x_send_unload_req(bp, UNLOAD_RECOVERY); - bnx2x_netif_stop(bp, 1); - bnx2x_del_all_napi(bp); + if (!bp->nic_stopped) { + bnx2x_netif_stop(bp, 1); + bnx2x_del_all_napi(bp); - if (CNIC_LOADED(bp)) - bnx2x_del_all_napi_cnic(bp); + if (CNIC_LOADED(bp)) + bnx2x_del_all_napi_cnic(bp); - bnx2x_free_irq(bp); + bnx2x_free_irq(bp); + bp->nic_stopped = true; + } /* Report UNLOAD_DONE to MCP */ bnx2x_send_unload_done(bp, true); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 0657a0f5170f4..8946a931e87ee 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -529,13 +529,16 @@ void bnx2x_vfpf_close_vf(struct bnx2x *bp) bnx2x_vfpf_finalize(bp, &req->first_tlv); free_irq: - /* Disable HW interrupts, NAPI */ - bnx2x_netif_stop(bp, 0); - /* Delete all NAPI objects */ - bnx2x_del_all_napi(bp); - - /* Release IRQs */ - bnx2x_free_irq(bp); + if (!bp->nic_stopped) { + /* Disable HW interrupts, NAPI */ + bnx2x_netif_stop(bp, 0); + /* Delete all NAPI objects */ + bnx2x_del_all_napi(bp); + + /* Release IRQs */ + bnx2x_free_irq(bp); + bp->nic_stopped = true; + } } static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 0092e46c46f83..cc3afb605b1ec 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -617,7 +617,7 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) }; phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); - if (!phydev || IS_ERR(phydev)) { + if (IS_ERR(phydev)) { dev_err(kdev, "failed to register fixed PHY device\n"); return -ENODEV; } diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 5ef073a79ce94..cb2810f175ccd 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -6881,7 +6881,10 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) ri->data = NULL; - skb = build_skb(data, frag_size); + if (frag_size) + skb = build_skb(data, frag_size); + else + skb = slab_build_skb(data); if (!skb) { tg3_frag_free(frag_size != 0, data); goto drop_it_no_recycle; diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index f6a0f12a6d520..82929ee76739d 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -5194,6 +5194,9 @@ static int __maybe_unused macb_suspend(struct device *dev) unsigned int q; int err; + if (!device_may_wakeup(&bp->dev->dev)) + phy_exit(bp->sgmii_phy); + if (!netif_running(netdev)) return 0; @@ -5254,7 +5257,6 @@ static int __maybe_unused macb_suspend(struct device *dev) if (!(bp->wol & MACB_WOL_ENABLED)) { rtnl_lock(); phylink_stop(bp->phylink); - phy_exit(bp->sgmii_phy); rtnl_unlock(); spin_lock_irqsave(&bp->lock, flags); macb_reset_hw(bp); @@ -5284,6 +5286,9 @@ static int __maybe_unused macb_resume(struct device *dev) unsigned int q; int err; + if (!device_may_wakeup(&bp->dev->dev)) + phy_init(bp->sgmii_phy); + if (!netif_running(netdev)) return 0; @@ -5344,8 +5349,6 @@ static int __maybe_unused macb_resume(struct device *dev) macb_set_rx_mode(netdev); 
macb_restore_features(bp); rtnl_lock(); - if (!device_may_wakeup(&bp->dev->dev)) - phy_init(bp->sgmii_phy); phylink_start(bp->phylink); rtnl_unlock(); diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c index c2e7037c7ba1c..7750702900fa6 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c @@ -1466,7 +1466,7 @@ static void make_established(struct sock *sk, u32 snd_isn, unsigned int opt) tp->write_seq = snd_isn; tp->snd_nxt = snd_isn; tp->snd_una = snd_isn; - inet_sk(sk)->inet_id = get_random_u16(); + atomic_set(&inet_sk(sk)->inet_id, get_random_u16()); assign_rxopt(sk, opt); if (tp->rcv_wnd > (RCV_BUFSIZ_M << 10)) diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 113fcb3e353ea..832a2ae019509 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -203,7 +203,7 @@ static inline void ibmveth_flush_buffer(void *addr, unsigned long length) unsigned long offset; for (offset = 0; offset < length; offset += SMP_CACHE_BYTES) - asm("dcbfl %0,%1" :: "b" (addr), "r" (offset)); + asm("dcbf %0,%1,1" :: "b" (addr), "r" (offset)); } /* replenish the buffers for a pool. note that we don't need to diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 29ad1797adcea..a86bfa3bba74a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2609,7 +2609,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) retval = i40e_correct_mac_vlan_filters (vsi, &tmp_add_list, &tmp_del_list, vlan_filters); - else + else if (pf->vf) retval = i40e_correct_vf_mac_vlan_filters (vsi, &tmp_add_list, &tmp_del_list, vlan_filters, pf->vf[vsi->vf_id].trusted); @@ -2782,7 +2782,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) } /* if the VF is not trusted do not do promisc */ - if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) { + if (vsi->type == I40E_VSI_SRIOV && pf->vf && + !pf->vf[vsi->vf_id].trusted) { clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); goto out; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 9da0c87f03288..f99c1f7fec406 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -210,11 +210,11 @@ static int i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset, * @hw: pointer to the HW structure. * @module_pointer: module pointer location in words from the NVM beginning * @offset: offset in words from module start - * @words: number of words to write - * @data: buffer with words to write to the Shadow RAM + * @words: number of words to read + * @data: buffer with words to read to the Shadow RAM * @last_command: tells the AdminQ that this is the last command * - * Writes a 16 bit words buffer to the Shadow RAM using the admin command. + * Reads a 16 bit words buffer to the Shadow RAM using the admin command. 
**/ static int i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer, u32 offset, @@ -234,18 +234,18 @@ static int i40e_read_nvm_aq(struct i40e_hw *hw, */ if ((offset + words) > hw->nvm.sr_size) i40e_debug(hw, I40E_DEBUG_NVM, - "NVM write error: offset %d beyond Shadow RAM limit %d\n", + "NVM read error: offset %d beyond Shadow RAM limit %d\n", (offset + words), hw->nvm.sr_size); else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS) - /* We can write only up to 4KB (one sector), in one AQ write */ + /* We can read only up to 4KB (one sector), in one AQ write */ i40e_debug(hw, I40E_DEBUG_NVM, - "NVM write fail error: tried to write %d words, limit is %d.\n", + "NVM read fail error: tried to read %d words, limit is %d.\n", words, I40E_SR_SECTOR_SIZE_IN_WORDS); else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS) != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS)) - /* A single write cannot spread over two sectors */ + /* A single read cannot spread over two sectors */ i40e_debug(hw, I40E_DEBUG_NVM, - "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n", + "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n", offset, words); else ret_code = i40e_aq_read_nvm(hw, module_pointer, diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index 460ca561819a9..a34303ad057d0 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -1289,6 +1289,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe fltr->ip_mask.src_port = fsp->m_u.tcp_ip4_spec.psrc; fltr->ip_mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst; fltr->ip_mask.tos = fsp->m_u.tcp_ip4_spec.tos; + fltr->ip_ver = 4; break; case AH_V4_FLOW: case ESP_V4_FLOW: @@ -1300,6 +1301,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.ah_ip4_spec.ip4dst; fltr->ip_mask.spi = fsp->m_u.ah_ip4_spec.spi; fltr->ip_mask.tos = fsp->m_u.ah_ip4_spec.tos; + fltr->ip_ver = 4; break; case IPV4_USER_FLOW: fltr->ip_data.v4_addrs.src_ip = fsp->h_u.usr_ip4_spec.ip4src; @@ -1312,6 +1314,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe fltr->ip_mask.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes; fltr->ip_mask.tos = fsp->m_u.usr_ip4_spec.tos; fltr->ip_mask.proto = fsp->m_u.usr_ip4_spec.proto; + fltr->ip_ver = 4; break; case TCP_V6_FLOW: case UDP_V6_FLOW: @@ -1330,6 +1333,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe fltr->ip_mask.src_port = fsp->m_u.tcp_ip6_spec.psrc; fltr->ip_mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst; fltr->ip_mask.tclass = fsp->m_u.tcp_ip6_spec.tclass; + fltr->ip_ver = 6; break; case AH_V6_FLOW: case ESP_V6_FLOW: @@ -1345,6 +1349,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe sizeof(struct in6_addr)); fltr->ip_mask.spi = fsp->m_u.ah_ip6_spec.spi; fltr->ip_mask.tclass = fsp->m_u.ah_ip6_spec.tclass; + fltr->ip_ver = 6; break; case IPV6_USER_FLOW: memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src, @@ -1361,6 +1366,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe fltr->ip_mask.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes; fltr->ip_mask.tclass = fsp->m_u.usr_ip6_spec.tclass; fltr->ip_mask.proto = fsp->m_u.usr_ip6_spec.l4_proto; + fltr->ip_ver = 6; break; case ETHER_FLOW: fltr->eth_data.etype = 
fsp->h_u.ether_spec.h_proto; @@ -1371,6 +1377,10 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe return -EINVAL; } + err = iavf_validate_fdir_fltr_masks(adapter, fltr); + if (err) + return err; + if (iavf_fdir_is_dup_fltr(adapter, fltr)) return -EEXIST; diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.c b/drivers/net/ethernet/intel/iavf/iavf_fdir.c index 505e82ebafe47..03e774bd2a5b4 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_fdir.c +++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.c @@ -18,6 +18,79 @@ static const struct in6_addr ipv6_addr_full_mask = { } }; +static const struct in6_addr ipv6_addr_zero_mask = { + .in6_u = { + .u6_addr8 = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + } + } +}; + +/** + * iavf_validate_fdir_fltr_masks - validate Flow Director filter fields masks + * @adapter: pointer to the VF adapter structure + * @fltr: Flow Director filter data structure + * + * Returns 0 if all masks of packet fields are either full or empty. Returns + * error on at least one partial mask. + */ +int iavf_validate_fdir_fltr_masks(struct iavf_adapter *adapter, + struct iavf_fdir_fltr *fltr) +{ + if (fltr->eth_mask.etype && fltr->eth_mask.etype != htons(U16_MAX)) + goto partial_mask; + + if (fltr->ip_ver == 4) { + if (fltr->ip_mask.v4_addrs.src_ip && + fltr->ip_mask.v4_addrs.src_ip != htonl(U32_MAX)) + goto partial_mask; + + if (fltr->ip_mask.v4_addrs.dst_ip && + fltr->ip_mask.v4_addrs.dst_ip != htonl(U32_MAX)) + goto partial_mask; + + if (fltr->ip_mask.tos && fltr->ip_mask.tos != U8_MAX) + goto partial_mask; + } else if (fltr->ip_ver == 6) { + if (memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_zero_mask, + sizeof(struct in6_addr)) && + memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_full_mask, + sizeof(struct in6_addr))) + goto partial_mask; + + if (memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_zero_mask, + sizeof(struct in6_addr)) && + memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_full_mask, + sizeof(struct in6_addr))) + goto partial_mask; + + if (fltr->ip_mask.tclass && fltr->ip_mask.tclass != U8_MAX) + goto partial_mask; + } + + if (fltr->ip_mask.proto && fltr->ip_mask.proto != U8_MAX) + goto partial_mask; + + if (fltr->ip_mask.src_port && fltr->ip_mask.src_port != htons(U16_MAX)) + goto partial_mask; + + if (fltr->ip_mask.dst_port && fltr->ip_mask.dst_port != htons(U16_MAX)) + goto partial_mask; + + if (fltr->ip_mask.spi && fltr->ip_mask.spi != htonl(U32_MAX)) + goto partial_mask; + + if (fltr->ip_mask.l4_header && + fltr->ip_mask.l4_header != htonl(U32_MAX)) + goto partial_mask; + + return 0; + +partial_mask: + dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, partial masks are not supported\n"); + return -EOPNOTSUPP; +} + /** * iavf_pkt_udp_no_pay_len - the length of UDP packet without payload * @fltr: Flow Director filter data structure @@ -263,8 +336,6 @@ iavf_fill_fdir_ip4_hdr(struct iavf_fdir_fltr *fltr, VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST); } - fltr->ip_ver = 4; - return 0; } @@ -309,8 +380,6 @@ iavf_fill_fdir_ip6_hdr(struct iavf_fdir_fltr *fltr, VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST); } - fltr->ip_ver = 6; - return 0; } diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.h b/drivers/net/ethernet/intel/iavf/iavf_fdir.h index 33c55c366315b..9eb9f73f6adf3 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_fdir.h +++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.h @@ -110,6 +110,8 @@ struct iavf_fdir_fltr { struct virtchnl_fdir_add vc_add_msg; }; +int 
iavf_validate_fdir_fltr_masks(struct iavf_adapter *adapter, + struct iavf_fdir_fltr *fltr); int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index b678bdf96f3a0..074bf9403cd1b 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -435,7 +435,8 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring) /* Receive Packet Data Buffer Size. * The Packet Data Buffer Size is defined in 128 byte units. */ - rlan_ctx.dbuf = ring->rx_buf_len >> ICE_RLAN_CTX_DBUF_S; + rlan_ctx.dbuf = DIV_ROUND_UP(ring->rx_buf_len, + BIT_ULL(ICE_RLAN_CTX_DBUF_S)); /* use 32 byte descriptors */ rlan_ctx.dsize = 1; diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c index ad0a007b73988..8f232c41a89e3 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c @@ -538,6 +538,12 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode, break; case DEVLINK_ESWITCH_MODE_SWITCHDEV: { + if (ice_is_adq_active(pf)) { + dev_err(ice_pf_to_dev(pf), "Couldn't change eswitch mode to switchdev - ADQ is active. Delete ADQ configs and try again, e.g. tc qdisc del dev $PF root"); + NL_SET_ERR_MSG_MOD(extack, "Couldn't change eswitch mode to switchdev - ADQ is active. Delete ADQ configs and try again, e.g. tc qdisc del dev $PF root"); + return -EOPNOTSUPP; + } + dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev", pf->hw.pf_id); NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev"); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index cf92c39467c8e..b40dfe6ae3217 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -8823,6 +8823,11 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type, ice_setup_tc_block_cb, np, np, true); case TC_SETUP_QDISC_MQPRIO: + if (ice_is_eswitch_mode_switchdev(pf)) { + netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n"); + return -EOPNOTSUPP; + } + if (pf->adev) { mutex_lock(&pf->adev_mutex); device_lock(&pf->adev->dev); diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index 1f66914c7a202..31314e7540f8c 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -1131,7 +1131,7 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) if (!vf) return -EINVAL; - ret = ice_check_vf_ready_for_reset(vf); + ret = ice_check_vf_ready_for_cfg(vf); if (ret) goto out_put_vf; @@ -1246,7 +1246,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) goto out_put_vf; } - ret = ice_check_vf_ready_for_reset(vf); + ret = ice_check_vf_ready_for_cfg(vf); if (ret) goto out_put_vf; @@ -1300,7 +1300,7 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) return -EOPNOTSUPP; } - ret = ice_check_vf_ready_for_reset(vf); + ret = ice_check_vf_ready_for_cfg(vf); if (ret) goto out_put_vf; @@ -1613,7 +1613,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, if (!vf) return -EINVAL; - ret = ice_check_vf_ready_for_reset(vf); + ret = 
ice_check_vf_ready_for_cfg(vf); if (ret) goto out_put_vf; diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index b26ce4425f455..ea3310be8354c 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -185,25 +185,6 @@ int ice_check_vf_ready_for_cfg(struct ice_vf *vf) return 0; } -/** - * ice_check_vf_ready_for_reset - check if VF is ready to be reset - * @vf: VF to check if it's ready to be reset - * - * The purpose of this function is to ensure that the VF is not in reset, - * disabled, and is both initialized and active, thus enabling us to safely - * initialize another reset. - */ -int ice_check_vf_ready_for_reset(struct ice_vf *vf) -{ - int ret; - - ret = ice_check_vf_ready_for_cfg(vf); - if (!ret && !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) - ret = -EAGAIN; - - return ret; -} - /** * ice_trigger_vf_reset - Reset a VF on HW * @vf: pointer to the VF structure @@ -631,11 +612,17 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags) return 0; } + if (flags & ICE_VF_RESET_LOCK) + mutex_lock(&vf->cfg_lock); + else + lockdep_assert_held(&vf->cfg_lock); + if (ice_is_vf_disabled(vf)) { vsi = ice_get_vf_vsi(vf); if (!vsi) { dev_dbg(dev, "VF is already removed\n"); - return -EINVAL; + err = -EINVAL; + goto out_unlock; } ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id); @@ -644,14 +631,9 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags) dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n", vf->vf_id); - return 0; + goto out_unlock; } - if (flags & ICE_VF_RESET_LOCK) - mutex_lock(&vf->cfg_lock); - else - lockdep_assert_held(&vf->cfg_lock); - /* Set VF disable bit state here, before triggering reset */ set_bit(ICE_VF_STATE_DIS, vf->vf_states); ice_trigger_vf_reset(vf, flags & ICE_VF_RESET_VFLR, false); diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h index 67172fdd9bc27..48fea6fa03621 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h @@ -215,7 +215,6 @@ u16 ice_get_num_vfs(struct ice_pf *pf); struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf); bool ice_is_vf_disabled(struct ice_vf *vf); int ice_check_vf_ready_for_cfg(struct ice_vf *vf); -int ice_check_vf_ready_for_reset(struct ice_vf *vf); void ice_set_vf_state_dis(struct ice_vf *vf); bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf); void diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index efbc2968a7bf6..dcf628b1fccd9 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -3947,7 +3947,6 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event, ice_vc_notify_vf_link_state(vf); break; case VIRTCHNL_OP_RESET_VF: - clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states); ops->reset_vf(vf); break; case VIRTCHNL_OP_ADD_ETH_ADDR: diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 405886ee52615..319c544b9f04c 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -1385,18 +1385,6 @@ void igb_ptp_init(struct igb_adapter *adapter) return; } - spin_lock_init(&adapter->tmreg_lock); - INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work); - - if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK) - INIT_DELAYED_WORK(&adapter->ptp_overflow_work, - 
igb_ptp_overflow_check); - - adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; - adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; - - igb_ptp_reset(adapter); - adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, &adapter->pdev->dev); if (IS_ERR(adapter->ptp_clock)) { @@ -1406,6 +1394,18 @@ void igb_ptp_init(struct igb_adapter *adapter) dev_info(&adapter->pdev->dev, "added PHC on %s\n", adapter->netdev->name); adapter->ptp_flags |= IGB_PTP_ENABLED; + + spin_lock_init(&adapter->tmreg_lock); + INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work); + + if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK) + INIT_DELAYED_WORK(&adapter->ptp_overflow_work, + igb_ptp_overflow_check); + + adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + + igb_ptp_reset(adapter); } } diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index 44a5070299465..2f780cc90883c 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -546,7 +546,7 @@ #define IGC_PTM_CTRL_START_NOW BIT(29) /* Start PTM Now */ #define IGC_PTM_CTRL_EN BIT(30) /* Enable PTM */ #define IGC_PTM_CTRL_TRIG BIT(31) /* PTM Cycle trigger */ -#define IGC_PTM_CTRL_SHRT_CYC(usec) (((usec) & 0x2f) << 2) +#define IGC_PTM_CTRL_SHRT_CYC(usec) (((usec) & 0x3f) << 2) #define IGC_PTM_CTRL_PTM_TO(usec) (((usec) & 0xff) << 8) #define IGC_PTM_SHORT_CYC_DEFAULT 10 /* Default Short/interrupted cycle interval */ diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c index 1cc6af2feb38a..565320ec24f81 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c @@ -55,7 +55,7 @@ static int octep_send_mbox_req(struct octep_device *oct, list_add_tail(&d->list, &oct->ctrl_req_wait_list); ret = wait_event_interruptible_timeout(oct->ctrl_req_wait_q, (d->done != 0), - jiffies + msecs_to_jiffies(500)); + msecs_to_jiffies(500)); list_del(&d->list); if (ret == 0 || ret == 1) return -EAGAIN; diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c index 43eb6e8713511..4424de2ffd70c 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -1038,6 +1038,10 @@ static void octep_device_cleanup(struct octep_device *oct) { int i; + oct->poll_non_ioq_intr = false; + cancel_delayed_work_sync(&oct->intr_poll_task); + cancel_work_sync(&oct->ctrl_mbox_task); + dev_info(&oct->pdev->dev, "Cleaning up Octeon Device ...\n"); for (i = 0; i < OCTEP_MAX_VF; i++) { @@ -1200,14 +1204,11 @@ static void octep_remove(struct pci_dev *pdev) if (!oct) return; - cancel_work_sync(&oct->tx_timeout_task); - cancel_work_sync(&oct->ctrl_mbox_task); netdev = oct->netdev; if (netdev->reg_state == NETREG_REGISTERED) unregister_netdev(netdev); - oct->poll_non_ioq_intr = false; - cancel_delayed_work_sync(&oct->intr_poll_task); + cancel_work_sync(&oct->tx_timeout_task); octep_device_cleanup(oct); pci_release_mem_regions(pdev); free_netdev(netdev); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 04b0e885f9d2e..c2f68678e947e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -4270,9 +4270,10 @@ int 
rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, if (link < 0) return NIX_AF_ERR_RX_LINK_INVALID; - nix_find_link_frs(rvu, req, pcifunc); linkcfg: + nix_find_link_frs(rvu, req, pcifunc); + cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link)); cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16); if (req->update_minlen) diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c index 985cff910f30c..3b651efcc25e1 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed.c +++ b/drivers/net/ethernet/mediatek/mtk_wed.c @@ -221,9 +221,13 @@ void mtk_wed_fe_reset(void) for (i = 0; i < ARRAY_SIZE(hw_list); i++) { struct mtk_wed_hw *hw = hw_list[i]; - struct mtk_wed_device *dev = hw->wed_dev; + struct mtk_wed_device *dev; int err; + if (!hw) + break; + + dev = hw->wed_dev; if (!dev || !dev->wlan.reset) continue; @@ -244,8 +248,12 @@ void mtk_wed_fe_reset_complete(void) for (i = 0; i < ARRAY_SIZE(hw_list); i++) { struct mtk_wed_hw *hw = hw_list[i]; - struct mtk_wed_device *dev = hw->wed_dev; + struct mtk_wed_device *dev; + + if (!hw) + break; + dev = hw->wed_dev; if (!dev || !dev->wlan.reset_complete) continue; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h index 9e8e6184f9e43..ecfe93a479da8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h @@ -84,6 +84,8 @@ enum mlx5e_xdp_xmit_mode { * MLX5E_XDP_XMIT_MODE_XSK: * none. */ +#define MLX5E_XDP_FIFO_ENTRIES2DS_MAX_RATIO 4 + union mlx5e_xdp_info { enum mlx5e_xdp_xmit_mode mode; union { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index c27df14df1457..f7b494125eee8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1298,11 +1298,13 @@ static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa) { struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo; int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); - int entries = wq_sz * MLX5_SEND_WQEBB_NUM_DS * 2; /* upper bound for maximum num of - * entries of all xmit_modes. - */ + int entries; size_t size; + /* upper bound for maximum num of entries of all xmit_modes. */ + entries = roundup_pow_of_two(wq_sz * MLX5_SEND_WQEBB_NUM_DS * + MLX5E_XDP_FIFO_ENTRIES2DS_MAX_RATIO); + size = array_size(sizeof(*xdpi_fifo->xi), entries); xdpi_fifo->xi = kvzalloc_node(size, GFP_KERNEL, numa); if (!xdpi_fifo->xi) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index aab7059bf6e97..244cfd4709035 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -245,12 +245,20 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns, mlx5_lag_is_shared_fdb(dev) && mlx5_lag_is_master(dev)) { struct mlx5_core_dev *peer_dev; - int i; + int i, j; mlx5_lag_for_each_peer_mdev(dev, peer_dev, i) { err = mlx5_cmd_set_slave_root_fdb(dev, peer_dev, !disconnect, (!disconnect) ? 
ft->id : 0); if (err && !disconnect) { + mlx5_lag_for_each_peer_mdev(dev, peer_dev, j) { + if (j < i) + mlx5_cmd_set_slave_root_fdb(dev, peer_dev, 1, + ns->root_ft->id); + else + break; + } + MLX5_SET(set_flow_table_root_in, in, op_mod, 0); MLX5_SET(set_flow_table_root_in, in, table_id, ns->root_ft->id); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c index f0b2963ebac32..973de2adc9430 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c @@ -32,8 +32,8 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = { MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8), MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2), MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6), - MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_MSB, 0x18, 17, 3), - MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_LSB, 0x18, 20, 8), + MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_MSB, 0x18, 17, 4), + MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_LSB, 0x18, 21, 8), MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_96_127, 0x20, 4), MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_64_95, 0x24, 4), MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_32_63, 0x28, 4), diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index c968309657dd1..51eea1f0529c8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -517,11 +517,15 @@ static void mlxsw_pci_skb_cb_ts_set(struct mlxsw_pci *mlxsw_pci, struct sk_buff *skb, enum mlxsw_pci_cqe_v cqe_v, char *cqe) { + u8 ts_type; + if (cqe_v != MLXSW_PCI_CQE_V2) return; - if (mlxsw_pci_cqe2_time_stamp_type_get(cqe) != - MLXSW_PCI_CQE_TIME_STAMP_TYPE_UTC) + ts_type = mlxsw_pci_cqe2_time_stamp_type_get(cqe); + + if (ts_type != MLXSW_PCI_CQE_TIME_STAMP_TYPE_UTC && + ts_type != MLXSW_PCI_CQE_TIME_STAMP_TYPE_MIRROR_UTC) return; mlxsw_skb_cb(skb)->cqe_ts.sec = mlxsw_pci_cqe2_time_stamp_sec_get(cqe); diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 8165bf31a99ae..17160e867befb 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -97,14 +97,6 @@ MLXSW_ITEM32(reg, sspr, m, 0x00, 31, 1); */ MLXSW_ITEM32_LP(reg, sspr, 0x00, 16, 0x00, 12); -/* reg_sspr_sub_port - * Virtual port within the physical port. - * Should be set to 0 when virtual ports are not enabled on the port. - * - * Access: RW - */ -MLXSW_ITEM32(reg, sspr, sub_port, 0x00, 8, 8); - /* reg_sspr_system_port * Unique identifier within the stacking domain that represents all the ports * that are available in the system (external ports). 
@@ -120,7 +112,6 @@ static inline void mlxsw_reg_sspr_pack(char *payload, u16 local_port) MLXSW_REG_ZERO(sspr, payload); mlxsw_reg_sspr_m_set(payload, 1); mlxsw_reg_sspr_local_port_set(payload, local_port); - mlxsw_reg_sspr_sub_port_set(payload, 0); mlxsw_reg_sspr_system_port_set(payload, local_port); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c index e4f4cded2b6f9..b1178b7a7f51a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c @@ -193,7 +193,7 @@ mlxsw_sp2_mr_tcam_rule_parse(struct mlxsw_sp_acl_rule *rule, key->vrid, GENMASK(7, 0)); mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB, - key->vrid >> 8, GENMASK(2, 0)); + key->vrid >> 8, GENMASK(3, 0)); switch (key->proto) { case MLXSW_SP_L3_PROTO_IPV4: return mlxsw_sp2_mr_tcam_rule_parse4(rulei, key); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c index 4dea39f2b304b..ae2d6f12b7992 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c @@ -171,7 +171,7 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_2[] = { static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_4[] = { MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_LSB, 0x04, 24, 8), - MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_MSB, 0x00, 0, 3), + MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER_MSB, 0x00, 0, 3, 0, true), }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = { @@ -321,7 +321,7 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5b[] = { static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_4b[] = { MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_LSB, 0x04, 13, 8), - MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER_MSB, 0x04, 21, 4, 0, true), + MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_MSB, 0x04, 21, 4), }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2b[] = { diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 4b004a7281903..99df00c30b8c6 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -176,6 +176,15 @@ static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param) } #endif +static int __maybe_unused qede_suspend(struct device *dev) +{ + dev_info(dev, "Device does not support suspend operation\n"); + + return -EOPNOTSUPP; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(qede_pm_ops, qede_suspend, NULL); + static const struct pci_error_handlers qede_err_handler = { .error_detected = qede_io_error_detected, }; @@ -190,6 +199,7 @@ static struct pci_driver qede_pci_driver = { .sriov_configure = qede_sriov_configure, #endif .err_handler = &qede_err_handler, + .driver.pm = &qede_pm_ops, }; static struct qed_eth_cb_ops qede_ll_ops = { diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index 7adde9639c8ab..35d8e9811998d 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -1194,7 +1194,7 @@ int ef100_probe_netdev_pf(struct efx_nic *efx) net_dev->features |= NETIF_F_HW_TC; efx->fixed_features |= NETIF_F_HW_TC; } - return rc; + return 0; } int ef100_probe_vf(struct efx_nic *efx) diff --git a/drivers/net/ethernet/sfc/falcon/selftest.c 
b/drivers/net/ethernet/sfc/falcon/selftest.c index cf1d67b6d86db..c3dc88e6c26cf 100644 --- a/drivers/net/ethernet/sfc/falcon/selftest.c +++ b/drivers/net/ethernet/sfc/falcon/selftest.c @@ -428,7 +428,7 @@ static int ef4_begin_loopback(struct ef4_tx_queue *tx_queue) for (i = 0; i < state->packet_count; i++) { /* Allocate an skb, holding an extra reference for * transmit completion counting */ - skb = alloc_skb(EF4_LOOPBACK_PAYLOAD_LEN, GFP_KERNEL); + skb = alloc_skb(sizeof(state->payload), GFP_KERNEL); if (!skb) return -ENOMEM; state->skbs[i] = skb; diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c index 19a0b8584afbb..563c1e317ce99 100644 --- a/drivers/net/ethernet/sfc/selftest.c +++ b/drivers/net/ethernet/sfc/selftest.c @@ -426,7 +426,7 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue) for (i = 0; i < state->packet_count; i++) { /* Allocate an skb, holding an extra reference for * transmit completion counting */ - skb = alloc_skb(EFX_LOOPBACK_PAYLOAD_LEN, GFP_KERNEL); + skb = alloc_skb(sizeof(state->payload), GFP_KERNEL); if (!skb) return -ENOMEM; state->skbs[i] = skb; diff --git a/drivers/net/ethernet/sfc/siena/selftest.c b/drivers/net/ethernet/sfc/siena/selftest.c index b55fd33469727..526da43d4b617 100644 --- a/drivers/net/ethernet/sfc/siena/selftest.c +++ b/drivers/net/ethernet/sfc/siena/selftest.c @@ -426,7 +426,7 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue) for (i = 0; i < state->packet_count; i++) { /* Allocate an skb, holding an extra reference for * transmit completion counting */ - skb = alloc_skb(EFX_LOOPBACK_PAYLOAD_LEN, GFP_KERNEL); + skb = alloc_skb(sizeof(state->payload), GFP_KERNEL); if (!skb) return -ENOMEM; state->skbs[i] = skb; diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c index 15ebd39739224..fe268b6c1cacf 100644 --- a/drivers/net/ethernet/sfc/tc.c +++ b/drivers/net/ethernet/sfc/tc.c @@ -1657,10 +1657,10 @@ int efx_init_tc(struct efx_nic *efx) rc = efx_tc_configure_fallback_acts_reps(efx); if (rc) return rc; - efx->tc->up = true; rc = flow_indr_dev_register(efx_tc_indr_setup_cb, efx); if (rc) return rc; + efx->tc->up = true; return 0; } diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index b15dd9a3ad540..1b55928e89b8a 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -748,7 +748,8 @@ static int ipvlan_device_event(struct notifier_block *unused, write_pnet(&port->pnet, newnet); - ipvlan_migrate_l3s_hook(oldnet, newnet); + if (port->mode == IPVLAN_MODE_L3S) + ipvlan_migrate_l3s_hook(oldnet, newnet); break; } case NETDEV_UNREGISTER: diff --git a/drivers/net/mdio/mdio-bitbang.c b/drivers/net/mdio/mdio-bitbang.c index b83932562be21..81b7748c10ce0 100644 --- a/drivers/net/mdio/mdio-bitbang.c +++ b/drivers/net/mdio/mdio-bitbang.c @@ -186,7 +186,7 @@ int mdiobb_read_c45(struct mii_bus *bus, int phy, int devad, int reg) struct mdiobb_ctrl *ctrl = bus->priv; mdiobb_cmd_addr(ctrl, phy, devad, reg); - mdiobb_cmd(ctrl, MDIO_C45_READ, phy, reg); + mdiobb_cmd(ctrl, MDIO_C45_READ, phy, devad); return mdiobb_read_common(bus, phy); } @@ -222,7 +222,7 @@ int mdiobb_write_c45(struct mii_bus *bus, int phy, int devad, int reg, u16 val) struct mdiobb_ctrl *ctrl = bus->priv; mdiobb_cmd_addr(ctrl, phy, devad, reg); - mdiobb_cmd(ctrl, MDIO_C45_WRITE, phy, reg); + mdiobb_cmd(ctrl, MDIO_C45_WRITE, phy, devad); return mdiobb_write_common(bus, val); } diff --git a/drivers/net/pcs/pcs-rzn1-miic.c b/drivers/net/pcs/pcs-rzn1-miic.c 
index 323bec5e57f83..3560991690038 100644 --- a/drivers/net/pcs/pcs-rzn1-miic.c +++ b/drivers/net/pcs/pcs-rzn1-miic.c @@ -313,15 +313,21 @@ struct phylink_pcs *miic_create(struct device *dev, struct device_node *np) pdev = of_find_device_by_node(pcs_np); of_node_put(pcs_np); - if (!pdev || !platform_get_drvdata(pdev)) + if (!pdev || !platform_get_drvdata(pdev)) { + if (pdev) + put_device(&pdev->dev); return ERR_PTR(-EPROBE_DEFER); + } miic_port = kzalloc(sizeof(*miic_port), GFP_KERNEL); - if (!miic_port) + if (!miic_port) { + put_device(&pdev->dev); return ERR_PTR(-ENOMEM); + } miic = platform_get_drvdata(pdev); device_link_add(dev, miic->dev, DL_FLAG_AUTOREMOVE_CONSUMER); + put_device(&pdev->dev); miic_port->miic = miic; miic_port->port = port - 1; diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 59cae0d808aa2..04b2e6eeb1952 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -542,6 +542,17 @@ static int bcm54xx_resume(struct phy_device *phydev) return bcm54xx_config_init(phydev); } +static int bcm54810_read_mmd(struct phy_device *phydev, int devnum, u16 regnum) +{ + return -EOPNOTSUPP; +} + +static int bcm54810_write_mmd(struct phy_device *phydev, int devnum, u16 regnum, + u16 val) +{ + return -EOPNOTSUPP; +} + static int bcm54811_config_init(struct phy_device *phydev) { int err, reg; @@ -1103,6 +1114,8 @@ static struct phy_driver broadcom_drivers[] = { .get_strings = bcm_phy_get_strings, .get_stats = bcm54xx_get_stats, .probe = bcm54xx_phy_probe, + .read_mmd = bcm54810_read_mmd, + .write_mmd = bcm54810_write_mmd, .config_init = bcm54xx_config_init, .config_aneg = bcm5481_config_aneg, .config_intr = bcm_phy_config_intr, diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index bdf00b2b2c1dc..a9ecfdd19624f 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -1184,9 +1184,11 @@ void phy_stop_machine(struct phy_device *phydev) static void phy_process_error(struct phy_device *phydev) { - mutex_lock(&phydev->lock); + /* phydev->lock must be held for the state change to be safe */ + if (!mutex_is_locked(&phydev->lock)) + phydev_err(phydev, "PHY-device data unsafe context\n"); + phydev->state = PHY_ERROR; - mutex_unlock(&phydev->lock); phy_trigger_machine(phydev); } @@ -1195,7 +1197,9 @@ static void phy_error_precise(struct phy_device *phydev, const void *func, int err) { WARN(1, "%pS: returned: %d\n", func, err); + mutex_lock(&phydev->lock); phy_process_error(phydev); + mutex_unlock(&phydev->lock); } /** @@ -1204,8 +1208,7 @@ static void phy_error_precise(struct phy_device *phydev, * * Moves the PHY to the ERROR state in response to a read * or write error, and tells the controller the link is down. - * Must not be called from interrupt context, or while the - * phydev->lock is held. + * Must be called with phydev->lock held. */ void phy_error(struct phy_device *phydev) { diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 61921d4dbb13a..c7cf61fe41cf7 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -3216,6 +3216,8 @@ static int phy_probe(struct device *dev) goto out; } + phy_disable_interrupts(phydev); + /* Start out supporting everything. 
Eventually, * a controller will attach, and may modify one * or both of these values @@ -3333,16 +3335,6 @@ static int phy_remove(struct device *dev) return 0; } -static void phy_shutdown(struct device *dev) -{ - struct phy_device *phydev = to_phy_device(dev); - - if (phydev->state == PHY_READY || !phydev->attached_dev) - return; - - phy_disable_interrupts(phydev); -} - /** * phy_driver_register - register a phy_driver with the PHY layer * @new_driver: new phy_driver to register @@ -3376,7 +3368,6 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner) new_driver->mdiodrv.driver.bus = &mdio_bus_type; new_driver->mdiodrv.driver.probe = phy_probe; new_driver->mdiodrv.driver.remove = phy_remove; - new_driver->mdiodrv.driver.shutdown = phy_shutdown; new_driver->mdiodrv.driver.owner = owner; new_driver->mdiodrv.driver.probe_type = PROBE_FORCE_SYNCHRONOUS; diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index e8dd47bffe435..208a9393c2dfd 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -258,6 +258,16 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, switch (id->base.extended_cc) { case SFF8024_ECC_UNSPEC: break; + case SFF8024_ECC_100G_25GAUI_C2M_AOC: + if (br_min <= 28000 && br_max >= 25000) { + /* 25GBASE-R, possibly with FEC */ + __set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces); + /* There is currently no link mode for 25000base + * with unspecified range, reuse SR. + */ + phylink_set(modes, 25000baseSR_Full); + } + break; case SFF8024_ECC_100GBASE_SR4_25GBASE_SR: phylink_set(modes, 100000baseSR4_Full); phylink_set(modes, 25000baseSR_Full); diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index d3dc22509ea58..382756c3fb837 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -2200,7 +2200,9 @@ static void team_setup(struct net_device *dev) dev->hw_features = TEAM_VLAN_FEATURES | NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER; + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_STAG_FILTER; dev->hw_features |= NETIF_F_GSO_ENCAP_ALL; dev->features |= dev->hw_features; diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 614f3e3efab09..ef8eacb596f73 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -1081,8 +1081,9 @@ static int __veth_napi_enable_range(struct net_device *dev, int start, int end) err_xdp_ring: for (i--; i >= start; i--) ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free); + i = end; err_page_pool: - for (i = start; i < end; i++) { + for (i--; i >= start; i--) { page_pool_destroy(priv->rq[i].page_pool); priv->rq[i].page_pool = NULL; } @@ -1860,10 +1861,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, nla_peer = data[VETH_INFO_PEER]; ifmp = nla_data(nla_peer); - err = rtnl_nla_parse_ifla(peer_tb, - nla_data(nla_peer) + sizeof(struct ifinfomsg), - nla_len(nla_peer) - sizeof(struct ifinfomsg), - NULL); + err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack); if (err < 0) return err; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 1270c8d23463f..8e9f4cfe941f6 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -2761,7 +2761,7 @@ static void virtnet_init_default_rss(struct virtnet_info *vi) vi->ctrl->rss.indirection_table[i] = indir_val; } - vi->ctrl->rss.max_tx_vq = vi->curr_queue_pairs; + vi->ctrl->rss.max_tx_vq = vi->has_rss ? 
vi->curr_queue_pairs : 0; vi->ctrl->rss.hash_key_length = vi->rss_key_size; netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size); @@ -4219,8 +4219,6 @@ static int virtnet_probe(struct virtio_device *vdev) if (vi->has_rss || vi->has_rss_hash_report) virtnet_init_default_rss(vi); - _virtnet_set_queues(vi, vi->curr_queue_pairs); - /* serialize netdev register + virtio_device_ready() with ndo_open() */ rtnl_lock(); @@ -4233,6 +4231,8 @@ static int virtnet_probe(struct virtio_device *vdev) virtio_device_ready(vdev); + _virtnet_set_queues(vi, vi->curr_queue_pairs); + /* a random MAC address has been assigned, notify the device. * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is not there * because many devices work fine without getting MAC explicitly diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig index b20409f8c13ab..20971304fdef4 100644 --- a/drivers/net/wireless/intel/iwlwifi/Kconfig +++ b/drivers/net/wireless/intel/iwlwifi/Kconfig @@ -66,6 +66,7 @@ config IWLMVM tristate "Intel Wireless WiFi MVM Firmware support" select WANT_DEV_COREDUMP depends on MAC80211 + depends on PTP_1588_CLOCK_OPTIONAL help This is the driver that supports the MVM firmware. The list of the devices that use this firmware is available here: diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c index e311d406b1705..4999636eaa926 100644 --- a/drivers/of/dynamic.c +++ b/drivers/of/dynamic.c @@ -63,15 +63,14 @@ int of_reconfig_notifier_unregister(struct notifier_block *nb) } EXPORT_SYMBOL_GPL(of_reconfig_notifier_unregister); -#ifdef DEBUG -const char *action_names[] = { +static const char *action_names[] = { + [0] = "INVALID", [OF_RECONFIG_ATTACH_NODE] = "ATTACH_NODE", [OF_RECONFIG_DETACH_NODE] = "DETACH_NODE", [OF_RECONFIG_ADD_PROPERTY] = "ADD_PROPERTY", [OF_RECONFIG_REMOVE_PROPERTY] = "REMOVE_PROPERTY", [OF_RECONFIG_UPDATE_PROPERTY] = "UPDATE_PROPERTY", }; -#endif int of_reconfig_notify(unsigned long action, struct of_reconfig_data *p) { @@ -620,21 +619,9 @@ static int __of_changeset_entry_apply(struct of_changeset_entry *ce) } ret = __of_add_property(ce->np, ce->prop); - if (ret) { - pr_err("changeset: add_property failed @%pOF/%s\n", - ce->np, - ce->prop->name); - break; - } break; case OF_RECONFIG_REMOVE_PROPERTY: ret = __of_remove_property(ce->np, ce->prop); - if (ret) { - pr_err("changeset: remove_property failed @%pOF/%s\n", - ce->np, - ce->prop->name); - break; - } break; case OF_RECONFIG_UPDATE_PROPERTY: @@ -648,20 +635,17 @@ static int __of_changeset_entry_apply(struct of_changeset_entry *ce) } ret = __of_update_property(ce->np, ce->prop, &old_prop); - if (ret) { - pr_err("changeset: update_property failed @%pOF/%s\n", - ce->np, - ce->prop->name); - break; - } break; default: ret = -EINVAL; } raw_spin_unlock_irqrestore(&devtree_lock, flags); - if (ret) + if (ret) { + pr_err("changeset: apply failed: %-15s %pOF:%s\n", + action_names[ce->action], ce->np, ce->prop->name); return ret; + } switch (ce->action) { case OF_RECONFIG_ATTACH_NODE: @@ -947,6 +931,9 @@ int of_changeset_action(struct of_changeset *ocs, unsigned long action, if (!ce) return -ENOMEM; + if (WARN_ON(action >= ARRAY_SIZE(action_names))) + return -EINVAL; + /* get a reference to the node */ ce->action = action; ce->np = of_node_get(np); diff --git a/drivers/of/kexec.c b/drivers/of/kexec.c index f26d2ba8a3715..68278340cecfe 100644 --- a/drivers/of/kexec.c +++ b/drivers/of/kexec.c @@ -184,7 +184,8 @@ int __init ima_free_kexec_buffer(void) if (ret) return ret; - return 
memblock_phys_free(addr, size); + memblock_free_late(addr, size); + return 0; } #endif diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 0c3475e7d2ffb..6a557eb866d0a 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -141,7 +141,7 @@ struct platform_device *of_device_alloc(struct device_node *np, } /* setup generic device info */ - device_set_node(&dev->dev, of_fwnode_handle(np)); + device_set_node(&dev->dev, of_fwnode_handle(of_node_get(np))); dev->dev.parent = parent ? : &platform_bus; if (bus_id) @@ -239,7 +239,7 @@ static struct amba_device *of_amba_device_create(struct device_node *node, dev->dev.dma_mask = &dev->dev.coherent_dma_mask; /* setup generic device info */ - device_set_node(&dev->dev, of_fwnode_handle(node)); + device_set_node(&dev->dev, of_fwnode_handle(of_node_get(node))); dev->dev.parent = parent ? : &platform_bus; dev->dev.platform_data = platform_data; if (bus_id) diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index a406a12eb2080..b545fcb22536d 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -664,12 +664,12 @@ static void __init of_unittest_parse_phandle_with_args_map(void) memset(&args, 0, sizeof(args)); EXPECT_BEGIN(KERN_INFO, - "OF: /testcase-data/phandle-tests/consumer-b: could not find phandle"); + "OF: /testcase-data/phandle-tests/consumer-b: could not find phandle 12345678"); rc = of_parse_phandle_with_args_map(np, "phandle-list-bad-phandle", "phandle", 0, &args); EXPECT_END(KERN_INFO, - "OF: /testcase-data/phandle-tests/consumer-b: could not find phandle"); + "OF: /testcase-data/phandle-tests/consumer-b: could not find phandle 12345678"); unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index 4a8c1b57a90d6..4dff656af3ad2 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -862,6 +862,33 @@ static const struct pinconf_ops amd_pinconf_ops = { .pin_config_group_set = amd_pinconf_group_set, }; +static void amd_gpio_irq_init(struct amd_gpio *gpio_dev) +{ + struct pinctrl_desc *desc = gpio_dev->pctrl->desc; + unsigned long flags; + u32 pin_reg, mask; + int i; + + mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3) | + BIT(WAKE_CNTRL_OFF_S4); + + for (i = 0; i < desc->npins; i++) { + int pin = desc->pins[i].number; + const struct pin_desc *pd = pin_desc_get(gpio_dev->pctrl, pin); + + if (!pd) + continue; + + raw_spin_lock_irqsave(&gpio_dev->lock, flags); + + pin_reg = readl(gpio_dev->base + pin * 4); + pin_reg &= ~mask; + writel(pin_reg, gpio_dev->base + pin * 4); + + raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); + } +} + #ifdef CONFIG_PM_SLEEP static bool amd_gpio_should_save(struct amd_gpio *gpio_dev, unsigned int pin) { @@ -1099,6 +1126,9 @@ static int amd_gpio_probe(struct platform_device *pdev) return PTR_ERR(gpio_dev->pctrl); } + /* Disable and mask interrupts */ + amd_gpio_irq_init(gpio_dev); + girq = &gpio_dev->gc.irq; gpio_irq_chip_set_chip(girq, &amd_gpio_irqchip); /* This will let us handle the parent IRQ in the driver */ diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index 2585ef2b27935..115b83e2d8e65 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -1038,6 +1038,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct msm_pinctrl *pctrl = gpiochip_get_data(gc); const struct msm_pingroup *g; + u32 
intr_target_mask = GENMASK(2, 0); unsigned long flags; bool was_enabled; u32 val; @@ -1074,13 +1075,15 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) * With intr_target_use_scm interrupts are routed to * application cpu using scm calls. */ + if (g->intr_target_width) + intr_target_mask = GENMASK(g->intr_target_width - 1, 0); + if (pctrl->intr_target_use_scm) { u32 addr = pctrl->phys_base[0] + g->intr_target_reg; int ret; qcom_scm_io_readl(addr, &val); - - val &= ~(7 << g->intr_target_bit); + val &= ~(intr_target_mask << g->intr_target_bit); val |= g->intr_target_kpss_val << g->intr_target_bit; ret = qcom_scm_io_writel(addr, val); @@ -1090,7 +1093,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) d->hwirq); } else { val = msm_readl_intr_target(pctrl, g); - val &= ~(7 << g->intr_target_bit); + val &= ~(intr_target_mask << g->intr_target_bit); val |= g->intr_target_kpss_val << g->intr_target_bit; msm_writel_intr_target(val, pctrl, g); } diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h index 5e4410bed8237..1d2f2e904da19 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.h +++ b/drivers/pinctrl/qcom/pinctrl-msm.h @@ -59,6 +59,7 @@ struct pinctrl_pin_desc; * @intr_status_bit: Offset in @intr_status_reg for reading and acking the interrupt * status. * @intr_target_bit: Offset in @intr_target_reg for configuring the interrupt routing. + * @intr_target_width: Number of bits used for specifying interrupt routing target. * @intr_target_kpss_val: Value in @intr_target_bit for specifying that the interrupt from * this gpio should get routed to the KPSS processor. * @intr_raw_status_bit: Offset in @intr_cfg_reg for the raw status bit. @@ -100,6 +101,7 @@ struct msm_pingroup { unsigned intr_ack_high:1; unsigned intr_target_bit:5; + unsigned intr_target_width:5; unsigned intr_target_kpss_val:5; unsigned intr_raw_status_bit:5; unsigned intr_polarity_bit:5; diff --git a/drivers/pinctrl/qcom/pinctrl-sa8775p.c b/drivers/pinctrl/qcom/pinctrl-sa8775p.c index 8a5cd15512b97..8fdea25d8d67e 100644 --- a/drivers/pinctrl/qcom/pinctrl-sa8775p.c +++ b/drivers/pinctrl/qcom/pinctrl-sa8775p.c @@ -46,6 +46,7 @@ .intr_enable_bit = 0, \ .intr_status_bit = 0, \ .intr_target_bit = 5, \ + .intr_target_width = 4, \ .intr_target_kpss_val = 3, \ .intr_raw_status_bit = 4, \ .intr_polarity_bit = 1, \ diff --git a/drivers/pinctrl/renesas/pinctrl-rza2.c b/drivers/pinctrl/renesas/pinctrl-rza2.c index 40b1326a10776..5591ddf16fdfd 100644 --- a/drivers/pinctrl/renesas/pinctrl-rza2.c +++ b/drivers/pinctrl/renesas/pinctrl-rza2.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -46,6 +47,7 @@ struct rza2_pinctrl_priv { struct pinctrl_dev *pctl; struct pinctrl_gpio_range gpio_range; int npins; + struct mutex mutex; /* serialize adding groups and functions */ }; #define RZA2_PDR(port) (0x0000 + (port) * 2) /* Direction 16-bit */ @@ -358,10 +360,14 @@ static int rza2_dt_node_to_map(struct pinctrl_dev *pctldev, psel_val[i] = MUX_FUNC(value); } + mutex_lock(&priv->mutex); + /* Register a single pin group listing all the pins we read from DT */ gsel = pinctrl_generic_add_group(pctldev, np->name, pins, npins, NULL); - if (gsel < 0) - return gsel; + if (gsel < 0) { + ret = gsel; + goto unlock; + } /* * Register a single group function where the 'data' is an array PSEL @@ -390,6 +396,8 @@ static int rza2_dt_node_to_map(struct pinctrl_dev *pctldev, (*map)->data.mux.function = np->name; *num_maps = 1; + mutex_unlock(&priv->mutex); + return 
0; remove_function: @@ -398,6 +406,9 @@ static int rza2_dt_node_to_map(struct pinctrl_dev *pctldev, remove_group: pinctrl_generic_remove_group(pctldev, gsel); +unlock: + mutex_unlock(&priv->mutex); + dev_err(priv->dev, "Unable to parse DT node %s\n", np->name); return ret; @@ -473,6 +484,8 @@ static int rza2_pinctrl_probe(struct platform_device *pdev) if (IS_ERR(priv->base)) return PTR_ERR(priv->base); + mutex_init(&priv->mutex); + platform_set_drvdata(pdev, priv); priv->npins = (int)(uintptr_t)of_device_get_match_data(&pdev->dev) * diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c index b53d26167da52..6e8a76556e238 100644 --- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c +++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -149,10 +150,11 @@ struct rzg2l_pinctrl { struct gpio_chip gpio_chip; struct pinctrl_gpio_range gpio_range; DECLARE_BITMAP(tint_slot, RZG2L_TINT_MAX_INTERRUPT); - spinlock_t bitmap_lock; + spinlock_t bitmap_lock; /* protect tint_slot bitmap */ unsigned int hwirq[RZG2L_TINT_MAX_INTERRUPT]; - spinlock_t lock; + spinlock_t lock; /* lock read/write registers */ + struct mutex mutex; /* serialize adding groups and functions */ }; static const unsigned int iolh_groupa_mA[] = { 2, 4, 8, 12 }; @@ -362,11 +364,13 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev, name = np->name; } + mutex_lock(&pctrl->mutex); + /* Register a single pin group listing all the pins we read from DT */ gsel = pinctrl_generic_add_group(pctldev, name, pins, num_pinmux, NULL); if (gsel < 0) { ret = gsel; - goto done; + goto unlock; } /* @@ -380,6 +384,8 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev, goto remove_group; } + mutex_unlock(&pctrl->mutex); + maps[idx].type = PIN_MAP_TYPE_MUX_GROUP; maps[idx].data.mux.group = name; maps[idx].data.mux.function = name; @@ -391,6 +397,8 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev, remove_group: pinctrl_generic_remove_group(pctldev, gsel); +unlock: + mutex_unlock(&pctrl->mutex); done: *index = idx; kfree(configs); @@ -1509,6 +1517,7 @@ static int rzg2l_pinctrl_probe(struct platform_device *pdev) spin_lock_init(&pctrl->lock); spin_lock_init(&pctrl->bitmap_lock); + mutex_init(&pctrl->mutex); platform_set_drvdata(pdev, pctrl); diff --git a/drivers/pinctrl/renesas/pinctrl-rzv2m.c b/drivers/pinctrl/renesas/pinctrl-rzv2m.c index 35b23c1a5684d..9146101ea9e2f 100644 --- a/drivers/pinctrl/renesas/pinctrl-rzv2m.c +++ b/drivers/pinctrl/renesas/pinctrl-rzv2m.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -123,7 +124,8 @@ struct rzv2m_pinctrl { struct gpio_chip gpio_chip; struct pinctrl_gpio_range gpio_range; - spinlock_t lock; + spinlock_t lock; /* lock read/write registers */ + struct mutex mutex; /* serialize adding groups and functions */ }; static const unsigned int drv_1_8V_group2_uA[] = { 1800, 3800, 7800, 11000 }; @@ -322,11 +324,13 @@ static int rzv2m_dt_subnode_to_map(struct pinctrl_dev *pctldev, name = np->name; } + mutex_lock(&pctrl->mutex); + /* Register a single pin group listing all the pins we read from DT */ gsel = pinctrl_generic_add_group(pctldev, name, pins, num_pinmux, NULL); if (gsel < 0) { ret = gsel; - goto done; + goto unlock; } /* @@ -340,6 +344,8 @@ static int rzv2m_dt_subnode_to_map(struct pinctrl_dev *pctldev, goto remove_group; } + mutex_unlock(&pctrl->mutex); + maps[idx].type = PIN_MAP_TYPE_MUX_GROUP; maps[idx].data.mux.group = name; 
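/*
 * Editor's note -- illustrative sketch, not part of the patch text above or
 * below.  The rza2, rzg2l and rzv2m hunks all make the same fix: a per-driver
 * mutex is taken around pinctrl_generic_add_group() and
 * pinmux_generic_add_function() so that concurrent .dt_node_to_map() calls
 * cannot race while registering groups and functions, with the group removed
 * again on the error path.  A minimal sketch of that pattern follows; the
 * my_pinctrl / my_add_group_and_function names are invented for illustration
 * only, and the generic helpers are the in-kernel ones declared in the
 * pinctrl core headers (prototypes simplified here).
 */
#include <linux/mutex.h>
#include <linux/pinctrl/pinctrl.h>
#include "core.h"	/* pinctrl_generic_add_group()/remove_group() (assumed location) */
#include "pinmux.h"	/* pinmux_generic_add_function() (assumed location) */

struct my_pinctrl {
	struct pinctrl_dev *pctl;
	struct mutex mutex;	/* serialize adding groups and functions */
};

static int my_add_group_and_function(struct my_pinctrl *pctrl,
				     const char *name, int *pins, int npins,
				     void *func_data)
{
	const char * const groups[] = { name };
	int gsel, fsel, ret = 0;

	mutex_lock(&pctrl->mutex);

	gsel = pinctrl_generic_add_group(pctrl->pctl, name, pins, npins, NULL);
	if (gsel < 0) {
		ret = gsel;
		goto unlock;
	}

	fsel = pinmux_generic_add_function(pctrl->pctl, name, groups, 1,
					   func_data);
	if (fsel < 0) {
		/* roll back the group so a later retry starts clean */
		pinctrl_generic_remove_group(pctrl->pctl, gsel);
		ret = fsel;
	}

unlock:
	mutex_unlock(&pctrl->mutex);
	return ret;
}
/* End of editor's sketch; the rzv2m hunk continues below. */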
maps[idx].data.mux.function = name; @@ -351,6 +357,8 @@ static int rzv2m_dt_subnode_to_map(struct pinctrl_dev *pctldev, remove_group: pinctrl_generic_remove_group(pctldev, gsel); +unlock: + mutex_unlock(&pctrl->mutex); done: *index = idx; kfree(configs); @@ -1071,6 +1079,7 @@ static int rzv2m_pinctrl_probe(struct platform_device *pdev) } spin_lock_init(&pctrl->lock); + mutex_init(&pctrl->mutex); platform_set_drvdata(pdev, pctrl); diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c index a79318e90a139..b600b77d91ef2 100644 --- a/drivers/platform/mellanox/mlxbf-tmfifo.c +++ b/drivers/platform/mellanox/mlxbf-tmfifo.c @@ -887,6 +887,7 @@ static bool mlxbf_tmfifo_virtio_notify(struct virtqueue *vq) tm_vdev = fifo->vdev[VIRTIO_ID_CONSOLE]; mlxbf_tmfifo_console_output(tm_vdev, vring); spin_unlock_irqrestore(&fifo->spin_lock[0], flags); + set_bit(MLXBF_TM_TX_LWM_IRQ, &fifo->pend_events); } else if (test_and_set_bit(MLXBF_TM_TX_LWM_IRQ, &fifo->pend_events)) { return true; diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index d2fee9a3e2390..6d9297c1d96c1 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -1049,6 +1049,11 @@ static const struct key_entry ideapad_keymap[] = { { KE_IGNORE, 0x03 | IDEAPAD_WMI_KEY }, /* Customizable Lenovo Hotkey ("star" with 'S' inside) */ { KE_KEY, 0x01 | IDEAPAD_WMI_KEY, { KEY_FAVORITES } }, + { KE_KEY, 0x04 | IDEAPAD_WMI_KEY, { KEY_SELECTIVE_SCREENSHOT } }, + /* Lenovo Support */ + { KE_KEY, 0x07 | IDEAPAD_WMI_KEY, { KEY_HELP } }, + { KE_KEY, 0x0e | IDEAPAD_WMI_KEY, { KEY_PICKUP_PHONE } }, + { KE_KEY, 0x0f | IDEAPAD_WMI_KEY, { KEY_HANGUP_PHONE } }, /* Dark mode toggle */ { KE_KEY, 0x13 | IDEAPAD_WMI_KEY, { KEY_PROG1 } }, /* Sound profile switch */ diff --git a/drivers/platform/x86/lenovo-ymc.c b/drivers/platform/x86/lenovo-ymc.c index f360370d50027..e1fbc35504d49 100644 --- a/drivers/platform/x86/lenovo-ymc.c +++ b/drivers/platform/x86/lenovo-ymc.c @@ -36,6 +36,13 @@ static const struct dmi_system_id ec_trigger_quirk_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "82QF"), }, }, + { + /* Lenovo Yoga 7 14ACN6 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "82N7"), + }, + }, { } }; diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c index dfd5ec9f75c90..a0621665a6d22 100644 --- a/drivers/regulator/da9063-regulator.c +++ b/drivers/regulator/da9063-regulator.c @@ -778,9 +778,6 @@ static int da9063_check_xvp_constraints(struct regulator_config *config) const struct notification_limit *uv_l = &constr->under_voltage_limits; const struct notification_limit *ov_l = &constr->over_voltage_limits; - if (!config->init_data) /* No config in DT, pointers will be invalid */ - return 0; - /* make sure that only one severity is used to clarify if unchanged, enabled or disabled */ if ((!!uv_l->prot + !!uv_l->err + !!uv_l->warn) > 1) { dev_err(config->dev, "%s: at most one voltage monitoring severity allowed!\n", @@ -1031,9 +1028,12 @@ static int da9063_regulator_probe(struct platform_device *pdev) config.of_node = da9063_reg_matches[id].of_node; config.regmap = da9063->regmap; - ret = da9063_check_xvp_constraints(&config); - if (ret) - return ret; + /* Checking constraints requires init_data from DT. 
*/ + if (config.init_data) { + ret = da9063_check_xvp_constraints(&config); + if (ret) + return ret; + } regl->rdev = devm_regulator_register(&pdev->dev, ®l->desc, &config); diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c index f3b280af07737..cd077b7c4aff3 100644 --- a/drivers/regulator/qcom-rpmh-regulator.c +++ b/drivers/regulator/qcom-rpmh-regulator.c @@ -1068,7 +1068,7 @@ static const struct rpmh_vreg_init_data pm8550_vreg_data[] = { RPMH_VREG("ldo9", "ldo%s9", &pmic5_pldo, "vdd-l8-l9"), RPMH_VREG("ldo10", "ldo%s10", &pmic5_nldo515, "vdd-l1-l4-l10"), RPMH_VREG("ldo11", "ldo%s11", &pmic5_nldo515, "vdd-l11"), - RPMH_VREG("ldo12", "ldo%s12", &pmic5_pldo, "vdd-l12"), + RPMH_VREG("ldo12", "ldo%s12", &pmic5_nldo515, "vdd-l12"), RPMH_VREG("ldo13", "ldo%s13", &pmic5_pldo, "vdd-l2-l13-l14"), RPMH_VREG("ldo14", "ldo%s14", &pmic5_pldo, "vdd-l2-l13-l14"), RPMH_VREG("ldo15", "ldo%s15", &pmic5_nldo515, "vdd-l15"), diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c index 711252e52d8e1..95a86e0dfd77a 100644 --- a/drivers/scsi/raid_class.c +++ b/drivers/scsi/raid_class.c @@ -209,54 +209,6 @@ raid_attr_ro_state(level); raid_attr_ro_fn(resync); raid_attr_ro_state_fn(state); -static void raid_component_release(struct device *dev) -{ - struct raid_component *rc = - container_of(dev, struct raid_component, dev); - dev_printk(KERN_ERR, rc->dev.parent, "COMPONENT RELEASE\n"); - put_device(rc->dev.parent); - kfree(rc); -} - -int raid_component_add(struct raid_template *r,struct device *raid_dev, - struct device *component_dev) -{ - struct device *cdev = - attribute_container_find_class_device(&r->raid_attrs.ac, - raid_dev); - struct raid_component *rc; - struct raid_data *rd = dev_get_drvdata(cdev); - int err; - - rc = kzalloc(sizeof(*rc), GFP_KERNEL); - if (!rc) - return -ENOMEM; - - INIT_LIST_HEAD(&rc->node); - device_initialize(&rc->dev); - rc->dev.release = raid_component_release; - rc->dev.parent = get_device(component_dev); - rc->num = rd->component_count++; - - dev_set_name(&rc->dev, "component-%d", rc->num); - list_add_tail(&rc->node, &rd->component_list); - rc->dev.class = &raid_class.class; - err = device_add(&rc->dev); - if (err) - goto err_out; - - return 0; - -err_out: - put_device(&rc->dev); - list_del(&rc->node); - rd->component_count--; - put_device(component_dev); - kfree(rc); - return err; -} -EXPORT_SYMBOL(raid_component_add); - struct raid_template * raid_class_attach(struct raid_function_template *ft) { diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c index e429ad23c396d..4db3ba62fcd36 100644 --- a/drivers/scsi/snic/snic_disc.c +++ b/drivers/scsi/snic/snic_disc.c @@ -303,12 +303,11 @@ snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid) "Snic Tgt: device_add, with err = %d\n", ret); - put_device(&tgt->dev); put_device(&snic->shost->shost_gendev); spin_lock_irqsave(snic->shost->host_lock, flags); list_del(&tgt->list); spin_unlock_irqrestore(snic->shost->host_lock, flags); - kfree(tgt); + put_device(&tgt->dev); tgt = NULL; return tgt; diff --git a/drivers/soc/aspeed/aspeed-socinfo.c b/drivers/soc/aspeed/aspeed-socinfo.c index 1ca140356a084..3f759121dc00a 100644 --- a/drivers/soc/aspeed/aspeed-socinfo.c +++ b/drivers/soc/aspeed/aspeed-socinfo.c @@ -137,6 +137,7 @@ static int __init aspeed_socinfo_init(void) soc_dev = soc_device_register(attrs); if (IS_ERR(soc_dev)) { + kfree(attrs->machine); kfree(attrs->soc_id); kfree(attrs->serial_number); kfree(attrs); diff --git 
a/drivers/soc/aspeed/aspeed-uart-routing.c b/drivers/soc/aspeed/aspeed-uart-routing.c index ef8b24fd18518..59123e1f27acb 100644 --- a/drivers/soc/aspeed/aspeed-uart-routing.c +++ b/drivers/soc/aspeed/aspeed-uart-routing.c @@ -524,7 +524,7 @@ static ssize_t aspeed_uart_routing_store(struct device *dev, struct aspeed_uart_routing_selector *sel = to_routing_selector(attr); int val; - val = match_string(sel->options, -1, buf); + val = __sysfs_match_string(sel->options, -1, buf); if (val < 0) { dev_err(dev, "invalid value \"%s\"\n", buf); return -EINVAL; diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c index 3c9813ad064bc..12c940ba074ab 100644 --- a/drivers/spi/spi-cadence.c +++ b/drivers/spi/spi-cadence.c @@ -317,12 +317,6 @@ static void cdns_spi_process_fifo(struct cdns_spi *xspi, int ntx, int nrx) xspi->rx_bytes -= nrx; while (ntx || nrx) { - /* When xspi in busy condition, bytes may send failed, - * then spi control did't work thoroughly, add one byte delay - */ - if (cdns_spi_read(xspi, CDNS_SPI_ISR) & CDNS_SPI_IXR_TXFULL) - udelay(10); - if (ntx) { if (xspi->txbuf) cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++); @@ -392,6 +386,11 @@ static irqreturn_t cdns_spi_irq(int irq, void *dev_id) if (xspi->tx_bytes) { cdns_spi_process_fifo(xspi, trans_cnt, trans_cnt); } else { + /* Fixed delay due to controller limitation with + * RX_NEMPTY incorrect status + * Xilinx AR:65885 contains more details + */ + udelay(10); cdns_spi_process_fifo(xspi, 0, trans_cnt); cdns_spi_write(xspi, CDNS_SPI_IDR, CDNS_SPI_IXR_DEFAULT); @@ -439,12 +438,18 @@ static int cdns_transfer_one(struct spi_controller *ctlr, cdns_spi_setup_transfer(spi, transfer); } else { /* Set TX empty threshold to half of FIFO depth - * only if TX bytes are more than half FIFO depth. + * only if TX bytes are more than FIFO depth. 
*/ if (xspi->tx_bytes > xspi->tx_fifo_depth) cdns_spi_write(xspi, CDNS_SPI_THLD, xspi->tx_fifo_depth >> 1); } + /* When xspi in busy condition, bytes may send failed, + * then spi control didn't work thoroughly, add one byte delay + */ + if (cdns_spi_read(xspi, CDNS_SPI_ISR) & CDNS_SPI_IXR_TXFULL) + udelay(10); + cdns_spi_process_fifo(xspi, xspi->tx_fifo_depth, 0); spi_transfer_delay_exec(transfer); diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c index d16ee6e54de94..b6d66caba4c02 100644 --- a/drivers/spi/spi-stm32.c +++ b/drivers/spi/spi-stm32.c @@ -1003,9 +1003,9 @@ static int stm32_spi_prepare_msg(struct spi_controller *ctrl, if (spi->cfg->set_number_of_data) { int ret; - ret = spi_split_transfers_maxsize(ctrl, msg, - STM32H7_SPI_TSIZE_MAX, - GFP_KERNEL | GFP_DMA); + ret = spi_split_transfers_maxwords(ctrl, msg, + STM32H7_SPI_TSIZE_MAX, + GFP_KERNEL | GFP_DMA); if (ret) return ret; } diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c index 3f5b1556ece02..fddc633097736 100644 --- a/drivers/spi/spi-sun6i.c +++ b/drivers/spi/spi-sun6i.c @@ -106,6 +106,7 @@ struct sun6i_spi { struct reset_control *rstc; struct completion done; + struct completion dma_rx_done; const u8 *tx_buf; u8 *rx_buf; @@ -200,6 +201,13 @@ static size_t sun6i_spi_max_transfer_size(struct spi_device *spi) return SUN6I_MAX_XFER_SIZE - 1; } +static void sun6i_spi_dma_rx_cb(void *param) +{ + struct sun6i_spi *sspi = param; + + complete(&sspi->dma_rx_done); +} + static int sun6i_spi_prepare_dma(struct sun6i_spi *sspi, struct spi_transfer *tfr) { @@ -211,7 +219,7 @@ static int sun6i_spi_prepare_dma(struct sun6i_spi *sspi, struct dma_slave_config rxconf = { .direction = DMA_DEV_TO_MEM, .src_addr = sspi->dma_addr_rx, - .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, + .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, .src_maxburst = 8, }; @@ -224,6 +232,8 @@ static int sun6i_spi_prepare_dma(struct sun6i_spi *sspi, DMA_PREP_INTERRUPT); if (!rxdesc) return -EINVAL; + rxdesc->callback_param = sspi; + rxdesc->callback = sun6i_spi_dma_rx_cb; } txdesc = NULL; @@ -279,6 +289,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master, return -EINVAL; reinit_completion(&sspi->done); + reinit_completion(&sspi->dma_rx_done); sspi->tx_buf = tfr->tx_buf; sspi->rx_buf = tfr->rx_buf; sspi->len = tfr->len; @@ -479,6 +490,22 @@ static int sun6i_spi_transfer_one(struct spi_master *master, start = jiffies; timeout = wait_for_completion_timeout(&sspi->done, msecs_to_jiffies(tx_time)); + + if (!use_dma) { + sun6i_spi_drain_fifo(sspi); + } else { + if (timeout && rx_len) { + /* + * Even though RX on the peripheral side has finished + * RX DMA might still be in flight + */ + timeout = wait_for_completion_timeout(&sspi->dma_rx_done, + timeout); + if (!timeout) + dev_warn(&master->dev, "RX DMA timeout\n"); + } + } + end = jiffies; if (!timeout) { dev_warn(&master->dev, @@ -506,7 +533,6 @@ static irqreturn_t sun6i_spi_handler(int irq, void *dev_id) /* Transfer complete */ if (status & SUN6I_INT_CTL_TC) { sun6i_spi_write(sspi, SUN6I_INT_STA_REG, SUN6I_INT_CTL_TC); - sun6i_spi_drain_fifo(sspi); complete(&sspi->done); return IRQ_HANDLED; } @@ -665,6 +691,7 @@ static int sun6i_spi_probe(struct platform_device *pdev) } init_completion(&sspi->done); + init_completion(&sspi->dma_rx_done); sspi->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); if (IS_ERR(sspi->rstc)) { diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig index 341abaed4ce2c..069de553127c4 100644 --- a/drivers/tty/Kconfig +++ b/drivers/tty/Kconfig @@ -164,6 
+164,9 @@ config LEGACY_TIOCSTI userspace depends on this functionality to continue operating normally. + Processes which run with CAP_SYS_ADMIN, such as BRLTTY, can + use TIOCSTI even when this is set to N. + This functionality can be changed at runtime with the dev.tty.legacy_tiocsti sysctl. This configuration option sets the default value of the sysctl. diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 1cdefac4dd1b5..739f522cb893c 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -3042,12 +3042,13 @@ static void gsm_error(struct gsm_mux *gsm) static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc) { int i; - struct gsm_dlci *dlci = gsm->dlci[0]; + struct gsm_dlci *dlci; struct gsm_msg *txq, *ntxq; gsm->dead = true; mutex_lock(&gsm->mutex); + dlci = gsm->dlci[0]; if (dlci) { if (disc && dlci->state != DLCI_CLOSED) { gsm_dlci_begin_close(dlci); diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index 914e0e6251bfc..3449f8790e462 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -497,6 +497,7 @@ static struct uart_8250_port *serial8250_setup_port(int index) up = &serial8250_ports[index]; up->port.line = index; + up->port.port_id = index; serial8250_init_port(up); if (!base_ops) @@ -1040,6 +1041,7 @@ int serial8250_register_8250_port(const struct uart_8250_port *up) uart_remove_one_port(&serial8250_reg, &uart->port); uart->port.ctrl_id = up->port.ctrl_id; + uart->port.port_id = up->port.port_id; uart->port.iobase = up->port.iobase; uart->port.membase = up->port.membase; uart->port.irq = up->port.irq; @@ -1202,6 +1204,7 @@ void serial8250_unregister_port(int line) uart->port.flags &= ~UPF_BOOT_AUTOCONF; uart->port.type = PORT_UNKNOWN; uart->port.dev = &serial8250_isa_devs->dev; + uart->port.port_id = line; uart->capabilities = 0; serial8250_init_port(uart); serial8250_apply_quirks(uart); diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 16aeb14201378..483bb552cdc40 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -703,9 +703,6 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep) static void serial8250_clear_IER(struct uart_8250_port *up) { - /* Port locked to synchronize UART_IER access against the console. 
*/ - lockdep_assert_held_once(&up->port.lock); - if (up->capabilities & UART_CAP_UUE) serial_out(up, UART_IER, UART_IER_UUE); else @@ -3278,6 +3275,7 @@ void serial8250_init_port(struct uart_8250_port *up) spin_lock_init(&port->lock); port->ctrl_id = 0; + port->pm = NULL; port->ops = &serial8250_pops; port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE); diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index 4d80fae201779..c569a08b5b199 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -1139,8 +1139,8 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport) unsigned long sr = lpuart32_read(&sport->port, UARTSTAT); if (sr & (UARTSTAT_PE | UARTSTAT_FE)) { - /* Read DR to clear the error flags */ - lpuart32_read(&sport->port, UARTDATA); + /* Clear the error flags */ + lpuart32_write(&sport->port, sr, UARTSTAT); if (sr & UARTSTAT_PE) sport->port.icount.parity++; diff --git a/drivers/tty/serial/serial_base.h b/drivers/tty/serial/serial_base.h index 9faac0ff6b895..c74c548f0db62 100644 --- a/drivers/tty/serial/serial_base.h +++ b/drivers/tty/serial/serial_base.h @@ -16,6 +16,7 @@ struct device; struct serial_ctrl_device { struct device dev; + struct ida port_ida; }; struct serial_port_device { diff --git a/drivers/tty/serial/serial_base_bus.c b/drivers/tty/serial/serial_base_bus.c index 6ff59c89d867d..3dfcf20c4eb68 100644 --- a/drivers/tty/serial/serial_base_bus.c +++ b/drivers/tty/serial/serial_base_bus.c @@ -10,6 +10,7 @@ #include #include +#include #include #include #include @@ -19,11 +20,25 @@ static bool serial_base_initialized; +static const struct device_type serial_ctrl_type = { + .name = "ctrl", +}; + +static const struct device_type serial_port_type = { + .name = "port", +}; + static int serial_base_match(struct device *dev, struct device_driver *drv) { - int len = strlen(drv->name); + if (dev->type == &serial_ctrl_type && + str_has_prefix(drv->name, serial_ctrl_type.name)) + return 1; - return !strncmp(dev_name(dev), drv->name, len); + if (dev->type == &serial_port_type && + str_has_prefix(drv->name, serial_port_type.name)) + return 1; + + return 0; } static struct bus_type serial_base_bus_type = { @@ -48,7 +63,8 @@ static int serial_base_device_init(struct uart_port *port, struct device *parent_dev, const struct device_type *type, void (*release)(struct device *dev), - int id) + unsigned int ctrl_id, + unsigned int port_id) { device_initialize(dev); dev->type = type; @@ -61,12 +77,15 @@ static int serial_base_device_init(struct uart_port *port, return -EPROBE_DEFER; } - return dev_set_name(dev, "%s.%s.%d", type->name, dev_name(port->dev), id); -} + if (type == &serial_ctrl_type) + return dev_set_name(dev, "%s:%d", dev_name(port->dev), ctrl_id); -static const struct device_type serial_ctrl_type = { - .name = "ctrl", -}; + if (type == &serial_port_type) + return dev_set_name(dev, "%s:%d.%d", dev_name(port->dev), + ctrl_id, port_id); + + return -EINVAL; +} static void serial_base_ctrl_release(struct device *dev) { @@ -81,6 +100,7 @@ void serial_base_ctrl_device_remove(struct serial_ctrl_device *ctrl_dev) return; device_del(&ctrl_dev->dev); + put_device(&ctrl_dev->dev); } struct serial_ctrl_device *serial_base_ctrl_add(struct uart_port *port, @@ -93,10 +113,12 @@ struct serial_ctrl_device *serial_base_ctrl_add(struct uart_port *port, if (!ctrl_dev) return ERR_PTR(-ENOMEM); + ida_init(&ctrl_dev->port_ida); + err = serial_base_device_init(port, &ctrl_dev->dev, parent, &serial_ctrl_type, serial_base_ctrl_release, - 
port->ctrl_id); + port->ctrl_id, 0); if (err) goto err_put_device; @@ -112,10 +134,6 @@ struct serial_ctrl_device *serial_base_ctrl_add(struct uart_port *port, return ERR_PTR(err); } -static const struct device_type serial_port_type = { - .name = "port", -}; - static void serial_base_port_release(struct device *dev) { struct serial_port_device *port_dev = to_serial_base_port_device(dev); @@ -127,16 +145,31 @@ struct serial_port_device *serial_base_port_add(struct uart_port *port, struct serial_ctrl_device *ctrl_dev) { struct serial_port_device *port_dev; + int min = 0, max = -1; /* Use -1 for max to apply IDA defaults */ int err; port_dev = kzalloc(sizeof(*port_dev), GFP_KERNEL); if (!port_dev) return ERR_PTR(-ENOMEM); + /* Device driver specified port_id vs automatic assignment? */ + if (port->port_id) { + min = port->port_id; + max = port->port_id; + } + + err = ida_alloc_range(&ctrl_dev->port_ida, min, max, GFP_KERNEL); + if (err < 0) { + kfree(port_dev); + return ERR_PTR(err); + } + + port->port_id = err; + err = serial_base_device_init(port, &port_dev->dev, &ctrl_dev->dev, &serial_port_type, serial_base_port_release, - port->line); + port->ctrl_id, port->port_id); if (err) goto err_put_device; @@ -150,16 +183,25 @@ struct serial_port_device *serial_base_port_add(struct uart_port *port, err_put_device: put_device(&port_dev->dev); + ida_free(&ctrl_dev->port_ida, port->port_id); return ERR_PTR(err); } void serial_base_port_device_remove(struct serial_port_device *port_dev) { + struct serial_ctrl_device *ctrl_dev; + struct device *parent; + if (!port_dev) return; + parent = port_dev->dev.parent; + ctrl_dev = to_serial_base_ctrl_device(parent); + device_del(&port_dev->dev); + ida_free(&ctrl_dev->port_ida, port_dev->port->port_id); + put_device(&port_dev->dev); } static int serial_base_init(void) diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c index 6fb0e007af636..386674ead7f0d 100644 --- a/drivers/ufs/core/ufs-mcq.c +++ b/drivers/ufs/core/ufs-mcq.c @@ -580,7 +580,6 @@ static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba, { struct ufshcd_lrb *lrbp = &hba->lrb[task_tag]; struct utp_transfer_req_desc *utrd; - u32 mask = hwq->max_entries - 1; __le64 cmd_desc_base_addr; bool ret = false; u64 addr, match; @@ -608,7 +607,10 @@ static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba, ret = true; goto out; } - sq_head_slot = (sq_head_slot + 1) & mask; + + sq_head_slot++; + if (sq_head_slot == hwq->max_entries) + sq_head_slot = 0; } out: diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index 8d6fd4c3324f2..c1557d21b027e 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -321,7 +321,7 @@ static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host) ufs_qcom_cap_qunipro(host) ? 
QUNIPRO_SEL : 0, REG_UFS_CFG1); - if (host->hw_ver.major == 0x05) + if (host->hw_ver.major >= 0x05) ufshcd_rmwl(host->hba, QUNIPRO_G4_SEL, 0, REG_UFS_CFG0); /* make sure above configuration is applied before we return */ diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h index 25fc4120b618d..b53420e874acb 100644 --- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h +++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h @@ -31,6 +31,7 @@ struct mlx5_vdpa_mr { struct list_head head; unsigned long num_directs; unsigned long num_klms; + /* state of dvq mr */ bool initialized; /* serialize mkey creation and destruction */ @@ -121,6 +122,7 @@ int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *io int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb, unsigned int asid); void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev); +void mlx5_vdpa_destroy_mr_asid(struct mlx5_vdpa_dev *mvdev, unsigned int asid); #define mlx5_vdpa_warn(__dev, format, ...) \ dev_warn((__dev)->mdev->device, "%s:%d:(pid %d) warning: " format, __func__, __LINE__, \ diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c index 03e5432297912..5a1971fcd87b1 100644 --- a/drivers/vdpa/mlx5/core/mr.c +++ b/drivers/vdpa/mlx5/core/mr.c @@ -489,60 +489,103 @@ static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr } } -void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev) +static void _mlx5_vdpa_destroy_cvq_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid) +{ + if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid) + return; + + prune_iotlb(mvdev); +} + +static void _mlx5_vdpa_destroy_dvq_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid) { struct mlx5_vdpa_mr *mr = &mvdev->mr; - mutex_lock(&mr->mkey_mtx); + if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] != asid) + return; + if (!mr->initialized) - goto out; + return; - prune_iotlb(mvdev); if (mr->user_mr) destroy_user_mr(mvdev, mr); else destroy_dma_mr(mvdev, mr); mr->initialized = false; -out: +} + +void mlx5_vdpa_destroy_mr_asid(struct mlx5_vdpa_dev *mvdev, unsigned int asid) +{ + struct mlx5_vdpa_mr *mr = &mvdev->mr; + + mutex_lock(&mr->mkey_mtx); + + _mlx5_vdpa_destroy_dvq_mr(mvdev, asid); + _mlx5_vdpa_destroy_cvq_mr(mvdev, asid); + mutex_unlock(&mr->mkey_mtx); } -static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, - struct vhost_iotlb *iotlb, unsigned int asid) +void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev) +{ + mlx5_vdpa_destroy_mr_asid(mvdev, mvdev->group2asid[MLX5_VDPA_CVQ_GROUP]); + mlx5_vdpa_destroy_mr_asid(mvdev, mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]); +} + +static int _mlx5_vdpa_create_cvq_mr(struct mlx5_vdpa_dev *mvdev, + struct vhost_iotlb *iotlb, + unsigned int asid) +{ + if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid) + return 0; + + return dup_iotlb(mvdev, iotlb); +} + +static int _mlx5_vdpa_create_dvq_mr(struct mlx5_vdpa_dev *mvdev, + struct vhost_iotlb *iotlb, + unsigned int asid) { struct mlx5_vdpa_mr *mr = &mvdev->mr; int err; - if (mr->initialized) + if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] != asid) return 0; - if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) { - if (iotlb) - err = create_user_mr(mvdev, iotlb); - else - err = create_dma_mr(mvdev, mr); + if (mr->initialized) + return 0; - if (err) - return err; - } + if (iotlb) + err = create_user_mr(mvdev, iotlb); + else + err = create_dma_mr(mvdev, mr); - if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] == asid) { - err = dup_iotlb(mvdev, iotlb); - if (err) - goto out_err; - } + if 
(err) + return err; mr->initialized = true; + + return 0; +} + +static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, + struct vhost_iotlb *iotlb, unsigned int asid) +{ + int err; + + err = _mlx5_vdpa_create_dvq_mr(mvdev, iotlb, asid); + if (err) + return err; + + err = _mlx5_vdpa_create_cvq_mr(mvdev, iotlb, asid); + if (err) + goto out_err; + return 0; out_err: - if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) { - if (iotlb) - destroy_user_mr(mvdev, mr); - else - destroy_dma_mr(mvdev, mr); - } + _mlx5_vdpa_destroy_dvq_mr(mvdev, asid); return err; } diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index 9138ef2fb2c85..37be945a02308 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -2517,7 +2517,15 @@ static int mlx5_vdpa_set_driver_features(struct vdpa_device *vdev, u64 features) else ndev->rqt_size = 1; - ndev->cur_num_vqs = 2 * ndev->rqt_size; + /* Device must start with 1 queue pair, as per VIRTIO v1.2 spec, section + * 5.1.6.5.5 "Device operation in multiqueue mode": + * + * Multiqueue is disabled by default. + * The driver enables multiqueue by sending a command using class + * VIRTIO_NET_CTRL_MQ. The command selects the mode of multiqueue + * operation, as follows: ... + */ + ndev->cur_num_vqs = 2; update_cvq_info(mvdev); return err; @@ -2636,7 +2644,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, goto err_mr; teardown_driver(ndev); - mlx5_vdpa_destroy_mr(mvdev); + mlx5_vdpa_destroy_mr_asid(mvdev, asid); err = mlx5_vdpa_create_mr(mvdev, iotlb, asid); if (err) goto err_mr; @@ -2652,7 +2660,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, return 0; err_setup: - mlx5_vdpa_destroy_mr(mvdev); + mlx5_vdpa_destroy_mr_asid(mvdev, asid); err_mr: return err; } @@ -3548,17 +3556,6 @@ static void mlx5v_remove(struct auxiliary_device *adev) kfree(mgtdev); } -static void mlx5v_shutdown(struct auxiliary_device *auxdev) -{ - struct mlx5_vdpa_mgmtdev *mgtdev; - struct mlx5_vdpa_net *ndev; - - mgtdev = auxiliary_get_drvdata(auxdev); - ndev = mgtdev->ndev; - - free_irqs(ndev); -} - static const struct auxiliary_device_id mlx5v_id_table[] = { { .name = MLX5_ADEV_NAME ".vnet", }, {}, @@ -3570,7 +3567,6 @@ static struct auxiliary_driver mlx5v_driver = { .name = "vnet", .probe = mlx5v_probe, .remove = mlx5v_remove, - .shutdown = mlx5v_shutdown, .id_table = mlx5v_id_table, }; diff --git a/drivers/vdpa/pds/Makefile b/drivers/vdpa/pds/Makefile index 2e22418e3ab30..c2d314d4614d3 100644 --- a/drivers/vdpa/pds/Makefile +++ b/drivers/vdpa/pds/Makefile @@ -5,6 +5,5 @@ obj-$(CONFIG_PDS_VDPA) := pds_vdpa.o pds_vdpa-y := aux_drv.o \ cmds.o \ + debugfs.o \ vdpa_dev.o - -pds_vdpa-$(CONFIG_DEBUG_FS) += debugfs.o diff --git a/drivers/vdpa/pds/debugfs.c b/drivers/vdpa/pds/debugfs.c index 21a0dc0cb607c..9b04aad6ec35d 100644 --- a/drivers/vdpa/pds/debugfs.c +++ b/drivers/vdpa/pds/debugfs.c @@ -176,6 +176,7 @@ static int identity_show(struct seq_file *seq, void *v) { struct pds_vdpa_aux *vdpa_aux = seq->private; struct vdpa_mgmt_dev *mgmt; + u64 hw_features; seq_printf(seq, "aux_dev: %s\n", dev_name(&vdpa_aux->padev->aux_dev.dev)); @@ -183,8 +184,9 @@ static int identity_show(struct seq_file *seq, void *v) mgmt = &vdpa_aux->vdpa_mdev; seq_printf(seq, "max_vqs: %d\n", mgmt->max_supported_vqs); seq_printf(seq, "config_attr_mask: %#llx\n", mgmt->config_attr_mask); - seq_printf(seq, "supported_features: %#llx\n", mgmt->supported_features); - print_feature_bits_all(seq, mgmt->supported_features); + 
hw_features = le64_to_cpu(vdpa_aux->ident.hw_features); + seq_printf(seq, "hw_features: %#llx\n", hw_features); + print_feature_bits_all(seq, hw_features); return 0; } @@ -200,7 +202,6 @@ static int config_show(struct seq_file *seq, void *v) { struct pds_vdpa_device *pdsv = seq->private; struct virtio_net_config vc; - u64 driver_features; u8 status; memcpy_fromio(&vc, pdsv->vdpa_aux->vd_mdev.device, @@ -223,12 +224,8 @@ static int config_show(struct seq_file *seq, void *v) status = vp_modern_get_status(&pdsv->vdpa_aux->vd_mdev); seq_printf(seq, "dev_status: %#x\n", status); print_status_bits(seq, status); - - seq_printf(seq, "req_features: %#llx\n", pdsv->req_features); - print_feature_bits_all(seq, pdsv->req_features); - driver_features = vp_modern_get_driver_features(&pdsv->vdpa_aux->vd_mdev); - seq_printf(seq, "driver_features: %#llx\n", driver_features); - print_feature_bits_all(seq, driver_features); + seq_printf(seq, "negotiated_features: %#llx\n", pdsv->negotiated_features); + print_feature_bits_all(seq, pdsv->negotiated_features); seq_printf(seq, "vdpa_index: %d\n", pdsv->vdpa_index); seq_printf(seq, "num_vqs: %d\n", pdsv->num_vqs); diff --git a/drivers/vdpa/pds/vdpa_dev.c b/drivers/vdpa/pds/vdpa_dev.c index 5071a4d58f8db..52b2449182ad7 100644 --- a/drivers/vdpa/pds/vdpa_dev.c +++ b/drivers/vdpa/pds/vdpa_dev.c @@ -126,11 +126,9 @@ static void pds_vdpa_release_irq(struct pds_vdpa_device *pdsv, int qid) static void pds_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev, u16 qid, bool ready) { struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev); - struct pci_dev *pdev = pdsv->vdpa_aux->padev->vf_pdev; struct device *dev = &pdsv->vdpa_dev.dev; u64 driver_features; u16 invert_idx = 0; - int irq; int err; dev_dbg(dev, "%s: qid %d ready %d => %d\n", @@ -143,19 +141,6 @@ static void pds_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev, u16 qid, bool re invert_idx = PDS_VDPA_PACKED_INVERT_IDX; if (ready) { - irq = pci_irq_vector(pdev, qid); - snprintf(pdsv->vqs[qid].irq_name, sizeof(pdsv->vqs[qid].irq_name), - "vdpa-%s-%d", dev_name(dev), qid); - - err = request_irq(irq, pds_vdpa_isr, 0, - pdsv->vqs[qid].irq_name, &pdsv->vqs[qid]); - if (err) { - dev_err(dev, "%s: no irq for qid %d: %pe\n", - __func__, qid, ERR_PTR(err)); - return; - } - pdsv->vqs[qid].irq = irq; - /* Pass vq setup info to DSC using adminq to gather up and * send all info at once so FW can do its full set up in * one easy operation @@ -164,7 +149,6 @@ static void pds_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev, u16 qid, bool re if (err) { dev_err(dev, "Failed to init vq %d: %pe\n", qid, ERR_PTR(err)); - pds_vdpa_release_irq(pdsv, qid); ready = false; } } else { @@ -172,7 +156,6 @@ static void pds_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev, u16 qid, bool re if (err) dev_err(dev, "%s: reset_vq failed qid %d: %pe\n", __func__, qid, ERR_PTR(err)); - pds_vdpa_release_irq(pdsv, qid); } pdsv->vqs[qid].ready = ready; @@ -318,6 +301,7 @@ static int pds_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 featur struct device *dev = &pdsv->vdpa_dev.dev; u64 driver_features; u64 nego_features; + u64 hw_features; u64 missing; if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)) && features) { @@ -325,21 +309,26 @@ static int pds_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 featur return -EOPNOTSUPP; } - pdsv->req_features = features; - /* Check for valid feature bits */ - nego_features = features & le64_to_cpu(pdsv->vdpa_aux->ident.hw_features); - missing = pdsv->req_features & ~nego_features; + nego_features = 
features & pdsv->supported_features; + missing = features & ~nego_features; if (missing) { dev_err(dev, "Can't support all requested features in %#llx, missing %#llx features\n", - pdsv->req_features, missing); + features, missing); return -EOPNOTSUPP; } + pdsv->negotiated_features = nego_features; + driver_features = pds_vdpa_get_driver_features(vdpa_dev); dev_dbg(dev, "%s: %#llx => %#llx\n", __func__, driver_features, nego_features); + /* if we're faking the F_MAC, strip it before writing to device */ + hw_features = le64_to_cpu(pdsv->vdpa_aux->ident.hw_features); + if (!(hw_features & BIT_ULL(VIRTIO_NET_F_MAC))) + nego_features &= ~BIT_ULL(VIRTIO_NET_F_MAC); + if (driver_features == nego_features) return 0; @@ -352,7 +341,7 @@ static u64 pds_vdpa_get_driver_features(struct vdpa_device *vdpa_dev) { struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev); - return vp_modern_get_driver_features(&pdsv->vdpa_aux->vd_mdev); + return pdsv->negotiated_features; } static void pds_vdpa_set_config_cb(struct vdpa_device *vdpa_dev, @@ -389,6 +378,72 @@ static u8 pds_vdpa_get_status(struct vdpa_device *vdpa_dev) return vp_modern_get_status(&pdsv->vdpa_aux->vd_mdev); } +static int pds_vdpa_request_irqs(struct pds_vdpa_device *pdsv) +{ + struct pci_dev *pdev = pdsv->vdpa_aux->padev->vf_pdev; + struct pds_vdpa_aux *vdpa_aux = pdsv->vdpa_aux; + struct device *dev = &pdsv->vdpa_dev.dev; + int max_vq, nintrs, qid, err; + + max_vq = vdpa_aux->vdpa_mdev.max_supported_vqs; + + nintrs = pci_alloc_irq_vectors(pdev, max_vq, max_vq, PCI_IRQ_MSIX); + if (nintrs < 0) { + dev_err(dev, "Couldn't get %d msix vectors: %pe\n", + max_vq, ERR_PTR(nintrs)); + return nintrs; + } + + for (qid = 0; qid < pdsv->num_vqs; ++qid) { + int irq = pci_irq_vector(pdev, qid); + + snprintf(pdsv->vqs[qid].irq_name, sizeof(pdsv->vqs[qid].irq_name), + "vdpa-%s-%d", dev_name(dev), qid); + + err = request_irq(irq, pds_vdpa_isr, 0, + pdsv->vqs[qid].irq_name, + &pdsv->vqs[qid]); + if (err) { + dev_err(dev, "%s: no irq for qid %d: %pe\n", + __func__, qid, ERR_PTR(err)); + goto err_release; + } + + pdsv->vqs[qid].irq = irq; + } + + vdpa_aux->nintrs = nintrs; + + return 0; + +err_release: + while (qid--) + pds_vdpa_release_irq(pdsv, qid); + + pci_free_irq_vectors(pdev); + + vdpa_aux->nintrs = 0; + + return err; +} + +static void pds_vdpa_release_irqs(struct pds_vdpa_device *pdsv) +{ + struct pci_dev *pdev = pdsv->vdpa_aux->padev->vf_pdev; + struct pds_vdpa_aux *vdpa_aux = pdsv->vdpa_aux; + int qid; + + if (!vdpa_aux->nintrs) + return; + + for (qid = 0; qid < pdsv->num_vqs; qid++) + pds_vdpa_release_irq(pdsv, qid); + + pci_free_irq_vectors(pdev); + + vdpa_aux->nintrs = 0; +} + static void pds_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status) { struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev); @@ -399,6 +454,11 @@ static void pds_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status) old_status = pds_vdpa_get_status(vdpa_dev); dev_dbg(dev, "%s: old %#x new %#x\n", __func__, old_status, status); + if (status & ~old_status & VIRTIO_CONFIG_S_DRIVER_OK) { + if (pds_vdpa_request_irqs(pdsv)) + status = old_status | VIRTIO_CONFIG_S_FAILED; + } + pds_vdpa_cmd_set_status(pdsv, status); /* Note: still working with FW on the need for this reset cmd */ @@ -409,6 +469,8 @@ static void pds_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status) pdsv->vqs[i].avail_idx = 0; pdsv->vqs[i].used_idx = 0; } + + pds_vdpa_cmd_set_mac(pdsv, pdsv->mac); } if (status & ~old_status & VIRTIO_CONFIG_S_FEATURES_OK) { @@ -418,6 +480,20 @@ static void 
pds_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status) i, &pdsv->vqs[i].notify_pa); } } + + if (old_status & ~status & VIRTIO_CONFIG_S_DRIVER_OK) + pds_vdpa_release_irqs(pdsv); +} + +static void pds_vdpa_init_vqs_entry(struct pds_vdpa_device *pdsv, int qid, + void __iomem *notify) +{ + memset(&pdsv->vqs[qid], 0, sizeof(pdsv->vqs[0])); + pdsv->vqs[qid].qid = qid; + pdsv->vqs[qid].pdsv = pdsv; + pdsv->vqs[qid].ready = false; + pdsv->vqs[qid].irq = VIRTIO_MSI_NO_VECTOR; + pdsv->vqs[qid].notify = notify; } static int pds_vdpa_reset(struct vdpa_device *vdpa_dev) @@ -441,14 +517,17 @@ static int pds_vdpa_reset(struct vdpa_device *vdpa_dev) if (err) dev_err(dev, "%s: reset_vq failed qid %d: %pe\n", __func__, i, ERR_PTR(err)); - pds_vdpa_release_irq(pdsv, i); - memset(&pdsv->vqs[i], 0, sizeof(pdsv->vqs[0])); - pdsv->vqs[i].ready = false; } } pds_vdpa_set_status(vdpa_dev, 0); + if (status & VIRTIO_CONFIG_S_DRIVER_OK) { + /* Reset the vq info */ + for (i = 0; i < pdsv->num_vqs && !err; i++) + pds_vdpa_init_vqs_entry(pdsv, i, pdsv->vqs[i].notify); + } + return 0; } @@ -532,7 +611,6 @@ static int pds_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name, struct device *dma_dev; struct pci_dev *pdev; struct device *dev; - u8 mac[ETH_ALEN]; int err; int i; @@ -563,7 +641,7 @@ static int pds_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name, if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) { u64 unsupp_features = - add_config->device_features & ~mgmt->supported_features; + add_config->device_features & ~pdsv->supported_features; if (unsupp_features) { dev_err(dev, "Unsupported features: %#llx\n", unsupp_features); @@ -614,29 +692,30 @@ static int pds_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name, } /* Set a mac, either from the user config if provided - * or set a random mac if default is 00:..:00 + * or use the device's mac if not 00:..:00 + * or set a random mac */ if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR)) { - ether_addr_copy(mac, add_config->net.mac); - pds_vdpa_cmd_set_mac(pdsv, mac); + ether_addr_copy(pdsv->mac, add_config->net.mac); } else { struct virtio_net_config __iomem *vc; vc = pdsv->vdpa_aux->vd_mdev.device; - memcpy_fromio(mac, vc->mac, sizeof(mac)); - if (is_zero_ether_addr(mac)) { - eth_random_addr(mac); - dev_info(dev, "setting random mac %pM\n", mac); - pds_vdpa_cmd_set_mac(pdsv, mac); + memcpy_fromio(pdsv->mac, vc->mac, sizeof(pdsv->mac)); + if (is_zero_ether_addr(pdsv->mac) && + (pdsv->supported_features & BIT_ULL(VIRTIO_NET_F_MAC))) { + eth_random_addr(pdsv->mac); + dev_info(dev, "setting random mac %pM\n", pdsv->mac); } } + pds_vdpa_cmd_set_mac(pdsv, pdsv->mac); for (i = 0; i < pdsv->num_vqs; i++) { - pdsv->vqs[i].qid = i; - pdsv->vqs[i].pdsv = pdsv; - pdsv->vqs[i].irq = VIRTIO_MSI_NO_VECTOR; - pdsv->vqs[i].notify = vp_modern_map_vq_notify(&pdsv->vdpa_aux->vd_mdev, - i, &pdsv->vqs[i].notify_pa); + void __iomem *notify; + + notify = vp_modern_map_vq_notify(&pdsv->vdpa_aux->vd_mdev, + i, &pdsv->vqs[i].notify_pa); + pds_vdpa_init_vqs_entry(pdsv, i, notify); } pdsv->vdpa_dev.mdev = &vdpa_aux->vdpa_mdev; @@ -746,24 +825,19 @@ int pds_vdpa_get_mgmt_info(struct pds_vdpa_aux *vdpa_aux) max_vqs = min_t(u16, dev_intrs, max_vqs); mgmt->max_supported_vqs = min_t(u16, PDS_VDPA_MAX_QUEUES, max_vqs); - vdpa_aux->nintrs = mgmt->max_supported_vqs; + vdpa_aux->nintrs = 0; mgmt->ops = &pds_vdpa_mgmt_dev_ops; mgmt->id_table = pds_vdpa_id_table; mgmt->device = dev; mgmt->supported_features = le64_to_cpu(vdpa_aux->ident.hw_features); + + /* 
advertise F_MAC even if the device doesn't */ + mgmt->supported_features |= BIT_ULL(VIRTIO_NET_F_MAC); + mgmt->config_attr_mask = BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR); mgmt->config_attr_mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP); mgmt->config_attr_mask |= BIT_ULL(VDPA_ATTR_DEV_FEATURES); - err = pci_alloc_irq_vectors(pdev, vdpa_aux->nintrs, vdpa_aux->nintrs, - PCI_IRQ_MSIX); - if (err < 0) { - dev_err(dev, "Couldn't get %d msix vectors: %pe\n", - vdpa_aux->nintrs, ERR_PTR(err)); - return err; - } - vdpa_aux->nintrs = err; - return 0; } diff --git a/drivers/vdpa/pds/vdpa_dev.h b/drivers/vdpa/pds/vdpa_dev.h index a1bc37de95374..d984ba24a7dae 100644 --- a/drivers/vdpa/pds/vdpa_dev.h +++ b/drivers/vdpa/pds/vdpa_dev.h @@ -35,10 +35,11 @@ struct pds_vdpa_device { struct pds_vdpa_aux *vdpa_aux; struct pds_vdpa_vq_info vqs[PDS_VDPA_MAX_QUEUES]; - u64 supported_features; /* specified device features */ - u64 req_features; /* features requested by vdpa */ + u64 supported_features; /* supported device features */ + u64 negotiated_features; /* negotiated features */ u8 vdpa_index; /* rsvd for future subdevice use */ u8 num_vqs; /* num vqs in use */ + u8 mac[ETH_ALEN]; /* mac selected when the device was added */ struct vdpa_callback config_cb; struct notifier_block nb; }; diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c index 965e32529eb85..a7612e0783b36 100644 --- a/drivers/vdpa/vdpa.c +++ b/drivers/vdpa/vdpa.c @@ -1247,44 +1247,41 @@ static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = { [VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING }, [VDPA_ATTR_DEV_NAME] = { .type = NLA_STRING }, [VDPA_ATTR_DEV_NET_CFG_MACADDR] = NLA_POLICY_ETH_ADDR, + [VDPA_ATTR_DEV_NET_CFG_MAX_VQP] = { .type = NLA_U16 }, /* virtio spec 1.1 section 5.1.4.1 for valid MTU range */ [VDPA_ATTR_DEV_NET_CFG_MTU] = NLA_POLICY_MIN(NLA_U16, 68), + [VDPA_ATTR_DEV_QUEUE_INDEX] = { .type = NLA_U32 }, + [VDPA_ATTR_DEV_FEATURES] = { .type = NLA_U64 }, }; static const struct genl_ops vdpa_nl_ops[] = { { .cmd = VDPA_CMD_MGMTDEV_GET, - .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = vdpa_nl_cmd_mgmtdev_get_doit, .dumpit = vdpa_nl_cmd_mgmtdev_get_dumpit, }, { .cmd = VDPA_CMD_DEV_NEW, - .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = vdpa_nl_cmd_dev_add_set_doit, .flags = GENL_ADMIN_PERM, }, { .cmd = VDPA_CMD_DEV_DEL, - .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = vdpa_nl_cmd_dev_del_set_doit, .flags = GENL_ADMIN_PERM, }, { .cmd = VDPA_CMD_DEV_GET, - .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = vdpa_nl_cmd_dev_get_doit, .dumpit = vdpa_nl_cmd_dev_get_dumpit, }, { .cmd = VDPA_CMD_DEV_CONFIG_GET, - .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = vdpa_nl_cmd_dev_config_get_doit, .dumpit = vdpa_nl_cmd_dev_config_get_dumpit, }, { .cmd = VDPA_CMD_DEV_VSTATS_GET, - .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = vdpa_nl_cmd_dev_stats_get_doit, .flags = GENL_ADMIN_PERM, }, diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c index dc38ed21319da..df7869537ef14 100644 --- a/drivers/vdpa/vdpa_user/vduse_dev.c +++ b/drivers/vdpa/vdpa_user/vduse_dev.c @@ -935,10 +935,10 @@ static void vduse_dev_irq_inject(struct work_struct *work) { struct vduse_dev *dev = container_of(work, struct vduse_dev, inject); - spin_lock_irq(&dev->irq_lock); + spin_lock_bh(&dev->irq_lock); if (dev->config_cb.callback) dev->config_cb.callback(dev->config_cb.private); - 
spin_unlock_irq(&dev->irq_lock); + spin_unlock_bh(&dev->irq_lock); } static void vduse_vq_irq_inject(struct work_struct *work) @@ -946,10 +946,10 @@ static void vduse_vq_irq_inject(struct work_struct *work) struct vduse_virtqueue *vq = container_of(work, struct vduse_virtqueue, inject); - spin_lock_irq(&vq->irq_lock); + spin_lock_bh(&vq->irq_lock); if (vq->ready && vq->cb.callback) vq->cb.callback(vq->cb.private); - spin_unlock_irq(&vq->irq_lock); + spin_unlock_bh(&vq->irq_lock); } static bool vduse_vq_signal_irqfd(struct vduse_virtqueue *vq) diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index c83f7f043470d..abef0619c7901 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -25,6 +25,8 @@ #include #include #include +#include +#include #include #include #include @@ -75,6 +77,9 @@ struct vhost_scsi_cmd { u32 tvc_prot_sgl_count; /* Saved unpacked SCSI LUN for vhost_scsi_target_queue_cmd() */ u32 tvc_lun; + u32 copied_iov:1; + const void *saved_iter_addr; + struct iov_iter saved_iter; /* Pointer to the SGL formatted memory from virtio-scsi */ struct scatterlist *tvc_sgl; struct scatterlist *tvc_prot_sgl; @@ -328,8 +333,13 @@ static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd) int i; if (tv_cmd->tvc_sgl_count) { - for (i = 0; i < tv_cmd->tvc_sgl_count; i++) - put_page(sg_page(&tv_cmd->tvc_sgl[i])); + for (i = 0; i < tv_cmd->tvc_sgl_count; i++) { + if (tv_cmd->copied_iov) + __free_page(sg_page(&tv_cmd->tvc_sgl[i])); + else + put_page(sg_page(&tv_cmd->tvc_sgl[i])); + } + kfree(tv_cmd->saved_iter_addr); } if (tv_cmd->tvc_prot_sgl_count) { for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++) @@ -504,6 +514,28 @@ static void vhost_scsi_evt_work(struct vhost_work *work) mutex_unlock(&vq->mutex); } +static int vhost_scsi_copy_sgl_to_iov(struct vhost_scsi_cmd *cmd) +{ + struct iov_iter *iter = &cmd->saved_iter; + struct scatterlist *sg = cmd->tvc_sgl; + struct page *page; + size_t len; + int i; + + for (i = 0; i < cmd->tvc_sgl_count; i++) { + page = sg_page(&sg[i]); + len = sg[i].length; + + if (copy_page_to_iter(page, 0, len, iter) != len) { + pr_err("Could not copy data while handling misaligned cmd. Error %zu\n", + len); + return -1; + } + } + + return 0; +} + /* Fill in status and signal that we are done processing this command * * This is scheduled in the vhost work queue so we are called with the owner @@ -527,15 +559,20 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work) pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__, cmd, se_cmd->residual_count, se_cmd->scsi_status); - memset(&v_rsp, 0, sizeof(v_rsp)); - v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count); - /* TODO is status_qualifier field needed? */ - v_rsp.status = se_cmd->scsi_status; - v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq, - se_cmd->scsi_sense_length); - memcpy(v_rsp.sense, cmd->tvc_sense_buf, - se_cmd->scsi_sense_length); + + if (cmd->saved_iter_addr && vhost_scsi_copy_sgl_to_iov(cmd)) { + v_rsp.response = VIRTIO_SCSI_S_BAD_TARGET; + } else { + v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, + se_cmd->residual_count); + /* TODO is status_qualifier field needed? 
*/ + v_rsp.status = se_cmd->scsi_status; + v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq, + se_cmd->scsi_sense_length); + memcpy(v_rsp.sense, cmd->tvc_sense_buf, + se_cmd->scsi_sense_length); + } iov_iter_init(&iov_iter, ITER_DEST, cmd->tvc_resp_iov, cmd->tvc_in_iovs, sizeof(v_rsp)); @@ -613,12 +650,12 @@ static int vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter, struct scatterlist *sgl, - bool write) + bool is_prot) { struct page **pages = cmd->tvc_upages; struct scatterlist *sg = sgl; - ssize_t bytes; - size_t offset; + ssize_t bytes, mapped_bytes; + size_t offset, mapped_offset; unsigned int npages = 0; bytes = iov_iter_get_pages2(iter, pages, LONG_MAX, @@ -627,13 +664,53 @@ vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd, if (bytes <= 0) return bytes < 0 ? bytes : -EFAULT; + mapped_bytes = bytes; + mapped_offset = offset; + while (bytes) { unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes); + /* + * The block layer requires bios/requests to be a multiple of + * 512 bytes, but Windows can send us vecs that are misaligned. + * This can result in bios and later requests with misaligned + * sizes if we have to break up a cmd/scatterlist into multiple + * bios. + * + * We currently only break up a command into multiple bios if + * we hit the vec/seg limit, so check if our sgl_count is + * greater than the max and if a vec in the cmd has a + * misaligned offset/size. + */ + if (!is_prot && + (offset & (SECTOR_SIZE - 1) || n & (SECTOR_SIZE - 1)) && + cmd->tvc_sgl_count > BIO_MAX_VECS) { + WARN_ONCE(true, + "vhost-scsi detected misaligned IO. Performance may be degraded."); + goto revert_iter_get_pages; + } + sg_set_page(sg++, pages[npages++], n, offset); bytes -= n; offset = 0; } + return npages; + +revert_iter_get_pages: + iov_iter_revert(iter, mapped_bytes); + + npages = 0; + while (mapped_bytes) { + unsigned int n = min_t(unsigned int, PAGE_SIZE - mapped_offset, + mapped_bytes); + + put_page(pages[npages++]); + + mapped_bytes -= n; + mapped_offset = 0; + } + + return -EINVAL; } static int @@ -657,25 +734,80 @@ vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls) } static int -vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write, - struct iov_iter *iter, - struct scatterlist *sg, int sg_count) +vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter, + struct scatterlist *sg, int sg_count) +{ + size_t len = iov_iter_count(iter); + unsigned int nbytes = 0; + struct page *page; + int i; + + if (cmd->tvc_data_direction == DMA_FROM_DEVICE) { + cmd->saved_iter_addr = dup_iter(&cmd->saved_iter, iter, + GFP_KERNEL); + if (!cmd->saved_iter_addr) + return -ENOMEM; + } + + for (i = 0; i < sg_count; i++) { + page = alloc_page(GFP_KERNEL); + if (!page) { + i--; + goto err; + } + + nbytes = min_t(unsigned int, PAGE_SIZE, len); + sg_set_page(&sg[i], page, nbytes, 0); + + if (cmd->tvc_data_direction == DMA_TO_DEVICE && + copy_page_from_iter(page, 0, nbytes, iter) != nbytes) + goto err; + + len -= nbytes; + } + + cmd->copied_iov = 1; + return 0; + +err: + pr_err("Could not read %u bytes while handling misaligned cmd\n", + nbytes); + + for (; i >= 0; i--) + __free_page(sg_page(&sg[i])); + kfree(cmd->saved_iter_addr); + return -ENOMEM; +} + +static int +vhost_scsi_map_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter, + struct scatterlist *sg, int sg_count, bool is_prot) { struct scatterlist *p = sg; + size_t revert_bytes; int ret; while (iov_iter_count(iter)) { - ret = vhost_scsi_map_to_sgl(cmd, iter, sg, write); + ret 
= vhost_scsi_map_to_sgl(cmd, iter, sg, is_prot); if (ret < 0) { + revert_bytes = 0; + while (p < sg) { - struct page *page = sg_page(p++); - if (page) + struct page *page = sg_page(p); + + if (page) { put_page(page); + revert_bytes += p->length; + } + p++; } + + iov_iter_revert(iter, revert_bytes); return ret; } sg += ret; } + return 0; } @@ -685,7 +817,6 @@ vhost_scsi_mapal(struct vhost_scsi_cmd *cmd, size_t data_bytes, struct iov_iter *data_iter) { int sgl_count, ret; - bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE); if (prot_bytes) { sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes, @@ -698,9 +829,9 @@ vhost_scsi_mapal(struct vhost_scsi_cmd *cmd, pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__, cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count); - ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter, - cmd->tvc_prot_sgl, - cmd->tvc_prot_sgl_count); + ret = vhost_scsi_map_iov_to_sgl(cmd, prot_iter, + cmd->tvc_prot_sgl, + cmd->tvc_prot_sgl_count, true); if (ret < 0) { cmd->tvc_prot_sgl_count = 0; return ret; @@ -716,8 +847,14 @@ vhost_scsi_mapal(struct vhost_scsi_cmd *cmd, pr_debug("%s data_sg %p data_sgl_count %u\n", __func__, cmd->tvc_sgl, cmd->tvc_sgl_count); - ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter, - cmd->tvc_sgl, cmd->tvc_sgl_count); + ret = vhost_scsi_map_iov_to_sgl(cmd, data_iter, cmd->tvc_sgl, + cmd->tvc_sgl_count, false); + if (ret == -EINVAL) { + sg_init_table(cmd->tvc_sgl, cmd->tvc_sgl_count); + ret = vhost_scsi_copy_iov_to_sgl(cmd, data_iter, cmd->tvc_sgl, + cmd->tvc_sgl_count); + } + if (ret < 0) { cmd->tvc_sgl_count = 0; return ret; diff --git a/drivers/video/fbdev/amifb.c b/drivers/video/fbdev/amifb.c index d88265dbebf4c..f216b2c702a11 100644 --- a/drivers/video/fbdev/amifb.c +++ b/drivers/video/fbdev/amifb.c @@ -687,7 +687,7 @@ struct fb_var_cursorinfo { __u16 height; __u16 xspot; __u16 yspot; - __u8 data[1]; /* field with [height][width] */ + DECLARE_FLEX_ARRAY(__u8, data); /* field with [height][width] */ }; struct fb_cursorstate { diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c index 987c5f5f02414..f245da138e689 100644 --- a/drivers/video/fbdev/atmel_lcdfb.c +++ b/drivers/video/fbdev/atmel_lcdfb.c @@ -1308,7 +1308,7 @@ static struct platform_driver atmel_lcdfb_driver = { .resume = atmel_lcdfb_resume, .driver = { .name = "atmel_lcdfb", - .of_match_table = of_match_ptr(atmel_lcdfb_dt_ids), + .of_match_table = atmel_lcdfb_dt_ids, }, }; diff --git a/drivers/video/fbdev/goldfishfb.c b/drivers/video/fbdev/goldfishfb.c index 6fa2108fd912d..e41c9fef4a3b6 100644 --- a/drivers/video/fbdev/goldfishfb.c +++ b/drivers/video/fbdev/goldfishfb.c @@ -203,8 +203,8 @@ static int goldfish_fb_probe(struct platform_device *pdev) } fb->irq = platform_get_irq(pdev, 0); - if (fb->irq <= 0) { - ret = -ENODEV; + if (fb->irq < 0) { + ret = fb->irq; goto err_no_irq; } diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c index 51fbf02a03430..76b50b6c98ad9 100644 --- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c +++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c @@ -519,7 +519,9 @@ static int mmphw_probe(struct platform_device *pdev) "unable to get clk %s\n", mi->clk_name); goto failed; } - clk_prepare_enable(ctrl->clk); + ret = clk_prepare_enable(ctrl->clk); + if (ret) + goto failed; /* init global regs */ ctrl_set_default(ctrl); diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c index 11c3737982790..46881a6915498 100644 --- a/drivers/video/fbdev/ssd1307fb.c +++ 
b/drivers/video/fbdev/ssd1307fb.c @@ -399,8 +399,8 @@ static int ssd1307fb_init(struct ssd1307fb_par *par) /* Enable the PWM */ pwm_enable(par->pwm); - dev_dbg(&par->client->dev, "Using PWM%d with a %lluns period.\n", - par->pwm->pwm, pwm_get_period(par->pwm)); + dev_dbg(&par->client->dev, "Using PWM %s with a %lluns period.\n", + par->pwm->label, pwm_get_period(par->pwm)); } /* Set initial contrast */ diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c index 835f6cc2fb664..fa5226c198cc6 100644 --- a/drivers/virtio/virtio_mem.c +++ b/drivers/virtio/virtio_mem.c @@ -38,11 +38,6 @@ module_param(bbm_block_size, ulong, 0444); MODULE_PARM_DESC(bbm_block_size, "Big Block size in bytes. Default is 0 (auto-detection)."); -static bool bbm_safe_unplug = true; -module_param(bbm_safe_unplug, bool, 0444); -MODULE_PARM_DESC(bbm_safe_unplug, - "Use a safe unplug mechanism in BBM, avoiding long/endless loops"); - /* * virtio-mem currently supports the following modes of operation: * @@ -173,6 +168,13 @@ struct virtio_mem { /* The number of subblocks per Linux memory block. */ uint32_t sbs_per_mb; + /* + * Some of the Linux memory blocks tracked as "partially + * plugged" are completely unplugged and can be offlined + * and removed -- which previously failed. + */ + bool have_unplugged_mb; + /* Summary of all memory block states. */ unsigned long mb_count[VIRTIO_MEM_SBM_MB_COUNT]; @@ -746,11 +748,15 @@ static int virtio_mem_offline_and_remove_memory(struct virtio_mem *vm, * immediately instead of waiting. */ virtio_mem_retry(vm); - } else { - dev_dbg(&vm->vdev->dev, - "offlining and removing memory failed: %d\n", rc); + return 0; } - return rc; + dev_dbg(&vm->vdev->dev, "offlining and removing memory failed: %d\n", rc); + /* + * We don't really expect this to fail, because we fake-offlined all + * memory already. But it could fail in corner cases. + */ + WARN_ON_ONCE(rc != -ENOMEM && rc != -EBUSY); + return rc == -ENOMEM ? -ENOMEM : -EBUSY; } /* @@ -766,6 +772,34 @@ static int virtio_mem_sbm_offline_and_remove_mb(struct virtio_mem *vm, return virtio_mem_offline_and_remove_memory(vm, addr, size); } +/* + * Try (offlining and) removing memory from Linux in case all subblocks are + * unplugged. Can be called on online and offline memory blocks. + * + * May modify the state of memory blocks in virtio-mem. + */ +static int virtio_mem_sbm_try_remove_unplugged_mb(struct virtio_mem *vm, + unsigned long mb_id) +{ + int rc; + + /* + * Once all subblocks of a memory block were unplugged, offline and + * remove it. + */ + if (!virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) + return 0; + + /* offline_and_remove_memory() works for online and offline memory. */ + mutex_unlock(&vm->hotplug_mutex); + rc = virtio_mem_sbm_offline_and_remove_mb(vm, mb_id); + mutex_lock(&vm->hotplug_mutex); + if (!rc) + virtio_mem_sbm_set_mb_state(vm, mb_id, + VIRTIO_MEM_SBM_MB_UNUSED); + return rc; +} + /* * See virtio_mem_offline_and_remove_memory(): Try to offline and remove a * all Linux memory blocks covered by the big block. @@ -1155,7 +1189,8 @@ static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages) * Try to allocate a range, marking pages fake-offline, effectively * fake-offlining them. 
*/ -static int virtio_mem_fake_offline(unsigned long pfn, unsigned long nr_pages) +static int virtio_mem_fake_offline(struct virtio_mem *vm, unsigned long pfn, + unsigned long nr_pages) { const bool is_movable = is_zone_movable_page(pfn_to_page(pfn)); int rc, retry_count; @@ -1168,6 +1203,14 @@ static int virtio_mem_fake_offline(unsigned long pfn, unsigned long nr_pages) * some guarantees. */ for (retry_count = 0; retry_count < 5; retry_count++) { + /* + * If the config changed, stop immediately and go back to the + * main loop: avoid trying to keep unplugging if the device + * might have decided to not remove any more memory. + */ + if (atomic_read(&vm->config_changed)) + return -EAGAIN; + rc = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE, GFP_KERNEL); if (rc == -ENOMEM) @@ -1917,7 +1960,7 @@ static int virtio_mem_sbm_unplug_sb_online(struct virtio_mem *vm, start_pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) + sb_id * vm->sbm.sb_size); - rc = virtio_mem_fake_offline(start_pfn, nr_pages); + rc = virtio_mem_fake_offline(vm, start_pfn, nr_pages); if (rc) return rc; @@ -1989,20 +2032,10 @@ static int virtio_mem_sbm_unplug_any_sb_online(struct virtio_mem *vm, } unplugged: - /* - * Once all subblocks of a memory block were unplugged, offline and - * remove it. This will usually not fail, as no memory is in use - * anymore - however some other notifiers might NACK the request. - */ - if (virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) { - mutex_unlock(&vm->hotplug_mutex); - rc = virtio_mem_sbm_offline_and_remove_mb(vm, mb_id); - mutex_lock(&vm->hotplug_mutex); - if (!rc) - virtio_mem_sbm_set_mb_state(vm, mb_id, - VIRTIO_MEM_SBM_MB_UNUSED); - } - + rc = virtio_mem_sbm_try_remove_unplugged_mb(vm, mb_id); + if (rc) + vm->sbm.have_unplugged_mb = 1; + /* Ignore errors, this is not critical. We'll retry later. */ return 0; } @@ -2111,38 +2144,32 @@ static int virtio_mem_bbm_offline_remove_and_unplug_bb(struct virtio_mem *vm, VIRTIO_MEM_BBM_BB_ADDED)) return -EINVAL; - if (bbm_safe_unplug) { - /* - * Start by fake-offlining all memory. Once we marked the device - * block as fake-offline, all newly onlined memory will - * automatically be kept fake-offline. Protect from concurrent - * onlining/offlining until we have a consistent state. - */ - mutex_lock(&vm->hotplug_mutex); - virtio_mem_bbm_set_bb_state(vm, bb_id, - VIRTIO_MEM_BBM_BB_FAKE_OFFLINE); + /* + * Start by fake-offlining all memory. Once we marked the device + * block as fake-offline, all newly onlined memory will + * automatically be kept fake-offline. Protect from concurrent + * onlining/offlining until we have a consistent state. 
+ */ + mutex_lock(&vm->hotplug_mutex); + virtio_mem_bbm_set_bb_state(vm, bb_id, VIRTIO_MEM_BBM_BB_FAKE_OFFLINE); - for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { - page = pfn_to_online_page(pfn); - if (!page) - continue; + for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { + page = pfn_to_online_page(pfn); + if (!page) + continue; - rc = virtio_mem_fake_offline(pfn, PAGES_PER_SECTION); - if (rc) { - end_pfn = pfn; - goto rollback_safe_unplug; - } + rc = virtio_mem_fake_offline(vm, pfn, PAGES_PER_SECTION); + if (rc) { + end_pfn = pfn; + goto rollback; } - mutex_unlock(&vm->hotplug_mutex); } + mutex_unlock(&vm->hotplug_mutex); rc = virtio_mem_bbm_offline_and_remove_bb(vm, bb_id); if (rc) { - if (bbm_safe_unplug) { - mutex_lock(&vm->hotplug_mutex); - goto rollback_safe_unplug; - } - return rc; + mutex_lock(&vm->hotplug_mutex); + goto rollback; } rc = virtio_mem_bbm_unplug_bb(vm, bb_id); @@ -2154,7 +2181,7 @@ static int virtio_mem_bbm_offline_remove_and_unplug_bb(struct virtio_mem *vm, VIRTIO_MEM_BBM_BB_UNUSED); return rc; -rollback_safe_unplug: +rollback: for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { page = pfn_to_online_page(pfn); if (!page) @@ -2260,12 +2287,13 @@ static int virtio_mem_unplug_request(struct virtio_mem *vm, uint64_t diff) /* * Try to unplug all blocks that couldn't be unplugged before, for example, - * because the hypervisor was busy. + * because the hypervisor was busy. Further, offline and remove any memory + * blocks where we previously failed. */ -static int virtio_mem_unplug_pending_mb(struct virtio_mem *vm) +static int virtio_mem_cleanup_pending_mb(struct virtio_mem *vm) { unsigned long id; - int rc; + int rc = 0; if (!vm->in_sbm) { virtio_mem_bbm_for_each_bb(vm, id, @@ -2287,6 +2315,27 @@ static int virtio_mem_unplug_pending_mb(struct virtio_mem *vm) VIRTIO_MEM_SBM_MB_UNUSED); } + if (!vm->sbm.have_unplugged_mb) + return 0; + + /* + * Let's retry (offlining and) removing completely unplugged Linux + * memory blocks. + */ + vm->sbm.have_unplugged_mb = false; + + mutex_lock(&vm->hotplug_mutex); + virtio_mem_sbm_for_each_mb(vm, id, VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL) + rc |= virtio_mem_sbm_try_remove_unplugged_mb(vm, id); + virtio_mem_sbm_for_each_mb(vm, id, VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL) + rc |= virtio_mem_sbm_try_remove_unplugged_mb(vm, id); + virtio_mem_sbm_for_each_mb(vm, id, VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL) + rc |= virtio_mem_sbm_try_remove_unplugged_mb(vm, id); + mutex_unlock(&vm->hotplug_mutex); + + if (rc) + vm->sbm.have_unplugged_mb = true; + /* Ignore errors, this is not critical. We'll retry later. */ return 0; } @@ -2368,9 +2417,9 @@ static void virtio_mem_run_wq(struct work_struct *work) virtio_mem_refresh_config(vm); } - /* Unplug any leftovers from previous runs */ + /* Cleanup any leftovers from previous runs */ if (!rc) - rc = virtio_mem_unplug_pending_mb(vm); + rc = virtio_mem_cleanup_pending_mb(vm); if (!rc && vm->requested_size != vm->plugged_size) { if (vm->requested_size > vm->plugged_size) { @@ -2382,6 +2431,13 @@ static void virtio_mem_run_wq(struct work_struct *work) } } + /* + * Keep retrying to offline and remove completely unplugged Linux + * memory blocks. 
+ */ + if (!rc && vm->in_sbm && vm->sbm.have_unplugged_mb) + rc = -EBUSY; + switch (rc) { case 0: vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS; diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index a46a4a29e9295..97760f6112959 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -607,9 +607,8 @@ static void virtio_mmio_release_dev(struct device *_d) struct virtio_device *vdev = container_of(_d, struct virtio_device, dev); struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); - struct platform_device *pdev = vm_dev->pdev; - devm_kfree(&pdev->dev, vm_dev); + kfree(vm_dev); } /* Platform device */ @@ -620,7 +619,7 @@ static int virtio_mmio_probe(struct platform_device *pdev) unsigned long magic; int rc; - vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL); + vm_dev = kzalloc(sizeof(*vm_dev), GFP_KERNEL); if (!vm_dev) return -ENOMEM; diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index a6c86f916dbdf..c2524a7207cfa 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -557,8 +557,6 @@ static int virtio_pci_probe(struct pci_dev *pci_dev, pci_set_master(pci_dev); - vp_dev->is_legacy = vp_dev->ldev.ioaddr ? true : false; - rc = register_virtio_device(&vp_dev->vdev); reg_dev = vp_dev; if (rc) diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c index 2257f1b3d8ae1..d9cbb02b35a11 100644 --- a/drivers/virtio/virtio_pci_legacy.c +++ b/drivers/virtio/virtio_pci_legacy.c @@ -223,6 +223,7 @@ int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev) vp_dev->config_vector = vp_config_vector; vp_dev->setup_vq = setup_vq; vp_dev->del_vq = del_vq; + vp_dev->is_legacy = true; return 0; } diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c index 989e2d7184ce4..961161da59000 100644 --- a/drivers/virtio/virtio_vdpa.c +++ b/drivers/virtio/virtio_vdpa.c @@ -393,11 +393,13 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs, cb.callback = virtio_vdpa_config_cb; cb.private = vd_dev; ops->set_config_cb(vdpa, &cb); + kfree(masks); return 0; err_setup_vq: virtio_vdpa_del_vqs(vdev); + kfree(masks); return err; } diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index f2d2b313bde5a..9419f4e37a58c 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -443,6 +443,7 @@ struct btrfs_drop_extents_args { struct btrfs_file_private { void *filldir_buf; + u64 last_index; struct extent_state *llseek_cached_state; }; diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 6b457b010cbc4..6d51db066503b 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -1632,6 +1632,7 @@ int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode) } bool btrfs_readdir_get_delayed_items(struct inode *inode, + u64 last_index, struct list_head *ins_list, struct list_head *del_list) { @@ -1651,14 +1652,14 @@ bool btrfs_readdir_get_delayed_items(struct inode *inode, mutex_lock(&delayed_node->mutex); item = __btrfs_first_delayed_insertion_item(delayed_node); - while (item) { + while (item && item->index <= last_index) { refcount_inc(&item->refs); list_add_tail(&item->readdir_list, ins_list); item = __btrfs_next_delayed_item(item); } item = __btrfs_first_delayed_deletion_item(delayed_node); - while (item) { + while (item && item->index <= last_index) { refcount_inc(&item->refs); list_add_tail(&item->readdir_list, del_list); item = __btrfs_next_delayed_item(item); diff --git 
a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h index 4f21daa3dbc7b..dc1085b2a3976 100644 --- a/fs/btrfs/delayed-inode.h +++ b/fs/btrfs/delayed-inode.h @@ -148,6 +148,7 @@ void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info); /* Used for readdir() */ bool btrfs_readdir_get_delayed_items(struct inode *inode, + u64 last_index, struct list_head *ins_list, struct list_head *del_list); void btrfs_readdir_put_delayed_items(struct inode *inode, diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index ca765d62324f5..90ad3006ef3a7 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -902,7 +902,30 @@ static void submit_extent_page(struct btrfs_bio_ctrl *bio_ctrl, size -= len; pg_offset += len; disk_bytenr += len; - bio_ctrl->len_to_oe_boundary -= len; + + /* + * len_to_oe_boundary defaults to U32_MAX, which isn't page or + * sector aligned. alloc_new_bio() then sets it to the end of + * our ordered extent for writes into zoned devices. + * + * When len_to_oe_boundary is tracking an ordered extent, we + * trust the ordered extent code to align things properly, and + * the check above to cap our write to the ordered extent + * boundary is correct. + * + * When len_to_oe_boundary is U32_MAX, the cap above would + * result in a 4095 byte IO for the last page right before + * we hit the bio limit of UINT_MAX. bio_add_page() has all + * the checks required to make sure we don't overflow the bio, + * and we should just ignore len_to_oe_boundary completely + * unless we're using it to track an ordered extent. + * + * It's pretty hard to make a bio sized U32_MAX, but it can + * happen when the page cache is able to feed us contiguous + * pages for large extents. + */ + if (bio_ctrl->len_to_oe_boundary != U32_MAX) + bio_ctrl->len_to_oe_boundary -= len; /* Ordered extent boundary: move on to a new bio. */ if (bio_ctrl->len_to_oe_boundary == 0) diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 0cdb3e86f29b5..a6d8368ed0edd 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -760,8 +760,6 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end, if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) { start = em_end; - if (end != (u64)-1) - len = start + len - em_end; goto next; } @@ -829,8 +827,8 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end, if (!split) goto remove_em; } - split->start = start + len; - split->len = em_end - (start + len); + split->start = end; + split->len = em_end - end; split->block_start = em->block_start; split->flags = flags; split->compress_type = em->compress_type; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 9055e19b01ef1..aa090b0b5d298 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -5872,6 +5872,74 @@ static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, return d_splice_alias(inode, dentry); } +/* + * Find the highest existing sequence number in a directory and then set the + * in-memory index_cnt variable to the first free sequence number. 
+ */ +static int btrfs_set_inode_index_count(struct btrfs_inode *inode) +{ + struct btrfs_root *root = inode->root; + struct btrfs_key key, found_key; + struct btrfs_path *path; + struct extent_buffer *leaf; + int ret; + + key.objectid = btrfs_ino(inode); + key.type = BTRFS_DIR_INDEX_KEY; + key.offset = (u64)-1; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + goto out; + /* FIXME: we should be able to handle this */ + if (ret == 0) + goto out; + ret = 0; + + if (path->slots[0] == 0) { + inode->index_cnt = BTRFS_DIR_START_INDEX; + goto out; + } + + path->slots[0]--; + + leaf = path->nodes[0]; + btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); + + if (found_key.objectid != btrfs_ino(inode) || + found_key.type != BTRFS_DIR_INDEX_KEY) { + inode->index_cnt = BTRFS_DIR_START_INDEX; + goto out; + } + + inode->index_cnt = found_key.offset + 1; +out: + btrfs_free_path(path); + return ret; +} + +static int btrfs_get_dir_last_index(struct btrfs_inode *dir, u64 *index) +{ + if (dir->index_cnt == (u64)-1) { + int ret; + + ret = btrfs_inode_delayed_dir_index_count(dir); + if (ret) { + ret = btrfs_set_inode_index_count(dir); + if (ret) + return ret; + } + } + + *index = dir->index_cnt; + + return 0; +} + /* * All this infrastructure exists because dir_emit can fault, and we are holding * the tree lock when doing readdir. For now just allocate a buffer and copy @@ -5884,10 +5952,17 @@ static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, static int btrfs_opendir(struct inode *inode, struct file *file) { struct btrfs_file_private *private; + u64 last_index; + int ret; + + ret = btrfs_get_dir_last_index(BTRFS_I(inode), &last_index); + if (ret) + return ret; private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL); if (!private) return -ENOMEM; + private->last_index = last_index; private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!private->filldir_buf) { kfree(private); @@ -5954,7 +6029,8 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) INIT_LIST_HEAD(&ins_list); INIT_LIST_HEAD(&del_list); - put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list); + put = btrfs_readdir_get_delayed_items(inode, private->last_index, + &ins_list, &del_list); again: key.type = BTRFS_DIR_INDEX_KEY; @@ -5972,6 +6048,8 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) break; if (found_key.offset < ctx->pos) continue; + if (found_key.offset > private->last_index) + break; if (btrfs_should_delete_dir_index(&del_list, found_key.offset)) continue; di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item); @@ -6107,57 +6185,6 @@ static int btrfs_update_time(struct inode *inode, struct timespec64 *now, return dirty ? 
btrfs_dirty_inode(BTRFS_I(inode)) : 0; } -/* - * find the highest existing sequence number in a directory - * and then set the in-memory index_cnt variable to reflect - * free sequence numbers - */ -static int btrfs_set_inode_index_count(struct btrfs_inode *inode) -{ - struct btrfs_root *root = inode->root; - struct btrfs_key key, found_key; - struct btrfs_path *path; - struct extent_buffer *leaf; - int ret; - - key.objectid = btrfs_ino(inode); - key.type = BTRFS_DIR_INDEX_KEY; - key.offset = (u64)-1; - - path = btrfs_alloc_path(); - if (!path) - return -ENOMEM; - - ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); - if (ret < 0) - goto out; - /* FIXME: we should be able to handle this */ - if (ret == 0) - goto out; - ret = 0; - - if (path->slots[0] == 0) { - inode->index_cnt = BTRFS_DIR_START_INDEX; - goto out; - } - - path->slots[0]--; - - leaf = path->nodes[0]; - btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - - if (found_key.objectid != btrfs_ino(inode) || - found_key.type != BTRFS_DIR_INDEX_KEY) { - inode->index_cnt = BTRFS_DIR_START_INDEX; - goto out; - } - - inode->index_cnt = found_key.offset + 1; -out: - btrfs_free_path(path); - return ret; -} - /* * helper to find a free sequence number in a given directory. This current * code is very simple, later versions will do smarter things in the btree diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 4cae41bd6de07..7289f5bff397d 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -605,7 +605,8 @@ static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr btrfs_stack_header_bytenr(header), logical); return; } - if (memcmp(header->fsid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE) != 0) { + if (memcmp(header->fsid, fs_info->fs_devices->metadata_uuid, + BTRFS_FSID_SIZE) != 0) { bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); btrfs_warn_rl(fs_info, diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 2ecb76cf3d91f..6aa9bf3661ac8 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -4638,8 +4638,7 @@ int btrfs_cancel_balance(struct btrfs_fs_info *fs_info) } } - BUG_ON(fs_info->balance_ctl || - test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); + ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); atomic_dec(&fs_info->balance_cancel_req); mutex_unlock(&fs_info->balance_mutex); return 0; diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 9a18c5a69ace6..aaffaaa336cc5 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -472,20 +472,26 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter, return result; } -static void -nfs_direct_join_group(struct list_head *list, struct inode *inode) +static void nfs_direct_join_group(struct list_head *list, struct inode *inode) { - struct nfs_page *req, *next; + struct nfs_page *req, *subreq; list_for_each_entry(req, list, wb_list) { - if (req->wb_head != req || req->wb_this_page == req) + if (req->wb_head != req) continue; - for (next = req->wb_this_page; - next != req->wb_head; - next = next->wb_this_page) { - nfs_list_remove_request(next); - nfs_release_request(next); - } + subreq = req->wb_this_page; + if (subreq == req) + continue; + do { + /* + * Remove subrequests from this list before freeing + * them in the call to nfs_join_page_group(). 
+ */ + if (!list_empty(&subreq->wb_list)) { + nfs_list_remove_request(subreq); + nfs_release_request(subreq); + } + } while ((subreq = subreq->wb_this_page) != req); nfs_join_page_group(req, inode); } } diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index 63802d1955566..49f78e23b34c0 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -1377,7 +1377,6 @@ ssize_t nfs42_proc_getxattr(struct inode *inode, const char *name, for (i = 0; i < np; i++) { pages[i] = alloc_page(GFP_KERNEL); if (!pages[i]) { - np = i + 1; err = -ENOMEM; goto out; } @@ -1401,8 +1400,8 @@ ssize_t nfs42_proc_getxattr(struct inode *inode, const char *name, } while (exception.retry); out: - while (--np >= 0) - __free_page(pages[np]); + while (--i >= 0) + __free_page(pages[i]); kfree(pages); return err; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index e1a886b58354c..832fa226b8f26 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -6004,9 +6004,8 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, out_ok: ret = res.acl_len; out_free: - for (i = 0; i < npages; i++) - if (pages[i]) - __free_page(pages[i]); + while (--i >= 0) + __free_page(pages[i]); if (res.acl_scratch) __free_page(res.acl_scratch); kfree(pages); @@ -7181,8 +7180,15 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata) } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid)) goto out_restart; break; - case -NFS4ERR_BAD_STATEID: case -NFS4ERR_OLD_STATEID: + if (data->arg.new_lock_owner != 0 && + nfs4_refresh_open_old_stateid(&data->arg.open_stateid, + lsp->ls_state)) + goto out_restart; + if (nfs4_refresh_lock_old_stateid(&data->arg.lock_stateid, lsp)) + goto out_restart; + fallthrough; + case -NFS4ERR_BAD_STATEID: case -NFS4ERR_STALE_STATEID: case -NFS4ERR_EXPIRED: if (data->arg.new_lock_owner != 0) { diff --git a/fs/nfs/sysfs.c b/fs/nfs/sysfs.c index acda8f033d30d..bf378ecd5d9fd 100644 --- a/fs/nfs/sysfs.c +++ b/fs/nfs/sysfs.c @@ -345,8 +345,10 @@ void nfs_sysfs_move_sb_to_server(struct nfs_server *server) int ret = -ENOMEM; s = kasprintf(GFP_KERNEL, "server-%d", server->s_sysfs_id); - if (s) + if (s) { ret = kobject_rename(&server->kobj, s); + kfree(s); + } if (ret < 0) pr_warn("NFS: rename sysfs %s failed (%d)\n", server->kobj.name, ret); diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 3aefbad4cc099..daf305daa7516 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -1354,9 +1354,9 @@ static void revoke_delegation(struct nfs4_delegation *dp) trace_nfsd_stid_revoke(&dp->dl_stid); if (clp->cl_minorversion) { + spin_lock(&clp->cl_lock); dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID; refcount_inc(&dp->dl_stid.sc_count); - spin_lock(&clp->cl_lock); list_add(&dp->dl_recall_lru, &clp->cl_revoked); spin_unlock(&clp->cl_lock); } diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index 1b8b1aab9a156..4302ca0ff6ed5 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -1105,6 +1105,7 @@ static ssize_t write_v4_end_grace(struct file *file, char *buf, size_t size) if (!nn->nfsd_serv) return -EBUSY; trace_nfsd_end_grace(netns(file)); + nfsd4_end_grace(nn); break; default: return -EINVAL; diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 581691e4be491..7ec16879756e8 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -725,6 +725,11 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode, struct folio *folio = fbatch.folios[i]; folio_lock(folio); + if (unlikely(folio->mapping != mapping)) { + /* Exclude folios removed from the address space */ + 
folio_unlock(folio); + continue; + } head = folio_buffers(folio); if (!head) { create_empty_buffers(&folio->page, i_blocksize(inode), 0); diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 507cd4e59d074..fafff1bd34cd7 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -587,8 +587,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, bool migration = false; if (pmd_present(*pmd)) { - /* FOLL_DUMP will return -EFAULT on huge zero page */ - page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP); + page = vm_normal_page_pmd(vma, addr, *pmd); } else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) { swp_entry_t entry = pmd_to_swp_entry(*pmd); @@ -758,12 +757,14 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask, static const struct mm_walk_ops smaps_walk_ops = { .pmd_entry = smaps_pte_range, .hugetlb_entry = smaps_hugetlb_range, + .walk_lock = PGWALK_RDLOCK, }; static const struct mm_walk_ops smaps_shmem_walk_ops = { .pmd_entry = smaps_pte_range, .hugetlb_entry = smaps_hugetlb_range, .pte_hole = smaps_pte_hole, + .walk_lock = PGWALK_RDLOCK, }; /* @@ -1245,6 +1246,7 @@ static int clear_refs_test_walk(unsigned long start, unsigned long end, static const struct mm_walk_ops clear_refs_walk_ops = { .pmd_entry = clear_refs_pte_range, .test_walk = clear_refs_test_walk, + .walk_lock = PGWALK_WRLOCK, }; static ssize_t clear_refs_write(struct file *file, const char __user *buf, @@ -1622,6 +1624,7 @@ static const struct mm_walk_ops pagemap_ops = { .pmd_entry = pagemap_pmd_range, .pte_hole = pagemap_pte_hole, .hugetlb_entry = pagemap_hugetlb_range, + .walk_lock = PGWALK_RDLOCK, }; /* @@ -1935,6 +1938,7 @@ static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask, static const struct mm_walk_ops show_numa_ops = { .hugetlb_entry = gather_hugetlb_stats, .pmd_entry = gather_pte_stats, + .walk_lock = PGWALK_RDLOCK, }; /* diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c index fb4162a52844a..aec6e91374742 100644 --- a/fs/smb/client/cifs_debug.c +++ b/fs/smb/client/cifs_debug.c @@ -153,6 +153,11 @@ cifs_dump_channel(struct seq_file *m, int i, struct cifs_chan *chan) in_flight(server), atomic_read(&server->in_send), atomic_read(&server->num_waiters)); +#ifdef CONFIG_NET_NS + if (server->net) + seq_printf(m, " Net namespace: %u ", server->net->ns.inum); +#endif /* NET_NS */ + } static inline const char *smb_speed_to_str(size_t bps) @@ -430,10 +435,15 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v) server->reconnect_instance, server->srv_count, server->sec_mode, in_flight(server)); +#ifdef CONFIG_NET_NS + if (server->net) + seq_printf(m, " Net namespace: %u ", server->net->ns.inum); +#endif /* NET_NS */ seq_printf(m, "\nIn Send: %d In MaxReq Wait: %d", atomic_read(&server->in_send), atomic_read(&server->num_waiters)); + if (server->leaf_fullpath) { seq_printf(m, "\nDFS leaf full path: %s", server->leaf_fullpath); diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c index fc5acc95cd13f..6bc44f79d2e90 100644 --- a/fs/smb/client/file.c +++ b/fs/smb/client/file.c @@ -4681,9 +4681,9 @@ static int cifs_readpage_worker(struct file *file, struct page *page, io_error: kunmap(page); - unlock_page(page); read_complete: + unlock_page(page); return rc; } @@ -4878,9 +4878,11 @@ void cifs_oplock_break(struct work_struct *work) struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo, oplock_break); struct inode *inode = d_inode(cfile->dentry); + struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct 
cifsInodeInfo *cinode = CIFS_I(inode); - struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); - struct TCP_Server_Info *server = tcon->ses->server; + struct cifs_tcon *tcon; + struct TCP_Server_Info *server; + struct tcon_link *tlink; int rc = 0; bool purge_cache = false, oplock_break_cancelled; __u64 persistent_fid, volatile_fid; @@ -4889,6 +4891,12 @@ void cifs_oplock_break(struct work_struct *work) wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS, TASK_UNINTERRUPTIBLE); + tlink = cifs_sb_tlink(cifs_sb); + if (IS_ERR(tlink)) + goto out; + tcon = tlink_tcon(tlink); + server = tcon->ses->server; + server->ops->downgrade_oplock(server, cinode, cfile->oplock_level, cfile->oplock_epoch, &purge_cache); @@ -4938,18 +4946,19 @@ void cifs_oplock_break(struct work_struct *work) /* * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require * an acknowledgment to be sent when the file has already been closed. - * check for server null, since can race with kill_sb calling tree disconnect. */ spin_lock(&cinode->open_file_lock); - if (tcon->ses && tcon->ses->server && !oplock_break_cancelled && - !list_empty(&cinode->openFileList)) { + /* check list empty since can race with kill_sb calling tree disconnect */ + if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) { spin_unlock(&cinode->open_file_lock); - rc = tcon->ses->server->ops->oplock_response(tcon, persistent_fid, - volatile_fid, net_fid, cinode); + rc = server->ops->oplock_response(tcon, persistent_fid, + volatile_fid, net_fid, cinode); cifs_dbg(FYI, "Oplock release rc = %d\n", rc); } else spin_unlock(&cinode->open_file_lock); + cifs_put_tlink(tlink); +out: cifs_done_oplock_break(cinode); } diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c index 4946a0c596009..67e16c2ac90e6 100644 --- a/fs/smb/client/fs_context.c +++ b/fs/smb/client/fs_context.c @@ -231,6 +231,8 @@ cifs_parse_security_flavors(struct fs_context *fc, char *value, struct smb3_fs_c break; case Opt_sec_none: ctx->nullauth = 1; + kfree(ctx->username); + ctx->username = NULL; break; default: cifs_errorf(fc, "bad security option: %s\n", value); @@ -1201,6 +1203,8 @@ static int smb3_fs_context_parse_param(struct fs_context *fc, case Opt_user: kfree(ctx->username); ctx->username = NULL; + if (ctx->nullauth) + break; if (strlen(param->string) == 0) { /* null user, ie. 
anonymous authentication */ ctx->nullauth = 1; diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h index 02f2ac4dd2df6..e69cece404b3c 100644 --- a/include/drm/display/drm_dp.h +++ b/include/drm/display/drm_dp.h @@ -1537,7 +1537,7 @@ enum drm_dp_phy { #define DP_BRANCH_OUI_HEADER_SIZE 0xc #define DP_RECEIVER_CAP_SIZE 0xf -#define DP_DSC_RECEIVER_CAP_SIZE 0xf +#define DP_DSC_RECEIVER_CAP_SIZE 0x10 /* DSC Capabilities 0x60 through 0x6F */ #define EDP_PSR_RECEIVER_CAP_SIZE 2 #define EDP_DISPLAY_CTL_CAP_SIZE 3 #define DP_LTTPR_COMMON_CAP_SIZE 8 diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 169755d3de190..48e93f909ef6f 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -61,15 +61,9 @@ struct std_timing { u8 vfreq_aspect; } __attribute__((packed)); -#define DRM_EDID_PT_SYNC_MASK (3 << 3) -# define DRM_EDID_PT_ANALOG_CSYNC (0 << 3) -# define DRM_EDID_PT_BIPOLAR_ANALOG_CSYNC (1 << 3) -# define DRM_EDID_PT_DIGITAL_CSYNC (2 << 3) -# define DRM_EDID_PT_CSYNC_ON_RGB (1 << 1) /* analog csync only */ -# define DRM_EDID_PT_CSYNC_SERRATE (1 << 2) -# define DRM_EDID_PT_DIGITAL_SEPARATE_SYNC (3 << 3) -# define DRM_EDID_PT_HSYNC_POSITIVE (1 << 1) /* also digital csync */ -# define DRM_EDID_PT_VSYNC_POSITIVE (1 << 2) +#define DRM_EDID_PT_HSYNC_POSITIVE (1 << 1) +#define DRM_EDID_PT_VSYNC_POSITIVE (1 << 2) +#define DRM_EDID_PT_SEPARATE_SYNC (3 << 3) #define DRM_EDID_PT_STEREO (1 << 5) #define DRM_EDID_PT_INTERLACED (1 << 7) diff --git a/include/drm/drm_probe_helper.h b/include/drm/drm_probe_helper.h index 4977e0ab72dbb..fad3c4003b2b5 100644 --- a/include/drm/drm_probe_helper.h +++ b/include/drm/drm_probe_helper.h @@ -25,6 +25,7 @@ void drm_kms_helper_connector_hotplug_event(struct drm_connector *connector); void drm_kms_helper_poll_disable(struct drm_device *dev); void drm_kms_helper_poll_enable(struct drm_device *dev); +void drm_kms_helper_poll_reschedule(struct drm_device *dev); bool drm_kms_helper_is_poll_worker(void); enum drm_mode_status drm_crtc_helper_mode_valid_fixed(struct drm_crtc *crtc, diff --git a/include/linux/clk.h b/include/linux/clk.h index 1ef0133242374..06f1b292f8a00 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -183,6 +183,39 @@ int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale); */ bool clk_is_match(const struct clk *p, const struct clk *q); +/** + * clk_rate_exclusive_get - get exclusivity over the rate control of a + * producer + * @clk: clock source + * + * This function allows drivers to get exclusive control over the rate of a + * provider. It prevents any other consumer to execute, even indirectly, + * opereation which could alter the rate of the provider or cause glitches + * + * If exlusivity is claimed more than once on clock, even by the same driver, + * the rate effectively gets locked as exclusivity can't be preempted. + * + * Must not be called from within atomic context. + * + * Returns success (0) or negative errno. + */ +int clk_rate_exclusive_get(struct clk *clk); + +/** + * clk_rate_exclusive_put - release exclusivity over the rate control of a + * producer + * @clk: clock source + * + * This function allows drivers to release the exclusivity it previously got + * from clk_rate_exclusive_get() + * + * The caller must balance the number of clk_rate_exclusive_get() and + * clk_rate_exclusive_put() calls. + * + * Must not be called from within atomic context. 
+ */ +void clk_rate_exclusive_put(struct clk *clk); + #else static inline int clk_notifier_register(struct clk *clk, @@ -236,6 +269,13 @@ static inline bool clk_is_match(const struct clk *p, const struct clk *q) return p == q; } +static inline int clk_rate_exclusive_get(struct clk *clk) +{ + return 0; +} + +static inline void clk_rate_exclusive_put(struct clk *clk) {} + #endif #ifdef CONFIG_HAVE_CLK_PREPARE @@ -583,38 +623,6 @@ struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id); */ struct clk *devm_get_clk_from_child(struct device *dev, struct device_node *np, const char *con_id); -/** - * clk_rate_exclusive_get - get exclusivity over the rate control of a - * producer - * @clk: clock source - * - * This function allows drivers to get exclusive control over the rate of a - * provider. It prevents any other consumer to execute, even indirectly, - * opereation which could alter the rate of the provider or cause glitches - * - * If exlusivity is claimed more than once on clock, even by the same driver, - * the rate effectively gets locked as exclusivity can't be preempted. - * - * Must not be called from within atomic context. - * - * Returns success (0) or negative errno. - */ -int clk_rate_exclusive_get(struct clk *clk); - -/** - * clk_rate_exclusive_put - release exclusivity over the rate control of a - * producer - * @clk: clock source - * - * This function allows drivers to release the exclusivity it previously got - * from clk_rate_exclusive_get() - * - * The caller must balance the number of clk_rate_exclusive_get() and - * clk_rate_exclusive_put() calls. - * - * Must not be called from within atomic context. - */ -void clk_rate_exclusive_put(struct clk *clk); /** * clk_enable - inform the system when the clock source should be running. @@ -974,14 +982,6 @@ static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {} static inline void devm_clk_put(struct device *dev, struct clk *clk) {} - -static inline int clk_rate_exclusive_get(struct clk *clk) -{ - return 0; -} - -static inline void clk_rate_exclusive_put(struct clk *clk) {} - static inline int clk_enable(struct clk *clk) { return 0; diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 20284387b841b..e718dbe928bae 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -25,9 +25,6 @@ static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) #endif vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf); -struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, - unsigned long addr, pmd_t *pmd, - unsigned int flags); bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long next); int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, diff --git a/include/linux/mm.h b/include/linux/mm.h index 406ab9ea818fe..34f9dba17c1a7 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3421,15 +3421,24 @@ static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags) * Indicates whether GUP can follow a PROT_NONE mapped page, or whether * a (NUMA hinting) fault is required. */ -static inline bool gup_can_follow_protnone(unsigned int flags) +static inline bool gup_can_follow_protnone(struct vm_area_struct *vma, + unsigned int flags) { /* - * FOLL_FORCE has to be able to make progress even if the VMA is - * inaccessible. 
Further, FOLL_FORCE access usually does not represent - * application behaviour and we should avoid triggering NUMA hinting - * faults. + * If callers don't want to honor NUMA hinting faults, no need to + * determine if we would actually have to trigger a NUMA hinting fault. */ - return flags & FOLL_FORCE; + if (!(flags & FOLL_HONOR_NUMA_FAULT)) + return true; + + /* + * NUMA hinting faults don't apply in inaccessible (PROT_NONE) VMAs. + * + * Requiring a fault here even for inaccessible VMAs would mean that + * FOLL_FORCE cannot make any progress, because handle_mm_fault() + * refuses to process NUMA hinting faults in inaccessible VMAs. + */ + return !vma_is_accessible(vma); } typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 5e74ce4a28cd6..7d30dc4ff0ff1 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -1286,6 +1286,15 @@ enum { FOLL_PCI_P2PDMA = 1 << 10, /* allow interrupts from generic signals */ FOLL_INTERRUPTIBLE = 1 << 11, + /* + * Always honor (trigger) NUMA hinting faults. + * + * FOLL_WRITE implicitly honors NUMA hinting faults because a + * PROT_NONE-mapped page is not writable (exceptions with FOLL_FORCE + * apply). get_user_pages_fast_only() always implicitly honors NUMA + * hinting faults. + */ + FOLL_HONOR_NUMA_FAULT = 1 << 12, /* See also internal only FOLL flags in mm/internal.h */ }; diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h index 27a6df448ee56..27cd1e59ccf77 100644 --- a/include/linux/pagewalk.h +++ b/include/linux/pagewalk.h @@ -6,6 +6,16 @@ struct mm_walk; +/* Locking requirement during a page walk. */ +enum page_walk_lock { + /* mmap_lock should be locked for read to stabilize the vma tree */ + PGWALK_RDLOCK = 0, + /* vma will be write-locked during the walk */ + PGWALK_WRLOCK = 1, + /* vma is expected to be already write-locked during the walk */ + PGWALK_WRLOCK_VERIFY = 2, +}; + /** * struct mm_walk_ops - callbacks for walk_page_range * @pgd_entry: if set, called for each non-empty PGD (top-level) entry @@ -66,6 +76,7 @@ struct mm_walk_ops { int (*pre_vma)(unsigned long start, unsigned long end, struct mm_walk *walk); void (*post_vma)(struct mm_walk *walk); + enum page_walk_lock walk_lock; }; /* diff --git a/include/linux/raid_class.h b/include/linux/raid_class.h index 6a9b177d5c414..e50416ba9cd93 100644 --- a/include/linux/raid_class.h +++ b/include/linux/raid_class.h @@ -77,7 +77,3 @@ DEFINE_RAID_ATTRIBUTE(enum raid_state, state) struct raid_template *raid_class_attach(struct raid_function_template *); void raid_class_release(struct raid_template *); - -int __must_check raid_component_add(struct raid_template *, struct device *, - struct device *); - diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 6d58c57acdaa1..a156d2ed8d9ee 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -459,7 +459,8 @@ struct uart_port { struct serial_rs485 *rs485); int (*iso7816_config)(struct uart_port *, struct serial_iso7816 *iso7816); - int ctrl_id; /* optional serial core controller id */ + unsigned int ctrl_id; /* optional serial core controller id */ + unsigned int port_id; /* optional serial core port id */ unsigned int irq; /* irq number */ unsigned long irqflags; /* irq flags */ unsigned int uartclk; /* base uart clock */ diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 3930e676436c9..1e8bbdb8da905 100644 --- a/include/linux/trace_events.h +++ 
b/include/linux/trace_events.h @@ -59,6 +59,17 @@ int trace_raw_output_prep(struct trace_iterator *iter, extern __printf(2, 3) void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...); +/* Used to find the offset and length of dynamic fields in trace events */ +struct trace_dynamic_info { +#ifdef CONFIG_CPU_BIG_ENDIAN + u16 offset; + u16 len; +#else + u16 len; + u16 offset; +#endif +}; + /* * The trace entry - the most basic unit of tracing. This is what * is printed in the end as a single line in the trace output, such as: diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index bdf8de2cdd935..7b4dd69555e49 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -155,6 +155,10 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, if (gso_type & SKB_GSO_UDP) nh_off -= thlen; + /* Kernel has a special handling for GSO_BY_FRAGS. */ + if (gso_size == GSO_BY_FRAGS) + return -EINVAL; + /* Too small packets are not really GSO ones. */ if (skb->len - nh_off > gso_size) { shinfo->gso_size = gso_size; diff --git a/include/net/bonding.h b/include/net/bonding.h index 30ac427cf0c61..5b8b1b644a2db 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -722,23 +722,14 @@ static inline struct slave *bond_slave_has_mac(struct bonding *bond, } /* Caller must hold rcu_read_lock() for read */ -static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac) +static inline bool bond_slave_has_mac_rcu(struct bonding *bond, const u8 *mac) { struct list_head *iter; struct slave *tmp; - struct netdev_hw_addr *ha; bond_for_each_slave_rcu(bond, tmp, iter) if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr)) return true; - - if (netdev_uc_empty(bond->dev)) - return false; - - netdev_for_each_uc_addr(ha, bond->dev) - if (ether_addr_equal_64bits(mac, ha->addr)) - return true; - return false; } diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index 0bb32bfc61832..491ceb7ebe5d1 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -222,8 +222,8 @@ struct inet_sock { __s16 uc_ttl; __u16 cmsg_flags; struct ip_options_rcu __rcu *inet_opt; + atomic_t inet_id; __be16 inet_sport; - __u16 inet_id; __u8 tos; __u8 min_ttl; diff --git a/include/net/ip.h b/include/net/ip.h index 332521170d9b8..19adacd5ece03 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -538,8 +538,19 @@ static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb, * generator as much as we can. */ if (sk && inet_sk(sk)->inet_daddr) { - iph->id = htons(inet_sk(sk)->inet_id); - inet_sk(sk)->inet_id += segs; + int val; + + /* avoid atomic operations for TCP, + * as we hold socket lock at this point. + */ + if (sk_is_tcp(sk)) { + sock_owned_by_me(sk); + val = atomic_read(&inet_sk(sk)->inet_id); + atomic_set(&inet_sk(sk)->inet_id, val + segs); + } else { + val = atomic_add_return(segs, &inet_sk(sk)->inet_id); + } + iph->id = htons(val); return; } if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) { diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 3a8a2d2c58c38..2a55ae932c568 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -6612,6 +6612,7 @@ void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, u16 ba_rx_bitmap, * marks frames marked in the bitmap as having been filtered. Afterwards, it * checks if any frames in the window starting from @ssn can now be released * (in case they were only waiting for frames that were filtered.) 
+ * (Only work correctly if @max_rx_aggregation_subframes <= 64 frames) */ void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid, u16 ssn, u64 filtered, diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 35870858ddf26..dd40c75011d25 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -534,6 +534,7 @@ struct nft_set_elem_expr { * @expr: stateful expression * @ops: set ops * @flags: set flags + * @dead: set will be freed, never cleared * @genmask: generation mask * @klen: key length * @dlen: data length @@ -586,6 +587,11 @@ static inline void *nft_set_priv(const struct nft_set *set) return (void *)set->data; } +static inline bool nft_set_gc_is_pending(const struct nft_set *s) +{ + return refcount_read(&s->refs) != 1; +} + static inline struct nft_set *nft_set_container_of(const void *priv) { return (void *)priv - offsetof(struct nft_set, data); @@ -1728,6 +1734,7 @@ struct nftables_pernet { u64 table_handle; unsigned int base_seq; unsigned int gc_seq; + u8 validate_state; }; extern unsigned int nf_tables_net_id; diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index d9076a7a430c2..6506221c5fe31 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -190,8 +190,8 @@ int rtnl_delete_link(struct net_device *dev, u32 portid, const struct nlmsghdr * int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm, u32 portid, const struct nlmsghdr *nlh); -int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len, - struct netlink_ext_ack *exterr); +int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer, + struct netlink_ext_ack *exterr); struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid); #define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind) diff --git a/include/net/sock.h b/include/net/sock.h index 2eb916d1ff648..690e22139543f 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1323,6 +1323,7 @@ struct proto { /* * Pressure flag: try to collapse. * Technical note: it is used by multiple contexts non atomically. + * Make sure to use READ_ONCE()/WRITE_ONCE() for all reads/writes. * All the __sk_mem_schedule() is of this nature: accounting * is strict, actions are advisory and have some latency. 
*/ @@ -1420,6 +1421,12 @@ static inline bool sk_has_memory_pressure(const struct sock *sk) return sk->sk_prot->memory_pressure != NULL; } +static inline bool sk_under_global_memory_pressure(const struct sock *sk) +{ + return sk->sk_prot->memory_pressure && + !!READ_ONCE(*sk->sk_prot->memory_pressure); +} + static inline bool sk_under_memory_pressure(const struct sock *sk) { if (!sk->sk_prot->memory_pressure) @@ -1429,7 +1436,7 @@ static inline bool sk_under_memory_pressure(const struct sock *sk) mem_cgroup_under_socket_pressure(sk->sk_memcg)) return true; - return !!*sk->sk_prot->memory_pressure; + return !!READ_ONCE(*sk->sk_prot->memory_pressure); } static inline long @@ -1506,7 +1513,7 @@ proto_memory_pressure(struct proto *prot) { if (!prot->memory_pressure) return false; - return !!*prot->memory_pressure; + return !!READ_ONCE(*prot->memory_pressure); } diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 151ca95dd08db..363c7d5105542 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1984,6 +1984,7 @@ static inline void xfrm_dev_state_free(struct xfrm_state *x) if (dev->xfrmdev_ops->xdo_dev_state_free) dev->xfrmdev_ops->xdo_dev_state_free(x); xso->dev = NULL; + xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED; netdev_put(dev, &xso->dev_tracker); } } diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h index 0c8cf359ea5b4..e0e1591383312 100644 --- a/include/uapi/linux/elf.h +++ b/include/uapi/linux/elf.h @@ -443,7 +443,6 @@ typedef struct elf64_shdr { #define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */ #define NT_MIPS_FP_MODE 0x801 /* MIPS floating-point mode */ #define NT_MIPS_MSA 0x802 /* MIPS SIMD registers */ -#define NT_RISCV_VECTOR 0x900 /* RISC-V vector registers */ #define NT_LOONGARCH_CPUCFG 0xa00 /* LoongArch CPU config registers */ #define NT_LOONGARCH_CSR 0xa01 /* LoongArch control and status registers */ #define NT_LOONGARCH_LSX 0xa02 /* LoongArch Loongson SIMD Extension registers */ diff --git a/include/video/kyro.h b/include/video/kyro.h index b958c2e9c9158..418eef6c55231 100644 --- a/include/video/kyro.h +++ b/include/video/kyro.h @@ -38,18 +38,6 @@ struct kyrofb_info { int wc_cookie; }; -extern int kyro_dev_init(void); -extern void kyro_dev_reset(void); - -extern unsigned char *kyro_dev_physical_fb_ptr(void); -extern unsigned char *kyro_dev_virtual_fb_ptr(void); -extern void *kyro_dev_physical_regs_ptr(void); -extern void *kyro_dev_virtual_regs_ptr(void); -extern unsigned int kyro_dev_fb_size(void); -extern unsigned int kyro_dev_regs_size(void); - -extern u32 kyro_dev_overlay_offset(void); - /* * benedict.gaster@superh.com * Added the follow IOCTLS for the creation of overlay services... 
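The net/sock.h hunk above annotates every lockless access to the shared memory_pressure flag with READ_ONCE() (and adds sk_under_global_memory_pressure() using the same idiom), matching the new note in struct proto that all reads and writes of the flag must be marked. As a hedged, self-contained sketch of that idiom — not part of the patch; the pressure_flag variable, under_pressure() helper, reclaim_thread() and the minimal READ_ONCE/WRITE_ONCE stand-ins are invented for illustration, the kernel's real macros are more elaborate:

#include <pthread.h>
#include <stdio.h>

/* Minimal stand-ins for the kernel macros, for illustration only. */
#define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))

/* Invented flag standing in for *sk->sk_prot->memory_pressure. */
static unsigned long pressure_flag;

/* Mirrors the sk_under_memory_pressure() change: annotate the lockless read. */
static int under_pressure(void)
{
	return !!READ_ONCE(pressure_flag);
}

/* Another context flips the flag without holding any lock. */
static void *reclaim_thread(void *arg)
{
	(void)arg;
	WRITE_ONCE(pressure_flag, 1);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, reclaim_thread, NULL);
	pthread_join(t, NULL);
	printf("under pressure: %d\n", under_pressure());
	return 0;
}

Built with gcc -pthread, the only point of the sketch is that every cross-context access to the flag goes through the annotated accessors rather than a plain load or store.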
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c index edec335c0a7ac..5f2c66860ac64 100644 --- a/kernel/irq/resend.c +++ b/kernel/irq/resend.c @@ -68,11 +68,16 @@ static int irq_sw_resend(struct irq_desc *desc) */ if (!desc->parent_irq) return -EINVAL; + + desc = irq_to_desc(desc->parent_irq); + if (!desc) + return -EINVAL; } /* Add to resend_list and activate the softirq: */ raw_spin_lock(&irq_resend_lock); - hlist_add_head(&desc->resend_node, &irq_resend_list); + if (hlist_unhashed(&desc->resend_node)) + hlist_add_head(&desc->resend_node, &irq_resend_list); raw_spin_unlock(&irq_resend_lock); tasklet_schedule(&resend_tasklet); return 0; diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index b8870078ef58d..8e64aaad53619 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -4213,8 +4213,15 @@ static void *s_start(struct seq_file *m, loff_t *pos) * will point to the same string as current_trace->name. */ mutex_lock(&trace_types_lock); - if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) + if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) { + /* Close iter->trace before switching to the new current tracer */ + if (iter->trace->close) + iter->trace->close(iter); *iter->trace = *tr->current_trace; + /* Reopen the new current tracer */ + if (iter->trace->open) + iter->trace->open(iter); + } mutex_unlock(&trace_types_lock); #ifdef CONFIG_TRACER_MAX_TRACE @@ -5277,11 +5284,17 @@ int tracing_set_cpumask(struct trace_array *tr, !cpumask_test_cpu(cpu, tracing_cpumask_new)) { atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled); ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu); +#ifdef CONFIG_TRACER_MAX_TRACE + ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu); +#endif } if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) && cpumask_test_cpu(cpu, tracing_cpumask_new)) { atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled); ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu); +#ifdef CONFIG_TRACER_MAX_TRACE + ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu); +#endif } } arch_spin_unlock(&tr->max_lock); @@ -6705,10 +6718,36 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf, #endif +static int open_pipe_on_cpu(struct trace_array *tr, int cpu) +{ + if (cpu == RING_BUFFER_ALL_CPUS) { + if (cpumask_empty(tr->pipe_cpumask)) { + cpumask_setall(tr->pipe_cpumask); + return 0; + } + } else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) { + cpumask_set_cpu(cpu, tr->pipe_cpumask); + return 0; + } + return -EBUSY; +} + +static void close_pipe_on_cpu(struct trace_array *tr, int cpu) +{ + if (cpu == RING_BUFFER_ALL_CPUS) { + WARN_ON(!cpumask_full(tr->pipe_cpumask)); + cpumask_clear(tr->pipe_cpumask); + } else { + WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask)); + cpumask_clear_cpu(cpu, tr->pipe_cpumask); + } +} + static int tracing_open_pipe(struct inode *inode, struct file *filp) { struct trace_array *tr = inode->i_private; struct trace_iterator *iter; + int cpu; int ret; ret = tracing_check_open_get_tr(tr); @@ -6716,13 +6755,16 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) return ret; mutex_lock(&trace_types_lock); + cpu = tracing_get_cpu(inode); + ret = open_pipe_on_cpu(tr, cpu); + if (ret) + goto fail_pipe_on_cpu; /* create a buffer to store the information to pass to userspace */ iter = kzalloc(sizeof(*iter), GFP_KERNEL); if (!iter) { ret = -ENOMEM; - __trace_array_put(tr); - goto out; + goto fail_alloc_iter; } 
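	/*
	 * One per-CPU trace_pipe can be opened by only one user:
	 * open_pipe_on_cpu() above claims the requested CPU (or all CPUs)
	 * in tr->pipe_cpumask and returns -EBUSY if it is already taken;
	 * close_pipe_on_cpu() releases the claim again in
	 * tracing_release_pipe() and on the fail_alloc_iter error path
	 * below.
	 */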
trace_seq_init(&iter->seq); @@ -6745,7 +6787,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) iter->tr = tr; iter->array_buffer = &tr->array_buffer; - iter->cpu_file = tracing_get_cpu(inode); + iter->cpu_file = cpu; mutex_init(&iter->mutex); filp->private_data = iter; @@ -6755,12 +6797,15 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) nonseekable_open(inode, filp); tr->trace_ref++; -out: + mutex_unlock(&trace_types_lock); return ret; fail: kfree(iter); +fail_alloc_iter: + close_pipe_on_cpu(tr, cpu); +fail_pipe_on_cpu: __trace_array_put(tr); mutex_unlock(&trace_types_lock); return ret; @@ -6777,7 +6822,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file) if (iter->trace->pipe_close) iter->trace->pipe_close(iter); - + close_pipe_on_cpu(tr, iter->cpu_file); mutex_unlock(&trace_types_lock); free_cpumask_var(iter->started); @@ -9441,6 +9486,9 @@ static struct trace_array *trace_array_create(const char *name) if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL)) goto out_free_tr; + if (!alloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL)) + goto out_free_tr; + tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS; cpumask_copy(tr->tracing_cpumask, cpu_all_mask); @@ -9482,6 +9530,7 @@ static struct trace_array *trace_array_create(const char *name) out_free_tr: ftrace_free_ftrace_ops(tr); free_trace_buffers(tr); + free_cpumask_var(tr->pipe_cpumask); free_cpumask_var(tr->tracing_cpumask); kfree(tr->name); kfree(tr); @@ -9584,6 +9633,7 @@ static int __remove_instance(struct trace_array *tr) } kfree(tr->topts); + free_cpumask_var(tr->pipe_cpumask); free_cpumask_var(tr->tracing_cpumask); kfree(tr->name); kfree(tr); @@ -10381,12 +10431,14 @@ __init static int tracer_alloc_buffers(void) if (trace_create_savedcmd() < 0) goto out_free_temp_buffer; + if (!alloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL)) + goto out_free_savedcmd; + /* TODO: make the number of buffers hot pluggable with CPUS */ if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) { MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n"); - goto out_free_savedcmd; + goto out_free_pipe_cpumask; } - if (global_trace.buffer_disabled) tracing_off(); @@ -10439,6 +10491,8 @@ __init static int tracer_alloc_buffers(void) return 0; +out_free_pipe_cpumask: + free_cpumask_var(global_trace.pipe_cpumask); out_free_savedcmd: free_saved_cmdlines_buffer(savedcmd); out_free_temp_buffer: diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index e1edc2197fc89..73eaec158473e 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -377,6 +377,8 @@ struct trace_array { struct list_head events; struct trace_event_file *trace_marker_file; cpumask_var_t tracing_cpumask; /* only trace on set CPUs */ + /* one per_cpu trace_pipe can be opened by only one user */ + cpumask_var_t pipe_cpumask; int ref; int trace_ref; #ifdef CONFIG_FUNCTION_TRACER @@ -1295,6 +1297,14 @@ static inline void trace_branch_disable(void) /* set ring buffers to default size if not already done so */ int tracing_update_buffers(void); +union trace_synth_field { + u8 as_u8; + u16 as_u16; + u32 as_u32; + u64 as_u64; + struct trace_dynamic_info as_dynamic; +}; + struct ftrace_event_field { struct list_head link; const char *name; diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c index dd398afc8e25c..9897d0bfcab71 100644 --- a/kernel/trace/trace_events_synth.c +++ b/kernel/trace/trace_events_synth.c @@ -127,7 +127,7 @@ static bool 
synth_event_match(const char *system, const char *event, struct synth_trace_event { struct trace_entry ent; - u64 fields[]; + union trace_synth_field fields[]; }; static int synth_event_define_fields(struct trace_event_call *call) @@ -321,19 +321,19 @@ static const char *synth_field_fmt(char *type) static void print_synth_event_num_val(struct trace_seq *s, char *print_fmt, char *name, - int size, u64 val, char *space) + int size, union trace_synth_field *val, char *space) { switch (size) { case 1: - trace_seq_printf(s, print_fmt, name, (u8)val, space); + trace_seq_printf(s, print_fmt, name, val->as_u8, space); break; case 2: - trace_seq_printf(s, print_fmt, name, (u16)val, space); + trace_seq_printf(s, print_fmt, name, val->as_u16, space); break; case 4: - trace_seq_printf(s, print_fmt, name, (u32)val, space); + trace_seq_printf(s, print_fmt, name, val->as_u32, space); break; default: @@ -350,7 +350,7 @@ static enum print_line_t print_synth_event(struct trace_iterator *iter, struct trace_seq *s = &iter->seq; struct synth_trace_event *entry; struct synth_event *se; - unsigned int i, n_u64; + unsigned int i, j, n_u64; char print_fmt[32]; const char *fmt; @@ -374,43 +374,28 @@ static enum print_line_t print_synth_event(struct trace_iterator *iter, /* parameter values */ if (se->fields[i]->is_string) { if (se->fields[i]->is_dynamic) { - u32 offset, data_offset; - char *str_field; - - offset = (u32)entry->fields[n_u64]; - data_offset = offset & 0xffff; - - str_field = (char *)entry + data_offset; + union trace_synth_field *data = &entry->fields[n_u64]; trace_seq_printf(s, print_fmt, se->fields[i]->name, STR_VAR_LEN_MAX, - str_field, + (char *)entry + data->as_dynamic.offset, i == se->n_fields - 1 ? "" : " "); n_u64++; } else { trace_seq_printf(s, print_fmt, se->fields[i]->name, STR_VAR_LEN_MAX, - (char *)&entry->fields[n_u64], + (char *)&entry->fields[n_u64].as_u64, i == se->n_fields - 1 ? 
"" : " "); n_u64 += STR_VAR_LEN_MAX / sizeof(u64); } } else if (se->fields[i]->is_stack) { - u32 offset, data_offset, len; - unsigned long *p, *end; - - offset = (u32)entry->fields[n_u64]; - data_offset = offset & 0xffff; - len = offset >> 16; - - p = (void *)entry + data_offset; - end = (void *)p + len - (sizeof(long) - 1); + union trace_synth_field *data = &entry->fields[n_u64]; + unsigned long *p = (void *)entry + data->as_dynamic.offset; trace_seq_printf(s, "%s=STACK:\n", se->fields[i]->name); - - for (; *p && p < end; p++) - trace_seq_printf(s, "=> %pS\n", (void *)*p); + for (j = 1; j < data->as_dynamic.len / sizeof(long); j++) + trace_seq_printf(s, "=> %pS\n", (void *)p[j]); n_u64++; - } else { struct trace_print_flags __flags[] = { __def_gfpflag_names, {-1, NULL} }; @@ -419,13 +404,13 @@ static enum print_line_t print_synth_event(struct trace_iterator *iter, print_synth_event_num_val(s, print_fmt, se->fields[i]->name, se->fields[i]->size, - entry->fields[n_u64], + &entry->fields[n_u64], space); if (strcmp(se->fields[i]->type, "gfp_t") == 0) { trace_seq_puts(s, " ("); trace_print_flags_seq(s, "|", - entry->fields[n_u64], + entry->fields[n_u64].as_u64, __flags); trace_seq_putc(s, ')'); } @@ -454,21 +439,16 @@ static unsigned int trace_string(struct synth_trace_event *entry, int ret; if (is_dynamic) { - u32 data_offset; - - data_offset = struct_size(entry, fields, event->n_u64); - data_offset += data_size; - - len = fetch_store_strlen((unsigned long)str_val); + union trace_synth_field *data = &entry->fields[*n_u64]; - data_offset |= len << 16; - *(u32 *)&entry->fields[*n_u64] = data_offset; + data->as_dynamic.offset = struct_size(entry, fields, event->n_u64) + data_size; + data->as_dynamic.len = fetch_store_strlen((unsigned long)str_val); ret = fetch_store_string((unsigned long)str_val, &entry->fields[*n_u64], entry); (*n_u64)++; } else { - str_field = (char *)&entry->fields[*n_u64]; + str_field = (char *)&entry->fields[*n_u64].as_u64; #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE if ((unsigned long)str_val < TASK_SIZE) @@ -492,6 +472,7 @@ static unsigned int trace_stack(struct synth_trace_event *entry, unsigned int data_size, unsigned int *n_u64) { + union trace_synth_field *data = &entry->fields[*n_u64]; unsigned int len; u32 data_offset; void *data_loc; @@ -504,10 +485,6 @@ static unsigned int trace_stack(struct synth_trace_event *entry, break; } - /* Include the zero'd element if it fits */ - if (len < HIST_STACKTRACE_DEPTH) - len++; - len *= sizeof(long); /* Find the dynamic section to copy the stack into. 
*/ @@ -515,8 +492,9 @@ static unsigned int trace_stack(struct synth_trace_event *entry, memcpy(data_loc, stack, len); /* Fill in the field that holds the offset/len combo */ - data_offset |= len << 16; - *(u32 *)&entry->fields[*n_u64] = data_offset; + + data->as_dynamic.offset = data_offset; + data->as_dynamic.len = len; (*n_u64)++; @@ -550,7 +528,8 @@ static notrace void trace_event_raw_event_synth(void *__data, str_val = (char *)(long)var_ref_vals[val_idx]; if (event->dynamic_fields[i]->is_stack) { - len = *((unsigned long *)str_val); + /* reserve one extra element for size */ + len = *((unsigned long *)str_val) + 1; len *= sizeof(unsigned long); } else { len = fetch_store_strlen((unsigned long)str_val); @@ -592,19 +571,19 @@ static notrace void trace_event_raw_event_synth(void *__data, switch (field->size) { case 1: - *(u8 *)&entry->fields[n_u64] = (u8)val; + entry->fields[n_u64].as_u8 = (u8)val; break; case 2: - *(u16 *)&entry->fields[n_u64] = (u16)val; + entry->fields[n_u64].as_u16 = (u16)val; break; case 4: - *(u32 *)&entry->fields[n_u64] = (u32)val; + entry->fields[n_u64].as_u32 = (u32)val; break; default: - entry->fields[n_u64] = val; + entry->fields[n_u64].as_u64 = val; break; } n_u64++; @@ -1791,19 +1770,19 @@ int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...) switch (field->size) { case 1: - *(u8 *)&state.entry->fields[n_u64] = (u8)val; + state.entry->fields[n_u64].as_u8 = (u8)val; break; case 2: - *(u16 *)&state.entry->fields[n_u64] = (u16)val; + state.entry->fields[n_u64].as_u16 = (u16)val; break; case 4: - *(u32 *)&state.entry->fields[n_u64] = (u32)val; + state.entry->fields[n_u64].as_u32 = (u32)val; break; default: - state.entry->fields[n_u64] = val; + state.entry->fields[n_u64].as_u64 = val; break; } n_u64++; @@ -1884,19 +1863,19 @@ int synth_event_trace_array(struct trace_event_file *file, u64 *vals, switch (field->size) { case 1: - *(u8 *)&state.entry->fields[n_u64] = (u8)val; + state.entry->fields[n_u64].as_u8 = (u8)val; break; case 2: - *(u16 *)&state.entry->fields[n_u64] = (u16)val; + state.entry->fields[n_u64].as_u16 = (u16)val; break; case 4: - *(u32 *)&state.entry->fields[n_u64] = (u32)val; + state.entry->fields[n_u64].as_u32 = (u32)val; break; default: - state.entry->fields[n_u64] = val; + state.entry->fields[n_u64].as_u64 = val; break; } n_u64++; @@ -2031,19 +2010,19 @@ static int __synth_event_add_val(const char *field_name, u64 val, } else { switch (field->size) { case 1: - *(u8 *)&trace_state->entry->fields[field->offset] = (u8)val; + trace_state->entry->fields[field->offset].as_u8 = (u8)val; break; case 2: - *(u16 *)&trace_state->entry->fields[field->offset] = (u16)val; + trace_state->entry->fields[field->offset].as_u16 = (u16)val; break; case 4: - *(u32 *)&trace_state->entry->fields[field->offset] = (u32)val; + trace_state->entry->fields[field->offset].as_u32 = (u32)val; break; default: - trace_state->entry->fields[field->offset] = val; + trace_state->entry->fields[field->offset].as_u64 = val; break; } } diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 590b3d51afae9..ba37f768e2f27 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c @@ -231,7 +231,8 @@ static void irqsoff_trace_open(struct trace_iterator *iter) { if (is_graph(iter->tr)) graph_trace_open(iter); - + else + iter->private = NULL; } static void irqsoff_trace_close(struct trace_iterator *iter) diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 330aee1c1a49e..0469a04a355f2 100644 
--- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -168,6 +168,8 @@ static void wakeup_trace_open(struct trace_iterator *iter) { if (is_graph(iter->tr)) graph_trace_open(iter); + else + iter->private = NULL; } static void wakeup_trace_close(struct trace_iterator *iter) diff --git a/lib/clz_ctz.c b/lib/clz_ctz.c index 0d3a686b5ba29..fb8c0c5c2bd27 100644 --- a/lib/clz_ctz.c +++ b/lib/clz_ctz.c @@ -28,36 +28,16 @@ int __weak __clzsi2(int val) } EXPORT_SYMBOL(__clzsi2); -int __weak __clzdi2(long val); -int __weak __ctzdi2(long val); -#if BITS_PER_LONG == 32 - -int __weak __clzdi2(long val) +int __weak __clzdi2(u64 val); +int __weak __clzdi2(u64 val) { - return 32 - fls((int)val); + return 64 - fls64(val); } EXPORT_SYMBOL(__clzdi2); -int __weak __ctzdi2(long val) +int __weak __ctzdi2(u64 val); +int __weak __ctzdi2(u64 val) { - return __ffs((u32)val); + return __ffs64(val); } EXPORT_SYMBOL(__ctzdi2); - -#elif BITS_PER_LONG == 64 - -int __weak __clzdi2(long val) -{ - return 64 - fls64((u64)val); -} -EXPORT_SYMBOL(__clzdi2); - -int __weak __ctzdi2(long val) -{ - return __ffs64((u64)val); -} -EXPORT_SYMBOL(__ctzdi2); - -#else -#error BITS_PER_LONG not 32 or 64 -#endif diff --git a/lib/maple_tree.c b/lib/maple_tree.c index 4dd73cf936a63..f723024e14266 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -4265,6 +4265,10 @@ static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas) * mas_wr_append: Attempt to append * @wr_mas: the maple write state * + * This is currently unsafe in rcu mode since the end of the node may be cached + * by readers while the node contents may be updated which could result in + * inaccurate information. + * * Return: True if appended, false otherwise */ static inline bool mas_wr_append(struct ma_wr_state *wr_mas) @@ -4274,6 +4278,9 @@ static inline bool mas_wr_append(struct ma_wr_state *wr_mas) struct ma_state *mas = wr_mas->mas; unsigned char node_pivots = mt_pivots[wr_mas->type]; + if (mt_in_rcu(mas->tree)) + return false; + if (mas->offset != wr_mas->node_end) return false; diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 1a31065b2036a..976b9bd02a1b5 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -1136,7 +1136,6 @@ static void set_iter_tags(struct radix_tree_iter *iter, void __rcu **radix_tree_iter_resume(void __rcu **slot, struct radix_tree_iter *iter) { - slot++; iter->index = __radix_tree_iter_add(iter, 1); iter->next_index = iter->index; iter->tags = 0; diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c index 2fcc9731528ac..e0e59d420fca4 100644 --- a/mm/damon/vaddr.c +++ b/mm/damon/vaddr.c @@ -386,6 +386,7 @@ static int damon_mkold_hugetlb_entry(pte_t *pte, unsigned long hmask, static const struct mm_walk_ops damon_mkold_ops = { .pmd_entry = damon_mkold_pmd_entry, .hugetlb_entry = damon_mkold_hugetlb_entry, + .walk_lock = PGWALK_RDLOCK, }; static void damon_va_mkold(struct mm_struct *mm, unsigned long addr) @@ -525,6 +526,7 @@ static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask, static const struct mm_walk_ops damon_young_ops = { .pmd_entry = damon_young_pmd_entry, .hugetlb_entry = damon_young_hugetlb_entry, + .walk_lock = PGWALK_RDLOCK, }; static bool damon_va_young(struct mm_struct *mm, unsigned long addr, diff --git a/mm/gup.c b/mm/gup.c index 76d222ccc3ff4..6e2f9e9d65377 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -597,7 +597,7 @@ static struct page *follow_page_pte(struct vm_area_struct *vma, pte = ptep_get(ptep); if (!pte_present(pte)) goto no_page; - if (pte_protnone(pte) && 
!gup_can_follow_protnone(flags)) + if (pte_protnone(pte) && !gup_can_follow_protnone(vma, flags)) goto no_page; page = vm_normal_page(vma, address, pte); @@ -714,7 +714,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma, if (likely(!pmd_trans_huge(pmdval))) return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); - if (pmd_protnone(pmdval) && !gup_can_follow_protnone(flags)) + if (pmd_protnone(pmdval) && !gup_can_follow_protnone(vma, flags)) return no_page_table(vma, flags); ptl = pmd_lock(mm, pmd); @@ -851,6 +851,10 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address, if (WARN_ON_ONCE(foll_flags & FOLL_PIN)) return NULL; + /* + * We never set FOLL_HONOR_NUMA_FAULT because callers don't expect + * to fail on PROT_NONE-mapped pages. + */ page = follow_page_mask(vma, address, foll_flags, &ctx); if (ctx.pgmap) put_dev_pagemap(ctx.pgmap); @@ -2227,6 +2231,13 @@ static bool is_valid_gup_args(struct page **pages, int *locked, gup_flags |= FOLL_UNLOCKABLE; } + /* + * For now, always trigger NUMA hinting faults. Some GUP users like + * KVM require the hint to be as the calling context of GUP is + * functionally similar to a memory reference from task context. + */ + gup_flags |= FOLL_HONOR_NUMA_FAULT; + /* FOLL_GET and FOLL_PIN are mutually exclusive. */ if (WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) == (FOLL_PIN | FOLL_GET))) @@ -2551,7 +2562,14 @@ static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, struct page *page; struct folio *folio; - if (pte_protnone(pte) && !gup_can_follow_protnone(flags)) + /* + * Always fallback to ordinary GUP on PROT_NONE-mapped pages: + * pte_access_permitted() better should reject these pages + * either way: otherwise, GUP-fast might succeed in + * cases where ordinary GUP would fail due to VMA access + * permissions. 
+ */ + if (pte_protnone(pte)) goto pte_unmap; if (!pte_access_permitted(pte, flags & FOLL_WRITE)) @@ -2970,8 +2988,8 @@ static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned lo if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) || pmd_devmap(pmd))) { - if (pmd_protnone(pmd) && - !gup_can_follow_protnone(flags)) + /* See gup_pte_range() */ + if (pmd_protnone(pmd)) return 0; if (!gup_huge_pmd(pmd, pmdp, addr, next, flags, @@ -3151,7 +3169,7 @@ static int internal_get_user_pages_fast(unsigned long start, if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM | FOLL_FORCE | FOLL_PIN | FOLL_GET | FOLL_FAST_ONLY | FOLL_NOFAULT | - FOLL_PCI_P2PDMA))) + FOLL_PCI_P2PDMA | FOLL_HONOR_NUMA_FAULT))) return -EINVAL; if (gup_flags & FOLL_PIN) diff --git a/mm/hmm.c b/mm/hmm.c index 855e25e59d8f0..277ddcab4947d 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -562,6 +562,7 @@ static const struct mm_walk_ops hmm_walk_ops = { .pte_hole = hmm_vma_walk_hole, .hugetlb_entry = hmm_vma_walk_hugetlb_entry, .test_walk = hmm_vma_walk_test, + .walk_lock = PGWALK_RDLOCK, }; /** diff --git a/mm/huge_memory.c b/mm/huge_memory.c index eb3678360b97e..164d22365bdee 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1467,8 +1467,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) return ERR_PTR(-EFAULT); - /* Full NUMA hinting faults to serialise migration in fault paths */ - if (pmd_protnone(*pmd) && !gup_can_follow_protnone(flags)) + if (pmd_protnone(*pmd) && !gup_can_follow_protnone(vma, flags)) return NULL; if (!pmd_write(*pmd) && gup_must_unshare(vma, flags, page)) @@ -1613,7 +1612,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, * If other processes are mapping this folio, we couldn't discard * the folio unless they all do MADV_FREE so let's skip the folio. */ - if (folio_mapcount(folio) != 1) + if (folio_estimated_sharers(folio) != 1) goto out; if (!folio_trylock(folio)) diff --git a/mm/internal.h b/mm/internal.h index a7d9e980429a5..8ed127c1c808c 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -924,6 +924,13 @@ int migrate_device_coherent_page(struct page *page); struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags); int __must_check try_grab_page(struct page *page, unsigned int flags); +/* + * mm/huge_memory.c + */ +struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmd, + unsigned int flags); + enum { /* mark page accessed */ FOLL_TOUCH = 1 << 16, @@ -997,6 +1004,16 @@ static inline bool gup_must_unshare(struct vm_area_struct *vma, if (IS_ENABLED(CONFIG_HAVE_FAST_GUP)) smp_rmb(); + /* + * During GUP-fast we might not get called on the head page for a + * hugetlb page that is mapped using cont-PTE, because GUP-fast does + * not work with the abstracted hugetlb PTEs that always point at the + * head page. For hugetlb, PageAnonExclusive only applies on the head + * page (as it cannot be partially COW-shared), so lookup the head page. + */ + if (unlikely(!PageHead(page) && PageHuge(page))) + page = compound_head(page); + /* * Note that PageKsm() pages cannot be exclusive, and consequently, * cannot get pinned. 
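The mm/gup.c and mm/internal.h hunks above rework how GUP treats protnone (NUMA-hinting) entries: instead of every caller implicitly skipping pte_protnone()/pmd_protnone() entries, honoring NUMA hinting faults becomes an explicit internal flag, FOLL_HONOR_NUMA_FAULT, set in is_valid_gup_args(), and gup_can_follow_protnone() now takes the VMA so it can tell a NUMA hint apart from a genuinely inaccessible mapping. As a sketch only, not part of the patch text above, a helper consistent with those call sites could look roughly like this, assuming it sits next to the other GUP helpers and uses the existing vma_is_accessible() check:

/* Sketch: shape of gup_can_follow_protnone() implied by the call sites above. */
static inline bool gup_can_follow_protnone(struct vm_area_struct *vma,
					   unsigned int flags)
{
	/* Callers that did not ask to honor NUMA hinting faults may follow. */
	if (!(flags & FOLL_HONOR_NUMA_FAULT))
		return true;

	/*
	 * In an accessible VMA a protnone entry is a NUMA hint, so refuse to
	 * follow it and let the fault path raise the hinting fault. In an
	 * inaccessible (PROT_NONE) VMA there is no hint to honor, so GUP may
	 * proceed (e.g. for FOLL_FORCE).
	 */
	return !vma_is_accessible(vma);
}

GUP-fast walks page tables without a VMA at hand, which is why gup_pte_range() and gup_pmd_range() above now simply fall back to ordinary GUP whenever they encounter a protnone entry.
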
diff --git a/mm/ksm.c b/mm/ksm.c index d20d7662419be..d7b5b95e936e9 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -455,6 +455,12 @@ static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long nex static const struct mm_walk_ops break_ksm_ops = { .pmd_entry = break_ksm_pmd_entry, + .walk_lock = PGWALK_RDLOCK, +}; + +static const struct mm_walk_ops break_ksm_lock_vma_ops = { + .pmd_entry = break_ksm_pmd_entry, + .walk_lock = PGWALK_WRLOCK, }; /* @@ -470,16 +476,17 @@ static const struct mm_walk_ops break_ksm_ops = { * of the process that owns 'vma'. We also do not want to enforce * protection keys here anyway. */ -static int break_ksm(struct vm_area_struct *vma, unsigned long addr) +static int break_ksm(struct vm_area_struct *vma, unsigned long addr, bool lock_vma) { vm_fault_t ret = 0; + const struct mm_walk_ops *ops = lock_vma ? + &break_ksm_lock_vma_ops : &break_ksm_ops; do { int ksm_page; cond_resched(); - ksm_page = walk_page_range_vma(vma, addr, addr + 1, - &break_ksm_ops, NULL); + ksm_page = walk_page_range_vma(vma, addr, addr + 1, ops, NULL); if (WARN_ON_ONCE(ksm_page < 0)) return ksm_page; if (!ksm_page) @@ -565,7 +572,7 @@ static void break_cow(struct ksm_rmap_item *rmap_item) mmap_read_lock(mm); vma = find_mergeable_vma(mm, addr); if (vma) - break_ksm(vma, addr); + break_ksm(vma, addr, false); mmap_read_unlock(mm); } @@ -871,7 +878,7 @@ static void remove_trailing_rmap_items(struct ksm_rmap_item **rmap_list) * in cmp_and_merge_page on one of the rmap_items we would be removing. */ static int unmerge_ksm_pages(struct vm_area_struct *vma, - unsigned long start, unsigned long end) + unsigned long start, unsigned long end, bool lock_vma) { unsigned long addr; int err = 0; @@ -882,7 +889,7 @@ static int unmerge_ksm_pages(struct vm_area_struct *vma, if (signal_pending(current)) err = -ERESTARTSYS; else - err = break_ksm(vma, addr); + err = break_ksm(vma, addr, lock_vma); } return err; } @@ -1029,7 +1036,7 @@ static int unmerge_and_remove_all_rmap_items(void) if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) continue; err = unmerge_ksm_pages(vma, - vma->vm_start, vma->vm_end); + vma->vm_start, vma->vm_end, false); if (err) goto error; } @@ -2530,7 +2537,7 @@ static int __ksm_del_vma(struct vm_area_struct *vma) return 0; if (vma->anon_vma) { - err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end); + err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end, true); if (err) return err; } @@ -2668,7 +2675,7 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start, return 0; /* just ignore the advice */ if (vma->anon_vma) { - err = unmerge_ksm_pages(vma, start, end); + err = unmerge_ksm_pages(vma, start, end, true); if (err) return err; } diff --git a/mm/madvise.c b/mm/madvise.c index 886f06066622f..ec30f48f8f2e1 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -233,6 +233,7 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start, static const struct mm_walk_ops swapin_walk_ops = { .pmd_entry = swapin_walk_pmd_entry, + .walk_lock = PGWALK_RDLOCK, }; static void shmem_swapin_range(struct vm_area_struct *vma, @@ -383,7 +384,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, folio = pfn_folio(pmd_pfn(orig_pmd)); /* Do not interfere with other mappings of this folio */ - if (folio_mapcount(folio) != 1) + if (folio_estimated_sharers(folio) != 1) goto huge_unlock; if (pageout_anon_only_filter && !folio_test_anon(folio)) @@ -457,7 +458,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, if (folio_test_large(folio)) { int err; - if 
(folio_mapcount(folio) != 1) + if (folio_estimated_sharers(folio) != 1) break; if (pageout_anon_only_filter && !folio_test_anon(folio)) break; @@ -534,6 +535,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, static const struct mm_walk_ops cold_walk_ops = { .pmd_entry = madvise_cold_or_pageout_pte_range, + .walk_lock = PGWALK_RDLOCK, }; static void madvise_cold_page_range(struct mmu_gather *tlb, @@ -678,7 +680,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, if (folio_test_large(folio)) { int err; - if (folio_mapcount(folio) != 1) + if (folio_estimated_sharers(folio) != 1) break; if (!folio_trylock(folio)) break; @@ -757,6 +759,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, static const struct mm_walk_ops madvise_free_walk_ops = { .pmd_entry = madvise_free_pte_range, + .walk_lock = PGWALK_RDLOCK, }; static int madvise_free_single_vma(struct vm_area_struct *vma, diff --git a/mm/memcontrol.c b/mm/memcontrol.c index e8ca4bdcb03cc..315fd5f45e3c0 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -6024,6 +6024,7 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, static const struct mm_walk_ops precharge_walk_ops = { .pmd_entry = mem_cgroup_count_precharge_pte_range, + .walk_lock = PGWALK_RDLOCK, }; static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) @@ -6303,6 +6304,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, static const struct mm_walk_ops charge_walk_ops = { .pmd_entry = mem_cgroup_move_charge_pte_range, + .walk_lock = PGWALK_RDLOCK, }; static void mem_cgroup_move_charge(void) diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 9a285038d7658..fe121fdb05f7f 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -831,6 +831,7 @@ static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask, static const struct mm_walk_ops hwp_walk_ops = { .pmd_entry = hwpoison_pte_range, .hugetlb_entry = hwpoison_hugetlb_range, + .walk_lock = PGWALK_RDLOCK, }; /* @@ -2740,10 +2741,13 @@ int soft_offline_page(unsigned long pfn, int flags) if (ret > 0) { ret = soft_offline_in_use_page(page); } else if (ret == 0) { - if (!page_handle_poison(page, true, false) && try_again) { - try_again = false; - flags &= ~MF_COUNT_INCREASED; - goto retry; + if (!page_handle_poison(page, true, false)) { + if (try_again) { + try_again = false; + flags &= ~MF_COUNT_INCREASED; + goto retry; + } + ret = -EBUSY; } } diff --git a/mm/memory.c b/mm/memory.c index 1ec1ef3418bf5..cdc4d4c1c858a 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5257,11 +5257,8 @@ EXPORT_SYMBOL_GPL(handle_mm_fault); static inline bool get_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs) { - /* Even if this succeeds, make it clear we *might* have slept */ - if (likely(mmap_read_trylock(mm))) { - might_sleep(); + if (likely(mmap_read_trylock(mm))) return true; - } if (regs && !user_mode(regs)) { unsigned long ip = instruction_pointer(regs); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index c53f8beeb507f..ec2eaceffd74b 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -718,6 +718,14 @@ static const struct mm_walk_ops queue_pages_walk_ops = { .hugetlb_entry = queue_folios_hugetlb, .pmd_entry = queue_folios_pte_range, .test_walk = queue_pages_test_walk, + .walk_lock = PGWALK_RDLOCK, +}; + +static const struct mm_walk_ops queue_pages_lock_vma_walk_ops = { + .hugetlb_entry = queue_folios_hugetlb, + .pmd_entry = queue_folios_pte_range, + .test_walk = queue_pages_test_walk, + .walk_lock = PGWALK_WRLOCK, }; /* @@ -738,7 +746,7 
@@ static const struct mm_walk_ops queue_pages_walk_ops = { static int queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end, nodemask_t *nodes, unsigned long flags, - struct list_head *pagelist) + struct list_head *pagelist, bool lock_vma) { int err; struct queue_pages qp = { @@ -749,8 +757,10 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end, .end = end, .first = NULL, }; + const struct mm_walk_ops *ops = lock_vma ? + &queue_pages_lock_vma_walk_ops : &queue_pages_walk_ops; - err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp); + err = walk_page_range(mm, start, end, ops, &qp); if (!qp.first) /* whole range in hole */ @@ -1078,7 +1088,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest, vma = find_vma(mm, 0); VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))); queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask, - flags | MPOL_MF_DISCONTIG_OK, &pagelist); + flags | MPOL_MF_DISCONTIG_OK, &pagelist, false); if (!list_empty(&pagelist)) { err = migrate_pages(&pagelist, alloc_migration_target, NULL, @@ -1321,12 +1331,8 @@ static long do_mbind(unsigned long start, unsigned long len, * Lock the VMAs before scanning for pages to migrate, to ensure we don't * miss a concurrently inserted page. */ - vma_iter_init(&vmi, mm, start); - for_each_vma_range(vmi, vma, end) - vma_start_write(vma); - ret = queue_pages_range(mm, start, end, nmask, - flags | MPOL_MF_INVERT, &pagelist); + flags | MPOL_MF_INVERT, &pagelist, true); if (ret < 0) { err = ret; diff --git a/mm/migrate_device.c b/mm/migrate_device.c index 8365158460ed1..d5f492356e3ef 100644 --- a/mm/migrate_device.c +++ b/mm/migrate_device.c @@ -279,6 +279,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp, static const struct mm_walk_ops migrate_vma_walk_ops = { .pmd_entry = migrate_vma_collect_pmd, .pte_hole = migrate_vma_collect_hole, + .walk_lock = PGWALK_RDLOCK, }; /* diff --git a/mm/mincore.c b/mm/mincore.c index b7f7a516b26cd..dad3622cc963e 100644 --- a/mm/mincore.c +++ b/mm/mincore.c @@ -176,6 +176,7 @@ static const struct mm_walk_ops mincore_walk_ops = { .pmd_entry = mincore_pte_range, .pte_hole = mincore_unmapped_range, .hugetlb_entry = mincore_hugetlb, + .walk_lock = PGWALK_RDLOCK, }; /* diff --git a/mm/mlock.c b/mm/mlock.c index 0a0c996c5c214..479e09d0994c7 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -371,6 +371,7 @@ static void mlock_vma_pages_range(struct vm_area_struct *vma, { static const struct mm_walk_ops mlock_walk_ops = { .pmd_entry = mlock_pte_range, + .walk_lock = PGWALK_WRLOCK_VERIFY, }; /* diff --git a/mm/mprotect.c b/mm/mprotect.c index 6f658d4837047..3aef1340533a7 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -568,6 +568,7 @@ static const struct mm_walk_ops prot_none_walk_ops = { .pte_entry = prot_none_pte_entry, .hugetlb_entry = prot_none_hugetlb_entry, .test_walk = prot_none_test, + .walk_lock = PGWALK_WRLOCK, }; int diff --git a/mm/pagewalk.c b/mm/pagewalk.c index 2022333805d3d..9b2d23fbf4d35 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c @@ -400,6 +400,33 @@ static int __walk_page_range(unsigned long start, unsigned long end, return err; } +static inline void process_mm_walk_lock(struct mm_struct *mm, + enum page_walk_lock walk_lock) +{ + if (walk_lock == PGWALK_RDLOCK) + mmap_assert_locked(mm); + else + mmap_assert_write_locked(mm); +} + +static inline void process_vma_walk_lock(struct vm_area_struct *vma, + enum page_walk_lock walk_lock) +{ +#ifdef CONFIG_PER_VMA_LOCK + switch (walk_lock) { + case 
PGWALK_WRLOCK: + vma_start_write(vma); + break; + case PGWALK_WRLOCK_VERIFY: + vma_assert_write_locked(vma); + break; + case PGWALK_RDLOCK: + /* PGWALK_RDLOCK is handled by process_mm_walk_lock */ + break; + } +#endif +} + /** * walk_page_range - walk page table with caller specific callbacks * @mm: mm_struct representing the target process of page table walk @@ -459,7 +486,7 @@ int walk_page_range(struct mm_struct *mm, unsigned long start, if (!walk.mm) return -EINVAL; - mmap_assert_locked(walk.mm); + process_mm_walk_lock(walk.mm, ops->walk_lock); vma = find_vma(walk.mm, start); do { @@ -474,6 +501,7 @@ int walk_page_range(struct mm_struct *mm, unsigned long start, if (ops->pte_hole) err = ops->pte_hole(start, next, -1, &walk); } else { /* inside vma */ + process_vma_walk_lock(vma, ops->walk_lock); walk.vma = vma; next = min(end, vma->vm_end); vma = find_vma(mm, vma->vm_end); @@ -549,7 +577,8 @@ int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start, if (start < vma->vm_start || end > vma->vm_end) return -EINVAL; - mmap_assert_locked(walk.mm); + process_mm_walk_lock(walk.mm, ops->walk_lock); + process_vma_walk_lock(vma, ops->walk_lock); return __walk_page_range(start, end, &walk); } @@ -566,7 +595,8 @@ int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops, if (!walk.mm) return -EINVAL; - mmap_assert_locked(walk.mm); + process_mm_walk_lock(walk.mm, ops->walk_lock); + process_vma_walk_lock(vma, ops->walk_lock); return __walk_page_range(vma->vm_start, vma->vm_end, &walk); } diff --git a/mm/shmem.c b/mm/shmem.c index f5af4b943e428..d963c747dabca 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -806,14 +806,16 @@ unsigned long shmem_partial_swap_usage(struct address_space *mapping, XA_STATE(xas, &mapping->i_pages, start); struct page *page; unsigned long swapped = 0; + unsigned long max = end - 1; rcu_read_lock(); - xas_for_each(&xas, page, end - 1) { + xas_for_each(&xas, page, max) { if (xas_retry(&xas, page)) continue; if (xa_is_value(page)) swapped++; - + if (xas.xa_index == max) + break; if (need_resched()) { xas_pause(&xas); cond_resched_rcu(); diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 93cf99aba335b..228a4a5312f22 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -2979,6 +2979,10 @@ void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot) free_vm_area(area); return NULL; } + + flush_cache_vmap((unsigned long)area->addr, + (unsigned long)area->addr + count * PAGE_SIZE); + return area->addr; } EXPORT_SYMBOL_GPL(vmap_pfn); diff --git a/mm/vmscan.c b/mm/vmscan.c index 1080209a568bb..2fe4a11d63f44 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -4284,6 +4284,7 @@ static void walk_mm(struct lruvec *lruvec, struct mm_struct *mm, struct lru_gen_ static const struct mm_walk_ops mm_walk_ops = { .test_walk = should_skip_vma, .p4d_entry = walk_pud_range, + .walk_lock = PGWALK_RDLOCK, }; int err; @@ -4853,16 +4854,17 @@ void lru_gen_release_memcg(struct mem_cgroup *memcg) spin_lock_irq(&pgdat->memcg_lru.lock); - VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list)); + if (hlist_nulls_unhashed(&lruvec->lrugen.list)) + goto unlock; gen = lruvec->lrugen.gen; - hlist_nulls_del_rcu(&lruvec->lrugen.list); + hlist_nulls_del_init_rcu(&lruvec->lrugen.list); pgdat->memcg_lru.nr_memcgs[gen]--; if (!pgdat->memcg_lru.nr_memcgs[gen] && gen == get_memcg_gen(pgdat->memcg_lru.seq)) WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1); - +unlock: spin_unlock_irq(&pgdat->memcg_lru.lock); } } @@ -5434,8 +5436,10 @@ static void shrink_many(struct pglist_data 
*pgdat, struct scan_control *sc) rcu_read_lock(); hlist_nulls_for_each_entry_rcu(lrugen, pos, &pgdat->memcg_lru.fifo[gen][bin], list) { - if (op) + if (op) { lru_gen_rotate_memcg(lruvec, op); + op = 0; + } mem_cgroup_put(memcg); @@ -5443,7 +5447,7 @@ static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc) memcg = lruvec_memcg(lruvec); if (!mem_cgroup_tryget(memcg)) { - op = 0; + lru_gen_release_memcg(memcg); memcg = NULL; continue; } diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index b3662119ddbce..e40aa3e3641cb 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -384,7 +384,8 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, dev->name); vlan_vid_add(dev, htons(ETH_P_8021Q), 0); } - if (event == NETDEV_DOWN) + if (event == NETDEV_DOWN && + (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) vlan_vid_del(dev, htons(ETH_P_8021Q), 0); vlan_info = rtnl_dereference(dev->vlan_info); diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c index acff565849ae9..1d704574e6bf5 100644 --- a/net/batman-adv/bat_v_elp.c +++ b/net/batman-adv/bat_v_elp.c @@ -505,7 +505,7 @@ int batadv_v_elp_packet_recv(struct sk_buff *skb, struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); struct batadv_elp_packet *elp_packet; struct batadv_hard_iface *primary_if; - struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb); + struct ethhdr *ethhdr; bool res; int ret = NET_RX_DROP; @@ -513,6 +513,7 @@ int batadv_v_elp_packet_recv(struct sk_buff *skb, if (!res) goto free_skb; + ethhdr = eth_hdr(skb); if (batadv_is_my_mac(bat_priv, ethhdr->h_source)) goto free_skb; diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c index e710e9afe78f3..e503ee0d896bd 100644 --- a/net/batman-adv/bat_v_ogm.c +++ b/net/batman-adv/bat_v_ogm.c @@ -123,8 +123,10 @@ static void batadv_v_ogm_send_to_if(struct sk_buff *skb, { struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); - if (hard_iface->if_status != BATADV_IF_ACTIVE) + if (hard_iface->if_status != BATADV_IF_ACTIVE) { + kfree_skb(skb); return; + } batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_TX); batadv_add_counter(bat_priv, BATADV_CNT_MGMT_TX_BYTES, @@ -985,7 +987,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb, { struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); struct batadv_ogm2_packet *ogm_packet; - struct ethhdr *ethhdr = eth_hdr(skb); + struct ethhdr *ethhdr; int ogm_offset; u8 *packet_pos; int ret = NET_RX_DROP; @@ -999,6 +1001,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb, if (!batadv_check_management_packet(skb, if_incoming, BATADV_OGM2_HLEN)) goto free_skb; + ethhdr = eth_hdr(skb); if (batadv_is_my_mac(bat_priv, ethhdr->h_source)) goto free_skb; diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 41c1ad33d009f..24c9c0c3f3166 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -630,7 +630,19 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface) */ void batadv_update_min_mtu(struct net_device *soft_iface) { - soft_iface->mtu = batadv_hardif_min_mtu(soft_iface); + struct batadv_priv *bat_priv = netdev_priv(soft_iface); + int limit_mtu; + int mtu; + + mtu = batadv_hardif_min_mtu(soft_iface); + + if (bat_priv->mtu_set_by_user) + limit_mtu = bat_priv->mtu_set_by_user; + else + limit_mtu = ETH_DATA_LEN; + + mtu = min(mtu, limit_mtu); + dev_set_mtu(soft_iface, mtu); /* Check if the local translate table should be cleaned up to match a * new (and smaller) MTU. 
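The batman-adv change to batadv_update_min_mtu() above makes the soft interface MTU track the hard-interface minimum automatically while treating a user-configured MTU as a ceiling: the computed value is capped at mtu_set_by_user when the user has set one (stored by the soft-interface.c hunk below), at ETH_DATA_LEN otherwise, and is then applied via dev_set_mtu(), which is why the netlink caller below gains an rtnl_lock()/rtnl_unlock() pair. A small stand-alone model of that clamping decision is sketched below; effective_mtu() and the sample values are hypothetical and only illustrate the logic of the hunk above.

/*
 * Illustrative stand-alone model (not kernel code) of the MTU clamping
 * introduced above; effective_mtu() and the sample values are made up.
 */
#include <stdio.h>

#define ETH_DATA_LEN 1500	/* default ceiling when the user never set an MTU */

static int effective_mtu(int hardif_min_mtu, int mtu_set_by_user)
{
	int limit = mtu_set_by_user ? mtu_set_by_user : ETH_DATA_LEN;

	return hardif_min_mtu < limit ? hardif_min_mtu : limit;
}

int main(void)
{
	/* Hard interfaces would allow 1532 bytes, no user override: cap at 1500. */
	printf("%d\n", effective_mtu(1532, 0));
	/* User pinned the MTU to 1280: automatic updates never exceed it. */
	printf("%d\n", effective_mtu(1532, 1280));
	return 0;
}

Going through dev_set_mtu() rather than writing soft_iface->mtu directly keeps the usual netdev MTU change notifiers in the loop, at the cost of requiring RTNL in every caller.
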
diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c index ad5714f737be2..6efbc9275aec2 100644 --- a/net/batman-adv/netlink.c +++ b/net/batman-adv/netlink.c @@ -495,7 +495,10 @@ static int batadv_netlink_set_mesh(struct sk_buff *skb, struct genl_info *info) attr = info->attrs[BATADV_ATTR_FRAGMENTATION_ENABLED]; atomic_set(&bat_priv->fragmentation, !!nla_get_u8(attr)); + + rtnl_lock(); batadv_update_min_mtu(bat_priv->soft_iface); + rtnl_unlock(); } if (info->attrs[BATADV_ATTR_GW_BANDWIDTH_DOWN]) { diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index d3fdf82282afe..85d00dc9ce32c 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -153,11 +153,14 @@ static int batadv_interface_set_mac_addr(struct net_device *dev, void *p) static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu) { + struct batadv_priv *bat_priv = netdev_priv(dev); + /* check ranges */ if (new_mtu < 68 || new_mtu > batadv_hardif_min_mtu(dev)) return -EINVAL; dev->mtu = new_mtu; + bat_priv->mtu_set_by_user = new_mtu; return 0; } diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 36ca31252a733..b95c36765d045 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -774,7 +774,6 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr, if (roamed_back) { batadv_tt_global_free(bat_priv, tt_global, "Roaming canceled"); - tt_global = NULL; } else { /* The global entry has to be marked as ROAMING and * has to be kept for consistency purpose diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index ca9449ec9836a..cf1a0eafe3abc 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -1546,6 +1546,12 @@ struct batadv_priv { /** @soft_iface: net device which holds this struct as private data */ struct net_device *soft_iface; + /** + * @mtu_set_by_user: MTU was set once by user + * protected by rtnl_lock + */ + int mtu_set_by_user; + /** * @bat_counters: mesh internal traffic statistic counters (see * batadv_counters) diff --git a/net/can/isotp.c b/net/can/isotp.c index 99770ed28531d..f02b5d3e47335 100644 --- a/net/can/isotp.c +++ b/net/can/isotp.c @@ -188,12 +188,6 @@ static bool isotp_register_rxid(struct isotp_sock *so) return (isotp_bc_flags(so) == 0); } -static bool isotp_register_txecho(struct isotp_sock *so) -{ - /* all modes but SF_BROADCAST register for tx echo skbs */ - return (isotp_bc_flags(so) != CAN_ISOTP_SF_BROADCAST); -} - static enum hrtimer_restart isotp_rx_timer_handler(struct hrtimer *hrtimer) { struct isotp_sock *so = container_of(hrtimer, struct isotp_sock, @@ -1209,7 +1203,7 @@ static int isotp_release(struct socket *sock) lock_sock(sk); /* remove current filters & unregister */ - if (so->bound && isotp_register_txecho(so)) { + if (so->bound) { if (so->ifindex) { struct net_device *dev; @@ -1332,14 +1326,12 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len) can_rx_register(net, dev, rx_id, SINGLE_MASK(rx_id), isotp_rcv, sk, "isotp", sk); - if (isotp_register_txecho(so)) { - /* no consecutive frame echo skb in flight */ - so->cfecho = 0; + /* no consecutive frame echo skb in flight */ + so->cfecho = 0; - /* register for echo skb's */ - can_rx_register(net, dev, tx_id, SINGLE_MASK(tx_id), - isotp_rcv_echo, sk, "isotpe", sk); - } + /* register for echo skb's */ + can_rx_register(net, dev, tx_id, SINGLE_MASK(tx_id), + isotp_rcv_echo, sk, "isotpe", sk); dev_put(dev); @@ -1560,7 +1552,7 @@ 
static void isotp_notify(struct isotp_sock *so, unsigned long msg, case NETDEV_UNREGISTER: lock_sock(sk); /* remove current filters & unregister */ - if (so->bound && isotp_register_txecho(so)) { + if (so->bound) { if (isotp_register_rxid(so)) can_rx_unregister(dev_net(dev), dev, so->rxid, SINGLE_MASK(so->rxid), diff --git a/net/can/raw.c b/net/can/raw.c index e10f59375659f..d50c3f3d892f9 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -85,6 +85,7 @@ struct raw_sock { int bound; int ifindex; struct net_device *dev; + netdevice_tracker dev_tracker; struct list_head notifier; int loopback; int recv_own_msgs; @@ -285,8 +286,10 @@ static void raw_notify(struct raw_sock *ro, unsigned long msg, case NETDEV_UNREGISTER: lock_sock(sk); /* remove current filters & unregister */ - if (ro->bound) + if (ro->bound) { raw_disable_allfilters(dev_net(dev), dev, sk); + netdev_put(dev, &ro->dev_tracker); + } if (ro->count > 1) kfree(ro->filter); @@ -391,10 +394,12 @@ static int raw_release(struct socket *sock) /* remove current filters & unregister */ if (ro->bound) { - if (ro->dev) + if (ro->dev) { raw_disable_allfilters(dev_net(ro->dev), ro->dev, sk); - else + netdev_put(ro->dev, &ro->dev_tracker); + } else { raw_disable_allfilters(sock_net(sk), NULL, sk); + } } if (ro->count > 1) @@ -445,10 +450,10 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len) goto out; } if (dev->type != ARPHRD_CAN) { - dev_put(dev); err = -ENODEV; - goto out; + goto out_put_dev; } + if (!(dev->flags & IFF_UP)) notify_enetdown = 1; @@ -456,7 +461,9 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len) /* filters set by default/setsockopt */ err = raw_enable_allfilters(sock_net(sk), dev, sk); - dev_put(dev); + if (err) + goto out_put_dev; + } else { ifindex = 0; @@ -467,18 +474,28 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len) if (!err) { if (ro->bound) { /* unregister old filters */ - if (ro->dev) + if (ro->dev) { raw_disable_allfilters(dev_net(ro->dev), ro->dev, sk); - else + /* drop reference to old ro->dev */ + netdev_put(ro->dev, &ro->dev_tracker); + } else { raw_disable_allfilters(sock_net(sk), NULL, sk); + } } ro->ifindex = ifindex; ro->bound = 1; + /* bind() ok -> hold a reference for new ro->dev */ ro->dev = dev; + if (ro->dev) + netdev_hold(ro->dev, &ro->dev_tracker, GFP_KERNEL); } - out: +out_put_dev: + /* remove potential reference from dev_get_by_index() */ + if (dev) + dev_put(dev); +out: release_sock(sk); rtnl_unlock(); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index aef25aa5cf1dd..00c94d9622b4a 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2268,13 +2268,27 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) return err; } -int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len, - struct netlink_ext_ack *exterr) +int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer, + struct netlink_ext_ack *exterr) { - return nla_parse_deprecated(tb, IFLA_MAX, head, len, ifla_policy, + const struct ifinfomsg *ifmp; + const struct nlattr *attrs; + size_t len; + + ifmp = nla_data(nla_peer); + attrs = nla_data(nla_peer) + sizeof(struct ifinfomsg); + len = nla_len(nla_peer) - sizeof(struct ifinfomsg); + + if (ifmp->ifi_index < 0) { + NL_SET_ERR_MSG_ATTR(exterr, nla_peer, + "ifindex can't be negative"); + return -EINVAL; + } + + return nla_parse_deprecated(tb, IFLA_MAX, attrs, len, ifla_policy, exterr); } -EXPORT_SYMBOL(rtnl_nla_parse_ifla); 
+EXPORT_SYMBOL(rtnl_nla_parse_ifinfomsg); struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]) { @@ -3547,6 +3561,9 @@ static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, if (ifm->ifi_index > 0) { link_specified = true; dev = __dev_get_by_index(net, ifm->ifi_index); + } else if (ifm->ifi_index < 0) { + NL_SET_ERR_MSG(extack, "ifindex can't be negative"); + return -EINVAL; } else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) { link_specified = true; dev = rtnl_dev_get(net, tb); diff --git a/net/core/sock.c b/net/core/sock.c index 732fc37a4771b..c9cffb7acbeae 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -3159,7 +3159,7 @@ void __sk_mem_reduce_allocated(struct sock *sk, int amount) if (mem_cgroup_sockets_enabled && sk->sk_memcg) mem_cgroup_uncharge_skmem(sk->sk_memcg, amount); - if (sk_under_memory_pressure(sk) && + if (sk_under_global_memory_pressure(sk) && (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0))) sk_leave_memory_pressure(sk); } diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index fa8079303cb06..a545ad71201c8 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -130,7 +130,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) inet->inet_daddr, inet->inet_sport, inet->inet_dport); - inet->inet_id = get_random_u16(); + atomic_set(&inet->inet_id, get_random_u16()); err = dccp_connect(sk); rt = NULL; @@ -432,7 +432,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk, RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt)); newinet->mc_index = inet_iif(skb); newinet->mc_ttl = ip_hdr(skb)->ttl; - newinet->inet_id = get_random_u16(); + atomic_set(&newinet->inet_id, get_random_u16()); if (dst == NULL && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL) goto put_and_exit; diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 4e3266e4d7c3c..fcc5c9d64f466 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -315,11 +315,15 @@ EXPORT_SYMBOL_GPL(dccp_disconnect); __poll_t dccp_poll(struct file *file, struct socket *sock, poll_table *wait) { - __poll_t mask; struct sock *sk = sock->sk; + __poll_t mask; + u8 shutdown; + int state; sock_poll_wait(file, sock, wait); - if (sk->sk_state == DCCP_LISTEN) + + state = inet_sk_state_load(sk); + if (state == DCCP_LISTEN) return inet_csk_listen_poll(sk); /* Socket is not locked. We are protected from async events @@ -328,20 +332,21 @@ __poll_t dccp_poll(struct file *file, struct socket *sock, */ mask = 0; - if (sk->sk_err) + if (READ_ONCE(sk->sk_err)) mask = EPOLLERR; + shutdown = READ_ONCE(sk->sk_shutdown); - if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED) + if (shutdown == SHUTDOWN_MASK || state == DCCP_CLOSED) mask |= EPOLLHUP; - if (sk->sk_shutdown & RCV_SHUTDOWN) + if (shutdown & RCV_SHUTDOWN) mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; /* Connected? 
*/ - if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) { + if ((1 << state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) { if (atomic_read(&sk->sk_rmem_alloc) > 0) mask |= EPOLLIN | EPOLLRDNORM; - if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { + if (!(shutdown & SEND_SHUTDOWN)) { if (sk_stream_is_writeable(sk)) { mask |= EPOLLOUT | EPOLLWRNORM; } else { /* send SIGIO later */ @@ -359,7 +364,6 @@ __poll_t dccp_poll(struct file *file, struct socket *sock, } return mask; } - EXPORT_SYMBOL_GPL(dccp_poll); int dccp_ioctl(struct sock *sk, int cmd, int *karg) diff --git a/net/devlink/leftover.c b/net/devlink/leftover.c index 1f00f874471fa..bfed7929a904f 100644 --- a/net/devlink/leftover.c +++ b/net/devlink/leftover.c @@ -6704,6 +6704,7 @@ void devlink_notify_unregister(struct devlink *devlink) struct devlink_param_item *param_item; struct devlink_trap_item *trap_item; struct devlink_port *devlink_port; + struct devlink_linecard *linecard; struct devlink_rate *rate_node; struct devlink_region *region; unsigned long port_index; @@ -6732,6 +6733,8 @@ void devlink_notify_unregister(struct devlink *devlink) xa_for_each(&devlink->ports, port_index, devlink_port) devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL); + list_for_each_entry_reverse(linecard, &devlink->linecard_list, list) + devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_DEL); devlink_notify(devlink, DEVLINK_CMD_DEL); } diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 9b2ca2fcc5a11..02736b83c3032 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -340,7 +340,7 @@ static int inet_create(struct net *net, struct socket *sock, int protocol, else inet->pmtudisc = IP_PMTUDISC_WANT; - inet->inet_id = 0; + atomic_set(&inet->inet_id, 0); sock_init_data(sock, sk); diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c index 4d1af0cd7d99e..cb5dbee9e018f 100644 --- a/net/ipv4/datagram.c +++ b/net/ipv4/datagram.c @@ -73,7 +73,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len reuseport_has_conns_set(sk); sk->sk_state = TCP_ESTABLISHED; sk_set_txhash(sk); - inet->inet_id = get_random_u16(); + atomic_set(&inet->inet_id, get_random_u16()); sk_dst_set(sk, &rt->dst); err = 0; diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index 53bfd8af69203..d1e7d0ceb7edd 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -287,12 +287,12 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) switch (skb->protocol) { case htons(ETH_P_IP): - xfrm_decode_session(skb, &fl, AF_INET); memset(IPCB(skb), 0, sizeof(*IPCB(skb))); + xfrm_decode_session(skb, &fl, AF_INET); break; case htons(ETH_P_IPV6): - xfrm_decode_session(skb, &fl, AF_INET6); memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); + xfrm_decode_session(skb, &fl, AF_INET6); break; default: goto tx_err; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index a59cc4b838611..2dbdc26da86e4 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -312,7 +312,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) inet->inet_daddr)); } - inet->inet_id = get_random_u16(); + atomic_set(&inet->inet_id, get_random_u16()); if (tcp_fastopen_defer_connect(sk, &err)) return err; @@ -1596,7 +1596,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, inet_csk(newsk)->icsk_ext_hdr_len = 0; if (inet_opt) inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen; - newinet->inet_id = get_random_u16(); + atomic_set(&newinet->inet_id, get_random_u16()); /* Set ToS of the new socket 
based upon the value of incoming SYN. * ECT bits are set later in tcp_init_transfer(). diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 470f581eedd43..206418b6d7c48 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -591,7 +591,9 @@ void tcp_retransmit_timer(struct sock *sk) tcp_stream_is_thin(tp) && icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) { icsk->icsk_backoff = 0; - icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX); + icsk->icsk_rto = clamp(__tcp_set_rto(tp), + tcp_rto_min(sk), + TCP_RTO_MAX); } else if (sk->sk_state != TCP_SYN_SENT || icsk->icsk_backoff > READ_ONCE(net->ipv4.sysctl_tcp_syn_linear_timeouts)) { diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index 658bfed1df8b1..08d4b7132d4c4 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig @@ -152,7 +152,7 @@ config INET6_TUNNEL default n config IPV6_VTI -tristate "Virtual (secure) IPv6: tunneling" + tristate "Virtual (secure) IPv6: tunneling" select IPV6_TUNNEL select NET_IP_TUNNEL select XFRM diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 10b222865d46a..73c85d4e0e9cd 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -568,12 +568,12 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) vti6_addr_conflict(t, ipv6_hdr(skb))) goto tx_err; - xfrm_decode_session(skb, &fl, AF_INET6); memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); + xfrm_decode_session(skb, &fl, AF_INET6); break; case htons(ETH_P_IP): - xfrm_decode_session(skb, &fl, AF_INET); memset(IPCB(skb), 0, sizeof(*IPCB(skb))); + xfrm_decode_session(skb, &fl, AF_INET); break; default: goto tx_err; diff --git a/net/key/af_key.c b/net/key/af_key.c index ede3c6a603532..b4ea4cf9fad43 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -1848,9 +1848,9 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms if (ext_hdrs[SADB_X_EXT_FILTER - 1]) { struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1]; - if ((xfilter->sadb_x_filter_splen >= + if ((xfilter->sadb_x_filter_splen > (sizeof(xfrm_address_t) << 3)) || - (xfilter->sadb_x_filter_dplen >= + (xfilter->sadb_x_filter_dplen > (sizeof(xfrm_address_t) << 3))) { mutex_unlock(&pfk->dump_lock); return -EINVAL; diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 4f707d2a160fd..0af2599c17e8d 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1083,7 +1083,8 @@ static inline bool ieee80211_rx_reorder_ready(struct tid_ampdu_rx *tid_agg_rx, struct sk_buff *tail = skb_peek_tail(frames); struct ieee80211_rx_status *status; - if (tid_agg_rx->reorder_buf_filtered & BIT_ULL(index)) + if (tid_agg_rx->reorder_buf_filtered && + tid_agg_rx->reorder_buf_filtered & BIT_ULL(index)) return true; if (!tail) @@ -1124,7 +1125,8 @@ static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata, } no_frame: - tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index); + if (tid_agg_rx->reorder_buf_filtered) + tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index); tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num); } @@ -4264,6 +4266,7 @@ void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid, u16 ssn, u64 filtered, u16 received_mpdus) { + struct ieee80211_local *local; struct sta_info *sta; struct tid_ampdu_rx *tid_agg_rx; struct sk_buff_head frames; @@ -4281,6 +4284,11 @@ void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid, sta = container_of(pubsta, struct sta_info, sta); + local = sta->sdata->local; + WARN_ONCE(local->hw.max_rx_aggregation_subframes > 64, + "RX BA 
marker can't support max_rx_aggregation_subframes %u > 64\n", + local->hw.max_rx_aggregation_subframes); + if (!ieee80211_rx_data_set_sta(&rx, sta, -1)) return; diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 62606fb44d027..4bb0d90eca1cd 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -1876,6 +1876,7 @@ static int proc_do_sync_threshold(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { + struct netns_ipvs *ipvs = table->extra2; int *valp = table->data; int val[2]; int rc; @@ -1885,6 +1886,7 @@ proc_do_sync_threshold(struct ctl_table *table, int write, .mode = table->mode, }; + mutex_lock(&ipvs->sync_mutex); memcpy(val, valp, sizeof(val)); rc = proc_dointvec(&tmp, write, buffer, lenp, ppos); if (write) { @@ -1894,6 +1896,7 @@ proc_do_sync_threshold(struct ctl_table *table, int write, else memcpy(valp, val, sizeof(val)); } + mutex_unlock(&ipvs->sync_mutex); return rc; } @@ -4321,6 +4324,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs) ipvs->sysctl_sync_threshold[0] = DEFAULT_SYNC_THRESHOLD; ipvs->sysctl_sync_threshold[1] = DEFAULT_SYNC_PERIOD; tbl[idx].data = &ipvs->sysctl_sync_threshold; + tbl[idx].extra2 = ipvs; tbl[idx++].maxlen = sizeof(ipvs->sysctl_sync_threshold); ipvs->sysctl_sync_refresh_period = DEFAULT_SYNC_REFRESH_PERIOD; tbl[idx++].data = &ipvs->sysctl_sync_refresh_period; diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 91eacc9b0b987..b6bcc8f2f46b7 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -49,8 +49,8 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = { [SCTP_CONNTRACK_COOKIE_WAIT] = 3 SECS, [SCTP_CONNTRACK_COOKIE_ECHOED] = 3 SECS, [SCTP_CONNTRACK_ESTABLISHED] = 210 SECS, - [SCTP_CONNTRACK_SHUTDOWN_SENT] = 300 SECS / 1000, - [SCTP_CONNTRACK_SHUTDOWN_RECD] = 300 SECS / 1000, + [SCTP_CONNTRACK_SHUTDOWN_SENT] = 3 SECS, + [SCTP_CONNTRACK_SHUTDOWN_RECD] = 3 SECS, [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = 3 SECS, [SCTP_CONNTRACK_HEARTBEAT_SENT] = 30 SECS, }; @@ -105,7 +105,7 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = { { /* ORIGINAL */ /* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */ -/* init */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW}, +/* init */ {sCL, sCL, sCW, sCE, sES, sCL, sCL, sSA, sCW}, /* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL}, /* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL}, /* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL}, diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index c62227ae77467..eb8b1167dced2 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -1373,7 +1373,7 @@ static int nf_tables_newtable(struct sk_buff *skb, const struct nfnl_info *info, if (table == NULL) goto err_kzalloc; - table->validate_state = NFT_VALIDATE_SKIP; + table->validate_state = nft_net->validate_state; table->name = nla_strdup(attr, GFP_KERNEL_ACCOUNT); if (table->name == NULL) goto err_strdup; @@ -7091,6 +7091,7 @@ static int nft_set_catchall_flush(const struct nft_ctx *ctx, ret = __nft_set_catchall_flush(ctx, set, &elem); if (ret < 0) break; + nft_set_elem_change_active(ctx->net, set, ext); } return ret; @@ -9050,9 +9051,8 @@ static int nf_tables_validate(struct net *net) return -EAGAIN; nft_validate_state_update(table, NFT_VALIDATE_SKIP); + break; } - - break; } return 0; @@ -9456,9 +9456,9 @@ static void 
nft_trans_gc_work(struct work_struct *work) struct nft_trans_gc *trans, *next; LIST_HEAD(trans_gc_list); - spin_lock(&nf_tables_destroy_list_lock); + spin_lock(&nf_tables_gc_list_lock); list_splice_init(&nf_tables_gc_list, &trans_gc_list); - spin_unlock(&nf_tables_destroy_list_lock); + spin_unlock(&nf_tables_gc_list_lock); list_for_each_entry_safe(trans, next, &trans_gc_list, list) { list_del(&trans->list); @@ -9480,9 +9480,14 @@ struct nft_trans_gc *nft_trans_gc_alloc(struct nft_set *set, if (!trans) return NULL; + trans->net = maybe_get_net(net); + if (!trans->net) { + kfree(trans); + return NULL; + } + refcount_inc(&set->refs); trans->set = set; - trans->net = get_net(net); trans->seq = gc_seq; return trans; @@ -9738,6 +9743,22 @@ static void nft_set_commit_update(struct list_head *set_update_list) } } +static unsigned int nft_gc_seq_begin(struct nftables_pernet *nft_net) +{ + unsigned int gc_seq; + + /* Bump gc counter, it becomes odd, this is the busy mark. */ + gc_seq = READ_ONCE(nft_net->gc_seq); + WRITE_ONCE(nft_net->gc_seq, ++gc_seq); + + return gc_seq; +} + +static void nft_gc_seq_end(struct nftables_pernet *nft_net, unsigned int gc_seq) +{ + WRITE_ONCE(nft_net->gc_seq, ++gc_seq); +} + static int nf_tables_commit(struct net *net, struct sk_buff *skb) { struct nftables_pernet *nft_net = nft_pernet(net); @@ -9777,8 +9798,10 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) } /* 0. Validate ruleset, otherwise roll back for error reporting. */ - if (nf_tables_validate(net) < 0) + if (nf_tables_validate(net) < 0) { + nft_net->validate_state = NFT_VALIDATE_DO; return -EAGAIN; + } err = nft_flow_rule_offload_commit(net); if (err < 0) @@ -9823,9 +9846,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) WRITE_ONCE(nft_net->base_seq, base_seq); - /* Bump gc counter, it becomes odd, this is the busy mark. */ - gc_seq = READ_ONCE(nft_net->gc_seq); - WRITE_ONCE(nft_net->gc_seq, ++gc_seq); + gc_seq = nft_gc_seq_begin(nft_net); /* step 3. Start new generation, rules_gen_X now in use. 
*/ net->nft.gencursor = nft_gencursor_next(net); @@ -10038,7 +10059,8 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN); nf_tables_commit_audit_log(&adl, nft_net->base_seq); - WRITE_ONCE(nft_net->gc_seq, ++gc_seq); + nft_gc_seq_end(nft_net, gc_seq); + nft_net->validate_state = NFT_VALIDATE_SKIP; nf_tables_commit_release(net); return 0; @@ -10315,8 +10337,12 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb, enum nfnl_abort_action action) { struct nftables_pernet *nft_net = nft_pernet(net); - int ret = __nf_tables_abort(net, action); + unsigned int gc_seq; + int ret; + gc_seq = nft_gc_seq_begin(nft_net); + ret = __nf_tables_abort(net, action); + nft_gc_seq_end(nft_net, gc_seq); mutex_unlock(&nft_net->commit_mutex); return ret; @@ -11039,6 +11065,7 @@ static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event, struct net *net = n->net; unsigned int deleted; bool restart = false; + unsigned int gc_seq; if (event != NETLINK_URELEASE || n->protocol != NETLINK_NETFILTER) return NOTIFY_DONE; @@ -11046,8 +11073,11 @@ static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event, nft_net = nft_pernet(net); deleted = 0; mutex_lock(&nft_net->commit_mutex); + + gc_seq = nft_gc_seq_begin(nft_net); + if (!list_empty(&nf_tables_destroy_list)) - rcu_barrier(); + nf_tables_trans_destroy_flush_work(); again: list_for_each_entry(table, &nft_net->tables, list) { if (nft_table_has_owner(table) && @@ -11068,6 +11098,8 @@ static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event, if (restart) goto again; } + nft_gc_seq_end(nft_net, gc_seq); + mutex_unlock(&nft_net->commit_mutex); return NOTIFY_DONE; @@ -11089,6 +11121,7 @@ static int __net_init nf_tables_init_net(struct net *net) mutex_init(&nft_net->commit_mutex); nft_net->base_seq = 1; nft_net->gc_seq = 0; + nft_net->validate_state = NFT_VALIDATE_SKIP; return 0; } @@ -11105,12 +11138,20 @@ static void __net_exit nf_tables_pre_exit_net(struct net *net) static void __net_exit nf_tables_exit_net(struct net *net) { struct nftables_pernet *nft_net = nft_pernet(net); + unsigned int gc_seq; mutex_lock(&nft_net->commit_mutex); + + gc_seq = nft_gc_seq_begin(nft_net); + if (!list_empty(&nft_net->commit_list) || !list_empty(&nft_net->module_list)) __nf_tables_abort(net, NFNL_ABORT_NONE); + __nft_release_tables(net); + + nft_gc_seq_end(nft_net, gc_seq); + mutex_unlock(&nft_net->commit_mutex); WARN_ON_ONCE(!list_empty(&nft_net->tables)); WARN_ON_ONCE(!list_empty(&nft_net->module_list)); diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index 4fb34d76dbeaf..5c5cc01c73c5a 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c @@ -191,6 +191,9 @@ static int nft_dynset_init(const struct nft_ctx *ctx, if (IS_ERR(set)) return PTR_ERR(set); + if (set->flags & NFT_SET_OBJECT) + return -EOPNOTSUPP; + if (set->ops->update == NULL) return -EOPNOTSUPP; diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c index cef5df8460009..524763659f251 100644 --- a/net/netfilter/nft_set_hash.c +++ b/net/netfilter/nft_set_hash.c @@ -326,6 +326,9 @@ static void nft_rhash_gc(struct work_struct *work) nft_net = nft_pernet(net); gc_seq = READ_ONCE(nft_net->gc_seq); + if (nft_set_gc_is_pending(set)) + goto done; + gc = nft_trans_gc_alloc(set, gc_seq, GFP_KERNEL); if (!gc) goto done; diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c index a5b8301afe4af..6af9c9ed4b5c3 100644 --- 
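The rhash GC worker above (and the rbtree one later in this patch) now bails out early when a previous GC batch for the same set is still pending, instead of piling up further transactions. The guard amounts to a "work already queued" test-and-set; a small C11 sketch of that pattern (hypothetical names, not the nft_set_gc_is_pending() implementation itself):

/* Illustrative userspace sketch, not kernel code. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_bool gc_pending;

/* Producer: queue a GC run only if one is not already outstanding. */
static int gc_schedule(void)
{
	if (atomic_exchange(&gc_pending, true))
		return 0;	/* previous batch not consumed yet: skip */
	/* ...queue the work item here... */
	return 1;
}

/* Consumer: process the batch, then allow the next run to be queued. */
static void gc_worker(void)
{
	/* ...collect and release expired elements... */
	atomic_store(&gc_pending, false);
}

int main(void)
{
	printf("first schedule: %d\n", gc_schedule());	/* 1: queued  */
	printf("second schedule: %d\n", gc_schedule());	/* 0: deduped */
	gc_worker();
	printf("after worker: %d\n", gc_schedule());	/* 1: queued  */
	return 0;
}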
a/net/netfilter/nft_set_pipapo.c +++ b/net/netfilter/nft_set_pipapo.c @@ -566,6 +566,8 @@ static struct nft_pipapo_elem *pipapo_get(const struct net *net, goto out; if (last) { + if (nft_set_elem_expired(&f->mt[b].e->ext)) + goto next_match; if ((genmask && !nft_set_elem_active(&f->mt[b].e->ext, genmask))) goto next_match; @@ -600,17 +602,8 @@ static struct nft_pipapo_elem *pipapo_get(const struct net *net, static void *nft_pipapo_get(const struct net *net, const struct nft_set *set, const struct nft_set_elem *elem, unsigned int flags) { - struct nft_pipapo_elem *ret; - - ret = pipapo_get(net, set, (const u8 *)elem->key.val.data, + return pipapo_get(net, set, (const u8 *)elem->key.val.data, nft_genmask_cur(net)); - if (IS_ERR(ret)) - return ret; - - if (nft_set_elem_expired(&ret->ext)) - return ERR_PTR(-ENOENT); - - return ret; } /** @@ -909,12 +902,14 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f) static int pipapo_insert(struct nft_pipapo_field *f, const uint8_t *k, int mask_bits) { - int rule = f->rules++, group, ret, bit_offset = 0; + int rule = f->rules, group, ret, bit_offset = 0; - ret = pipapo_resize(f, f->rules - 1, f->rules); + ret = pipapo_resize(f, f->rules, f->rules + 1); if (ret) return ret; + f->rules++; + for (group = 0; group < f->groups; group++) { int i, v; u8 mask; @@ -1059,7 +1054,9 @@ static int pipapo_expand(struct nft_pipapo_field *f, step++; if (step >= len) { if (!masks) { - pipapo_insert(f, base, 0); + err = pipapo_insert(f, base, 0); + if (err < 0) + return err; masks = 1; } goto out; @@ -1242,6 +1239,9 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set, else ret = pipapo_expand(f, start, end, f->groups * f->bb); + if (ret < 0) + return ret; + if (f->bsize > bsize_max) bsize_max = f->bsize; @@ -1549,7 +1549,7 @@ static void nft_pipapo_gc_deactivate(struct net *net, struct nft_set *set, /** * pipapo_gc() - Drop expired entries from set, destroy start and end elements - * @set: nftables API set representation + * @_set: nftables API set representation * @m: Matching data */ static void pipapo_gc(const struct nft_set *_set, struct nft_pipapo_match *m) @@ -1697,6 +1697,17 @@ static void nft_pipapo_commit(const struct nft_set *set) priv->clone = new_clone; } +static bool nft_pipapo_transaction_mutex_held(const struct nft_set *set) +{ +#ifdef CONFIG_PROVE_LOCKING + const struct net *net = read_pnet(&set->net); + + return lockdep_is_held(&nft_pernet(net)->commit_mutex); +#else + return true; +#endif +} + static void nft_pipapo_abort(const struct nft_set *set) { struct nft_pipapo *priv = nft_set_priv(set); @@ -1705,7 +1716,7 @@ static void nft_pipapo_abort(const struct nft_set *set) if (!priv->dirty) return; - m = rcu_dereference(priv->match); + m = rcu_dereference_protected(priv->match, nft_pipapo_transaction_mutex_held(set)); new_clone = pipapo_clone(m); if (IS_ERR(new_clone)) @@ -1732,11 +1743,7 @@ static void nft_pipapo_activate(const struct net *net, const struct nft_set *set, const struct nft_set_elem *elem) { - struct nft_pipapo_elem *e; - - e = pipapo_get(net, set, (const u8 *)elem->key.val.data, 0); - if (IS_ERR(e)) - return; + struct nft_pipapo_elem *e = elem->priv; nft_set_elem_change_active(net, set, &e->ext); } @@ -1950,10 +1957,6 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set, data = (const u8 *)nft_set_ext_key(&e->ext); - e = pipapo_get(net, set, data, 0); - if (IS_ERR(e)) - return; - while ((rules_f0 = pipapo_rules_same_key(m->f, first_rule))) { union nft_pipapo_map_bucket 
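The pipapo_insert() change above is a small but typical ordering fix: f->rules is only incremented after pipapo_resize() has succeeded, so a failed allocation can no longer leave the count claiming one more rule than the buckets actually hold. The same grow-before-publish pattern in plain C with realloc() (illustrative, not the kernel data structure):

/* Illustrative userspace sketch, not kernel code. */
#include <stdio.h>
#include <stdlib.h>

struct rule_table {
	int *rules;
	size_t count;
};

/* Grow storage first; only bump the visible count once the slot exists. */
static int table_insert(struct rule_table *t, int value)
{
	int *bigger = realloc(t->rules, (t->count + 1) * sizeof(*t->rules));

	if (!bigger)
		return -1;	/* count still matches the old allocation */

	t->rules = bigger;
	t->rules[t->count] = value;
	t->count++;		/* publish only after the slot is valid */
	return 0;
}

int main(void)
{
	struct rule_table t = { NULL, 0 };

	for (int i = 0; i < 3; i++)
		if (table_insert(&t, i * 10))
			return 1;

	printf("count=%zu last=%d\n", t.count, t.rules[t.count - 1]);
	free(t.rules);
	return 0;
}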
rulemap[NFT_PIPAPO_MAX_FIELDS]; const u8 *match_start, *match_end; diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index f9d4c8fcbbf82..c6435e7092319 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c @@ -611,6 +611,9 @@ static void nft_rbtree_gc(struct work_struct *work) nft_net = nft_pernet(net); gc_seq = READ_ONCE(nft_net->gc_seq); + if (nft_set_gc_is_pending(set)) + goto done; + gc = nft_trans_gc_alloc(set, gc_seq, GFP_KERNEL); if (!gc) goto done; diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index a6d2a0b1aa21e..3d7a91e64c88f 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -1829,7 +1829,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info) parms.port_no = OVSP_LOCAL; parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID]; parms.desired_ifindex = a[OVS_DP_ATTR_IFINDEX] - ? nla_get_u32(a[OVS_DP_ATTR_IFINDEX]) : 0; + ? nla_get_s32(a[OVS_DP_ATTR_IFINDEX]) : 0; /* So far only local changes have been made, now need the lock. */ ovs_lock(); @@ -2049,7 +2049,7 @@ static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = { [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 }, [OVS_DP_ATTR_MASKS_CACHE_SIZE] = NLA_POLICY_RANGE(NLA_U32, 0, PCPU_MIN_UNIT_SIZE / sizeof(struct mask_cache_entry)), - [OVS_DP_ATTR_IFINDEX] = {.type = NLA_U32 }, + [OVS_DP_ATTR_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 0), }; static const struct genl_small_ops dp_datapath_genl_ops[] = { @@ -2302,7 +2302,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info) parms.port_no = port_no; parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID]; parms.desired_ifindex = a[OVS_VPORT_ATTR_IFINDEX] - ? nla_get_u32(a[OVS_VPORT_ATTR_IFINDEX]) : 0; + ? nla_get_s32(a[OVS_VPORT_ATTR_IFINDEX]) : 0; vport = new_vport(&parms); err = PTR_ERR(vport); @@ -2539,7 +2539,7 @@ static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = { [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 }, [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_UNSPEC }, [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED }, - [OVS_VPORT_ATTR_IFINDEX] = { .type = NLA_U32 }, + [OVS_VPORT_ATTR_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 0), [OVS_VPORT_ATTR_NETNSID] = { .type = NLA_S32 }, [OVS_VPORT_ATTR_UPCALL_STATS] = { .type = NLA_NESTED }, }; diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index aa6b1fe651519..e9eaf637220e9 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1547,10 +1547,28 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, return 0; } +static bool req_create_or_replace(struct nlmsghdr *n) +{ + return (n->nlmsg_flags & NLM_F_CREATE && + n->nlmsg_flags & NLM_F_REPLACE); +} + +static bool req_create_exclusive(struct nlmsghdr *n) +{ + return (n->nlmsg_flags & NLM_F_CREATE && + n->nlmsg_flags & NLM_F_EXCL); +} + +static bool req_change(struct nlmsghdr *n) +{ + return (!(n->nlmsg_flags & NLM_F_CREATE) && + !(n->nlmsg_flags & NLM_F_REPLACE) && + !(n->nlmsg_flags & NLM_F_EXCL)); +} + /* * Create/change qdisc. */ - static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, struct netlink_ext_ack *extack) { @@ -1644,27 +1662,35 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, * * We know, that some child q is already * attached to this parent and have choice: - * either to change it or to create/graft new one. + * 1) change it or 2) create/graft new one. + * If the requested qdisc kind is different + * than the existing one, then we choose graft. 
+ * If they are the same then this is "change" + * operation - just let it fallthrough.. * * 1. We are allowed to create/graft only - * if CREATE and REPLACE flags are set. + * if the request is explicitly stating + * "please create if it doesn't exist". * - * 2. If EXCL is set, requestor wanted to say, - * that qdisc tcm_handle is not expected + * 2. If the request is to exclusive create + * then the qdisc tcm_handle is not expected * to exist, so that we choose create/graft too. * * 3. The last case is when no flags are set. + * This will happen when for example tc + * utility issues a "change" command. * Alas, it is sort of hole in API, we * cannot decide what to do unambiguously. - * For now we select create/graft, if - * user gave KIND, which does not match existing. + * For now we select create/graft. */ - if ((n->nlmsg_flags & NLM_F_CREATE) && - (n->nlmsg_flags & NLM_F_REPLACE) && - ((n->nlmsg_flags & NLM_F_EXCL) || - (tca[TCA_KIND] && - nla_strcmp(tca[TCA_KIND], q->ops->id)))) - goto create_n_graft; + if (tca[TCA_KIND] && + nla_strcmp(tca[TCA_KIND], q->ops->id)) { + if (req_create_or_replace(n) || + req_create_exclusive(n)) + goto create_n_graft; + else if (req_change(n)) + goto create_n_graft2; + } } } } else { @@ -1698,6 +1724,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag"); return -ENOENT; } +create_n_graft2: if (clid == TC_H_INGRESS) { if (dev_ingress_queue(dev)) { q = qdisc_create(dev, dev_ingress_queue(dev), diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 9388d98aebc03..76f1bce49a8e7 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -99,7 +99,7 @@ struct percpu_counter sctp_sockets_allocated; static void sctp_enter_memory_pressure(struct sock *sk) { - sctp_memory_pressure = 1; + WRITE_ONCE(sctp_memory_pressure, 1); } @@ -9479,7 +9479,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk, newinet->inet_rcv_saddr = inet->inet_rcv_saddr; newinet->inet_dport = htons(asoc->peer.port); newinet->pmtudisc = inet->pmtudisc; - newinet->inet_id = get_random_u16(); + atomic_set(&newinet->inet_id, get_random_u16()); newinet->uc_ttl = inet->uc_ttl; newinet->mc_loop = 1; diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index e43f263824114..2eb8df44f894d 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -1244,6 +1244,9 @@ static int svc_tcp_sendmsg(struct socket *sock, struct xdr_buf *xdr, if (ret != head->iov_len) goto out; + if (xdr_buf_pagecount(xdr)) + xdr->bvec[0].bv_offset = offset_in_page(xdr->page_base); + msg.msg_flags = MSG_SPLICE_PAGES; iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, xdr->bvec, xdr_buf_pagecount(xdr), xdr->page_len); diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index b098fde373abf..28c0771c4e8c3 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -935,9 +935,6 @@ struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt, if (!rep->rr_rdmabuf) goto out_free; - if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf)) - goto out_free_regbuf; - rep->rr_cid.ci_completion_id = atomic_inc_return(&r_xprt->rx_ep->re_completion_ids); @@ -956,8 +953,6 @@ struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt, spin_unlock(&buf->rb_lock); return rep; -out_free_regbuf: - rpcrdma_regbuf_free(rep->rr_rdmabuf); out_free: kfree(rep); out: @@ -1363,6 +1358,10 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp) rep = rpcrdma_rep_create(r_xprt, 
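The reworked tc_modify_qdisc() logic above boils the NLM_F_* combinations down to three named predicates and only takes the create/graft path on a kind mismatch. A compact, compilable illustration of those predicates and the resulting decision (flag values copied from <linux/netlink.h>; the decision function is a simplified stand-in for the real code path):

/* Illustrative userspace sketch, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define NLM_F_REPLACE	0x100	/* values as in <linux/netlink.h> */
#define NLM_F_EXCL	0x200
#define NLM_F_CREATE	0x400

static bool req_create_or_replace(unsigned int flags)
{
	return (flags & NLM_F_CREATE) && (flags & NLM_F_REPLACE);
}

static bool req_create_exclusive(unsigned int flags)
{
	return (flags & NLM_F_CREATE) && (flags & NLM_F_EXCL);
}

static bool req_change(unsigned int flags)
{
	return !(flags & (NLM_F_CREATE | NLM_F_REPLACE | NLM_F_EXCL));
}

/* Simplified decision: the requested kind differs from the existing qdisc. */
static const char *on_kind_mismatch(unsigned int flags)
{
	if (req_create_or_replace(flags) || req_create_exclusive(flags))
		return "create and graft a new qdisc";
	if (req_change(flags))
		return "create and graft (plain change request, API hole)";
	return "fall through to change";
}

int main(void)
{
	printf("%s\n", on_kind_mismatch(NLM_F_CREATE | NLM_F_REPLACE));
	printf("%s\n", on_kind_mismatch(0));
	return 0;
}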
temp); if (!rep) break; + if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf)) { + rpcrdma_rep_put(buf, rep); + break; + } rep->rr_cid.ci_queue_id = ep->re_attr.recv_cq->res.id; trace_xprtrdma_post_recv(rep); diff --git a/net/xfrm/xfrm_compat.c b/net/xfrm/xfrm_compat.c index 8cbf45a8bcdc2..655fe4ff86212 100644 --- a/net/xfrm/xfrm_compat.c +++ b/net/xfrm/xfrm_compat.c @@ -108,7 +108,7 @@ static const struct nla_policy compat_policy[XFRMA_MAX+1] = { [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) }, [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) }, [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) }, - [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) }, + [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_user_sec_ctx) }, [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) }, [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) }, [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 }, diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index 815b380804011..d5ee96789d4bf 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -180,6 +180,8 @@ static int xfrm4_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb) int optlen = 0; int err = -EINVAL; + skb->protocol = htons(ETH_P_IP); + if (unlikely(XFRM_MODE_SKB_CB(skb)->protocol == IPPROTO_BEETPH)) { struct ip_beet_phdr *ph; int phlen; @@ -232,6 +234,8 @@ static int xfrm4_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb) { int err = -EINVAL; + skb->protocol = htons(ETH_P_IP); + if (!pskb_may_pull(skb, sizeof(struct iphdr))) goto out; @@ -267,6 +271,8 @@ static int xfrm6_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb) { int err = -EINVAL; + skb->protocol = htons(ETH_P_IPV6); + if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) goto out; @@ -296,6 +302,8 @@ static int xfrm6_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb) int size = sizeof(struct ipv6hdr); int err; + skb->protocol = htons(ETH_P_IPV6); + err = skb_cow_head(skb, size + skb->mac_len); if (err) goto out; @@ -346,6 +354,7 @@ xfrm_inner_mode_encap_remove(struct xfrm_state *x, return xfrm6_remove_tunnel_encap(x, skb); break; } + return -EINVAL; } WARN_ON_ONCE(1); @@ -366,19 +375,6 @@ static int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb) return -EAFNOSUPPORT; } - switch (XFRM_MODE_SKB_CB(skb)->protocol) { - case IPPROTO_IPIP: - case IPPROTO_BEETPH: - skb->protocol = htons(ETH_P_IP); - break; - case IPPROTO_IPV6: - skb->protocol = htons(ETH_P_IPV6); - break; - default: - WARN_ON_ONCE(1); - break; - } - return xfrm_inner_mode_encap_remove(x, skb); } diff --git a/net/xfrm/xfrm_interface_core.c b/net/xfrm/xfrm_interface_core.c index a3319965470a7..b864740846902 100644 --- a/net/xfrm/xfrm_interface_core.c +++ b/net/xfrm/xfrm_interface_core.c @@ -537,8 +537,8 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev) switch (skb->protocol) { case htons(ETH_P_IPV6): - xfrm_decode_session(skb, &fl, AF_INET6); memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); + xfrm_decode_session(skb, &fl, AF_INET6); if (!dst) { fl.u.ip6.flowi6_oif = dev->ifindex; fl.u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC; @@ -552,8 +552,8 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev) } break; case htons(ETH_P_IP): - xfrm_decode_session(skb, &fl, AF_INET); memset(IPCB(skb), 0, sizeof(*IPCB(skb))); + xfrm_decode_session(skb, &fl, AF_INET); if (!dst) { struct rtable *rt; diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 49e63eea841dd..bda5327bf34df 100644 --- 
a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -1324,12 +1324,8 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr, struct xfrm_dev_offload *xso = &x->xso; if (xso->type == XFRM_DEV_OFFLOAD_PACKET) { - xso->dev->xfrmdev_ops->xdo_dev_state_delete(x); - xso->dir = 0; - netdev_put(xso->dev, &xso->dev_tracker); - xso->dev = NULL; - xso->real_dev = NULL; - xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED; + xfrm_dev_state_delete(x); + xfrm_dev_state_free(x); } #endif x->km.state = XFRM_STATE_DEAD; diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index c34a2a06ca940..ad01997c3aa9d 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -628,7 +628,7 @@ static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs, struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH]; struct nlattr *mt = attrs[XFRMA_MTIMER_THRESH]; - if (re) { + if (re && x->replay_esn && x->preplay_esn) { struct xfrm_replay_state_esn *replay_esn; replay_esn = nla_data(re); memcpy(x->replay_esn, replay_esn, @@ -1267,6 +1267,15 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb) sizeof(*filter), GFP_KERNEL); if (filter == NULL) return -ENOMEM; + + /* see addr_match(), (prefix length >> 5) << 2 + * will be used to compare xfrm_address_t + */ + if (filter->splen > (sizeof(xfrm_address_t) << 3) || + filter->dplen > (sizeof(xfrm_address_t) << 3)) { + kfree(filter); + return -EINVAL; + } } if (attrs[XFRMA_PROTO]) @@ -2336,6 +2345,7 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, NETLINK_CB(skb).portid); } } else { + xfrm_dev_policy_delete(xp); xfrm_audit_policy_delete(xp, err ? 0 : 1, true); if (err != 0) @@ -3015,7 +3025,7 @@ const struct nla_policy xfrma_policy[XFRMA_MAX+1] = { [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) }, [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) }, [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) }, - [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) }, + [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_user_sec_ctx) }, [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) }, [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) }, [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 }, @@ -3035,6 +3045,7 @@ const struct nla_policy xfrma_policy[XFRMA_MAX+1] = { [XFRMA_SET_MARK] = { .type = NLA_U32 }, [XFRMA_SET_MARK_MASK] = { .type = NLA_U32 }, [XFRMA_IF_ID] = { .type = NLA_U32 }, + [XFRMA_MTIMER_THRESH] = { .type = NLA_U32 }, }; EXPORT_SYMBOL_GPL(xfrma_policy); diff --git a/rust/macros/vtable.rs b/rust/macros/vtable.rs index 34d5e7fb5768a..ee06044fcd4f3 100644 --- a/rust/macros/vtable.rs +++ b/rust/macros/vtable.rs @@ -74,6 +74,7 @@ pub(crate) fn vtable(_attr: TokenStream, ts: TokenStream) -> TokenStream { const {gen_const_name}: bool = false;", ) .unwrap(); + consts.insert(gen_const_name); } } else { const_items = "const USE_VTABLE_ATTR: () = ();".to_owned(); diff --git a/samples/ftrace/ftrace-direct-modify.c b/samples/ftrace/ftrace-direct-modify.c index e5ed08098ff31..e2a6a69352dfb 100644 --- a/samples/ftrace/ftrace-direct-modify.c +++ b/samples/ftrace/ftrace-direct-modify.c @@ -105,7 +105,7 @@ asm ( " .type my_tramp1, @function\n" " .globl my_tramp1\n" " my_tramp1:" -" bti c\n" +" hint 34\n" // bti c " sub sp, sp, #16\n" " stp x9, x30, [sp]\n" " bl my_direct_func1\n" @@ -117,7 +117,7 @@ asm ( " .type my_tramp2, @function\n" " .globl my_tramp2\n" " my_tramp2:" -" bti c\n" +" hint 34\n" // bti c " sub sp, sp, #16\n" " stp x9, x30, [sp]\n" " bl my_direct_func2\n" diff --git 
a/samples/ftrace/ftrace-direct-multi-modify.c b/samples/ftrace/ftrace-direct-multi-modify.c index 292cff2b3f5d7..2e349834d63c3 100644 --- a/samples/ftrace/ftrace-direct-multi-modify.c +++ b/samples/ftrace/ftrace-direct-multi-modify.c @@ -112,7 +112,7 @@ asm ( " .type my_tramp1, @function\n" " .globl my_tramp1\n" " my_tramp1:" -" bti c\n" +" hint 34\n" // bti c " sub sp, sp, #32\n" " stp x9, x30, [sp]\n" " str x0, [sp, #16]\n" @@ -127,7 +127,7 @@ asm ( " .type my_tramp2, @function\n" " .globl my_tramp2\n" " my_tramp2:" -" bti c\n" +" hint 34\n" // bti c " sub sp, sp, #32\n" " stp x9, x30, [sp]\n" " str x0, [sp, #16]\n" diff --git a/samples/ftrace/ftrace-direct-multi.c b/samples/ftrace/ftrace-direct-multi.c index b4391e08c913e..9243dbfe4d0c1 100644 --- a/samples/ftrace/ftrace-direct-multi.c +++ b/samples/ftrace/ftrace-direct-multi.c @@ -75,7 +75,7 @@ asm ( " .type my_tramp, @function\n" " .globl my_tramp\n" " my_tramp:" -" bti c\n" +" hint 34\n" // bti c " sub sp, sp, #32\n" " stp x9, x30, [sp]\n" " str x0, [sp, #16]\n" diff --git a/samples/ftrace/ftrace-direct-too.c b/samples/ftrace/ftrace-direct-too.c index e9804c5307c0c..e39c3563ae4e4 100644 --- a/samples/ftrace/ftrace-direct-too.c +++ b/samples/ftrace/ftrace-direct-too.c @@ -81,7 +81,7 @@ asm ( " .type my_tramp, @function\n" " .globl my_tramp\n" " my_tramp:" -" bti c\n" +" hint 34\n" // bti c " sub sp, sp, #48\n" " stp x9, x30, [sp]\n" " stp x0, x1, [sp, #16]\n" diff --git a/samples/ftrace/ftrace-direct.c b/samples/ftrace/ftrace-direct.c index 20f4a7caa810e..32c477da1e9aa 100644 --- a/samples/ftrace/ftrace-direct.c +++ b/samples/ftrace/ftrace-direct.c @@ -72,7 +72,7 @@ asm ( " .type my_tramp, @function\n" " .globl my_tramp\n" " my_tramp:" -" bti c\n" +" hint 34\n" // bti c " sub sp, sp, #32\n" " stp x9, x30, [sp]\n" " str x0, [sp, #16]\n" diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c index 31b08b34c7224..dc904865af58e 100644 --- a/security/selinux/ss/policydb.c +++ b/security/selinux/ss/policydb.c @@ -2005,6 +2005,7 @@ static int filename_trans_read_helper(struct policydb *p, void *fp) if (!datum) goto out; + datum->next = NULL; *dst = datum; /* ebitmap_read() will at least init the bitmap */ @@ -2017,7 +2018,6 @@ static int filename_trans_read_helper(struct policydb *p, void *fp) goto out; datum->otype = le32_to_cpu(buf[0]); - datum->next = NULL; dst = &datum->next; } diff --git a/sound/pci/hda/patch_cs8409-tables.c b/sound/pci/hda/patch_cs8409-tables.c index b288874e401e5..36b411d1a9609 100644 --- a/sound/pci/hda/patch_cs8409-tables.c +++ b/sound/pci/hda/patch_cs8409-tables.c @@ -550,6 +550,10 @@ const struct snd_pci_quirk cs8409_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x0C50, "Dolphin", CS8409_DOLPHIN), SND_PCI_QUIRK(0x1028, 0x0C51, "Dolphin", CS8409_DOLPHIN), SND_PCI_QUIRK(0x1028, 0x0C52, "Dolphin", CS8409_DOLPHIN), + SND_PCI_QUIRK(0x1028, 0x0C73, "Dolphin", CS8409_DOLPHIN), + SND_PCI_QUIRK(0x1028, 0x0C75, "Dolphin", CS8409_DOLPHIN), + SND_PCI_QUIRK(0x1028, 0x0C7D, "Dolphin", CS8409_DOLPHIN), + SND_PCI_QUIRK(0x1028, 0x0C7F, "Dolphin", CS8409_DOLPHIN), {} /* terminator */ }; diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 44fccfb93cff4..dc7b7a407638a 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -9422,11 +9422,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x0cbd, "Dell Oasis 13 CS MTL-U", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1028, 0x0cbe, "Dell Oasis 13 2-IN-1 MTL-U", 
ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1028, 0x0cbf, "Dell Oasis 13 Low Weight MTU-L", ALC245_FIXUP_CS35L41_SPI_2), - SND_PCI_QUIRK(0x1028, 0x0cc1, "Dell Oasis 14 MTL-H/U", ALC287_FIXUP_CS35L41_I2C_2), - SND_PCI_QUIRK(0x1028, 0x0cc2, "Dell Oasis 14 2-in-1 MTL-H/U", ALC287_FIXUP_CS35L41_I2C_2), - SND_PCI_QUIRK(0x1028, 0x0cc3, "Dell Oasis 14 Low Weight MTL-U", ALC287_FIXUP_CS35L41_I2C_2), - SND_PCI_QUIRK(0x1028, 0x0cc4, "Dell Oasis 16 MTL-H/U", ALC287_FIXUP_CS35L41_I2C_2), - SND_PCI_QUIRK(0x1028, 0x0cc5, "Dell Oasis MLK 14 RPL-P", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x1028, 0x0cc1, "Dell Oasis 14 MTL-H/U", ALC245_FIXUP_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1028, 0x0cc2, "Dell Oasis 14 2-in-1 MTL-H/U", ALC245_FIXUP_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1028, 0x0cc3, "Dell Oasis 14 Low Weight MTL-U", ALC245_FIXUP_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1028, 0x0cc4, "Dell Oasis 16 MTL-H/U", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), @@ -9617,7 +9616,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8b96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8b97, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8bf0, "HP", ALC236_FIXUP_HP_GPIO_LED), - SND_PCI_QUIRK(0x103c, 0x8c26, "HP HP EliteBook 800G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8c46, "HP EliteBook 830 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8c47, "HP EliteBook 840 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8c48, "HP EliteBook 860 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8c49, "HP Elite x360 830 2-in-1 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8c70, "HP EliteBook 835 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8c71, "HP EliteBook 845 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8c72, "HP EliteBook 865 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), @@ -10638,6 +10643,7 @@ static int patch_alc269(struct hda_codec *codec) spec = codec->spec; spec->gen.shared_mic_vref_pin = 0x18; codec->power_save_node = 0; + spec->en_3kpull_low = true; #ifdef CONFIG_PM codec->patch_ops.suspend = alc269_suspend; @@ -10720,14 +10726,16 @@ static int patch_alc269(struct hda_codec *codec) spec->shutup = alc256_shutup; spec->init_hook = alc256_init; spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */ - if (codec->bus->pci->vendor == PCI_VENDOR_ID_AMD) - spec->en_3kpull_low = true; + if (codec->core.vendor_id == 0x10ec0236 && + codec->bus->pci->vendor != PCI_VENDOR_ID_AMD) + spec->en_3kpull_low = false; break; case 0x10ec0257: spec->codec_variant = ALC269_TYPE_ALC257; spec->shutup = alc256_shutup; spec->init_hook = alc256_init; spec->gen.mixer_nid = 0; + spec->en_3kpull_low = false; break; case 0x10ec0215: case 0x10ec0245: diff --git a/sound/pci/ymfpci/ymfpci.c b/sound/pci/ymfpci/ymfpci.c index b033bd2909405..48444dda44def 100644 --- a/sound/pci/ymfpci/ymfpci.c +++ b/sound/pci/ymfpci/ymfpci.c @@ -152,8 +152,8 @@ static inline int 
snd_ymfpci_create_gameport(struct snd_ymfpci *chip, int dev, i void snd_ymfpci_free_gameport(struct snd_ymfpci *chip) { } #endif /* SUPPORT_JOYSTICK */ -static int snd_card_ymfpci_probe(struct pci_dev *pci, - const struct pci_device_id *pci_id) +static int __snd_card_ymfpci_probe(struct pci_dev *pci, + const struct pci_device_id *pci_id) { static int dev; struct snd_card *card; @@ -348,6 +348,12 @@ static int snd_card_ymfpci_probe(struct pci_dev *pci, return 0; } +static int snd_card_ymfpci_probe(struct pci_dev *pci, + const struct pci_device_id *pci_id) +{ + return snd_card_free_on_error(&pci->dev, __snd_card_ymfpci_probe(pci, pci_id)); +} + static struct pci_driver ymfpci_driver = { .name = KBUILD_MODNAME, .id_table = snd_ymfpci_ids, diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c index a2fe3bd4f9a1c..b304b3562c82b 100644 --- a/sound/soc/amd/yc/acp6x-mach.c +++ b/sound/soc/amd/yc/acp6x-mach.c @@ -217,7 +217,7 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { .driver_data = &acp6x_card, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_NAME, "82"), + DMI_MATCH(DMI_PRODUCT_NAME, "82V2"), } }, { @@ -248,6 +248,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "M3402RA"), } }, + { + .driver_data = &acp6x_card, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "M6500RC"), + } + }, { .driver_data = &acp6x_card, .matches = { diff --git a/sound/soc/codecs/cs35l41.c b/sound/soc/codecs/cs35l41.c index 6ac501f008eca..8a879b6f48290 100644 --- a/sound/soc/codecs/cs35l41.c +++ b/sound/soc/codecs/cs35l41.c @@ -168,7 +168,7 @@ static int cs35l41_get_fs_mon_config_index(int freq) static const DECLARE_TLV_DB_RANGE(dig_vol_tlv, 0, 0, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1), 1, 913, TLV_DB_MINMAX_ITEM(-10200, 1200)); -static DECLARE_TLV_DB_SCALE(amp_gain_tlv, 0, 1, 1); +static DECLARE_TLV_DB_SCALE(amp_gain_tlv, 50, 100, 0); static const struct snd_kcontrol_new dre_ctrl = SOC_DAPM_SINGLE("Switch", CS35L41_PWR_CTRL3, 20, 1, 0); diff --git a/sound/soc/codecs/cs35l56-i2c.c b/sound/soc/codecs/cs35l56-i2c.c index ed2a41943d978..40666e6698ba9 100644 --- a/sound/soc/codecs/cs35l56-i2c.c +++ b/sound/soc/codecs/cs35l56-i2c.c @@ -62,10 +62,19 @@ static const struct i2c_device_id cs35l56_id_i2c[] = { }; MODULE_DEVICE_TABLE(i2c, cs35l56_id_i2c); +#ifdef CONFIG_ACPI +static const struct acpi_device_id cs35l56_asoc_acpi_match[] = { + { "CSC355C", 0 }, + {}, +}; +MODULE_DEVICE_TABLE(acpi, cs35l56_asoc_acpi_match); +#endif + static struct i2c_driver cs35l56_i2c_driver = { .driver = { .name = "cs35l56", .pm = &cs35l56_pm_ops_i2c_spi, + .acpi_match_table = ACPI_PTR(cs35l56_asoc_acpi_match), }, .id_table = cs35l56_id_i2c, .probe = cs35l56_i2c_probe, diff --git a/sound/soc/codecs/cs35l56-spi.c b/sound/soc/codecs/cs35l56-spi.c index 996aab10500ee..302f9c47407a4 100644 --- a/sound/soc/codecs/cs35l56-spi.c +++ b/sound/soc/codecs/cs35l56-spi.c @@ -59,10 +59,19 @@ static const struct spi_device_id cs35l56_id_spi[] = { }; MODULE_DEVICE_TABLE(spi, cs35l56_id_spi); +#ifdef CONFIG_ACPI +static const struct acpi_device_id cs35l56_asoc_acpi_match[] = { + { "CSC355C", 0 }, + {}, +}; +MODULE_DEVICE_TABLE(acpi, cs35l56_asoc_acpi_match); +#endif + static struct spi_driver cs35l56_spi_driver = { .driver = { .name = "cs35l56", .pm = &cs35l56_pm_ops_i2c_spi, + .acpi_match_table = ACPI_PTR(cs35l56_asoc_acpi_match), }, .id_table = cs35l56_id_spi, .probe = cs35l56_spi_probe, diff --git 
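The ymfpci change earlier in this hunk turns the probe routine into a thin wrapper so that every error path funnels through one cleanup call instead of each exit label freeing the card by hand. That wrapper-around-__probe pattern, reduced to plain C (generic names; snd_card_free_on_error() is the real helper the driver uses):

/* Illustrative userspace sketch, not kernel code. */
#include <stdio.h>
#include <stdlib.h>

struct card { int *resource; };

/* Does the real work; may fail at several points. */
static int __my_probe(struct card *card, int want)
{
	card->resource = malloc(sizeof(*card->resource));
	if (!card->resource)
		return -1;
	if (want < 0)
		return -2;	/* later failure: resource already allocated */
	*card->resource = want;
	return 0;
}

/* Single choke point: on any error, release whatever __my_probe() set up. */
static int my_probe(struct card *card, int want)
{
	int err = __my_probe(card, want);

	if (err) {
		free(card->resource);
		card->resource = NULL;
	}
	return err;
}

int main(void)
{
	struct card c = { NULL };

	printf("good probe: %d\n", my_probe(&c, 42));
	free(c.resource);
	c.resource = NULL;
	printf("bad probe: %d (resource=%p)\n", my_probe(&c, -1), (void *)c.resource);
	return 0;
}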
a/sound/soc/codecs/cs35l56.c b/sound/soc/codecs/cs35l56.c index c03f9d3c9a136..fd06b9f9d496d 100644 --- a/sound/soc/codecs/cs35l56.c +++ b/sound/soc/codecs/cs35l56.c @@ -5,7 +5,6 @@ // Copyright (C) 2023 Cirrus Logic, Inc. and // Cirrus Logic International Semiconductor Ltd. -#include #include #include #include @@ -1354,26 +1353,22 @@ static int cs35l56_dsp_init(struct cs35l56_private *cs35l56) return 0; } -static int cs35l56_acpi_get_name(struct cs35l56_private *cs35l56) +static int cs35l56_get_firmware_uid(struct cs35l56_private *cs35l56) { - acpi_handle handle = ACPI_HANDLE(cs35l56->dev); - const char *sub; + struct device *dev = cs35l56->dev; + const char *prop; + int ret; - /* If there is no ACPI_HANDLE, there is no ACPI for this system, return 0 */ - if (!handle) + ret = device_property_read_string(dev, "cirrus,firmware-uid", &prop); + /* If bad sw node property, return 0 and fallback to legacy firmware path */ + if (ret < 0) return 0; - sub = acpi_get_subsystem_id(handle); - if (IS_ERR(sub)) { - /* If bad ACPI, return 0 and fallback to legacy firmware path, otherwise fail */ - if (PTR_ERR(sub) == -ENODATA) - return 0; - else - return PTR_ERR(sub); - } + cs35l56->dsp.system_name = devm_kstrdup(dev, prop, GFP_KERNEL); + if (cs35l56->dsp.system_name == NULL) + return -ENOMEM; - cs35l56->dsp.system_name = sub; - dev_dbg(cs35l56->dev, "Subsystem ID: %s\n", cs35l56->dsp.system_name); + dev_dbg(dev, "Firmware UID: %s\n", cs35l56->dsp.system_name); return 0; } @@ -1417,7 +1412,7 @@ int cs35l56_common_probe(struct cs35l56_private *cs35l56) gpiod_set_value_cansleep(cs35l56->reset_gpio, 1); } - ret = cs35l56_acpi_get_name(cs35l56); + ret = cs35l56_get_firmware_uid(cs35l56); if (ret != 0) goto err; @@ -1604,8 +1599,6 @@ void cs35l56_remove(struct cs35l56_private *cs35l56) regcache_cache_only(cs35l56->regmap, true); - kfree(cs35l56->dsp.system_name); - gpiod_set_value_cansleep(cs35l56->reset_gpio, 0); regulator_bulk_disable(ARRAY_SIZE(cs35l56->supplies), cs35l56->supplies); } diff --git a/sound/soc/codecs/max98363.c b/sound/soc/codecs/max98363.c index b5c69bba0e488..2dfaf4fcfbd37 100644 --- a/sound/soc/codecs/max98363.c +++ b/sound/soc/codecs/max98363.c @@ -185,10 +185,10 @@ static int max98363_io_init(struct sdw_slave *slave) pm_runtime_get_noresume(dev); ret = regmap_read(max98363->regmap, MAX98363_R21FF_REV_ID, ®); - if (!ret) { + if (!ret) dev_info(dev, "Revision ID: %X\n", reg); - return ret; - } + else + goto out; if (max98363->first_hw_init) { regcache_cache_bypass(max98363->regmap, false); @@ -198,10 +198,11 @@ static int max98363_io_init(struct sdw_slave *slave) max98363->first_hw_init = true; max98363->hw_init = true; +out: pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); - return 0; + return ret; } #define MAX98363_RATES SNDRV_PCM_RATE_8000_192000 diff --git a/sound/soc/codecs/rt1308-sdw.c b/sound/soc/codecs/rt1308-sdw.c index f43520ca31874..e566c8ddd3e97 100644 --- a/sound/soc/codecs/rt1308-sdw.c +++ b/sound/soc/codecs/rt1308-sdw.c @@ -52,6 +52,7 @@ static bool rt1308_volatile_register(struct device *dev, unsigned int reg) case 0x300a: case 0xc000: case 0xc710: + case 0xcf01: case 0xc860 ... 0xc863: case 0xc870 ... 
0xc873: return true; @@ -213,7 +214,7 @@ static int rt1308_io_init(struct device *dev, struct sdw_slave *slave) { struct rt1308_sdw_priv *rt1308 = dev_get_drvdata(dev); int ret = 0; - unsigned int tmp; + unsigned int tmp, hibernation_flag; if (rt1308->hw_init) return 0; @@ -242,6 +243,10 @@ static int rt1308_io_init(struct device *dev, struct sdw_slave *slave) pm_runtime_get_noresume(&slave->dev); + regmap_read(rt1308->regmap, 0xcf01, &hibernation_flag); + if ((hibernation_flag != 0x00) && rt1308->first_hw_init) + goto _preset_ready_; + /* sw reset */ regmap_write(rt1308->regmap, RT1308_SDW_RESET, 0); @@ -282,6 +287,12 @@ static int rt1308_io_init(struct device *dev, struct sdw_slave *slave) regmap_write(rt1308->regmap, 0xc100, 0xd7); regmap_write(rt1308->regmap, 0xc101, 0xd7); + /* apply BQ params */ + rt1308_apply_bq_params(rt1308); + + regmap_write(rt1308->regmap, 0xcf01, 0x01); + +_preset_ready_: if (rt1308->first_hw_init) { regcache_cache_bypass(rt1308->regmap, false); regcache_mark_dirty(rt1308->regmap); diff --git a/sound/soc/codecs/rt5665.c b/sound/soc/codecs/rt5665.c index 83c367af91dad..525713c33d716 100644 --- a/sound/soc/codecs/rt5665.c +++ b/sound/soc/codecs/rt5665.c @@ -4472,6 +4472,8 @@ static void rt5665_remove(struct snd_soc_component *component) struct rt5665_priv *rt5665 = snd_soc_component_get_drvdata(component); regmap_write(rt5665->regmap, RT5665_RESET, 0); + + regulator_bulk_disable(ARRAY_SIZE(rt5665->supplies), rt5665->supplies); } #ifdef CONFIG_PM diff --git a/sound/soc/codecs/tas2781-comlib.c b/sound/soc/codecs/tas2781-comlib.c index a88c6c28a3941..ffb26e4a7e2f0 100644 --- a/sound/soc/codecs/tas2781-comlib.c +++ b/sound/soc/codecs/tas2781-comlib.c @@ -57,16 +57,17 @@ static int tasdevice_change_chn_book(struct tasdevice_priv *tas_priv, if (client->addr != tasdev->dev_addr) { client->addr = tasdev->dev_addr; - if (tasdev->cur_book == book) { - ret = regmap_write(map, - TASDEVICE_PAGE_SELECT, 0); - if (ret < 0) { - dev_err(tas_priv->dev, "%s, E=%d\n", - __func__, ret); - goto out; - } + /* All tas2781s share the same regmap, clear the page + * inside regmap once switching to another tas2781. + * Register 0 at any pages and any books inside tas2781 + * is the same one for page-switching. 
+ */ + ret = regmap_write(map, TASDEVICE_PAGE_SELECT, 0); + if (ret < 0) { + dev_err(tas_priv->dev, "%s, E=%d\n", + __func__, ret); + goto out; } - goto out; } if (tasdev->cur_book != book) { diff --git a/sound/soc/fsl/fsl_micfil.c b/sound/soc/fsl/fsl_micfil.c index 3f08082a55bec..9d01225dedd9a 100644 --- a/sound/soc/fsl/fsl_micfil.c +++ b/sound/soc/fsl/fsl_micfil.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause // Copyright 2018 NXP #include @@ -1254,4 +1254,4 @@ module_platform_driver(fsl_micfil_driver); MODULE_AUTHOR("Cosmin-Gabriel Samoila "); MODULE_DESCRIPTION("NXP PDM Microphone Interface (MICFIL) driver"); -MODULE_LICENSE("GPL v2"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/sound/soc/fsl/fsl_micfil.h b/sound/soc/fsl/fsl_micfil.h index 9237a1c4cb8f7..fee9fe3d91199 100644 --- a/sound/soc/fsl/fsl_micfil.h +++ b/sound/soc/fsl/fsl_micfil.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * PDM Microphone Interface for the NXP i.MX SoC * Copyright 2018 NXP diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c index dbee8c98ff012..0201029899cad 100644 --- a/sound/soc/intel/boards/sof_sdw.c +++ b/sound/soc/intel/boards/sof_sdw.c @@ -476,7 +476,7 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = { DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "Lunar Lake Client Platform"), }, - .driver_data = (void *)(RT711_JD2_100K), + .driver_data = (void *)(RT711_JD2), }, {} }; diff --git a/sound/soc/intel/boards/sof_sdw_cs42l42.c b/sound/soc/intel/boards/sof_sdw_cs42l42.c index c4a16e4c9f69c..ad130d913415e 100644 --- a/sound/soc/intel/boards/sof_sdw_cs42l42.c +++ b/sound/soc/intel/boards/sof_sdw_cs42l42.c @@ -99,9 +99,9 @@ static int cs42l42_rtd_init(struct snd_soc_pcm_runtime *rtd) jack = &ctx->sdw_headset; snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE); - snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND); - snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP); - snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN); + snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOLUMEUP); + snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN); + snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOICECOMMAND); ret = snd_soc_component_set_jack(component, jack, NULL); diff --git a/sound/soc/meson/axg-tdm-formatter.c b/sound/soc/meson/axg-tdm-formatter.c index 9883dc777f630..63333a2b0a9c3 100644 --- a/sound/soc/meson/axg-tdm-formatter.c +++ b/sound/soc/meson/axg-tdm-formatter.c @@ -30,27 +30,32 @@ int axg_tdm_formatter_set_channel_masks(struct regmap *map, struct axg_tdm_stream *ts, unsigned int offset) { - unsigned int val, ch = ts->channels; - unsigned long mask; - int i, j; + unsigned int ch = ts->channels; + u32 val[AXG_TDM_NUM_LANES]; + int i, j, k; + + /* + * We need to mimick the slot distribution used by the HW to keep the + * channel placement consistent regardless of the number of channel + * in the stream. This is why the odd algorithm below is used. + */ + memset(val, 0, sizeof(*val) * AXG_TDM_NUM_LANES); /* * Distribute the channels of the stream over the available slots - * of each TDM lane + * of each TDM lane. We need to go over the 32 slots ... 
*/ - for (i = 0; i < AXG_TDM_NUM_LANES; i++) { - val = 0; - mask = ts->mask[i]; - - for (j = find_first_bit(&mask, 32); - (j < 32) && ch; - j = find_next_bit(&mask, 32, j + 1)) { - val |= 1 << j; - ch -= 1; + for (i = 0; (i < 32) && ch; i += 2) { + /* ... of all the lanes ... */ + for (j = 0; j < AXG_TDM_NUM_LANES; j++) { + /* ... then distribute the channels in pairs */ + for (k = 0; k < 2; k++) { + if ((BIT(i + k) & ts->mask[j]) && ch) { + val[j] |= BIT(i + k); + ch -= 1; + } + } } - - regmap_write(map, offset, val); - offset += regmap_get_reg_stride(map); } /* @@ -63,6 +68,11 @@ int axg_tdm_formatter_set_channel_masks(struct regmap *map, return -EINVAL; } + for (i = 0; i < AXG_TDM_NUM_LANES; i++) { + regmap_write(map, offset, val[i]); + offset += regmap_get_reg_stride(map); + } + return 0; } EXPORT_SYMBOL_GPL(axg_tdm_formatter_set_channel_masks); diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index 8896227e4fb76..3aa6b988cb4b4 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -38,6 +38,7 @@ static inline int _soc_pcm_ret(struct snd_soc_pcm_runtime *rtd, switch (ret) { case -EPROBE_DEFER: case -ENOTSUPP: + case -EINVAL: break; default: dev_err(rtd->dev, @@ -2466,8 +2467,11 @@ static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream) /* there is no point preparing this FE if there are no BEs */ if (list_empty(&fe->dpcm[stream].be_clients)) { - dev_err(fe->dev, "ASoC: no backend DAIs enabled for %s\n", - fe->dai_link->name); + /* dev_err_once() for visibility, dev_dbg() for debugging UCM profiles */ + dev_err_once(fe->dev, "ASoC: no backend DAIs enabled for %s, possibly missing ALSA mixer-based routing or UCM profile\n", + fe->dai_link->name); + dev_dbg(fe->dev, "ASoC: no backend DAIs enabled for %s\n", + fe->dai_link->name); ret = -EINVAL; goto out; } diff --git a/sound/soc/sof/intel/hda-dai-ops.c b/sound/soc/sof/intel/hda-dai-ops.c index f3513796c189c..f33051eac1c09 100644 --- a/sound/soc/sof/intel/hda-dai-ops.c +++ b/sound/soc/sof/intel/hda-dai-ops.c @@ -372,6 +372,7 @@ static const struct hda_dai_widget_dma_ops hda_ipc4_chain_dma_ops = { static int hda_ipc3_post_trigger(struct snd_sof_dev *sdev, struct snd_soc_dai *cpu_dai, struct snd_pcm_substream *substream, int cmd) { + struct hdac_ext_stream *hext_stream = hda_get_hext_stream(sdev, cpu_dai, substream); struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(cpu_dai, substream->stream); switch (cmd) { @@ -379,9 +380,17 @@ static int hda_ipc3_post_trigger(struct snd_sof_dev *sdev, struct snd_soc_dai *c case SNDRV_PCM_TRIGGER_STOP: { struct snd_sof_dai_config_data data = { 0 }; + int ret; data.dai_data = DMA_CHAN_INVALID; - return hda_dai_config(w, SOF_DAI_CONFIG_FLAGS_HW_FREE, &data); + ret = hda_dai_config(w, SOF_DAI_CONFIG_FLAGS_HW_FREE, &data); + if (ret < 0) + return ret; + + if (cmd == SNDRV_PCM_TRIGGER_STOP) + return hda_link_dma_cleanup(substream, hext_stream, cpu_dai); + + break; } case SNDRV_PCM_TRIGGER_PAUSE_PUSH: return hda_dai_config(w, SOF_DAI_CONFIG_FLAGS_PAUSE, NULL); diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c index 3297dea493aa6..863865f3d77eb 100644 --- a/sound/soc/sof/intel/hda-dai.c +++ b/sound/soc/sof/intel/hda-dai.c @@ -107,9 +107,8 @@ hda_dai_get_ops(struct snd_pcm_substream *substream, struct snd_soc_dai *cpu_dai return sdai->platform_private; } -static int hda_link_dma_cleanup(struct snd_pcm_substream *substream, - struct hdac_ext_stream *hext_stream, - struct snd_soc_dai *cpu_dai) +int hda_link_dma_cleanup(struct snd_pcm_substream *substream, struct 
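The rewritten axg_tdm_formatter_set_channel_masks() above walks the 32 slots two at a time and, within each slot pair, round-robins over the lanes, so channels land in the positions the hardware expects regardless of how many channels the stream carries. The distribution loop on its own, as a runnable C demo that prints the per-lane masks (lane count and enabled-slot masks are made-up test values):

/* Illustrative userspace sketch, not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define NUM_LANES 4

/* Distribute `ch` channels over the enabled slots of each lane, two slots
 * at a time per lane, mirroring the order the formatter hardware uses. */
static void set_channel_masks(const uint32_t enabled[NUM_LANES],
			      uint32_t val[NUM_LANES], unsigned int ch)
{
	for (int i = 0; i < NUM_LANES; i++)
		val[i] = 0;

	for (int i = 0; i < 32 && ch; i += 2) {		/* slot pairs   */
		for (int j = 0; j < NUM_LANES; j++) {	/* ... per lane */
			for (int k = 0; k < 2; k++) {
				if ((enabled[j] & (1u << (i + k))) && ch) {
					val[j] |= 1u << (i + k);
					ch--;
				}
			}
		}
	}
}

int main(void)
{
	uint32_t enabled[NUM_LANES] = { 0xff, 0xff, 0, 0 };	/* 8 slots/lane */
	uint32_t val[NUM_LANES];

	set_channel_masks(enabled, val, 4);	/* 4-channel stream */
	for (int i = 0; i < NUM_LANES; i++)
		printf("lane %d mask 0x%02x\n", i, val[i]);
	return 0;
}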
hdac_ext_stream *hext_stream, + struct snd_soc_dai *cpu_dai) { const struct hda_dai_widget_dma_ops *ops = hda_dai_get_ops(substream, cpu_dai); struct sof_intel_hda_stream *hda_stream; diff --git a/sound/soc/sof/intel/hda.h b/sound/soc/sof/intel/hda.h index 3f7c6fb05e5d7..5b9e4ebcc18b2 100644 --- a/sound/soc/sof/intel/hda.h +++ b/sound/soc/sof/intel/hda.h @@ -963,5 +963,7 @@ const struct hda_dai_widget_dma_ops * hda_select_dai_widget_ops(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget); int hda_dai_config(struct snd_soc_dapm_widget *w, unsigned int flags, struct snd_sof_dai_config_data *data); +int hda_link_dma_cleanup(struct snd_pcm_substream *substream, struct hdac_ext_stream *hext_stream, + struct snd_soc_dai *cpu_dai); #endif diff --git a/sound/soc/sof/ipc3.c b/sound/soc/sof/ipc3.c index 2c5aac31e8b07..580960ff273d1 100644 --- a/sound/soc/sof/ipc3.c +++ b/sound/soc/sof/ipc3.c @@ -1001,7 +1001,7 @@ void sof_ipc3_do_rx_work(struct snd_sof_dev *sdev, struct sof_ipc_cmd_hdr *hdr, ipc3_log_header(sdev->dev, "ipc rx", hdr->cmd); - if (hdr->size < sizeof(hdr) || hdr->size > SOF_IPC_MSG_MAX_SIZE) { + if (hdr->size < sizeof(*hdr) || hdr->size > SOF_IPC_MSG_MAX_SIZE) { dev_err(sdev->dev, "The received message size is invalid: %u\n", hdr->size); return; diff --git a/sound/soc/sof/ipc4-pcm.c b/sound/soc/sof/ipc4-pcm.c index 0c905bd0fab4d..027416eb2f504 100644 --- a/sound/soc/sof/ipc4-pcm.c +++ b/sound/soc/sof/ipc4-pcm.c @@ -708,6 +708,9 @@ static int sof_ipc4_pcm_hw_params(struct snd_soc_component *component, struct snd_sof_pcm *spcm; spcm = snd_sof_find_spcm_dai(component, rtd); + if (!spcm) + return -EINVAL; + time_info = spcm->stream[substream->stream].private; /* delay calculation is not supported by current fw_reg ABI */ if (!time_info) diff --git a/sound/soc/sof/ipc4-topology.c b/sound/soc/sof/ipc4-topology.c index a4e1a70b607d9..11361e1cd6881 100644 --- a/sound/soc/sof/ipc4-topology.c +++ b/sound/soc/sof/ipc4-topology.c @@ -1731,6 +1731,9 @@ sof_ipc4_prepare_copier_module(struct snd_sof_widget *swidget, *ipc_config_size = ipc_size; + /* update pipeline memory usage */ + sof_ipc4_update_resource_usage(sdev, swidget, &copier_data->base_config); + /* copy IPC data */ memcpy(*ipc_config_data, (void *)copier_data, sizeof(*copier_data)); if (gtw_cfg_config_length) @@ -1743,9 +1746,6 @@ sof_ipc4_prepare_copier_module(struct snd_sof_widget *swidget, gtw_cfg_config_length, &ipc4_copier->dma_config_tlv, dma_config_tlv_size); - /* update pipeline memory usage */ - sof_ipc4_update_resource_usage(sdev, swidget, &copier_data->base_config); - return 0; } diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index efb4a3311cc59..5d72dc8441cbb 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -4507,6 +4507,35 @@ YAMAHA_DEVICE(0x7010, "UB99"), } } }, +{ + /* Advanced modes of the Mythware XA001AU. 
+ * For the standard mode, Mythware XA001AU has ID ffad:a001 + */ + USB_DEVICE_VENDOR_SPEC(0xffad, 0xa001), + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { + .vendor_name = "Mythware", + .product_name = "XA001AU", + .ifnum = QUIRK_ANY_INTERFACE, + .type = QUIRK_COMPOSITE, + .data = (const struct snd_usb_audio_quirk[]) { + { + .ifnum = 0, + .type = QUIRK_IGNORE_INTERFACE, + }, + { + .ifnum = 1, + .type = QUIRK_AUDIO_STANDARD_INTERFACE, + }, + { + .ifnum = 2, + .type = QUIRK_AUDIO_STANDARD_INTERFACE, + }, + { + .ifnum = -1 + } + } + } +}, #undef USB_DEVICE_VENDOR_SPEC #undef USB_AUDIO_DEVICE diff --git a/tools/arch/arm64/include/uapi/asm/bitsperlong.h b/tools/arch/arm64/include/uapi/asm/bitsperlong.h new file mode 100644 index 0000000000000..485d60bee26ca --- /dev/null +++ b/tools/arch/arm64/include/uapi/asm/bitsperlong.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef __ASM_BITSPERLONG_H +#define __ASM_BITSPERLONG_H + +#define __BITS_PER_LONG 64 + +#include + +#endif /* __ASM_BITSPERLONG_H */ diff --git a/tools/arch/riscv/include/uapi/asm/bitsperlong.h b/tools/arch/riscv/include/uapi/asm/bitsperlong.h new file mode 100644 index 0000000000000..0b9b58b57ff6e --- /dev/null +++ b/tools/arch/riscv/include/uapi/asm/bitsperlong.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2012 ARM Ltd. 
+ * Copyright (C) 2015 Regents of the University of California + */ + +#ifndef _UAPI_ASM_RISCV_BITSPERLONG_H +#define _UAPI_ASM_RISCV_BITSPERLONG_H + +#define __BITS_PER_LONG (__SIZEOF_POINTER__ * 8) + +#include + +#endif /* _UAPI_ASM_RISCV_BITSPERLONG_H */ diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c index 2d51fa8da9e8c..c0f25d00181ec 100644 --- a/tools/objtool/arch/x86/decode.c +++ b/tools/objtool/arch/x86/decode.c @@ -824,8 +824,11 @@ bool arch_is_retpoline(struct symbol *sym) bool arch_is_rethunk(struct symbol *sym) { - return !strcmp(sym->name, "__x86_return_thunk") || - !strcmp(sym->name, "srso_untrain_ret") || - !strcmp(sym->name, "srso_safe_ret") || - !strcmp(sym->name, "__ret"); + return !strcmp(sym->name, "__x86_return_thunk"); +} + +bool arch_is_embedded_insn(struct symbol *sym) +{ + return !strcmp(sym->name, "retbleed_return_thunk") || + !strcmp(sym->name, "srso_safe_ret"); } diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 8936a05f0e5ac..1384090530dbe 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -389,7 +389,7 @@ static int decode_instructions(struct objtool_file *file) if (!strcmp(sec->name, ".noinstr.text") || !strcmp(sec->name, ".entry.text") || !strcmp(sec->name, ".cpuidle.text") || - !strncmp(sec->name, ".text.__x86.", 12)) + !strncmp(sec->name, ".text..__x86.", 13)) sec->noinstr = true; /* @@ -455,7 +455,7 @@ static int decode_instructions(struct objtool_file *file) return -1; } - if (func->return_thunk || func->alias != func) + if (func->embedded_insn || func->alias != func) continue; if (!find_insn(file, sec, func->offset)) { @@ -1288,16 +1288,33 @@ static int add_ignore_alternatives(struct objtool_file *file) return 0; } +/* + * Symbols that replace INSN_CALL_DYNAMIC, every (tail) call to such a symbol + * will be added to the .retpoline_sites section. + */ __weak bool arch_is_retpoline(struct symbol *sym) { return false; } +/* + * Symbols that replace INSN_RETURN, every (tail) call to such a symbol + * will be added to the .return_sites section. + */ __weak bool arch_is_rethunk(struct symbol *sym) { return false; } +/* + * Symbols that are embedded inside other instructions, because sometimes crazy + * code exists. These are mostly ignored for validation purposes. + */ +__weak bool arch_is_embedded_insn(struct symbol *sym) +{ + return false; +} + static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn) { struct reloc *reloc; @@ -1576,14 +1593,14 @@ static int add_jump_destinations(struct objtool_file *file) struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off); /* - * This is a special case for zen_untrain_ret(). + * This is a special case for retbleed_untrain_ret(). * It jumps to __x86_return_thunk(), but objtool * can't find the thunk's starting RET * instruction, because the RET is also in the * middle of another instruction. Objtool only * knows about the outer instruction. 
*/ - if (sym && sym->return_thunk) { + if (sym && sym->embedded_insn) { add_return_call(file, insn, false); continue; } @@ -2502,6 +2519,9 @@ static int classify_symbols(struct objtool_file *file) if (arch_is_rethunk(func)) func->return_thunk = true; + if (arch_is_embedded_insn(func)) + func->embedded_insn = true; + if (arch_ftrace_match(func->name)) func->fentry = true; @@ -2630,12 +2650,17 @@ static int decode_sections(struct objtool_file *file) return 0; } -static bool is_fentry_call(struct instruction *insn) +static bool is_special_call(struct instruction *insn) { - if (insn->type == INSN_CALL && - insn_call_dest(insn) && - insn_call_dest(insn)->fentry) - return true; + if (insn->type == INSN_CALL) { + struct symbol *dest = insn_call_dest(insn); + + if (!dest) + return false; + + if (dest->fentry || dest->embedded_insn) + return true; + } return false; } @@ -3636,7 +3661,7 @@ static int validate_branch(struct objtool_file *file, struct symbol *func, if (ret) return ret; - if (opts.stackval && func && !is_fentry_call(insn) && + if (opts.stackval && func && !is_special_call(insn) && !has_valid_stack_frame(&state)) { WARN_INSN(insn, "call without frame pointer save/setup"); return 1; diff --git a/tools/objtool/include/objtool/arch.h b/tools/objtool/include/objtool/arch.h index 2b6d2ce4f9a5b..0b303eba660e4 100644 --- a/tools/objtool/include/objtool/arch.h +++ b/tools/objtool/include/objtool/arch.h @@ -90,6 +90,7 @@ int arch_decode_hint_reg(u8 sp_reg, int *base); bool arch_is_retpoline(struct symbol *sym); bool arch_is_rethunk(struct symbol *sym); +bool arch_is_embedded_insn(struct symbol *sym); int arch_rewrite_retpolines(struct objtool_file *file); diff --git a/tools/objtool/include/objtool/elf.h b/tools/objtool/include/objtool/elf.h index c532d70864dc5..9f71e988eca45 100644 --- a/tools/objtool/include/objtool/elf.h +++ b/tools/objtool/include/objtool/elf.h @@ -66,6 +66,7 @@ struct symbol { u8 fentry : 1; u8 profiling_func : 1; u8 warned : 1; + u8 embedded_insn : 1; struct list_head pv_target; struct reloc *relocs; }; diff --git a/tools/perf/util/thread-stack.c b/tools/perf/util/thread-stack.c index 374d142e7390d..c6a0a27b12c2a 100644 --- a/tools/perf/util/thread-stack.c +++ b/tools/perf/util/thread-stack.c @@ -1038,9 +1038,7 @@ static int thread_stack__trace_end(struct thread_stack *ts, static bool is_x86_retpoline(const char *name) { - const char *p = strstr(name, "__x86_indirect_thunk_"); - - return p == name || !strcmp(name, "__indirect_thunk_start"); + return strstr(name, "__x86_indirect_thunk_") == name; } /* diff --git a/tools/testing/selftests/cachestat/test_cachestat.c b/tools/testing/selftests/cachestat/test_cachestat.c index 54d09b820ed4b..c037553cfd92c 100644 --- a/tools/testing/selftests/cachestat/test_cachestat.c +++ b/tools/testing/selftests/cachestat/test_cachestat.c @@ -4,10 +4,12 @@ #include #include #include +#include #include #include #include #include +#include #include #include #include @@ -15,6 +17,8 @@ #include "../kselftest.h" +#define NR_TESTS 9 + static const char * const dev_files[] = { "/dev/zero", "/dev/null", "/dev/urandom", "/proc/version", "/proc" @@ -90,6 +94,20 @@ bool write_exactly(int fd, size_t filesize) return ret; } +/* + * fsync() is implemented via noop_fsync() on tmpfs. This makes the fsync() + * test fail below, so we need to check for test file living on a tmpfs. 
+ */ +static bool is_on_tmpfs(int fd) +{ + struct statfs statfs_buf; + + if (fstatfs(fd, &statfs_buf)) + return false; + + return statfs_buf.f_type == TMPFS_MAGIC; +} + /* * Open/create the file at filename, (optionally) write random data to it * (exactly num_pages), then test the cachestat syscall on this file. @@ -97,13 +115,13 @@ bool write_exactly(int fd, size_t filesize) * If test_fsync == true, fsync the file, then check the number of dirty * pages. */ -bool test_cachestat(const char *filename, bool write_random, bool create, - bool test_fsync, unsigned long num_pages, int open_flags, - mode_t open_mode) +static int test_cachestat(const char *filename, bool write_random, bool create, + bool test_fsync, unsigned long num_pages, + int open_flags, mode_t open_mode) { size_t PS = sysconf(_SC_PAGESIZE); int filesize = num_pages * PS; - bool ret = true; + int ret = KSFT_PASS; long syscall_ret; struct cachestat cs; struct cachestat_range cs_range = { 0, filesize }; @@ -112,7 +130,7 @@ bool test_cachestat(const char *filename, bool write_random, bool create, if (fd == -1) { ksft_print_msg("Unable to create/open file.\n"); - ret = false; + ret = KSFT_FAIL; goto out; } else { ksft_print_msg("Create/open %s\n", filename); @@ -121,7 +139,7 @@ bool test_cachestat(const char *filename, bool write_random, bool create, if (write_random) { if (!write_exactly(fd, filesize)) { ksft_print_msg("Unable to access urandom.\n"); - ret = false; + ret = KSFT_FAIL; goto out1; } } @@ -132,7 +150,7 @@ bool test_cachestat(const char *filename, bool write_random, bool create, if (syscall_ret) { ksft_print_msg("Cachestat returned non-zero.\n"); - ret = false; + ret = KSFT_FAIL; goto out1; } else { @@ -142,15 +160,17 @@ bool test_cachestat(const char *filename, bool write_random, bool create, if (cs.nr_cache + cs.nr_evicted != num_pages) { ksft_print_msg( "Total number of cached and evicted pages is off.\n"); - ret = false; + ret = KSFT_FAIL; } } } if (test_fsync) { - if (fsync(fd)) { + if (is_on_tmpfs(fd)) { + ret = KSFT_SKIP; + } else if (fsync(fd)) { ksft_print_msg("fsync fails.\n"); - ret = false; + ret = KSFT_FAIL; } else { syscall_ret = syscall(cachestat_nr, fd, &cs_range, &cs, 0); @@ -161,13 +181,13 @@ bool test_cachestat(const char *filename, bool write_random, bool create, print_cachestat(&cs); if (cs.nr_dirty) { - ret = false; + ret = KSFT_FAIL; ksft_print_msg( "Number of dirty should be zero after fsync.\n"); } } else { ksft_print_msg("Cachestat (after fsync) returned non-zero.\n"); - ret = false; + ret = KSFT_FAIL; goto out1; } } @@ -236,13 +256,29 @@ bool test_cachestat_shmem(void) int main(void) { - int ret = 0; + int ret; + + ksft_print_header(); + + ret = syscall(__NR_cachestat, -1, NULL, NULL, 0); + if (ret == -1 && errno == ENOSYS) + ksft_exit_skip("cachestat syscall not available\n"); + + ksft_set_plan(NR_TESTS); + + if (ret == -1 && errno == EBADF) { + ksft_test_result_pass("bad file descriptor recognized\n"); + ret = 0; + } else { + ksft_test_result_fail("bad file descriptor ignored\n"); + ret = 1; + } for (int i = 0; i < 5; i++) { const char *dev_filename = dev_files[i]; if (test_cachestat(dev_filename, false, false, false, - 4, O_RDONLY, 0400)) + 4, O_RDONLY, 0400) == KSFT_PASS) ksft_test_result_pass("cachestat works with %s\n", dev_filename); else { ksft_test_result_fail("cachestat fails with %s\n", dev_filename); @@ -251,13 +287,27 @@ int main(void) } if (test_cachestat("tmpfilecachestat", true, true, - true, 4, O_CREAT | O_RDWR, 0400 | 0600)) + false, 4, O_CREAT | O_RDWR, 0600) == KSFT_PASS) 
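The cachestat selftest above now probes the syscall once up front: an ENOSYS result skips the whole plan, while an EBADF result doubles as the first test case, since the fd passed is deliberately invalid. That probe-then-plan idiom, trimmed down to a compilable sketch (the syscall number is the value from the unified syscall table; the SKIP exit code matches kselftest's convention):

/* Illustrative userspace sketch, not the selftest itself. */
#include <errno.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_cachestat
#define __NR_cachestat 451	/* value from the unified syscall table */
#endif

int main(void)
{
	/* Deliberately bogus fd: a supporting kernel must answer EBADF,
	 * an older kernel answers ENOSYS and the whole plan is skipped. */
	long ret = syscall(__NR_cachestat, -1, NULL, NULL, 0);

	if (ret == -1 && errno == ENOSYS) {
		printf("SKIP: cachestat not available\n");
		return 4;	/* kselftest's KSFT_SKIP exit code */
	}
	if (ret == -1 && errno == EBADF) {
		printf("PASS: bad file descriptor recognized\n");
		return 0;
	}
	printf("FAIL: unexpected result %ld (errno %d)\n", ret, errno);
	return 1;
}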
ksft_test_result_pass("cachestat works with a normal file\n"); else { ksft_test_result_fail("cachestat fails with normal file\n"); ret = 1; } + switch (test_cachestat("tmpfilecachestat", true, true, + true, 4, O_CREAT | O_RDWR, 0600)) { + case KSFT_FAIL: + ksft_test_result_fail("cachestat fsync fails with normal file\n"); + ret = KSFT_FAIL; + break; + case KSFT_PASS: + ksft_test_result_pass("cachestat fsync works with a normal file\n"); + break; + case KSFT_SKIP: + ksft_test_result_skip("tmpfilecachestat is on tmpfs\n"); + break; + } + if (test_cachestat_shmem()) ksft_test_result_pass("cachestat works with a shmem file\n"); else { diff --git a/tools/testing/selftests/cgroup/test_kmem.c b/tools/testing/selftests/cgroup/test_kmem.c index 1b2cec9d18a42..ed2e50bb1e762 100644 --- a/tools/testing/selftests/cgroup/test_kmem.c +++ b/tools/testing/selftests/cgroup/test_kmem.c @@ -75,11 +75,11 @@ static int test_kmem_basic(const char *root) sleep(1); slab1 = cg_read_key_long(cg, "memory.stat", "slab "); - if (slab1 <= 0) + if (slab1 < 0) goto cleanup; current = cg_read_long(cg, "memory.current"); - if (current <= 0) + if (current < 0) goto cleanup; if (slab1 < slab0 / 2 && current < slab0 / 2) diff --git a/tools/testing/selftests/drivers/net/bonding/Makefile b/tools/testing/selftests/drivers/net/bonding/Makefile index 03f92d7aeb19b..8a72bb7de70f4 100644 --- a/tools/testing/selftests/drivers/net/bonding/Makefile +++ b/tools/testing/selftests/drivers/net/bonding/Makefile @@ -9,10 +9,12 @@ TEST_PROGS := \ mode-1-recovery-updelay.sh \ mode-2-recovery-updelay.sh \ bond_options.sh \ - bond-eth-type-change.sh + bond-eth-type-change.sh \ + bond_macvlan.sh TEST_FILES := \ lag_lib.sh \ + bond_topo_2d1c.sh \ bond_topo_3d1c.sh \ net_forwarding_lib.sh diff --git a/tools/testing/selftests/drivers/net/bonding/bond-break-lacpdu-tx.sh b/tools/testing/selftests/drivers/net/bonding/bond-break-lacpdu-tx.sh index 47ab90596acb2..6358df5752f90 100755 --- a/tools/testing/selftests/drivers/net/bonding/bond-break-lacpdu-tx.sh +++ b/tools/testing/selftests/drivers/net/bonding/bond-break-lacpdu-tx.sh @@ -57,8 +57,8 @@ ip link add name veth2-bond type veth peer name veth2-end # add ports ip link set fbond master fab-br0 -ip link set veth1-bond down master fbond -ip link set veth2-bond down master fbond +ip link set veth1-bond master fbond +ip link set veth2-bond master fbond # bring up ip link set veth1-end up diff --git a/tools/testing/selftests/drivers/net/bonding/bond_macvlan.sh b/tools/testing/selftests/drivers/net/bonding/bond_macvlan.sh new file mode 100755 index 0000000000000..b609fb6231f48 --- /dev/null +++ b/tools/testing/selftests/drivers/net/bonding/bond_macvlan.sh @@ -0,0 +1,99 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# Test macvlan over balance-alb + +lib_dir=$(dirname "$0") +source ${lib_dir}/bond_topo_2d1c.sh + +m1_ns="m1-$(mktemp -u XXXXXX)" +m2_ns="m2-$(mktemp -u XXXXXX)" +m1_ip4="192.0.2.11" +m1_ip6="2001:db8::11" +m2_ip4="192.0.2.12" +m2_ip6="2001:db8::12" + +cleanup() +{ + ip -n ${m1_ns} link del macv0 + ip netns del ${m1_ns} + ip -n ${m2_ns} link del macv0 + ip netns del ${m2_ns} + + client_destroy + server_destroy + gateway_destroy +} + +check_connection() +{ + local ns=${1} + local target=${2} + local message=${3:-"macvlan_over_bond"} + RET=0 + + + ip netns exec ${ns} ping ${target} -c 4 -i 0.1 &>/dev/null + check_err $? 
"ping failed" + log_test "$mode: $message" +} + +macvlan_over_bond() +{ + local param="$1" + RET=0 + + # setup new bond mode + bond_reset "${param}" + + ip -n ${s_ns} link add link bond0 name macv0 type macvlan mode bridge + ip -n ${s_ns} link set macv0 netns ${m1_ns} + ip -n ${m1_ns} link set dev macv0 up + ip -n ${m1_ns} addr add ${m1_ip4}/24 dev macv0 + ip -n ${m1_ns} addr add ${m1_ip6}/24 dev macv0 + + ip -n ${s_ns} link add link bond0 name macv0 type macvlan mode bridge + ip -n ${s_ns} link set macv0 netns ${m2_ns} + ip -n ${m2_ns} link set dev macv0 up + ip -n ${m2_ns} addr add ${m2_ip4}/24 dev macv0 + ip -n ${m2_ns} addr add ${m2_ip6}/24 dev macv0 + + sleep 2 + + check_connection "${c_ns}" "${s_ip4}" "IPv4: client->server" + check_connection "${c_ns}" "${s_ip6}" "IPv6: client->server" + check_connection "${c_ns}" "${m1_ip4}" "IPv4: client->macvlan_1" + check_connection "${c_ns}" "${m1_ip6}" "IPv6: client->macvlan_1" + check_connection "${c_ns}" "${m2_ip4}" "IPv4: client->macvlan_2" + check_connection "${c_ns}" "${m2_ip6}" "IPv6: client->macvlan_2" + check_connection "${m1_ns}" "${m2_ip4}" "IPv4: macvlan_1->macvlan_2" + check_connection "${m1_ns}" "${m2_ip6}" "IPv6: macvlan_1->macvlan_2" + + + sleep 5 + + check_connection "${s_ns}" "${c_ip4}" "IPv4: server->client" + check_connection "${s_ns}" "${c_ip6}" "IPv6: server->client" + check_connection "${m1_ns}" "${c_ip4}" "IPv4: macvlan_1->client" + check_connection "${m1_ns}" "${c_ip6}" "IPv6: macvlan_1->client" + check_connection "${m2_ns}" "${c_ip4}" "IPv4: macvlan_2->client" + check_connection "${m2_ns}" "${c_ip6}" "IPv6: macvlan_2->client" + check_connection "${m2_ns}" "${m1_ip4}" "IPv4: macvlan_2->macvlan_1" + check_connection "${m2_ns}" "${m1_ip6}" "IPv6: macvlan_2->macvlan_1" + + ip -n ${c_ns} neigh flush dev eth0 +} + +trap cleanup EXIT + +setup_prepare +ip netns add ${m1_ns} +ip netns add ${m2_ns} + +modes="active-backup balance-tlb balance-alb" + +for mode in $modes; do + macvlan_over_bond "mode $mode" +done + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/drivers/net/bonding/bond_options.sh b/tools/testing/selftests/drivers/net/bonding/bond_options.sh index 607ba5c38977f..c54d1697f439a 100755 --- a/tools/testing/selftests/drivers/net/bonding/bond_options.sh +++ b/tools/testing/selftests/drivers/net/bonding/bond_options.sh @@ -9,10 +9,7 @@ ALL_TESTS=" num_grat_arp " -REQUIRE_MZ=no -NUM_NETIFS=0 lib_dir=$(dirname "$0") -source ${lib_dir}/net_forwarding_lib.sh source ${lib_dir}/bond_topo_3d1c.sh skip_prio() diff --git a/tools/testing/selftests/drivers/net/bonding/bond_topo_2d1c.sh b/tools/testing/selftests/drivers/net/bonding/bond_topo_2d1c.sh new file mode 100644 index 0000000000000..a509ef949dcfd --- /dev/null +++ b/tools/testing/selftests/drivers/net/bonding/bond_topo_2d1c.sh @@ -0,0 +1,158 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# Topology for Bond mode 1,5,6 testing +# +# +-------------------------+ +# | bond0 | Server +# | + | 192.0.2.1/24 +# | eth0 | eth1 | 2001:db8::1/24 +# | +---+---+ | +# | | | | +# +-------------------------+ +# | | +# +-------------------------+ +# | | | | +# | +---+-------+---+ | Gateway +# | | br0 | | 192.0.2.254/24 +# | +-------+-------+ | 2001:db8::254/24 +# | | | +# +-------------------------+ +# | +# +-------------------------+ +# | | | Client +# | + | 192.0.2.10/24 +# | eth0 | 2001:db8::10/24 +# +-------------------------+ + +REQUIRE_MZ=no +NUM_NETIFS=0 +lib_dir=$(dirname "$0") +source ${lib_dir}/net_forwarding_lib.sh + +s_ns="s-$(mktemp -u XXXXXX)" +c_ns="c-$(mktemp -u 
XXXXXX)" +g_ns="g-$(mktemp -u XXXXXX)" +s_ip4="192.0.2.1" +c_ip4="192.0.2.10" +g_ip4="192.0.2.254" +s_ip6="2001:db8::1" +c_ip6="2001:db8::10" +g_ip6="2001:db8::254" + +gateway_create() +{ + ip netns add ${g_ns} + ip -n ${g_ns} link add br0 type bridge + ip -n ${g_ns} link set br0 up + ip -n ${g_ns} addr add ${g_ip4}/24 dev br0 + ip -n ${g_ns} addr add ${g_ip6}/24 dev br0 +} + +gateway_destroy() +{ + ip -n ${g_ns} link del br0 + ip netns del ${g_ns} +} + +server_create() +{ + ip netns add ${s_ns} + ip -n ${s_ns} link add bond0 type bond mode active-backup miimon 100 + + for i in $(seq 0 1); do + ip -n ${s_ns} link add eth${i} type veth peer name s${i} netns ${g_ns} + + ip -n ${g_ns} link set s${i} up + ip -n ${g_ns} link set s${i} master br0 + ip -n ${s_ns} link set eth${i} master bond0 + + tc -n ${g_ns} qdisc add dev s${i} clsact + done + + ip -n ${s_ns} link set bond0 up + ip -n ${s_ns} addr add ${s_ip4}/24 dev bond0 + ip -n ${s_ns} addr add ${s_ip6}/24 dev bond0 + sleep 2 +} + +# Reset bond with new mode and options +bond_reset() +{ + # Count the eth link number in real-time as this function + # may be called from other topologies. + local link_num=$(ip -n ${s_ns} -br link show | grep -c "^eth") + local param="$1" + link_num=$((link_num -1)) + + ip -n ${s_ns} link set bond0 down + ip -n ${s_ns} link del bond0 + + ip -n ${s_ns} link add bond0 type bond $param + for i in $(seq 0 ${link_num}); do + ip -n ${s_ns} link set eth$i master bond0 + done + + ip -n ${s_ns} link set bond0 up + ip -n ${s_ns} addr add ${s_ip4}/24 dev bond0 + ip -n ${s_ns} addr add ${s_ip6}/24 dev bond0 + sleep 2 +} + +server_destroy() +{ + # Count the eth link number in real-time as this function + # may be called from other topologies. + local link_num=$(ip -n ${s_ns} -br link show | grep -c "^eth") + link_num=$((link_num -1)) + for i in $(seq 0 ${link_num}); do + ip -n ${s_ns} link del eth${i} + done + ip netns del ${s_ns} +} + +client_create() +{ + ip netns add ${c_ns} + ip -n ${c_ns} link add eth0 type veth peer name c0 netns ${g_ns} + + ip -n ${g_ns} link set c0 up + ip -n ${g_ns} link set c0 master br0 + + ip -n ${c_ns} link set eth0 up + ip -n ${c_ns} addr add ${c_ip4}/24 dev eth0 + ip -n ${c_ns} addr add ${c_ip6}/24 dev eth0 +} + +client_destroy() +{ + ip -n ${c_ns} link del eth0 + ip netns del ${c_ns} +} + +setup_prepare() +{ + gateway_create + server_create + client_create +} + +cleanup() +{ + pre_cleanup + + client_destroy + server_destroy + gateway_destroy +} + +bond_check_connection() +{ + local msg=${1:-"check connection"} + + sleep 2 + ip netns exec ${s_ns} ping ${c_ip4} -c5 -i 0.1 &>/dev/null + check_err $? "${msg}: ping failed" + ip netns exec ${s_ns} ping6 ${c_ip6} -c5 -i 0.1 &>/dev/null + check_err $? 
"${msg}: ping6 failed" +} diff --git a/tools/testing/selftests/drivers/net/bonding/bond_topo_3d1c.sh b/tools/testing/selftests/drivers/net/bonding/bond_topo_3d1c.sh index 69ab99a560434..3a1333d9a85b3 100644 --- a/tools/testing/selftests/drivers/net/bonding/bond_topo_3d1c.sh +++ b/tools/testing/selftests/drivers/net/bonding/bond_topo_3d1c.sh @@ -25,121 +25,19 @@ # | eth0 | 2001:db8::10/24 # +-------------------------------------+ -s_ns="s-$(mktemp -u XXXXXX)" -c_ns="c-$(mktemp -u XXXXXX)" -g_ns="g-$(mktemp -u XXXXXX)" -s_ip4="192.0.2.1" -c_ip4="192.0.2.10" -g_ip4="192.0.2.254" -s_ip6="2001:db8::1" -c_ip6="2001:db8::10" -g_ip6="2001:db8::254" - -gateway_create() -{ - ip netns add ${g_ns} - ip -n ${g_ns} link add br0 type bridge - ip -n ${g_ns} link set br0 up - ip -n ${g_ns} addr add ${g_ip4}/24 dev br0 - ip -n ${g_ns} addr add ${g_ip6}/24 dev br0 -} - -gateway_destroy() -{ - ip -n ${g_ns} link del br0 - ip netns del ${g_ns} -} - -server_create() -{ - ip netns add ${s_ns} - ip -n ${s_ns} link add bond0 type bond mode active-backup miimon 100 - - for i in $(seq 0 2); do - ip -n ${s_ns} link add eth${i} type veth peer name s${i} netns ${g_ns} - - ip -n ${g_ns} link set s${i} up - ip -n ${g_ns} link set s${i} master br0 - ip -n ${s_ns} link set eth${i} master bond0 - - tc -n ${g_ns} qdisc add dev s${i} clsact - done - - ip -n ${s_ns} link set bond0 up - ip -n ${s_ns} addr add ${s_ip4}/24 dev bond0 - ip -n ${s_ns} addr add ${s_ip6}/24 dev bond0 - sleep 2 -} - -# Reset bond with new mode and options -bond_reset() -{ - local param="$1" - - ip -n ${s_ns} link set bond0 down - ip -n ${s_ns} link del bond0 - - ip -n ${s_ns} link add bond0 type bond $param - for i in $(seq 0 2); do - ip -n ${s_ns} link set eth$i master bond0 - done - - ip -n ${s_ns} link set bond0 up - ip -n ${s_ns} addr add ${s_ip4}/24 dev bond0 - ip -n ${s_ns} addr add ${s_ip6}/24 dev bond0 - sleep 2 -} - -server_destroy() -{ - for i in $(seq 0 2); do - ip -n ${s_ns} link del eth${i} - done - ip netns del ${s_ns} -} - -client_create() -{ - ip netns add ${c_ns} - ip -n ${c_ns} link add eth0 type veth peer name c0 netns ${g_ns} - - ip -n ${g_ns} link set c0 up - ip -n ${g_ns} link set c0 master br0 - - ip -n ${c_ns} link set eth0 up - ip -n ${c_ns} addr add ${c_ip4}/24 dev eth0 - ip -n ${c_ns} addr add ${c_ip6}/24 dev eth0 -} - -client_destroy() -{ - ip -n ${c_ns} link del eth0 - ip netns del ${c_ns} -} +source bond_topo_2d1c.sh setup_prepare() { gateway_create server_create client_create -} - -cleanup() -{ - pre_cleanup - - client_destroy - server_destroy - gateway_destroy -} - -bond_check_connection() -{ - local msg=${1:-"check connection"} - sleep 2 - ip netns exec ${s_ns} ping ${c_ip4} -c5 -i 0.1 &>/dev/null - check_err $? "${msg}: ping failed" - ip netns exec ${s_ns} ping6 ${c_ip6} -c5 -i 0.1 &>/dev/null - check_err $? 
"${msg}: ping6 failed" + # Add the extra device as we use 3 down links for bond0 + local i=2 + ip -n ${s_ns} link add eth${i} type veth peer name s${i} netns ${g_ns} + ip -n ${g_ns} link set s${i} up + ip -n ${g_ns} link set s${i} master br0 + ip -n ${s_ns} link set eth${i} master bond0 + tc -n ${g_ns} qdisc add dev s${i} clsact } diff --git a/tools/testing/selftests/drivers/net/mlxsw/sharedbuffer.sh b/tools/testing/selftests/drivers/net/mlxsw/sharedbuffer.sh index 7d9e73a43a49b..0c47faff9274b 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/sharedbuffer.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/sharedbuffer.sh @@ -98,12 +98,12 @@ sb_occ_etc_check() port_pool_test() { - local exp_max_occ=288 + local exp_max_occ=$(devlink_cell_size_get) local max_occ devlink sb occupancy clearmax $DEVLINK_DEV - $MZ $h1 -c 1 -p 160 -a $h1mac -b $h2mac -A 192.0.1.1 -B 192.0.1.2 \ + $MZ $h1 -c 1 -p 10 -a $h1mac -b $h2mac -A 192.0.1.1 -B 192.0.1.2 \ -t ip -q devlink sb occupancy snapshot $DEVLINK_DEV @@ -126,12 +126,12 @@ port_pool_test() port_tc_ip_test() { - local exp_max_occ=288 + local exp_max_occ=$(devlink_cell_size_get) local max_occ devlink sb occupancy clearmax $DEVLINK_DEV - $MZ $h1 -c 1 -p 160 -a $h1mac -b $h2mac -A 192.0.1.1 -B 192.0.1.2 \ + $MZ $h1 -c 1 -p 10 -a $h1mac -b $h2mac -A 192.0.1.1 -B 192.0.1.2 \ -t ip -q devlink sb occupancy snapshot $DEVLINK_DEV @@ -154,16 +154,12 @@ port_tc_ip_test() port_tc_arp_test() { - local exp_max_occ=96 + local exp_max_occ=$(devlink_cell_size_get) local max_occ - if [[ $MLXSW_CHIP != "mlxsw_spectrum" ]]; then - exp_max_occ=144 - fi - devlink sb occupancy clearmax $DEVLINK_DEV - $MZ $h1 -c 1 -p 160 -a $h1mac -A 192.0.1.1 -t arp -q + $MZ $h1 -c 1 -p 10 -a $h1mac -A 192.0.1.1 -t arp -q devlink sb occupancy snapshot $DEVLINK_DEV diff --git a/tools/testing/selftests/ftrace/test.d/00basic/snapshot1.tc b/tools/testing/selftests/ftrace/test.d/00basic/snapshot1.tc new file mode 100644 index 0000000000000..63b76cf2a360a --- /dev/null +++ b/tools/testing/selftests/ftrace/test.d/00basic/snapshot1.tc @@ -0,0 +1,31 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 +# description: Snapshot and tracing_cpumask +# requires: trace_marker tracing_cpumask snapshot +# flags: instance + +# This testcase is contrived to reproduce a problem where the cpu buffers +# become unavailable, which is due to 'record_disabled' of array_buffer and +# max_buffer being messed up. 
+ +# Store the original cpumask +ORIG_CPUMASK=`cat tracing_cpumask` + +# Stop tracing on all cpus +echo 0 > tracing_cpumask + +# Take a snapshot of the main buffer +echo 1 > snapshot + +# Restore the original cpumask; note that some cpus should now be traced +echo ${ORIG_CPUMASK} > tracing_cpumask + +# Set tracing on +echo 1 > tracing_on + +# Write a log into the buffer +echo "test input 1" > trace_marker + +# Ensure the log was written, i.e. the cpu buffers are still available +grep -q "test input 1" trace +exit 0 diff --git a/tools/testing/selftests/mm/hmm-tests.c b/tools/testing/selftests/mm/hmm-tests.c index 4adaad1b822f0..20294553a5dd7 100644 --- a/tools/testing/selftests/mm/hmm-tests.c +++ b/tools/testing/selftests/mm/hmm-tests.c @@ -57,9 +57,14 @@ enum { #define ALIGN(x, a) (((x) + (a - 1)) & (~((a) - 1))) /* Just the flags we need, copied from mm.h: */ + +#ifndef FOLL_WRITE #define FOLL_WRITE 0x01 /* check pte is writable */ -#define FOLL_LONGTERM 0x10000 /* mapping lifetime is indefinite */ +#endif +#ifndef FOLL_LONGTERM +#define FOLL_LONGTERM 0x100 /* mapping lifetime is indefinite */ +#endif FIXTURE(hmm) { int fd; diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore index 501854a89cc01..2f9d378edec33 100644 --- a/tools/testing/selftests/net/.gitignore +++ b/tools/testing/selftests/net/.gitignore @@ -15,6 +15,7 @@ ip_local_port_range ipsec ipv6_flowlabel ipv6_flowlabel_mgr +log.txt msg_zerocopy nettest psock_fanout @@ -45,6 +46,7 @@ test_unix_oob timestamping tls toeplitz +tools tun txring_overwrite txtimestamp diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh index aff88f78e3391..5ea9d63915f77 100755 --- a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh @@ -72,7 +72,8 @@ test_span_gre_ttl() RET=0 - mirror_install $swp1 ingress $tundev "matchall $tcflags" + mirror_install $swp1 ingress $tundev \ + "prot ip flower $tcflags ip_prot icmp" tc filter add dev $h3 ingress pref 77 prot $prot \ flower skip_hw ip_ttl 50 action pass