diff --git a/Documentation/ABI/testing/sysfs-bus-iio-lptimer-stm32 b/Documentation/ABI/testing/sysfs-bus-iio-lptimer-stm32
deleted file mode 100644
index 73498ff666bd74a92269bf5e1006d34571869233..0000000000000000000000000000000000000000
--- a/Documentation/ABI/testing/sysfs-bus-iio-lptimer-stm32
+++ /dev/null
@@ -1,62 +0,0 @@
-What:		/sys/bus/iio/devices/iio:deviceX/in_count0_preset
-KernelVersion:	4.13
-Contact:	fabrice.gasnier@st.com
-Description:
-		Reading returns the current preset value. Writing sets the
-		preset value. Encoder counts continuously from 0 to preset
-		value, depending on direction (up/down).
-
-What:		/sys/bus/iio/devices/iio:deviceX/in_count_quadrature_mode_available
-KernelVersion:	4.13
-Contact:	fabrice.gasnier@st.com
-Description:
-		Reading returns the list possible quadrature modes.
-
-What:		/sys/bus/iio/devices/iio:deviceX/in_count0_quadrature_mode
-KernelVersion:	4.13
-Contact:	fabrice.gasnier@st.com
-Description:
-		Configure the device counter quadrature modes:
-
-		- non-quadrature:
-			Encoder IN1 input servers as the count input (up
-			direction).
-
-		- quadrature:
-			Encoder IN1 and IN2 inputs are mixed to get direction
-			and count.
-
-What:		/sys/bus/iio/devices/iio:deviceX/in_count_polarity_available
-KernelVersion:	4.13
-Contact:	fabrice.gasnier@st.com
-Description:
-		Reading returns the list possible active edges.
-
-What:		/sys/bus/iio/devices/iio:deviceX/in_count0_polarity
-KernelVersion:	4.13
-Contact:	fabrice.gasnier@st.com
-Description:
-		Configure the device encoder/counter active edge:
-
-		- rising-edge
-		- falling-edge
-		- both-edges
-
-		In non-quadrature mode, device counts up on active edge.
-
-		In quadrature mode, encoder counting scenarios are as follows:
-
-		+---------+----------+--------------------+--------------------+
-		| Active  | Level on |     IN1 signal     |     IN2 signal     |
-		| edge    | opposite +----------+---------+----------+---------+
-		|         | signal   |  Rising  | Falling |  Rising  | Falling |
-		+---------+----------+----------+---------+----------+---------+
-		| Rising  | High ->  |   Down   |    -    |    Up    |    -    |
-		| edge    | Low ->   |    Up    |    -    |   Down   |    -    |
-		+---------+----------+----------+---------+----------+---------+
-		| Falling | High ->  |    -     |    Up   |    -     |   Down  |
-		| edge    | Low ->   |    -     |   Down  |    -     |    Up   |
-		+---------+----------+----------+---------+----------+---------+
-		| Both    | High ->  |   Down   |    Up   |    Up    |   Down  |
-		| edges   | Low ->   |    Up    |   Down  |   Down   |    Up   |
-		+---------+----------+----------+---------+----------+---------+
diff --git a/Documentation/accounting/psi.rst b/Documentation/accounting/psi.rst
index f2b3439edcc2cc772c7f92cd3510060df6b5e8b8..860fe651d6453eed1652f721cb048c8b3840be33 100644
--- a/Documentation/accounting/psi.rst
+++ b/Documentation/accounting/psi.rst
@@ -92,7 +92,8 @@ Triggers can be set on more than one psi metric and more than one trigger
 for the same psi metric can be specified. However for each trigger a separate
 file descriptor is required to be able to poll it separately from others,
 therefore for each trigger a separate open() syscall should be made even
-when opening the same psi interface file.
+when opening the same psi interface file. Write operations to a file descriptor
+with an already existing psi trigger will fail with EBUSY.
 
 Monitors activate only when system enters stall state for the monitored
 psi metric and deactivates upon exit from the stall state. While system is
diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst
index e05e581af5cfe617f38112907aadea8d03f2a9d6..985181dba0bacda11526b27b45daadddfd2cb7be 100644
--- a/Documentation/admin-guide/hw-vuln/spectre.rst
+++ b/Documentation/admin-guide/hw-vuln/spectre.rst
@@ -468,7 +468,7 @@ Spectre variant 2
    before invoking any firmware code to prevent Spectre variant 2
    exploits using the firmware.
 
-   Using kernel address space randomization (CONFIG_RANDOMIZE_SLAB=y
+   Using kernel address space randomization (CONFIG_RANDOMIZE_BASE=y
    and CONFIG_SLAB_FREELIST_RANDOM=y in the kernel configuration) makes
    attacks on the kernel generally more difficult.
 
diff --git a/Documentation/devicetree/bindings/arm/omap/omap.txt b/Documentation/devicetree/bindings/arm/omap/omap.txt
index e77635c5422c6a03995bc45e02049d6b44e4b190..fa8b31660cadd1e2cfd0c7cc8c19bb971be3808e 100644
--- a/Documentation/devicetree/bindings/arm/omap/omap.txt
+++ b/Documentation/devicetree/bindings/arm/omap/omap.txt
@@ -119,6 +119,9 @@ Boards (incomplete list of examples):
 - OMAP3 BeagleBoard : Low cost community board
   compatible = "ti,omap3-beagle", "ti,omap3430", "ti,omap3"
 
+- OMAP3 BeagleBoard A to B4 : Early BeagleBoard revisions A to B4 with a timer quirk
+  compatible = "ti,omap3-beagle-ab4", "ti,omap3-beagle", "ti,omap3430", "ti,omap3"
+
 - OMAP3 Tobi with Overo : Commercial expansion board with daughter board
   compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap3430", "ti,omap3"
 
diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
index 0da42ab8fd3a52b2f9fef7b1ff47919a273c3003..8a67bb889f18a851cc0c3d6e471bc25c9bc13f39 100644
--- a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
@@ -10,6 +10,9 @@ title: Amlogic specific extensions to the Synopsys Designware HDMI Controller
 maintainers:
   - Neil Armstrong
 
+allOf:
+  - $ref: /schemas/sound/name-prefix.yaml#
+
 description: |
   The Amlogic Meson Synopsys Designware Integration is composed of
   - A Synopsys DesignWare HDMI Controller IP
@@ -99,6 +102,8 @@ properties:
   "#sound-dai-cells":
     const: 0
 
+  sound-name-prefix: true
+
 required:
   - compatible
   - reg
diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
index a8d202c9d004c67f7009e17d8928cbb415e45355..b8cb1b4dae1ff02d836441380bd1be7ae98675e0 100644
--- a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
+++ b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
@@ -78,6 +78,10 @@ properties:
   interrupts:
     maxItems: 1
 
+  amlogic,canvas:
+    description: should point to a canvas provider node
+    $ref: /schemas/types.yaml#/definitions/phandle
+
   power-domains:
     maxItems: 1
     description: phandle to the associated power domain
@@ -106,6 +110,7 @@ required:
   - port@1
   - "#address-cells"
   - "#size-cells"
+  - amlogic,canvas
 
 additionalProperties: false
 
@@ -118,6 +123,7 @@ examples:
         interrupts = <3>;
         #address-cells = <1>;
         #size-cells = <0>;
+        amlogic,canvas = <&canvas>;
 
         /* CVBS VDAC output port */
         port@0 {
diff --git a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
index 0968b40aef1e8147a63087f8f360107285fb900d..e3501bfa22e904fe52b7944a37ab541eaf531fd2 100644
--- a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
+++ b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
@@ -31,7 +31,7 @@ tcan4x5x: tcan4x5x@0 {
 		#address-cells = <1>;
 		#size-cells = <1>;
 		spi-max-frequency = <10000000>;
-		bosch,mram-cfg = <0x0 0 0 32 0 0 1 1>;
+		bosch,mram-cfg = <0x0 0 0 16 0 0 1 1>;
 		interrupt-parent = <&gpio1>;
 		interrupts = <14 IRQ_TYPE_LEVEL_LOW>;
 		device-state-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
diff --git a/Documentation/devicetree/bindings/thermal/thermal-zones.yaml b/Documentation/devicetree/bindings/thermal/thermal-zones.yaml
index 164f71598c5956fa16b21284ba1e2c3d3523dca8..1b3954aa71c157069b9ec34a02c9d008a56d552b 100644
--- a/Documentation/devicetree/bindings/thermal/thermal-zones.yaml
+++ b/Documentation/devicetree/bindings/thermal/thermal-zones.yaml
@@ -199,12 +199,11 @@ patternProperties:
 
           contribution:
             $ref: /schemas/types.yaml#/definitions/uint32
-            minimum: 0
-            maximum: 100
             description:
-              The percentage contribution of the cooling devices at the
-              specific trip temperature referenced in this map
-              to this thermal zone
+              The cooling contribution to the thermal zone of the referred
+              cooling device at the referred trip point. The contribution is
+              a ratio of the sum of all cooling contributions within a
+              thermal zone.
 
         required:
           - trip
diff --git a/Documentation/devicetree/bindings/watchdog/samsung-wdt.yaml b/Documentation/devicetree/bindings/watchdog/samsung-wdt.yaml
index 76cb9586ee00cab46858e1004089ab505c8c9a0c..93cd77a6e92c0132a51d353c3d5a75bdb032159a 100644
--- a/Documentation/devicetree/bindings/watchdog/samsung-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/samsung-wdt.yaml
@@ -39,8 +39,8 @@ properties:
   samsung,syscon-phandle:
     $ref: /schemas/types.yaml#/definitions/phandle
     description:
-      Phandle to the PMU system controller node (in case of Exynos5250
-      and Exynos5420).
+      Phandle to the PMU system controller node (in case of Exynos5250,
+      Exynos5420 and Exynos7).
 
 required:
   - compatible
@@ -58,6 +58,7 @@ allOf:
           enum:
             - samsung,exynos5250-wdt
             - samsung,exynos5420-wdt
+            - samsung,exynos7-wdt
     then:
       required:
         - samsung,syscon-phandle
diff --git a/Documentation/driver-api/dmaengine/dmatest.rst b/Documentation/driver-api/dmaengine/dmatest.rst
index ee268d445d38b64b173098630a1db287f7bcd1b0..d2e1d8b58e7dc13b7285ece750917952a180a559 100644
--- a/Documentation/driver-api/dmaengine/dmatest.rst
+++ b/Documentation/driver-api/dmaengine/dmatest.rst
@@ -143,13 +143,14 @@ Part 5 - Handling channel allocation
 Allocating Channels
 -------------------
 
-Channels are required to be configured prior to starting the test run.
-Attempting to run the test without configuring the channels will fail.
+Channels do not need to be configured prior to starting a test run. Attempting
+to run the test without configuring the channels will result in testing any
+channels that are available.
 
 Example::
 
     % echo 1 > /sys/module/dmatest/parameters/run
-    dmatest: Could not start test, no channels configured
+    dmatest: No channels configured, continue with any
 
 Channels are registered using the "channel" parameter. Channels can be requested
 by their name, once requested, the channel is registered and a pending thread
 is added to the test list.
diff --git a/Documentation/driver-api/firewire.rst b/Documentation/driver-api/firewire.rst index 94a2d7f01d99924e0d338b57e09387ecb230c41a..d3cfa73cbb2b47e624961f3e15aadb6528dfec37 100644 --- a/Documentation/driver-api/firewire.rst +++ b/Documentation/driver-api/firewire.rst @@ -19,7 +19,7 @@ of kernel interfaces is available via exported symbols in `firewire-core` module Firewire char device data structures ==================================== -.. include:: /ABI/stable/firewire-cdev +.. include:: ../ABI/stable/firewire-cdev :literal: .. kernel-doc:: include/uapi/linux/firewire-cdev.h @@ -28,7 +28,7 @@ Firewire char device data structures Firewire device probing and sysfs interfaces ============================================ -.. include:: /ABI/stable/sysfs-bus-firewire +.. include:: ../ABI/stable/sysfs-bus-firewire :literal: .. kernel-doc:: drivers/firewire/core-device.c diff --git a/Documentation/firmware-guide/acpi/dsd/data-node-references.rst b/Documentation/firmware-guide/acpi/dsd/data-node-references.rst index 9b17dc77d18c5c0c916007cf8cc2f160ba56969c..da0e46496fc4db82086fc05709f9bd04220cd268 100644 --- a/Documentation/firmware-guide/acpi/dsd/data-node-references.rst +++ b/Documentation/firmware-guide/acpi/dsd/data-node-references.rst @@ -5,7 +5,7 @@ Referencing hierarchical data nodes =================================== -:Copyright: |copy| 2018 Intel Corporation +:Copyright: |copy| 2018, 2021 Intel Corporation :Author: Sakari Ailus ACPI in general allows referring to device objects in the tree only. @@ -52,12 +52,14 @@ the ANOD object which is also the final target node of the reference. Name (NOD0, Package() { ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"), Package () { + Package () { "reg", 0 }, Package () { "random-property", 3 }, } }) Name (NOD1, Package() { ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"), Package () { + Package () { "reg", 1 }, Package () { "anothernode", "ANOD" }, } }) @@ -74,7 +76,11 @@ the ANOD object which is also the final target node of the reference. Name (_DSD, Package () { ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"), Package () { - Package () { "reference", ^DEV0, "node@1", "anothernode" }, + Package () { + "reference", Package () { + ^DEV0, "node@1", "anothernode" + } + }, } }) } diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst index 7272a4bd74dd05a89ba7668e69980c906596665b..28841609aa4f8d589d3952d7381d3ceaefa1f7fa 100644 --- a/Documentation/gpu/todo.rst +++ b/Documentation/gpu/todo.rst @@ -273,24 +273,6 @@ Contact: Daniel Vetter, Noralf Tronnes Level: Advanced -Garbage collect fbdev scrolling acceleration --------------------------------------------- - -Scroll acceleration is disabled in fbcon by hard-wiring p->scrollmode = -SCROLL_REDRAW. There's a ton of code this will allow us to remove: -- lots of code in fbcon.c -- a bunch of the hooks in fbcon_ops, maybe the remaining hooks could be called - directly instead of the function table (with a switch on p->rotate) -- fb_copyarea is unused after this, and can be deleted from all drivers - -Note that not all acceleration code can be deleted, since clearing and cursor -support is still accelerated, which might be good candidates for further -deletion projects. 
- -Contact: Daniel Vetter - -Level: Intermediate - idr_init_base() --------------- diff --git a/Makefile b/Makefile index f87bd990932df3b83efd023082f907ec83df4977..ec04d8ceb802c0e6334320a0eda38754a9b8e901 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 5 PATCHLEVEL = 10 -SUBLEVEL = 92 +SUBLEVEL = 103 EXTRAVERSION = NAME = Dare mighty things @@ -1073,7 +1073,7 @@ export mod_sign_cmd HOST_LIBELF_LIBS = $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf) has_libelf = $(call try-run,\ - echo "int main() {}" | $(HOSTCC) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0) + echo "int main() {}" | $(HOSTCC) $(KBUILD_HOSTLDFLAGS) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0) ifdef CONFIG_STACK_VALIDATION ifeq ($(has_libelf),1) diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index 8986a91a6f31b82c2ef81f7b7a9b4c425425a9a6..dd1cf70353986045b55accb19ee4cff56e8b3d91 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -400,12 +400,12 @@ choice Say Y here if you want kernel low-level debugging support on i.MX25. - config DEBUG_IMX21_IMX27_UART - bool "i.MX21 and i.MX27 Debug UART" - depends on SOC_IMX21 || SOC_IMX27 + config DEBUG_IMX27_UART + bool "i.MX27 Debug UART" + depends on SOC_IMX27 help Say Y here if you want kernel low-level debugging support - on i.MX21 or i.MX27. + on i.MX27. config DEBUG_IMX28_UART bool "i.MX28 Debug UART" @@ -1523,7 +1523,7 @@ config DEBUG_IMX_UART_PORT int "i.MX Debug UART Port Selection" depends on DEBUG_IMX1_UART || \ DEBUG_IMX25_UART || \ - DEBUG_IMX21_IMX27_UART || \ + DEBUG_IMX27_UART || \ DEBUG_IMX31_UART || \ DEBUG_IMX35_UART || \ DEBUG_IMX50_UART || \ @@ -1591,12 +1591,12 @@ config DEBUG_LL_INCLUDE default "debug/icedcc.S" if DEBUG_ICEDCC default "debug/imx.S" if DEBUG_IMX1_UART || \ DEBUG_IMX25_UART || \ - DEBUG_IMX21_IMX27_UART || \ + DEBUG_IMX27_UART || \ DEBUG_IMX31_UART || \ DEBUG_IMX35_UART || \ DEBUG_IMX50_UART || \ DEBUG_IMX51_UART || \ - DEBUG_IMX53_UART ||\ + DEBUG_IMX53_UART || \ DEBUG_IMX6Q_UART || \ DEBUG_IMX6SL_UART || \ DEBUG_IMX6SX_UART || \ diff --git a/arch/arm/boot/compressed/efi-header.S b/arch/arm/boot/compressed/efi-header.S index c0e7a745103e22b8618618e971ba3ab41b95e5af..230030c1308538f2cef2e9f7118d6e691ab8bd59 100644 --- a/arch/arm/boot/compressed/efi-header.S +++ b/arch/arm/boot/compressed/efi-header.S @@ -9,16 +9,22 @@ #include .macro __nop -#ifdef CONFIG_EFI_STUB - @ This is almost but not quite a NOP, since it does clobber the - @ condition flags. But it is the best we can do for EFI, since - @ PE/COFF expects the magic string "MZ" at offset 0, while the - @ ARM/Linux boot protocol expects an executable instruction - @ there. - .inst MZ_MAGIC | (0x1310 << 16) @ tstne r0, #0x4d000 -#else AR_CLASS( mov r0, r0 ) M_CLASS( nop.w ) + .endm + + .macro __initial_nops +#ifdef CONFIG_EFI_STUB + @ This is a two-instruction NOP, which happens to bear the + @ PE/COFF signature "MZ" in the first two bytes, so the kernel + @ is accepted as an EFI binary. Booting via the UEFI stub + @ will not execute those instructions, but the ARM/Linux + @ boot protocol does, so we need some NOPs here. 
+ .inst MZ_MAGIC | (0xe225 << 16) @ eor r5, r5, 0x4d000 + eor r5, r5, 0x4d000 @ undo previous insn +#else + __nop + __nop #endif .endm diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 538c2b08b7e5f2fc5988d87f1bf148c143cbbbce..b8074ee044c2aea730106a1314c8146561fe556a 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -190,7 +190,8 @@ start: * were patching the initial instructions of the kernel, i.e * had started to exploit this "patch area". */ - .rept 7 + __initial_nops + .rept 5 __nop .endr #ifndef CONFIG_THUMB2_KERNEL diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile index ee163c52577ecb922f1cc002bb465aca731bf424..6bcb35ad76acb78dfa268c41b6a36be2738efc7c 100644 --- a/arch/arm/boot/dts/Makefile +++ b/arch/arm/boot/dts/Makefile @@ -750,6 +750,7 @@ dtb-$(CONFIG_ARCH_OMAP3) += \ logicpd-som-lv-37xx-devkit.dtb \ omap3430-sdp.dtb \ omap3-beagle.dtb \ + omap3-beagle-ab4.dtb \ omap3-beagle-xm.dtb \ omap3-beagle-xm-ab.dtb \ omap3-cm-t3517.dtb \ diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi index 9b1a24cc5e91f2f52c32eeb248831f60d13040e0..df3c8d1d8f641230f9b17b8dcde7f58336918eb1 100644 --- a/arch/arm/boot/dts/armada-38x.dtsi +++ b/arch/arm/boot/dts/armada-38x.dtsi @@ -168,7 +168,7 @@ }; uart0: serial@12000 { - compatible = "marvell,armada-38x-uart"; + compatible = "marvell,armada-38x-uart", "ns16550a"; reg = <0x12000 0x100>; reg-shift = <2>; interrupts = ; @@ -178,7 +178,7 @@ }; uart1: serial@12100 { - compatible = "marvell,armada-38x-uart"; + compatible = "marvell,armada-38x-uart", "ns16550a"; reg = <0x12100 0x100>; reg-shift = <2>; interrupts = ; diff --git a/arch/arm/boot/dts/gemini-nas4220b.dts b/arch/arm/boot/dts/gemini-nas4220b.dts index 13112a8a5dd88dbd7274e860ad3c17b4d295761b..6544c730340fa4cefd916c22d37342c3080084a9 100644 --- a/arch/arm/boot/dts/gemini-nas4220b.dts +++ b/arch/arm/boot/dts/gemini-nas4220b.dts @@ -84,7 +84,7 @@ partitions { compatible = "redboot-fis"; /* Eraseblock at 0xfe0000 */ - fis-index-block = <0x1fc>; + fis-index-block = <0x7f>; }; }; diff --git a/arch/arm/boot/dts/imx23-evk.dts b/arch/arm/boot/dts/imx23-evk.dts index 8cbaf1c8117456a11125de5f1f9986e1c9078e7d..3b609d987d88300f68f1402057dfc9b47b8bb146 100644 --- a/arch/arm/boot/dts/imx23-evk.dts +++ b/arch/arm/boot/dts/imx23-evk.dts @@ -79,7 +79,6 @@ MX23_PAD_LCD_RESET__GPIO_1_18 MX23_PAD_PWM3__GPIO_1_29 MX23_PAD_PWM4__GPIO_1_30 - MX23_PAD_SSP1_DETECT__SSP1_DETECT >; fsl,drive-strength = ; fsl,voltage = ; diff --git a/arch/arm/boot/dts/imx6qdl-udoo.dtsi b/arch/arm/boot/dts/imx6qdl-udoo.dtsi index d07d8f83456d241a288be60ce729579b9dded12f..ccfa8e320be62e26eea26a970eca940e440e2752 100644 --- a/arch/arm/boot/dts/imx6qdl-udoo.dtsi +++ b/arch/arm/boot/dts/imx6qdl-udoo.dtsi @@ -5,6 +5,8 @@ * Author: Fabio Estevam */ +#include + / { aliases { backlight = &backlight; @@ -226,6 +228,7 @@ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059 MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059 MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059 + MX6QDL_PAD_SD3_DAT5__GPIO7_IO00 0x1b0b0 >; }; @@ -304,7 +307,7 @@ &usdhc3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc3>; - non-removable; + cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx7ulp.dtsi b/arch/arm/boot/dts/imx7ulp.dtsi index b7ea37ad4e55c45f5615766f3fa30bf7d9eab4a4..bcec98b96411437c831d9e91ddaf2f63d657ea9e 100644 --- a/arch/arm/boot/dts/imx7ulp.dtsi +++ b/arch/arm/boot/dts/imx7ulp.dtsi @@ -259,7 +259,7 @@ interrupts = ; clocks = 
<&pcc2 IMX7ULP_CLK_WDG1>; assigned-clocks = <&pcc2 IMX7ULP_CLK_WDG1>; - assigned-clocks-parents = <&scg1 IMX7ULP_CLK_FIRC_BUS_CLK>; + assigned-clock-parents = <&scg1 IMX7ULP_CLK_FIRC_BUS_CLK>; timeout-sec = <40>; }; diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi index 7649dd1e0b9ee5cd6aa67d0a8ef48a3f80e08a84..c928ae312e19c0c329efb8d68a4ca1f4a5561d21 100644 --- a/arch/arm/boot/dts/meson.dtsi +++ b/arch/arm/boot/dts/meson.dtsi @@ -42,14 +42,14 @@ }; uart_A: serial@84c0 { - compatible = "amlogic,meson6-uart", "amlogic,meson-uart"; + compatible = "amlogic,meson6-uart"; reg = <0x84c0 0x18>; interrupts = ; status = "disabled"; }; uart_B: serial@84dc { - compatible = "amlogic,meson6-uart", "amlogic,meson-uart"; + compatible = "amlogic,meson6-uart"; reg = <0x84dc 0x18>; interrupts = ; status = "disabled"; @@ -87,7 +87,7 @@ }; uart_C: serial@8700 { - compatible = "amlogic,meson6-uart", "amlogic,meson-uart"; + compatible = "amlogic,meson6-uart"; reg = <0x8700 0x18>; interrupts = ; status = "disabled"; @@ -203,7 +203,7 @@ }; uart_AO: serial@4c0 { - compatible = "amlogic,meson6-uart", "amlogic,meson-ao-uart", "amlogic,meson-uart"; + compatible = "amlogic,meson6-uart", "amlogic,meson-ao-uart"; reg = <0x4c0 0x18>; interrupts = ; status = "disabled"; diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi index 740a6c816266cc6a20469f592f547474cc351a2c..08533116a39ce0a80264cafe325cdac5d3abc15a 100644 --- a/arch/arm/boot/dts/meson8.dtsi +++ b/arch/arm/boot/dts/meson8.dtsi @@ -598,27 +598,27 @@ }; &uart_AO { - compatible = "amlogic,meson8-uart", "amlogic,meson-uart"; - clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_CLK81>; - clock-names = "baud", "xtal", "pclk"; + compatible = "amlogic,meson8-uart", "amlogic,meson-ao-uart"; + clocks = <&xtal>, <&clkc CLKID_CLK81>, <&clkc CLKID_CLK81>; + clock-names = "xtal", "pclk", "baud"; }; &uart_A { - compatible = "amlogic,meson8-uart", "amlogic,meson-uart"; - clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART0>; - clock-names = "baud", "xtal", "pclk"; + compatible = "amlogic,meson8-uart"; + clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>; + clock-names = "xtal", "pclk", "baud"; }; &uart_B { - compatible = "amlogic,meson8-uart", "amlogic,meson-uart"; - clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART1>; - clock-names = "baud", "xtal", "pclk"; + compatible = "amlogic,meson8-uart"; + clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>; + clock-names = "xtal", "pclk", "baud"; }; &uart_C { - compatible = "amlogic,meson8-uart", "amlogic,meson-uart"; - clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART2>; - clock-names = "baud", "xtal", "pclk"; + compatible = "amlogic,meson8-uart"; + clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>; + clock-names = "xtal", "pclk", "baud"; }; &usb0 { diff --git a/arch/arm/boot/dts/meson8b.dtsi b/arch/arm/boot/dts/meson8b.dtsi index 2401cdf5f7511be3bd16e4b77db46e3e88f7152f..f6eb7c803174e7c74c52244d3a5913113d248f41 100644 --- a/arch/arm/boot/dts/meson8b.dtsi +++ b/arch/arm/boot/dts/meson8b.dtsi @@ -586,27 +586,27 @@ }; &uart_AO { - compatible = "amlogic,meson8b-uart", "amlogic,meson-uart"; - clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_CLK81>; - clock-names = "baud", "xtal", "pclk"; + compatible = "amlogic,meson8b-uart", "amlogic,meson-ao-uart"; + clocks = <&xtal>, <&clkc CLKID_CLK81>, <&clkc CLKID_CLK81>; + clock-names = "xtal", "pclk", "baud"; }; &uart_A { - compatible = "amlogic,meson8b-uart", "amlogic,meson-uart"; - clocks = <&clkc 
CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART0>; - clock-names = "baud", "xtal", "pclk"; + compatible = "amlogic,meson8b-uart"; + clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>; + clock-names = "xtal", "pclk", "baud"; }; &uart_B { - compatible = "amlogic,meson8b-uart", "amlogic,meson-uart"; - clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART1>; - clock-names = "baud", "xtal", "pclk"; + compatible = "amlogic,meson8b-uart"; + clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>; + clock-names = "xtal", "pclk", "baud"; }; &uart_C { - compatible = "amlogic,meson8b-uart", "amlogic,meson-uart"; - clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART2>; - clock-names = "baud", "xtal", "pclk"; + compatible = "amlogic,meson8b-uart"; + clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>; + clock-names = "xtal", "pclk", "baud"; }; &usb0 { diff --git a/arch/arm/boot/dts/omap3-beagle-ab4.dts b/arch/arm/boot/dts/omap3-beagle-ab4.dts new file mode 100644 index 0000000000000000000000000000000000000000..990ff2d8468684469dbe5704ebe6a75ef32bd66c --- /dev/null +++ b/arch/arm/boot/dts/omap3-beagle-ab4.dts @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-2.0-only +/dts-v1/; + +#include "omap3-beagle.dts" + +/ { + model = "TI OMAP3 BeagleBoard A to B4"; + compatible = "ti,omap3-beagle-ab4", "ti,omap3-beagle", "ti,omap3430", "ti,omap3"; +}; + +/* + * Workaround for capacitor C70 issue, see "Boards revision A and < B5" + * section at https://elinux.org/BeagleBoard_Community + */ + +/* Unusable as clocksource because of unreliable oscillator */ +&counter32k { + status = "disabled"; +}; + +/* Unusable as clockevent because of unreliable oscillator, allow to idle */ +&timer1_target { + /delete-property/ti,no-reset-on-init; + /delete-property/ti,no-idle; + timer@0 { + /delete-property/ti,timer-alwon; + }; +}; + +/* Preferred always-on timer for clocksource */ +&timer12_target { + ti,no-reset-on-init; + ti,no-idle; + timer@0 { + /* Always clocked by secure_32k_fck */ + }; +}; + +/* Preferred timer for clockevent */ +&timer2_target { + ti,no-reset-on-init; + ti,no-idle; + timer@0 { + assigned-clocks = <&gpt2_fck>; + assigned-clock-parents = <&sys_ck>; + }; +}; diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts index f9f34b8458e91c871de4a2e2d1093a306ba99cfd..0548b391334fdbf992beb9a3ae41c3edb7b741b4 100644 --- a/arch/arm/boot/dts/omap3-beagle.dts +++ b/arch/arm/boot/dts/omap3-beagle.dts @@ -304,39 +304,6 @@ phys = <0 &hsusb2_phy>; }; -/* Unusable as clocksource because of unreliable oscillator */ -&counter32k { - status = "disabled"; -}; - -/* Unusable as clockevent because if unreliable oscillator, allow to idle */ -&timer1_target { - /delete-property/ti,no-reset-on-init; - /delete-property/ti,no-idle; - timer@0 { - /delete-property/ti,timer-alwon; - }; -}; - -/* Preferred always-on timer for clocksource */ -&timer12_target { - ti,no-reset-on-init; - ti,no-idle; - timer@0 { - /* Always clocked by secure_32k_fck */ - }; -}; - -/* Preferred timer for clockevent */ -&timer2_target { - ti,no-reset-on-init; - ti,no-idle; - timer@0 { - assigned-clocks = <&gpt2_fck>; - assigned-clock-parents = <&sys_ck>; - }; -}; - &twl_gpio { ti,use-leds; /* pullups: BIT(1) */ diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index 32335d4ce478b77b4224f05f266c322641a76277..d40c3d2c4914e400cf1365397632dc3d59ebf0f8 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts @@ -8,6 +8,7 @@ #include "omap34xx.dtsi" #include 
+#include /* * Default secure signed bootloader (Nokia X-Loader) does not enable L3 firewall @@ -630,63 +631,92 @@ }; lp5523: lp5523@32 { + #address-cells = <1>; + #size-cells = <0>; compatible = "national,lp5523"; reg = <0x32>; clock-mode = /bits/ 8 <0>; /* LP55XX_CLOCK_AUTO */ - enable-gpio = <&gpio2 9 GPIO_ACTIVE_HIGH>; /* 41 */ + enable-gpios = <&gpio2 9 GPIO_ACTIVE_HIGH>; /* 41 */ - chan0 { + led@0 { + reg = <0>; chan-name = "lp5523:kb1"; led-cur = /bits/ 8 <50>; max-cur = /bits/ 8 <100>; + color = ; + function = LED_FUNCTION_KBD_BACKLIGHT; }; - chan1 { + led@1 { + reg = <1>; chan-name = "lp5523:kb2"; led-cur = /bits/ 8 <50>; max-cur = /bits/ 8 <100>; + color = ; + function = LED_FUNCTION_KBD_BACKLIGHT; }; - chan2 { + led@2 { + reg = <2>; chan-name = "lp5523:kb3"; led-cur = /bits/ 8 <50>; max-cur = /bits/ 8 <100>; + color = ; + function = LED_FUNCTION_KBD_BACKLIGHT; }; - chan3 { + led@3 { + reg = <3>; chan-name = "lp5523:kb4"; led-cur = /bits/ 8 <50>; max-cur = /bits/ 8 <100>; + color = ; + function = LED_FUNCTION_KBD_BACKLIGHT; }; - chan4 { + led@4 { + reg = <4>; chan-name = "lp5523:b"; led-cur = /bits/ 8 <50>; max-cur = /bits/ 8 <100>; + color = ; + function = LED_FUNCTION_STATUS; }; - chan5 { + led@5 { + reg = <5>; chan-name = "lp5523:g"; led-cur = /bits/ 8 <50>; max-cur = /bits/ 8 <100>; + color = ; + function = LED_FUNCTION_STATUS; }; - chan6 { + led@6 { + reg = <6>; chan-name = "lp5523:r"; led-cur = /bits/ 8 <50>; max-cur = /bits/ 8 <100>; + color = ; + function = LED_FUNCTION_STATUS; }; - chan7 { + led@7 { + reg = <7>; chan-name = "lp5523:kb5"; led-cur = /bits/ 8 <50>; max-cur = /bits/ 8 <100>; + color = ; + function = LED_FUNCTION_KBD_BACKLIGHT; }; - chan8 { + led@8 { + reg = <8>; chan-name = "lp5523:kb6"; led-cur = /bits/ 8 <50>; max-cur = /bits/ 8 <100>; + color = ; + function = LED_FUNCTION_KBD_BACKLIGHT; }; }; diff --git a/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts b/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts index 08bddbf0336da8833de225d3a5907c3d81050b7d..446d93c1c78241e388904ee6004c24e2f094a63c 100644 --- a/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts +++ b/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts @@ -154,10 +154,6 @@ cap-sd-highspeed; cap-mmc-highspeed; /* All direction control is used */ - st,sig-dir-cmd; - st,sig-dir-dat0; - st,sig-dir-dat2; - st,sig-dir-dat31; st,sig-pin-fbclk; full-pwr-cycle; vmmc-supply = <&ab8500_ldo_aux3_reg>; diff --git a/arch/arm/boot/dts/stm32f429-disco.dts b/arch/arm/boot/dts/stm32f429-disco.dts index 075ac57d0bf4af7ed5c5fea8233725963fd00a08..6435e099c632627708b6b5ad648643752db3a9e7 100644 --- a/arch/arm/boot/dts/stm32f429-disco.dts +++ b/arch/arm/boot/dts/stm32f429-disco.dts @@ -192,7 +192,7 @@ display: display@1{ /* Connect panel-ilitek-9341 to ltdc */ - compatible = "st,sf-tc240t-9370-t"; + compatible = "st,sf-tc240t-9370-t", "ilitek,ili9341"; reg = <1>; spi-3wire; spi-max-frequency = <10000000>; diff --git a/arch/arm/include/debug/imx-uart.h b/arch/arm/include/debug/imx-uart.h index c8eb83d4b8964b1a5e0d1a01ec8b486154bab456..3edbb3c5b42bfcfc81bf60db3d7baa55ae74af6e 100644 --- a/arch/arm/include/debug/imx-uart.h +++ b/arch/arm/include/debug/imx-uart.h @@ -11,13 +11,6 @@ #define IMX1_UART_BASE_ADDR(n) IMX1_UART##n##_BASE_ADDR #define IMX1_UART_BASE(n) IMX1_UART_BASE_ADDR(n) -#define IMX21_UART1_BASE_ADDR 0x1000a000 -#define IMX21_UART2_BASE_ADDR 0x1000b000 -#define IMX21_UART3_BASE_ADDR 0x1000c000 -#define IMX21_UART4_BASE_ADDR 0x1000d000 -#define IMX21_UART_BASE_ADDR(n) IMX21_UART##n##_BASE_ADDR -#define IMX21_UART_BASE(n) 
IMX21_UART_BASE_ADDR(n) - #define IMX25_UART1_BASE_ADDR 0x43f90000 #define IMX25_UART2_BASE_ADDR 0x43f94000 #define IMX25_UART3_BASE_ADDR 0x5000c000 @@ -26,6 +19,13 @@ #define IMX25_UART_BASE_ADDR(n) IMX25_UART##n##_BASE_ADDR #define IMX25_UART_BASE(n) IMX25_UART_BASE_ADDR(n) +#define IMX27_UART1_BASE_ADDR 0x1000a000 +#define IMX27_UART2_BASE_ADDR 0x1000b000 +#define IMX27_UART3_BASE_ADDR 0x1000c000 +#define IMX27_UART4_BASE_ADDR 0x1000d000 +#define IMX27_UART_BASE_ADDR(n) IMX27_UART##n##_BASE_ADDR +#define IMX27_UART_BASE(n) IMX27_UART_BASE_ADDR(n) + #define IMX31_UART1_BASE_ADDR 0x43f90000 #define IMX31_UART2_BASE_ADDR 0x43f94000 #define IMX31_UART3_BASE_ADDR 0x5000c000 @@ -112,10 +112,10 @@ #ifdef CONFIG_DEBUG_IMX1_UART #define UART_PADDR IMX_DEBUG_UART_BASE(IMX1) -#elif defined(CONFIG_DEBUG_IMX21_IMX27_UART) -#define UART_PADDR IMX_DEBUG_UART_BASE(IMX21) #elif defined(CONFIG_DEBUG_IMX25_UART) #define UART_PADDR IMX_DEBUG_UART_BASE(IMX25) +#elif defined(CONFIG_DEBUG_IMX27_UART) +#define UART_PADDR IMX_DEBUG_UART_BASE(IMX27) #elif defined(CONFIG_DEBUG_IMX31_UART) #define UART_PADDR IMX_DEBUG_UART_BASE(IMX31) #elif defined(CONFIG_DEBUG_IMX35_UART) diff --git a/arch/arm/kernel/perf_callchain.c b/arch/arm/kernel/perf_callchain.c index 3b69a76d341e784075a1f8ef053f0d308177feee..1626dfc6f6ce61c6aa7b94973190a86e6887f74a 100644 --- a/arch/arm/kernel/perf_callchain.c +++ b/arch/arm/kernel/perf_callchain.c @@ -62,9 +62,10 @@ user_backtrace(struct frame_tail __user *tail, void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); struct frame_tail __user *tail; - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { + if (guest_cbs && guest_cbs->is_in_guest()) { /* We don't support guest os callchain now */ return; } @@ -98,9 +99,10 @@ callchain_trace(struct stackframe *fr, void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); struct stackframe fr; - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { + if (guest_cbs && guest_cbs->is_in_guest()) { /* We don't support guest os callchain now */ return; } @@ -111,18 +113,21 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re unsigned long perf_instruction_pointer(struct pt_regs *regs) { - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) - return perf_guest_cbs->get_guest_ip(); + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); + + if (guest_cbs && guest_cbs->is_in_guest()) + return guest_cbs->get_guest_ip(); return instruction_pointer(regs); } unsigned long perf_misc_flags(struct pt_regs *regs) { + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); int misc = 0; - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { - if (perf_guest_cbs->is_user_mode()) + if (guest_cbs && guest_cbs->is_in_guest()) { + if (guest_cbs->is_user_mode()) misc |= PERF_RECORD_MISC_GUEST_USER; else misc |= PERF_RECORD_MISC_GUEST_KERNEL; diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index 2000fca6bd4e6ec585930729381e552e8f13c251..6098666e928d0678e8fa2cba26d51e039515ea2e 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c @@ -263,9 +263,9 @@ static int __init omapdss_init_of(void) } r = of_platform_populate(node, NULL, NULL, &pdev->dev); + put_device(&pdev->dev); if (r) { pr_err("Unable to populate DSS submodule devices\n"); - 
put_device(&pdev->dev); return r; } diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 9443f129859b23653cc2caf2abec4b02e43c0840..1fd67abca055b26dd489fc736769ffdb946dcf50 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -749,8 +749,10 @@ static int __init _init_clkctrl_providers(void) for_each_matching_node(np, ti_clkctrl_match_table) { ret = _setup_clkctrl_provider(np); - if (ret) + if (ret) { + of_node_put(np); break; + } } return ret; diff --git a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c index ee949255ced3f01ad4256b2b7bea3ba669006309..09ef73b99dd86a851a98bc7c75c433821c5b5bf1 100644 --- a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c +++ b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c @@ -154,8 +154,10 @@ static int __init rcar_gen2_regulator_quirk(void) return -ENODEV; for_each_matching_node_and_match(np, rcar_gen2_quirk_match, &id) { - if (!of_device_is_available(np)) + if (!of_device_is_available(np)) { + of_node_put(np); break; + } ret = of_property_read_u32(np, "reg", &addr); if (ret) /* Skip invalid entry and continue */ @@ -164,6 +166,7 @@ static int __init rcar_gen2_regulator_quirk(void) quirk = kzalloc(sizeof(*quirk), GFP_KERNEL); if (!quirk) { ret = -ENOMEM; + of_node_put(np); goto err_mem; } diff --git a/arch/arm/mach-socfpga/Kconfig b/arch/arm/mach-socfpga/Kconfig index c3bb68d57cea2e5143a61073744050c23be44233..b62ae4dafa2eb676a11e5b6ac9c01a4833dc2966 100644 --- a/arch/arm/mach-socfpga/Kconfig +++ b/arch/arm/mach-socfpga/Kconfig @@ -2,6 +2,7 @@ menuconfig ARCH_SOCFPGA bool "Altera SOCFPGA family" depends on ARCH_MULTI_V7 + select ARCH_HAS_RESET_CONTROLLER select ARCH_SUPPORTS_BIG_ENDIAN select ARM_AMBA select ARM_GIC @@ -18,6 +19,7 @@ menuconfig ARCH_SOCFPGA select PL310_ERRATA_727915 select PL310_ERRATA_753970 if PL310 select PL310_ERRATA_769419 + select RESET_CONTROLLER if ARCH_SOCFPGA config SOCFPGA_SUSPEND diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi index 959b299344e54c7346b596fd990e66a68d69faf0..075153a4d49fc597a616467f3decf8135125c7db 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi @@ -52,7 +52,7 @@ secure-monitor = <&sm>; }; - gpu_opp_table: gpu-opp-table { + gpu_opp_table: opp-table-gpu { compatible = "operating-points-v2"; opp-124999998 { @@ -101,6 +101,12 @@ no-map; }; + /* 32 MiB reserved for ARM Trusted Firmware (BL32) */ + secmon_reserved_bl32: secmon@5300000 { + reg = <0x0 0x05300000 0x0 0x2000000>; + no-map; + }; + linux,cma { compatible = "shared-dma-pool"; reusable; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts index 4d5b3e514b514be0034aa35a9c57dd5df8c7fca9..71f91e31c181887b6aa6abbd69365f8917d8439c 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts +++ b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts @@ -157,14 +157,6 @@ regulator-always-on; }; - reserved-memory { - /* TEE Reserved Memory */ - bl32_reserved: bl32@5000000 { - reg = <0x0 0x05300000 0x0 0x2000000>; - no-map; - }; - }; - sdio_pwrseq: sdio-pwrseq { compatible = "mmc-pwrseq-simple"; reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi index 
59b5f390887577d01afe9bda087e4dc6178bfa26..87e8e64ad5caee06df5c2a6a50338c74d8ae48ff 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi @@ -15,7 +15,7 @@ ethernet0 = ðmac; }; - dioo2133: audio-amplifier-0 { + dio2133: audio-amplifier-0 { compatible = "simple-audio-amplifier"; enable-gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_HIGH>; VCC-supply = <&vcc_5v>; @@ -215,7 +215,7 @@ audio-widgets = "Line", "Lineout"; audio-aux-devs = <&tdmout_b>, <&tdmout_c>, <&tdmin_a>, <&tdmin_b>, <&tdmin_c>, <&tdmin_lb>, - <&dioo2133>; + <&dio2133>; audio-routing = "TDMOUT_B IN 0", "FRDDR_A OUT 1", "TDMOUT_B IN 1", "FRDDR_B OUT 1", "TDMOUT_B IN 2", "FRDDR_C OUT 1", @@ -543,7 +543,7 @@ pinctrl-0 = <&nor_pins>; pinctrl-names = "default"; - mx25u64: spi-flash@0 { + mx25u64: flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "mxicy,mx25u6435f", "jedec,spi-nor"; diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi index 0edd137151f89eb6ba63695d28abdfaf18cf6434..47cbb0a1eb183ca0c7993f9f69337a47f7306a81 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi @@ -43,6 +43,12 @@ no-map; }; + /* 32 MiB reserved for ARM Trusted Firmware (BL32) */ + secmon_reserved_bl32: secmon@5300000 { + reg = <0x0 0x05300000 0x0 0x2000000>; + no-map; + }; + linux,cma { compatible = "shared-dma-pool"; reusable; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi index a350fee1264d7b1dfe2cf83d3a8a41bd16efebc3..a4d34398da358c054749e6599bb445dee01ab3e9 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi @@ -6,6 +6,7 @@ */ #include "meson-gxbb.dtsi" +#include / { aliases { @@ -64,6 +65,7 @@ regulator-name = "VDDIO_AO18"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; + regulator-always-on; }; vcc_3v3: regulator-vcc_3v3 { @@ -161,6 +163,7 @@ status = "okay"; pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>; pinctrl-names = "default"; + hdmi-supply = <&vddio_ao18>; }; &hdmi_tx_tmds_port { diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts index 5ab139a34c0186dd522b063000ef7f90e315a9c5..c21178e9c6064fc49a67219878ccfbac8004ee7e 100644 --- a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts +++ b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts @@ -203,14 +203,6 @@ regulator-always-on; }; - reserved-memory { - /* TEE Reserved Memory */ - bl32_reserved: bl32@5000000 { - reg = <0x0 0x05300000 0x0 0x2000000>; - no-map; - }; - }; - sdio_pwrseq: sdio-pwrseq { compatible = "mmc-pwrseq-simple"; reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts index 13cdc958ba3ea2182a60cfad0e29be68bb0fb567..71858c9376c25d87330e236b089c85a4c294e408 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts +++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts @@ -261,11 +261,6 @@ vcc-supply = <&sb_3v3>; }; - rtc@51 { - compatible = "nxp,pcf2129"; - reg = <0x51>; - }; - eeprom@56 { compatible = "atmel,24c512"; reg = <0x56>; @@ -307,6 +302,15 @@ }; +&i2c1 { + status = "okay"; + + rtc@51 { + compatible = "nxp,pcf2129"; + reg = <0x51>; + }; +}; + &enetc_port1 { phy-handle = <&qds_phy1>; phy-connection-type = "rgmii-id"; diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi 
b/arch/arm64/boot/dts/freescale/imx8mq.dtsi index c86cf786f4061f0cc4df9b3660f79cb4b2851b4c..8d0d41973ff54288b91d0f8841129787e0acbc6c 100644 --- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi @@ -524,7 +524,7 @@ assigned-clock-rates = <0>, <0>, <0>, <594000000>; status = "disabled"; - port@0 { + port { lcdif_mipi_dsi: endpoint { remote-endpoint = <&mipi_dsi_lcdif_in>; }; diff --git a/arch/arm64/boot/dts/marvell/cn9130.dtsi b/arch/arm64/boot/dts/marvell/cn9130.dtsi index a2b7e5ec979d325d935a42bdda4caa13dc565f09..327b04134134ff1b0dd19ce3cdff2b42f96beddb 100644 --- a/arch/arm64/boot/dts/marvell/cn9130.dtsi +++ b/arch/arm64/boot/dts/marvell/cn9130.dtsi @@ -11,6 +11,13 @@ model = "Marvell Armada CN9130 SoC"; compatible = "marvell,cn9130", "marvell,armada-ap807-quad", "marvell,armada-ap807"; + + aliases { + gpio1 = &cp0_gpio1; + gpio2 = &cp0_gpio2; + spi1 = &cp0_spi0; + spi2 = &cp0_spi1; + }; }; /* @@ -35,3 +42,11 @@ #undef CP11X_PCIE0_BASE #undef CP11X_PCIE1_BASE #undef CP11X_PCIE2_BASE + +&cp0_gpio1 { + status = "okay"; +}; + +&cp0_gpio2 { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/nvidia/tegra186.dtsi b/arch/arm64/boot/dts/nvidia/tegra186.dtsi index 0c46ab7bbbf376eecadb9f25b1b74abdd7ed2097..eec6418ecdb1a3cb6fecaec1cae7e4a88dbd9d54 100644 --- a/arch/arm64/boot/dts/nvidia/tegra186.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra186.dtsi @@ -985,7 +985,7 @@ ccplex@e000000 { compatible = "nvidia,tegra186-ccplex-cluster"; - reg = <0x0 0x0e000000 0x0 0x3fffff>; + reg = <0x0 0x0e000000 0x0 0x400000>; nvidia,bpmp = <&bpmp>; }; diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi index 9b5007e5f790fd802eefc6e15c7643cc423552a6..05cf606b85c9fd8907e851a718cc6e0e635cc78d 100644 --- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi @@ -782,13 +782,12 @@ reg = <0x3510000 0x10000>; interrupts = ; clocks = <&bpmp TEGRA194_CLK_HDA>, - <&bpmp TEGRA194_CLK_HDA2CODEC_2X>, - <&bpmp TEGRA194_CLK_HDA2HDMICODEC>; - clock-names = "hda", "hda2codec_2x", "hda2hdmi"; + <&bpmp TEGRA194_CLK_HDA2HDMICODEC>, + <&bpmp TEGRA194_CLK_HDA2CODEC_2X>; + clock-names = "hda", "hda2hdmi", "hda2codec_2x"; resets = <&bpmp TEGRA194_RESET_HDA>, - <&bpmp TEGRA194_RESET_HDA2CODEC_2X>, <&bpmp TEGRA194_RESET_HDA2HDMICODEC>; - reset-names = "hda", "hda2codec_2x", "hda2hdmi"; + reset-names = "hda", "hda2hdmi"; power-domains = <&bpmp TEGRA194_POWER_DOMAIN_DISP>; interconnects = <&mc TEGRA194_MEMORY_CLIENT_HDAR &emc>, <&mc TEGRA194_MEMORY_CLIENT_HDAW &emc>; diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi index 9cb8f7a052df947782f7c9236df00bcc8e48c105..2a1f03cdb52c77bb1bb9d9aa9850dde1c27b0227 100644 --- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi +++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi @@ -221,7 +221,7 @@ interrupts = ; gpio-controller; #gpio-cells = <2>; - gpio-ranges = <&tlmm 0 80>; + gpio-ranges = <&tlmm 0 0 80>; interrupt-controller; #interrupt-cells = <2>; diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi index b1ffc056eea0bbef80fbe4a6843708debae9ed10..291276a38d7cd77df5609b89c6134af5ff9a3beb 100644 --- a/arch/arm64/boot/dts/qcom/msm8916.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi @@ -18,8 +18,8 @@ #size-cells = <2>; aliases { - sdhc1 = &sdhc_1; /* SDC1 eMMC slot */ - sdhc2 = &sdhc_2; /* SDC2 SD card slot */ + mmc0 = &sdhc_1; /* SDC1 eMMC slot */ + mmc1 = &sdhc_2; /* SDC2 SD card slot */ }; chosen { }; diff --git 
a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi index eef17434d12ae624e42fb020807a22590bd1be31..ef5d03a1506930262c2d6308b255f56a10e2e834 100644 --- a/arch/arm64/boot/dts/qcom/msm8996.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi @@ -645,9 +645,6 @@ nvmem-cells = <&gpu_speed_bin>; nvmem-cell-names = "speed_bin"; - qcom,gpu-quirk-two-pass-use-wfi; - qcom,gpu-quirk-fault-detect-mask; - operating-points-v2 = <&gpu_opp_table>; gpu_opp_table: opp-table { diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts index ad6561843ba28489effeb34e75aecabbc4f91d9e..e080c317b5e3dd52090daaae94f2e9ae866ec6ca 100644 --- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts +++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts @@ -365,6 +365,10 @@ dai@1 { reg = <1>; }; + + dai@2 { + reg = <2>; + }; }; &sound { @@ -377,6 +381,7 @@ "SpkrLeft IN", "SPK1 OUT", "SpkrRight IN", "SPK2 OUT", "MM_DL1", "MultiMedia1 Playback", + "MM_DL3", "MultiMedia3 Playback", "MultiMedia2 Capture", "MM_UL2"; mm1-dai-link { @@ -393,6 +398,13 @@ }; }; + mm3-dai-link { + link-name = "MultiMedia3"; + cpu { + sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA3>; + }; + }; + slim-dai-link { link-name = "SLIM Playback"; cpu { @@ -422,6 +434,21 @@ sound-dai = <&wcd9340 1>; }; }; + + slim-wcd-dai-link { + link-name = "SLIM WCD Playback"; + cpu { + sound-dai = <&q6afedai SLIMBUS_1_RX>; + }; + + platform { + sound-dai = <&q6routing>; + }; + + codec { + sound-dai = <&wcd9340 2>; + }; + }; }; &tlmm { diff --git a/arch/arm64/boot/dts/renesas/cat875.dtsi b/arch/arm64/boot/dts/renesas/cat875.dtsi index 801ea54b027c43d96a78ea29c3e7589a0ff15419..20f8adc635e72aa0ea0e20ed0a009120b9660003 100644 --- a/arch/arm64/boot/dts/renesas/cat875.dtsi +++ b/arch/arm64/boot/dts/renesas/cat875.dtsi @@ -18,6 +18,7 @@ pinctrl-names = "default"; renesas,no-ether-link; phy-handle = <&phy0>; + phy-mode = "rgmii-id"; status = "okay"; phy0: ethernet-phy@0 { diff --git a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi index 5832ad830ed149c06c371bc3f39734e2a76e5905..1ab9f9604af6c569788745ccd829df182407f2d5 100644 --- a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi @@ -25,7 +25,7 @@ #size-cells = <1>; ranges = <0x00 0x00 0x00100000 0x1c000>; - serdes_ln_ctrl: serdes-ln-ctrl@4080 { + serdes_ln_ctrl: mux-controller@4080 { compatible = "mmio-mux"; #mux-control-cells = <1>; mux-reg-masks = <0x4080 0x3>, <0x4084 0x3>, /* SERDES0 lane0/1 select */ diff --git a/arch/arm64/boot/dts/ti/k3-j7200.dtsi b/arch/arm64/boot/dts/ti/k3-j7200.dtsi index 66169bcf7c9a408e827dd21ffd51fe1617bd77d7..03a9623f0f9562e902184cc68490a27b175aac81 100644 --- a/arch/arm64/boot/dts/ti/k3-j7200.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j7200.dtsi @@ -60,7 +60,7 @@ i-cache-sets = <256>; d-cache-size = <0x8000>; d-cache-line-size = <64>; - d-cache-sets = <128>; + d-cache-sets = <256>; next-level-cache = <&L2_0>; }; @@ -74,7 +74,7 @@ i-cache-sets = <256>; d-cache-size = <0x8000>; d-cache-line-size = <64>; - d-cache-sets = <128>; + d-cache-sets = <256>; next-level-cache = <&L2_0>; }; }; @@ -84,7 +84,7 @@ cache-level = <2>; cache-size = <0x100000>; cache-line-size = <64>; - cache-sets = <2048>; + cache-sets = <1024>; next-level-cache = <&msmc_l3>; }; diff --git a/arch/arm64/boot/dts/ti/k3-j721e.dtsi b/arch/arm64/boot/dts/ti/k3-j721e.dtsi index cc483f7344af3132aaa0059eb3e5b6c38ea19a5a..a199227327ed299907f84fb925d9ca52832d7724 100644 --- 
a/arch/arm64/boot/dts/ti/k3-j721e.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j721e.dtsi @@ -61,7 +61,7 @@ i-cache-sets = <256>; d-cache-size = <0x8000>; d-cache-line-size = <64>; - d-cache-sets = <128>; + d-cache-sets = <256>; next-level-cache = <&L2_0>; }; @@ -75,7 +75,7 @@ i-cache-sets = <256>; d-cache-size = <0x8000>; d-cache-line-size = <64>; - d-cache-sets = <128>; + d-cache-sets = <256>; next-level-cache = <&L2_0>; }; }; @@ -85,7 +85,7 @@ cache-level = <2>; cache-size = <0x100000>; cache-line-size = <64>; - cache-sets = <2048>; + cache-sets = <1024>; next-level-cache = <&msmc_l3>; }; diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c index 88ff471b0bce5f2c49cb49cd38a3603fc952b2aa..58ae55d78a203e3fcb156b7c9973c62552f696df 100644 --- a/arch/arm64/kernel/perf_callchain.c +++ b/arch/arm64/kernel/perf_callchain.c @@ -102,7 +102,9 @@ compat_user_backtrace(struct compat_frame_tail __user *tail, void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); + + if (guest_cbs && guest_cbs->is_in_guest()) { /* We don't support guest os callchain now */ return; } @@ -147,9 +149,10 @@ static bool callchain_trace(void *data, unsigned long pc) void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); struct stackframe frame; - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { + if (guest_cbs && guest_cbs->is_in_guest()) { /* We don't support guest os callchain now */ return; } @@ -160,18 +163,21 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, unsigned long perf_instruction_pointer(struct pt_regs *regs) { - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) - return perf_guest_cbs->get_guest_ip(); + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); + + if (guest_cbs && guest_cbs->is_in_guest()) + return guest_cbs->get_guest_ip(); return instruction_pointer(regs); } unsigned long perf_misc_flags(struct pt_regs *regs) { + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); int misc = 0; - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { - if (perf_guest_cbs->is_user_mode()) + if (guest_cbs && guest_cbs->is_in_guest()) { + if (guest_cbs->is_user_mode()) misc |= PERF_RECORD_MISC_GUEST_USER; else misc |= PERF_RECORD_MISC_GUEST_KERNEL; diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 4999caff328187316a5cb21c13d9f5615759fa73..22275d8518eb39fa8bd4287deaad832453cfa0fb 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -511,34 +511,26 @@ static void entry_task_switch(struct task_struct *next) /* * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT. - * Assuming the virtual counter is enabled at the beginning of times: - * - * - disable access when switching from a 64bit task to a 32bit task - * - enable access when switching from a 32bit task to a 64bit task + * Ensure access is disabled when switching to a 32bit task, ensure + * access is enabled when switching to a 64bit task. 
*/ -static void erratum_1418040_thread_switch(struct task_struct *prev, - struct task_struct *next) +static void erratum_1418040_thread_switch(struct task_struct *next) { - bool prev32, next32; - u64 val; - - if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040)) - return; - - prev32 = is_compat_thread(task_thread_info(prev)); - next32 = is_compat_thread(task_thread_info(next)); - - if (prev32 == next32 || !this_cpu_has_cap(ARM64_WORKAROUND_1418040)) + if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) || + !this_cpu_has_cap(ARM64_WORKAROUND_1418040)) return; - val = read_sysreg(cntkctl_el1); - - if (!next32) - val |= ARCH_TIMER_USR_VCT_ACCESS_EN; + if (is_compat_thread(task_thread_info(next))) + sysreg_clear_set(cntkctl_el1, ARCH_TIMER_USR_VCT_ACCESS_EN, 0); else - val &= ~ARCH_TIMER_USR_VCT_ACCESS_EN; + sysreg_clear_set(cntkctl_el1, 0, ARCH_TIMER_USR_VCT_ACCESS_EN); +} - write_sysreg(val, cntkctl_el1); +static void erratum_1418040_new_exec(void) +{ + preempt_disable(); + erratum_1418040_thread_switch(current); + preempt_enable(); } /* @@ -556,7 +548,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev, entry_task_switch(next); uao_thread_switch(next); ssbs_thread_switch(next); - erratum_1418040_thread_switch(prev, next); + erratum_1418040_thread_switch(next); /* * Complete any pending TLB or cache maintenance on this CPU in case @@ -622,6 +614,7 @@ void arch_setup_new_exec(void) current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0; ptrauth_thread_init_user(current); + erratum_1418040_new_exec(); if (task_spec_ssb_noexec(current)) { arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS, diff --git a/arch/arm64/lib/clear_page.S b/arch/arm64/lib/clear_page.S index 073acbf02a7c842520eeb42df0df4f2dd3a15480..1fd5d790ab800321786324dbc4ea8804920e07f1 100644 --- a/arch/arm64/lib/clear_page.S +++ b/arch/arm64/lib/clear_page.S @@ -14,8 +14,9 @@ * Parameters: * x0 - dest */ -SYM_FUNC_START(clear_page) +SYM_FUNC_START_PI(clear_page) mrs x1, dczid_el0 + tbnz x1, #4, 2f /* Branch if DC ZVA is prohibited */ and w1, w1, #0xf mov x2, #4 lsl x1, x2, x1 @@ -25,5 +26,14 @@ SYM_FUNC_START(clear_page) tst x0, #(PAGE_SIZE - 1) b.ne 1b ret -SYM_FUNC_END(clear_page) + +2: stnp xzr, xzr, [x0] + stnp xzr, xzr, [x0, #16] + stnp xzr, xzr, [x0, #32] + stnp xzr, xzr, [x0, #48] + add x0, x0, #64 + tst x0, #(PAGE_SIZE - 1) + b.ne 2b + ret +SYM_FUNC_END_PI(clear_page) EXPORT_SYMBOL(clear_page) diff --git a/arch/arm64/lib/copy_page.S b/arch/arm64/lib/copy_page.S index e7a793961408d09c5e36991d45e2156f85faaca5..29144f4cd4492741729f7189f17e8edb86f7e39f 100644 --- a/arch/arm64/lib/copy_page.S +++ b/arch/arm64/lib/copy_page.S @@ -17,7 +17,7 @@ * x0 - dest * x1 - src */ -SYM_FUNC_START(copy_page) +SYM_FUNC_START_PI(copy_page) alternative_if ARM64_HAS_NO_HW_PREFETCH // Prefetch three cache lines ahead. 
prfm pldl1strm, [x1, #128] @@ -75,5 +75,5 @@ alternative_else_nop_endif stnp x16, x17, [x0, #112 - 256] ret -SYM_FUNC_END(copy_page) +SYM_FUNC_END_PI(copy_page) EXPORT_SYMBOL(copy_page) diff --git a/arch/csky/kernel/perf_callchain.c b/arch/csky/kernel/perf_callchain.c index ab55e98ee8f62dc3f98f4d6529d7c05775670878..35318a635a5fae7b6de24723f6f43338bab85b67 100644 --- a/arch/csky/kernel/perf_callchain.c +++ b/arch/csky/kernel/perf_callchain.c @@ -86,10 +86,11 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry, void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); unsigned long fp = 0; /* C-SKY does not support virtualization. */ - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) + if (guest_cbs && guest_cbs->is_in_guest()) return; fp = regs->regs[4]; @@ -110,10 +111,11 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry, void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); struct stackframe fr; /* C-SKY does not support virtualization. */ - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { + if (guest_cbs && guest_cbs->is_in_guest()) { pr_warn("C-SKY does not support perf in guest mode!"); return; } diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 23d756fe0fd6c3b22779ebe958b967f901f83e49..3442bdd4314cbac824ae468782de331326aa8a86 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -1985,6 +1985,10 @@ config SYS_HAS_CPU_MIPS64_R1 config SYS_HAS_CPU_MIPS64_R2 bool +config SYS_HAS_CPU_MIPS64_R5 + bool + select ARCH_HAS_SYNC_DMA_FOR_CPU if DMA_NONCOHERENT + config SYS_HAS_CPU_MIPS64_R6 bool select ARCH_HAS_SYNC_DMA_FOR_CPU if DMA_NONCOHERENT @@ -2146,7 +2150,7 @@ config CPU_SUPPORTS_ADDRWINCFG bool config CPU_SUPPORTS_HUGEPAGES bool - depends on !(32BIT && (ARCH_PHYS_ADDR_T_64BIT || EVA)) + depends on !(32BIT && (PHYS_ADDR_T_64BIT || EVA)) config MIPS_PGD_C0_CONTEXT bool default y if 64BIT && (CPU_MIPSR2 || CPU_MIPSR6) && !CPU_XLP diff --git a/arch/mips/bcm63xx/clk.c b/arch/mips/bcm63xx/clk.c index aba6e2d6a736c26a0f2e9c23d28bc95a4ee0446a..dcfa0ea912fe1633a10d0acbb87fe1cb85612ee4 100644 --- a/arch/mips/bcm63xx/clk.c +++ b/arch/mips/bcm63xx/clk.c @@ -387,6 +387,12 @@ struct clk *clk_get_parent(struct clk *clk) } EXPORT_SYMBOL(clk_get_parent); +int clk_set_parent(struct clk *clk, struct clk *parent) +{ + return 0; +} +EXPORT_SYMBOL(clk_set_parent); + unsigned long clk_get_rate(struct clk *clk) { if (!clk) diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c index d56e9b9d2e434d72ec1c8ed398dd70dac8e7b6e0..a994022e32c9f0ee0ed4f91612d5fbd26071e218 100644 --- a/arch/mips/cavium-octeon/octeon-platform.c +++ b/arch/mips/cavium-octeon/octeon-platform.c @@ -328,6 +328,7 @@ static int __init octeon_ehci_device_init(void) pd->dev.platform_data = &octeon_ehci_pdata; octeon_ehci_hw_start(&pd->dev); + put_device(&pd->dev); return ret; } @@ -391,6 +392,7 @@ static int __init octeon_ohci_device_init(void) pd->dev.platform_data = &octeon_ohci_pdata; octeon_ohci_hw_start(&pd->dev); + put_device(&pd->dev); return ret; } diff --git a/arch/mips/cavium-octeon/octeon-usb.c b/arch/mips/cavium-octeon/octeon-usb.c index 950e6c6e862973b421393f9afcc02f9fdb1f3f0f..fa87e5aa1811d8876713e048d4ccc2fd8e89fd57 100644 --- a/arch/mips/cavium-octeon/octeon-usb.c +++ b/arch/mips/cavium-octeon/octeon-usb.c @@ -544,6 
+544,7 @@ static int __init dwc3_octeon_device_init(void) devm_iounmap(&pdev->dev, base); devm_release_mem_region(&pdev->dev, res->start, resource_size(res)); + put_device(&pdev->dev); } } while (node != NULL); diff --git a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h index 87a5bfbf8cfe9b925c351f42de612be1b8dcaf82..28572ddfb004a2b384d4ac33ebb865bd4c4e413e 100644 --- a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h +++ b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h @@ -36,7 +36,7 @@ nop /* Loongson-3A R2/R3 */ andi t0, (PRID_IMP_MASK | PRID_REV_MASK) - slti t0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0) + slti t0, t0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0) bnez t0, 2f nop 1: @@ -71,7 +71,7 @@ nop /* Loongson-3A R2/R3 */ andi t0, (PRID_IMP_MASK | PRID_REV_MASK) - slti t0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0) + slti t0, t0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0) bnez t0, 2f nop 1: diff --git a/arch/mips/include/asm/octeon/cvmx-bootinfo.h b/arch/mips/include/asm/octeon/cvmx-bootinfo.h index c114a7ba0badd28ddf9dfa41d600774e03d943ec..e77e8b7c00838a1fc1ff30176ac6a7d5d01386dd 100644 --- a/arch/mips/include/asm/octeon/cvmx-bootinfo.h +++ b/arch/mips/include/asm/octeon/cvmx-bootinfo.h @@ -317,7 +317,7 @@ enum cvmx_chip_types_enum { /* Functions to return string based on type */ #define ENUM_BRD_TYPE_CASE(x) \ - case x: return(#x + 16); /* Skip CVMX_BOARD_TYPE_ */ + case x: return (&#x[16]); /* Skip CVMX_BOARD_TYPE_ */ static inline const char *cvmx_board_type_to_string(enum cvmx_board_types_enum type) { @@ -408,7 +408,7 @@ static inline const char *cvmx_board_type_to_string(enum } #define ENUM_CHIP_TYPE_CASE(x) \ - case x: return(#x + 15); /* Skip CVMX_CHIP_TYPE */ + case x: return (&#x[15]); /* Skip CVMX_CHIP_TYPE */ static inline const char *cvmx_chip_type_to_string(enum cvmx_chip_types_enum type) { diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c index 4916cccf378fdd1d446f6ed23420828133e73563..7a623684d9b5ed415e167af74e0b9af22ea5d74c 100644 --- a/arch/mips/lantiq/clk.c +++ b/arch/mips/lantiq/clk.c @@ -164,6 +164,12 @@ struct clk *clk_get_parent(struct clk *clk) } EXPORT_SYMBOL(clk_get_parent); +int clk_set_parent(struct clk *clk, struct clk *parent) +{ + return 0; +} +EXPORT_SYMBOL(clk_set_parent); + static inline u32 get_counter_resolution(void) { u32 res; diff --git a/arch/nds32/kernel/perf_event_cpu.c b/arch/nds32/kernel/perf_event_cpu.c index 0ce6f9f307e6ad34188ced46a9f8f5c71fc1c564..f3879196078133d6645f667d0b900cfc41deb1f5 100644 --- a/arch/nds32/kernel/perf_event_cpu.c +++ b/arch/nds32/kernel/perf_event_cpu.c @@ -1363,6 +1363,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); unsigned long fp = 0; unsigned long gp = 0; unsigned long lp = 0; @@ -1371,7 +1372,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, leaf_fp = 0; - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { + if (guest_cbs && guest_cbs->is_in_guest()) { /* We don't support guest os callchain now */ return; } @@ -1479,9 +1480,10 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); struct stackframe fr; - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { + if (guest_cbs && guest_cbs->is_in_guest()) { /* We don't support guest os 
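Among the hunks above, the cvmx-bootinfo change rewrites `#x + 16` as `&#x[16]`: both skip a fixed prefix of the stringified enumerator, but the array form avoids the string-literal-plus-integer idiom that some compilers warn about. A self-contained example of the same macro trick; the enum and its 11-character prefix are hypothetical.

/* --- illustrative sketch, not part of the patch --- */
#include <stdio.h>

enum board_type {
        BOARD_TYPE_NULL,
        BOARD_TYPE_SIM,
};

/* #x stringifies the enumerator; &"..."[11] skips the "BOARD_TYPE_" prefix */
#define BRD_CASE(x) case x: return (&#x[11]);

static const char *board_name(enum board_type t)
{
        switch (t) {
        BRD_CASE(BOARD_TYPE_NULL)
        BRD_CASE(BOARD_TYPE_SIM)
        }
        return "unknown";
}

int main(void)
{
        printf("%s\n", board_name(BOARD_TYPE_SIM));     /* prints "SIM" */
        return 0;
}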
callchain now */ return; } @@ -1493,20 +1495,23 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, unsigned long perf_instruction_pointer(struct pt_regs *regs) { + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); + /* However, NDS32 does not support virtualization */ - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) - return perf_guest_cbs->get_guest_ip(); + if (guest_cbs && guest_cbs->is_in_guest()) + return guest_cbs->get_guest_ip(); return instruction_pointer(regs); } unsigned long perf_misc_flags(struct pt_regs *regs) { + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); int misc = 0; /* However, NDS32 does not support virtualization */ - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { - if (perf_guest_cbs->is_user_mode()) + if (guest_cbs && guest_cbs->is_in_guest()) { + if (guest_cbs->is_user_mode()) misc |= PERF_RECORD_MISC_GUEST_USER; else misc |= PERF_RECORD_MISC_GUEST_KERNEL; diff --git a/arch/openrisc/include/asm/syscalls.h b/arch/openrisc/include/asm/syscalls.h index 3a7eeae6f56a8259714670dd00154ee8da8657d4..aa1c7e98722e3a6bfb39903d59162bfb1c611da9 100644 --- a/arch/openrisc/include/asm/syscalls.h +++ b/arch/openrisc/include/asm/syscalls.h @@ -22,9 +22,11 @@ asmlinkage long sys_or1k_atomic(unsigned long type, unsigned long *v1, asmlinkage long __sys_clone(unsigned long clone_flags, unsigned long newsp, void __user *parent_tid, void __user *child_tid, int tls); +asmlinkage long __sys_clone3(struct clone_args __user *uargs, size_t size); asmlinkage long __sys_fork(void); #define sys_clone __sys_clone +#define sys_clone3 __sys_clone3 #define sys_fork __sys_fork #endif /* __ASM_OPENRISC_SYSCALLS_H */ diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S index 98e4f97db51596050fc7d02416ebf655286f9366..b42d32d79b2e614d9ea62fa0117f2fee49846c75 100644 --- a/arch/openrisc/kernel/entry.S +++ b/arch/openrisc/kernel/entry.S @@ -1170,6 +1170,11 @@ ENTRY(__sys_clone) l.j _fork_save_extra_regs_and_call l.nop +ENTRY(__sys_clone3) + l.movhi r29,hi(sys_clone3) + l.j _fork_save_extra_regs_and_call + l.ori r29,r29,lo(sys_clone3) + ENTRY(__sys_fork) l.movhi r29,hi(sys_fork) l.ori r29,r29,lo(sys_fork) diff --git a/arch/parisc/include/asm/special_insns.h b/arch/parisc/include/asm/special_insns.h index a303ae9a77f41fe8ceef5490ab2be5b394fc199d..16ee41e77174f14e016dac77ab6e512c49b02816 100644 --- a/arch/parisc/include/asm/special_insns.h +++ b/arch/parisc/include/asm/special_insns.h @@ -2,28 +2,32 @@ #ifndef __PARISC_SPECIAL_INSNS_H #define __PARISC_SPECIAL_INSNS_H -#define lpa(va) ({ \ - unsigned long pa; \ - __asm__ __volatile__( \ - "copy %%r0,%0\n\t" \ - "lpa %%r0(%1),%0" \ - : "=r" (pa) \ - : "r" (va) \ - : "memory" \ - ); \ - pa; \ +#define lpa(va) ({ \ + unsigned long pa; \ + __asm__ __volatile__( \ + "copy %%r0,%0\n" \ + "8:\tlpa %%r0(%1),%0\n" \ + "9:\n" \ + ASM_EXCEPTIONTABLE_ENTRY(8b, 9b) \ + : "=&r" (pa) \ + : "r" (va) \ + : "memory" \ + ); \ + pa; \ }) -#define lpa_user(va) ({ \ - unsigned long pa; \ - __asm__ __volatile__( \ - "copy %%r0,%0\n\t" \ - "lpa %%r0(%%sr3,%1),%0" \ - : "=r" (pa) \ - : "r" (va) \ - : "memory" \ - ); \ - pa; \ +#define lpa_user(va) ({ \ + unsigned long pa; \ + __asm__ __volatile__( \ + "copy %%r0,%0\n" \ + "8:\tlpa %%r0(%%sr3,%1),%0\n" \ + "9:\n" \ + ASM_EXCEPTIONTABLE_ENTRY(8b, 9b) \ + : "=&r" (pa) \ + : "r" (va) \ + : "memory" \ + ); \ + pa; \ }) #define mfctl(reg) ({ \ diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index 
43f56335759a44a4c9347e49c1e0b99cdff0d31d..269b737d26299cf09dda74abb22266f457df0766 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -784,7 +784,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) * unless pagefault_disable() was called before. */ - if (fault_space == 0 && !faulthandler_disabled()) + if (faulthandler_disabled() || fault_space == 0) { /* Clean up and return if in exception table. */ if (fixup_exception(regs)) diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c index 237d20dd5622de53fb207ec5888f972c7237a9e3..286cec4d86d7b0165a7b81ffcfdc48512a568acf 100644 --- a/arch/parisc/kernel/unaligned.c +++ b/arch/parisc/kernel/unaligned.c @@ -340,7 +340,7 @@ static int emulate_stw(struct pt_regs *regs, int frreg, int flop) : "r" (val), "r" (regs->ior), "r" (regs->isr) : "r19", "r20", "r21", "r22", "r1", FIXUP_BRANCH_CLOBBER ); - return 0; + return ret; } static int emulate_std(struct pt_regs *regs, int frreg, int flop) { @@ -397,7 +397,7 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop) __asm__ __volatile__ ( " mtsp %4, %%sr1\n" " zdep %2, 29, 2, %%r19\n" -" dep %%r0, 31, 2, %2\n" +" dep %%r0, 31, 2, %3\n" " mtsar %%r19\n" " zvdepi -2, 32, %%r19\n" "1: ldw 0(%%sr1,%3),%%r20\n" @@ -409,7 +409,7 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop) " andcm %%r21, %%r19, %%r21\n" " or %1, %%r20, %1\n" " or %2, %%r21, %2\n" -"3: stw %1,0(%%sr1,%1)\n" +"3: stw %1,0(%%sr1,%3)\n" "4: stw %%r1,4(%%sr1,%3)\n" "5: stw %2,8(%%sr1,%3)\n" " copy %%r0, %0\n" @@ -596,7 +596,6 @@ void handle_unaligned(struct pt_regs *regs) ret = ERR_NOTHANDLED; /* "undefined", but lets kill them. */ break; } -#ifdef CONFIG_PA20 switch (regs->iir & OPCODE2_MASK) { case OPCODE_FLDD_L: @@ -607,22 +606,23 @@ void handle_unaligned(struct pt_regs *regs) flop=1; ret = emulate_std(regs, R2(regs->iir),1); break; +#ifdef CONFIG_PA20 case OPCODE_LDD_L: ret = emulate_ldd(regs, R2(regs->iir),0); break; case OPCODE_STD_L: ret = emulate_std(regs, R2(regs->iir),0); break; - } #endif + } switch (regs->iir & OPCODE3_MASK) { case OPCODE_FLDW_L: flop=1; - ret = emulate_ldw(regs, R2(regs->iir),0); + ret = emulate_ldw(regs, R2(regs->iir), 1); break; case OPCODE_LDW_M: - ret = emulate_ldw(regs, R2(regs->iir),1); + ret = emulate_ldw(regs, R2(regs->iir), 0); break; case OPCODE_FSTW_L: diff --git a/arch/parisc/lib/iomap.c b/arch/parisc/lib/iomap.c index f03adb1999e77ed05f1282be1b24aa5c601403ce..e362d6a147311264462d2c2789d26ea39822da3a 100644 --- a/arch/parisc/lib/iomap.c +++ b/arch/parisc/lib/iomap.c @@ -346,6 +346,16 @@ u64 ioread64be(const void __iomem *addr) return *((u64 *)addr); } +u64 ioread64_lo_hi(const void __iomem *addr) +{ + u32 low, high; + + low = ioread32(addr); + high = ioread32(addr + sizeof(u32)); + + return low + ((u64)high << 32); +} + u64 ioread64_hi_lo(const void __iomem *addr) { u32 low, high; @@ -419,6 +429,12 @@ void iowrite64be(u64 datum, void __iomem *addr) } } +void iowrite64_lo_hi(u64 val, void __iomem *addr) +{ + iowrite32(val, addr); + iowrite32(val >> 32, addr + sizeof(u32)); +} + void iowrite64_hi_lo(u64 val, void __iomem *addr) { iowrite32(val >> 32, addr + sizeof(u32)); @@ -527,6 +543,7 @@ EXPORT_SYMBOL(ioread32); EXPORT_SYMBOL(ioread32be); EXPORT_SYMBOL(ioread64); EXPORT_SYMBOL(ioread64be); +EXPORT_SYMBOL(ioread64_lo_hi); EXPORT_SYMBOL(ioread64_hi_lo); EXPORT_SYMBOL(iowrite8); EXPORT_SYMBOL(iowrite16); @@ -535,6 +552,7 @@ EXPORT_SYMBOL(iowrite32); EXPORT_SYMBOL(iowrite32be); EXPORT_SYMBOL(iowrite64); 
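The iomap hunks above add the missing ioread64_lo_hi() and iowrite64_lo_hi() helpers, which split a 64-bit MMIO access into two 32-bit accesses with the low word at the lower address. A plain-memory sketch of the same composition (no real MMIO is touched here):

/* --- illustrative sketch, not part of the patch --- */
#include <stdint.h>
#include <stdio.h>

static uint64_t read64_lo_hi(const volatile uint32_t *p)
{
        uint32_t low = p[0];
        uint32_t high = p[1];

        return low + ((uint64_t)high << 32);
}

static void write64_lo_hi(uint64_t val, volatile uint32_t *p)
{
        p[0] = (uint32_t)val;           /* low word first */
        p[1] = (uint32_t)(val >> 32);   /* then the high word */
}

int main(void)
{
        uint32_t regs[2];

        write64_lo_hi(0x1122334455667788ULL, regs);
        printf("%#llx\n", (unsigned long long)read64_lo_hi(regs));
        return 0;
}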
EXPORT_SYMBOL(iowrite64be); +EXPORT_SYMBOL(iowrite64_lo_hi); EXPORT_SYMBOL(iowrite64_hi_lo); EXPORT_SYMBOL(ioread8_rep); EXPORT_SYMBOL(ioread16_rep); diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 8f10cc6ee0fcea554aa47356047ef911ce0b4709..319afa00cdf7bfa4686c2bcb5f72750c746d5cf3 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -346,9 +346,9 @@ static void __init setup_bootmem(void) static bool kernel_set_to_readonly; -static void __init map_pages(unsigned long start_vaddr, - unsigned long start_paddr, unsigned long size, - pgprot_t pgprot, int force) +static void __ref map_pages(unsigned long start_vaddr, + unsigned long start_paddr, unsigned long size, + pgprot_t pgprot, int force) { pmd_t *pmd; pte_t *pg_table; @@ -458,7 +458,7 @@ void __init set_kernel_text_rw(int enable_read_write) flush_tlb_all(); } -void __ref free_initmem(void) +void free_initmem(void) { unsigned long init_begin = (unsigned long)__init_begin; unsigned long init_end = (unsigned long)__init_end; @@ -472,7 +472,6 @@ void __ref free_initmem(void) /* The init text pages are marked R-X. We have to * flush the icache and mark them RW- * - * This is tricky, because map_pages is in the init section. * Do a dummy remap of the data section first (the data * section is already PAGE_KERNEL) to pull in the TLB entries * for map_kernel */ diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi index c90702b04a530f798886e71be187cf36574d94b1..48e5cd61599c6ba0409f106fda093bf19f40b5a9 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi @@ -79,6 +79,7 @@ fman0: fman@400000 { #size-cells = <0>; compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; reg = <0xfc000 0x1000>; + fsl,erratum-a009885; }; xmdio0: mdio@fd000 { @@ -86,6 +87,7 @@ fman0: fman@400000 { #size-cells = <0>; compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; reg = <0xfd000 0x1000>; + fsl,erratum-a009885; }; }; diff --git a/arch/powerpc/include/asm/book3s/32/mmu-hash.h b/arch/powerpc/include/asm/book3s/32/mmu-hash.h index a8982d52f6b1d6c850600d343c632961e2a8502c..cbde06d0fb380c02ccd1a903c3213abd77efbfd9 100644 --- a/arch/powerpc/include/asm/book3s/32/mmu-hash.h +++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h @@ -102,6 +102,8 @@ extern s32 patch__hash_page_B, patch__hash_page_C; extern s32 patch__flush_hash_A0, patch__flush_hash_A1, patch__flush_hash_A2; extern s32 patch__flush_hash_B; +int __init find_free_bat(void); +unsigned int bat_block_size(unsigned long base, unsigned long top); #endif /* !__ASSEMBLY__ */ /* We happily ignore the smaller BATs on 601, we don't actually use diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index 523d3e6e2400917f95bbcda21cec6b6b8b10d453..94c5c66231a8c394b00ab24127e9041ee95c9ef6 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -142,6 +142,7 @@ static inline bool pte_user(pte_t pte) #ifndef __ASSEMBLY__ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot); +void unmap_kernel_page(unsigned long va); #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 4a3dca0271f1e71e388bd1e400a8fd35c354c497..71e2c524f1eead2d4c5e5c27a26023812e8b5869 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -1054,6 +1054,8 @@ static inline 
int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t p return hash__map_kernel_page(ea, pa, prot); } +void unmap_kernel_page(unsigned long va); + static inline int __meminit vmemmap_create_mapping(unsigned long start, unsigned long page_size, unsigned long phys) diff --git a/arch/powerpc/include/asm/cpu_setup_power.h b/arch/powerpc/include/asm/cpu_setup_power.h new file mode 100644 index 0000000000000000000000000000000000000000..24be9131f8032d80b002f2bf0b055e54981717bf --- /dev/null +++ b/arch/powerpc/include/asm/cpu_setup_power.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2020 IBM Corporation + */ +void __setup_cpu_power7(unsigned long offset, struct cpu_spec *spec); +void __restore_cpu_power7(void); +void __setup_cpu_power8(unsigned long offset, struct cpu_spec *spec); +void __restore_cpu_power8(void); +void __setup_cpu_power9(unsigned long offset, struct cpu_spec *spec); +void __restore_cpu_power9(void); +void __setup_cpu_power10(unsigned long offset, struct cpu_spec *spec); +void __restore_cpu_power10(void); diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h index 591b2f4deed533df0832db93bec2441fd738ec9b..897cc68758d44aca52afb362b0e018ceb7d1ea82 100644 --- a/arch/powerpc/include/asm/fixmap.h +++ b/arch/powerpc/include/asm/fixmap.h @@ -111,8 +111,10 @@ static inline void __set_fixmap(enum fixed_addresses idx, BUILD_BUG_ON(idx >= __end_of_fixed_addresses); else if (WARN_ON(idx >= __end_of_fixed_addresses)) return; - - map_kernel_page(__fix_to_virt(idx), phys, flags); + if (pgprot_val(flags)) + map_kernel_page(__fix_to_virt(idx), phys, flags); + else + unmap_kernel_page(__fix_to_virt(idx)); } #define __early_set_fixmap __set_fixmap diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index 3e8e19f5746c7bf18b515402801b90a13b958a39..00c8cda1c9c3179e41ae91691fced33f58b9ac70 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -382,6 +382,8 @@ #define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2 #define H_CPU_BEHAV_FLUSH_COUNT_CACHE (1ull << 58) // IBM bit 5 #define H_CPU_BEHAV_FLUSH_LINK_STACK (1ull << 57) // IBM bit 6 +#define H_CPU_BEHAV_NO_L1D_FLUSH_ENTRY (1ull << 56) // IBM bit 7 +#define H_CPU_BEHAV_NO_L1D_FLUSH_UACCESS (1ull << 55) // IBM bit 8 /* Flag values used in H_REGISTER_PROC_TBL hcall */ #define PROC_TABLE_OP_MASK 0x18 diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index 0363734ff56e0f9fc0e2c6ddb013cea24d7cbbd6..0f2acbb9667400226d98d18509c36f7e57c37b69 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -38,6 +38,8 @@ #define PACA_IRQ_MUST_HARD_MASK (PACA_IRQ_EE) #endif +#endif /* CONFIG_PPC64 */ + /* * flags for paca->irq_soft_mask */ @@ -46,8 +48,6 @@ #define IRQS_PMI_DISABLED 2 #define IRQS_ALL_DISABLED (IRQS_DISABLED | IRQS_PMI_DISABLED) -#endif /* CONFIG_PPC64 */ - #ifndef __ASSEMBLY__ extern void replay_system_reset(void); @@ -175,6 +175,42 @@ static inline bool arch_irqs_disabled(void) return arch_irqs_disabled_flags(arch_local_save_flags()); } +static inline void set_pmi_irq_pending(void) +{ + /* + * Invoked from PMU callback functions to set PMI bit in the paca. + * This has to be called with irq's disabled (via hard_irq_disable()). 
+ */ + if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) + WARN_ON_ONCE(mfmsr() & MSR_EE); + + get_paca()->irq_happened |= PACA_IRQ_PMI; +} + +static inline void clear_pmi_irq_pending(void) +{ + /* + * Invoked from PMU callback functions to clear the pending PMI bit + * in the paca. + */ + if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) + WARN_ON_ONCE(mfmsr() & MSR_EE); + + get_paca()->irq_happened &= ~PACA_IRQ_PMI; +} + +static inline bool pmi_irq_pending(void) +{ + /* + * Invoked from PMU callback functions to check if there is a pending + * PMI bit in the paca. + */ + if (get_paca()->irq_happened & PACA_IRQ_PMI) + return true; + + return false; +} + #ifdef CONFIG_PPC_BOOK3S /* * To support disabling and enabling of irq with PMI, set of @@ -296,6 +332,10 @@ extern void irq_set_pending_from_srr1(unsigned long srr1); extern void force_external_irq_replay(void); +static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val) +{ + regs->softe = val; +} #else /* CONFIG_PPC64 */ static inline unsigned long arch_local_save_flags(void) @@ -364,6 +404,13 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs) static inline void may_hard_irq_enable(void) { } +static inline void clear_pmi_irq_pending(void) { } +static inline void set_pmi_irq_pending(void) { } +static inline bool pmi_irq_pending(void) { return false; } + +static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val) +{ +} #endif /* CONFIG_PPC64 */ #define ARCH_IRQ_INIT_FLAGS IRQ_NOREQUEST diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index 96522f7f0618a0eb9096858f2b63ff2b98bc3046..e53cc07e6b9ec9d433f4969d9100d277e7ac0c3a 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -65,6 +65,7 @@ extern int icache_44x_need_flush; #ifndef __ASSEMBLY__ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot); +void unmap_kernel_page(unsigned long va); #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h index 57cd3892bfe05237231e929fb1284612d9d13cbe..1eacff0fff02930042793416483dc06da2c37b7c 100644 --- a/arch/powerpc/include/asm/nohash/64/pgtable.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h @@ -311,6 +311,7 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma, #define __swp_entry_to_pte(x) __pte((x).val) int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot); +void unmap_kernel_page(unsigned long va); extern int __meminit vmemmap_create_mapping(unsigned long start, unsigned long page_size, unsigned long phys); diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index a6e3700c4566adbe194f76643f2464bfdc16d2b1..f0c0816f572707a2f0467c6b78a21b869c17e539 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -449,6 +449,7 @@ #define PPC_RAW_LDX(r, base, b) (0x7c00002a | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b)) #define PPC_RAW_LHZ(r, base, i) (0xa0000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i)) #define PPC_RAW_LHBRX(r, base, b) (0x7c00062c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b)) +#define PPC_RAW_LWBRX(r, base, b) (0x7c00042c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b)) #define PPC_RAW_LDBRX(r, base, b) (0x7c000428 | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b)) #define PPC_RAW_STWCX(s, a, b) (0x7c00012d | ___PPC_RS(s) | ___PPC_RA(a) | 
___PPC_RB(b)) #define PPC_RAW_CMPWI(a, i) (0x2c000000 | ___PPC_RA(a) | IMM_L(i)) diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index f4b98903064f5ea9f22953d50e85f418ba6ea8d0..6afb14b6bbc26ca80646015a51f72095cfb73bb6 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -865,6 +865,7 @@ #define MMCR0_BHRBA 0x00200000UL /* BHRB Access allowed in userspace */ #define MMCR0_EBE 0x00100000UL /* Event based branch enable */ #define MMCR0_PMCC 0x000c0000UL /* PMC control */ +#define MMCR0_PMCCEXT ASM_CONST(0x00000200) /* PMCCEXT control */ #define MMCR0_PMCC_U6 0x00080000UL /* PMC1-6 are R/W by user (PR) */ #define MMCR0_PMC1CE 0x00008000UL /* PMC1 count enable*/ #define MMCR0_PMCjCE ASM_CONST(0x00004000) /* PMCj count enable*/ diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index fe2ef598e2ead0a5f429cd30a90e0edd48e22664..376104c166fcf337bd4aa19b50a1e4f588ae8daf 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -11,6 +11,7 @@ CFLAGS_prom_init.o += -fPIC CFLAGS_btext.o += -fPIC endif +CFLAGS_early_32.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) CFLAGS_prom_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c index 803c2a45b22ac2092137e1b2a715d9f51a23b434..1cffb5e7c38d6c0f1315dcb9c729842b351a68d9 100644 --- a/arch/powerpc/kernel/btext.c +++ b/arch/powerpc/kernel/btext.c @@ -241,8 +241,10 @@ int __init btext_find_display(int allow_nonstdout) rc = btext_initialize(np); printk("result: %d\n", rc); } - if (rc == 0) + if (rc == 0) { + of_node_put(np); break; + } } return rc; } diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S deleted file mode 100644 index 704e8b9501eee84ade894358b57986268c2ac17f..0000000000000000000000000000000000000000 --- a/arch/powerpc/kernel/cpu_setup_power.S +++ /dev/null @@ -1,252 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * This file contains low level CPU setup functions. - * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org) - */ - -#include -#include -#include -#include -#include -#include -#include - -/* Entry: r3 = crap, r4 = ptr to cputable entry - * - * Note that we can be called twice for pseudo-PVRs - */ -_GLOBAL(__setup_cpu_power7) - mflr r11 - bl __init_hvmode_206 - mtlr r11 - beqlr - li r0,0 - mtspr SPRN_LPID,r0 - LOAD_REG_IMMEDIATE(r0, PCR_MASK) - mtspr SPRN_PCR,r0 - mfspr r3,SPRN_LPCR - li r4,(LPCR_LPES1 >> LPCR_LPES_SH) - bl __init_LPCR_ISA206 - mtlr r11 - blr - -_GLOBAL(__restore_cpu_power7) - mflr r11 - mfmsr r3 - rldicl. r0,r3,4,63 - beqlr - li r0,0 - mtspr SPRN_LPID,r0 - LOAD_REG_IMMEDIATE(r0, PCR_MASK) - mtspr SPRN_PCR,r0 - mfspr r3,SPRN_LPCR - li r4,(LPCR_LPES1 >> LPCR_LPES_SH) - bl __init_LPCR_ISA206 - mtlr r11 - blr - -_GLOBAL(__setup_cpu_power8) - mflr r11 - bl __init_FSCR - bl __init_PMU - bl __init_PMU_ISA207 - bl __init_hvmode_206 - mtlr r11 - beqlr - li r0,0 - mtspr SPRN_LPID,r0 - LOAD_REG_IMMEDIATE(r0, PCR_MASK) - mtspr SPRN_PCR,r0 - mfspr r3,SPRN_LPCR - ori r3, r3, LPCR_PECEDH - li r4,0 /* LPES = 0 */ - bl __init_LPCR_ISA206 - bl __init_HFSCR - bl __init_PMU_HV - bl __init_PMU_HV_ISA207 - mtlr r11 - blr - -_GLOBAL(__restore_cpu_power8) - mflr r11 - bl __init_FSCR - bl __init_PMU - bl __init_PMU_ISA207 - mfmsr r3 - rldicl. 
r0,r3,4,63 - mtlr r11 - beqlr - li r0,0 - mtspr SPRN_LPID,r0 - LOAD_REG_IMMEDIATE(r0, PCR_MASK) - mtspr SPRN_PCR,r0 - mfspr r3,SPRN_LPCR - ori r3, r3, LPCR_PECEDH - li r4,0 /* LPES = 0 */ - bl __init_LPCR_ISA206 - bl __init_HFSCR - bl __init_PMU_HV - bl __init_PMU_HV_ISA207 - mtlr r11 - blr - -_GLOBAL(__setup_cpu_power10) - mflr r11 - bl __init_FSCR_power10 - bl __init_PMU - bl __init_PMU_ISA31 - b 1f - -_GLOBAL(__setup_cpu_power9) - mflr r11 - bl __init_FSCR_power9 - bl __init_PMU -1: bl __init_hvmode_206 - mtlr r11 - beqlr - li r0,0 - mtspr SPRN_PSSCR,r0 - mtspr SPRN_LPID,r0 - mtspr SPRN_PID,r0 - LOAD_REG_IMMEDIATE(r0, PCR_MASK) - mtspr SPRN_PCR,r0 - mfspr r3,SPRN_LPCR - LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC) - or r3, r3, r4 - LOAD_REG_IMMEDIATE(r4, LPCR_UPRT | LPCR_HR) - andc r3, r3, r4 - li r4,0 /* LPES = 0 */ - bl __init_LPCR_ISA300 - bl __init_HFSCR - bl __init_PMU_HV - mtlr r11 - blr - -_GLOBAL(__restore_cpu_power10) - mflr r11 - bl __init_FSCR_power10 - bl __init_PMU - bl __init_PMU_ISA31 - b 1f - -_GLOBAL(__restore_cpu_power9) - mflr r11 - bl __init_FSCR_power9 - bl __init_PMU -1: mfmsr r3 - rldicl. r0,r3,4,63 - mtlr r11 - beqlr - li r0,0 - mtspr SPRN_PSSCR,r0 - mtspr SPRN_LPID,r0 - mtspr SPRN_PID,r0 - LOAD_REG_IMMEDIATE(r0, PCR_MASK) - mtspr SPRN_PCR,r0 - mfspr r3,SPRN_LPCR - LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC) - or r3, r3, r4 - LOAD_REG_IMMEDIATE(r4, LPCR_UPRT | LPCR_HR) - andc r3, r3, r4 - li r4,0 /* LPES = 0 */ - bl __init_LPCR_ISA300 - bl __init_HFSCR - bl __init_PMU_HV - mtlr r11 - blr - -__init_hvmode_206: - /* Disable CPU_FTR_HVMODE and exit if MSR:HV is not set */ - mfmsr r3 - rldicl. r0,r3,4,63 - bnelr - ld r5,CPU_SPEC_FEATURES(r4) - LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE | CPU_FTR_P9_TM_HV_ASSIST) - andc r5,r5,r6 - std r5,CPU_SPEC_FEATURES(r4) - blr - -__init_LPCR_ISA206: - /* Setup a sane LPCR: - * Called with initial LPCR in R3 and desired LPES 2-bit value in R4 - * - * LPES = 0b01 (HSRR0/1 used for 0x500) - * PECE = 0b111 - * DPFD = 4 - * HDICE = 0 - * VC = 0b100 (VPM0=1, VPM1=0, ISL=0) - * VRMASD = 0b10000 (L=1, LP=00) - * - * Other bits untouched for now - */ - li r5,0x10 - rldimi r3,r5, LPCR_VRMASD_SH, 64-LPCR_VRMASD_SH-5 - - /* POWER9 has no VRMASD */ -__init_LPCR_ISA300: - rldimi r3,r4, LPCR_LPES_SH, 64-LPCR_LPES_SH-2 - ori r3,r3,(LPCR_PECE0|LPCR_PECE1|LPCR_PECE2) - li r5,4 - rldimi r3,r5, LPCR_DPFD_SH, 64-LPCR_DPFD_SH-3 - clrrdi r3,r3,1 /* clear HDICE */ - li r5,4 - rldimi r3,r5, LPCR_VC_SH, 0 - mtspr SPRN_LPCR,r3 - isync - blr - -__init_FSCR_power10: - mfspr r3, SPRN_FSCR - ori r3, r3, FSCR_PREFIX - mtspr SPRN_FSCR, r3 - // fall through - -__init_FSCR_power9: - mfspr r3, SPRN_FSCR - ori r3, r3, FSCR_SCV - mtspr SPRN_FSCR, r3 - // fall through - -__init_FSCR: - mfspr r3,SPRN_FSCR - ori r3,r3,FSCR_TAR|FSCR_EBB - mtspr SPRN_FSCR,r3 - blr - -__init_HFSCR: - mfspr r3,SPRN_HFSCR - ori r3,r3,HFSCR_TAR|HFSCR_TM|HFSCR_BHRB|HFSCR_PM|\ - HFSCR_DSCR|HFSCR_VECVSX|HFSCR_FP|HFSCR_EBB|HFSCR_MSGP - mtspr SPRN_HFSCR,r3 - blr - -__init_PMU_HV: - li r5,0 - mtspr SPRN_MMCRC,r5 - blr - -__init_PMU_HV_ISA207: - li r5,0 - mtspr SPRN_MMCRH,r5 - blr - -__init_PMU: - li r5,0 - mtspr SPRN_MMCRA,r5 - mtspr SPRN_MMCR0,r5 - mtspr SPRN_MMCR1,r5 - mtspr SPRN_MMCR2,r5 - blr - -__init_PMU_ISA207: - li r5,0 - mtspr SPRN_MMCRS,r5 - blr - -__init_PMU_ISA31: - li r5,0 - mtspr SPRN_MMCR3,r5 - LOAD_REG_IMMEDIATE(r5, MMCRA_BHRB_DISABLE) - mtspr SPRN_MMCRA,r5 - blr diff --git a/arch/powerpc/kernel/cpu_setup_power.c 
b/arch/powerpc/kernel/cpu_setup_power.c new file mode 100644 index 0000000000000000000000000000000000000000..3cca88ee96d711813cb64430b838d95ec63bb98b --- /dev/null +++ b/arch/powerpc/kernel/cpu_setup_power.c @@ -0,0 +1,272 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright 2020, Jordan Niethe, IBM Corporation. + * + * This file contains low level CPU setup functions. + * Originally written in assembly by Benjamin Herrenschmidt & various other + * authors. + */ + +#include +#include +#include +#include +#include + +/* Disable CPU_FTR_HVMODE and return false if MSR:HV is not set */ +static bool init_hvmode_206(struct cpu_spec *t) +{ + u64 msr; + + msr = mfmsr(); + if (msr & MSR_HV) + return true; + + t->cpu_features &= ~(CPU_FTR_HVMODE | CPU_FTR_P9_TM_HV_ASSIST); + return false; +} + +static void init_LPCR_ISA300(u64 lpcr, u64 lpes) +{ + /* POWER9 has no VRMASD */ + lpcr |= (lpes << LPCR_LPES_SH) & LPCR_LPES; + lpcr |= LPCR_PECE0|LPCR_PECE1|LPCR_PECE2; + lpcr |= (4ull << LPCR_DPFD_SH) & LPCR_DPFD; + lpcr &= ~LPCR_HDICE; /* clear HDICE */ + lpcr |= (4ull << LPCR_VC_SH); + mtspr(SPRN_LPCR, lpcr); + isync(); +} + +/* + * Setup a sane LPCR: + * Called with initial LPCR and desired LPES 2-bit value + * + * LPES = 0b01 (HSRR0/1 used for 0x500) + * PECE = 0b111 + * DPFD = 4 + * HDICE = 0 + * VC = 0b100 (VPM0=1, VPM1=0, ISL=0) + * VRMASD = 0b10000 (L=1, LP=00) + * + * Other bits untouched for now + */ +static void init_LPCR_ISA206(u64 lpcr, u64 lpes) +{ + lpcr |= (0x10ull << LPCR_VRMASD_SH) & LPCR_VRMASD; + init_LPCR_ISA300(lpcr, lpes); +} + +static void init_FSCR(void) +{ + u64 fscr; + + fscr = mfspr(SPRN_FSCR); + fscr |= FSCR_TAR|FSCR_EBB; + mtspr(SPRN_FSCR, fscr); +} + +static void init_FSCR_power9(void) +{ + u64 fscr; + + fscr = mfspr(SPRN_FSCR); + fscr |= FSCR_SCV; + mtspr(SPRN_FSCR, fscr); + init_FSCR(); +} + +static void init_FSCR_power10(void) +{ + u64 fscr; + + fscr = mfspr(SPRN_FSCR); + fscr |= FSCR_PREFIX; + mtspr(SPRN_FSCR, fscr); + init_FSCR_power9(); +} + +static void init_HFSCR(void) +{ + u64 hfscr; + + hfscr = mfspr(SPRN_HFSCR); + hfscr |= HFSCR_TAR|HFSCR_TM|HFSCR_BHRB|HFSCR_PM|HFSCR_DSCR|\ + HFSCR_VECVSX|HFSCR_FP|HFSCR_EBB|HFSCR_MSGP; + mtspr(SPRN_HFSCR, hfscr); +} + +static void init_PMU_HV(void) +{ + mtspr(SPRN_MMCRC, 0); +} + +static void init_PMU_HV_ISA207(void) +{ + mtspr(SPRN_MMCRH, 0); +} + +static void init_PMU(void) +{ + mtspr(SPRN_MMCRA, 0); + mtspr(SPRN_MMCR0, 0); + mtspr(SPRN_MMCR1, 0); + mtspr(SPRN_MMCR2, 0); +} + +static void init_PMU_ISA207(void) +{ + mtspr(SPRN_MMCRS, 0); +} + +static void init_PMU_ISA31(void) +{ + mtspr(SPRN_MMCR3, 0); + mtspr(SPRN_MMCRA, MMCRA_BHRB_DISABLE); + mtspr(SPRN_MMCR0, MMCR0_PMCCEXT); +} + +/* + * Note that we can be called twice of pseudo-PVRs. + * The parameter offset is not used. 
+ */ + +void __setup_cpu_power7(unsigned long offset, struct cpu_spec *t) +{ + if (!init_hvmode_206(t)) + return; + + mtspr(SPRN_LPID, 0); + mtspr(SPRN_PCR, PCR_MASK); + init_LPCR_ISA206(mfspr(SPRN_LPCR), LPCR_LPES1 >> LPCR_LPES_SH); +} + +void __restore_cpu_power7(void) +{ + u64 msr; + + msr = mfmsr(); + if (!(msr & MSR_HV)) + return; + + mtspr(SPRN_LPID, 0); + mtspr(SPRN_PCR, PCR_MASK); + init_LPCR_ISA206(mfspr(SPRN_LPCR), LPCR_LPES1 >> LPCR_LPES_SH); +} + +void __setup_cpu_power8(unsigned long offset, struct cpu_spec *t) +{ + init_FSCR(); + init_PMU(); + init_PMU_ISA207(); + + if (!init_hvmode_206(t)) + return; + + mtspr(SPRN_LPID, 0); + mtspr(SPRN_PCR, PCR_MASK); + init_LPCR_ISA206(mfspr(SPRN_LPCR) | LPCR_PECEDH, 0); /* LPES = 0 */ + init_HFSCR(); + init_PMU_HV(); + init_PMU_HV_ISA207(); +} + +void __restore_cpu_power8(void) +{ + u64 msr; + + init_FSCR(); + init_PMU(); + init_PMU_ISA207(); + + msr = mfmsr(); + if (!(msr & MSR_HV)) + return; + + mtspr(SPRN_LPID, 0); + mtspr(SPRN_PCR, PCR_MASK); + init_LPCR_ISA206(mfspr(SPRN_LPCR) | LPCR_PECEDH, 0); /* LPES = 0 */ + init_HFSCR(); + init_PMU_HV(); + init_PMU_HV_ISA207(); +} + +void __setup_cpu_power9(unsigned long offset, struct cpu_spec *t) +{ + init_FSCR_power9(); + init_PMU(); + + if (!init_hvmode_206(t)) + return; + + mtspr(SPRN_PSSCR, 0); + mtspr(SPRN_LPID, 0); + mtspr(SPRN_PID, 0); + mtspr(SPRN_PCR, PCR_MASK); + init_LPCR_ISA300((mfspr(SPRN_LPCR) | LPCR_PECEDH | LPCR_PECE_HVEE |\ + LPCR_HVICE | LPCR_HEIC) & ~(LPCR_UPRT | LPCR_HR), 0); + init_HFSCR(); + init_PMU_HV(); +} + +void __restore_cpu_power9(void) +{ + u64 msr; + + init_FSCR_power9(); + init_PMU(); + + msr = mfmsr(); + if (!(msr & MSR_HV)) + return; + + mtspr(SPRN_PSSCR, 0); + mtspr(SPRN_LPID, 0); + mtspr(SPRN_PID, 0); + mtspr(SPRN_PCR, PCR_MASK); + init_LPCR_ISA300((mfspr(SPRN_LPCR) | LPCR_PECEDH | LPCR_PECE_HVEE |\ + LPCR_HVICE | LPCR_HEIC) & ~(LPCR_UPRT | LPCR_HR), 0); + init_HFSCR(); + init_PMU_HV(); +} + +void __setup_cpu_power10(unsigned long offset, struct cpu_spec *t) +{ + init_FSCR_power10(); + init_PMU(); + init_PMU_ISA31(); + + if (!init_hvmode_206(t)) + return; + + mtspr(SPRN_PSSCR, 0); + mtspr(SPRN_LPID, 0); + mtspr(SPRN_PID, 0); + mtspr(SPRN_PCR, PCR_MASK); + init_LPCR_ISA300((mfspr(SPRN_LPCR) | LPCR_PECEDH | LPCR_PECE_HVEE |\ + LPCR_HVICE | LPCR_HEIC) & ~(LPCR_UPRT | LPCR_HR), 0); + init_HFSCR(); + init_PMU_HV(); +} + +void __restore_cpu_power10(void) +{ + u64 msr; + + init_FSCR_power10(); + init_PMU(); + init_PMU_ISA31(); + + msr = mfmsr(); + if (!(msr & MSR_HV)) + return; + + mtspr(SPRN_PSSCR, 0); + mtspr(SPRN_LPID, 0); + mtspr(SPRN_PID, 0); + mtspr(SPRN_PCR, PCR_MASK); + init_LPCR_ISA300((mfspr(SPRN_LPCR) | LPCR_PECEDH | LPCR_PECE_HVEE |\ + LPCR_HVICE | LPCR_HEIC) & ~(LPCR_UPRT | LPCR_HR), 0); + init_HFSCR(); + init_PMU_HV(); +} diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 29de58d4dfb767e01409629796e955b7c7a80841..8fdb40ee86d11930e4bd25220d16672ffa1f21ce 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -60,19 +60,15 @@ extern void __setup_cpu_7410(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_745x(unsigned long offset, struct cpu_spec* spec); #endif /* CONFIG_PPC32 */ #ifdef CONFIG_PPC64 +#include extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_ppc970MP(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_pa6t(unsigned long offset, struct cpu_spec* spec); extern void __restore_cpu_pa6t(void); 
extern void __restore_cpu_ppc970(void); -extern void __setup_cpu_power7(unsigned long offset, struct cpu_spec* spec); -extern void __restore_cpu_power7(void); -extern void __setup_cpu_power8(unsigned long offset, struct cpu_spec* spec); -extern void __restore_cpu_power8(void); -extern void __setup_cpu_power9(unsigned long offset, struct cpu_spec* spec); -extern void __restore_cpu_power9(void); -extern void __setup_cpu_power10(unsigned long offset, struct cpu_spec* spec); -extern void __restore_cpu_power10(void); +extern long __machine_check_early_realmode_p7(struct pt_regs *regs); +extern long __machine_check_early_realmode_p8(struct pt_regs *regs); +extern long __machine_check_early_realmode_p9(struct pt_regs *regs); #endif /* CONFIG_PPC64 */ #if defined(CONFIG_E500) extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec); diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index 1098863e17ee8d90cdd9b1363ed16b40a2748e95..9d079659b24d344698f33451935ad68b2cef75be 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -454,6 +454,7 @@ static void init_pmu_power10(void) mtspr(SPRN_MMCR3, 0); mtspr(SPRN_MMCRA, MMCRA_BHRB_DISABLE); + mtspr(SPRN_MMCR0, MMCR0_PMCCEXT); } static int __init feat_enable_pmu_power10(struct dt_cpu_feature *f) diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index eddf362caedce8091bcc417bde5a99547d836019..c3bb800dc4352398dcad2186fb6c6fdb624fd1e3 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -1641,6 +1641,14 @@ int __init setup_fadump(void) else if (fw_dump.reserve_dump_area_size) fw_dump.ops->fadump_init_mem_struct(&fw_dump); + /* + * In case of panic, fadump is triggered via ppc_panic_event() + * panic notifier. Setting crash_kexec_post_notifiers to 'true' + * lets panic() function take crash friendly path before panic + * notifiers are invoked. + */ + crash_kexec_post_notifiers = true; + return 1; } subsys_initcall(setup_fadump); diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index a1ae00689e0f4898d17a6782a9e82afc7756edd7..aeb9bc9958749e8a67a4887bf09b3f6fae51172d 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -27,6 +27,7 @@ #include #include +#include #include #include #include @@ -626,7 +627,7 @@ start_here: b . /* prevent prefetch past rfi */ /* Set up the initial MMU state so we can do the first level of - * kernel initialization. This maps the first 16 MBytes of memory 1:1 + * kernel initialization. This maps the first 32 MBytes of memory 1:1 * virtual to physical and more importantly sets the cache mode. 
*/ initial_mmu: @@ -663,6 +664,12 @@ initial_mmu: tlbwe r4,r0,TLB_DATA /* Load the data portion of the entry */ tlbwe r3,r0,TLB_TAG /* Load the tag portion of the entry */ + li r0,62 /* TLB slot 62 */ + addis r4,r4,SZ_16M@h + addis r3,r3,SZ_16M@h + tlbwe r4,r0,TLB_DATA /* Load the data portion of the entry */ + tlbwe r3,r0,TLB_TAG /* Load the tag portion of the entry */ + isync /* Establish the exception vector base diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 7e337c570ea6b3ea4b0f7a1b4f372835cb5e8ae5..9e71c0739f08dd32a70a56a5a7fb76a09364f665 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -2956,7 +2956,7 @@ static void __init fixup_device_tree_efika_add_phy(void) /* Check if the phy-handle property exists - bail if it does */ rv = prom_getprop(node, "phy-handle", prop, sizeof(prop)); - if (!rv) + if (rv <= 0) return; /* diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 452cbf98bfd714aefddc1b9373663add4e7ee948..cf99f57aed8220b15544a08cb534e3a11880bc2f 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -60,6 +60,7 @@ #include #include #include +#include #ifdef DEBUG #include @@ -594,6 +595,45 @@ void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)) } #endif +#ifdef CONFIG_NMI_IPI +static void crash_stop_this_cpu(struct pt_regs *regs) +#else +static void crash_stop_this_cpu(void *dummy) +#endif +{ + /* + * Just busy wait here and avoid marking CPU as offline to ensure + * register data is captured appropriately. + */ + while (1) + cpu_relax(); +} + +void crash_smp_send_stop(void) +{ + static bool stopped = false; + + /* + * In case of fadump, register data for all CPUs is captured by f/w + * on ibm,os-term rtas call. Skip IPI callbacks to other CPUs before + * this rtas call to avoid tricky post processing of those CPUs' + * backtraces. + */ + if (should_fadump_crash()) + return; + + if (stopped) + return; + + stopped = true; + +#ifdef CONFIG_NMI_IPI + smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_stop_this_cpu, 1000000); +#else + smp_call_function(crash_stop_this_cpu, NULL, 0); +#endif /* CONFIG_NMI_IPI */ +} + #ifdef CONFIG_NMI_IPI static void nmi_stop_this_cpu(struct pt_regs *regs) { @@ -1488,10 +1528,12 @@ void start_secondary(void *unused) BUG(); } +#ifdef CONFIG_PROFILING int setup_profiling_timer(unsigned int multiplier) { return 0; } +#endif static void fixup_topology(void) { diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 77dffea3d5373d69b886577edd9f9d88b978ab2c..069d451240fa4c961fc85fed91a3eb5777d57676 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -1922,11 +1922,40 @@ void vsx_unavailable_tm(struct pt_regs *regs) } #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ -void performance_monitor_exception(struct pt_regs *regs) +static void performance_monitor_exception_nmi(struct pt_regs *regs) +{ + nmi_enter(); + + __this_cpu_inc(irq_stat.pmu_irqs); + + perf_irq(regs); + + nmi_exit(); +} + +static void performance_monitor_exception_async(struct pt_regs *regs) { + irq_enter(); + __this_cpu_inc(irq_stat.pmu_irqs); perf_irq(regs); + + irq_exit(); +} + +void performance_monitor_exception(struct pt_regs *regs) +{ + /* + * On 64-bit, if perf interrupts hit in a local_irq_disable + * (soft-masked) region, we consider them as NMIs. This is required to + * prevent hash faults on user addresses when reading callchains (and + * looks better from an irq tracing perspective). 
+ */ + if (IS_ENABLED(CONFIG_PPC64) && unlikely(arch_irq_disabled_regs(regs))) + performance_monitor_exception_nmi(regs); + else + performance_monitor_exception_async(regs); } #ifdef CONFIG_PPC_ADV_DEBUG_REGS diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c index af3c15a1d41eb159670be90218c12b9966afa7f1..75b2a6c4db5a5001313ddb2e0764a42b7656ee9c 100644 --- a/arch/powerpc/kernel/watchdog.c +++ b/arch/powerpc/kernel/watchdog.c @@ -132,6 +132,10 @@ static void set_cpumask_stuck(const struct cpumask *cpumask, u64 tb) { cpumask_or(&wd_smp_cpus_stuck, &wd_smp_cpus_stuck, cpumask); cpumask_andnot(&wd_smp_cpus_pending, &wd_smp_cpus_pending, cpumask); + /* + * See wd_smp_clear_cpu_pending() + */ + smp_mb(); if (cpumask_empty(&wd_smp_cpus_pending)) { wd_smp_last_reset_tb = tb; cpumask_andnot(&wd_smp_cpus_pending, @@ -217,13 +221,44 @@ static void wd_smp_clear_cpu_pending(int cpu, u64 tb) cpumask_clear_cpu(cpu, &wd_smp_cpus_stuck); wd_smp_unlock(&flags); + } else { + /* + * The last CPU to clear pending should have reset the + * watchdog so we generally should not find it empty + * here if our CPU was clear. However it could happen + * due to a rare race with another CPU taking the + * last CPU out of the mask concurrently. + * + * We can't add a warning for it. But just in case + * there is a problem with the watchdog that is causing + * the mask to not be reset, try to kick it along here. + */ + if (unlikely(cpumask_empty(&wd_smp_cpus_pending))) + goto none_pending; } return; } + cpumask_clear_cpu(cpu, &wd_smp_cpus_pending); + + /* + * Order the store to clear pending with the load(s) to check all + * words in the pending mask to check they are all empty. This orders + * with the same barrier on another CPU. This prevents two CPUs + * clearing the last 2 pending bits, but neither seeing the other's + * store when checking if the mask is empty, and missing an empty + * mask, which ends with a false positive. + */ + smp_mb(); if (cpumask_empty(&wd_smp_cpus_pending)) { unsigned long flags; +none_pending: + /* + * Double check under lock because more than one CPU could see + * a clear mask with the lockless check after clearing their + * pending bits. 
+ */ wd_smp_lock(&flags); if (cpumask_empty(&wd_smp_cpus_pending)) { wd_smp_last_reset_tb = tb; @@ -314,8 +349,12 @@ void arch_touch_nmi_watchdog(void) { unsigned long ticks = tb_ticks_per_usec * wd_timer_period_ms * 1000; int cpu = smp_processor_id(); - u64 tb = get_tb(); + u64 tb; + if (!cpumask_test_cpu(cpu, &watchdog_cpumask)) + return; + + tb = get_tb(); if (tb - per_cpu(wd_timer_tb, cpu) >= ticks) { per_cpu(wd_timer_tb, cpu) = tb; wd_smp_clear_cpu_pending(cpu, tb); diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 175967a195c4459075cdb7cc3c70841a64a7a4cc..527c205d5a5f5f4f05d7416ef83ede53d5479d23 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -4557,8 +4557,12 @@ static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm, unsigned long npages = mem->memory_size >> PAGE_SHIFT; if (change == KVM_MR_CREATE) { - slot->arch.rmap = vzalloc(array_size(npages, - sizeof(*slot->arch.rmap))); + unsigned long size = array_size(npages, sizeof(*slot->arch.rmap)); + + if ((size >> PAGE_SHIFT) > totalram_pages()) + return -ENOMEM; + + slot->arch.rmap = vzalloc(size); if (!slot->arch.rmap) return -ENOMEM; } diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index a5f1ae892ba68ddd1d6dca6728d10339fe3c950c..d0b6c8c16c48a5ed7e475ce11af48af2acf124b4 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -510,7 +510,7 @@ long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu) if (eaddr & (0xFFFUL << 52)) return H_PARAMETER; - buf = kzalloc(n, GFP_KERNEL); + buf = kzalloc(n, GFP_KERNEL | __GFP_NOWARN); if (!buf) return H_NO_MEM; diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 58991233381ed6d0d1f730b3c6836e53e006d9a7..0697a0e014ae86651bf9090c96c87d91be660e52 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -19,6 +19,9 @@ CFLAGS_code-patching.o += -DDISABLE_BRANCH_PROFILING CFLAGS_feature-fixups.o += -DDISABLE_BRANCH_PROFILING endif +CFLAGS_code-patching.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) +CFLAGS_feature-fixups.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) + obj-y += alloc.o code-patching.o feature-fixups.o pmem.o inst.o test_code-patching.o ifndef CONFIG_KASAN diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index a2e067f68dee867762fb4657d4e8aaf5ec68e62c..0edebbbffcdcaa00f41f86f4e1d8e56f8e9feae9 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -3062,12 +3062,14 @@ void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op) case BARRIER_EIEIO: eieio(); break; +#ifdef CONFIG_PPC64 case BARRIER_LWSYNC: asm volatile("lwsync" : : : "memory"); break; case BARRIER_PTESYNC: asm volatile("ptesync" : : : "memory"); break; +#endif } break; diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c index a59e7ec9818039de71e921ea9dcee6063415ad1a..602ab13127b4077a6fc81dafa002019432f67436 100644 --- a/arch/powerpc/mm/book3s32/mmu.c +++ b/arch/powerpc/mm/book3s32/mmu.c @@ -72,7 +72,7 @@ unsigned long p_block_mapped(phys_addr_t pa) return 0; } -static int find_free_bat(void) +int __init find_free_bat(void) { int b; int n = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4; @@ -96,7 +96,7 @@ static int find_free_bat(void) * - block size has to be a power of two. This is calculated by finding the * highest bit set to 1. 
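The watchdog hunks above insert a full barrier between clearing a CPU's pending bit and re-checking the whole mask, so when two CPUs clear the last two bits concurrently at least one of them observes the empty mask. A C11-atomics sketch of that store/fence/load pairing; the single-word mask is a simplification of the kernel cpumask, and the demo runs single-threaded just to show the shape of the check.

/* --- illustrative sketch, not part of the patch --- */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic unsigned long pending = 0x3;     /* bits for CPU0 and CPU1 */

/* returns true if this CPU saw the mask go empty and must reset the watchdog */
static bool clear_and_check(unsigned long mybit)
{
        atomic_fetch_and(&pending, ~mybit);
        /*
         * Full barrier ordering our clear against the load below; it pairs
         * with the same barrier on the other CPU, so the two CPUs cannot
         * both miss the empty mask.
         */
        atomic_thread_fence(memory_order_seq_cst);
        return atomic_load(&pending) == 0;
}

int main(void)
{
        printf("%d\n", clear_and_check(0x1));   /* 0: CPU1 still pending */
        printf("%d\n", clear_and_check(0x2));   /* 1: mask now empty */
        return 0;
}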
*/ -static unsigned int block_size(unsigned long base, unsigned long top) +unsigned int bat_block_size(unsigned long base, unsigned long top) { unsigned int max_size = SZ_256M; unsigned int base_shift = (ffs(base) - 1) & 31; @@ -141,7 +141,7 @@ static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long to int idx; while ((idx = find_free_bat()) != -1 && base != top) { - unsigned int size = block_size(base, top); + unsigned int size = bat_block_size(base, top); if (size < 128 << 10) break; @@ -201,18 +201,17 @@ void mmu_mark_initmem_nx(void) int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4; int i; unsigned long base = (unsigned long)_stext - PAGE_OFFSET; - unsigned long top = (unsigned long)_etext - PAGE_OFFSET; + unsigned long top = ALIGN((unsigned long)_etext - PAGE_OFFSET, SZ_128K); unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET; unsigned long size; - for (i = 0; i < nb - 1 && base < top && top - base > (128 << 10);) { - size = block_size(base, top); + for (i = 0; i < nb - 1 && base < top;) { + size = bat_block_size(base, top); setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT); base += size; } if (base < top) { - size = block_size(base, top); - size = max(size, 128UL << 10); + size = bat_block_size(base, top); if ((top - base) > size) { size <<= 1; if (strict_kernel_rwx_enabled() && base + size > border) diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index 1d5eec847b883e8eda9e2eb7ba09a39193219c4f..295959487b76d606766962bd29a5184ffec2d5ab 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -1152,7 +1152,7 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) int pud_clear_huge(pud_t *pud) { - if (pud_huge(*pud)) { + if (pud_is_leaf(*pud)) { pud_clear(pud); return 1; } @@ -1199,7 +1199,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) int pmd_clear_huge(pmd_t *pmd) { - if (pmd_huge(*pmd)) { + if (pmd_is_leaf(*pmd)) { pmd_clear(pmd); return 1; } diff --git a/arch/powerpc/mm/kasan/book3s_32.c b/arch/powerpc/mm/kasan/book3s_32.c index 202bd260a009567b242b7b0177d4f47bfc53f364..450a67ef0bbe1eb4fc4f897e897f1ff24727f44c 100644 --- a/arch/powerpc/mm/kasan/book3s_32.c +++ b/arch/powerpc/mm/kasan/book3s_32.c @@ -10,47 +10,51 @@ int __init kasan_init_region(void *start, size_t size) { unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start); unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size); - unsigned long k_cur = k_start; - int k_size = k_end - k_start; - int k_size_base = 1 << (ffs(k_size) - 1); + unsigned long k_nobat = k_start; + unsigned long k_cur; + phys_addr_t phys; int ret; - void *block; - block = memblock_alloc(k_size, k_size_base); - - if (block && k_size_base >= SZ_128K && k_start == ALIGN(k_start, k_size_base)) { - int k_size_more = 1 << (ffs(k_size - k_size_base) - 1); - - setbat(-1, k_start, __pa(block), k_size_base, PAGE_KERNEL); - if (k_size_more >= SZ_128K) - setbat(-1, k_start + k_size_base, __pa(block) + k_size_base, - k_size_more, PAGE_KERNEL); - if (v_block_mapped(k_start)) - k_cur = k_start + k_size_base; - if (v_block_mapped(k_start + k_size_base)) - k_cur = k_start + k_size_base + k_size_more; - - update_bats(); + while (k_nobat < k_end) { + unsigned int k_size = bat_block_size(k_nobat, k_end); + int idx = find_free_bat(); + + if (idx == -1) + break; + if (k_size < SZ_128K) + break; + phys = memblock_phys_alloc_range(k_size, k_size, 0, + 
MEMBLOCK_ALLOC_ANYWHERE); + if (!phys) + break; + + setbat(idx, k_nobat, phys, k_size, PAGE_KERNEL); + k_nobat += k_size; } + if (k_nobat != k_start) + update_bats(); - if (!block) - block = memblock_alloc(k_size, PAGE_SIZE); - if (!block) - return -ENOMEM; + if (k_nobat < k_end) { + phys = memblock_phys_alloc_range(k_end - k_nobat, PAGE_SIZE, 0, + MEMBLOCK_ALLOC_ANYWHERE); + if (!phys) + return -ENOMEM; + } ret = kasan_init_shadow_page_tables(k_start, k_end); if (ret) return ret; - kasan_update_early_region(k_start, k_cur, __pte(0)); + kasan_update_early_region(k_start, k_nobat, __pte(0)); - for (; k_cur < k_end; k_cur += PAGE_SIZE) { + for (k_cur = k_nobat; k_cur < k_end; k_cur += PAGE_SIZE) { pmd_t *pmd = pmd_off_k(k_cur); - void *va = block + k_cur - k_start; - pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL); + pte_t pte = pfn_pte(PHYS_PFN(phys + k_cur - k_nobat), PAGE_KERNEL); __set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0); } flush_tlb_kernel_range(k_start, k_end); + memset(kasan_mem_to_shadow(start), 0, k_end - k_start); + return 0; } diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c index 15555c95cebc70ea708e41753148f73142af0367..faaf33e204de1064af5843c03058b05428e40271 100644 --- a/arch/powerpc/mm/pgtable.c +++ b/arch/powerpc/mm/pgtable.c @@ -194,6 +194,15 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, __set_pte_at(mm, addr, ptep, pte, 0); } +void unmap_kernel_page(unsigned long va) +{ + pmd_t *pmdp = pmd_off_k(va); + pte_t *ptep = pte_offset_kernel(pmdp, va); + + pte_clear(&init_mm, va, ptep); + flush_tlb_kernel_range(va, va + PAGE_SIZE); +} + /* * This is called when relaxing access to a PTE. It's also called in the page * fault path when we don't hit any of the major fault cases, ie, a minor diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index cc6e2f94517fcbae716c9b1c6119002603497327..aefc2bfdf10494b0358ef531c899f51be083132f 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c @@ -102,7 +102,8 @@ EXPORT_SYMBOL(__pte_frag_size_shift); struct page *p4d_page(p4d_t p4d) { if (p4d_is_leaf(p4d)) { - VM_WARN_ON(!p4d_huge(p4d)); + if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP)) + VM_WARN_ON(!p4d_huge(p4d)); return pte_page(p4d_pte(p4d)); } return virt_to_page(p4d_page_vaddr(p4d)); @@ -112,7 +113,8 @@ struct page *p4d_page(p4d_t p4d) struct page *pud_page(pud_t pud) { if (pud_is_leaf(pud)) { - VM_WARN_ON(!pud_huge(pud)); + if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP)) + VM_WARN_ON(!pud_huge(pud)); return pte_page(pud_pte(pud)); } return virt_to_page(pud_page_vaddr(pud)); @@ -125,7 +127,13 @@ struct page *pud_page(pud_t pud) struct page *pmd_page(pmd_t pmd) { if (pmd_is_leaf(pmd)) { - VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd))); + /* + * vmalloc_to_page may be called on any vmap address (not only + * vmalloc), and it uses pmd_page() etc., when huge vmap is + * enabled so these checks can't be used. + */ + if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP)) + VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd))); return pte_page(pmd_pte(pmd)); } return virt_to_page(pmd_page_vaddr(pmd)); diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index 8936090acb57960a13e19c3048b59aae5e32092d..0d47514e8870db6919a615a72a3b15c82ef52c75 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -651,17 +651,21 @@ bpf_alu32_trunc: EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1])); break; case 64: - /* - * Way easier and faster(?) 
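The kasan_init_region() rewrite above walks the shadow range, grabbing a free BAT and a naturally aligned power-of-two block (at least 128K, sized by bat_block_size()) for as much of it as possible, then falls back to page tables for the remainder. A user-space sketch of that block-splitting loop; the simplified block_size() below stands in for bat_block_size() and only mirrors its alignment, span, and 256MB-cap constraints.

/* --- illustrative sketch, not part of the patch --- */
#include <stdio.h>

#define SZ_256M (256UL << 20)

/* largest power-of-two block naturally aligned at base, fitting below top */
static unsigned long block_size(unsigned long base, unsigned long top)
{
        unsigned long align = base ? (base & -base) : SZ_256M;
        unsigned long span = 1;

        while (span * 2 <= top - base)
                span *= 2;

        if (span > align)
                span = align;
        return span < SZ_256M ? span : SZ_256M;
}

int main(void)
{
        unsigned long base = 0xf180000, top = 0xf400000;        /* arbitrary range */

        while (base < top) {
                unsigned long size = block_size(base, top);

                printf("map %#lx..%#lx (%lu KB)\n", base, base + size, size >> 10);
                base += size;
        }
        return 0;
}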
to store the value - * into stack and then use ldbrx - * - * ctx->seen will be reliable in pass2, but - * the instructions generated will remain the - * same across all passes - */ + /* Store the value to stack and then use byte-reverse loads */ PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx)); EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx))); - EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1])); + if (cpu_has_feature(CPU_FTR_ARCH_206)) { + EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1])); + } else { + EMIT(PPC_RAW_LWBRX(dst_reg, 0, b2p[TMP_REG_1])); + if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN)) + EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32)); + EMIT(PPC_RAW_LI(b2p[TMP_REG_2], 4)); + EMIT(PPC_RAW_LWBRX(b2p[TMP_REG_2], b2p[TMP_REG_2], b2p[TMP_REG_1])); + if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) + EMIT(PPC_RAW_SLDI(b2p[TMP_REG_2], b2p[TMP_REG_2], 32)); + EMIT(PPC_RAW_OR(dst_reg, dst_reg, b2p[TMP_REG_2])); + } break; } break; diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 91452313489f1ae7f5e990aedae09a5b24788f43..e49aa8fc6a491a972829788d91583e4fbbbc0eb5 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -95,6 +95,7 @@ static unsigned int freeze_events_kernel = MMCR0_FCS; #define SPRN_SIER3 0 #define MMCRA_SAMPLE_ENABLE 0 #define MMCRA_BHRB_DISABLE 0 +#define MMCR0_PMCCEXT 0 static inline unsigned long perf_ip_adjust(struct pt_regs *regs) { @@ -109,10 +110,6 @@ static inline void perf_read_regs(struct pt_regs *regs) { regs->result = 0; } -static inline int perf_intr_is_nmi(struct pt_regs *regs) -{ - return 0; -} static inline int siar_valid(struct pt_regs *regs) { @@ -331,15 +328,6 @@ static inline void perf_read_regs(struct pt_regs *regs) regs->result = use_siar; } -/* - * If interrupts were soft-disabled when a PMU interrupt occurs, treat - * it as an NMI. - */ -static inline int perf_intr_is_nmi(struct pt_regs *regs) -{ - return (regs->softe & IRQS_DISABLED); -} - /* * On processors like P7+ that have the SIAR-Valid bit, marked instructions * must be sampled only if the SIAR-valid bit is set. @@ -817,6 +805,19 @@ static void write_pmc(int idx, unsigned long val) } } +static int any_pmc_overflown(struct cpu_hw_events *cpuhw) +{ + int i, idx; + + for (i = 0; i < cpuhw->n_events; i++) { + idx = cpuhw->event[i]->hw.idx; + if ((idx) && ((int)read_pmc(idx) < 0)) + return idx; + } + + return 0; +} + /* Called from sysrq_handle_showregs() */ void perf_event_print_debug(void) { @@ -1240,11 +1241,16 @@ static void power_pmu_disable(struct pmu *pmu) /* * Set the 'freeze counters' bit, clear EBE/BHRBA/PMCC/PMAO/FC56 + * Also clear PMXE to disable PMI's getting triggered in some + * corner cases during PMU disable. */ val = mmcr0 = mfspr(SPRN_MMCR0); val |= MMCR0_FC; val &= ~(MMCR0_EBE | MMCR0_BHRBA | MMCR0_PMCC | MMCR0_PMAO | - MMCR0_FC56); + MMCR0_PMXE | MMCR0_FC56); + /* Set mmcr0 PMCCEXT for p10 */ + if (ppmu->flags & PPMU_ARCH_31) + val |= MMCR0_PMCCEXT; /* * The barrier is to make sure the mtspr has been @@ -1255,6 +1261,34 @@ static void power_pmu_disable(struct pmu *pmu) mb(); isync(); + /* + * Some corner cases could clear the PMU counter overflow + * while a masked PMI is pending. One such case is when + * a PMI happens during interrupt replay and perf counter + * values are cleared by PMU callbacks before replay. + * + * If any PMC corresponding to the active PMU events are + * overflown, disable the interrupt by clearing the paca + * bit for PMI since we are disabling the PMU now. 
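The BPF JIT hunk above keeps the single LDBRX on CPUs that have it and otherwise builds the 64-bit byte reverse out of two byte-reversed 32-bit loads that are shifted and OR-ed together. The same decomposition in portable C; bswap32()/bswap64() here are local helpers, not the kernel's.

/* --- illustrative sketch, not part of the patch --- */
#include <stdint.h>
#include <stdio.h>

static uint32_t bswap32(uint32_t x)
{
        return (x >> 24) | ((x >> 8) & 0x0000ff00) |
               ((x << 8) & 0x00ff0000) | (x << 24);
}

/* 64-bit byte reverse from two 32-bit reverses: reverse each word, swap the words */
static uint64_t bswap64(uint64_t x)
{
        return ((uint64_t)bswap32((uint32_t)x) << 32) |
               bswap32((uint32_t)(x >> 32));
}

int main(void)
{
        printf("%#llx\n", (unsigned long long)bswap64(0x1122334455667788ULL));
        /* prints 0x8877665544332211 */
        return 0;
}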
+ * Otherwise provide a warning if there is PMI pending, but + * no counter is found overflown. + */ + if (any_pmc_overflown(cpuhw)) { + /* + * Since power_pmu_disable runs under local_irq_save, it + * could happen that code hits a PMC overflow without PMI + * pending in paca. Hence only clear PMI pending if it was + * set. + * + * If a PMI is pending, then MSR[EE] must be disabled (because + * the masked PMI handler disabling EE). So it is safe to + * call clear_pmi_irq_pending(). + */ + if (pmi_irq_pending()) + clear_pmi_irq_pending(); + } else + WARN_ON(pmi_irq_pending()); + val = mmcra = cpuhw->mmcr.mmcra; /* @@ -1346,6 +1380,15 @@ static void power_pmu_enable(struct pmu *pmu) * (possibly updated for removal of events). */ if (!cpuhw->n_added) { + /* + * If there is any active event with an overflown PMC + * value, set back PACA_IRQ_PMI which would have been + * cleared in power_pmu_disable(). + */ + hard_irq_disable(); + if (any_pmc_overflown(cpuhw)) + set_pmi_irq_pending(); + mtspr(SPRN_MMCRA, cpuhw->mmcr.mmcra & ~MMCRA_SAMPLE_ENABLE); mtspr(SPRN_MMCR1, cpuhw->mmcr.mmcr1); if (ppmu->flags & PPMU_ARCH_31) @@ -2250,7 +2293,6 @@ static void __perf_event_interrupt(struct pt_regs *regs) struct perf_event *event; unsigned long val[8]; int found, active; - int nmi; if (cpuhw->n_limited) freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5), @@ -2258,18 +2300,6 @@ static void __perf_event_interrupt(struct pt_regs *regs) perf_read_regs(regs); - /* - * If perf interrupts hit in a local_irq_disable (soft-masked) region, - * we consider them as NMIs. This is required to prevent hash faults on - * user addresses when reading callchains. See the NMI test in - * do_hash_page. - */ - nmi = perf_intr_is_nmi(regs); - if (nmi) - nmi_enter(); - else - irq_enter(); - /* Read all the PMCs since we'll need them a bunch of times */ for (i = 0; i < ppmu->n_counter; ++i) val[i] = read_pmc(i + 1); @@ -2296,6 +2326,14 @@ static void __perf_event_interrupt(struct pt_regs *regs) break; } } + + /* + * Clear PACA_IRQ_PMI in case it was set by + * set_pmi_irq_pending() when PMU was enabled + * after accounting for interrupts. + */ + clear_pmi_irq_pending(); + if (!active) /* reset non active counters that have overflowed */ write_pmc(i + 1, 0); @@ -2315,8 +2353,15 @@ static void __perf_event_interrupt(struct pt_regs *regs) } } } - if (!found && !nmi && printk_ratelimit()) - printk(KERN_WARNING "Can't find PMC that caused IRQ\n"); + + /* + * During system wide profling or while specific CPU is monitored for an + * event, some corner cases could cause PMC to overflow in idle path. This + * will trigger a PMI after waking up from idle. Since counter values are _not_ + * saved/restored in idle path, can lead to below "Can't find PMC" message. + */ + if (unlikely(!found) && !arch_irq_disabled_regs(regs)) + printk_ratelimited(KERN_WARNING "Can't find PMC that caused IRQ\n"); /* * Reset MMCR0 to its normal value. This will set PMXE and @@ -2326,11 +2371,6 @@ static void __perf_event_interrupt(struct pt_regs *regs) * we get back out of this interrupt. 
*/ write_mmcr0(cpuhw, cpuhw->mmcr.mmcr0); - - if (nmi) - nmi_exit(); - else - irq_exit(); } static void perf_event_interrupt(struct pt_regs *regs) diff --git a/arch/powerpc/perf/core-fsl-emb.c b/arch/powerpc/perf/core-fsl-emb.c index e0e7e276bfd25aeecf709ee49399ec87692bcb16..ee721f420a7bac495c3363b74bf78a8c6e0b044e 100644 --- a/arch/powerpc/perf/core-fsl-emb.c +++ b/arch/powerpc/perf/core-fsl-emb.c @@ -31,19 +31,6 @@ static atomic_t num_events; /* Used to avoid races in calling reserve/release_pmc_hardware */ static DEFINE_MUTEX(pmc_reserve_mutex); -/* - * If interrupts were soft-disabled when a PMU interrupt occurs, treat - * it as an NMI. - */ -static inline int perf_intr_is_nmi(struct pt_regs *regs) -{ -#ifdef __powerpc64__ - return (regs->softe & IRQS_DISABLED); -#else - return 0; -#endif -} - static void perf_event_interrupt(struct pt_regs *regs); /* @@ -659,13 +646,6 @@ static void perf_event_interrupt(struct pt_regs *regs) struct perf_event *event; unsigned long val; int found = 0; - int nmi; - - nmi = perf_intr_is_nmi(regs); - if (nmi) - nmi_enter(); - else - irq_enter(); for (i = 0; i < ppmu->n_counter; ++i) { event = cpuhw->event[i]; @@ -690,11 +670,6 @@ static void perf_event_interrupt(struct pt_regs *regs) mtmsr(mfmsr() | MSR_PMM); mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE); isync(); - - if (nmi) - nmi_exit(); - else - irq_exit(); } void hw_perf_event_setup(int cpu) diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c index 5e8eedda45d39fdd48da8ee9ce3b5970348e1f77..58448f0e472132e9fa24163e10b0162120ae83b3 100644 --- a/arch/powerpc/perf/isa207-common.c +++ b/arch/powerpc/perf/isa207-common.c @@ -561,6 +561,14 @@ int isa207_compute_mmcr(u64 event[], int n_ev, if (!(pmc_inuse & 0x60)) mmcr->mmcr0 |= MMCR0_FC56; + /* + * Set mmcr0 (PMCCEXT) for p10 which + * will restrict access to group B registers + * when MMCR0 PMCC=0b00. 
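Several hunks further down (cell/iommu.c, hlwd-pic.c, opal-lpc.c) add an of_node_put() where a device-tree search exits early. The OF lookup and iteration helpers hand back nodes with an extra reference held, so breaking out of a loop without dropping that reference leaks the node. A small user-space model of the reference-count discipline, with get_node()/put_node() as stand-ins for the OF helpers:

#include <stdio.h>

struct node {
        const char *name;
        int refcount;
};

static struct node nodes[] = { { "lpc@0", 1 }, { "lpc@1", 1 } };

static struct node *get_node(int i)     /* the iterator takes a reference */
{
        nodes[i].refcount++;
        return &nodes[i];
}

static void put_node(struct node *n)    /* models of_node_put() */
{
        n->refcount--;
}

int main(void)
{
        for (int i = 0; i < 2; i++) {
                struct node *np = get_node(i);

                if (i == 0) {
                        /* Found what we need: without this put, breaking out
                         * of the loop would leak the reference. */
                        put_node(np);
                        break;
                }
                put_node(np);   /* normally dropped when moving to the next node */
        }

        for (int i = 0; i < 2; i++)
                printf("%s refcount=%d\n", nodes[i].name, nodes[i].refcount);
        return 0;
}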
+ */ + if (cpu_has_feature(CPU_FTR_ARCH_31)) + mmcr->mmcr0 |= MMCR0_PMCCEXT; + mmcr->mmcr1 = mmcr1; mmcr->mmcra = mmcra; mmcr->mmcr2 = mmcr2; diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index 2124831cf57c081e7625be24fc25ea9c0e0ba286..d04079b34d7c2881e9229807111629f28a03fd18 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c @@ -976,6 +976,7 @@ static int __init cell_iommu_fixed_mapping_init(void) if (hbase < dbase || (hend > (dbase + dsize))) { pr_debug("iommu: hash window doesn't fit in" "real DMA window\n"); + of_node_put(np); return -1; } } diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c index 9068edef71f7832c375be25cfb0754e92553b146..59999902e4a6aff631b1ba7efab56efb6080ef23 100644 --- a/arch/powerpc/platforms/cell/pervasive.c +++ b/arch/powerpc/platforms/cell/pervasive.c @@ -77,6 +77,7 @@ static int cbe_system_reset_exception(struct pt_regs *regs) switch (regs->msr & SRR1_WAKEMASK) { case SRR1_WAKEDEC: set_dec(1); + break; case SRR1_WAKEEE: /* * Handle these when interrupts get re-enabled and we take diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c index a1b7f79a8a152eae5e9ae783f9c3d69654719012..de10c13de15c6655e4ca39e433874c789d5704fa 100644 --- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c +++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c @@ -215,6 +215,7 @@ void hlwd_pic_probe(void) irq_set_chained_handler(cascade_virq, hlwd_pic_irq_cascade); hlwd_irq_host = host; + of_node_put(np); break; } } diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c index f77a59b5c2e1a9dcc7a810fc3e112ef03e7b8b32..df89d916236d965595702eef3a5f7a813deae485 100644 --- a/arch/powerpc/platforms/powermac/low_i2c.c +++ b/arch/powerpc/platforms/powermac/low_i2c.c @@ -582,6 +582,7 @@ static void __init kw_i2c_add(struct pmac_i2c_host_kw *host, bus->close = kw_i2c_close; bus->xfer = kw_i2c_xfer; mutex_init(&bus->mutex); + lockdep_register_key(&bus->lock_key); lockdep_set_class(&bus->mutex, &bus->lock_key); if (controller == busnode) bus->flags = pmac_i2c_multibus; @@ -810,6 +811,7 @@ static void __init pmu_i2c_probe(void) bus->hostdata = bus + 1; bus->xfer = pmu_i2c_xfer; mutex_init(&bus->mutex); + lockdep_register_key(&bus->lock_key); lockdep_set_class(&bus->mutex, &bus->lock_key); bus->flags = pmac_i2c_multibus; list_add(&bus->link, &pmac_i2c_busses); @@ -933,6 +935,7 @@ static void __init smu_i2c_probe(void) bus->hostdata = bus + 1; bus->xfer = smu_i2c_xfer; mutex_init(&bus->mutex); + lockdep_register_key(&bus->lock_key); lockdep_set_class(&bus->mutex, &bus->lock_key); bus->flags = 0; list_add(&bus->link, &pmac_i2c_busses); diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c index 608569082ba0bb12e555c0b4b2ad4c392bcdd6ba..123a0e799b7bdbbd3dc507e3a5016f5728eab47a 100644 --- a/arch/powerpc/platforms/powernv/opal-lpc.c +++ b/arch/powerpc/platforms/powernv/opal-lpc.c @@ -396,6 +396,7 @@ void __init opal_lpc_init(void) if (!of_get_property(np, "primary", NULL)) continue; opal_lpc_chip_id = of_get_ibm_chip_id(np); + of_node_put(np); break; } if (opal_lpc_chip_id < 0) diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 5f0d446a2325e746046c0db642ee685ff7b0b920..47dfada140e19d2334ab09505bb6b34a26ad3624 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ 
b/arch/powerpc/platforms/pseries/setup.c @@ -538,6 +538,12 @@ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result) if (!(result->behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)) security_ftr_clear(SEC_FTR_L1D_FLUSH_PR); + if (result->behaviour & H_CPU_BEHAV_NO_L1D_FLUSH_ENTRY) + security_ftr_clear(SEC_FTR_L1D_FLUSH_ENTRY); + + if (result->behaviour & H_CPU_BEHAV_NO_L1D_FLUSH_UACCESS) + security_ftr_clear(SEC_FTR_L1D_FLUSH_UACCESS); + if (!(result->behaviour & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR)) security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR); } diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c index 1e3674d7ea7bce396f791b0558b52597d8f938a5..b57eeaff7bb33395289788d0b88832b0c46ca248 100644 --- a/arch/powerpc/sysdev/xive/spapr.c +++ b/arch/powerpc/sysdev/xive/spapr.c @@ -658,6 +658,9 @@ static int xive_spapr_debug_show(struct seq_file *m, void *private) struct xive_irq_bitmap *xibm; char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + list_for_each_entry(xibm, &xive_irq_bitmaps, list) { memset(buf, 0, PAGE_SIZE); bitmap_print_to_pagebuf(true, buf, xibm->bitmap, xibm->count); diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index 226c366072da3be17be66608b97376cf8bd6a6eb..db9505c658eab386cb680b54cce9f47ef962549c 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -50,6 +50,12 @@ riscv-march-$(CONFIG_ARCH_RV32I) := rv32ima riscv-march-$(CONFIG_ARCH_RV64I) := rv64ima riscv-march-$(CONFIG_FPU) := $(riscv-march-y)fd riscv-march-$(CONFIG_RISCV_ISA_C) := $(riscv-march-y)c + +# Newer binutils versions default to ISA spec version 20191213 which moves some +# instructions from the I extension to the Zicsr and Zifencei extensions. +toolchain-need-zicsr-zifencei := $(call cc-option-yn, -march=$(riscv-march-y)_zicsr_zifencei) +riscv-march-$(toolchain-need-zicsr-zifencei) := $(riscv-march-y)_zicsr_zifencei + KBUILD_CFLAGS += -march=$(subst fd,,$(riscv-march-y)) KBUILD_AFLAGS += -march=$(riscv-march-y) diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index 62de075fc60c0a617b7230e7063a6f44e0917824..bc49d5f2302b605ca72e28d43b71f35259ec8996 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -44,6 +44,8 @@ obj-$(CONFIG_MODULE_SECTIONS) += module-sections.o obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o obj-$(CONFIG_DYNAMIC_FTRACE) += mcount-dyn.o +obj-$(CONFIG_TRACE_IRQFLAGS) += trace_irq.o + obj-$(CONFIG_RISCV_BASE_PMU) += perf_event.o obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index 76274a4a1d8e6596dbf3069df999847a94238a7e..5214c578a602392d7af7f2d741096ca534b9afae 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S @@ -98,7 +98,7 @@ _save_context: .option pop #ifdef CONFIG_TRACE_IRQFLAGS - call trace_hardirqs_off + call __trace_hardirqs_off #endif #ifdef CONFIG_CONTEXT_TRACKING @@ -131,7 +131,7 @@ skip_context_tracking: andi t0, s1, SR_PIE beqz t0, 1f #ifdef CONFIG_TRACE_IRQFLAGS - call trace_hardirqs_on + call __trace_hardirqs_on #endif csrs CSR_STATUS, SR_IE @@ -222,7 +222,7 @@ ret_from_exception: REG_L s0, PT_STATUS(sp) csrc CSR_STATUS, SR_IE #ifdef CONFIG_TRACE_IRQFLAGS - call trace_hardirqs_off + call __trace_hardirqs_off #endif #ifdef CONFIG_RISCV_M_MODE /* the MPP value is too large to be used as an immediate arg for addi */ @@ -258,10 +258,10 @@ restore_all: REG_L s1, PT_STATUS(sp) andi t0, s1, SR_PIE beqz t0, 1f - call trace_hardirqs_on + call 
__trace_hardirqs_on j 2f 1: - call trace_hardirqs_off + call __trace_hardirqs_off 2: #endif REG_L a0, PT_STATUS(sp) diff --git a/arch/riscv/kernel/perf_callchain.c b/arch/riscv/kernel/perf_callchain.c index cf190197a22f6556b2b05fe686589b3442e7a090..ad3001cbdf6186778e09d33f285070c5d3f06ad9 100644 --- a/arch/riscv/kernel/perf_callchain.c +++ b/arch/riscv/kernel/perf_callchain.c @@ -60,10 +60,11 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry, void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); unsigned long fp = 0; /* RISC-V does not support perf in guest mode. */ - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) + if (guest_cbs && guest_cbs->is_in_guest()) return; fp = regs->s0; @@ -84,8 +85,10 @@ void notrace walk_stackframe(struct task_struct *task, void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); + /* RISC-V does not support perf in guest mode. */ - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { + if (guest_cbs && guest_cbs->is_in_guest()) { pr_warn("RISC-V does not support perf in guest mode!"); return; } diff --git a/arch/riscv/kernel/trace_irq.c b/arch/riscv/kernel/trace_irq.c new file mode 100644 index 0000000000000000000000000000000000000000..095ac976d7da1092cbbec8d1a2c11282236d7c36 --- /dev/null +++ b/arch/riscv/kernel/trace_irq.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Changbin Du + */ + +#include +#include +#include "trace_irq.h" + +/* + * trace_hardirqs_on/off require the caller to set up the frame pointer properly. + * Otherwise, CALLER_ADDR1 might trigger a paging exception in the kernel. + * Here we add one extra level so they can be safely called by low + * level entry code whose $fp is used for other purposes.
+ */ + +void __trace_hardirqs_on(void) +{ + trace_hardirqs_on(); +} +NOKPROBE_SYMBOL(__trace_hardirqs_on); + +void __trace_hardirqs_off(void) +{ + trace_hardirqs_off(); +} +NOKPROBE_SYMBOL(__trace_hardirqs_off); diff --git a/arch/riscv/kernel/trace_irq.h b/arch/riscv/kernel/trace_irq.h new file mode 100644 index 0000000000000000000000000000000000000000..99fe67377e5ed6795b1a45bf6b2a0d8af6fef41d --- /dev/null +++ b/arch/riscv/kernel/trace_irq.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2022 Changbin Du + */ +#ifndef __TRACE_IRQ_H +#define __TRACE_IRQ_H + +void __trace_hardirqs_on(void); +void __trace_hardirqs_off(void); + +#endif /* __TRACE_IRQ_H */ diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c index e1fcc03159ef25c42e90fa55e072f9914602ee5c..a927adccb4ba7f81b08cb6f24e7ec3c48a6aa60d 100644 --- a/arch/s390/hypfs/hypfs_vm.c +++ b/arch/s390/hypfs/hypfs_vm.c @@ -20,6 +20,7 @@ static char local_guest[] = " "; static char all_guests[] = "* "; +static char *all_groups = all_guests; static char *guest_query; struct diag2fc_data { @@ -62,10 +63,11 @@ static int diag2fc(int size, char* query, void *addr) memcpy(parm_list.userid, query, NAME_LEN); ASCEBC(parm_list.userid, NAME_LEN); - parm_list.addr = (unsigned long) addr ; + memcpy(parm_list.aci_grp, all_groups, NAME_LEN); + ASCEBC(parm_list.aci_grp, NAME_LEN); + parm_list.addr = (unsigned long)addr; parm_list.size = size; parm_list.fmt = 0x02; - memset(parm_list.aci_grp, 0x40, NAME_LEN); rc = -1; diag_stat_inc(DIAG_STAT_X2FC); diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 4055f1c4981472b049f9029e49f53bbddc3cdb6e..b81bc96216b972274a9ec386998a74310b6e3ec8 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c @@ -30,7 +30,7 @@ #define DEBUGP(fmt , ...) #endif -#define PLT_ENTRY_SIZE 20 +#define PLT_ENTRY_SIZE 22 void *module_alloc(unsigned long size) { @@ -330,27 +330,26 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, case R_390_PLTOFF32: /* 32 bit offset from GOT to PLT. */ case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. 
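The PLT_ENTRY_SIZE bump from 20 to 22 above matches the entry layout rebuilt in the apply_rela() hunk just below: in the expoline case it is five raw stores at fixed offsets into a 22-byte slot. A hypothetical packed-struct view of that layout, useful only as an offset map and not how the kernel actually assembles the entry:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct __attribute__((packed)) plt_entry {
        uint32_t word0;         /* offset  0: basr 1,0 plus the start of lg      */
        uint32_t word1;         /* offset  4: remainder of lg 1,12(1)            */
        uint16_t brcl_op;       /* offset  8: brcl 0xf,... opcode (expoline case) */
        uint32_t brcl_disp;     /* offset 10: halfword displacement to __jump_r1 */
        uint64_t target;        /* offset 14: 64-bit branch target (val)         */
};

_Static_assert(sizeof(struct plt_entry) == 22, "matches PLT_ENTRY_SIZE");

int main(void)
{
        printf("target at offset %zu, total %zu bytes\n",
               offsetof(struct plt_entry, target), sizeof(struct plt_entry));
        return 0;
}

In the non-expoline case the same slot holds a single br %r1 word across offsets 8-11 instead of the brcl pair.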
*/ if (info->plt_initialized == 0) { - unsigned int insn[5]; - unsigned int *ip = me->core_layout.base + - me->arch.plt_offset + - info->plt_offset; - - insn[0] = 0x0d10e310; /* basr 1,0 */ - insn[1] = 0x100a0004; /* lg 1,10(1) */ + unsigned char insn[PLT_ENTRY_SIZE]; + char *plt_base; + char *ip; + + plt_base = me->core_layout.base + me->arch.plt_offset; + ip = plt_base + info->plt_offset; + *(int *)insn = 0x0d10e310; /* basr 1,0 */ + *(int *)&insn[4] = 0x100c0004; /* lg 1,12(1) */ if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) { - unsigned int *ij; - ij = me->core_layout.base + - me->arch.plt_offset + - me->arch.plt_size - PLT_ENTRY_SIZE; - insn[2] = 0xa7f40000 + /* j __jump_r1 */ - (unsigned int)(u16) - (((unsigned long) ij - 8 - - (unsigned long) ip) / 2); + char *jump_r1; + + jump_r1 = plt_base + me->arch.plt_size - + PLT_ENTRY_SIZE; + /* brcl 0xf,__jump_r1 */ + *(short *)&insn[8] = 0xc0f4; + *(int *)&insn[10] = (jump_r1 - (ip + 8)) / 2; } else { - insn[2] = 0x07f10000; /* br %r1 */ + *(int *)&insn[8] = 0x07f10000; /* br %r1 */ } - insn[3] = (unsigned int) (val >> 32); - insn[4] = (unsigned int) val; + *(long *)&insn[14] = val; write(ip, insn, sizeof(insn)); info->plt_initialized = 1; diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index e6c4f29fc6956ff7421d9bffbcff634321b098d1..b51ab19eb9721479bb05718895e1a159fa763fef 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -2115,6 +2115,13 @@ int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu) return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs); } +int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu) +{ + struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; + + return test_bit(IRQ_PEND_RESTART, &li->pending_irqs); +} + void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 00f03f363c9b0b848a83174b5074df5c52acf47c..d8e9239c24ffc62c91a0f280fe5bddd89029a664 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -4588,10 +4588,15 @@ int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) } } - /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */ + /* + * Set the VCPU to STOPPED and THEN clear the interrupt flag, + * now that the SIGP STOP and SIGP STOP AND STORE STATUS orders + * have been fully processed. This will ensure that the VCPU + * is kept BUSY if another VCPU is inquiring with SIGP SENSE. 
+ */ + kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED); kvm_s390_clear_stop_irq(vcpu); - kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED); __disable_ibs_on_vcpu(vcpu); for (i = 0; i < online_vcpus; i++) { @@ -4649,6 +4654,8 @@ static long kvm_s390_guest_sida_op(struct kvm_vcpu *vcpu, return -EINVAL; if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block)) return -E2BIG; + if (!kvm_s390_pv_cpu_is_protected(vcpu)) + return -EINVAL; switch (mop->op) { case KVM_S390_MEMOP_SIDA_READ: diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 2d134833bca6964c0464fdcfe8f748fa636c1858..a3e9b71d426f9313ac3764c034c0e8aa85293480 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -418,6 +418,7 @@ void kvm_s390_destroy_adapters(struct kvm *kvm); int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu); extern struct kvm_device_ops kvm_flic_ops; int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu); +int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu); void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu); int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *buf, int len); diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 683036c1c92a8f9428622c8ba5b897b86cd3e6ae..3dc921e853b6e26e8cc35a9d39f29cc655216e7f 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -288,6 +288,34 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code, if (!dst_vcpu) return SIGP_CC_NOT_OPERATIONAL; + /* + * SIGP RESTART, SIGP STOP, and SIGP STOP AND STORE STATUS orders + * are processed asynchronously. Until the affected VCPU finishes + * its work and calls back into KVM to clear the (RESTART or STOP) + * interrupt, we need to return any new non-reset orders "busy". + * + * This is important because a single VCPU could issue: + * 1) SIGP STOP $DESTINATION + * 2) SIGP SENSE $DESTINATION + * + * If the SIGP SENSE would not be rejected as "busy", it could + * return an incorrect answer as to whether the VCPU is STOPPED + * or OPERATING. + */ + if (order_code != SIGP_INITIAL_CPU_RESET && + order_code != SIGP_CPU_RESET) { + /* + * Lockless check. Both SIGP STOP and SIGP (RE)START + * properly synchronize everything while processing + * their orders, while the guest cannot observe a + * difference when issuing other orders from two + * different VCPUs. 
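Together with the ordering change above (mark the VCPU STOPPED first, then clear the stop interrupt), the check completed just below makes the window visible to other VCPUs well defined: while a STOP or RESTART is still pending on the target, any new non-reset order is answered with BUSY rather than a possibly stale result. A compact user-space model of that rule, not the KVM API:

#include <stdbool.h>
#include <stdio.h>

enum sigp_cc { SIGP_CC_ORDER_ACCEPTED, SIGP_CC_BUSY };

struct vcpu {
        bool stop_pending;      /* models kvm_s390_is_stop_irq_pending()    */
        bool restart_pending;   /* models kvm_s390_is_restart_irq_pending() */
        bool stopped;           /* models CPUSTAT_STOPPED                   */
};

static enum sigp_cc sigp_sense(const struct vcpu *dst)
{
        if (dst->stop_pending || dst->restart_pending)
                return SIGP_CC_BUSY;            /* async order still in flight */
        printf("SENSE: target is %s\n", dst->stopped ? "stopped" : "operating");
        return SIGP_CC_ORDER_ACCEPTED;
}

int main(void)
{
        struct vcpu dst = { .stop_pending = true };

        if (sigp_sense(&dst) == SIGP_CC_BUSY)
                printf("SENSE: busy, caller retries\n");

        /* Target finishes the STOP: set STOPPED first, then clear the flag. */
        dst.stopped = true;
        dst.stop_pending = false;

        sigp_sense(&dst);
        return 0;
}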
+ */ + if (kvm_s390_is_stop_irq_pending(dst_vcpu) || + kvm_s390_is_restart_irq_pending(dst_vcpu)) + return SIGP_CC_BUSY; + } + switch (order_code) { case SIGP_SENSE: vcpu->stat.instruction_sigp_sense++; diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index 11d2c8395e2ae92fdc2ac8c94283da9cada12cfc..6d99b1be0082f933c7749de801634585e782507d 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c @@ -253,13 +253,15 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) /* Free 2K page table fragment of a 4K page */ bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)); spin_lock_bh(&mm->context.lock); - mask = atomic_xor_bits(&page->_refcount, 1U << (bit + 24)); + mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24)); mask >>= 24; if (mask & 3) list_add(&page->lru, &mm->context.pgtable_list); else list_del(&page->lru); spin_unlock_bh(&mm->context.lock); + mask = atomic_xor_bits(&page->_refcount, 0x10U << (bit + 24)); + mask >>= 24; if (mask != 0) return; } else { diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c index d11b3d41c378552b8890c305ad1437096bca838f..d5d768188b3ba3f19edafc091ee1fe8bc672c849 100644 --- a/arch/um/drivers/virtio_uml.c +++ b/arch/um/drivers/virtio_uml.c @@ -1076,6 +1076,8 @@ static void virtio_uml_release_dev(struct device *d) container_of(d, struct virtio_device, dev); struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev); + time_travel_propagate_time(); + /* might not have been opened due to not negotiating the feature */ if (vu_dev->req_fd >= 0) { um_free_irq(VIRTIO_IRQ, vu_dev); @@ -1109,6 +1111,8 @@ static int virtio_uml_probe(struct platform_device *pdev) vu_dev->pdev = pdev; vu_dev->req_fd = -1; + time_travel_propagate_time(); + do { rc = os_connect_socket(pdata->socket_path); } while (rc == -EINTR); diff --git a/arch/um/include/asm/delay.h b/arch/um/include/asm/delay.h index 56fc2b8f2dd019ec8b75065f2a7f2c791c0c2d7a..e79b2ab6f40c81d6afd54391dbdfabaed82cdaaf 100644 --- a/arch/um/include/asm/delay.h +++ b/arch/um/include/asm/delay.h @@ -14,7 +14,7 @@ static inline void um_ndelay(unsigned long nsecs) ndelay(nsecs); } #undef ndelay -#define ndelay um_ndelay +#define ndelay(n) um_ndelay(n) static inline void um_udelay(unsigned long usecs) { @@ -26,5 +26,5 @@ static inline void um_udelay(unsigned long usecs) udelay(usecs); } #undef udelay -#define udelay um_udelay +#define udelay(n) um_udelay(n) #endif /* __UM_DELAY_H */ diff --git a/arch/um/include/shared/registers.h b/arch/um/include/shared/registers.h index 0c50fa6e8a55b7330f43027c4dea3170e5173d72..fbb709a222839132cec2a2bbf16c4467d88450e6 100644 --- a/arch/um/include/shared/registers.h +++ b/arch/um/include/shared/registers.h @@ -16,8 +16,8 @@ extern int restore_fp_registers(int pid, unsigned long *fp_regs); extern int save_fpx_registers(int pid, unsigned long *fp_regs); extern int restore_fpx_registers(int pid, unsigned long *fp_regs); extern int save_registers(int pid, struct uml_pt_regs *regs); -extern int restore_registers(int pid, struct uml_pt_regs *regs); -extern int init_registers(int pid); +extern int restore_pid_registers(int pid, struct uml_pt_regs *regs); +extern int init_pid_registers(int pid); extern void get_safe_registers(unsigned long *regs, unsigned long *fp_regs); extern unsigned long get_thread_reg(int reg, jmp_buf *buf); extern int get_fp_registers(int pid, unsigned long *regs); diff --git a/arch/um/os-Linux/registers.c b/arch/um/os-Linux/registers.c index 
2d9270508e1565620e418f0134bf7573474351eb..b123955be7accf01958684dbbd2fea0ef8130202 100644 --- a/arch/um/os-Linux/registers.c +++ b/arch/um/os-Linux/registers.c @@ -21,7 +21,7 @@ int save_registers(int pid, struct uml_pt_regs *regs) return 0; } -int restore_registers(int pid, struct uml_pt_regs *regs) +int restore_pid_registers(int pid, struct uml_pt_regs *regs) { int err; @@ -36,7 +36,7 @@ int restore_registers(int pid, struct uml_pt_regs *regs) static unsigned long exec_regs[MAX_REG_NR]; static unsigned long exec_fp_regs[FP_SIZE]; -int init_registers(int pid) +int init_pid_registers(int pid) { int err; diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c index f79dc338279e65e09653e23c1ac7169bf26076ff..b28373a2b8d2d035c7f96d374c50f9b479250459 100644 --- a/arch/um/os-Linux/start_up.c +++ b/arch/um/os-Linux/start_up.c @@ -336,7 +336,7 @@ void __init os_early_checks(void) check_tmpexec(); pid = start_ptraced_child(); - if (init_registers(pid)) + if (init_pid_registers(pid)) fatal("Failed to initialize default registers"); stop_ptraced_child(pid, 1, 1); } diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 6004047d25fdda7135884cb5f85363caab72d8b4..bf91e0a36d77fa1fb3882e247633873ed18a6bc4 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -28,7 +28,11 @@ KCOV_INSTRUMENT := n targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \ vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4 vmlinux.bin.zst -KBUILD_CFLAGS := -m$(BITS) -O2 +# CLANG_FLAGS must come before any cc-disable-warning or cc-option calls in +# case of cross compiling, as it has the '--target=' flag, which is needed to +# avoid errors with '-march=i386', and future flags may depend on the target to +# be valid. +KBUILD_CFLAGS := -m$(BITS) -O2 $(CLANG_FLAGS) KBUILD_CFLAGS += -fno-strict-aliasing -fPIE KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING cflags-$(CONFIG_X86_32) := -march=i386 @@ -46,7 +50,6 @@ KBUILD_CFLAGS += -D__DISABLE_EXPORTS # Disable relocation relaxation in case the link is not PIE. KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no) KBUILD_CFLAGS += -include $(srctree)/include/linux/hidden.h -KBUILD_CFLAGS += $(CLANG_FLAGS) # sev-es.c indirectly inludes inat-table.h which is generated during # compilation and stored in $(objtree). 
Add the directory to the includes so diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig index 78210793d357cf027b8a44419ec3b9447bdad98a..38d7acb9610cc46ae24f40c9d59e8ae24e2aac74 100644 --- a/arch/x86/configs/i386_defconfig +++ b/arch/x86/configs/i386_defconfig @@ -264,3 +264,4 @@ CONFIG_BLK_DEV_IO_TRACE=y CONFIG_PROVIDE_OHCI1394_DMA_INIT=y CONFIG_EARLY_PRINTK_DBGP=y CONFIG_DEBUG_BOOT_PARAMS=y +CONFIG_KALLSYMS_ALL=y diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index 9936528e19393afad0f87a89196e43dd2c52e171..c6e587a9a6f850b9b4539652565a8b4e70cc9c50 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig @@ -260,3 +260,4 @@ CONFIG_BLK_DEV_IO_TRACE=y CONFIG_PROVIDE_OHCI1394_DMA_INIT=y CONFIG_EARLY_PRINTK_DBGP=y CONFIG_DEBUG_BOOT_PARAMS=y +CONFIG_KALLSYMS_ALL=y diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 1f5d96ba4866d811247e582e329ab3135034a50b..b79b9f21cbb3be91ecd35838c2ef2b704b21a780 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -2545,10 +2545,11 @@ static bool perf_hw_regs(struct pt_regs *regs) void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); struct unwind_state state; unsigned long addr; - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { + if (guest_cbs && guest_cbs->is_in_guest()) { /* TODO: We don't support guest os callchain now */ return; } @@ -2648,10 +2649,11 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); struct stack_frame frame; const struct stack_frame __user *fp; - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { + if (guest_cbs && guest_cbs->is_in_guest()) { /* TODO: We don't support guest os callchain now */ return; } @@ -2728,18 +2730,21 @@ static unsigned long code_segment_base(struct pt_regs *regs) unsigned long perf_instruction_pointer(struct pt_regs *regs) { - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) - return perf_guest_cbs->get_guest_ip(); + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); + + if (guest_cbs && guest_cbs->is_in_guest()) + return guest_cbs->get_guest_ip(); return regs->ip + code_segment_base(regs); } unsigned long perf_misc_flags(struct pt_regs *regs) { + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); int misc = 0; - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { - if (perf_guest_cbs->is_user_mode()) + if (guest_cbs && guest_cbs->is_in_guest()) { + if (guest_cbs->is_user_mode()) misc |= PERF_RECORD_MISC_GUEST_USER; else misc |= PERF_RECORD_MISC_GUEST_KERNEL; diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index a521135247eb64513fef5762caa3400603c8cb3a..5ba13b00e3a714108b8dca51b5ac106d9e7298bc 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2586,6 +2586,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) { struct perf_sample_data data; struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct perf_guest_info_callbacks *guest_cbs; int bit; int handled = 0; @@ -2651,9 +2652,11 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) */ if (__test_and_clear_bit(GLOBAL_STATUS_TRACE_TOPAPMI_BIT, (unsigned long *)&status)) { handled++; - if 
(unlikely(perf_guest_cbs && perf_guest_cbs->is_in_guest() && - perf_guest_cbs->handle_intel_pt_intr)) - perf_guest_cbs->handle_intel_pt_intr(); + + guest_cbs = perf_get_guest_cbs(); + if (unlikely(guest_cbs && guest_cbs->is_in_guest() && + guest_cbs->handle_intel_pt_intr)) + guest_cbs->handle_intel_pt_intr(); else intel_pt_interrupt(); } @@ -4350,6 +4353,19 @@ static __initconst const struct x86_pmu intel_pmu = { .lbr_read = intel_pmu_lbr_read_64, .lbr_save = intel_pmu_lbr_save, .lbr_restore = intel_pmu_lbr_restore, + + /* + * SMM has access to all 4 rings and while traditionally SMM code only + * ran in CPL0, 2021-era firmware is starting to make use of CPL3 in SMM. + * + * Since the EVENTSEL.{USR,OS} CPL filtering makes no distinction + * between SMM or not, this results in what should be pure userspace + * counters including SMM data. + * + * This is a clear privilege issue, therefore globally disable + * counting SMM by default. + */ + .attr_freeze_on_smi = 1, }; static __init void intel_clovertown_quirk(void) diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index 9c1a013d5682297885704ee425127db6d464b741..bd8516e6c353c75c524df1b8ed84733c61407890 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -1734,6 +1734,9 @@ static bool is_arch_lbr_xsave_available(void) * Check the LBR state with the corresponding software structure. * Disable LBR XSAVES support if the size doesn't match. */ + if (xfeature_size(XFEATURE_LBR) == 0) + return false; + if (WARN_ON(xfeature_size(XFEATURE_LBR) != get_lbr_state_size())) return false; diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index 37129b76135a14b55a9105da3c5cedcb06a20f67..c084899e9582546d96cb49997e936ef1a8d89939 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c @@ -897,8 +897,9 @@ static void pt_handle_status(struct pt *pt) * means we are already losing data; need to let the decoder * know. */ - if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) || - buf->output_off == pt_buffer_region_size(buf)) { + if (!buf->single && + (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) || + buf->output_off == pt_buffer_region_size(buf))) { perf_aux_output_flag(&pt->handle, PERF_AUX_FLAG_TRUNCATED); advance++; diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index ba26792d96731ae9f441086bba1a9be7e8241516..03c8047bebb38f83cf83f07ce412f5053e1eba14 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -5239,7 +5239,7 @@ static struct intel_uncore_type icx_uncore_imc = { .fixed_ctr_bits = 48, .fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR, .fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL, - .event_descs = hswep_uncore_imc_events, + .event_descs = snr_uncore_imc_events, .perf_ctr = SNR_IMC_MMIO_PMON_CTR0, .event_ctl = SNR_IMC_MMIO_PMON_CTL0, .event_mask = SNBEP_PMON_RAW_EVENT_MASK, diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h index 4e5af2b00d89ba9651d3e5a84ef754ce96b68ce1..70b9bc5403c5e175081d3ba7c351af1b7ffa78d5 100644 --- a/arch/x86/include/asm/fpu/internal.h +++ b/arch/x86/include/asm/fpu/internal.h @@ -531,9 +531,11 @@ static inline void __fpregs_load_activate(void) * The FPU context is only stored/restored for a user task and * PF_KTHREAD is used to distinguish between kernel and user threads. 
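In the switch_fpu_finish() hunk that follows, the PKRU value to load is chosen from the incoming task by the same kernel-vs-user distinction: kernel threads keep the init value, while user tasks get their saved value, or 0 if the PKRU component was still in its init state. A small user-space model of that selection; the init value and field names here are illustrative stand-ins, not the kernel's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define INIT_PKRU_VALUE 0x55555554u     /* assumed default: only pkey 0 usable */

struct task {
        bool kthread;           /* models PF_KTHREAD                 */
        bool pkru_saved;        /* models get_xsave_addr() != NULL   */
        uint32_t saved_pkru;    /* models pk->pkru                   */
};

static uint32_t pick_pkru(const struct task *next)
{
        if (next->kthread)
                return INIT_PKRU_VALUE;
        return next->pkru_saved ? next->saved_pkru : 0;
}

int main(void)
{
        struct task kworker = { .kthread = true };
        struct task app = { .pkru_saved = true, .saved_pkru = 0x0000000cu };

        printf("kworker PKRU=%#x\n", (unsigned int)pick_pkru(&kworker));
        printf("app     PKRU=%#x\n", (unsigned int)pick_pkru(&app));
        return 0;
}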
*/ -static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu) +static inline void switch_fpu_prepare(struct task_struct *prev, int cpu) { - if (static_cpu_has(X86_FEATURE_FPU) && !(current->flags & PF_KTHREAD)) { + struct fpu *old_fpu = &prev->thread.fpu; + + if (static_cpu_has(X86_FEATURE_FPU) && !(prev->flags & PF_KTHREAD)) { if (!copy_fpregs_to_fpstate(old_fpu)) old_fpu->last_cpu = -1; else @@ -552,10 +554,11 @@ static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu) * Load PKRU from the FPU context if available. Delay loading of the * complete FPU state until the return to userland. */ -static inline void switch_fpu_finish(struct fpu *new_fpu) +static inline void switch_fpu_finish(struct task_struct *next) { u32 pkru_val = init_pkru_value; struct pkru_state *pk; + struct fpu *next_fpu = &next->thread.fpu; if (!static_cpu_has(X86_FEATURE_FPU)) return; @@ -569,7 +572,7 @@ static inline void switch_fpu_finish(struct fpu *new_fpu) * PKRU state is switched eagerly because it needs to be valid before we * return to userland e.g. for a copy_to_user() operation. */ - if (!(current->flags & PF_KTHREAD)) { + if (!(next->flags & PF_KTHREAD)) { /* * If the PKRU bit in xsave.header.xfeatures is not set, * then the PKRU component was in init state, which means @@ -578,7 +581,7 @@ static inline void switch_fpu_finish(struct fpu *new_fpu) * in memory is not valid. This means pkru_val has to be * set to 0 and not to init_pkru_value. */ - pk = get_xsave_addr(&new_fpu->state.xsave, XFEATURE_PKRU); + pk = get_xsave_addr(&next_fpu->state.xsave, XFEATURE_PKRU); pkru_val = pk ? pk->pkru : 0; } __write_pkru(pkru_val); diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index e8fb4b0394af23ae19625b38589639b17d296e75..0eb41dce55da392b79a168d398943bdbc6614570 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1285,6 +1285,7 @@ struct kvm_x86_ops { }; struct kvm_x86_nested_ops { + void (*leave_nested)(struct kvm_vcpu *vcpu); int (*check_events)(struct kvm_vcpu *vcpu); bool (*hv_timer_pending)(struct kvm_vcpu *vcpu); int (*get_state)(struct kvm_vcpu *vcpu, @@ -1306,6 +1307,7 @@ struct kvm_x86_init_ops { int (*disabled_by_bios)(void); int (*check_processor_compatibility)(void); int (*hardware_setup)(void); + bool (*intel_pt_intr_in_guest)(void); struct kvm_x86_ops *runtime_ops; }; diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h index 5db5d083c87322ec4913252d1c22752b85d517f4..331474b150f16964e1ba8788e6a623d0214aee62 100644 --- a/arch/x86/include/asm/realmode.h +++ b/arch/x86/include/asm/realmode.h @@ -89,6 +89,7 @@ static inline void set_real_mode_mem(phys_addr_t mem) } void reserve_real_mode(void); +void load_trampoline_pgtable(void); #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 5c95d242f38d708e313f39cc781fa8455fdd16ed..bb1430283c726c1fa5791b56ca88bee298200b94 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -314,11 +314,12 @@ do { \ do { \ __chk_user_ptr(ptr); \ switch (size) { \ - unsigned char x_u8__; \ - case 1: \ + case 1: { \ + unsigned char x_u8__; \ __get_user_asm(x_u8__, ptr, "b", "=q", label); \ (x) = x_u8__; \ break; \ + } \ case 2: \ __get_user_asm(x, ptr, "w", "=r", label); \ break; \ diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c index 0c6b02dd744c1424c3235035aab439d2f1d0d1b8..f73f1184b1c13d9b2ef62fecda27b85138f10869 100644 --- a/arch/x86/kernel/cpu/mce/amd.c +++ 
b/arch/x86/kernel/cpu/mce/amd.c @@ -387,7 +387,7 @@ static void threshold_restart_bank(void *_tr) u32 hi, lo; /* sysfs write might race against an offline operation */ - if (this_cpu_read(threshold_banks)) + if (!this_cpu_read(threshold_banks) && !tr->set_lvt_off) return; rdmsr(tr->b->address, lo, hi); diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index 14b34963eb1f7f165ee59337f6f6f6a44e4ea0e4..5cf1a024408bf948a3671424f96a9945945970db 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -295,11 +295,17 @@ static void wait_for_panic(void) panic("Panicing machine check CPU died"); } -static void mce_panic(const char *msg, struct mce *final, char *exp) +static noinstr void mce_panic(const char *msg, struct mce *final, char *exp) { - int apei_err = 0; struct llist_node *pending; struct mce_evt_llist *l; + int apei_err = 0; + + /* + * Allow instrumentation around external facilities usage. Not that it + * matters a whole lot since the machine is going to panic anyway. + */ + instrumentation_begin(); if (!fake_panic) { /* @@ -314,7 +320,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp) } else { /* Don't log too much for fake panic */ if (atomic_inc_return(&mce_fake_panicked) > 1) - return; + goto out; } pending = mce_gen_pool_prepare_records(); /* First print corrected ones that are still unlogged */ @@ -352,6 +358,9 @@ static void mce_panic(const char *msg, struct mce *final, char *exp) panic(msg); } else pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg); + +out: + instrumentation_end(); } /* Support code for software error injection */ @@ -682,7 +691,7 @@ static struct notifier_block mce_default_nb = { /* * Read ADDR and MISC registers. */ -static void mce_read_aux(struct mce *m, int i) +static noinstr void mce_read_aux(struct mce *m, int i) { if (m->status & MCI_STATUS_MISCV) m->misc = mce_rdmsrl(msr_ops.misc(i)); @@ -1061,10 +1070,13 @@ static int mce_start(int *no_way_out) * Synchronize between CPUs after main scanning loop. * This invokes the bulk of the Monarch processing. */ -static int mce_end(int order) +static noinstr int mce_end(int order) { - int ret = -1; u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC; + int ret = -1; + + /* Allow instrumentation around external facilities. */ + instrumentation_begin(); if (!timeout) goto reset; @@ -1108,7 +1120,8 @@ static int mce_end(int order) /* * Don't reset anything. That's done by the Monarch. */ - return 0; + ret = 0; + goto out; } /* @@ -1123,6 +1136,10 @@ reset: * Let others run again. */ atomic_set(&mce_executing, 0); + +out: + instrumentation_end(); + return ret; } @@ -1443,6 +1460,14 @@ noinstr void do_machine_check(struct pt_regs *regs) if (worst != MCE_AR_SEVERITY && !kill_it) goto out; + /* + * Enable instrumentation around the external facilities like + * task_work_add() (via queue_task_work()), fixup_exception() etc. + * For now, that is. Fixing this properly would need a lot more involved + * reorganization. + */ + instrumentation_begin(); + /* Fault was in user mode and we need to take some action */ if ((m.cs & 3) == 3) { /* If this triggers there is no way to recover. Die hard. 
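mce_panic() and mce_end() above follow a common pattern for machine-check code: the function itself is noinstr, and only the stretch that calls out to instrumentable facilities is bracketed with instrumentation_begin()/instrumentation_end(), with a single exit path keeping the bracketing balanced. A sketch of that shape with no-op stand-ins for the kernel annotations (which only matter to objtool):

#include <stdio.h>

/* Stand-ins for the kernel annotations; in the kernel they affect sections
 * and objtool checking, here they are deliberately empty. */
#define noinstr
#define instrumentation_begin() ((void)0)
#define instrumentation_end()   ((void)0)

static void external_facility(const char *msg)
{
        fprintf(stderr, "%s\n", msg);   /* think printk()/panic() */
}

static noinstr void handle_fatal_error(int fake)
{
        instrumentation_begin();        /* everything below may be instrumented */

        if (fake) {
                external_facility("fake panic");
                goto out;               /* single exit keeps begin/end balanced */
        }
        external_facility("real panic");
out:
        instrumentation_end();
}

int main(void)
{
        handle_fatal_error(1);
        return 0;
}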
*/ @@ -1468,6 +1493,9 @@ noinstr void do_machine_check(struct pt_regs *regs) if (m.kflags & MCE_IN_KERNEL_COPYIN) queue_task_work(&m, msg, kill_it); } + + instrumentation_end(); + out: mce_wrmsrl(MSR_IA32_MCG_STATUS, 0); } diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c index 3a44346f2276601a06d9aa5a5aae4b78c48b2418..e7808309d471073d9e3b29dbec5291a01376e451 100644 --- a/arch/x86/kernel/cpu/mce/inject.c +++ b/arch/x86/kernel/cpu/mce/inject.c @@ -347,7 +347,7 @@ static ssize_t flags_write(struct file *filp, const char __user *ubuf, char buf[MAX_FLAG_OPT_SIZE], *__buf; int err; - if (cnt > MAX_FLAG_OPT_SIZE) + if (!cnt || cnt > MAX_FLAG_OPT_SIZE) return -EINVAL; if (copy_from_user(&buf, ubuf, cnt)) diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c index 2577d787578102c5175ee6c8c4776dc957abf2bf..886d4648c9dd429ac6dcd06ae57f93b90035ab9e 100644 --- a/arch/x86/kernel/cpu/mce/intel.c +++ b/arch/x86/kernel/cpu/mce/intel.c @@ -486,6 +486,8 @@ static void intel_ppin_init(struct cpuinfo_x86 *c) case INTEL_FAM6_BROADWELL_X: case INTEL_FAM6_SKYLAKE_X: case INTEL_FAM6_ICELAKE_X: + case INTEL_FAM6_ICELAKE_D: + case INTEL_FAM6_SAPPHIRERAPIDS_X: case INTEL_FAM6_XEON_PHI_KNL: case INTEL_FAM6_XEON_PHI_KNM: diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 0c6d1dc59fa21c1156dd398f309c9164909f3a62..8e27cbefaa4bf1e33420601adebf9815cc701584 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -515,6 +515,7 @@ static const struct intel_early_ops gen11_early_ops __initconst = { .stolen_size = gen9_stolen_size, }; +/* Intel integrated GPUs for which we need to reserve "stolen memory" */ static const struct pci_device_id intel_early_ids[] __initconst = { INTEL_I830_IDS(&i830_early_ops), INTEL_I845G_IDS(&i845_early_ops), @@ -588,6 +589,13 @@ static void __init intel_graphics_quirks(int num, int slot, int func) u16 device; int i; + /* + * Reserve "stolen memory" for an integrated GPU. If we've already + * found one, there's nothing to do for other (discrete) GPUs. + */ + if (resource_size(&intel_graphics_stolen_res)) + return; + device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID); for (i = 0; i < ARRAY_SIZE(intel_early_ids); i++) { @@ -700,7 +708,7 @@ static struct chipset early_qrk[] __initdata = { { PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST, PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID, - QFLAG_APPLY_ONCE, intel_graphics_quirks }, + 0, intel_graphics_quirks }, /* * HPET on the current version of the Baytrail platform has accuracy * problems: it will halt in deep idle state - so we disable it. diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 4f2f54e1281c3f1d35d4166c8835f71bf7954f99..98bf8fd189025d02168436669b28f11cfad81b73 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -159,14 +159,12 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) { struct thread_struct *prev = &prev_p->thread, *next = &next_p->thread; - struct fpu *prev_fpu = &prev->fpu; - struct fpu *next_fpu = &next->fpu; int cpu = smp_processor_id(); /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ if (!test_thread_flag(TIF_NEED_FPU_LOAD)) - switch_fpu_prepare(prev_fpu, cpu); + switch_fpu_prepare(prev_p, cpu); /* * Save away %gs. 
No need to save %fs, as it was saved on the @@ -213,7 +211,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) this_cpu_write(current_task, next_p); - switch_fpu_finish(next_fpu); + switch_fpu_finish(next_p); /* Load the Intel cache allocation PQR MSR. */ resctrl_sched_in(); diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index df342bedea88afc1e8571d6e20024bf477569c82..ad3f82a18de9df1e151e41ea3c5f184a493b9cac 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -535,15 +535,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) { struct thread_struct *prev = &prev_p->thread; struct thread_struct *next = &next_p->thread; - struct fpu *prev_fpu = &prev->fpu; - struct fpu *next_fpu = &next->fpu; int cpu = smp_processor_id(); WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) && this_cpu_read(irq_count) != -1); if (!test_thread_flag(TIF_NEED_FPU_LOAD)) - switch_fpu_prepare(prev_fpu, cpu); + switch_fpu_prepare(prev_p, cpu); /* We must save %fs and %gs before load_TLS() because * %fs and %gs may be cleared by load_TLS(). @@ -595,7 +593,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) this_cpu_write(current_task, next_p); this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p)); - switch_fpu_finish(next_fpu); + switch_fpu_finish(next_p); /* Reload sp0. */ update_task_stack(next_p); diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 798a6f73f89460559fd3b81e249dd5b306a18cea..df3514835b3563522fdf93a2e9e36aabb2234b69 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -113,17 +113,9 @@ void __noreturn machine_real_restart(unsigned int type) spin_unlock(&rtc_lock); /* - * Switch back to the initial page table. + * Switch to the trampoline page table. */ -#ifdef CONFIG_X86_32 - load_cr3(initial_page_table); -#else - write_cr3(real_mode_header->trampoline_pgd); - - /* Exiting long mode will fail if CR4.PCIDE is set. 
*/ - if (boot_cpu_has(X86_FEATURE_PCID)) - cr4_clear_bits(X86_CR4_PCIDE); -#endif + load_trampoline_pgtable(); /* Jump to the identity-mapped low memory code */ #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index f9f1b45e5ddc423a1def33d40011de1f693bc20d..13d1a0ac8916a86b2931fe721fd533b294929362 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -1127,6 +1127,7 @@ static int tsc_cs_enable(struct clocksource *cs) static struct clocksource clocksource_tsc_early = { .name = "tsc-early", .rating = 299, + .uncertainty_margin = 32 * NSEC_PER_MSEC, .read = read_tsc, .mask = CLOCKSOURCE_MASK(64), .flags = CLOCK_SOURCE_IS_CONTINUOUS | diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index c2516ddc3cbec958a78473aa46c927d8885018ec..20d29ae8ed702f21d56e0c4fa43fc18b8a377a9f 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3631,12 +3631,23 @@ static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr) walk_shadow_page_lockless_end(vcpu); } +static u32 alloc_apf_token(struct kvm_vcpu *vcpu) +{ + /* make sure the token value is not 0 */ + u32 id = vcpu->arch.apf.id; + + if (id << 12 == 0) + vcpu->arch.apf.id = 1; + + return (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id; +} + static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, gfn_t gfn) { struct kvm_arch_async_pf arch; - arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id; + arch.token = alloc_apf_token(vcpu); arch.gfn = gfn; arch.direct_map = vcpu->arch.mmu->direct_map; arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu); diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index f2ddf663e72e92a0f871522dc97e776ac283fc29..7e08efb06839379328323af53ad7b887dbbbc284 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -1130,12 +1130,12 @@ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root, bool spte_set = false; tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) { - if (!is_writable_pte(iter.old_spte)) - break; - new_spte = iter.old_spte & ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE); + if (new_spte == iter.old_spte) + break; + tdp_mmu_set_spte(kvm, &iter, new_spte); spte_set = true; } diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 67741d2a030851b9424406921755cdad8d30cfc7..2f83b5d948b33c4129dfdc011ba8c9dd7be5f992 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -95,7 +95,7 @@ static void kvm_perf_overflow_intr(struct perf_event *perf_event, } static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, - unsigned config, bool exclude_user, + u64 config, bool exclude_user, bool exclude_kernel, bool intr, bool in_tx, bool in_tx_cp) { @@ -170,8 +170,8 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc) void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel) { - unsigned config, type = PERF_TYPE_RAW; - u8 event_select, unit_mask; + u64 config; + u32 type = PERF_TYPE_RAW; struct kvm *kvm = pmc->vcpu->kvm; struct kvm_pmu_event_filter *filter; int i; @@ -203,23 +203,18 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel) if (!allow_event) return; - event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT; - unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8; - if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE | ARCH_PERFMON_EVENTSEL_INV | ARCH_PERFMON_EVENTSEL_CMASK | HSW_IN_TX | HSW_IN_TX_CHECKPOINTED))) { - config = kvm_x86_ops.pmu_ops->find_arch_event(pmc_to_pmu(pmc), - event_select, - unit_mask); + config = 
kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc); if (config != PERF_COUNT_HW_MAX) type = PERF_TYPE_HARDWARE; } if (type == PERF_TYPE_RAW) - config = eventsel & X86_RAW_EVENT_MASK; + config = eventsel & AMD64_RAW_EVENT_MASK; if (pmc->current_config == eventsel && pmc_resume_counter(pmc)) return; diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h index 067fef51760c4702c8d0ec5c7ff1d51ae00b5d93..1a44e29e733309d2ba8b2c59f6644076d30e1d53 100644 --- a/arch/x86/kvm/pmu.h +++ b/arch/x86/kvm/pmu.h @@ -24,8 +24,7 @@ struct kvm_event_hw_type_mapping { }; struct kvm_pmu_ops { - unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select, - u8 unit_mask); + unsigned int (*pmc_perf_hw_id)(struct kvm_pmc *pmc); unsigned (*find_fixed_event)(int idx); bool (*pmc_is_enabled)(struct kvm_pmc *pmc); struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx); diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index 8c550999ace0c26066824c59fe3fb8b616659ace..a8b5533cf601d686b21bd8e11dd874ae70f8c1a6 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -344,8 +344,6 @@ int avic_incomplete_ipi_interception(struct vcpu_svm *svm) break; } case AVIC_IPI_FAILURE_INVALID_TARGET: - WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n", - index, svm->vcpu.vcpu_id, icrh, icrl); break; case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE: WARN_ONCE(1, "Invalid backing page\n"); diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index f0946872f5e6d5f298db28d4327502f734ffc91b..23910e6a3f011ccdf30b90954fbf253061cc65ec 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -783,8 +783,10 @@ void svm_free_nested(struct vcpu_svm *svm) /* * Forcibly leave nested mode in order to be able to reset the VCPU later on. 
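The new pmc_perf_hw_id() callback above (and its Intel counterpart later in the series) derives event_select and unit_mask from the counter's own eventsel value instead of receiving them as arguments. For reference, a user-space decode of those two fields; the mask names are stand-ins for ARCH_PERFMON_EVENTSEL_EVENT and ARCH_PERFMON_EVENTSEL_UMASK:

#include <stdint.h>
#include <stdio.h>

#define EVENTSEL_EVENT_MASK     0x000000ffULL   /* low 8 bits: event select */
#define EVENTSEL_UMASK_MASK     0x0000ff00ULL   /* next 8 bits: unit mask   */

struct pmc {
        uint64_t eventsel;
};

static void decode(const struct pmc *pmc)
{
        uint8_t event_select = pmc->eventsel & EVENTSEL_EVENT_MASK;
        uint8_t unit_mask = (pmc->eventsel & EVENTSEL_UMASK_MASK) >> 8;

        printf("event_select=%#x unit_mask=%#x\n", event_select, unit_mask);
}

int main(void)
{
        /* 0xc2/0x41 is just an example encoding, not tied to a specific CPU. */
        struct pmc pmc = { .eventsel = 0x000041c2ULL };

        decode(&pmc);
        return 0;
}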
*/ -void svm_leave_nested(struct vcpu_svm *svm) +void svm_leave_nested(struct kvm_vcpu *vcpu) { + struct vcpu_svm *svm = to_svm(vcpu); + if (is_guest_mode(&svm->vcpu)) { struct vmcb *hsave = svm->nested.hsave; struct vmcb *vmcb = svm->vmcb; @@ -1185,7 +1187,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, return -EINVAL; if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) { - svm_leave_nested(svm); + svm_leave_nested(vcpu); svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET)); return 0; } @@ -1238,6 +1240,9 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, copy_vmcb_control_area(&hsave->control, &svm->vmcb->control); hsave->save = *save; + if (is_guest_mode(vcpu)) + svm_leave_nested(vcpu); + svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa; load_nested_vmcb_control(svm, ctl); nested_prepare_vmcb_control(svm); @@ -1252,6 +1257,7 @@ out_free: } struct kvm_x86_nested_ops svm_nested_ops = { + .leave_nested = svm_leave_nested, .check_events = svm_check_nested_events, .get_nested_state_pages = svm_get_nested_state_pages, .get_state = svm_get_nested_state, diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c index 5a5c165a30ed1fb89e91c78221d1701935df639e..4e7093bcb64b62ef9c853dba9013db543b357e45 100644 --- a/arch/x86/kvm/svm/pmu.c +++ b/arch/x86/kvm/svm/pmu.c @@ -126,10 +126,10 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr, return &pmu->gp_counters[msr_to_index(msr)]; } -static unsigned amd_find_arch_event(struct kvm_pmu *pmu, - u8 event_select, - u8 unit_mask) +static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc) { + u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT; + u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8; int i; for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++) @@ -312,7 +312,7 @@ static void amd_pmu_reset(struct kvm_vcpu *vcpu) } struct kvm_pmu_ops amd_pmu_ops = { - .find_arch_event = amd_find_arch_event, + .pmc_perf_hw_id = amd_pmc_perf_hw_id, .find_fixed_event = amd_find_fixed_event, .pmc_is_enabled = amd_pmc_is_enabled, .pmc_idx_to_pmc = amd_pmc_idx_to_pmc, diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 5e1d7396a6b8a3c1cebf620bbefaf79c643cd137..7773a765f548923888359b18aa92a96abc6cc35b 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -279,7 +279,7 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer) if ((old_efer & EFER_SVME) != (efer & EFER_SVME)) { if (!(efer & EFER_SVME)) { - svm_leave_nested(svm); + svm_leave_nested(vcpu); svm_set_gif(svm, true); /* @@ -4103,6 +4103,10 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, void *insn, int i bool smep, smap, is_user; unsigned long cr4; + /* Emulation is always possible when KVM has access to all guest state. */ + if (!sev_guest(vcpu->kvm)) + return true; + /* * Detect and workaround Errata 1096 Fam_17h_00_0Fh. * @@ -4146,23 +4150,27 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, void *insn, int i if (likely(!insn || insn_len)) return true; - /* - * If RIP is invalid, go ahead with emulation which will cause an - * internal error exit. 
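The rework of svm_can_emulate_instruction() in this hunk reduces to a small decision tree for the erratum-1096 case where the instruction bytes are unavailable: non-SEV guests can always be emulated, and for SEV guests the response depends on whether the fault hit user or kernel mode. A simplified user-space model of that choice; the real function returns a bool and injects the event as a side effect:

#include <stdbool.h>
#include <stdio.h>

enum action {
        ALLOW_EMULATION,        /* non-SEV guest: KVM can fetch the bytes       */
        RETRY_WITHOUT_EMULATION,
        INJECT_GP,              /* user-mode fault: arbitrary but harmless      */
        REQUEST_TRIPLE_FAULT,   /* kernel-mode fault: nothing sane to inject    */
};

static enum action on_empty_insn_buffer(bool sev, bool smap, bool smep, bool user)
{
        if (!sev)
                return ALLOW_EMULATION;
        if (smap && (!smep || user))
                return user ? INJECT_GP : REQUEST_TRIPLE_FAULT;
        return RETRY_WITHOUT_EMULATION;
}

int main(void)
{
        printf("non-SEV: %d, SEV user: %d, SEV kernel: %d\n",
               on_empty_insn_buffer(false, true, false, true),
               on_empty_insn_buffer(true, true, false, true),
               on_empty_insn_buffer(true, true, false, false));
        return 0;
}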
- */ - if (!kvm_vcpu_gfn_to_memslot(vcpu, kvm_rip_read(vcpu) >> PAGE_SHIFT)) - return true; - cr4 = kvm_read_cr4(vcpu); smep = cr4 & X86_CR4_SMEP; smap = cr4 & X86_CR4_SMAP; is_user = svm_get_cpl(vcpu) == 3; if (smap && (!smep || is_user)) { - if (!sev_guest(vcpu->kvm)) - return true; - pr_err_ratelimited("KVM: SEV Guest triggered AMD Erratum 1096\n"); - kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); + + /* + * If the fault occurred in userspace, arbitrarily inject #GP + * to avoid killing the guest and to hopefully avoid confusing + * the guest kernel too much, e.g. injecting #PF would not be + * coherent with respect to the guest's page tables. Request + * triple fault if the fault occurred in the kernel as there's + * no fault that KVM can inject without confusing the guest. + * In practice, the triple fault is moot as no sane SEV kernel + * will execute from user memory while also running with SMAP=1. + */ + if (is_user) + kvm_inject_gp(vcpu, 0); + else + kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); } return false; diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index be74e22b82ea7332d80670f355c94cd8b1a7ccc5..2c007241fbf5397054c00aa9f17c5da79f4cf0fc 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -393,7 +393,7 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm) int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa, struct vmcb *nested_vmcb); -void svm_leave_nested(struct vcpu_svm *svm); +void svm_leave_nested(struct kvm_vcpu *vcpu); void svm_free_nested(struct vcpu_svm *svm); int svm_allocate_nested(struct vcpu_svm *svm); int nested_svm_vmrun(struct vcpu_svm *svm); diff --git a/arch/x86/kvm/vmx/evmcs.c b/arch/x86/kvm/vmx/evmcs.c index c0d6fee9225feb9ea672396a634f4813d1409b8d..5b68034ec5f9c336d628ad9036b6589ed5970550 100644 --- a/arch/x86/kvm/vmx/evmcs.c +++ b/arch/x86/kvm/vmx/evmcs.c @@ -361,6 +361,7 @@ void nested_evmcs_filter_control_msr(u32 msr_index, u64 *pdata) case MSR_IA32_VMX_PROCBASED_CTLS2: ctl_high &= ~EVMCS1_UNSUPPORTED_2NDEXEC; break; + case MSR_IA32_VMX_TRUE_PINBASED_CTLS: case MSR_IA32_VMX_PINBASED_CTLS: ctl_high &= ~EVMCS1_UNSUPPORTED_PINCTRL; break; diff --git a/arch/x86/kvm/vmx/evmcs.h b/arch/x86/kvm/vmx/evmcs.h index bd41d9462355fd65909554b41b3ce49654ef3e4d..011929a63823084e21e2dc5575e1567bbc478ee7 100644 --- a/arch/x86/kvm/vmx/evmcs.h +++ b/arch/x86/kvm/vmx/evmcs.h @@ -59,7 +59,9 @@ DECLARE_STATIC_KEY_FALSE(enable_evmcs); SECONDARY_EXEC_SHADOW_VMCS | \ SECONDARY_EXEC_TSC_SCALING | \ SECONDARY_EXEC_PAUSE_LOOP_EXITING) -#define EVMCS1_UNSUPPORTED_VMEXIT_CTRL (VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) +#define EVMCS1_UNSUPPORTED_VMEXIT_CTRL \ + (VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | \ + VM_EXIT_SAVE_VMX_PREEMPTION_TIMER) #define EVMCS1_UNSUPPORTED_VMENTRY_CTRL (VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) #define EVMCS1_UNSUPPORTED_VMFUNC (VMX_VMFUNC_EPTP_SWITCHING) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 36661b15c3d047ec7c28a6d4d102a03c15deaf93..0c2389d0fdafe23c9e90cf4e05e893e100739f8e 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -6628,6 +6628,7 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *)) } struct kvm_x86_nested_ops vmx_nested_ops = { + .leave_nested = vmx_leave_nested, .check_events = vmx_check_nested_events, .hv_timer_pending = nested_vmx_preemption_timer_pending, .get_state = vmx_get_nested_state, diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index 
cdf5f34518f43b4c2e46bf123a489cf6cf002461..bd70c1d7f3458594e8496ebd1330b666fb7c9d62 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -68,10 +68,11 @@ static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data) reprogram_counter(pmu, bit); } -static unsigned intel_find_arch_event(struct kvm_pmu *pmu, - u8 event_select, - u8 unit_mask) +static unsigned int intel_pmc_perf_hw_id(struct kvm_pmc *pmc) { + struct kvm_pmu *pmu = pmc_to_pmu(pmc); + u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT; + u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8; int i; for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++) @@ -432,7 +433,7 @@ static void intel_pmu_reset(struct kvm_vcpu *vcpu) } struct kvm_pmu_ops intel_pmu_ops = { - .find_arch_event = intel_find_arch_event, + .pmc_perf_hw_id = intel_pmc_perf_hw_id, .find_fixed_event = intel_find_fixed_event, .pmc_is_enabled = intel_pmc_is_enabled, .pmc_idx_to_pmc = intel_pmc_idx_to_pmc, diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c index fbd9b10354790dd6709c60f70cf5ec98a76361cf..5f8acd2faa7c1ea4848b257a5564e54376fe25ec 100644 --- a/arch/x86/kvm/vmx/posted_intr.c +++ b/arch/x86/kvm/vmx/posted_intr.c @@ -15,7 +15,7 @@ * can find which vCPU should be waken up. */ static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu); -static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock); +static DEFINE_PER_CPU(raw_spinlock_t, blocked_vcpu_on_cpu_lock); static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu) { @@ -121,9 +121,9 @@ static void __pi_post_block(struct kvm_vcpu *vcpu) new.control) != old.control); if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) { - spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); + raw_spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); list_del(&vcpu->blocked_vcpu_list); - spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); + raw_spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); vcpu->pre_pcpu = -1; } } @@ -154,11 +154,11 @@ int pi_pre_block(struct kvm_vcpu *vcpu) local_irq_disable(); if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) { vcpu->pre_pcpu = vcpu->cpu; - spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); + raw_spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); list_add_tail(&vcpu->blocked_vcpu_list, &per_cpu(blocked_vcpu_on_cpu, vcpu->pre_pcpu)); - spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); + raw_spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); } do { @@ -215,7 +215,7 @@ void pi_wakeup_handler(void) struct kvm_vcpu *vcpu; int cpu = smp_processor_id(); - spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); + raw_spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu), blocked_vcpu_list) { struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); @@ -223,13 +223,13 @@ void pi_wakeup_handler(void) if (pi_test_on(pi_desc) == 1) kvm_vcpu_kick(vcpu); } - spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); + raw_spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); } void __init pi_init_cpu(int cpu) { INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu)); - spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); + raw_spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); } bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 38c453f28f1f0920bdc9ce383343e7d127f587e6..94f5f2129e3b4d0314f4b94955650a70e332d321 100644 --- 
a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -4846,8 +4846,33 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu) dr6 = vmx_get_exit_qual(vcpu); if (!(vcpu->guest_debug & (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { + /* + * If the #DB was due to ICEBP, a.k.a. INT1, skip the + * instruction. ICEBP generates a trap-like #DB, but + * despite its interception control being tied to #DB, + * is an instruction intercept, i.e. the VM-Exit occurs + * on the ICEBP itself. Note, skipping ICEBP also + * clears STI and MOVSS blocking. + * + * For all other #DBs, set vmcs.PENDING_DBG_EXCEPTIONS.BS + * if single-step is enabled in RFLAGS and STI or MOVSS + * blocking is active, as the CPU doesn't set the bit + * on VM-Exit due to #DB interception. VM-Entry has a + * consistency check that a single-step #DB is pending + * in this scenario as the previous instruction cannot + * have toggled RFLAGS.TF 0=>1 (because STI and POP/MOV + * don't modify RFLAGS), therefore the one instruction + * delay when activating single-step breakpoints must + * have already expired. Note, the CPU sets/clears BS + * as appropriate for all other VM-Exits types. + */ if (is_icebp(intr_info)) WARN_ON(!skip_emulated_instruction(vcpu)); + else if ((vmx_get_rflags(vcpu) & X86_EFLAGS_TF) && + (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & + (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS))) + vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, + vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS) | DR6_BS); kvm_queue_exception_p(vcpu, DB_VECTOR, dr6); return 1; @@ -7915,6 +7940,7 @@ static struct kvm_x86_init_ops vmx_init_ops __initdata = { .disabled_by_bios = vmx_disabled_by_bios, .check_processor_compatibility = vmx_check_processor_compat, .hardware_setup = hardware_setup, + .intel_pt_intr_in_guest = vmx_pt_mode_is_host_guest, .runtime_ops = &vmx_x86_ops, }; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 4f828cac0273e672aef345e1b1b2feba5b7eca97..a5d6d79b023bc1a1be066f86a1944ed0c709ed2b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1229,7 +1229,7 @@ static const u32 msrs_to_save_all[] = { MSR_IA32_UMWAIT_CONTROL, MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1, - MSR_ARCH_PERFMON_FIXED_CTR0 + 2, MSR_ARCH_PERFMON_FIXED_CTR0 + 3, + MSR_ARCH_PERFMON_FIXED_CTR0 + 2, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS, MSR_CORE_PERF_GLOBAL_CTRL, MSR_CORE_PERF_GLOBAL_OVF_CTRL, MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1, @@ -3171,6 +3171,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (data & ~supported_xss) return 1; vcpu->arch.ia32_xss = data; + kvm_update_cpuid_runtime(vcpu); break; case MSR_SMI_COUNT: if (!msr_info->host_initiated) @@ -4390,6 +4391,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, vcpu->arch.hflags |= HF_SMM_MASK; else vcpu->arch.hflags &= ~HF_SMM_MASK; + + kvm_x86_ops.nested_ops->leave_nested(vcpu); kvm_smm_changed(vcpu); } @@ -7882,7 +7885,7 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = { .is_in_guest = kvm_is_in_guest, .is_user_mode = kvm_is_user_mode, .get_guest_ip = kvm_get_guest_ip, - .handle_intel_pt_intr = kvm_handle_intel_pt_intr, + .handle_intel_pt_intr = NULL, }; #ifdef CONFIG_X86_64 @@ -8005,6 +8008,8 @@ int kvm_arch_init(void *opaque) PT_PRESENT_MASK, 0, sme_me_mask); kvm_timer_init(); + if (ops->intel_pt_intr_in_guest && ops->intel_pt_intr_in_guest()) + kvm_guest_cbs.handle_intel_pt_intr = kvm_handle_intel_pt_intr; perf_register_guest_info_callbacks(&kvm_guest_cbs); if 
(boot_cpu_has(X86_FEATURE_XSAVE)) { @@ -8042,6 +8047,7 @@ void kvm_arch_exit(void) #endif kvm_lapic_exit(); perf_unregister_guest_info_callbacks(&kvm_guest_cbs); + kvm_guest_cbs.handle_intel_pt_intr = NULL; if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block, diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c index 3313bffbecd4d3d8b179ceaef3cab7c73d83f977..1a702c6a226ec2daff44df71e742c0cc126bc370 100644 --- a/arch/x86/realmode/init.c +++ b/arch/x86/realmode/init.c @@ -17,6 +17,32 @@ u32 *trampoline_cr4_features; /* Hold the pgd entry used on booting additional CPUs */ pgd_t trampoline_pgd_entry; +void load_trampoline_pgtable(void) +{ +#ifdef CONFIG_X86_32 + load_cr3(initial_page_table); +#else + /* + * This function is called before exiting to real-mode and that will + * fail with CR4.PCIDE still set. + */ + if (boot_cpu_has(X86_FEATURE_PCID)) + cr4_clear_bits(X86_CR4_PCIDE); + + write_cr3(real_mode_header->trampoline_pgd); +#endif + + /* + * The CR3 write above will not flush global TLB entries. + * Stale, global entries from previous page tables may still be + * present. Flush those stale entries. + * + * This ensures that memory accessed while running with + * trampoline_pgd is *actually* mapped into trampoline_pgd. + */ + __flush_tlb_all(); +} + void __init reserve_real_mode(void) { phys_addr_t mem; diff --git a/arch/x86/um/syscalls_64.c b/arch/x86/um/syscalls_64.c index 58f51667e2e4beb82ec794cf618bf7026cfcb66c..8249685b409605a486d19621c2a78a9713856c58 100644 --- a/arch/x86/um/syscalls_64.c +++ b/arch/x86/um/syscalls_64.c @@ -11,6 +11,7 @@ #include #include /* XXX This should get the constants from libc */ #include +#include long arch_prctl(struct task_struct *task, int option, unsigned long __user *arg2) @@ -35,7 +36,7 @@ long arch_prctl(struct task_struct *task, int option, switch (option) { case ARCH_SET_FS: case ARCH_SET_GS: - ret = restore_registers(pid, &current->thread.regs.regs); + ret = restore_pid_registers(pid, &current->thread.regs.regs); if (ret) return ret; break; diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 16ff25d6935e78d69a42c4a24174fd5702c280b9..804c65d2b95f3c066f9040d5d5580e1b6256f0ce 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -1387,10 +1387,6 @@ asmlinkage __visible void __init xen_start_kernel(void) xen_acpi_sleep_register(); - /* Avoid searching for BIOS MP tables */ - x86_init.mpparse.find_smp_config = x86_init_noop; - x86_init.mpparse.get_smp_config = x86_init_uint_noop; - xen_boot_params_init_edd(); #ifdef CONFIG_ACPI diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c index c2ac319f11a4b785e07ac52041f54d8adc2c4e85..8f9e7e2407c873621f50c6954e6cff712437c6bf 100644 --- a/arch/x86/xen/smp_pv.c +++ b/arch/x86/xen/smp_pv.c @@ -149,28 +149,12 @@ int xen_smp_intr_init_pv(unsigned int cpu) return rc; } -static void __init xen_fill_possible_map(void) -{ - int i, rc; - - if (xen_initial_domain()) - return; - - for (i = 0; i < nr_cpu_ids; i++) { - rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL); - if (rc >= 0) { - num_processors++; - set_cpu_possible(i, true); - } - } -} - -static void __init xen_filter_cpu_maps(void) +static void __init _get_smp_config(unsigned int early) { int i, rc; unsigned int subtract = 0; - if (!xen_initial_domain()) + if (early) return; num_processors = 0; @@ -211,7 +195,6 @@ static void __init xen_pv_smp_prepare_boot_cpu(void) * sure the old memory can be recycled.
*/ make_lowmem_page_readwrite(xen_initial_gdt); - xen_filter_cpu_maps(); xen_setup_vcpu_info_placement(); /* @@ -491,5 +474,8 @@ static const struct smp_ops xen_smp_ops __initconst = { void __init xen_smp_init(void) { smp_ops = xen_smp_ops; - xen_fill_possible_map(); + + /* Avoid searching for BIOS MP tables */ + x86_init.mpparse.find_smp_config = x86_init_noop; + x86_init.mpparse.get_smp_config = _get_smp_config; } diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index b8c2ddc01aec3b3cebf3f54a2b44c6333444c747..8d95bf7765b190b3d646f797426b50fdb4f942a2 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -6404,6 +6404,8 @@ static void bfq_exit_queue(struct elevator_queue *e) spin_unlock_irq(&bfqd->lock); #endif + wbt_enable_default(bfqd->queue); + kfree(bfqd); } diff --git a/block/bio-integrity.c b/block/bio-integrity.c index 9ffd7e2895547676ed801c099306f5a728918840..4f6f140a44e064f18d9c6b3726e3fdcb8572ed40 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c @@ -384,7 +384,7 @@ void bio_integrity_advance(struct bio *bio, unsigned int bytes_done) struct blk_integrity *bi = blk_get_integrity(bio->bi_disk); unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9); - bip->bip_iter.bi_sector += bytes_done >> 9; + bip->bip_iter.bi_sector += bio_integrity_intervals(bi, bytes_done >> 9); bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes); } diff --git a/block/bio.c b/block/bio.c index 0703a208ca248cfaf41a779ad434db50d89ca7b2..f8d26ce7b61b0059197d1f1b40fba3a36d9da0dd 100644 --- a/block/bio.c +++ b/block/bio.c @@ -575,7 +575,8 @@ void bio_truncate(struct bio *bio, unsigned new_size) offset = new_size - done; else offset = 0; - zero_user(bv.bv_page, offset, bv.bv_len - offset); + zero_user(bv.bv_page, bv.bv_offset + offset, + bv.bv_len - offset); truncated = true; } done += bv.bv_len; diff --git a/block/blk-flush.c b/block/blk-flush.c index 70f1d02135ed6419b1e81da7f03a85e99fc4dc19..33b487b5cbf78226b1b5ff431a59ff2baa796d51 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -236,8 +236,10 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error) * avoiding use-after-free. */ WRITE_ONCE(flush_rq->state, MQ_RQ_IDLE); - if (fq->rq_status != BLK_STS_OK) + if (fq->rq_status != BLK_STS_OK) { error = fq->rq_status; + fq->rq_status = BLK_STS_OK; + } if (!q->elevator) { flush_rq->tag = BLK_MQ_NO_TAG; diff --git a/block/blk-pm.c b/block/blk-pm.c index 17bd020268d421434a3724ad63552f2df49da0f4..2dad62cc157272e114eb31255026d2d49b30d550 100644 --- a/block/blk-pm.c +++ b/block/blk-pm.c @@ -163,27 +163,19 @@ EXPORT_SYMBOL(blk_pre_runtime_resume); /** * blk_post_runtime_resume - Post runtime resume processing * @q: the queue of the device - * @err: return value of the device's runtime_resume function * * Description: - * Update the queue's runtime status according to the return value of the - * device's runtime_resume function. If the resume was successful, call - * blk_set_runtime_active() to do the real work of restarting the queue. + * For historical reasons, this routine merely calls blk_set_runtime_active() + * to do the real work of restarting the queue. It does this regardless of + * whether the device's runtime-resume succeeded; even if it failed the + * driver or error handler will need to communicate with the device. * * This function should be called near the end of the device's * runtime_resume callback. 
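To make the reworked interface concrete, here is a minimal sketch of a block driver's runtime_resume callback under the new single-argument blk_post_runtime_resume(); the driver type and wake-up helper are hypothetical and only the blk-pm call itself reflects this change.

#include <linux/blk-pm.h>
#include <linux/blkdev.h>
#include <linux/pm_runtime.h>

/* Hypothetical per-device state, for illustration only. */
struct sketch_blk_dev {
        struct request_queue *queue;
};

/* Hypothetical hardware wake-up; pretend it always succeeds. */
static int sketch_blk_wake_hw(struct sketch_blk_dev *sdev)
{
        return 0;
}

static int sketch_blk_runtime_resume(struct device *dev)
{
        struct sketch_blk_dev *sdev = dev_get_drvdata(dev);
        int err = sketch_blk_wake_hw(sdev);

        /*
         * The queue is marked active regardless of err; on failure the
         * driver or the error handler still needs to reach the device.
         */
        blk_post_runtime_resume(sdev->queue);

        return err;
}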
*/ -void blk_post_runtime_resume(struct request_queue *q, int err) +void blk_post_runtime_resume(struct request_queue *q) { - if (!q->dev) - return; - if (!err) { - blk_set_runtime_active(q); - } else { - spin_lock_irq(&q->queue_lock); - q->rpm_status = RPM_SUSPENDED; - spin_unlock_irq(&q->queue_lock); - } + blk_set_runtime_active(q); } EXPORT_SYMBOL(blk_post_runtime_resume); @@ -201,7 +193,7 @@ EXPORT_SYMBOL(blk_post_runtime_resume); * runtime PM status and re-enable peeking requests from the queue. It * should be called before first request is added to the queue. * - * This function is also called by blk_post_runtime_resume() for successful + * This function is also called by blk_post_runtime_resume() for * runtime resumes. It does everything necessary to restart the queue. */ void blk_set_runtime_active(struct request_queue *q) diff --git a/block/elevator.c b/block/elevator.c index 2a525863d4e92ab9b19bcb1b40d923365e5bbdef..2f962662c32a1710ab5059ecb6459a3754718cf4 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -518,8 +518,6 @@ void elv_unregister_queue(struct request_queue *q) kobject_del(&e->kobj); e->registered = 0; - /* Re-enable throttling in case elevator disabled it */ - wbt_enable_default(q); } } diff --git a/crypto/algapi.c b/crypto/algapi.c index fdabf2675b63fefed7e93ad994e35e7f7b3844d5..9de27daa98b47b7c8503c5615dd5f4c2fea9fb60 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -1295,3 +1295,4 @@ module_exit(crypto_algapi_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Cryptographic algorithms API"); +MODULE_SOFTDEP("pre: cryptomgr"); diff --git a/crypto/api.c b/crypto/api.c index c4eda56cff8917e08c2de3b0ada13df291c785ab..5ffcd3ab4a75309edcd605122d9cf9b1c7b5b978 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -603,4 +603,3 @@ EXPORT_SYMBOL_GPL(crypto_req_done); MODULE_DESCRIPTION("Cryptographic core API"); MODULE_LICENSE("GPL"); -MODULE_SOFTDEP("pre: cryptomgr"); diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c index 6e147c43fc186c2c540c6a23c8b995c1da413c2d..37c4c308339e4f225ce8b224c7058edd543f3bfe 100644 --- a/crypto/jitterentropy.c +++ b/crypto/jitterentropy.c @@ -265,7 +265,6 @@ static int jent_stuck(struct rand_data *ec, __u64 current_delta) { __u64 delta2 = jent_delta(ec->last_delta, current_delta); __u64 delta3 = jent_delta(ec->last_delta2, delta2); - unsigned int delta_masked = current_delta & JENT_APT_WORD_MASK; ec->last_delta = current_delta; ec->last_delta2 = delta2; @@ -274,7 +273,7 @@ static int jent_stuck(struct rand_data *ec, __u64 current_delta) * Insert the result of the comparison of two back-to-back time * deltas. 
*/ - jent_apt_insert(ec, delta_masked); + jent_apt_insert(ec, current_delta); if (!current_delta || !delta2 || !delta3) { /* RCT with a stuck bit */ diff --git a/drivers/accessibility/speakup/speakup_dectlk.c b/drivers/accessibility/speakup/speakup_dectlk.c index ab6d61e80b1cbac8d66951190a89d5f6c99ee7f7..d689ec5e276f12aa8fb8630bf7cdddcf9aad86a7 100644 --- a/drivers/accessibility/speakup/speakup_dectlk.c +++ b/drivers/accessibility/speakup/speakup_dectlk.c @@ -44,6 +44,7 @@ static struct var_t vars[] = { { CAPS_START, .u.s = {"[:dv ap 160] " } }, { CAPS_STOP, .u.s = {"[:dv ap 100 ] " } }, { RATE, .u.n = {"[:ra %d] ", 180, 75, 650, 0, 0, NULL } }, + { PITCH, .u.n = {"[:dv ap %d] ", 122, 50, 350, 0, 0, NULL } }, { INFLECTION, .u.n = {"[:dv pr %d] ", 100, 0, 10000, 0, 0, NULL } }, { VOL, .u.n = {"[:dv g5 %d] ", 86, 60, 86, 0, 0, NULL } }, { PUNCT, .u.n = {"[:pu %c] ", 0, 0, 2, 0, 0, "nsa" } }, diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c index 3323a2ba6a3138472403a0a4b700a77825dbe839..b3230e511870aad7ec54e49c0cad6cb901ca9aa1 100644 --- a/drivers/acpi/acpica/exfield.c +++ b/drivers/acpi/acpica/exfield.c @@ -326,12 +326,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc, obj_desc->field.base_byte_offset, source_desc->buffer.pointer, data_length); - if ((obj_desc->field.region_obj->region.address == - PCC_MASTER_SUBSPACE - && MASTER_SUBSPACE_COMMAND(obj_desc->field. - base_byte_offset)) - || GENERIC_SUBSPACE_COMMAND(obj_desc->field. - base_byte_offset)) { + if (MASTER_SUBSPACE_COMMAND(obj_desc->field.base_byte_offset)) { /* Perform the write */ diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c index a46d685a3ffcf0888571e89cbdde6e0263197adc..9d67dfd93d5b652af943cd8e3317c699413d1f07 100644 --- a/drivers/acpi/acpica/exoparg1.c +++ b/drivers/acpi/acpica/exoparg1.c @@ -1007,7 +1007,8 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) (walk_state, return_desc, &temp_desc); if (ACPI_FAILURE(status)) { - goto cleanup; + return_ACPI_STATUS + (status); } return_desc = temp_desc; diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c index 4836a4b8b38b87709091009efe209d29fd400d03..142a755be68811445ebef9b90660c5ac8924c421 100644 --- a/drivers/acpi/acpica/hwesleep.c +++ b/drivers/acpi/acpica/hwesleep.c @@ -104,7 +104,9 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state) /* Flush caches, as per ACPI specification */ - ACPI_FLUSH_CPU_CACHE(); + if (sleep_state < ACPI_STATE_S4) { + ACPI_FLUSH_CPU_CACHE(); + } status = acpi_os_enter_sleep(sleep_state, sleep_control, 0); if (status == AE_CTRL_TERMINATE) { diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c index fcc84d196238a90c6c4a6e6d46895f7a8d157114..6a20bb5059c1d815a33ab83149dca348065e5a44 100644 --- a/drivers/acpi/acpica/hwsleep.c +++ b/drivers/acpi/acpica/hwsleep.c @@ -110,7 +110,9 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state) /* Flush caches, as per ACPI specification */ - ACPI_FLUSH_CPU_CACHE(); + if (sleep_state < ACPI_STATE_S4) { + ACPI_FLUSH_CPU_CACHE(); + } status = acpi_os_enter_sleep(sleep_state, pm1a_control, pm1b_control); if (status == AE_CTRL_TERMINATE) { diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c index f1645d87864c3dcd6dc32ca3f35e6c6e69b4fbb1..3948c34d85830e0e671ff4fd2008431d21ffe5e0 100644 --- a/drivers/acpi/acpica/hwxfsleep.c +++ b/drivers/acpi/acpica/hwxfsleep.c @@ -162,8 +162,6 @@ acpi_status acpi_enter_sleep_state_s4bios(void) 
return_ACPI_STATUS(status); } - ACPI_FLUSH_CPU_CACHE(); - status = acpi_hw_write_port(acpi_gbl_FADT.smi_command, (u32)acpi_gbl_FADT.s4_bios_request, 8); if (ACPI_FAILURE(status)) { diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c index 72d2c0b656339c1f06473b252765713c2053039f..cb1750e7a6281ae9a91bed0d586dffc6b605db6d 100644 --- a/drivers/acpi/acpica/utdelete.c +++ b/drivers/acpi/acpica/utdelete.c @@ -422,6 +422,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action) ACPI_WARNING((AE_INFO, "Obj %p, Reference Count is already zero, cannot decrement\n", object)); + return; } ACPI_DEBUG_PRINT_RAW((ACPI_DB_ALLOCATIONS, diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 2494138a6905ed105009894a8eb49ed2ee4ac4e6..50ed949dc14494538084767a5050d296e0e0463e 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -1454,9 +1454,17 @@ static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res, res[0].start = pmcg->page0_base_address; res[0].end = pmcg->page0_base_address + SZ_4K - 1; res[0].flags = IORESOURCE_MEM; - res[1].start = pmcg->page1_base_address; - res[1].end = pmcg->page1_base_address + SZ_4K - 1; - res[1].flags = IORESOURCE_MEM; + /* + * The initial version in DEN0049C lacked a way to describe register + * page 1, which makes it broken for most PMCG implementations; in + * that case, just let the driver fail gracefully if it expects to + * find a second memory resource. + */ + if (node->revision > 0) { + res[1].start = pmcg->page1_base_address; + res[1].end = pmcg->page1_base_address + SZ_4K - 1; + res[1].flags = IORESOURCE_MEM; + } if (pmcg->overflow_gsiv) acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow", diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index e04352c1dc2ce825b981f4e066633951b85aedbd..2376f57b3617aa2f4ff47d4100a85d8ba792effa 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -59,6 +59,7 @@ static int battery_bix_broken_package; static int battery_notification_delay_ms; static int battery_ac_is_broken; static int battery_check_pmic = 1; +static int battery_quirk_notcharging; static unsigned int cache_time = 1000; module_param(cache_time, uint, 0644); MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); @@ -222,6 +223,8 @@ static int acpi_battery_get_property(struct power_supply *psy, val->intval = POWER_SUPPLY_STATUS_CHARGING; else if (acpi_battery_is_charged(battery)) val->intval = POWER_SUPPLY_STATUS_FULL; + else if (battery_quirk_notcharging) + val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; else val->intval = POWER_SUPPLY_STATUS_UNKNOWN; break; @@ -1105,6 +1108,12 @@ battery_do_not_check_pmic_quirk(const struct dmi_system_id *d) return 0; } +static int __init battery_quirk_not_charging(const struct dmi_system_id *d) +{ + battery_quirk_notcharging = 1; + return 0; +} + static const struct dmi_system_id bat_dmi_table[] __initconst = { { /* NEC LZ750/LS */ @@ -1149,6 +1158,19 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"), }, }, + { + /* + * On Lenovo ThinkPads the BIOS specification defines + * a state when the bits for charging and discharging + * are both set to 0. That state is "Not Charging". 
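For context, the status decision in acpi_battery_get_property() ends up looking like the sketch below; the first two branches are the pre-existing ones that the hunk does not show, so treat this as a condensed restatement rather than a literal excerpt.

        case POWER_SUPPLY_PROP_STATUS:
                if (battery->state & ACPI_BATTERY_STATE_DISCHARGING)
                        val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
                else if (battery->state & ACPI_BATTERY_STATE_CHARGING)
                        val->intval = POWER_SUPPLY_STATUS_CHARGING;
                else if (acpi_battery_is_charged(battery))
                        val->intval = POWER_SUPPLY_STATUS_FULL;
                else if (battery_quirk_notcharging)
                        val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
                else
                        val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
                break;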
+ */ + .callback = battery_quirk_not_charging, + .ident = "Lenovo ThinkPad", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad"), + }, + }, {}, }; diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index e317214aabec5570fcc1b5bbe1a89211fabe0198..5e14288fcabe904e493e7b68af3968cccddc5f80 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -98,8 +98,8 @@ int acpi_bus_get_status(struct acpi_device *device) acpi_status status; unsigned long long sta; - if (acpi_device_always_present(device)) { - acpi_set_device_status(device, ACPI_STA_DEFAULT); + if (acpi_device_override_status(device, &sta)) { + acpi_set_device_status(device, sta); return 0; } diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index be3e0921a6c006f820d2cfa1fea47e995e3c8fa7..8347eaee679c8e4a63184cb9dabeb8945381a123 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -166,6 +166,7 @@ struct acpi_ec_query { struct transaction transaction; struct work_struct work; struct acpi_ec_query_handler *handler; + struct acpi_ec *ec; }; static int acpi_ec_query(struct acpi_ec *ec, u8 *data); @@ -469,6 +470,7 @@ static void acpi_ec_submit_query(struct acpi_ec *ec) ec_dbg_evt("Command(%s) submitted/blocked", acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY)); ec->nr_pending_queries++; + ec->events_in_progress++; queue_work(ec_wq, &ec->work); } } @@ -535,7 +537,7 @@ static void acpi_ec_enable_event(struct acpi_ec *ec) #ifdef CONFIG_PM_SLEEP static void __acpi_ec_flush_work(void) { - drain_workqueue(ec_wq); /* flush ec->work */ + flush_workqueue(ec_wq); /* flush ec->work */ flush_workqueue(ec_query_wq); /* flush queries */ } @@ -1116,7 +1118,7 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit) } EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler); -static struct acpi_ec_query *acpi_ec_create_query(u8 *pval) +static struct acpi_ec_query *acpi_ec_create_query(struct acpi_ec *ec, u8 *pval) { struct acpi_ec_query *q; struct transaction *t; @@ -1124,11 +1126,13 @@ static struct acpi_ec_query *acpi_ec_create_query(u8 *pval) q = kzalloc(sizeof (struct acpi_ec_query), GFP_KERNEL); if (!q) return NULL; + INIT_WORK(&q->work, acpi_ec_event_processor); t = &q->transaction; t->command = ACPI_EC_COMMAND_QUERY; t->rdata = pval; t->rlen = 1; + q->ec = ec; return q; } @@ -1145,13 +1149,21 @@ static void acpi_ec_event_processor(struct work_struct *work) { struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work); struct acpi_ec_query_handler *handler = q->handler; + struct acpi_ec *ec = q->ec; ec_dbg_evt("Query(0x%02x) started", handler->query_bit); + if (handler->func) handler->func(handler->data); else if (handler->handle) acpi_evaluate_object(handler->handle, NULL, NULL, NULL); + ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit); + + spin_lock_irq(&ec->lock); + ec->queries_in_progress--; + spin_unlock_irq(&ec->lock); + acpi_ec_delete_query(q); } @@ -1161,7 +1173,7 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data) int result; struct acpi_ec_query *q; - q = acpi_ec_create_query(&value); + q = acpi_ec_create_query(ec, &value); if (!q) return -ENOMEM; @@ -1183,19 +1195,20 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data) } /* - * It is reported that _Qxx are evaluated in a parallel way on - * Windows: + * It is reported that _Qxx are evaluated in a parallel way on Windows: * https://bugzilla.kernel.org/show_bug.cgi?id=94411 * - * Put this log entry before schedule_work() in order to make - * it appearing before any other log entries occurred during the - * 
work queue execution. + * Put this log entry before queue_work() to make it appear in the log + * before any other messages emitted during workqueue handling. */ ec_dbg_evt("Query(0x%02x) scheduled", value); - if (!queue_work(ec_query_wq, &q->work)) { - ec_dbg_evt("Query(0x%02x) overlapped", value); - result = -EBUSY; - } + + spin_lock_irq(&ec->lock); + + ec->queries_in_progress++; + queue_work(ec_query_wq, &q->work); + + spin_unlock_irq(&ec->lock); err_exit: if (result) @@ -1253,6 +1266,10 @@ static void acpi_ec_event_handler(struct work_struct *work) ec_dbg_evt("Event stopped"); acpi_ec_check_event(ec); + + spin_lock_irqsave(&ec->lock, flags); + ec->events_in_progress--; + spin_unlock_irqrestore(&ec->lock, flags); } static void acpi_ec_handle_interrupt(struct acpi_ec *ec) @@ -2034,6 +2051,7 @@ void acpi_ec_set_gpe_wake_mask(u8 action) bool acpi_ec_dispatch_gpe(void) { + bool work_in_progress; u32 ret; if (!first_ec) @@ -2046,6 +2064,16 @@ bool acpi_ec_dispatch_gpe(void) if (acpi_any_gpe_status_set(first_ec->gpe)) return true; + /* + * Cancel the SCI wakeup and process all pending events in case there + * are any wakeup ones in there. + * + * Note that if any non-EC GPEs are active at this point, the SCI will + * retrigger after the rearming in acpi_s2idle_wake(), so no events + * should be missed by canceling the wakeup here. + */ + pm_system_cancel_wakeup(); + /* * Dispatch the EC GPE in-band, but do not report wakeup in any case * to allow the caller to process events properly after that. @@ -2054,8 +2082,19 @@ bool acpi_ec_dispatch_gpe(void) if (ret == ACPI_INTERRUPT_HANDLED) pm_pr_dbg("ACPI EC GPE dispatched\n"); - /* Flush the event and query workqueues. */ - acpi_ec_flush_work(); + /* Drain EC work. */ + do { + acpi_ec_flush_work(); + + pm_pr_dbg("ACPI EC work flushed\n"); + + spin_lock_irq(&first_ec->lock); + + work_in_progress = first_ec->events_in_progress + + first_ec->queries_in_progress > 0; + + spin_unlock_irq(&first_ec->lock); + } while (work_in_progress && !pm_wakeup_pending()); return false; } diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index a958ad60a3394d4398ed774c6b42187329ec13b7..125e4901c9b47834238778e61a7315108ab976a9 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -184,6 +184,8 @@ struct acpi_ec { struct work_struct work; unsigned long timestamp; unsigned long nr_pending_queries; + unsigned int events_in_progress; + unsigned int queries_in_progress; bool busy_polling; unsigned int polling_guard; }; diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index de0533bd4e086a0804aea4abe5c79424ddc83d57..67a5ee2fedfd3daf9305c3a028b2793703c13790 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1577,6 +1577,7 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device) { struct list_head resource_list; bool is_serial_bus_slave = false; + static const struct acpi_device_id ignore_serial_bus_ids[] = { /* * These devices have multiple I2cSerialBus resources and an i2c-client * must be instantiated for each, each with its own i2c_device_id. @@ -1585,11 +1586,18 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device) * drivers/platform/x86/i2c-multi-instantiate.c driver, which knows * which i2c_device_id to use for each resource. 
*/ - static const struct acpi_device_id i2c_multi_instantiate_ids[] = { {"BSG1160", }, {"BSG2150", }, {"INT33FE", }, {"INT3515", }, + /* + * HIDs of device with an UartSerialBusV2 resource for which userspace + * expects a regular tty cdev to be created (instead of the in kernel + * serdev) and which have a kernel driver which expects a platform_dev + * such as the rfkill-gpio driver. + */ + {"BCM4752", }, + {"LNV4752", }, {} }; @@ -1603,8 +1611,7 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device) fwnode_property_present(&device->fwnode, "baud"))) return true; - /* Instantiate a pdev for the i2c-multi-instantiate drv to bind to */ - if (!acpi_match_device_ids(device, i2c_multi_instantiate_ids)) + if (!acpi_match_device_ids(device, ignore_serial_bus_ids)) return false; INIT_LIST_HEAD(&resource_list); diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 31c9d0c8ae11f67092e9fef9ed9a953cee0a6e28..e2614ea820bb87dfd55b92ef8a20cc9a6698be34 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -1012,21 +1012,15 @@ static bool acpi_s2idle_wake(void) return true; } - /* Check non-EC GPE wakeups and dispatch the EC GPE. */ + /* + * Check non-EC GPE wakeups and if there are none, cancel the + * SCI-related wakeup and dispatch the EC GPE. + */ if (acpi_ec_dispatch_gpe()) { pm_pr_dbg("ACPI non-EC GPE wakeup\n"); return true; } - /* - * Cancel the SCI wakeup and process all pending events in case - * there are any wakeup ones in there. - * - * Note that if any non-EC GPEs are active at this point, the - * SCI will retrigger after the rearming below, so no events - * should be missed by canceling the wakeup here. - */ - pm_system_cancel_wakeup(); acpi_os_wait_events_complete(); /* @@ -1040,6 +1034,7 @@ static bool acpi_s2idle_wake(void) return true; } + pm_wakeup_clear(acpi_sci_irq); rearm_wake_irq(acpi_sci_irq); } diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c index bdc1ba00aee9f7e54f8b3c20e04577135cdbb430..3f9a162be84e373648ac0b01ff5b54a21feeeb3e 100644 --- a/drivers/acpi/x86/utils.c +++ b/drivers/acpi/x86/utils.c @@ -22,58 +22,71 @@ * Some BIOS-es (temporarily) hide specific APCI devices to work around Windows * driver bugs. We use DMI matching to match known cases of this. * - * We work around this by always reporting ACPI_STA_DEFAULT for these - * devices. Note this MUST only be done for devices where this is safe. + * Likewise sometimes some not-actually present devices are sometimes + * reported as present, which may cause issues. * - * This forcing of devices to be present is limited to specific CPU (SoC) - * models both to avoid potentially causing trouble on other models and - * because some HIDs are re-used on different SoCs for completely - * different devices. + * We work around this by using the below quirk list to override the status + * reported by the _STA method with a fixed value (ACPI_STA_DEFAULT or 0). + * Note this MUST only be done for devices where this is safe. + * + * This status overriding is limited to specific CPU (SoC) models both to + * avoid potentially causing trouble on other models and because some HIDs + * are re-used on different SoCs for completely different devices. 
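As an illustration of the table format introduced by the helper macros defined just below, one additional quirk entry would be written as follows; the HID, UID and DMI strings here are invented for illustration, not a real device.

        /* Hypothetical example only: hide HID "XXXX0000" on one made-up Airmont board. */
        NOT_PRESENT_ENTRY_HID("XXXX0000", "1", ATOM_AIRMONT, {
                DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
                DMI_MATCH(DMI_PRODUCT_NAME, "Example Board"),
        }),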
*/ -struct always_present_id { +struct override_status_id { struct acpi_device_id hid[2]; struct x86_cpu_id cpu_ids[2]; struct dmi_system_id dmi_ids[2]; /* Optional */ const char *uid; + const char *path; + unsigned long long status; }; -#define X86_MATCH(model) X86_MATCH_INTEL_FAM6_MODEL(model, NULL) - -#define ENTRY(hid, uid, cpu_models, dmi...) { \ +#define ENTRY(status, hid, uid, path, cpu_model, dmi...) { \ { { hid, }, {} }, \ - { cpu_models, {} }, \ + { X86_MATCH_INTEL_FAM6_MODEL(cpu_model, NULL), {} }, \ { { .matches = dmi }, {} }, \ uid, \ + path, \ + status, \ } -static const struct always_present_id always_present_ids[] = { +#define PRESENT_ENTRY_HID(hid, uid, cpu_model, dmi...) \ + ENTRY(ACPI_STA_DEFAULT, hid, uid, NULL, cpu_model, dmi) + +#define NOT_PRESENT_ENTRY_HID(hid, uid, cpu_model, dmi...) \ + ENTRY(0, hid, uid, NULL, cpu_model, dmi) + +#define PRESENT_ENTRY_PATH(path, cpu_model, dmi...) \ + ENTRY(ACPI_STA_DEFAULT, "", NULL, path, cpu_model, dmi) + +#define NOT_PRESENT_ENTRY_PATH(path, cpu_model, dmi...) \ + ENTRY(0, "", NULL, path, cpu_model, dmi) + +static const struct override_status_id override_status_ids[] = { /* * Bay / Cherry Trail PWM directly poked by GPU driver in win10, * but Linux uses a separate PWM driver, harmless if not used. */ - ENTRY("80860F09", "1", X86_MATCH(ATOM_SILVERMONT), {}), - ENTRY("80862288", "1", X86_MATCH(ATOM_AIRMONT), {}), + PRESENT_ENTRY_HID("80860F09", "1", ATOM_SILVERMONT, {}), + PRESENT_ENTRY_HID("80862288", "1", ATOM_AIRMONT, {}), - /* Lenovo Yoga Book uses PWM2 for keyboard backlight control */ - ENTRY("80862289", "2", X86_MATCH(ATOM_AIRMONT), { - DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"), - }), /* * The INT0002 device is necessary to clear wakeup interrupt sources * on Cherry Trail devices, without it we get nobody cared IRQ msgs. */ - ENTRY("INT0002", "1", X86_MATCH(ATOM_AIRMONT), {}), + PRESENT_ENTRY_HID("INT0002", "1", ATOM_AIRMONT, {}), /* * On the Dell Venue 11 Pro 7130 and 7139, the DSDT hides * the touchscreen ACPI device until a certain time * after _SB.PCI0.GFX0.LCD.LCD1._ON gets called has passed * *and* _STA has been called at least 3 times since. */ - ENTRY("SYNA7500", "1", X86_MATCH(HASWELL_L), { + PRESENT_ENTRY_HID("SYNA7500", "1", HASWELL_L, { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7130"), }), - ENTRY("SYNA7500", "1", X86_MATCH(HASWELL_L), { + PRESENT_ENTRY_HID("SYNA7500", "1", HASWELL_L, { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7139"), }), @@ -81,54 +94,83 @@ static const struct always_present_id always_present_ids[] = { /* * The GPD win BIOS dated 20170221 has disabled the accelerometer, the * drivers sometimes cause crashes under Windows and this is how the - * manufacturer has solved this :| Note that the the DMI data is less - * generic then it seems, a board_vendor of "AMI Corporation" is quite - * rare and a board_name of "Default String" also is rare. + * manufacturer has solved this :| The DMI match may not seem unique, + * but it is. In the 67000+ DMI decode dumps from linux-hardware.org + * only 116 have board_vendor set to "AMI Corporation" and of those 116 + * only the GPD win and pocket entries' board_name is "Default string". * * Unfortunately the GPD pocket also uses these strings and its BIOS * was copy-pasted from the GPD win, so it has a disabled KIOX000A * node which we should not enable, thus we also check the BIOS date. 
*/ - ENTRY("KIOX000A", "1", X86_MATCH(ATOM_AIRMONT), { + PRESENT_ENTRY_HID("KIOX000A", "1", ATOM_AIRMONT, { DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), DMI_MATCH(DMI_BOARD_NAME, "Default string"), DMI_MATCH(DMI_PRODUCT_NAME, "Default string"), DMI_MATCH(DMI_BIOS_DATE, "02/21/2017") }), - ENTRY("KIOX000A", "1", X86_MATCH(ATOM_AIRMONT), { + PRESENT_ENTRY_HID("KIOX000A", "1", ATOM_AIRMONT, { DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), DMI_MATCH(DMI_BOARD_NAME, "Default string"), DMI_MATCH(DMI_PRODUCT_NAME, "Default string"), DMI_MATCH(DMI_BIOS_DATE, "03/20/2017") }), - ENTRY("KIOX000A", "1", X86_MATCH(ATOM_AIRMONT), { + PRESENT_ENTRY_HID("KIOX000A", "1", ATOM_AIRMONT, { DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), DMI_MATCH(DMI_BOARD_NAME, "Default string"), DMI_MATCH(DMI_PRODUCT_NAME, "Default string"), DMI_MATCH(DMI_BIOS_DATE, "05/25/2017") }), + + /* + * The GPD win/pocket have a PCI wifi card, but its DSDT has the SDIO + * mmc controller enabled and that has a child-device which _PS3 + * method sets a GPIO causing the PCI wifi card to turn off. + * See above remark about uniqueness of the DMI match. + */ + NOT_PRESENT_ENTRY_PATH("\\_SB_.PCI0.SDHB.BRC1", ATOM_AIRMONT, { + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"), + DMI_EXACT_MATCH(DMI_BOARD_SERIAL, "Default string"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"), + }), }; -bool acpi_device_always_present(struct acpi_device *adev) +bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *status) { bool ret = false; unsigned int i; - for (i = 0; i < ARRAY_SIZE(always_present_ids); i++) { - if (acpi_match_device_ids(adev, always_present_ids[i].hid)) + for (i = 0; i < ARRAY_SIZE(override_status_ids); i++) { + if (!x86_match_cpu(override_status_ids[i].cpu_ids)) continue; - if (!adev->pnp.unique_id || - strcmp(adev->pnp.unique_id, always_present_ids[i].uid)) + if (override_status_ids[i].dmi_ids[0].matches[0].slot && + !dmi_check_system(override_status_ids[i].dmi_ids)) continue; - if (!x86_match_cpu(always_present_ids[i].cpu_ids)) - continue; + if (override_status_ids[i].path) { + struct acpi_buffer path = { ACPI_ALLOCATE_BUFFER, NULL }; + bool match; - if (always_present_ids[i].dmi_ids[0].matches[0].slot && - !dmi_check_system(always_present_ids[i].dmi_ids)) - continue; + if (acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &path)) + continue; + + match = strcmp((char *)path.pointer, override_status_ids[i].path) == 0; + kfree(path.pointer); + + if (!match) + continue; + } else { + if (acpi_match_device_ids(adev, override_status_ids[i].hid)) + continue; + + if (!adev->pnp.unique_id || + strcmp(adev->pnp.unique_id, override_status_ids[i].uid)) + continue; + } + *status = override_status_ids[i].status; ret = true; break; } diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 80e2bbb36422ed0195a49b29a69df77e6baacb9a..366b12405708187e0116337a4a316cdec45fb9d3 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -2657,8 +2657,8 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda, if (!ret) ret = binder_translate_fd(fd, offset, t, thread, in_reply_to); - if (ret < 0) - return ret; + if (ret) + return ret > 0 ? 
-EINVAL : ret; } return 0; } diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 1f54f82d22d61450d747985ec997994721161bf9..d2b544bdc7b5e735289d7c71395bce92b34e0f8a 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -3989,6 +3989,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* devices that don't properly handle TRIM commands */ { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, }, + { "M88V29*", NULL, ATA_HORKAGE_NOTRIM, }, /* * As defined, the DRAT (Deterministic Read After Trim) and RZAT diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c index fad6c6a873130a5b4af26e117a36ba94ec9fe42f..499a947d56ddb66c92b78897280f2050a6cd1ec9 100644 --- a/drivers/ata/pata_hpt37x.c +++ b/drivers/ata/pata_hpt37x.c @@ -917,6 +917,20 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) irqmask &= ~0x10; pci_write_config_byte(dev, 0x5a, irqmask); + /* + * HPT371 chips physically have only one channel, the secondary one, + * but the primary channel registers do exist! Go figure... + * So, we manually disable the non-existing channel here + * (if the BIOS hasn't done this already). + */ + if (dev->device == PCI_DEVICE_ID_TTI_HPT371) { + u8 mcr1; + + pci_read_config_byte(dev, 0x50, &mcr1); + mcr1 &= ~0x04; + pci_write_config_byte(dev, 0x50, mcr1); + } + /* * default to pci clock. make sure MA15/16 are set to output * to prevent drives having problems with 40-pin cables. Needed diff --git a/drivers/base/core.c b/drivers/base/core.c index 389d13616d1df5371576399be1e276efcc92dda0..c0566aff535513601666083c93c3fafb69948d67 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -348,8 +348,7 @@ static void device_link_release_fn(struct work_struct *work) /* Ensure that all references to the link object have been dropped. 
*/ device_link_synchronize_removal(); - while (refcount_dec_not_one(&link->rpm_active)) - pm_runtime_put(link->supplier); + pm_runtime_release_supplier(link, true); put_device(link->consumer); put_device(link->supplier); diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 81ad4f867f02d584eb08a34cd4a082c8e44ba34d..64ff137408b8c2521999c37ccd6fdd275ae77226 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -592,6 +592,9 @@ re_probe: drv->remove(dev); devres_release_all(dev); + arch_teardown_dma_ops(dev); + kfree(dev->dma_range_map); + dev->dma_range_map = NULL; driver_sysfs_remove(dev); dev->driver = NULL; dev_set_drvdata(dev, NULL); @@ -1168,6 +1171,8 @@ static void __device_release_driver(struct device *dev, struct device *parent) devres_release_all(dev); arch_teardown_dma_ops(dev); + kfree(dev->dma_range_map); + dev->dma_range_map = NULL; dev->driver = NULL; dev_set_drvdata(dev, NULL); if (dev->pm_domain && dev->pm_domain->dismiss) diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index a71d1411794394d0781ec22412c5bf87109950ea..b5cbaa61cbea7ad51c85df214b546a70f5b07b76 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c @@ -59,8 +59,15 @@ static struct dentry *public_dev_mount(struct file_system_type *fs_type, int fla const char *dev_name, void *data) { struct super_block *s = mnt->mnt_sb; + int err; + atomic_inc(&s->s_active); down_write(&s->s_umount); + err = reconfigure_single(s, flags, data); + if (err < 0) { + deactivate_locked_super(s); + return ERR_PTR(err); + } return dget(s->s_root); } diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index bc649da4899a0b873884226c40af571c6cfed8a2..1573319404888ad8b07bffdb4f0620d4dc553a4b 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -305,19 +305,40 @@ static int rpm_get_suppliers(struct device *dev) return 0; } +/** + * pm_runtime_release_supplier - Drop references to device link's supplier. + * @link: Target device link. + * @check_idle: Whether or not to check if the supplier device is idle. + * + * Drop all runtime PM references associated with @link to its supplier device + * and if @check_idle is set, check if that device is idle (and so it can be + * suspended). + */ +void pm_runtime_release_supplier(struct device_link *link, bool check_idle) +{ + struct device *supplier = link->supplier; + + /* + * The additional power.usage_count check is a safety net in case + * the rpm_active refcount becomes saturated, in which case + * refcount_dec_not_one() would return true forever, but it is not + * strictly necessary. 
+ */ + while (refcount_dec_not_one(&link->rpm_active) && + atomic_read(&supplier->power.usage_count) > 0) + pm_runtime_put_noidle(supplier); + + if (check_idle) + pm_request_idle(supplier); +} + static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend) { struct device_link *link; list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, - device_links_read_lock_held()) { - - while (refcount_dec_not_one(&link->rpm_active)) - pm_runtime_put_noidle(link->supplier); - - if (try_to_suspend) - pm_request_idle(link->supplier); - } + device_links_read_lock_held()) + pm_runtime_release_supplier(link, try_to_suspend); } static void rpm_put_suppliers(struct device *dev) @@ -1755,9 +1776,7 @@ void pm_runtime_drop_link(struct device_link *link) return; pm_runtime_drop_link_count(link->consumer); - - while (refcount_dec_not_one(&link->rpm_active)) - pm_runtime_put(link->supplier); + pm_runtime_release_supplier(link, true); } static bool pm_runtime_need_not_resume(struct device *dev) diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 92073ac68473c5c677bd5d48c8701a0d11a52440..8997e0227eb9d83f65d7664cedf53d0054cda5b3 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -34,7 +34,8 @@ suspend_state_t pm_suspend_target_state; bool events_check_enabled __read_mostly; /* First wakeup IRQ seen by the kernel in the last cycle. */ -unsigned int pm_wakeup_irq __read_mostly; +static unsigned int wakeup_irq[2] __read_mostly; +static DEFINE_RAW_SPINLOCK(wakeup_irq_lock); /* If greater than 0 and the system is suspending, terminate the suspend. */ static atomic_t pm_abort_suspend __read_mostly; @@ -941,19 +942,45 @@ void pm_system_cancel_wakeup(void) atomic_dec_if_positive(&pm_abort_suspend); } -void pm_wakeup_clear(bool reset) +void pm_wakeup_clear(unsigned int irq_number) { - pm_wakeup_irq = 0; - if (reset) + raw_spin_lock_irq(&wakeup_irq_lock); + + if (irq_number && wakeup_irq[0] == irq_number) + wakeup_irq[0] = wakeup_irq[1]; + else + wakeup_irq[0] = 0; + + wakeup_irq[1] = 0; + + raw_spin_unlock_irq(&wakeup_irq_lock); + + if (!irq_number) atomic_set(&pm_abort_suspend, 0); } void pm_system_irq_wakeup(unsigned int irq_number) { - if (pm_wakeup_irq == 0) { - pm_wakeup_irq = irq_number; + unsigned long flags; + + raw_spin_lock_irqsave(&wakeup_irq_lock, flags); + + if (wakeup_irq[0] == 0) + wakeup_irq[0] = irq_number; + else if (wakeup_irq[1] == 0) + wakeup_irq[1] = irq_number; + else + irq_number = 0; + + raw_spin_unlock_irqrestore(&wakeup_irq_lock, flags); + + if (irq_number) pm_system_wakeup(); - } +} + +unsigned int pm_wakeup_irq(void) +{ + return wakeup_irq[0]; } /** diff --git a/drivers/base/property.c b/drivers/base/property.c index 4c43d30145c6b086dfeb6aa57ef5d9c2a53f5353..cf88a5554d9c5111a5686ca9488b0fbd679f3c76 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -1195,8 +1195,10 @@ fwnode_graph_devcon_match(struct fwnode_handle *fwnode, const char *con_id, fwnode_graph_for_each_endpoint(fwnode, ep) { node = fwnode_graph_get_remote_port_parent(ep); - if (!fwnode_device_is_available(node)) + if (!fwnode_device_is_available(node)) { + fwnode_handle_put(node); continue; + } ret = match(node, con_id, data); fwnode_handle_put(node); diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index ad5c2de395d1f11690e4b6dd03fc435565a51a85..87c5c421e0f461a307c484821243f82aef75a1b0 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -170,11 +170,9 @@ static void 
regmap_irq_sync_unlock(struct irq_data *data) ret = regmap_write(map, reg, d->mask_buf[i]); if (d->chip->clear_ack) { if (d->chip->ack_invert && !ret) - ret = regmap_write(map, reg, - d->mask_buf[i]); + ret = regmap_write(map, reg, UINT_MAX); else if (!ret) - ret = regmap_write(map, reg, - ~d->mask_buf[i]); + ret = regmap_write(map, reg, 0); } if (ret != 0) dev_err(d->map->dev, "Failed to ack 0x%x: %d\n", @@ -509,11 +507,9 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) data->status_buf[i]); if (chip->clear_ack) { if (chip->ack_invert && !ret) - ret = regmap_write(map, reg, - data->status_buf[i]); + ret = regmap_write(map, reg, UINT_MAX); else if (!ret) - ret = regmap_write(map, reg, - ~data->status_buf[i]); + ret = regmap_write(map, reg, 0); } if (ret != 0) dev_err(map->dev, "Failed to ack 0x%x: %d\n", @@ -745,13 +741,9 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, d->status_buf[i] & d->mask_buf[i]); if (chip->clear_ack) { if (chip->ack_invert && !ret) - ret = regmap_write(map, reg, - (d->status_buf[i] & - d->mask_buf[i])); + ret = regmap_write(map, reg, UINT_MAX); else if (!ret) - ret = regmap_write(map, reg, - ~(d->status_buf[i] & - d->mask_buf[i])); + ret = regmap_write(map, reg, 0); } if (ret != 0) { dev_err(map->dev, "Failed to ack 0x%x: %d\n", diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 456a1787e18d06b5ad5eec33a46a65eae7cb93bd..55a30afc14a00cc3dd3bba15281f623561442b62 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -620,6 +620,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map, if (ret) return ret; + regmap_debugfs_exit(map); regmap_debugfs_init(map); /* Add a devres resource for dev_get_regmap() */ diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c index 206bd4d7d7e237beaf36531833787e6005135767..d2fb3eb5816c3518aa0ad4ad1af666230a4f7bc5 100644 --- a/drivers/base/swnode.c +++ b/drivers/base/swnode.c @@ -519,7 +519,7 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode, return -ENOENT; if (nargs_prop) { - error = property_entry_read_int_array(swnode->node->properties, + error = property_entry_read_int_array(ref->node->properties, nargs_prop, sizeof(u32), &nargs_prop_val, 1); if (error) diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 7df79ae6b0a1e194834b938844697520bc7e8852..aaee15058d181409ffeda855dc9b284bcc6c5e70 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -1015,7 +1015,7 @@ static DECLARE_DELAYED_WORK(fd_timer, fd_timer_workfn); static void cancel_activity(void) { do_floppy = NULL; - cancel_delayed_work_sync(&fd_timer); + cancel_delayed_work(&fd_timer); cancel_work_sync(&floppy_work); } @@ -3169,6 +3169,8 @@ static void raw_cmd_free(struct floppy_raw_cmd **ptr) } } +#define MAX_LEN (1UL << MAX_ORDER << PAGE_SHIFT) + static int raw_cmd_copyin(int cmd, void __user *param, struct floppy_raw_cmd **rcmd) { @@ -3198,7 +3200,7 @@ loop: ptr->resultcode = 0; if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) { - if (ptr->length <= 0) + if (ptr->length <= 0 || ptr->length >= MAX_LEN) return -EINVAL; ptr->kernel_data = (char *)fd_dma_mem_alloc(ptr->length); fallback_on_nodma_alloc(&ptr->kernel_data, ptr->length); diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c index 5f9f027956317ef3a5bb71f6753f39d7522023cf..74856a58621622dae4582f05a4914dd7bdfe3f05 100644 --- a/drivers/bluetooth/btmtksdio.c +++ b/drivers/bluetooth/btmtksdio.c @@ -1042,6 +1042,8 @@ static int btmtksdio_runtime_suspend(struct 
device *dev) if (!bdev) return 0; + sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); + sdio_claim_host(bdev->func); sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, &err); diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index 8ea5ca8d71d6d2260965cfc1c9f4ae0e0a645d13..259a643377c242d8409cf4f8a03c423d982c55c4 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -1164,7 +1164,12 @@ static int bcm_probe(struct platform_device *pdev) return -ENOMEM; dev->dev = &pdev->dev; - dev->irq = platform_get_irq(pdev, 0); + + ret = platform_get_irq(pdev, 0); + if (ret < 0) + return ret; + + dev->irq = ret; /* Initialize routing field to an unused value */ dev->pcm_int_params[0] = 0xff; diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 4184faef9f169b9defdfede96734f9486487a40d..dc7ee5dd2eeca3f15685ea97ba85e8396a3f77c4 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -1844,6 +1844,9 @@ static int qca_power_off(struct hci_dev *hdev) hu->hdev->hw_error = NULL; hu->hdev->cmd_timeout = NULL; + del_timer_sync(&qca->wake_retrans_timer); + del_timer_sync(&qca->tx_idle_timer); + /* Stop sending shutdown command if soc crashes. */ if (soc_type != QCA_ROME && qca->memdump_state == QCA_MEMDUMP_IDLE) { @@ -1987,7 +1990,7 @@ static int qca_serdev_probe(struct serdev_device *serdev) qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable", GPIOD_OUT_LOW); - if (!qcadev->bt_en) { + if (IS_ERR_OR_NULL(qcadev->bt_en)) { dev_warn(&serdev->dev, "failed to acquire enable gpio\n"); power_ctrl_enabled = false; } diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c index 8ab26dec5f6e8ccdd1a9c796f4c66ebdd13a63c7..8469f9876dd2674f86d4f02705013140c1c1f060 100644 --- a/drivers/bluetooth/hci_vhci.c +++ b/drivers/bluetooth/hci_vhci.c @@ -121,6 +121,8 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode) if (opcode & 0x80) set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); + set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); + if (hci_register_dev(hdev) < 0) { BT_ERR("Can't register HCI device"); hci_free_dev(hdev); diff --git a/drivers/bus/simple-pm-bus.c b/drivers/bus/simple-pm-bus.c index 244b8f3b38b402a0aa375853a81903f3c9956bfa..c5eb46cbf388b507608f96ed1b55082d42d54981 100644 --- a/drivers/bus/simple-pm-bus.c +++ b/drivers/bus/simple-pm-bus.c @@ -16,33 +16,7 @@ static int simple_pm_bus_probe(struct platform_device *pdev) { - const struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node; - const struct of_device_id *match; - - /* - * Allow user to use driver_override to bind this driver to a - * transparent bus device which has a different compatible string - * that's not listed in simple_pm_bus_of_match. We don't want to do any - * of the simple-pm-bus tasks for these devices, so return early. - */ - if (pdev->driver_override) - return 0; - - match = of_match_device(dev->driver->of_match_table, dev); - /* - * These are transparent bus devices (not simple-pm-bus matches) that - * have their child nodes populated automatically. So, don't need to - * do anything more. We only match with the device if this driver is - * the most specific match because we don't want to incorrectly bind to - * a device that has a more specific driver. 
- */ - if (match && match->data) { - if (of_property_match_string(np, "compatible", match->compatible) == 0) - return 0; - else - return -ENODEV; - } + struct device_node *np = pdev->dev.of_node; dev_dbg(&pdev->dev, "%s\n", __func__); @@ -56,25 +30,14 @@ static int simple_pm_bus_probe(struct platform_device *pdev) static int simple_pm_bus_remove(struct platform_device *pdev) { - const void *data = of_device_get_match_data(&pdev->dev); - - if (pdev->driver_override || data) - return 0; - dev_dbg(&pdev->dev, "%s\n", __func__); pm_runtime_disable(&pdev->dev); return 0; } -#define ONLY_BUS ((void *) 1) /* Match if the device is only a bus. */ - static const struct of_device_id simple_pm_bus_of_match[] = { { .compatible = "simple-pm-bus", }, - { .compatible = "simple-bus", .data = ONLY_BUS }, - { .compatible = "simple-mfd", .data = ONLY_BUS }, - { .compatible = "isa", .data = ONLY_BUS }, - { .compatible = "arm,amba-bus", .data = ONLY_BUS }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, simple_pm_bus_of_match); diff --git a/drivers/char/mwave/3780i.h b/drivers/char/mwave/3780i.h index 9ccb6b270b071cfbf03b250d1af442f28d75bafe..95164246afd1a313d3fb3a2726c6abcff35c77b0 100644 --- a/drivers/char/mwave/3780i.h +++ b/drivers/char/mwave/3780i.h @@ -68,7 +68,7 @@ typedef struct { unsigned char ClockControl:1; /* RW: Clock control: 0=normal, 1=stop 3780i clocks */ unsigned char SoftReset:1; /* RW: Soft reset 0=normal, 1=soft reset active */ unsigned char ConfigMode:1; /* RW: Configuration mode, 0=normal, 1=config mode */ - unsigned char Reserved:5; /* 0: Reserved */ + unsigned short Reserved:13; /* 0: Reserved */ } DSP_ISA_SLAVE_CONTROL; diff --git a/drivers/char/random.c b/drivers/char/random.c index 8c94380e7a463e200bccea195827bd657d98a58f..5f541c94655988ffc8b7ab4065195891a1b465f5 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -922,12 +922,14 @@ static struct crng_state *select_crng(void) /* * crng_fast_load() can be called by code in the interrupt service - * path. So we can't afford to dilly-dally. + * path. So we can't afford to dilly-dally. Returns the number of + * bytes processed from cp. 
*/ -static int crng_fast_load(const char *cp, size_t len) +static size_t crng_fast_load(const char *cp, size_t len) { unsigned long flags; char *p; + size_t ret = 0; if (!spin_trylock_irqsave(&primary_crng.lock, flags)) return 0; @@ -938,7 +940,7 @@ static int crng_fast_load(const char *cp, size_t len) p = (unsigned char *) &primary_crng.state[4]; while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) { p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp; - cp++; crng_init_cnt++; len--; + cp++; crng_init_cnt++; len--; ret++; } spin_unlock_irqrestore(&primary_crng.lock, flags); if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) { @@ -946,7 +948,7 @@ static int crng_fast_load(const char *cp, size_t len) crng_init = 1; pr_notice("fast init done\n"); } - return 1; + return ret; } /* @@ -1299,7 +1301,7 @@ void add_interrupt_randomness(int irq, int irq_flags) if (unlikely(crng_init == 0)) { if ((fast_pool->count >= 64) && crng_fast_load((char *) fast_pool->pool, - sizeof(fast_pool->pool))) { + sizeof(fast_pool->pool)) > 0) { fast_pool->count = 0; fast_pool->last = now; } @@ -1985,7 +1987,10 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) */ if (!capable(CAP_SYS_ADMIN)) return -EPERM; - input_pool.entropy_count = 0; + if (xchg(&input_pool.entropy_count, 0) && random_write_wakeup_bits) { + wake_up_interruptible(&random_write_wait); + kill_fasync(&fasync, SIGIO, POLL_OUT); + } return 0; case RNDRESEEDCRNG: if (!capable(CAP_SYS_ADMIN)) @@ -2319,8 +2324,11 @@ void add_hwgenerator_randomness(const char *buffer, size_t count, struct entropy_store *poolp = &input_pool; if (unlikely(crng_init == 0)) { - crng_fast_load(buffer, count); - return; + size_t ret = crng_fast_load(buffer, count); + count -= ret; + buffer += ret; + if (!count || crng_init == 0) + return; } /* Suspend writing if we're above the trickle threshold. 
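Since crng_fast_load() now reports how many bytes it actually consumed, callers are expected to advance past that prefix and fall through to the normal pool path, as the add_hwgenerator_randomness() hunk above does; a minimal sketch of that pattern, with a hypothetical wrapper name:

static void sketch_feed_early_entropy(const char *buffer, size_t count)
{
        if (unlikely(crng_init == 0)) {
                size_t ret = crng_fast_load(buffer, count);

                /* Skip whatever the fast-load path already mixed in. */
                buffer += ret;
                count -= ret;

                /* Nothing left, or the fast-load path could not take the bytes yet. */
                if (!count || crng_init == 0)
                        return;
        }

        /* ... credit the remainder to the input pool as usual ... */
}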
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index b2659a4c401687342f2ce21bfa80ceea3e238842..dc56b976d8162cff62ad6e6d3af5ba60a239606a 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -950,9 +950,11 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, priv->timeout_max = TPM_TIMEOUT_USECS_MAX; priv->phy_ops = phy_ops; + dev_set_drvdata(&chip->dev, priv); + rc = tpm_tis_read32(priv, TPM_DID_VID(0), &vendor); if (rc < 0) - goto out_err; + return rc; priv->manufacturer_id = vendor; @@ -962,8 +964,6 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, priv->timeout_max = TIS_TIMEOUT_MAX_ATML; } - dev_set_drvdata(&chip->dev, priv); - if (is_bsw()) { priv->ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR, ILB_REMAP_SIZE); @@ -994,7 +994,15 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, intmask |= TPM_INTF_CMD_READY_INT | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT | TPM_INTF_STS_VALID_INT; intmask &= ~TPM_GLOBAL_INT_ENABLE; + + rc = request_locality(chip, 0); + if (rc < 0) { + rc = -ENODEV; + goto out_err; + } + tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask); + release_locality(chip, 0); rc = tpm_chip_start(chip); if (rc) diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c index 7fc2d55add2ea4a3662611c50b7f7ffda519c410..7e74aafddbf825232804a40650987fd6230d7def 100644 --- a/drivers/clk/bcm/clk-bcm2835.c +++ b/drivers/clk/bcm/clk-bcm2835.c @@ -939,8 +939,7 @@ static int bcm2835_clock_is_on(struct clk_hw *hw) static u32 bcm2835_clock_choose_div(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate, - bool round_up) + unsigned long parent_rate) { struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw); const struct bcm2835_clock_data *data = clock->data; @@ -952,10 +951,6 @@ static u32 bcm2835_clock_choose_div(struct clk_hw *hw, rem = do_div(temp, rate); div = temp; - - /* Round up and mask off the unused bits */ - if (round_up && ((div & unused_frac_mask) != 0 || rem != 0)) - div += unused_frac_mask + 1; div &= ~unused_frac_mask; /* different clamping limits apply for a mash clock */ @@ -1112,7 +1107,7 @@ static int bcm2835_clock_set_rate_and_parent(struct clk_hw *hw, struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw); struct bcm2835_cprman *cprman = clock->cprman; const struct bcm2835_clock_data *data = clock->data; - u32 div = bcm2835_clock_choose_div(hw, rate, parent_rate, false); + u32 div = bcm2835_clock_choose_div(hw, rate, parent_rate); u32 ctl; spin_lock(&cprman->regs_lock); @@ -1178,7 +1173,7 @@ static unsigned long bcm2835_clock_choose_div_and_prate(struct clk_hw *hw, if (!(BIT(parent_idx) & data->set_rate_parent)) { *prate = clk_hw_get_rate(parent); - *div = bcm2835_clock_choose_div(hw, rate, *prate, true); + *div = bcm2835_clock_choose_div(hw, rate, *prate); *avgrate = bcm2835_clock_rate_from_divisor(clock, *prate, *div); @@ -1264,7 +1259,7 @@ static int bcm2835_clock_determine_rate(struct clk_hw *hw, rate = bcm2835_clock_choose_div_and_prate(hw, i, req->rate, &div, &prate, &avgrate); - if (rate > best_rate && rate <= req->rate) { + if (abs(req->rate - rate) < abs(req->rate - best_rate)) { best_parent = parent; best_prate = prate; best_rate = rate; diff --git a/drivers/clk/clk-bm1880.c b/drivers/clk/clk-bm1880.c index e6d6599d310a1da8b2011151c81d13efb8d42cb7..fad78a22218e8d7cb12a05bf49ed60bbf2b91072 100644 --- a/drivers/clk/clk-bm1880.c +++ 
b/drivers/clk/clk-bm1880.c @@ -522,14 +522,6 @@ static struct clk_hw *bm1880_clk_register_pll(struct bm1880_pll_hw_clock *pll_cl return hw; } -static void bm1880_clk_unregister_pll(struct clk_hw *hw) -{ - struct bm1880_pll_hw_clock *pll_hw = to_bm1880_pll_clk(hw); - - clk_hw_unregister(hw); - kfree(pll_hw); -} - static int bm1880_clk_register_plls(struct bm1880_pll_hw_clock *clks, int num_clks, struct bm1880_clock_data *data) @@ -555,7 +547,7 @@ static int bm1880_clk_register_plls(struct bm1880_pll_hw_clock *clks, err_clk: while (i--) - bm1880_clk_unregister_pll(data->hw_data.hws[clks[i].pll.id]); + clk_hw_unregister(data->hw_data.hws[clks[i].pll.id]); return PTR_ERR(hw); } @@ -695,14 +687,6 @@ static struct clk_hw *bm1880_clk_register_div(struct bm1880_div_hw_clock *div_cl return hw; } -static void bm1880_clk_unregister_div(struct clk_hw *hw) -{ - struct bm1880_div_hw_clock *div_hw = to_bm1880_div_clk(hw); - - clk_hw_unregister(hw); - kfree(div_hw); -} - static int bm1880_clk_register_divs(struct bm1880_div_hw_clock *clks, int num_clks, struct bm1880_clock_data *data) @@ -729,7 +713,7 @@ static int bm1880_clk_register_divs(struct bm1880_div_hw_clock *clks, err_clk: while (i--) - bm1880_clk_unregister_div(data->hw_data.hws[clks[i].div.id]); + clk_hw_unregister(data->hw_data.hws[clks[i].div.id]); return PTR_ERR(hw); } diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c index eb22f4fdbc6b42c2f3384bafdc33ef93c8c8375e..772b48ad0cd7809c8dbede6a2fd30cc410e2a753 100644 --- a/drivers/clk/clk-si5341.c +++ b/drivers/clk/clk-si5341.c @@ -1576,7 +1576,7 @@ static int si5341_probe(struct i2c_client *client, clk_prepare(data->clk[i].hw.clk); } - err = of_clk_add_hw_provider(client->dev.of_node, of_clk_si5341_get, + err = devm_of_clk_add_hw_provider(&client->dev, of_clk_si5341_get, data); if (err) { dev_err(&client->dev, "unable to add clk provider\n"); diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c index 5c75e3d906c209767b5716992b8f6b718cd3d098..682a18b392f0843ae4aeab2b56caaa7a62153add 100644 --- a/drivers/clk/clk-stm32f4.c +++ b/drivers/clk/clk-stm32f4.c @@ -129,7 +129,6 @@ static const struct stm32f4_gate_data stm32f429_gates[] __initconst = { { STM32F4_RCC_APB2ENR, 20, "spi5", "apb2_div" }, { STM32F4_RCC_APB2ENR, 21, "spi6", "apb2_div" }, { STM32F4_RCC_APB2ENR, 22, "sai1", "apb2_div" }, - { STM32F4_RCC_APB2ENR, 26, "ltdc", "apb2_div" }, }; static const struct stm32f4_gate_data stm32f469_gates[] __initconst = { @@ -211,7 +210,6 @@ static const struct stm32f4_gate_data stm32f469_gates[] __initconst = { { STM32F4_RCC_APB2ENR, 20, "spi5", "apb2_div" }, { STM32F4_RCC_APB2ENR, 21, "spi6", "apb2_div" }, { STM32F4_RCC_APB2ENR, 22, "sai1", "apb2_div" }, - { STM32F4_RCC_APB2ENR, 26, "ltdc", "apb2_div" }, }; static const struct stm32f4_gate_data stm32f746_gates[] __initconst = { @@ -286,7 +284,6 @@ static const struct stm32f4_gate_data stm32f746_gates[] __initconst = { { STM32F4_RCC_APB2ENR, 21, "spi6", "apb2_div" }, { STM32F4_RCC_APB2ENR, 22, "sai1", "apb2_div" }, { STM32F4_RCC_APB2ENR, 23, "sai2", "apb2_div" }, - { STM32F4_RCC_APB2ENR, 26, "ltdc", "apb2_div" }, }; static const struct stm32f4_gate_data stm32f769_gates[] __initconst = { @@ -364,7 +361,6 @@ static const struct stm32f4_gate_data stm32f769_gates[] __initconst = { { STM32F4_RCC_APB2ENR, 21, "spi6", "apb2_div" }, { STM32F4_RCC_APB2ENR, 22, "sai1", "apb2_div" }, { STM32F4_RCC_APB2ENR, 23, "sai2", "apb2_div" }, - { STM32F4_RCC_APB2ENR, 26, "ltdc", "apb2_div" }, { STM32F4_RCC_APB2ENR, 30, "mdio", "apb2_div" }, }; diff 
--git a/drivers/clk/clk.c b/drivers/clk/clk.c index d710b3466668d4cacf364874ceca03cb19c016ca..8c10c84098fbae171d7ac3bef1aab8403af80aa8 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -3440,6 +3440,24 @@ static int __init clk_debug_init(void) { struct clk_core *core; +#ifdef CLOCK_ALLOW_WRITE_DEBUGFS + pr_warn("\n"); + pr_warn("********************************************************************\n"); + pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); + pr_warn("** **\n"); + pr_warn("** WRITEABLE clk DebugFS SUPPORT HAS BEEN ENABLED IN THIS KERNEL **\n"); + pr_warn("** **\n"); + pr_warn("** This means that this kernel is built to expose clk operations **\n"); + pr_warn("** such as parent or rate setting, enabling, disabling, etc. **\n"); + pr_warn("** to userspace, which may compromise security on your system. **\n"); + pr_warn("** **\n"); + pr_warn("** If you see this message and you are not debugging the **\n"); + pr_warn("** kernel, report this immediately to your vendor! **\n"); + pr_warn("** **\n"); + pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); + pr_warn("********************************************************************\n"); +#endif + rootdir = debugfs_create_dir("clk", NULL); debugfs_create_file("clk_summary", 0444, rootdir, &all_lists, diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c index 33a7ddc23cd24e7e643faf5b93b0dfabd98113a9..db122d94db583364a321094774ee4b055342d626 100644 --- a/drivers/clk/imx/clk-imx8mn.c +++ b/drivers/clk/imx/clk-imx8mn.c @@ -274,9 +274,9 @@ static const char * const imx8mn_pdm_sels[] = {"osc_24m", "sys_pll2_100m", "audi static const char * const imx8mn_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", }; -static const char * const imx8mn_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "osc_27m", - "sys_pll1_200m", "audio_pll2_out", "vpu_pll", - "sys_pll1_80m", }; +static const char * const imx8mn_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "dummy", + "sys_pll1_200m", "audio_pll2_out", "sys_pll2_500m", + "dummy", "sys_pll1_80m", }; static const char * const imx8mn_clko2_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_400m", "sys_pll2_166m", "sys_pll3_out", "audio_pll1_out", "video_pll1_out", "osc_32k", }; diff --git a/drivers/clk/ingenic/jz4725b-cgu.c b/drivers/clk/ingenic/jz4725b-cgu.c index 8c38e72d14a79919f51569ee226f4b94558f6722..786e361a4a6a456ef090fa770a27704f4ba5c783 100644 --- a/drivers/clk/ingenic/jz4725b-cgu.c +++ b/drivers/clk/ingenic/jz4725b-cgu.c @@ -139,11 +139,10 @@ static const struct ingenic_cgu_clk_info jz4725b_cgu_clocks[] = { }, [JZ4725B_CLK_I2S] = { - "i2s", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE, + "i2s", CGU_CLK_MUX | CGU_CLK_DIV, .parents = { JZ4725B_CLK_EXT, JZ4725B_CLK_PLL_HALF, -1, -1 }, .mux = { CGU_REG_CPCCR, 31, 1 }, .div = { CGU_REG_I2SCDR, 0, 1, 9, -1, -1, -1 }, - .gate = { CGU_REG_CLKGR, 6 }, }, [JZ4725B_CLK_SPI] = { diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c index 0a68af6eec3ddf31e470fe52031410a297d53fb8..d42551a46ec913c73c4d049050e9c039bc0a5106 100644 --- a/drivers/clk/meson/gxbb.c +++ b/drivers/clk/meson/gxbb.c @@ -712,6 +712,35 @@ static struct clk_regmap gxbb_mpll_prediv = { }; static struct clk_regmap gxbb_mpll0_div = { + .data = &(struct meson_clk_mpll_data){ + .sdm = { + .reg_off = HHI_MPLL_CNTL7, + .shift = 0, + .width = 14, + }, + .sdm_en = { + .reg_off = HHI_MPLL_CNTL, + .shift = 25, + .width = 1, + }, + .n2 = { + .reg_off = HHI_MPLL_CNTL7, + .shift = 16, + .width = 9, + }, + .lock = &meson_clk_lock, + }, + 
.hw.init = &(struct clk_init_data){ + .name = "mpll0_div", + .ops = &meson_clk_mpll_ops, + .parent_hws = (const struct clk_hw *[]) { + &gxbb_mpll_prediv.hw + }, + .num_parents = 1, + }, +}; + +static struct clk_regmap gxl_mpll0_div = { .data = &(struct meson_clk_mpll_data){ .sdm = { .reg_off = HHI_MPLL_CNTL7, @@ -748,7 +777,16 @@ static struct clk_regmap gxbb_mpll0 = { .hw.init = &(struct clk_init_data){ .name = "mpll0", .ops = &clk_regmap_gate_ops, - .parent_hws = (const struct clk_hw *[]) { &gxbb_mpll0_div.hw }, + .parent_data = &(const struct clk_parent_data) { + /* + * Note: + * GXL and GXBB have different SDM_EN registers. We + * fallback to the global naming string mechanism so + * mpll0_div picks up the appropriate one. + */ + .name = "mpll0_div", + .index = -1, + }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, }, @@ -3043,7 +3081,7 @@ static struct clk_hw_onecell_data gxl_hw_onecell_data = { [CLKID_VAPB_1] = &gxbb_vapb_1.hw, [CLKID_VAPB_SEL] = &gxbb_vapb_sel.hw, [CLKID_VAPB] = &gxbb_vapb.hw, - [CLKID_MPLL0_DIV] = &gxbb_mpll0_div.hw, + [CLKID_MPLL0_DIV] = &gxl_mpll0_div.hw, [CLKID_MPLL1_DIV] = &gxbb_mpll1_div.hw, [CLKID_MPLL2_DIV] = &gxbb_mpll2_div.hw, [CLKID_MPLL_PREDIV] = &gxbb_mpll_prediv.hw, @@ -3438,7 +3476,7 @@ static struct clk_regmap *const gxl_clk_regmaps[] = { &gxbb_mpll0, &gxbb_mpll1, &gxbb_mpll2, - &gxbb_mpll0_div, + &gxl_mpll0_div, &gxbb_mpll1_div, &gxbb_mpll2_div, &gxbb_cts_amclk_div, diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c index b6f97960d8ee020f908dbd260b6c04c188c0b4d8..5c40ca1d4740e7f67f4a674dd08a433f78488ffc 100644 --- a/drivers/clocksource/timer-ti-dm-systimer.c +++ b/drivers/clocksource/timer-ti-dm-systimer.c @@ -241,7 +241,7 @@ static void __init dmtimer_systimer_assign_alwon(void) bool quirk_unreliable_oscillator = false; /* Quirk unreliable 32 KiHz oscillator with incomplete dts */ - if (of_machine_is_compatible("ti,omap3-beagle") || + if (of_machine_is_compatible("ti,omap3-beagle-ab4") || of_machine_is_compatible("timll,omap3-devkit8000")) { quirk_unreliable_oscillator = true; counter_32k = -ENODEV; diff --git a/drivers/counter/Kconfig b/drivers/counter/Kconfig index 2de53ab0dd252be4bb89f018b0827d3bf1fd6627..cbdf84200e278695d9da6149655c70ce0f66694a 100644 --- a/drivers/counter/Kconfig +++ b/drivers/counter/Kconfig @@ -41,7 +41,7 @@ config STM32_TIMER_CNT config STM32_LPTIMER_CNT tristate "STM32 LP Timer encoder counter driver" - depends on (MFD_STM32_LPTIMER || COMPILE_TEST) && IIO + depends on MFD_STM32_LPTIMER || COMPILE_TEST help Select this option to enable STM32 Low-Power Timer quadrature encoder and counter driver. 
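The timer-ti-dm-systimer hunk above narrows the unreliable 32 KiHz oscillator quirk from the generic "ti,omap3-beagle" compatible to the new "ti,omap3-beagle-ab4" one, so only the early A-to-B4 board revisions fall back to another clocksource. The standalone sketch below shows how matching against a board's compatible list plays out for the two board generations; machine_is_compatible() and the sample lists are illustrative stand-ins, not the kernel's of_machine_is_compatible().

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Return true if the board's compatible list contains the given string. */
static bool machine_is_compatible(const char *const *compat, const char *s)
{
	for (; *compat; compat++)
		if (!strcmp(*compat, s))
			return true;
	return false;
}

int main(void)
{
	/* A..B4 boards now list the -ab4 compatible before the generic one. */
	const char *const ab4[] = { "ti,omap3-beagle-ab4", "ti,omap3-beagle",
				    "ti,omap3430", "ti,omap3", NULL };
	const char *const c4[] = { "ti,omap3-beagle", "ti,omap3430",
				   "ti,omap3", NULL };

	printf("AB4 board takes quirk: %d\n",
	       machine_is_compatible(ab4, "ti,omap3-beagle-ab4"));
	printf("C4 board takes quirk:  %d\n",
	       machine_is_compatible(c4, "ti,omap3-beagle-ab4"));
	return 0;
}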
diff --git a/drivers/counter/stm32-lptimer-cnt.c b/drivers/counter/stm32-lptimer-cnt.c index fd6828e2d34f532629217c5167d188c6e4241e2f..937439635d53f4a6fcc0fd334ca70caee19a521d 100644 --- a/drivers/counter/stm32-lptimer-cnt.c +++ b/drivers/counter/stm32-lptimer-cnt.c @@ -12,8 +12,8 @@ #include #include -#include #include +#include #include #include #include @@ -107,249 +107,27 @@ static int stm32_lptim_setup(struct stm32_lptim_cnt *priv, int enable) return regmap_update_bits(priv->regmap, STM32_LPTIM_CFGR, mask, val); } -static int stm32_lptim_write_raw(struct iio_dev *indio_dev, - struct iio_chan_spec const *chan, - int val, int val2, long mask) -{ - struct stm32_lptim_cnt *priv = iio_priv(indio_dev); - int ret; - - switch (mask) { - case IIO_CHAN_INFO_ENABLE: - if (val < 0 || val > 1) - return -EINVAL; - - /* Check nobody uses the timer, or already disabled/enabled */ - ret = stm32_lptim_is_enabled(priv); - if ((ret < 0) || (!ret && !val)) - return ret; - if (val && ret) - return -EBUSY; - - ret = stm32_lptim_setup(priv, val); - if (ret) - return ret; - return stm32_lptim_set_enable_state(priv, val); - - default: - return -EINVAL; - } -} - -static int stm32_lptim_read_raw(struct iio_dev *indio_dev, - struct iio_chan_spec const *chan, - int *val, int *val2, long mask) -{ - struct stm32_lptim_cnt *priv = iio_priv(indio_dev); - u32 dat; - int ret; - - switch (mask) { - case IIO_CHAN_INFO_RAW: - ret = regmap_read(priv->regmap, STM32_LPTIM_CNT, &dat); - if (ret) - return ret; - *val = dat; - return IIO_VAL_INT; - - case IIO_CHAN_INFO_ENABLE: - ret = stm32_lptim_is_enabled(priv); - if (ret < 0) - return ret; - *val = ret; - return IIO_VAL_INT; - - case IIO_CHAN_INFO_SCALE: - /* Non-quadrature mode: scale = 1 */ - *val = 1; - *val2 = 0; - if (priv->quadrature_mode) { - /* - * Quadrature encoder mode: - * - both edges, quarter cycle, scale is 0.25 - * - either rising/falling edge scale is 0.5 - */ - if (priv->polarity > 1) - *val2 = 2; - else - *val2 = 1; - } - return IIO_VAL_FRACTIONAL_LOG2; - - default: - return -EINVAL; - } -} - -static const struct iio_info stm32_lptim_cnt_iio_info = { - .read_raw = stm32_lptim_read_raw, - .write_raw = stm32_lptim_write_raw, -}; - -static const char *const stm32_lptim_quadrature_modes[] = { - "non-quadrature", - "quadrature", -}; - -static int stm32_lptim_get_quadrature_mode(struct iio_dev *indio_dev, - const struct iio_chan_spec *chan) -{ - struct stm32_lptim_cnt *priv = iio_priv(indio_dev); - - return priv->quadrature_mode; -} - -static int stm32_lptim_set_quadrature_mode(struct iio_dev *indio_dev, - const struct iio_chan_spec *chan, - unsigned int type) -{ - struct stm32_lptim_cnt *priv = iio_priv(indio_dev); - - if (stm32_lptim_is_enabled(priv)) - return -EBUSY; - - priv->quadrature_mode = type; - - return 0; -} - -static const struct iio_enum stm32_lptim_quadrature_mode_en = { - .items = stm32_lptim_quadrature_modes, - .num_items = ARRAY_SIZE(stm32_lptim_quadrature_modes), - .get = stm32_lptim_get_quadrature_mode, - .set = stm32_lptim_set_quadrature_mode, -}; - -static const char * const stm32_lptim_cnt_polarity[] = { - "rising-edge", "falling-edge", "both-edges", -}; - -static int stm32_lptim_cnt_get_polarity(struct iio_dev *indio_dev, - const struct iio_chan_spec *chan) -{ - struct stm32_lptim_cnt *priv = iio_priv(indio_dev); - - return priv->polarity; -} - -static int stm32_lptim_cnt_set_polarity(struct iio_dev *indio_dev, - const struct iio_chan_spec *chan, - unsigned int type) -{ - struct stm32_lptim_cnt *priv = iio_priv(indio_dev); - - if 
(stm32_lptim_is_enabled(priv)) - return -EBUSY; - - priv->polarity = type; - - return 0; -} - -static const struct iio_enum stm32_lptim_cnt_polarity_en = { - .items = stm32_lptim_cnt_polarity, - .num_items = ARRAY_SIZE(stm32_lptim_cnt_polarity), - .get = stm32_lptim_cnt_get_polarity, - .set = stm32_lptim_cnt_set_polarity, -}; - -static ssize_t stm32_lptim_cnt_get_ceiling(struct stm32_lptim_cnt *priv, - char *buf) -{ - return snprintf(buf, PAGE_SIZE, "%u\n", priv->ceiling); -} - -static ssize_t stm32_lptim_cnt_set_ceiling(struct stm32_lptim_cnt *priv, - const char *buf, size_t len) -{ - int ret; - - if (stm32_lptim_is_enabled(priv)) - return -EBUSY; - - ret = kstrtouint(buf, 0, &priv->ceiling); - if (ret) - return ret; - - if (priv->ceiling > STM32_LPTIM_MAX_ARR) - return -EINVAL; - - return len; -} - -static ssize_t stm32_lptim_cnt_get_preset_iio(struct iio_dev *indio_dev, - uintptr_t private, - const struct iio_chan_spec *chan, - char *buf) -{ - struct stm32_lptim_cnt *priv = iio_priv(indio_dev); - - return stm32_lptim_cnt_get_ceiling(priv, buf); -} - -static ssize_t stm32_lptim_cnt_set_preset_iio(struct iio_dev *indio_dev, - uintptr_t private, - const struct iio_chan_spec *chan, - const char *buf, size_t len) -{ - struct stm32_lptim_cnt *priv = iio_priv(indio_dev); - - return stm32_lptim_cnt_set_ceiling(priv, buf, len); -} - -/* LP timer with encoder */ -static const struct iio_chan_spec_ext_info stm32_lptim_enc_ext_info[] = { - { - .name = "preset", - .shared = IIO_SEPARATE, - .read = stm32_lptim_cnt_get_preset_iio, - .write = stm32_lptim_cnt_set_preset_iio, - }, - IIO_ENUM("polarity", IIO_SEPARATE, &stm32_lptim_cnt_polarity_en), - IIO_ENUM_AVAILABLE("polarity", &stm32_lptim_cnt_polarity_en), - IIO_ENUM("quadrature_mode", IIO_SEPARATE, - &stm32_lptim_quadrature_mode_en), - IIO_ENUM_AVAILABLE("quadrature_mode", &stm32_lptim_quadrature_mode_en), - {} -}; - -static const struct iio_chan_spec stm32_lptim_enc_channels = { - .type = IIO_COUNT, - .channel = 0, - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | - BIT(IIO_CHAN_INFO_ENABLE) | - BIT(IIO_CHAN_INFO_SCALE), - .ext_info = stm32_lptim_enc_ext_info, - .indexed = 1, -}; - -/* LP timer without encoder (counter only) */ -static const struct iio_chan_spec_ext_info stm32_lptim_cnt_ext_info[] = { - { - .name = "preset", - .shared = IIO_SEPARATE, - .read = stm32_lptim_cnt_get_preset_iio, - .write = stm32_lptim_cnt_set_preset_iio, - }, - IIO_ENUM("polarity", IIO_SEPARATE, &stm32_lptim_cnt_polarity_en), - IIO_ENUM_AVAILABLE("polarity", &stm32_lptim_cnt_polarity_en), - {} -}; - -static const struct iio_chan_spec stm32_lptim_cnt_channels = { - .type = IIO_COUNT, - .channel = 0, - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | - BIT(IIO_CHAN_INFO_ENABLE) | - BIT(IIO_CHAN_INFO_SCALE), - .ext_info = stm32_lptim_cnt_ext_info, - .indexed = 1, -}; - /** * enum stm32_lptim_cnt_function - enumerates LPTimer counter & encoder modes * @STM32_LPTIM_COUNTER_INCREASE: up count on IN1 rising, falling or both edges * @STM32_LPTIM_ENCODER_BOTH_EDGE: count on both edges (IN1 & IN2 quadrature) + * + * In non-quadrature mode, device counts up on active edge. 
+ * In quadrature mode, encoder counting scenarios are as follows: + * +---------+----------+--------------------+--------------------+ + * | Active | Level on | IN1 signal | IN2 signal | + * | edge | opposite +----------+---------+----------+---------+ + * | | signal | Rising | Falling | Rising | Falling | + * +---------+----------+----------+---------+----------+---------+ + * | Rising | High -> | Down | - | Up | - | + * | edge | Low -> | Up | - | Down | - | + * +---------+----------+----------+---------+----------+---------+ + * | Falling | High -> | - | Up | - | Down | + * | edge | Low -> | - | Down | - | Up | + * +---------+----------+----------+---------+----------+---------+ + * | Both | High -> | Down | Up | Up | Down | + * | edges | Low -> | Up | Down | Down | Up | + * +---------+----------+----------+---------+----------+---------+ */ enum stm32_lptim_cnt_function { STM32_LPTIM_COUNTER_INCREASE, @@ -484,7 +262,7 @@ static ssize_t stm32_lptim_cnt_ceiling_read(struct counter_device *counter, { struct stm32_lptim_cnt *const priv = counter->priv; - return stm32_lptim_cnt_get_ceiling(priv, buf); + return snprintf(buf, PAGE_SIZE, "%u\n", priv->ceiling); } static ssize_t stm32_lptim_cnt_ceiling_write(struct counter_device *counter, @@ -493,8 +271,22 @@ static ssize_t stm32_lptim_cnt_ceiling_write(struct counter_device *counter, const char *buf, size_t len) { struct stm32_lptim_cnt *const priv = counter->priv; + unsigned int ceiling; + int ret; + + if (stm32_lptim_is_enabled(priv)) + return -EBUSY; + + ret = kstrtouint(buf, 0, &ceiling); + if (ret) + return ret; + + if (ceiling > STM32_LPTIM_MAX_ARR) + return -EINVAL; + + priv->ceiling = ceiling; - return stm32_lptim_cnt_set_ceiling(priv, buf, len); + return len; } static const struct counter_count_ext stm32_lptim_cnt_ext[] = { @@ -630,32 +422,19 @@ static int stm32_lptim_cnt_probe(struct platform_device *pdev) { struct stm32_lptimer *ddata = dev_get_drvdata(pdev->dev.parent); struct stm32_lptim_cnt *priv; - struct iio_dev *indio_dev; - int ret; if (IS_ERR_OR_NULL(ddata)) return -EINVAL; - indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*priv)); - if (!indio_dev) + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) return -ENOMEM; - priv = iio_priv(indio_dev); priv->dev = &pdev->dev; priv->regmap = ddata->regmap; priv->clk = ddata->clk; priv->ceiling = STM32_LPTIM_MAX_ARR; - /* Initialize IIO device */ - indio_dev->name = dev_name(&pdev->dev); - indio_dev->dev.of_node = pdev->dev.of_node; - indio_dev->info = &stm32_lptim_cnt_iio_info; - if (ddata->has_encoder) - indio_dev->channels = &stm32_lptim_enc_channels; - else - indio_dev->channels = &stm32_lptim_cnt_channels; - indio_dev->num_channels = 1; - /* Initialize Counter device */ priv->counter.name = dev_name(&pdev->dev); priv->counter.parent = &pdev->dev; @@ -673,10 +452,6 @@ static int stm32_lptim_cnt_probe(struct platform_device *pdev) platform_set_drvdata(pdev, priv); - ret = devm_iio_device_register(&pdev->dev, indio_dev); - if (ret) - return ret; - return devm_counter_register(&pdev->dev, &priv->counter); } diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 8e159fb6af9cdab10ebfab8f6fa0eb32fa767eb0..30dafe8fc5054eadbdc3a592f89576f18a066718 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1400,7 +1400,7 @@ static int cpufreq_online(unsigned int cpu) ret = freq_qos_add_request(&policy->constraints, policy->min_freq_req, FREQ_QOS_MIN, - policy->min); + FREQ_QOS_MIN_DEFAULT_VALUE); if (ret < 0) { /* * So we 
don't call freq_qos_remove_request() for an @@ -1420,7 +1420,7 @@ static int cpufreq_online(unsigned int cpu) ret = freq_qos_add_request(&policy->constraints, policy->max_freq_req, FREQ_QOS_MAX, - policy->max); + FREQ_QOS_MAX_DEFAULT_VALUE); if (ret < 0) { policy->max_freq_req = NULL; goto out_destroy_policy; diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index a780e627838ae9a3aa41835a32d353c4d4ea00ff..5a40c7d10cc9a6912e3dba2104c0d6990dc78111 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -5467,7 +5467,7 @@ int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req) dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1])); dpaa2_fd_set_flc(&fd, req->flc_dma); - ppriv = this_cpu_ptr(priv->ppriv); + ppriv = raw_cpu_ptr(priv->ppriv); for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) { err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid, &fd); diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 9b968ac4ee7b617302ae4748b4b130a10ae580e3..a196bb8b170103a28d2a9140d3dee7a3c3d51e35 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -1302,7 +1302,7 @@ static int omap_aes_suspend(struct device *dev) static int omap_aes_resume(struct device *dev) { - pm_runtime_resume_and_get(dev); + pm_runtime_get_sync(dev); return 0; } #endif diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c index d7ca222f0df18b427b1e5059e5111151836aa209..74afafc84c7164ca4dc2bfe873f2682df64b84a7 100644 --- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c +++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c @@ -111,37 +111,19 @@ static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr) mutex_lock(lock); - /* Check if PF2VF CSR is in use by remote function */ + /* Check if the PFVF CSR is in use by remote function */ val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset); if ((val & remote_in_use_mask) == remote_in_use_pattern) { dev_dbg(&GET_DEV(accel_dev), - "PF2VF CSR in use by remote function\n"); + "PFVF CSR in use by remote function\n"); ret = -EBUSY; goto out; } - /* Attempt to get ownership of PF2VF CSR */ msg &= ~local_in_use_mask; msg |= local_in_use_pattern; - ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg); - /* Wait in case remote func also attempting to get ownership */ - msleep(ADF_IOV_MSG_COLLISION_DETECT_DELAY); - - val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset); - if ((val & local_in_use_mask) != local_in_use_pattern) { - dev_dbg(&GET_DEV(accel_dev), - "PF2VF CSR in use by remote - collision detected\n"); - ret = -EBUSY; - goto out; - } - - /* - * This function now owns the PV2VF CSR. The IN_USE_BY pattern must - * remain in the PF2VF CSR for all writes including ACK from remote - * until this local function relinquishes the CSR. Send the message - * by interrupting the remote. 
- */ + /* Attempt to get ownership of the PFVF CSR */ ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg | int_bit); /* Wait for confirmation from remote func it received the message */ @@ -150,6 +132,12 @@ static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr) val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset); } while ((val & int_bit) && (count++ < ADF_IOV_MSG_ACK_MAX_RETRY)); + if (val & int_bit) { + dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n"); + val &= ~int_bit; + ret = -EIO; + } + if (val != msg) { dev_dbg(&GET_DEV(accel_dev), "Collision - PFVF CSR overwritten by remote function\n"); @@ -157,13 +145,7 @@ static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr) goto out; } - if (val & int_bit) { - dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n"); - val &= ~int_bit; - ret = -EIO; - } - - /* Finished with PF2VF CSR; relinquish it and leave msg in CSR */ + /* Finished with the PFVF CSR; relinquish it and leave msg in CSR */ ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, val & ~local_in_use_mask); out: mutex_unlock(lock); @@ -171,12 +153,13 @@ out: } /** - * adf_iov_putmsg() - send PF2VF message + * adf_iov_putmsg() - send PFVF message * @accel_dev: Pointer to acceleration device. * @msg: Message to send - * @vf_nr: VF number to which the message will be sent + * @vf_nr: VF number to which the message will be sent if on PF, ignored + * otherwise * - * Function sends a messge from the PF to a VF + * Function sends a message through the PFVF channel * * Return: 0 on success, error code otherwise. */ diff --git a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c index 54b738da829d899e4558ccbb4d7e07ac8c36e5d5..3e25fac051b25f005a29cf6275fc9aa4a539f4bf 100644 --- a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c +++ b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c @@ -8,7 +8,7 @@ * adf_vf2pf_notify_init() - send init msg to PF * @accel_dev: Pointer to acceleration VF device. * - * Function sends an init messge from the VF to a PF + * Function sends an init message from the VF to a PF * * Return: 0 on success, error code otherwise. */ @@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(adf_vf2pf_notify_init); * adf_vf2pf_notify_shutdown() - send shutdown msg to PF * @accel_dev: Pointer to acceleration VF device. 
* - * Function sends a shutdown messge from the VF to a PF + * Function sends a shutdown message from the VF to a PF * * Return: void */ diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c index 87be96a0b0bba65f2f4c58dbb4994fa6fa42d027..8b4e79d882af45c6a9e1effa893f57becba194bc 100644 --- a/drivers/crypto/qce/sha.c +++ b/drivers/crypto/qce/sha.c @@ -533,8 +533,8 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def, ret = crypto_register_ahash(alg); if (ret) { - kfree(tmpl); dev_err(qce->dev, "%s registration failed\n", base->cra_name); + kfree(tmpl); return ret; } diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c index d8053789c8828a6dc6ac0e0a02ead50e3eacc248..89c7fc3efbd717511d72bbf7ef52333320641314 100644 --- a/drivers/crypto/qce/skcipher.c +++ b/drivers/crypto/qce/skcipher.c @@ -433,8 +433,8 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def, ret = crypto_register_skcipher(alg); if (ret) { - kfree(tmpl); dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name); + kfree(tmpl); return ret; } diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c index 75867c0b00172a013edcb2bfea5070e785ec5557..be1bf39a317de15b335d6289bf556e7e1205371b 100644 --- a/drivers/crypto/stm32/stm32-crc32.c +++ b/drivers/crypto/stm32/stm32-crc32.c @@ -279,7 +279,7 @@ static struct shash_alg algs[] = { .digestsize = CHKSUM_DIGEST_SIZE, .base = { .cra_name = "crc32", - .cra_driver_name = DRIVER_NAME, + .cra_driver_name = "stm32-crc32-crc32", .cra_priority = 200, .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, @@ -301,7 +301,7 @@ static struct shash_alg algs[] = { .digestsize = CHKSUM_DIGEST_SIZE, .base = { .cra_name = "crc32c", - .cra_driver_name = DRIVER_NAME, + .cra_driver_name = "stm32-crc32-crc32c", .cra_priority = 200, .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c index 7999b26a16ed057217a570cfe780c29d0d5a229c..81eb136b6c11df0308969c9ee0ec91f696a0213f 100644 --- a/drivers/crypto/stm32/stm32-cryp.c +++ b/drivers/crypto/stm32/stm32-cryp.c @@ -37,7 +37,6 @@ /* Mode mask = bits [15..0] */ #define FLG_MODE_MASK GENMASK(15, 0) /* Bit [31..16] status */ -#define FLG_CCM_PADDED_WA BIT(16) /* Registers */ #define CRYP_CR 0x00000000 @@ -105,8 +104,6 @@ /* Misc */ #define AES_BLOCK_32 (AES_BLOCK_SIZE / sizeof(u32)) #define GCM_CTR_INIT 2 -#define _walked_in (cryp->in_walk.offset - cryp->in_sg->offset) -#define _walked_out (cryp->out_walk.offset - cryp->out_sg->offset) #define CRYP_AUTOSUSPEND_DELAY 50 struct stm32_cryp_caps { @@ -144,26 +141,16 @@ struct stm32_cryp { size_t authsize; size_t hw_blocksize; - size_t total_in; - size_t total_in_save; - size_t total_out; - size_t total_out_save; + size_t payload_in; + size_t header_in; + size_t payload_out; - struct scatterlist *in_sg; struct scatterlist *out_sg; - struct scatterlist *out_sg_save; - - struct scatterlist in_sgl; - struct scatterlist out_sgl; - bool sgs_copied; - - int in_sg_len; - int out_sg_len; struct scatter_walk in_walk; struct scatter_walk out_walk; - u32 last_ctr[4]; + __be32 last_ctr[4]; u32 gcm_ctr; }; @@ -262,6 +249,7 @@ static inline int stm32_cryp_wait_output(struct stm32_cryp *cryp) } static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp); +static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err); static struct stm32_cryp *stm32_cryp_find_dev(struct stm32_cryp_ctx *ctx) { @@ -283,103 
+271,6 @@ static struct stm32_cryp *stm32_cryp_find_dev(struct stm32_cryp_ctx *ctx) return cryp; } -static int stm32_cryp_check_aligned(struct scatterlist *sg, size_t total, - size_t align) -{ - int len = 0; - - if (!total) - return 0; - - if (!IS_ALIGNED(total, align)) - return -EINVAL; - - while (sg) { - if (!IS_ALIGNED(sg->offset, sizeof(u32))) - return -EINVAL; - - if (!IS_ALIGNED(sg->length, align)) - return -EINVAL; - - len += sg->length; - sg = sg_next(sg); - } - - if (len != total) - return -EINVAL; - - return 0; -} - -static int stm32_cryp_check_io_aligned(struct stm32_cryp *cryp) -{ - int ret; - - ret = stm32_cryp_check_aligned(cryp->in_sg, cryp->total_in, - cryp->hw_blocksize); - if (ret) - return ret; - - ret = stm32_cryp_check_aligned(cryp->out_sg, cryp->total_out, - cryp->hw_blocksize); - - return ret; -} - -static void sg_copy_buf(void *buf, struct scatterlist *sg, - unsigned int start, unsigned int nbytes, int out) -{ - struct scatter_walk walk; - - if (!nbytes) - return; - - scatterwalk_start(&walk, sg); - scatterwalk_advance(&walk, start); - scatterwalk_copychunks(buf, &walk, nbytes, out); - scatterwalk_done(&walk, out, 0); -} - -static int stm32_cryp_copy_sgs(struct stm32_cryp *cryp) -{ - void *buf_in, *buf_out; - int pages, total_in, total_out; - - if (!stm32_cryp_check_io_aligned(cryp)) { - cryp->sgs_copied = 0; - return 0; - } - - total_in = ALIGN(cryp->total_in, cryp->hw_blocksize); - pages = total_in ? get_order(total_in) : 1; - buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages); - - total_out = ALIGN(cryp->total_out, cryp->hw_blocksize); - pages = total_out ? get_order(total_out) : 1; - buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages); - - if (!buf_in || !buf_out) { - dev_err(cryp->dev, "Can't allocate pages when unaligned\n"); - cryp->sgs_copied = 0; - return -EFAULT; - } - - sg_copy_buf(buf_in, cryp->in_sg, 0, cryp->total_in, 0); - - sg_init_one(&cryp->in_sgl, buf_in, total_in); - cryp->in_sg = &cryp->in_sgl; - cryp->in_sg_len = 1; - - sg_init_one(&cryp->out_sgl, buf_out, total_out); - cryp->out_sg_save = cryp->out_sg; - cryp->out_sg = &cryp->out_sgl; - cryp->out_sg_len = 1; - - cryp->sgs_copied = 1; - - return 0; -} - static void stm32_cryp_hw_write_iv(struct stm32_cryp *cryp, __be32 *iv) { if (!iv) @@ -481,16 +372,99 @@ static int stm32_cryp_gcm_init(struct stm32_cryp *cryp, u32 cfg) /* Wait for end of processing */ ret = stm32_cryp_wait_enable(cryp); - if (ret) + if (ret) { dev_err(cryp->dev, "Timeout (gcm init)\n"); + return ret; + } - return ret; + /* Prepare next phase */ + if (cryp->areq->assoclen) { + cfg |= CR_PH_HEADER; + stm32_cryp_write(cryp, CRYP_CR, cfg); + } else if (stm32_cryp_get_input_text_len(cryp)) { + cfg |= CR_PH_PAYLOAD; + stm32_cryp_write(cryp, CRYP_CR, cfg); + } + + return 0; +} + +static void stm32_crypt_gcmccm_end_header(struct stm32_cryp *cryp) +{ + u32 cfg; + int err; + + /* Check if whole header written */ + if (!cryp->header_in) { + /* Wait for completion */ + err = stm32_cryp_wait_busy(cryp); + if (err) { + dev_err(cryp->dev, "Timeout (gcm/ccm header)\n"); + stm32_cryp_write(cryp, CRYP_IMSCR, 0); + stm32_cryp_finish_req(cryp, err); + return; + } + + if (stm32_cryp_get_input_text_len(cryp)) { + /* Phase 3 : payload */ + cfg = stm32_cryp_read(cryp, CRYP_CR); + cfg &= ~CR_CRYPEN; + stm32_cryp_write(cryp, CRYP_CR, cfg); + + cfg &= ~CR_PH_MASK; + cfg |= CR_PH_PAYLOAD | CR_CRYPEN; + stm32_cryp_write(cryp, CRYP_CR, cfg); + } else { + /* + * Phase 4 : tag. 
+ * Nothing to read, nothing to write, caller have to + * end request + */ + } + } +} + +static void stm32_cryp_write_ccm_first_header(struct stm32_cryp *cryp) +{ + unsigned int i; + size_t written; + size_t len; + u32 alen = cryp->areq->assoclen; + u32 block[AES_BLOCK_32] = {0}; + u8 *b8 = (u8 *)block; + + if (alen <= 65280) { + /* Write first u32 of B1 */ + b8[0] = (alen >> 8) & 0xFF; + b8[1] = alen & 0xFF; + len = 2; + } else { + /* Build the two first u32 of B1 */ + b8[0] = 0xFF; + b8[1] = 0xFE; + b8[2] = (alen & 0xFF000000) >> 24; + b8[3] = (alen & 0x00FF0000) >> 16; + b8[4] = (alen & 0x0000FF00) >> 8; + b8[5] = alen & 0x000000FF; + len = 6; + } + + written = min_t(size_t, AES_BLOCK_SIZE - len, alen); + + scatterwalk_copychunks((char *)block + len, &cryp->in_walk, written, 0); + for (i = 0; i < AES_BLOCK_32; i++) + stm32_cryp_write(cryp, CRYP_DIN, block[i]); + + cryp->header_in -= written; + + stm32_crypt_gcmccm_end_header(cryp); } static int stm32_cryp_ccm_init(struct stm32_cryp *cryp, u32 cfg) { int ret; - u8 iv[AES_BLOCK_SIZE], b0[AES_BLOCK_SIZE]; + u32 iv_32[AES_BLOCK_32], b0_32[AES_BLOCK_32]; + u8 *iv = (u8 *)iv_32, *b0 = (u8 *)b0_32; __be32 *bd; u32 *d; unsigned int i, textlen; @@ -531,10 +505,24 @@ static int stm32_cryp_ccm_init(struct stm32_cryp *cryp, u32 cfg) /* Wait for end of processing */ ret = stm32_cryp_wait_enable(cryp); - if (ret) + if (ret) { dev_err(cryp->dev, "Timeout (ccm init)\n"); + return ret; + } - return ret; + /* Prepare next phase */ + if (cryp->areq->assoclen) { + cfg |= CR_PH_HEADER | CR_CRYPEN; + stm32_cryp_write(cryp, CRYP_CR, cfg); + + /* Write first (special) block (may move to next phase [payload]) */ + stm32_cryp_write_ccm_first_header(cryp); + } else if (stm32_cryp_get_input_text_len(cryp)) { + cfg |= CR_PH_PAYLOAD; + stm32_cryp_write(cryp, CRYP_CR, cfg); + } + + return 0; } static int stm32_cryp_hw_init(struct stm32_cryp *cryp) @@ -542,7 +530,7 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp) int ret; u32 cfg, hw_mode; - pm_runtime_resume_and_get(cryp->dev); + pm_runtime_get_sync(cryp->dev); /* Disable interrupt */ stm32_cryp_write(cryp, CRYP_IMSCR, 0); @@ -605,16 +593,6 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp) if (ret) return ret; - /* Phase 2 : header (authenticated data) */ - if (cryp->areq->assoclen) { - cfg |= CR_PH_HEADER; - } else if (stm32_cryp_get_input_text_len(cryp)) { - cfg |= CR_PH_PAYLOAD; - stm32_cryp_write(cryp, CRYP_CR, cfg); - } else { - cfg |= CR_PH_INIT; - } - break; case CR_DES_CBC: @@ -633,8 +611,6 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp) stm32_cryp_write(cryp, CRYP_CR, cfg); - cryp->flags &= ~FLG_CCM_PADDED_WA; - return 0; } @@ -644,28 +620,9 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err) /* Phase 4 : output tag */ err = stm32_cryp_read_auth_tag(cryp); - if (!err && (!(is_gcm(cryp) || is_ccm(cryp)))) + if (!err && (!(is_gcm(cryp) || is_ccm(cryp) || is_ecb(cryp)))) stm32_cryp_get_iv(cryp); - if (cryp->sgs_copied) { - void *buf_in, *buf_out; - int pages, len; - - buf_in = sg_virt(&cryp->in_sgl); - buf_out = sg_virt(&cryp->out_sgl); - - sg_copy_buf(buf_out, cryp->out_sg_save, 0, - cryp->total_out_save, 1); - - len = ALIGN(cryp->total_in_save, cryp->hw_blocksize); - pages = len ? get_order(len) : 1; - free_pages((unsigned long)buf_in, pages); - - len = ALIGN(cryp->total_out_save, cryp->hw_blocksize); - pages = len ? 
get_order(len) : 1; - free_pages((unsigned long)buf_out, pages); - } - pm_runtime_mark_last_busy(cryp->dev); pm_runtime_put_autosuspend(cryp->dev); @@ -674,8 +631,6 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err) else crypto_finalize_skcipher_request(cryp->engine, cryp->req, err); - - memset(cryp->ctx->key, 0, cryp->ctx->keylen); } static int stm32_cryp_cpu_start(struct stm32_cryp *cryp) @@ -801,7 +756,20 @@ static int stm32_cryp_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key, static int stm32_cryp_aes_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { - return authsize == AES_BLOCK_SIZE ? 0 : -EINVAL; + switch (authsize) { + case 4: + case 8: + case 12: + case 13: + case 14: + case 15: + case 16: + break; + default: + return -EINVAL; + } + + return 0; } static int stm32_cryp_aes_ccm_setauthsize(struct crypto_aead *tfm, @@ -825,31 +793,61 @@ static int stm32_cryp_aes_ccm_setauthsize(struct crypto_aead *tfm, static int stm32_cryp_aes_ecb_encrypt(struct skcipher_request *req) { + if (req->cryptlen % AES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_AES | FLG_ECB | FLG_ENCRYPT); } static int stm32_cryp_aes_ecb_decrypt(struct skcipher_request *req) { + if (req->cryptlen % AES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_AES | FLG_ECB); } static int stm32_cryp_aes_cbc_encrypt(struct skcipher_request *req) { + if (req->cryptlen % AES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_AES | FLG_CBC | FLG_ENCRYPT); } static int stm32_cryp_aes_cbc_decrypt(struct skcipher_request *req) { + if (req->cryptlen % AES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_AES | FLG_CBC); } static int stm32_cryp_aes_ctr_encrypt(struct skcipher_request *req) { + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_AES | FLG_CTR | FLG_ENCRYPT); } static int stm32_cryp_aes_ctr_decrypt(struct skcipher_request *req) { + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_AES | FLG_CTR); } @@ -863,53 +861,122 @@ static int stm32_cryp_aes_gcm_decrypt(struct aead_request *req) return stm32_cryp_aead_crypt(req, FLG_AES | FLG_GCM); } +static inline int crypto_ccm_check_iv(const u8 *iv) +{ + /* 2 <= L <= 8, so 1 <= L' <= 7. 
*/ + if (iv[0] < 1 || iv[0] > 7) + return -EINVAL; + + return 0; +} + static int stm32_cryp_aes_ccm_encrypt(struct aead_request *req) { + int err; + + err = crypto_ccm_check_iv(req->iv); + if (err) + return err; + return stm32_cryp_aead_crypt(req, FLG_AES | FLG_CCM | FLG_ENCRYPT); } static int stm32_cryp_aes_ccm_decrypt(struct aead_request *req) { + int err; + + err = crypto_ccm_check_iv(req->iv); + if (err) + return err; + return stm32_cryp_aead_crypt(req, FLG_AES | FLG_CCM); } static int stm32_cryp_des_ecb_encrypt(struct skcipher_request *req) { + if (req->cryptlen % DES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_DES | FLG_ECB | FLG_ENCRYPT); } static int stm32_cryp_des_ecb_decrypt(struct skcipher_request *req) { + if (req->cryptlen % DES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_DES | FLG_ECB); } static int stm32_cryp_des_cbc_encrypt(struct skcipher_request *req) { + if (req->cryptlen % DES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_DES | FLG_CBC | FLG_ENCRYPT); } static int stm32_cryp_des_cbc_decrypt(struct skcipher_request *req) { + if (req->cryptlen % DES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_DES | FLG_CBC); } static int stm32_cryp_tdes_ecb_encrypt(struct skcipher_request *req) { + if (req->cryptlen % DES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB | FLG_ENCRYPT); } static int stm32_cryp_tdes_ecb_decrypt(struct skcipher_request *req) { + if (req->cryptlen % DES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB); } static int stm32_cryp_tdes_cbc_encrypt(struct skcipher_request *req) { + if (req->cryptlen % DES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC | FLG_ENCRYPT); } static int stm32_cryp_tdes_cbc_decrypt(struct skcipher_request *req) { + if (req->cryptlen % DES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC); } @@ -919,6 +986,7 @@ static int stm32_cryp_prepare_req(struct skcipher_request *req, struct stm32_cryp_ctx *ctx; struct stm32_cryp *cryp; struct stm32_cryp_reqctx *rctx; + struct scatterlist *in_sg; int ret; if (!req && !areq) @@ -944,76 +1012,55 @@ static int stm32_cryp_prepare_req(struct skcipher_request *req, if (req) { cryp->req = req; cryp->areq = NULL; - cryp->total_in = req->cryptlen; - cryp->total_out = cryp->total_in; + cryp->header_in = 0; + cryp->payload_in = req->cryptlen; + cryp->payload_out = req->cryptlen; + cryp->authsize = 0; } else { /* * Length of input and output data: * Encryption case: - * INPUT = AssocData || PlainText + * INPUT = AssocData || PlainText * <- assoclen -> <- cryptlen -> - * <------- total_in -----------> * - * OUTPUT = AssocData || CipherText || AuthTag - * <- assoclen -> <- cryptlen -> <- authsize -> - * <---------------- total_out -----------------> + * OUTPUT = AssocData || CipherText || AuthTag + * <- assoclen -> <-- cryptlen --> <- authsize -> * * Decryption case: - * INPUT = AssocData || CipherText || AuthTag - * <- assoclen -> <--------- cryptlen ---------> - * <- authsize -> - * <---------------- total_in ------------------> + * INPUT = AssocData || CipherTex || AuthTag + * <- assoclen ---> 
<---------- cryptlen ----------> * - * OUTPUT = AssocData || PlainText - * <- assoclen -> <- crypten - authsize -> - * <---------- total_out -----------------> + * OUTPUT = AssocData || PlainText + * <- assoclen -> <- cryptlen - authsize -> */ cryp->areq = areq; cryp->req = NULL; cryp->authsize = crypto_aead_authsize(crypto_aead_reqtfm(areq)); - cryp->total_in = areq->assoclen + areq->cryptlen; - if (is_encrypt(cryp)) - /* Append auth tag to output */ - cryp->total_out = cryp->total_in + cryp->authsize; - else - /* No auth tag in output */ - cryp->total_out = cryp->total_in - cryp->authsize; + if (is_encrypt(cryp)) { + cryp->payload_in = areq->cryptlen; + cryp->header_in = areq->assoclen; + cryp->payload_out = areq->cryptlen; + } else { + cryp->payload_in = areq->cryptlen - cryp->authsize; + cryp->header_in = areq->assoclen; + cryp->payload_out = cryp->payload_in; + } } - cryp->total_in_save = cryp->total_in; - cryp->total_out_save = cryp->total_out; + in_sg = req ? req->src : areq->src; + scatterwalk_start(&cryp->in_walk, in_sg); - cryp->in_sg = req ? req->src : areq->src; cryp->out_sg = req ? req->dst : areq->dst; - cryp->out_sg_save = cryp->out_sg; - - cryp->in_sg_len = sg_nents_for_len(cryp->in_sg, cryp->total_in); - if (cryp->in_sg_len < 0) { - dev_err(cryp->dev, "Cannot get in_sg_len\n"); - ret = cryp->in_sg_len; - return ret; - } - - cryp->out_sg_len = sg_nents_for_len(cryp->out_sg, cryp->total_out); - if (cryp->out_sg_len < 0) { - dev_err(cryp->dev, "Cannot get out_sg_len\n"); - ret = cryp->out_sg_len; - return ret; - } - - ret = stm32_cryp_copy_sgs(cryp); - if (ret) - return ret; - - scatterwalk_start(&cryp->in_walk, cryp->in_sg); scatterwalk_start(&cryp->out_walk, cryp->out_sg); if (is_gcm(cryp) || is_ccm(cryp)) { /* In output, jump after assoc data */ - scatterwalk_advance(&cryp->out_walk, cryp->areq->assoclen); - cryp->total_out -= cryp->areq->assoclen; + scatterwalk_copychunks(NULL, &cryp->out_walk, cryp->areq->assoclen, 2); } + if (is_ctr(cryp)) + memset(cryp->last_ctr, 0, sizeof(cryp->last_ctr)); + ret = stm32_cryp_hw_init(cryp); return ret; } @@ -1061,8 +1108,7 @@ static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq) if (!cryp) return -ENODEV; - if (unlikely(!cryp->areq->assoclen && - !stm32_cryp_get_input_text_len(cryp))) { + if (unlikely(!cryp->payload_in && !cryp->header_in)) { /* No input data to process: get tag and finish */ stm32_cryp_finish_req(cryp, 0); return 0; @@ -1071,43 +1117,10 @@ static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq) return stm32_cryp_cpu_start(cryp); } -static u32 *stm32_cryp_next_out(struct stm32_cryp *cryp, u32 *dst, - unsigned int n) -{ - scatterwalk_advance(&cryp->out_walk, n); - - if (unlikely(cryp->out_sg->length == _walked_out)) { - cryp->out_sg = sg_next(cryp->out_sg); - if (cryp->out_sg) { - scatterwalk_start(&cryp->out_walk, cryp->out_sg); - return (sg_virt(cryp->out_sg) + _walked_out); - } - } - - return (u32 *)((u8 *)dst + n); -} - -static u32 *stm32_cryp_next_in(struct stm32_cryp *cryp, u32 *src, - unsigned int n) -{ - scatterwalk_advance(&cryp->in_walk, n); - - if (unlikely(cryp->in_sg->length == _walked_in)) { - cryp->in_sg = sg_next(cryp->in_sg); - if (cryp->in_sg) { - scatterwalk_start(&cryp->in_walk, cryp->in_sg); - return (sg_virt(cryp->in_sg) + _walked_in); - } - } - - return (u32 *)((u8 *)src + n); -} - static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp) { - u32 cfg, size_bit, *dst, d32; - u8 *d8; - unsigned int i, j; + u32 cfg, size_bit; + unsigned int i; int ret = 
0; /* Update Config */ @@ -1130,7 +1143,7 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp) stm32_cryp_write(cryp, CRYP_DIN, size_bit); size_bit = is_encrypt(cryp) ? cryp->areq->cryptlen : - cryp->areq->cryptlen - AES_BLOCK_SIZE; + cryp->areq->cryptlen - cryp->authsize; size_bit *= 8; if (cryp->caps->swap_final) size_bit = (__force u32)cpu_to_be32(size_bit); @@ -1139,11 +1152,9 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp) stm32_cryp_write(cryp, CRYP_DIN, size_bit); } else { /* CCM: write CTR0 */ - u8 iv[AES_BLOCK_SIZE]; - u32 *iv32 = (u32 *)iv; - __be32 *biv; - - biv = (void *)iv; + u32 iv32[AES_BLOCK_32]; + u8 *iv = (u8 *)iv32; + __be32 *biv = (__be32 *)iv32; memcpy(iv, cryp->areq->iv, AES_BLOCK_SIZE); memset(iv + AES_BLOCK_SIZE - 1 - iv[0], 0, iv[0] + 1); @@ -1165,39 +1176,18 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp) } if (is_encrypt(cryp)) { + u32 out_tag[AES_BLOCK_32]; + /* Get and write tag */ - dst = sg_virt(cryp->out_sg) + _walked_out; + for (i = 0; i < AES_BLOCK_32; i++) + out_tag[i] = stm32_cryp_read(cryp, CRYP_DOUT); - for (i = 0; i < AES_BLOCK_32; i++) { - if (cryp->total_out >= sizeof(u32)) { - /* Read a full u32 */ - *dst = stm32_cryp_read(cryp, CRYP_DOUT); - - dst = stm32_cryp_next_out(cryp, dst, - sizeof(u32)); - cryp->total_out -= sizeof(u32); - } else if (!cryp->total_out) { - /* Empty fifo out (data from input padding) */ - stm32_cryp_read(cryp, CRYP_DOUT); - } else { - /* Read less than an u32 */ - d32 = stm32_cryp_read(cryp, CRYP_DOUT); - d8 = (u8 *)&d32; - - for (j = 0; j < cryp->total_out; j++) { - *((u8 *)dst) = *(d8++); - dst = stm32_cryp_next_out(cryp, dst, 1); - } - cryp->total_out = 0; - } - } + scatterwalk_copychunks(out_tag, &cryp->out_walk, cryp->authsize, 1); } else { /* Get and check tag */ u32 in_tag[AES_BLOCK_32], out_tag[AES_BLOCK_32]; - scatterwalk_map_and_copy(in_tag, cryp->in_sg, - cryp->total_in_save - cryp->authsize, - cryp->authsize, 0); + scatterwalk_copychunks(in_tag, &cryp->in_walk, cryp->authsize, 0); for (i = 0; i < AES_BLOCK_32; i++) out_tag[i] = stm32_cryp_read(cryp, CRYP_DOUT); @@ -1217,115 +1207,59 @@ static void stm32_cryp_check_ctr_counter(struct stm32_cryp *cryp) { u32 cr; - if (unlikely(cryp->last_ctr[3] == 0xFFFFFFFF)) { - cryp->last_ctr[3] = 0; - cryp->last_ctr[2]++; - if (!cryp->last_ctr[2]) { - cryp->last_ctr[1]++; - if (!cryp->last_ctr[1]) - cryp->last_ctr[0]++; - } + if (unlikely(cryp->last_ctr[3] == cpu_to_be32(0xFFFFFFFF))) { + /* + * In this case, we need to increment manually the ctr counter, + * as HW doesn't handle the U32 carry. 
+ */ + crypto_inc((u8 *)cryp->last_ctr, sizeof(cryp->last_ctr)); cr = stm32_cryp_read(cryp, CRYP_CR); stm32_cryp_write(cryp, CRYP_CR, cr & ~CR_CRYPEN); - stm32_cryp_hw_write_iv(cryp, (u32 *)cryp->last_ctr); + stm32_cryp_hw_write_iv(cryp, cryp->last_ctr); stm32_cryp_write(cryp, CRYP_CR, cr); } - cryp->last_ctr[0] = stm32_cryp_read(cryp, CRYP_IV0LR); - cryp->last_ctr[1] = stm32_cryp_read(cryp, CRYP_IV0RR); - cryp->last_ctr[2] = stm32_cryp_read(cryp, CRYP_IV1LR); - cryp->last_ctr[3] = stm32_cryp_read(cryp, CRYP_IV1RR); + /* The IV registers are BE */ + cryp->last_ctr[0] = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV0LR)); + cryp->last_ctr[1] = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV0RR)); + cryp->last_ctr[2] = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV1LR)); + cryp->last_ctr[3] = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV1RR)); } -static bool stm32_cryp_irq_read_data(struct stm32_cryp *cryp) +static void stm32_cryp_irq_read_data(struct stm32_cryp *cryp) { - unsigned int i, j; - u32 d32, *dst; - u8 *d8; - size_t tag_size; - - /* Do no read tag now (if any) */ - if (is_encrypt(cryp) && (is_gcm(cryp) || is_ccm(cryp))) - tag_size = cryp->authsize; - else - tag_size = 0; - - dst = sg_virt(cryp->out_sg) + _walked_out; + unsigned int i; + u32 block[AES_BLOCK_32]; - for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) { - if (likely(cryp->total_out - tag_size >= sizeof(u32))) { - /* Read a full u32 */ - *dst = stm32_cryp_read(cryp, CRYP_DOUT); + for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) + block[i] = stm32_cryp_read(cryp, CRYP_DOUT); - dst = stm32_cryp_next_out(cryp, dst, sizeof(u32)); - cryp->total_out -= sizeof(u32); - } else if (cryp->total_out == tag_size) { - /* Empty fifo out (data from input padding) */ - d32 = stm32_cryp_read(cryp, CRYP_DOUT); - } else { - /* Read less than an u32 */ - d32 = stm32_cryp_read(cryp, CRYP_DOUT); - d8 = (u8 *)&d32; - - for (j = 0; j < cryp->total_out - tag_size; j++) { - *((u8 *)dst) = *(d8++); - dst = stm32_cryp_next_out(cryp, dst, 1); - } - cryp->total_out = tag_size; - } - } - - return !(cryp->total_out - tag_size) || !cryp->total_in; + scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize, + cryp->payload_out), 1); + cryp->payload_out -= min_t(size_t, cryp->hw_blocksize, + cryp->payload_out); } static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp) { - unsigned int i, j; - u32 *src; - u8 d8[4]; - size_t tag_size; - - /* Do no write tag (if any) */ - if (is_decrypt(cryp) && (is_gcm(cryp) || is_ccm(cryp))) - tag_size = cryp->authsize; - else - tag_size = 0; - - src = sg_virt(cryp->in_sg) + _walked_in; + unsigned int i; + u32 block[AES_BLOCK_32] = {0}; - for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) { - if (likely(cryp->total_in - tag_size >= sizeof(u32))) { - /* Write a full u32 */ - stm32_cryp_write(cryp, CRYP_DIN, *src); + scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, cryp->hw_blocksize, + cryp->payload_in), 0); + for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) + stm32_cryp_write(cryp, CRYP_DIN, block[i]); - src = stm32_cryp_next_in(cryp, src, sizeof(u32)); - cryp->total_in -= sizeof(u32); - } else if (cryp->total_in == tag_size) { - /* Write padding data */ - stm32_cryp_write(cryp, CRYP_DIN, 0); - } else { - /* Write less than an u32 */ - memset(d8, 0, sizeof(u32)); - for (j = 0; j < cryp->total_in - tag_size; j++) { - d8[j] = *((u8 *)src); - src = stm32_cryp_next_in(cryp, src, 1); - } - - stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8); - cryp->total_in = tag_size; - } - } + 
cryp->payload_in -= min_t(size_t, cryp->hw_blocksize, cryp->payload_in); } static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp) { int err; - u32 cfg, tmp[AES_BLOCK_32]; - size_t total_in_ori = cryp->total_in; - struct scatterlist *out_sg_ori = cryp->out_sg; + u32 cfg, block[AES_BLOCK_32] = {0}; unsigned int i; /* 'Special workaround' procedure described in the datasheet */ @@ -1350,18 +1284,25 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp) /* b) pad and write the last block */ stm32_cryp_irq_write_block(cryp); - cryp->total_in = total_in_ori; + /* wait end of process */ err = stm32_cryp_wait_output(cryp); if (err) { - dev_err(cryp->dev, "Timeout (write gcm header)\n"); + dev_err(cryp->dev, "Timeout (write gcm last data)\n"); return stm32_cryp_finish_req(cryp, err); } /* c) get and store encrypted data */ - stm32_cryp_irq_read_data(cryp); - scatterwalk_map_and_copy(tmp, out_sg_ori, - cryp->total_in_save - total_in_ori, - total_in_ori, 0); + /* + * Same code as stm32_cryp_irq_read_data(), but we want to store + * block value + */ + for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) + block[i] = stm32_cryp_read(cryp, CRYP_DOUT); + + scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize, + cryp->payload_out), 1); + cryp->payload_out -= min_t(size_t, cryp->hw_blocksize, + cryp->payload_out); /* d) change mode back to AES GCM */ cfg &= ~CR_ALGO_MASK; @@ -1374,19 +1315,13 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp) stm32_cryp_write(cryp, CRYP_CR, cfg); /* f) write padded data */ - for (i = 0; i < AES_BLOCK_32; i++) { - if (cryp->total_in) - stm32_cryp_write(cryp, CRYP_DIN, tmp[i]); - else - stm32_cryp_write(cryp, CRYP_DIN, 0); - - cryp->total_in -= min_t(size_t, sizeof(u32), cryp->total_in); - } + for (i = 0; i < AES_BLOCK_32; i++) + stm32_cryp_write(cryp, CRYP_DIN, block[i]); /* g) Empty fifo out */ err = stm32_cryp_wait_output(cryp); if (err) { - dev_err(cryp->dev, "Timeout (write gcm header)\n"); + dev_err(cryp->dev, "Timeout (write gcm padded data)\n"); return stm32_cryp_finish_req(cryp, err); } @@ -1399,16 +1334,14 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp) static void stm32_cryp_irq_set_npblb(struct stm32_cryp *cryp) { - u32 cfg, payload_bytes; + u32 cfg; /* disable ip, set NPBLB and reneable ip */ cfg = stm32_cryp_read(cryp, CRYP_CR); cfg &= ~CR_CRYPEN; stm32_cryp_write(cryp, CRYP_CR, cfg); - payload_bytes = is_decrypt(cryp) ? 
cryp->total_in - cryp->authsize : - cryp->total_in; - cfg |= (cryp->hw_blocksize - payload_bytes) << CR_NBPBL_SHIFT; + cfg |= (cryp->hw_blocksize - cryp->payload_in) << CR_NBPBL_SHIFT; cfg |= CR_CRYPEN; stm32_cryp_write(cryp, CRYP_CR, cfg); } @@ -1417,13 +1350,11 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp) { int err = 0; u32 cfg, iv1tmp; - u32 cstmp1[AES_BLOCK_32], cstmp2[AES_BLOCK_32], tmp[AES_BLOCK_32]; - size_t last_total_out, total_in_ori = cryp->total_in; - struct scatterlist *out_sg_ori = cryp->out_sg; + u32 cstmp1[AES_BLOCK_32], cstmp2[AES_BLOCK_32]; + u32 block[AES_BLOCK_32] = {0}; unsigned int i; /* 'Special workaround' procedure described in the datasheet */ - cryp->flags |= FLG_CCM_PADDED_WA; /* a) disable ip */ stm32_cryp_write(cryp, CRYP_IMSCR, 0); @@ -1453,7 +1384,7 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp) /* b) pad and write the last block */ stm32_cryp_irq_write_block(cryp); - cryp->total_in = total_in_ori; + /* wait end of process */ err = stm32_cryp_wait_output(cryp); if (err) { dev_err(cryp->dev, "Timeout (wite ccm padded data)\n"); @@ -1461,13 +1392,16 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp) } /* c) get and store decrypted data */ - last_total_out = cryp->total_out; - stm32_cryp_irq_read_data(cryp); + /* + * Same code as stm32_cryp_irq_read_data(), but we want to store + * block value + */ + for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) + block[i] = stm32_cryp_read(cryp, CRYP_DOUT); - memset(tmp, 0, sizeof(tmp)); - scatterwalk_map_and_copy(tmp, out_sg_ori, - cryp->total_out_save - last_total_out, - last_total_out, 0); + scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize, + cryp->payload_out), 1); + cryp->payload_out -= min_t(size_t, cryp->hw_blocksize, cryp->payload_out); /* d) Load again CRYP_CSGCMCCMxR */ for (i = 0; i < ARRAY_SIZE(cstmp2); i++) @@ -1484,10 +1418,10 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp) stm32_cryp_write(cryp, CRYP_CR, cfg); /* g) XOR and write padded data */ - for (i = 0; i < ARRAY_SIZE(tmp); i++) { - tmp[i] ^= cstmp1[i]; - tmp[i] ^= cstmp2[i]; - stm32_cryp_write(cryp, CRYP_DIN, tmp[i]); + for (i = 0; i < ARRAY_SIZE(block); i++) { + block[i] ^= cstmp1[i]; + block[i] ^= cstmp2[i]; + stm32_cryp_write(cryp, CRYP_DIN, block[i]); } /* h) wait for completion */ @@ -1501,30 +1435,34 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp) static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp) { - if (unlikely(!cryp->total_in)) { + if (unlikely(!cryp->payload_in)) { dev_warn(cryp->dev, "No more data to process\n"); return; } - if (unlikely(cryp->total_in < AES_BLOCK_SIZE && + if (unlikely(cryp->payload_in < AES_BLOCK_SIZE && (stm32_cryp_get_hw_mode(cryp) == CR_AES_GCM) && is_encrypt(cryp))) { /* Padding for AES GCM encryption */ - if (cryp->caps->padding_wa) + if (cryp->caps->padding_wa) { /* Special case 1 */ - return stm32_cryp_irq_write_gcm_padded_data(cryp); + stm32_cryp_irq_write_gcm_padded_data(cryp); + return; + } /* Setting padding bytes (NBBLB) */ stm32_cryp_irq_set_npblb(cryp); } - if (unlikely((cryp->total_in - cryp->authsize < AES_BLOCK_SIZE) && + if (unlikely((cryp->payload_in < AES_BLOCK_SIZE) && (stm32_cryp_get_hw_mode(cryp) == CR_AES_CCM) && is_decrypt(cryp))) { /* Padding for AES CCM decryption */ - if (cryp->caps->padding_wa) + if (cryp->caps->padding_wa) { /* Special case 2 */ - return stm32_cryp_irq_write_ccm_padded_data(cryp); + 
stm32_cryp_irq_write_ccm_padded_data(cryp); + return; + } /* Setting padding bytes (NBBLB) */ stm32_cryp_irq_set_npblb(cryp); @@ -1536,192 +1474,60 @@ static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp) stm32_cryp_irq_write_block(cryp); } -static void stm32_cryp_irq_write_gcm_header(struct stm32_cryp *cryp) +static void stm32_cryp_irq_write_gcmccm_header(struct stm32_cryp *cryp) { - int err; - unsigned int i, j; - u32 cfg, *src; - - src = sg_virt(cryp->in_sg) + _walked_in; - - for (i = 0; i < AES_BLOCK_32; i++) { - stm32_cryp_write(cryp, CRYP_DIN, *src); - - src = stm32_cryp_next_in(cryp, src, sizeof(u32)); - cryp->total_in -= min_t(size_t, sizeof(u32), cryp->total_in); - - /* Check if whole header written */ - if ((cryp->total_in_save - cryp->total_in) == - cryp->areq->assoclen) { - /* Write padding if needed */ - for (j = i + 1; j < AES_BLOCK_32; j++) - stm32_cryp_write(cryp, CRYP_DIN, 0); - - /* Wait for completion */ - err = stm32_cryp_wait_busy(cryp); - if (err) { - dev_err(cryp->dev, "Timeout (gcm header)\n"); - return stm32_cryp_finish_req(cryp, err); - } - - if (stm32_cryp_get_input_text_len(cryp)) { - /* Phase 3 : payload */ - cfg = stm32_cryp_read(cryp, CRYP_CR); - cfg &= ~CR_CRYPEN; - stm32_cryp_write(cryp, CRYP_CR, cfg); - - cfg &= ~CR_PH_MASK; - cfg |= CR_PH_PAYLOAD; - cfg |= CR_CRYPEN; - stm32_cryp_write(cryp, CRYP_CR, cfg); - } else { - /* Phase 4 : tag */ - stm32_cryp_write(cryp, CRYP_IMSCR, 0); - stm32_cryp_finish_req(cryp, 0); - } - - break; - } - - if (!cryp->total_in) - break; - } -} + unsigned int i; + u32 block[AES_BLOCK_32] = {0}; + size_t written; -static void stm32_cryp_irq_write_ccm_header(struct stm32_cryp *cryp) -{ - int err; - unsigned int i = 0, j, k; - u32 alen, cfg, *src; - u8 d8[4]; - - src = sg_virt(cryp->in_sg) + _walked_in; - alen = cryp->areq->assoclen; - - if (!_walked_in) { - if (cryp->areq->assoclen <= 65280) { - /* Write first u32 of B1 */ - d8[0] = (alen >> 8) & 0xFF; - d8[1] = alen & 0xFF; - d8[2] = *((u8 *)src); - src = stm32_cryp_next_in(cryp, src, 1); - d8[3] = *((u8 *)src); - src = stm32_cryp_next_in(cryp, src, 1); - - stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8); - i++; - - cryp->total_in -= min_t(size_t, 2, cryp->total_in); - } else { - /* Build the two first u32 of B1 */ - d8[0] = 0xFF; - d8[1] = 0xFE; - d8[2] = alen & 0xFF000000; - d8[3] = alen & 0x00FF0000; - - stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8); - i++; - - d8[0] = alen & 0x0000FF00; - d8[1] = alen & 0x000000FF; - d8[2] = *((u8 *)src); - src = stm32_cryp_next_in(cryp, src, 1); - d8[3] = *((u8 *)src); - src = stm32_cryp_next_in(cryp, src, 1); - - stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8); - i++; - - cryp->total_in -= min_t(size_t, 2, cryp->total_in); - } - } + written = min_t(size_t, AES_BLOCK_SIZE, cryp->header_in); - /* Write next u32 */ - for (; i < AES_BLOCK_32; i++) { - /* Build an u32 */ - memset(d8, 0, sizeof(u32)); - for (k = 0; k < sizeof(u32); k++) { - d8[k] = *((u8 *)src); - src = stm32_cryp_next_in(cryp, src, 1); - - cryp->total_in -= min_t(size_t, 1, cryp->total_in); - if ((cryp->total_in_save - cryp->total_in) == alen) - break; - } + scatterwalk_copychunks(block, &cryp->in_walk, written, 0); + for (i = 0; i < AES_BLOCK_32; i++) + stm32_cryp_write(cryp, CRYP_DIN, block[i]); - stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8); - - if ((cryp->total_in_save - cryp->total_in) == alen) { - /* Write padding if needed */ - for (j = i + 1; j < AES_BLOCK_32; j++) - stm32_cryp_write(cryp, CRYP_DIN, 0); - - /* Wait for completion */ - err = stm32_cryp_wait_busy(cryp); 
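The reworked stm32-cryp handlers in this file drain the output FIFO into a fixed-size block buffer and then copy it into the request's scatterlist with scatterwalk_copychunks(), decrementing a payload byte counter as they go. The following is only a minimal sketch of that idiom, assuming a memory-mapped FIFO register; the helper name and its arguments are hypothetical, not the driver's actual code.

#include <crypto/aes.h>
#include <crypto/scatterwalk.h>
#include <linux/io.h>
#include <linux/kernel.h>

#define BLOCK_WORDS	(AES_BLOCK_SIZE / sizeof(u32))

/*
 * Read one hardware block from a data-out FIFO and copy only as many
 * bytes as the request still expects into the output scatterlist.
 */
static void drain_block_to_sg(void __iomem *fifo, struct scatter_walk *walk,
			      size_t *payload_out)
{
	u32 block[BLOCK_WORDS] = {0};
	size_t len = min_t(size_t, AES_BLOCK_SIZE, *payload_out);
	unsigned int i;

	for (i = 0; i < BLOCK_WORDS; i++)
		block[i] = readl(fifo);		/* pop one 32-bit word */

	scatterwalk_copychunks(block, walk, len, 1);	/* 1: buffer -> sglist */
	*payload_out -= len;
}

Passing 1 as the last argument copies from the local buffer out to the scatterlist; 0 copies in the other direction, which is how the input side of the driver fills its block before writing it to the FIFO.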
- if (err) { - dev_err(cryp->dev, "Timeout (ccm header)\n"); - return stm32_cryp_finish_req(cryp, err); - } - - if (stm32_cryp_get_input_text_len(cryp)) { - /* Phase 3 : payload */ - cfg = stm32_cryp_read(cryp, CRYP_CR); - cfg &= ~CR_CRYPEN; - stm32_cryp_write(cryp, CRYP_CR, cfg); - - cfg &= ~CR_PH_MASK; - cfg |= CR_PH_PAYLOAD; - cfg |= CR_CRYPEN; - stm32_cryp_write(cryp, CRYP_CR, cfg); - } else { - /* Phase 4 : tag */ - stm32_cryp_write(cryp, CRYP_IMSCR, 0); - stm32_cryp_finish_req(cryp, 0); - } + cryp->header_in -= written; - break; - } - } + stm32_crypt_gcmccm_end_header(cryp); } static irqreturn_t stm32_cryp_irq_thread(int irq, void *arg) { struct stm32_cryp *cryp = arg; u32 ph; + u32 it_mask = stm32_cryp_read(cryp, CRYP_IMSCR); if (cryp->irq_status & MISR_OUT) /* Output FIFO IRQ: read data */ - if (unlikely(stm32_cryp_irq_read_data(cryp))) { - /* All bytes processed, finish */ - stm32_cryp_write(cryp, CRYP_IMSCR, 0); - stm32_cryp_finish_req(cryp, 0); - return IRQ_HANDLED; - } + stm32_cryp_irq_read_data(cryp); if (cryp->irq_status & MISR_IN) { - if (is_gcm(cryp)) { + if (is_gcm(cryp) || is_ccm(cryp)) { ph = stm32_cryp_read(cryp, CRYP_CR) & CR_PH_MASK; if (unlikely(ph == CR_PH_HEADER)) /* Write Header */ - stm32_cryp_irq_write_gcm_header(cryp); - else - /* Input FIFO IRQ: write data */ - stm32_cryp_irq_write_data(cryp); - cryp->gcm_ctr++; - } else if (is_ccm(cryp)) { - ph = stm32_cryp_read(cryp, CRYP_CR) & CR_PH_MASK; - if (unlikely(ph == CR_PH_HEADER)) - /* Write Header */ - stm32_cryp_irq_write_ccm_header(cryp); + stm32_cryp_irq_write_gcmccm_header(cryp); else /* Input FIFO IRQ: write data */ stm32_cryp_irq_write_data(cryp); + if (is_gcm(cryp)) + cryp->gcm_ctr++; } else { /* Input FIFO IRQ: write data */ stm32_cryp_irq_write_data(cryp); } } + /* Mask useless interrupts */ + if (!cryp->payload_in && !cryp->header_in) + it_mask &= ~IMSCR_IN; + if (!cryp->payload_out) + it_mask &= ~IMSCR_OUT; + stm32_cryp_write(cryp, CRYP_IMSCR, it_mask); + + if (!cryp->payload_in && !cryp->header_in && !cryp->payload_out) + stm32_cryp_finish_req(cryp, 0); + return IRQ_HANDLED; } @@ -1742,7 +1548,7 @@ static struct skcipher_alg crypto_algs[] = { .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), - .base.cra_alignmask = 0xf, + .base.cra_alignmask = 0, .base.cra_module = THIS_MODULE, .init = stm32_cryp_init_tfm, @@ -1759,7 +1565,7 @@ static struct skcipher_alg crypto_algs[] = { .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), - .base.cra_alignmask = 0xf, + .base.cra_alignmask = 0, .base.cra_module = THIS_MODULE, .init = stm32_cryp_init_tfm, @@ -1777,7 +1583,7 @@ static struct skcipher_alg crypto_algs[] = { .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), - .base.cra_alignmask = 0xf, + .base.cra_alignmask = 0, .base.cra_module = THIS_MODULE, .init = stm32_cryp_init_tfm, @@ -1795,7 +1601,7 @@ static struct skcipher_alg crypto_algs[] = { .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), - .base.cra_alignmask = 0xf, + .base.cra_alignmask = 0, .base.cra_module = THIS_MODULE, .init = stm32_cryp_init_tfm, @@ -1812,7 +1618,7 @@ static struct skcipher_alg crypto_algs[] = { .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), - .base.cra_alignmask = 0xf, + 
.base.cra_alignmask = 0, .base.cra_module = THIS_MODULE, .init = stm32_cryp_init_tfm, @@ -1830,7 +1636,7 @@ static struct skcipher_alg crypto_algs[] = { .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), - .base.cra_alignmask = 0xf, + .base.cra_alignmask = 0, .base.cra_module = THIS_MODULE, .init = stm32_cryp_init_tfm, @@ -1847,7 +1653,7 @@ static struct skcipher_alg crypto_algs[] = { .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), - .base.cra_alignmask = 0xf, + .base.cra_alignmask = 0, .base.cra_module = THIS_MODULE, .init = stm32_cryp_init_tfm, @@ -1877,7 +1683,7 @@ static struct aead_alg aead_algs[] = { .cra_flags = CRYPTO_ALG_ASYNC, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct stm32_cryp_ctx), - .cra_alignmask = 0xf, + .cra_alignmask = 0, .cra_module = THIS_MODULE, }, }, @@ -1897,7 +1703,7 @@ static struct aead_alg aead_algs[] = { .cra_flags = CRYPTO_ALG_ASYNC, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct stm32_cryp_ctx), - .cra_alignmask = 0xf, + .cra_alignmask = 0, .cra_module = THIS_MODULE, }, }, @@ -2025,8 +1831,6 @@ err_engine1: list_del(&cryp->list); spin_unlock(&cryp_list.lock); - pm_runtime_disable(dev); - pm_runtime_put_noidle(dev); pm_runtime_disable(dev); pm_runtime_put_noidle(dev); diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index ff5362da118d8c294f5a5a7bc2feb4b4feddb520..16bb52836b28dbb6095dddd113f774db7fd5cb28 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c @@ -812,7 +812,7 @@ static void stm32_hash_finish_req(struct ahash_request *req, int err) static int stm32_hash_hw_init(struct stm32_hash_dev *hdev, struct stm32_hash_request_ctx *rctx) { - pm_runtime_resume_and_get(hdev->dev); + pm_runtime_get_sync(hdev->dev); if (!(HASH_FLAGS_INIT & hdev->flags)) { stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT); @@ -961,7 +961,7 @@ static int stm32_hash_export(struct ahash_request *req, void *out) u32 *preg; unsigned int i; - pm_runtime_resume_and_get(hdev->dev); + pm_runtime_get_sync(hdev->dev); while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY)) cpu_relax(); @@ -999,7 +999,7 @@ static int stm32_hash_import(struct ahash_request *req, const void *in) preg = rctx->hw_context; - pm_runtime_resume_and_get(hdev->dev); + pm_runtime_get_sync(hdev->dev); stm32_hash_write(hdev, HASH_IMR, *preg++); stm32_hash_write(hdev, HASH_STR, *preg++); diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c index d3fbd950be944f7a3bc7c2d231a0573cea5eac70..3e07f961e2f3d75af19b0e88f9346318fc3e6eb2 100644 --- a/drivers/dma-buf/dma-fence-array.c +++ b/drivers/dma-buf/dma-fence-array.c @@ -104,7 +104,11 @@ static bool dma_fence_array_signaled(struct dma_fence *fence) { struct dma_fence_array *array = to_dma_fence_array(fence); - return atomic_read(&array->num_pending) <= 0; + if (atomic_read(&array->num_pending) > 0) + return false; + + dma_fence_array_clear_pending_error(array); + return true; } static void dma_fence_array_release(struct dma_fence *fence) diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c index afd22c9dbdcfa16864f07e2fdab2e1ed8f5992fb..798f86fcd50fa47c61a2103cf3c7e806a8ea0c53 100644 --- a/drivers/dma-buf/dma-heap.c +++ b/drivers/dma-buf/dma-heap.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -123,6 +124,7 @@ static long dma_heap_ioctl(struct file *file, unsigned int ucmd, 
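The dma-heap hunk just below adds array_index_nospec() so a user-controlled ioctl number cannot be used to speculatively index past the end of the command table. A hedged, generic sketch of that idiom follows; the dispatch table and its contents are made up purely for illustration.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/nospec.h>

/* Hypothetical dispatch table indexed by a user-supplied number. */
static const unsigned int cmd_table[] = { 0x10, 0x20, 0x30 };

static long dispatch_cmd(unsigned int nr)
{
	unsigned int cmd;

	if (nr >= ARRAY_SIZE(cmd_table))
		return -EINVAL;

	/*
	 * Clamp the index after the bounds check so the CPU cannot
	 * speculatively load past the end of cmd_table.
	 */
	nr = array_index_nospec(nr, ARRAY_SIZE(cmd_table));
	cmd = cmd_table[nr];

	return cmd;	/* a real handler would dispatch on cmd */
}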
if (nr >= ARRAY_SIZE(dma_heap_ioctl_cmds)) return -EINVAL; + nr = array_index_nospec(nr, ARRAY_SIZE(dma_heap_ioctl_cmds)); /* Get the kernel ioctl cmd that matches */ kcmd = dma_heap_ioctl_cmds[nr]; diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 627ad74c879fdb6276d13085e3fe1c936a693be6..90afba0b36fe97514b661d8489be85b20b95966e 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -89,6 +89,7 @@ #define AT_XDMAC_CNDC_NDE (0x1 << 0) /* Channel x Next Descriptor Enable */ #define AT_XDMAC_CNDC_NDSUP (0x1 << 1) /* Channel x Next Descriptor Source Update */ #define AT_XDMAC_CNDC_NDDUP (0x1 << 2) /* Channel x Next Descriptor Destination Update */ +#define AT_XDMAC_CNDC_NDVIEW_MASK GENMASK(28, 27) #define AT_XDMAC_CNDC_NDVIEW_NDV0 (0x0 << 3) /* Channel x Next Descriptor View 0 */ #define AT_XDMAC_CNDC_NDVIEW_NDV1 (0x1 << 3) /* Channel x Next Descriptor View 1 */ #define AT_XDMAC_CNDC_NDVIEW_NDV2 (0x2 << 3) /* Channel x Next Descriptor View 2 */ @@ -220,15 +221,15 @@ struct at_xdmac { /* Linked List Descriptor */ struct at_xdmac_lld { - dma_addr_t mbr_nda; /* Next Descriptor Member */ - u32 mbr_ubc; /* Microblock Control Member */ - dma_addr_t mbr_sa; /* Source Address Member */ - dma_addr_t mbr_da; /* Destination Address Member */ - u32 mbr_cfg; /* Configuration Register */ - u32 mbr_bc; /* Block Control Register */ - u32 mbr_ds; /* Data Stride Register */ - u32 mbr_sus; /* Source Microblock Stride Register */ - u32 mbr_dus; /* Destination Microblock Stride Register */ + u32 mbr_nda; /* Next Descriptor Member */ + u32 mbr_ubc; /* Microblock Control Member */ + u32 mbr_sa; /* Source Address Member */ + u32 mbr_da; /* Destination Address Member */ + u32 mbr_cfg; /* Configuration Register */ + u32 mbr_bc; /* Block Control Register */ + u32 mbr_ds; /* Data Stride Register */ + u32 mbr_sus; /* Source Microblock Stride Register */ + u32 mbr_dus; /* Destination Microblock Stride Register */ }; /* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */ @@ -338,9 +339,6 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan, dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first); - if (at_xdmac_chan_is_enabled(atchan)) - return; - /* Set transfer as active to not try to start it again. 
*/ first->active_xfer = true; @@ -356,7 +354,8 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan, */ if (at_xdmac_chan_is_cyclic(atchan)) reg = AT_XDMAC_CNDC_NDVIEW_NDV1; - else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3) + else if ((first->lld.mbr_ubc & + AT_XDMAC_CNDC_NDVIEW_MASK) == AT_XDMAC_MBR_UBC_NDV3) reg = AT_XDMAC_CNDC_NDVIEW_NDV3; else reg = AT_XDMAC_CNDC_NDVIEW_NDV2; @@ -427,13 +426,12 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx) spin_lock_irqsave(&atchan->lock, irqflags); cookie = dma_cookie_assign(tx); + list_add_tail(&desc->xfer_node, &atchan->xfers_list); + spin_unlock_irqrestore(&atchan->lock, irqflags); + dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n", __func__, atchan, desc); - list_add_tail(&desc->xfer_node, &atchan->xfers_list); - if (list_is_singular(&atchan->xfers_list)) - at_xdmac_start_xfer(atchan, desc); - spin_unlock_irqrestore(&atchan->lock, irqflags); return cookie; } @@ -1563,14 +1561,17 @@ static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan) struct at_xdmac_desc *desc; struct dma_async_tx_descriptor *txd; - if (!list_empty(&atchan->xfers_list)) { - desc = list_first_entry(&atchan->xfers_list, - struct at_xdmac_desc, xfer_node); - txd = &desc->tx_dma_desc; - - if (txd->flags & DMA_PREP_INTERRUPT) - dmaengine_desc_get_callback_invoke(txd, NULL); + spin_lock_irq(&atchan->lock); + if (list_empty(&atchan->xfers_list)) { + spin_unlock_irq(&atchan->lock); + return; } + desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, + xfer_node); + spin_unlock_irq(&atchan->lock); + txd = &desc->tx_dma_desc; + if (txd->flags & DMA_PREP_INTERRUPT) + dmaengine_desc_get_callback_invoke(txd, NULL); } static void at_xdmac_handle_error(struct at_xdmac_chan *atchan) @@ -1724,11 +1725,9 @@ static void at_xdmac_issue_pending(struct dma_chan *chan) dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__); - if (!at_xdmac_chan_is_cyclic(atchan)) { - spin_lock_irqsave(&atchan->lock, flags); - at_xdmac_advance_work(atchan); - spin_unlock_irqrestore(&atchan->lock, flags); - } + spin_lock_irqsave(&atchan->lock, flags); + at_xdmac_advance_work(atchan); + spin_unlock_irqrestore(&atchan->lock, flags); return; } diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index b84303be8edf55d5ed1dfdf80e5705d8f30fc918..4eb63f1ad224765d690256e9b5eef55d0abe50c7 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c @@ -728,12 +728,6 @@ static int mmp_pdma_config_write(struct dma_chan *dchan, chan->dir = direction; chan->dev_addr = addr; - /* FIXME: drivers should be ported over to use the filter - * function. Once that's done, the following two lines can - * be removed. - */ - if (cfg->slave_id) - chan->drcmr = cfg->slave_id; return 0; } diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index 349fb312c8725678a2a1268a4371047dbcb2ed11..b4ef4f19f7decb9c007a3ecfa369ba974fd1faca 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c @@ -911,13 +911,6 @@ static void pxad_get_config(struct pxad_chan *chan, *dcmd |= PXA_DCMD_BURST16; else if (maxburst == 32) *dcmd |= PXA_DCMD_BURST32; - - /* FIXME: drivers should be ported over to use the filter - * function. Once that's done, the following two lines can - * be removed. 
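The at_xdmac_handle_cyclic() rework above holds the channel lock only while peeking at the transfer list and drops it before invoking the completion callback. A minimal sketch of that locking pattern, with hypothetical structure and field names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct xfer_desc {
	struct list_head node;
	void (*callback)(void *param);
	void *param;
};

struct dma_chan_ctx {
	spinlock_t lock;
	struct list_head xfers;		/* list of struct xfer_desc */
};

static void handle_cyclic(struct dma_chan_ctx *chan)
{
	struct xfer_desc *desc;

	spin_lock_irq(&chan->lock);
	if (list_empty(&chan->xfers)) {
		spin_unlock_irq(&chan->lock);
		return;
	}
	desc = list_first_entry(&chan->xfers, struct xfer_desc, node);
	spin_unlock_irq(&chan->lock);

	/* Never call back into client code with the channel lock held. */
	if (desc->callback)
		desc->callback(desc->param);
}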
- */ - if (chan->cfg.slave_id) - chan->drcmr = chan->cfg.slave_id; } static struct dma_async_tx_descriptor * diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 991a7b5da29f085625356abf60344f4e8be8fc20..7c268d1bd205071d70f638d915c05f50fedd5cb2 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -1844,8 +1844,13 @@ static int rcar_dmac_probe(struct platform_device *pdev) dmac->dev = &pdev->dev; platform_set_drvdata(pdev, dmac); - dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK); - dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40)); + ret = dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK); + if (ret) + return ret; + + ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40)); + if (ret) + return ret; ret = rcar_dmac_parse_of(&pdev->dev, dmac); if (ret < 0) diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c index bddd3b23f33fc8dc54ff25b1667ae1d4ad93297a..f04bcffd3c24a847ca2a88a80baf1c0345a04f14 100644 --- a/drivers/dma/stm32-dmamux.c +++ b/drivers/dma/stm32-dmamux.c @@ -292,10 +292,12 @@ static int stm32_dmamux_probe(struct platform_device *pdev) ret = of_dma_router_register(node, stm32_dmamux_route_allocate, &stm32_dmamux->dmarouter); if (ret) - goto err_clk; + goto pm_disable; return 0; +pm_disable: + pm_runtime_disable(&pdev->dev); err_clk: clk_disable_unprepare(stm32_dmamux->clk); diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c index 9d473923712adab957e8dcbc200d496b2c786c37..fe36738f2dd7e35f71b46669e6d1bfd761236add 100644 --- a/drivers/dma/stm32-mdma.c +++ b/drivers/dma/stm32-mdma.c @@ -184,7 +184,7 @@ #define STM32_MDMA_CTBR(x) (0x68 + 0x40 * (x)) #define STM32_MDMA_CTBR_DBUS BIT(17) #define STM32_MDMA_CTBR_SBUS BIT(16) -#define STM32_MDMA_CTBR_TSEL_MASK GENMASK(7, 0) +#define STM32_MDMA_CTBR_TSEL_MASK GENMASK(5, 0) #define STM32_MDMA_CTBR_TSEL(n) STM32_MDMA_SET(n, \ STM32_MDMA_CTBR_TSEL_MASK) diff --git a/drivers/dma/uniphier-xdmac.c b/drivers/dma/uniphier-xdmac.c index d6b8a202474f4a8f817da25fb689701538fa7460..290836b7e1be2b9ac86dbd437038d8c828a0832e 100644 --- a/drivers/dma/uniphier-xdmac.c +++ b/drivers/dma/uniphier-xdmac.c @@ -131,8 +131,9 @@ uniphier_xdmac_next_desc(struct uniphier_xdmac_chan *xc) static void uniphier_xdmac_chan_start(struct uniphier_xdmac_chan *xc, struct uniphier_xdmac_desc *xd) { - u32 src_mode, src_addr, src_width; - u32 dst_mode, dst_addr, dst_width; + u32 src_mode, src_width; + u32 dst_mode, dst_width; + dma_addr_t src_addr, dst_addr; u32 val, its, tnum; enum dma_slave_buswidth buswidth; diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c index e91cf1147a4e0f4740a65f39560c35bc5a5ecec0..be38fd71f731a5c4989cff9a340734e4d552adba 100644 --- a/drivers/edac/altera_edac.c +++ b/drivers/edac/altera_edac.c @@ -349,7 +349,7 @@ static int altr_sdram_probe(struct platform_device *pdev) if (irq < 0) { edac_printk(KERN_ERR, EDAC_MC, "No irq %d in DT\n", irq); - return -ENODEV; + return irq; } /* Arria10 has a 2nd IRQ */ diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 01ff71f7b64562f37459b5dd6dd86835458d215e..f4eb071327be08163e9463d9abbbb3ac1885f9c1 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c @@ -210,7 +210,7 @@ void *edac_align_ptr(void **p, unsigned int size, int n_elems) else return (char *)ptr; - r = (unsigned long)p % align; + r = (unsigned long)ptr % align; if (r == 0) return (char *)ptr; diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c index 
1a801a5d3b08b3c66bd117a6a55b7cf475992e0c..92906b56b1a2b9f64045fe2acacbed2b7a39fa7a 100644 --- a/drivers/edac/synopsys_edac.c +++ b/drivers/edac/synopsys_edac.c @@ -1351,8 +1351,7 @@ static int mc_probe(struct platform_device *pdev) } } - if (of_device_is_compatible(pdev->dev.of_node, - "xlnx,zynqmp-ddrc-2.40a")) + if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) setup_address_map(priv); #endif diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c index 1d2c27a00a4a8348eb5e0d0c203eafa4520c7030..cd1eefeff1923192735c787d05143eb6fc7661e2 100644 --- a/drivers/edac/xgene_edac.c +++ b/drivers/edac/xgene_edac.c @@ -1919,7 +1919,7 @@ static int xgene_edac_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, i); if (irq < 0) { dev_err(&pdev->dev, "No IRQ resource\n"); - rc = -EINVAL; + rc = irq; goto out_err; } rc = devm_request_irq(&pdev->dev, irq, diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 847f33ffc4aedee73d07980e56859530626143e3..9fa86288b78a987490a9826ccac9727d580dba10 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -719,6 +719,13 @@ void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr, systab_hdr->revision >> 16, systab_hdr->revision & 0xffff, vendor); + + if (IS_ENABLED(CONFIG_X86_64) && + systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION && + !strcmp(vendor, "Apple")) { + pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n"); + efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION; + } } static __initdata char memory_type_name[][13] = { diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c index c1b57dfb127763dbe4a2b7062f57406e42e0abae..415a971e76947c9c5ccb58618e50f9f42c314616 100644 --- a/drivers/firmware/efi/libstub/arm64-stub.c +++ b/drivers/firmware/efi/libstub/arm64-stub.c @@ -119,9 +119,9 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, if (image->image_base != _text) efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n"); - if (!IS_ALIGNED((u64)_text, EFI_KIMG_ALIGN)) - efi_err("FIRMWARE BUG: kernel image not aligned on %ldk boundary\n", - EFI_KIMG_ALIGN >> 10); + if (!IS_ALIGNED((u64)_text, SEGMENT_ALIGN)) + efi_err("FIRMWARE BUG: kernel image not aligned on %dk boundary\n", + SEGMENT_ALIGN >> 10); kernel_size = _edata - _text; kernel_memsize = kernel_size + (_end - _edata); diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig index 97968aece54f889bbb6083fe956aceb1b2e9f162..931544c9f63d4b8e0ed169e491b51c15df2259b3 100644 --- a/drivers/firmware/google/Kconfig +++ b/drivers/firmware/google/Kconfig @@ -3,9 +3,9 @@ menuconfig GOOGLE_FIRMWARE bool "Google Firmware Drivers" default n help - These firmware drivers are used by Google's servers. They are - only useful if you are working directly on one of their - proprietary servers. If in doubt, say "N". + These firmware drivers are used by Google servers, + Chromebooks and other devices using coreboot firmware. + If in doubt, say "N". 
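The EDAC changes above (altera_edac, xgene_edac) propagate the negative value returned by platform_get_irq() instead of overwriting it with a generic error code, which keeps -EPROBE_DEFER intact. A short sketch of the idiom, using a hypothetical probe function:

#include <linux/errno.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* may be -EPROBE_DEFER; do not mask it */

	/* ... request the IRQ and finish probing ... */
	return 0;
}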
if GOOGLE_FIRMWARE diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c index 172c751a4f6c24acc1e5e8cb04b594b1843a5b86..f08e056ed0ae4506db5f6336534b4095ea4cc388 100644 --- a/drivers/firmware/qemu_fw_cfg.c +++ b/drivers/firmware/qemu_fw_cfg.c @@ -388,9 +388,7 @@ static void fw_cfg_sysfs_cache_cleanup(void) struct fw_cfg_sysfs_entry *entry, *next; list_for_each_entry_safe(entry, next, &fw_cfg_entry_cache, list) { - /* will end up invoking fw_cfg_sysfs_cache_delist() - * via each object's release() method (i.e. destructor) - */ + fw_cfg_sysfs_cache_delist(entry); kobject_put(&entry->kobj); } } @@ -448,7 +446,6 @@ static void fw_cfg_sysfs_release_entry(struct kobject *kobj) { struct fw_cfg_sysfs_entry *entry = to_entry(kobj); - fw_cfg_sysfs_cache_delist(entry); kfree(entry); } @@ -601,20 +598,18 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f) /* set file entry information */ entry->size = be32_to_cpu(f->size); entry->select = be16_to_cpu(f->select); - memcpy(entry->name, f->name, FW_CFG_MAX_FILE_PATH); + strscpy(entry->name, f->name, FW_CFG_MAX_FILE_PATH); /* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */ err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype, fw_cfg_sel_ko, "%d", entry->select); - if (err) { - kobject_put(&entry->kobj); - return err; - } + if (err) + goto err_put_entry; /* add raw binary content access */ err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw); if (err) - goto err_add_raw; + goto err_del_entry; /* try adding "/sys/firmware/qemu_fw_cfg/by_name/" symlink */ fw_cfg_build_symlink(fw_cfg_fname_kset, &entry->kobj, entry->name); @@ -623,9 +618,10 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f) fw_cfg_sysfs_cache_enlist(entry); return 0; -err_add_raw: +err_del_entry: kobject_del(&entry->kobj); - kfree(entry); +err_put_entry: + kobject_put(&entry->kobj); return err; } diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c index dfd8a4876a27ac4baa5cac2aaffdbfb0223aab54..d5f25246404d9e28e434046f921b5052525172c2 100644 --- a/drivers/gpio/gpio-aggregator.c +++ b/drivers/gpio/gpio-aggregator.c @@ -330,7 +330,8 @@ static int gpio_fwd_get(struct gpio_chip *chip, unsigned int offset) { struct gpiochip_fwd *fwd = gpiochip_get_data(chip); - return gpiod_get_value(fwd->descs[offset]); + return chip->can_sleep ? 
gpiod_get_value_cansleep(fwd->descs[offset]) + : gpiod_get_value(fwd->descs[offset]); } static int gpio_fwd_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask, @@ -349,7 +350,10 @@ static int gpio_fwd_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask, for_each_set_bit(i, mask, fwd->chip.ngpio) descs[j++] = fwd->descs[i]; - error = gpiod_get_array_value(j, descs, NULL, values); + if (fwd->chip.can_sleep) + error = gpiod_get_array_value_cansleep(j, descs, NULL, values); + else + error = gpiod_get_array_value(j, descs, NULL, values); if (error) return error; @@ -384,7 +388,10 @@ static void gpio_fwd_set(struct gpio_chip *chip, unsigned int offset, int value) { struct gpiochip_fwd *fwd = gpiochip_get_data(chip); - gpiod_set_value(fwd->descs[offset], value); + if (chip->can_sleep) + gpiod_set_value_cansleep(fwd->descs[offset], value); + else + gpiod_set_value(fwd->descs[offset], value); } static void gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask, @@ -403,7 +410,10 @@ static void gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask, descs[j++] = fwd->descs[i]; } - gpiod_set_array_value(j, descs, NULL, values); + if (fwd->chip.can_sleep) + gpiod_set_array_value_cansleep(j, descs, NULL, values); + else + gpiod_set_array_value(j, descs, NULL, values); } static void gpio_fwd_set_multiple_locked(struct gpio_chip *chip, diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c index b966f5e28ebffd5f3a97bcd43f1001156641b43d..e0d5d80ec8e0f2f650804c523f4f624b48f4ded7 100644 --- a/drivers/gpio/gpio-aspeed.c +++ b/drivers/gpio/gpio-aspeed.c @@ -53,7 +53,7 @@ struct aspeed_gpio_config { struct aspeed_gpio { struct gpio_chip chip; struct irq_chip irqc; - spinlock_t lock; + raw_spinlock_t lock; void __iomem *base; int irq; const struct aspeed_gpio_config *config; @@ -413,14 +413,14 @@ static void aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset, unsigned long flags; bool copro; - spin_lock_irqsave(&gpio->lock, flags); + raw_spin_lock_irqsave(&gpio->lock, flags); copro = aspeed_gpio_copro_request(gpio, offset); __aspeed_gpio_set(gc, offset, val); if (copro) aspeed_gpio_copro_release(gpio, offset); - spin_unlock_irqrestore(&gpio->lock, flags); + raw_spin_unlock_irqrestore(&gpio->lock, flags); } static int aspeed_gpio_dir_in(struct gpio_chip *gc, unsigned int offset) @@ -435,7 +435,7 @@ static int aspeed_gpio_dir_in(struct gpio_chip *gc, unsigned int offset) if (!have_input(gpio, offset)) return -ENOTSUPP; - spin_lock_irqsave(&gpio->lock, flags); + raw_spin_lock_irqsave(&gpio->lock, flags); reg = ioread32(addr); reg &= ~GPIO_BIT(offset); @@ -445,7 +445,7 @@ static int aspeed_gpio_dir_in(struct gpio_chip *gc, unsigned int offset) if (copro) aspeed_gpio_copro_release(gpio, offset); - spin_unlock_irqrestore(&gpio->lock, flags); + raw_spin_unlock_irqrestore(&gpio->lock, flags); return 0; } @@ -463,7 +463,7 @@ static int aspeed_gpio_dir_out(struct gpio_chip *gc, if (!have_output(gpio, offset)) return -ENOTSUPP; - spin_lock_irqsave(&gpio->lock, flags); + raw_spin_lock_irqsave(&gpio->lock, flags); reg = ioread32(addr); reg |= GPIO_BIT(offset); @@ -474,7 +474,7 @@ static int aspeed_gpio_dir_out(struct gpio_chip *gc, if (copro) aspeed_gpio_copro_release(gpio, offset); - spin_unlock_irqrestore(&gpio->lock, flags); + raw_spin_unlock_irqrestore(&gpio->lock, flags); return 0; } @@ -492,11 +492,11 @@ static int aspeed_gpio_get_direction(struct gpio_chip *gc, unsigned int offset) if (!have_output(gpio, offset)) return GPIO_LINE_DIRECTION_IN; - 
spin_lock_irqsave(&gpio->lock, flags); + raw_spin_lock_irqsave(&gpio->lock, flags); val = ioread32(bank_reg(gpio, bank, reg_dir)) & GPIO_BIT(offset); - spin_unlock_irqrestore(&gpio->lock, flags); + raw_spin_unlock_irqrestore(&gpio->lock, flags); return val ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN; } @@ -539,14 +539,14 @@ static void aspeed_gpio_irq_ack(struct irq_data *d) status_addr = bank_reg(gpio, bank, reg_irq_status); - spin_lock_irqsave(&gpio->lock, flags); + raw_spin_lock_irqsave(&gpio->lock, flags); copro = aspeed_gpio_copro_request(gpio, offset); iowrite32(bit, status_addr); if (copro) aspeed_gpio_copro_release(gpio, offset); - spin_unlock_irqrestore(&gpio->lock, flags); + raw_spin_unlock_irqrestore(&gpio->lock, flags); } static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set) @@ -565,7 +565,7 @@ static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set) addr = bank_reg(gpio, bank, reg_irq_enable); - spin_lock_irqsave(&gpio->lock, flags); + raw_spin_lock_irqsave(&gpio->lock, flags); copro = aspeed_gpio_copro_request(gpio, offset); reg = ioread32(addr); @@ -577,7 +577,7 @@ static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set) if (copro) aspeed_gpio_copro_release(gpio, offset); - spin_unlock_irqrestore(&gpio->lock, flags); + raw_spin_unlock_irqrestore(&gpio->lock, flags); } static void aspeed_gpio_irq_mask(struct irq_data *d) @@ -629,7 +629,7 @@ static int aspeed_gpio_set_type(struct irq_data *d, unsigned int type) return -EINVAL; } - spin_lock_irqsave(&gpio->lock, flags); + raw_spin_lock_irqsave(&gpio->lock, flags); copro = aspeed_gpio_copro_request(gpio, offset); addr = bank_reg(gpio, bank, reg_irq_type0); @@ -649,7 +649,7 @@ static int aspeed_gpio_set_type(struct irq_data *d, unsigned int type) if (copro) aspeed_gpio_copro_release(gpio, offset); - spin_unlock_irqrestore(&gpio->lock, flags); + raw_spin_unlock_irqrestore(&gpio->lock, flags); irq_set_handler_locked(d, handler); @@ -719,7 +719,7 @@ static int aspeed_gpio_reset_tolerance(struct gpio_chip *chip, treg = bank_reg(gpio, to_bank(offset), reg_tolerance); - spin_lock_irqsave(&gpio->lock, flags); + raw_spin_lock_irqsave(&gpio->lock, flags); copro = aspeed_gpio_copro_request(gpio, offset); val = readl(treg); @@ -733,7 +733,7 @@ static int aspeed_gpio_reset_tolerance(struct gpio_chip *chip, if (copro) aspeed_gpio_copro_release(gpio, offset); - spin_unlock_irqrestore(&gpio->lock, flags); + raw_spin_unlock_irqrestore(&gpio->lock, flags); return 0; } @@ -859,7 +859,7 @@ static int enable_debounce(struct gpio_chip *chip, unsigned int offset, return rc; } - spin_lock_irqsave(&gpio->lock, flags); + raw_spin_lock_irqsave(&gpio->lock, flags); if (timer_allocation_registered(gpio, offset)) { rc = unregister_allocated_timer(gpio, offset); @@ -919,7 +919,7 @@ static int enable_debounce(struct gpio_chip *chip, unsigned int offset, configure_timer(gpio, offset, i); out: - spin_unlock_irqrestore(&gpio->lock, flags); + raw_spin_unlock_irqrestore(&gpio->lock, flags); return rc; } @@ -930,13 +930,13 @@ static int disable_debounce(struct gpio_chip *chip, unsigned int offset) unsigned long flags; int rc; - spin_lock_irqsave(&gpio->lock, flags); + raw_spin_lock_irqsave(&gpio->lock, flags); rc = unregister_allocated_timer(gpio, offset); if (!rc) configure_timer(gpio, offset, 0); - spin_unlock_irqrestore(&gpio->lock, flags); + raw_spin_unlock_irqrestore(&gpio->lock, flags); return rc; } @@ -1018,7 +1018,7 @@ int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc, return -EINVAL; bindex = offset >> 3; - 
spin_lock_irqsave(&gpio->lock, flags); + raw_spin_lock_irqsave(&gpio->lock, flags); /* Sanity check, this shouldn't happen */ if (gpio->cf_copro_bankmap[bindex] == 0xff) { @@ -1039,7 +1039,7 @@ int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc, if (bit) *bit = GPIO_OFFSET(offset); bail: - spin_unlock_irqrestore(&gpio->lock, flags); + raw_spin_unlock_irqrestore(&gpio->lock, flags); return rc; } EXPORT_SYMBOL_GPL(aspeed_gpio_copro_grab_gpio); @@ -1063,7 +1063,7 @@ int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc) return -EINVAL; bindex = offset >> 3; - spin_lock_irqsave(&gpio->lock, flags); + raw_spin_lock_irqsave(&gpio->lock, flags); /* Sanity check, this shouldn't happen */ if (gpio->cf_copro_bankmap[bindex] == 0) { @@ -1077,7 +1077,7 @@ int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc) aspeed_gpio_change_cmd_source(gpio, bank, bindex, GPIO_CMDSRC_ARM); bail: - spin_unlock_irqrestore(&gpio->lock, flags); + raw_spin_unlock_irqrestore(&gpio->lock, flags); return rc; } EXPORT_SYMBOL_GPL(aspeed_gpio_copro_release_gpio); @@ -1151,7 +1151,7 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev) if (IS_ERR(gpio->base)) return PTR_ERR(gpio->base); - spin_lock_init(&gpio->lock); + raw_spin_lock_init(&gpio->lock); gpio_id = of_match_node(aspeed_gpio_of_table, pdev->dev.of_node); if (!gpio_id) diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c index d5eb9ca119016c6ba7882334ee2eaf16ce77949a..4f28fa73450c175bb7baa5ed8e78db0db161f0b0 100644 --- a/drivers/gpio/gpio-sifive.c +++ b/drivers/gpio/gpio-sifive.c @@ -206,7 +206,7 @@ static int sifive_gpio_probe(struct platform_device *pdev) NULL, chip->base + SIFIVE_GPIO_OUTPUT_EN, chip->base + SIFIVE_GPIO_INPUT_EN, - 0); + BGPIOF_READ_OUTPUT_REG_SET); if (ret) { dev_err(dev, "unable to init generic GPIO\n"); return ret; diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c index 9500074b1f1b55e00951af9b7b9f2d956f37a7ef..7fbe5f0681b956b582a03f12c5b409e9f7f6de14 100644 --- a/drivers/gpio/gpio-tegra186.c +++ b/drivers/gpio/gpio-tegra186.c @@ -337,9 +337,12 @@ static int tegra186_gpio_of_xlate(struct gpio_chip *chip, return offset + pin; } +#define to_tegra_gpio(x) container_of((x), struct tegra_gpio, gpio) + static void tegra186_irq_ack(struct irq_data *data) { - struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data); + struct gpio_chip *gc = irq_data_get_irq_chip_data(data); + struct tegra_gpio *gpio = to_tegra_gpio(gc); void __iomem *base; base = tegra186_gpio_get_base(gpio, data->hwirq); @@ -351,7 +354,8 @@ static void tegra186_irq_ack(struct irq_data *data) static void tegra186_irq_mask(struct irq_data *data) { - struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data); + struct gpio_chip *gc = irq_data_get_irq_chip_data(data); + struct tegra_gpio *gpio = to_tegra_gpio(gc); void __iomem *base; u32 value; @@ -366,7 +370,8 @@ static void tegra186_irq_mask(struct irq_data *data) static void tegra186_irq_unmask(struct irq_data *data) { - struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data); + struct gpio_chip *gc = irq_data_get_irq_chip_data(data); + struct tegra_gpio *gpio = to_tegra_gpio(gc); void __iomem *base; u32 value; @@ -381,7 +386,8 @@ static void tegra186_irq_unmask(struct irq_data *data) static int tegra186_irq_set_type(struct irq_data *data, unsigned int type) { - struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data); + struct gpio_chip *gc = irq_data_get_irq_chip_data(data); + struct tegra_gpio *gpio = to_tegra_gpio(gc); void __iomem *base; u32 value; diff 
--git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 6f11714ce02396abcc0268019336d542080b2a2a..55e4f402ec8b6116d4cd54ae07456f6872685350 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -969,10 +969,17 @@ int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name, int ind irq_flags = acpi_dev_get_irq_type(info.triggering, info.polarity); - /* Set type if specified and different than the current one */ - if (irq_flags != IRQ_TYPE_NONE && - irq_flags != irq_get_trigger_type(irq)) - irq_set_irq_type(irq, irq_flags); + /* + * If the IRQ is not already in use then set type + * if specified and different than the current one. + */ + if (can_request_irq(irq, irq_flags)) { + if (irq_flags != IRQ_TYPE_NONE && + irq_flags != irq_get_trigger_type(irq)) + irq_set_irq_type(irq, irq_flags); + } else { + dev_dbg(&adev->dev, "IRQ %d already in use\n", irq); + } return irq; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 0de66f59adb8ab0aaf4e555ea8220ae1716f45e9..df1f9b88a53f9fb04d5c20c5ad703a99a1208e45 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -387,6 +387,9 @@ amdgpu_connector_lcd_native_mode(struct drm_encoder *encoder) native_mode->vdisplay != 0 && native_mode->clock != 0) { mode = drm_mode_duplicate(dev, native_mode); + if (!mode) + return NULL; + mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; drm_mode_set_name(mode); @@ -401,6 +404,9 @@ amdgpu_connector_lcd_native_mode(struct drm_encoder *encoder) * simpler. */ mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false); + if (!mode) + return NULL; + mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 2f70fdd6104f20df7192687ed1fbe28028c5f4da..582055136cdbfc9c7765fe696fbcc444437c7e0a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -267,7 +267,6 @@ int amdgpu_irq_init(struct amdgpu_device *adev) if (!amdgpu_device_has_dc_support(adev)) { if (!adev->enable_virtual_display) /* Disable vblank IRQs aggressively for power-saving */ - /* XXX: can this be enabled for DC? 
*/ adev_to_drm(adev)->vblank_disable_immediate = true; r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index efda38349a0322f108c2eb7d1abd2a4fa85d233a..917b94002f4b70170cc04616670fd55d7e9b9817 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -766,9 +766,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file dev_info.high_va_offset = AMDGPU_GMC_HOLE_END; dev_info.high_va_max = AMDGPU_GMC_HOLE_END | vm_size; } - dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE); + dev_info.virtual_address_alignment = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE); dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE; - dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE; + dev_info.gart_page_size = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE); dev_info.cu_active_number = adev->gfx.cu_info.number; dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask; dev_info.ce_ram_size = adev->gfx.ce_ram_size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 5207ad654f18e9e8afd96f7155044a515dc45a77..0b162928a248b971c2baf0e846c3811325b09fd2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -2120,7 +2120,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, unsigned i; int r; - if (direct_submit && !ring->sched.ready) { + if (!direct_submit && !ring->sched.ready) { DRM_ERROR("Trying to move memory with ring turned off.\n"); return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 9ab65ca7df777f02c519b3c2d3b3a9cee45eac3f..873bc33912e23e74fee63e8404537dfb942a8dd3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -524,10 +524,10 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) static int gmc_v8_0_mc_init(struct amdgpu_device *adev) { int r; + u32 tmp; adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev); if (!adev->gmc.vram_width) { - u32 tmp; int chansize, numchan; /* Get VRAM informations */ @@ -571,8 +571,15 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) adev->gmc.vram_width = numchan * chansize; } /* size in MB on si */ - adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; - adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; + tmp = RREG32(mmCONFIG_MEMSIZE); + /* some boards may have garbage in the upper 16 bits */ + if (tmp & 0xffff0000) { + DRM_INFO("Probable bad vram size: 0x%08x\n", tmp); + if (tmp & 0xffff) + tmp &= 0xffff; + } + adev->gmc.mc_vram_size = tmp * 1024ULL * 1024ULL; + adev->gmc.real_vram_size = adev->gmc.mc_vram_size; if (!(adev->flags & AMD_IS_APU)) { r = amdgpu_device_resize_fb_bar(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 37226cbbbd11a4c3d79dbe41ac9b581eb6f049e0..7212b9900e0abafdf8386235789ec6f25167384d 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -1194,8 +1194,11 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_SDMA_MGCG | AMD_CG_SUPPORT_SDMA_LS; + /* + * MMHUB PG needs to be disabled for Picasso for + * stability reasons. 
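The gmc_v8_0_mc_init() hunk above masks off implausible upper bits in the VRAM-size register before scaling it to bytes, since some boards report garbage there. A generic sketch of that sanitization, assuming a register that reports the size in MiB; the function and register names are hypothetical:

#include <linux/printk.h>
#include <linux/types.h>

/* Mask off known-bogus upper bits before scaling a MiB count to bytes. */
static u64 vram_size_bytes(u32 memsize)
{
	if (memsize & 0xffff0000) {
		pr_info("probable bad vram size: 0x%08x\n", memsize);
		if (memsize & 0xffff)
			memsize &= 0xffff;	/* keep the plausible low half */
	}

	return (u64)memsize * 1024ULL * 1024ULL;
}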
+ */ adev->pg_flags = AMD_PG_SUPPORT_SDMA | - AMD_PG_SUPPORT_MMHUB | AMD_PG_SUPPORT_VCN; } else { adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index d8ce4c324284652ad33f2e8a716b97076d038008..374f2bf06c3a9ce57405be5f1a31698385451246 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1069,6 +1069,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size; adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size; + /* Disable vblank IRQs aggressively for power-saving */ + adev_to_drm(adev)->vblank_disable_immediate = true; + if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) { DRM_ERROR( "amdgpu: failed to initialize sw for display support.\n"); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 284ed1c8a35acef795887ec7b2abc42fc9cf2751..93f5229c303e7bc484244f71bfd03edd5c6a51af 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -2436,7 +2436,8 @@ static void commit_planes_for_stream(struct dc *dc, } if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) - if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { + if (top_pipe_to_program && + top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { if (should_use_dmub_lock(stream->link)) { union dmub_hw_lock_flags hw_locks = { 0 }; struct dmub_hw_lock_inst_flags inst_flags = { 0 }; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index a7f8caf1086b95a62f15aa39b08c2ca807eb306c..0e359a299f9ecb68198cf1965918f7713da41401 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -3587,6 +3587,26 @@ static bool retrieve_link_cap(struct dc_link *link) dp_hw_fw_revision.ieee_fw_rev, sizeof(dp_hw_fw_revision.ieee_fw_rev)); + /* Quirk for Apple MBP 2018 15" Retina panels: wrong DP_MAX_LINK_RATE */ + { + uint8_t str_mbp_2018[] = { 101, 68, 21, 103, 98, 97 }; + uint8_t fwrev_mbp_2018[] = { 7, 4 }; + uint8_t fwrev_mbp_2018_vega[] = { 8, 4 }; + + /* We also check for the firmware revision as 16,1 models have an + * identical device id and are incorrectly quirked otherwise. 
+ */ + if ((link->dpcd_caps.sink_dev_id == 0x0010fa) && + !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2018, + sizeof(str_mbp_2018)) && + (!memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018, + sizeof(fwrev_mbp_2018)) || + !memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018_vega, + sizeof(fwrev_mbp_2018_vega)))) { + link->reported_link_cap.link_rate = LINK_RATE_RBR2; + } + } + memset(&link->dpcd_caps.dsc_caps, '\0', sizeof(link->dpcd_caps.dsc_caps)); memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap)); diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 9f383b9041d28c4e585060792974cb7bcdc16076..49109614510b8c8ef38ab94e2f4cac1db0867ac0 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -2098,6 +2098,12 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ } } + /* setting should not be allowed from VF */ + if (amdgpu_sriov_vf(adev)) { + dev_attr->attr.mode &= ~S_IWUGO; + dev_attr->store = NULL; + } + #undef DEVICE_ATTR_IS return 0; diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c index 914c569ab8c15d10e58e3194f7b4b921816cdb93..cab3f5c4e2fc8368e8819fc5fc27d6cae1df16c9 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c @@ -1086,11 +1086,21 @@ int analogix_dp_send_psr_spd(struct analogix_dp_device *dp, if (!blocking) return 0; + /* + * db[1]!=0: entering PSR, wait for fully active remote frame buffer. + * db[1]==0: exiting PSR, wait for either + * (a) ACTIVE_RESYNC - the sink "must display the + * incoming active frames from the Source device with no visible + * glitches and/or artifacts", even though timings may still be + * re-synchronizing; or + * (b) INACTIVE - the transition is fully complete. 
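The analogix PSR change in this hunk widens the readx_poll_timeout() exit condition so that either sink state ends the wait. A minimal sketch of the polling helper itself, assuming a hypothetical status getter that returns a non-negative state or a negative error:

#include <linux/errno.h>
#include <linux/iopoll.h>

#define PSR_STATUS_INACTIVE	0	/* hypothetical sink state */

struct psr_dev;
int psr_get_status(struct psr_dev *dev);	/* hypothetical: state or -errno */

static int wait_psr_inactive(struct psr_dev *dev)
{
	int status;

	/*
	 * Re-read every 1.5 ms and give up after 100 ms; returns
	 * -ETIMEDOUT if the condition never becomes true.
	 */
	return readx_poll_timeout(psr_get_status, dev, status,
				  status >= 0 && status == PSR_STATUS_INACTIVE,
				  1500, 100 * 1000);
}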
+ */ ret = readx_poll_timeout(analogix_dp_get_psr_status, dp, psr_status, psr_status >= 0 && ((vsc->db[1] && psr_status == DP_PSR_SINK_ACTIVE_RFB) || - (!vsc->db[1] && psr_status == DP_PSR_SINK_INACTIVE)), 1500, - DP_TIMEOUT_PSR_LOOP_MS * 1000); + (!vsc->db[1] && (psr_status == DP_PSR_SINK_ACTIVE_RESYNC || + psr_status == DP_PSR_SINK_INACTIVE))), + 1500, DP_TIMEOUT_PSR_LOOP_MS * 1000); if (ret) { dev_warn(dp->dev, "Failed to apply PSR %d\n", ret); return ret; diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c index 4d278573cdb99817bab4e58705fedcd9f43f8949..544a47335cac4a8f130af61c4c03467c7205cb3a 100644 --- a/drivers/gpu/drm/bridge/display-connector.c +++ b/drivers/gpu/drm/bridge/display-connector.c @@ -104,7 +104,7 @@ static int display_connector_probe(struct platform_device *pdev) { struct display_connector *conn; unsigned int type; - const char *label; + const char *label = NULL; int ret; conn = devm_kzalloc(&pdev->dev, sizeof(*conn), GFP_KERNEL); diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c index d2808c4a6fb1cfa12ddaab5e4ada9ae578955bb7..cce98bf2a4e7324f85c1d4e113e44ae80abed7a7 100644 --- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c +++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c @@ -306,19 +306,10 @@ out: mutex_unlock(&ge_b850v3_lvds_dev_mutex); } -static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c, - const struct i2c_device_id *id) +static int ge_b850v3_register(void) { + struct i2c_client *stdp4028_i2c = ge_b850v3_lvds_ptr->stdp4028_i2c; struct device *dev = &stdp4028_i2c->dev; - int ret; - - ret = ge_b850v3_lvds_init(dev); - - if (ret) - return ret; - - ge_b850v3_lvds_ptr->stdp4028_i2c = stdp4028_i2c; - i2c_set_clientdata(stdp4028_i2c, ge_b850v3_lvds_ptr); /* drm bridge initialization */ ge_b850v3_lvds_ptr->bridge.funcs = &ge_b850v3_lvds_funcs; @@ -343,6 +334,27 @@ static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c, "ge-b850v3-lvds-dp", ge_b850v3_lvds_ptr); } +static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c, + const struct i2c_device_id *id) +{ + struct device *dev = &stdp4028_i2c->dev; + int ret; + + ret = ge_b850v3_lvds_init(dev); + + if (ret) + return ret; + + ge_b850v3_lvds_ptr->stdp4028_i2c = stdp4028_i2c; + i2c_set_clientdata(stdp4028_i2c, ge_b850v3_lvds_ptr); + + /* Only register after both bridges are probed */ + if (!ge_b850v3_lvds_ptr->stdp2690_i2c) + return 0; + + return ge_b850v3_register(); +} + static int stdp4028_ge_b850v3_fw_remove(struct i2c_client *stdp4028_i2c) { ge_b850v3_lvds_remove(); @@ -386,7 +398,11 @@ static int stdp2690_ge_b850v3_fw_probe(struct i2c_client *stdp2690_i2c, ge_b850v3_lvds_ptr->stdp2690_i2c = stdp2690_i2c; i2c_set_clientdata(stdp2690_i2c, ge_b850v3_lvds_ptr); - return 0; + /* Only register after both bridges are probed */ + if (!ge_b850v3_lvds_ptr->stdp4028_i2c) + return 0; + + return ge_b850v3_register(); } static int stdp2690_ge_b850v3_fw_remove(struct i2c_client *stdp2690_i2c) diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c index d0db1acf11d7319ec722f33202cb91db27971620..7d2ed0ed2fe26c75cebb8f484028ec2be13973ab 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c @@ -320,13 +320,17 @@ static int dw_hdmi_open(struct snd_pcm_substream *substream) struct snd_pcm_runtime *runtime = 
substream->runtime; struct snd_dw_hdmi *dw = substream->private_data; void __iomem *base = dw->data.base; + u8 *eld; int ret; runtime->hw = dw_hdmi_hw; - ret = snd_pcm_hw_constraint_eld(runtime, dw->data.eld); - if (ret < 0) - return ret; + eld = dw->data.get_eld(dw->data.hdmi); + if (eld) { + ret = snd_pcm_hw_constraint_eld(runtime, eld); + if (ret < 0) + return ret; + } ret = snd_pcm_limit_hw_rates(runtime); if (ret < 0) diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h index cb07dc0da5a70462972ae0592370b6ef22a35e16..f72d27208ebef5fb14825df3afa11a772be32ce5 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h @@ -9,15 +9,15 @@ struct dw_hdmi_audio_data { void __iomem *base; int irq; struct dw_hdmi *hdmi; - u8 *eld; + u8 *(*get_eld)(struct dw_hdmi *hdmi); }; struct dw_hdmi_i2s_audio_data { struct dw_hdmi *hdmi; - u8 *eld; void (*write)(struct dw_hdmi *hdmi, u8 val, int offset); u8 (*read)(struct dw_hdmi *hdmi, int offset); + u8 *(*get_eld)(struct dw_hdmi *hdmi); }; #endif diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c index 9fef6413741dcbd674d78cae5d5478aa0bb63ce0..9682416056ed61929ab81e348152025932360756 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c @@ -135,8 +135,15 @@ static int dw_hdmi_i2s_get_eld(struct device *dev, void *data, uint8_t *buf, size_t len) { struct dw_hdmi_i2s_audio_data *audio = data; + u8 *eld; + + eld = audio->get_eld(audio->hdmi); + if (eld) + memcpy(buf, eld, min_t(size_t, MAX_ELD_BYTES, len)); + else + /* Pass en empty ELD if connector not available */ + memset(buf, 0, len); - memcpy(buf, audio->eld, min_t(size_t, MAX_ELD_BYTES, len)); return 0; } diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 4c4fb4b52b7d1c71883a72d65cf8828c0bc75965..f7830acb45ead0bc1b89192c710929cac5ac2752 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -756,6 +756,14 @@ static void hdmi_enable_audio_clk(struct dw_hdmi *hdmi, bool enable) hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS); } +static u8 *hdmi_audio_get_eld(struct dw_hdmi *hdmi) +{ + if (!hdmi->curr_conn) + return NULL; + + return hdmi->curr_conn->eld; +} + static void dw_hdmi_ahb_audio_enable(struct dw_hdmi *hdmi) { hdmi_set_cts_n(hdmi, hdmi->audio_cts, hdmi->audio_n); @@ -3379,7 +3387,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, audio.base = hdmi->regs; audio.irq = irq; audio.hdmi = hdmi; - audio.eld = hdmi->connector.eld; + audio.get_eld = hdmi_audio_get_eld; hdmi->enable_audio = dw_hdmi_ahb_audio_enable; hdmi->disable_audio = dw_hdmi_ahb_audio_disable; @@ -3392,7 +3400,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, struct dw_hdmi_i2s_audio_data audio; audio.hdmi = hdmi; - audio.eld = hdmi->connector.eld; + audio.get_eld = hdmi_audio_get_eld; audio.write = hdmi_writeb; audio.read = hdmi_readb; hdmi->enable_audio = dw_hdmi_i2s_audio_enable; diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index ecdf9b01340f5e6413f10c26054a722560ea72d2..1a58481037b3fec5b2b762031855f7856de619f4 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -171,6 +171,7 @@ static const struct regmap_config ti_sn_bridge_regmap_config = { .val_bits = 8, 
.volatile_table = &ti_sn_bridge_volatile_table, .cache_type = REGCACHE_NONE, + .max_register = 0xFF, }; static void ti_sn_bridge_write_u16(struct ti_sn_bridge *pdata, diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index cd162d406078aa6d6aa802e5f90ee6124be61b4b..006e3b896caea7ed28a9c3df19bb5b134eef9ab9 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -577,6 +577,7 @@ static int drm_dev_init(struct drm_device *dev, struct drm_driver *driver, struct device *parent) { + struct inode *inode; int ret; if (!drm_core_init_complete) { @@ -613,13 +614,15 @@ static int drm_dev_init(struct drm_device *dev, if (ret) return ret; - dev->anon_inode = drm_fs_inode_new(); - if (IS_ERR(dev->anon_inode)) { - ret = PTR_ERR(dev->anon_inode); + inode = drm_fs_inode_new(); + if (IS_ERR(inode)) { + ret = PTR_ERR(inode); DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret); goto err; } + dev->anon_inode = inode; + if (drm_core_check_feature(dev, DRIVER_RENDER)) { ret = drm_minor_alloc(dev, DRM_MINOR_RENDER); if (ret) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 46e41ad5e44fd7027ded09297036f38a7f71b4e4..bfb83f9e87385787be80fcda0403ffae52129b10 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -5131,6 +5131,7 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi if (!(edid->input & DRM_EDID_INPUT_DIGITAL)) return quirks; + info->color_formats |= DRM_COLOR_FORMAT_RGB444; drm_parse_cea_ext(connector, edid); /* @@ -5179,7 +5180,6 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi DRM_DEBUG("%s: Assigning EDID-1.4 digital sink color depth as %d bpc.\n", connector->name, info->bpc); - info->color_formats |= DRM_COLOR_FORMAT_RGB444; if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444) info->color_formats |= DRM_COLOR_FORMAT_YCRCB444; if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422) diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index a950d5db211c51547fbb3f4fa98a4cb68d41af64..448c2f2d803a6296e67c4b72b92d00afb95d83cc 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -115,6 +115,12 @@ static const struct drm_dmi_panel_orientation_data lcd1280x1920_rightside_up = { .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, }; +static const struct drm_dmi_panel_orientation_data lcd1600x2560_leftside_up = { + .width = 1600, + .height = 2560, + .orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP, +}; + static const struct dmi_system_id orientation_data[] = { { /* Acer One 10 (S1003) */ .matches = { @@ -248,6 +254,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"), }, .driver_data = (void *)&lcd1200x1920_rightside_up, + }, { /* Lenovo Yoga Book X90F / X91F / X91L */ + .matches = { + /* Non exact match to match all versions */ + DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"), + }, + .driver_data = (void *)&lcd1200x1920_rightside_up, }, { /* OneGX1 Pro */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SYSTEM_MANUFACTURER"), @@ -255,6 +267,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Default string"), }, .driver_data = (void *)&onegx1_pro, + }, { /* OneXPlayer */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ONE-NETBOOK TECHNOLOGY CO., LTD."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"), + }, + .driver_data = 
(void *)&lcd1600x2560_leftside_up, }, { /* Samsung GalaxyBook 10.6 */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index 5f24cc52c2878660179a73e656c226c8db30bc09..ddf539f26f2dad3891fac462060b60ea7f7317c7 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -469,6 +469,12 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, return -EINVAL; } + if (args->stream_size > SZ_128K || args->nr_relocs > SZ_128K || + args->nr_bos > SZ_128K || args->nr_pmrs > 128) { + DRM_ERROR("submit arguments out of size limits\n"); + return -EINVAL; + } + /* * Copy the command submission and bo array to kernel space in * one go, and do this outside of any locks. diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h index 1c75c8ed5bcea2c5bec1d923ed35fc9a78884c67..85eddd492774d59c34189f6768e24e3f42dd8766 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h @@ -130,6 +130,7 @@ struct etnaviv_gpu { /* hang detection */ u32 hangcheck_dma_addr; + u32 hangcheck_fence; void __iomem *mmio; int irq; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c index cd46c882269cc959070822de22ea9a61d09cf544..026b6c0731198093cb33b444aa85c688fafcc949 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c @@ -106,8 +106,10 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job) */ dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS); change = dma_addr - gpu->hangcheck_dma_addr; - if (change < 0 || change > 16) { + if (gpu->completed_fence != gpu->hangcheck_fence || + change < 0 || change > 16) { gpu->hangcheck_dma_addr = dma_addr; + gpu->hangcheck_fence = gpu->completed_fence; goto out_no_timeout; } diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index 1e1cb245fca778e71d0553cb15e66693f1f0f2c6..8eb9bf3a1617e6bed7bf08e16ea914969a58a12c 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -100,6 +100,7 @@ config DRM_I915_USERPTR config DRM_I915_GVT bool "Enable Intel GVT-g graphics virtualization host support" depends on DRM_I915 + depends on X86 depends on 64BIT default n help diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c index de995362f42832206be9f512aa6f0effc68cda6e..abff2d6cedd12ca99ecb9004635d5d20026d1240 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.c +++ b/drivers/gpu/drm/i915/display/intel_opregion.c @@ -361,6 +361,21 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, port++; } + /* + * The port numbering and mapping here is bizarre. The now-obsolete + * swsci spec supports ports numbered [0..4]. Port E is handled as a + * special case, but port F and beyond are not. The functionality is + * supposed to be obsolete for new platforms. Just bail out if the port + * number is out of bounds after mapping. 
+ */ + if (port > 4) { + drm_dbg_kms(&dev_priv->drm, + "[ENCODER:%d:%s] port %c (index %u) out of bounds for display power state notification\n", + intel_encoder->base.base.id, intel_encoder->base.name, + port_name(intel_encoder->port), port); + return -EINVAL; + } + if (!enable) parm |= 4 << 8; diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c index 0e60aec0bb19122a161899f6190a54f752454ca0..b561e9e00153e6ee0140fb380995cbd69078feac 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.c +++ b/drivers/gpu/drm/i915/display/intel_overlay.c @@ -932,6 +932,9 @@ static int check_overlay_dst(struct intel_overlay *overlay, const struct intel_crtc_state *pipe_config = overlay->crtc->config; + if (rec->dst_height == 0 || rec->dst_width == 0) + return -EINVAL; + if (rec->dst_x < pipe_config->pipe_src_w && rec->dst_x + rec->dst_width <= pipe_config->pipe_src_w && rec->dst_y < pipe_config->pipe_src_h && diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index d6711caa7f399564120d7eb9dca039bcf3183bd0..dbc88fc7136bfeaff9916635a063bf0ae5928f27 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -159,6 +159,7 @@ struct drm_i915_gem_object { #define I915_BO_ALLOC_VOLATILE BIT(1) #define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE) #define I915_BO_READONLY BIT(2) +#define I915_BO_WAS_BOUND_BIT 3 /* * Is the object to be mapped as read-only to the GPU diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index f60ca6dc911f29d7d8881b566bc51f11e4554bb4..27d24cb38c0d2c42b406160e8b95f6189202f6c9 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -10,6 +10,8 @@ #include "i915_gem_lmem.h" #include "i915_gem_mman.h" +#include "gt/intel_gt.h" + void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, struct sg_table *pages, unsigned int sg_page_sizes) @@ -186,6 +188,14 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj) __i915_gem_object_reset_page_iter(obj); obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0; + if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) { + struct drm_i915_private *i915 = to_i915(obj->base.dev); + intel_wakeref_t wakeref; + + with_intel_runtime_pm_if_active(&i915->runtime_pm, wakeref) + intel_gt_invalidate_tlbs(&i915->gt); + } + return pages; } diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 39b428c5049c04f1dbb27a917e20c58b015acd03..6615eb5147e234795b8014cc9846aa794c1823c9 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -26,6 +26,8 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915) spin_lock_init(&gt->irq_lock); + mutex_init(&gt->tlb_invalidate_lock); + INIT_LIST_HEAD(&gt->closed_vma); spin_lock_init(&gt->closed_lock); @@ -661,3 +663,103 @@ void intel_gt_info_print(const struct intel_gt_info *info, intel_sseu_dump(&info->sseu, p); } + +struct reg_and_bit { + i915_reg_t reg; + u32 bit; +}; + +static struct reg_and_bit +get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8, + const i915_reg_t *regs, const unsigned int num) +{ + const unsigned int class = engine->class; + struct reg_and_bit rb = { }; + + if (drm_WARN_ON_ONCE(&engine->i915->drm, + class >= num || !regs[class].reg)) + return rb; + + rb.reg = regs[class]; + if (gen8 && 
class == VIDEO_DECODE_CLASS) + rb.reg.reg += 4 * engine->instance; /* GEN8_M2TCR */ + else + rb.bit = engine->instance; + + rb.bit = BIT(rb.bit); + + return rb; +} + +void intel_gt_invalidate_tlbs(struct intel_gt *gt) +{ + static const i915_reg_t gen8_regs[] = { + [RENDER_CLASS] = GEN8_RTCR, + [VIDEO_DECODE_CLASS] = GEN8_M1TCR, /* , GEN8_M2TCR */ + [VIDEO_ENHANCEMENT_CLASS] = GEN8_VTCR, + [COPY_ENGINE_CLASS] = GEN8_BTCR, + }; + static const i915_reg_t gen12_regs[] = { + [RENDER_CLASS] = GEN12_GFX_TLB_INV_CR, + [VIDEO_DECODE_CLASS] = GEN12_VD_TLB_INV_CR, + [VIDEO_ENHANCEMENT_CLASS] = GEN12_VE_TLB_INV_CR, + [COPY_ENGINE_CLASS] = GEN12_BLT_TLB_INV_CR, + }; + struct drm_i915_private *i915 = gt->i915; + struct intel_uncore *uncore = gt->uncore; + struct intel_engine_cs *engine; + enum intel_engine_id id; + const i915_reg_t *regs; + unsigned int num = 0; + + if (I915_SELFTEST_ONLY(gt->awake == -ENODEV)) + return; + + if (INTEL_GEN(i915) == 12) { + regs = gen12_regs; + num = ARRAY_SIZE(gen12_regs); + } else if (INTEL_GEN(i915) >= 8 && INTEL_GEN(i915) <= 11) { + regs = gen8_regs; + num = ARRAY_SIZE(gen8_regs); + } else if (INTEL_GEN(i915) < 8) { + return; + } + + if (drm_WARN_ONCE(&i915->drm, !num, + "Platform does not implement TLB invalidation!")) + return; + + GEM_TRACE("\n"); + + assert_rpm_wakelock_held(&i915->runtime_pm); + + mutex_lock(&gt->tlb_invalidate_lock); + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); + + for_each_engine(engine, gt, id) { + /* + * HW architecture suggest typical invalidation time at 40us, + * with pessimistic cases up to 100us and a recommendation to + * cap at 1ms. We go a bit higher just in case. + */ + const unsigned int timeout_us = 100; + const unsigned int timeout_ms = 4; + struct reg_and_bit rb; + + rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num); + if (!i915_mmio_reg_offset(rb.reg)) + continue; + + intel_uncore_write_fw(uncore, rb.reg, rb.bit); + if (__intel_wait_for_register_fw(uncore, + rb.reg, rb.bit, 0, + timeout_us, timeout_ms, + NULL)) + drm_err_ratelimited(&gt->i915->drm, + "%s TLB invalidation did not complete in %ums!\n", + engine->name, timeout_ms); + } + + intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL); + mutex_unlock(&gt->tlb_invalidate_lock); +} diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index 9157c7411f60398cf6daabb029a7135cbd5b70fa..d9a1168172ae33af369c39e530a181c5f324f1cf 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -77,4 +77,6 @@ static inline bool intel_gt_is_wedged(const struct intel_gt *gt) void intel_gt_info_print(const struct intel_gt_info *info, struct drm_printer *p); +void intel_gt_invalidate_tlbs(struct intel_gt *gt); + #endif /* __INTEL_GT_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index 6d39a4a11bf3983e81a57915f129c23e6f28fa1e..78c061614d8bba9e7c6a32fcdb5e929877be7301 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -36,6 +36,8 @@ struct intel_gt { struct intel_uc uc; + struct mutex tlb_invalidate_lock; + struct intel_gt_timelines { spinlock_t lock; /* protects active_list */ struct list_head active_list; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index ce8c91c5fdd3b4cc2285523a50c95da8e10e9237..12488996a7f4f759bb966f7da7320384b3d8b49c 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2639,6 +2639,12 @@ static inline bool 
i915_mmio_reg_valid(i915_reg_t reg) #define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1 << 28) #define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1 << 24) +#define GEN8_RTCR _MMIO(0x4260) +#define GEN8_M1TCR _MMIO(0x4264) +#define GEN8_M2TCR _MMIO(0x4268) +#define GEN8_BTCR _MMIO(0x426c) +#define GEN8_VTCR _MMIO(0x4270) + #if 0 #define PRB0_TAIL _MMIO(0x2030) #define PRB0_HEAD _MMIO(0x2034) @@ -2728,6 +2734,11 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define FAULT_VA_HIGH_BITS (0xf << 0) #define FAULT_GTT_SEL (1 << 4) +#define GEN12_GFX_TLB_INV_CR _MMIO(0xced8) +#define GEN12_VD_TLB_INV_CR _MMIO(0xcedc) +#define GEN12_VE_TLB_INV_CR _MMIO(0xcee0) +#define GEN12_BLT_TLB_INV_CR _MMIO(0xcee4) + #define GEN12_AUX_ERR_DBG _MMIO(0x43f4) #define FPGA_DBG _MMIO(0x42300) diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index caa9b041616b0d0903788589ad4a7ae7965339c6..50a86fd89d00524023c7fae53ef7ced05a2270a0 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -439,6 +439,9 @@ int i915_vma_bind(struct i915_vma *vma, vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags); } + if (vma->obj) + set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags); + atomic_or(bind_flags, &vma->flags); return 0; } diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index e51ca7ca0a2a70bfca211ac809b8f4c8d7a05d1e..472aaea75ef84303ec901d4b81df104c25c15e3d 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3996,6 +3996,17 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state) return ret; } + if (intel_can_enable_sagv(dev_priv, new_bw_state) != + intel_can_enable_sagv(dev_priv, old_bw_state)) { + ret = intel_atomic_serialize_global_state(&new_bw_state->base); + if (ret) + return ret; + } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) { + ret = intel_atomic_lock_global_state(&new_bw_state->base); + if (ret) + return ret; + } + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal; @@ -4010,17 +4021,6 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state) intel_can_enable_sagv(dev_priv, new_bw_state); } - if (intel_can_enable_sagv(dev_priv, new_bw_state) != - intel_can_enable_sagv(dev_priv, old_bw_state)) { - ret = intel_atomic_serialize_global_state(&new_bw_state->base); - if (ret) - return ret; - } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) { - ret = intel_atomic_lock_global_state(&new_bw_state->base); - if (ret) - return ret; - } - return 0; } diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 97ded2a59cf4c50838148c09c0004013bcb091ea..01849840ac560064d5fab2b285c98d0f21e3f5a1 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -694,7 +694,8 @@ void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore, } static void __intel_uncore_forcewake_put(struct intel_uncore *uncore, - enum forcewake_domains fw_domains) + enum forcewake_domains fw_domains, + bool delayed) { struct intel_uncore_forcewake_domain *domain; unsigned int tmp; @@ -709,7 +710,11 @@ static void __intel_uncore_forcewake_put(struct intel_uncore *uncore, continue; } - uncore->funcs.force_wake_put(uncore, domain->mask); + if (delayed && + !(domain->uncore->fw_domains_timer & domain->mask)) + fw_domain_arm_timer(domain); + else + uncore->funcs.force_wake_put(uncore, 
domain->mask); } } @@ -730,7 +735,20 @@ void intel_uncore_forcewake_put(struct intel_uncore *uncore, return; spin_lock_irqsave(&uncore->lock, irqflags); - __intel_uncore_forcewake_put(uncore, fw_domains); + __intel_uncore_forcewake_put(uncore, fw_domains, false); + spin_unlock_irqrestore(&uncore->lock, irqflags); +} + +void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore, + enum forcewake_domains fw_domains) +{ + unsigned long irqflags; + + if (!uncore->funcs.force_wake_put) + return; + + spin_lock_irqsave(&uncore->lock, irqflags); + __intel_uncore_forcewake_put(uncore, fw_domains, true); spin_unlock_irqrestore(&uncore->lock, irqflags); } @@ -772,7 +790,7 @@ void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore, if (!uncore->funcs.force_wake_put) return; - __intel_uncore_forcewake_put(uncore, fw_domains); + __intel_uncore_forcewake_put(uncore, fw_domains, false); } void assert_forcewakes_inactive(struct intel_uncore *uncore) diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h index c4b22d9d0b45108aab6a3d0dee5f5f7cea8efac4..034f04e0de8b7774ff8cd6eab16c9e577cb375f9 100644 --- a/drivers/gpu/drm/i915/intel_uncore.h +++ b/drivers/gpu/drm/i915/intel_uncore.h @@ -211,6 +211,8 @@ void intel_uncore_forcewake_get(struct intel_uncore *uncore, enum forcewake_domains domains); void intel_uncore_forcewake_put(struct intel_uncore *uncore, enum forcewake_domains domains); +void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore, + enum forcewake_domains domains); void intel_uncore_forcewake_flush(struct intel_uncore *uncore, enum forcewake_domains fw_domains); diff --git a/drivers/gpu/drm/lima/lima_device.c b/drivers/gpu/drm/lima/lima_device.c index 65fdca366e41f00d940f13a0865234fc47df5595..36c99058942788fe6e31cc127b7b22c666bc5c21 100644 --- a/drivers/gpu/drm/lima/lima_device.c +++ b/drivers/gpu/drm/lima/lima_device.c @@ -357,6 +357,7 @@ int lima_device_init(struct lima_device *ldev) int err, i; dma_set_coherent_mask(ldev->dev, DMA_BIT_MASK(32)); + dma_set_max_seg_size(ldev->dev, UINT_MAX); err = lima_clk_init(ldev); if (err) diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c index 8cee2591e7284ba06e4a02475eba09331e47e1a2..ccc742dc78bd99a262385c0088c6c7ca48abc270 100644 --- a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c +++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c @@ -147,6 +147,8 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev) return -ENOMEM; mipi_tx->driver_data = of_device_get_match_data(dev); + if (!mipi_tx->driver_data) + return -ENODEV; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); mipi_tx->regs = devm_ioremap_resource(dev, mem); diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index dabb4a1ccdcf70150db023a2815e460e87ea6333..1aad34b5ffd7f42d9e605ed79de1cec10bcbb5c6 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -60,6 +60,7 @@ config DRM_MSM_HDMI_HDCP config DRM_MSM_DP bool "Enable DisplayPort support in MSM DRM driver" depends on DRM_MSM + select RATIONAL default y help Compile in support for DP driver in MSM DRM driver. 
DP external diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c index a7a24539921f3a58f18aa4c7c87021dd166d68b2..a6efc11eba93fdc2e7786312d71d52717e374fcf 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c @@ -26,9 +26,16 @@ static void dpu_setup_dspp_pcc(struct dpu_hw_dspp *ctx, struct dpu_hw_pcc_cfg *cfg) { - u32 base = ctx->cap->sblk->pcc.base; + u32 base; - if (!ctx || !base) { + if (!ctx) { + DRM_ERROR("invalid ctx %pK\n", ctx); + return; + } + + base = ctx->cap->sblk->pcc.base; + + if (!base) { DRM_ERROR("invalid ctx %pK pcc base 0x%x\n", ctx, base); return; } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index b4a2e8eb35dd25f905a6eb23959c86e0a3b16554..08e082d0443af22af0087f49d60ba5f14110c2f2 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -71,8 +71,8 @@ static int _dpu_danger_signal_status(struct seq_file *s, &status); } else { seq_puts(s, "\nSafe signal status:\n"); - if (kms->hw_mdp->ops.get_danger_status) - kms->hw_mdp->ops.get_danger_status(kms->hw_mdp, + if (kms->hw_mdp->ops.get_safe_status) + kms->hw_mdp->ops.get_safe_status(kms->hw_mdp, &status); } pm_runtime_put_sync(&kms->pdev->dev); diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c index 1adead764feed8558a6ef5191175251a1c94bdce..f845333593daa7d1821f54536e8dbc41fb901786 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.c +++ b/drivers/gpu/drm/msm/dsi/dsi.c @@ -33,7 +33,12 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi) of_node_put(phy_node); - if (!phy_pdev || !msm_dsi->phy) { + if (!phy_pdev) { + DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__); + return -EPROBE_DEFER; + } + if (!msm_dsi->phy) { + put_device(&phy_pdev->dev); DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__); return -EPROBE_DEFER; } diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index e8c1a727179cc29b8afceda742353b71c72a7a96..e07986ab52c2235a115b61b70541dc002dfd1c1d 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -769,12 +769,14 @@ void __exit msm_dsi_phy_driver_unregister(void) int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, struct msm_dsi_phy_clk_request *clk_req) { - struct device *dev = &phy->pdev->dev; + struct device *dev; int ret; if (!phy || !phy->cfg->ops.enable) return -EINVAL; + dev = &phy->pdev->dev; + ret = dsi_phy_enable_resource(phy); if (ret) { DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n", diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index 737453b6e596646b9a530ee16a18901af83c69e8..94f948ef279d107408e1c4b820176f5d78245c60 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -97,10 +97,15 @@ static int msm_hdmi_get_phy(struct hdmi *hdmi) of_node_put(phy_node); - if (!phy_pdev || !hdmi->phy) { + if (!phy_pdev) { DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n"); return -EPROBE_DEFER; } + if (!hdmi->phy) { + DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n"); + put_device(&phy_pdev->dev); + return -EPROBE_DEFER; + } hdmi->phy_dev = get_device(&phy_pdev->dev); diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 33e42b2f9cfcb4a79cda66378ab6ac4d0205770a..e37e5afc680a2b49bf0afcc3e232ca1bc58f8384 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ 
b/drivers/gpu/drm/msm/msm_drv.c @@ -350,7 +350,7 @@ static int msm_init_vram(struct drm_device *dev) of_node_put(node); if (ret) return ret; - size = r.end - r.start; + size = r.end - r.start + 1; DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start); /* if we have no IOMMU, then we need to use carveout allocator. diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c index 7739f46470d3e1215402e541f55194d6764d34a9..99fee4d8cd3187dc0549f412ff384926f82a10b6 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c @@ -205,7 +205,7 @@ nv04_display_destroy(struct drm_device *dev) nvif_notify_dtor(&disp->flip); nouveau_display(dev)->priv = NULL; - kfree(disp); + vfree(disp); nvif_object_unmap(&drm->client.device.object); } @@ -223,7 +223,7 @@ nv04_display_create(struct drm_device *dev) struct nv04_display *disp; int i, ret; - disp = kzalloc(sizeof(*disp), GFP_KERNEL); + disp = vzalloc(sizeof(*disp)); if (!disp) return -ENOMEM; diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c index c6a3448180d6fdb9102026bbbd17bffad1ca1775..93d9575181c67f7ae01203ff9b34e3c91e3cc6ec 100644 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c @@ -119,8 +119,12 @@ nvkm_falcon_disable(struct nvkm_falcon *falcon) int nvkm_falcon_reset(struct nvkm_falcon *falcon) { - nvkm_falcon_disable(falcon); - return nvkm_falcon_enable(falcon); + if (!falcon->func->reset) { + nvkm_falcon_disable(falcon); + return nvkm_falcon_enable(falcon); + } + + return falcon->func->reset(falcon); } int diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c index f3c30b2a788e8534fe72c23729779afd5e39d573..8bff14ae16b0e5bd76ef0f7541040dda3cb80e70 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c @@ -38,7 +38,7 @@ nvbios_addr(struct nvkm_bios *bios, u32 *addr, u8 size) *addr += bios->imaged_addr; } - if (unlikely(*addr + size >= bios->size)) { + if (unlikely(*addr + size > bios->size)) { nvkm_error(&bios->subdev, "OOB %d %08x %08x\n", size, p, *addr); return false; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c index a0fe607c9c07fc5f59f88c45ff4c4538dc5badc2..3bfc55c571b5e0aca53846dd68a0eb460d6b743e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c @@ -94,20 +94,13 @@ nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend) return 0; } -static int +static void nvkm_pmu_reset(struct nvkm_pmu *pmu) { struct nvkm_device *device = pmu->subdev.device; if (!pmu->func->enabled(pmu)) - return 0; - - /* Inhibit interrupts, and wait for idle. */ - nvkm_wr32(device, 0x10a014, 0x0000ffff); - nvkm_msec(device, 2000, - if (!nvkm_rd32(device, 0x10a04c)) - break; - ); + return; /* Reset. 
*/ if (pmu->func->reset) @@ -118,25 +111,37 @@ nvkm_pmu_reset(struct nvkm_pmu *pmu) if (!(nvkm_rd32(device, 0x10a10c) & 0x00000006)) break; ); - - return 0; } static int nvkm_pmu_preinit(struct nvkm_subdev *subdev) { struct nvkm_pmu *pmu = nvkm_pmu(subdev); - return nvkm_pmu_reset(pmu); + nvkm_pmu_reset(pmu); + return 0; } static int nvkm_pmu_init(struct nvkm_subdev *subdev) { struct nvkm_pmu *pmu = nvkm_pmu(subdev); - int ret = nvkm_pmu_reset(pmu); - if (ret == 0 && pmu->func->init) - ret = pmu->func->init(pmu); - return ret; + struct nvkm_device *device = pmu->subdev.device; + + if (!pmu->func->init) + return 0; + + if (pmu->func->enabled(pmu)) { + /* Inhibit interrupts, and wait for idle. */ + nvkm_wr32(device, 0x10a014, 0x0000ffff); + nvkm_msec(device, 2000, + if (!nvkm_rd32(device, 0x10a04c)) + break; + ); + + nvkm_pmu_reset(pmu); + } + + return pmu->func->init(pmu); } static void * diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c index 383376addb41c4a55dd1261ea65c748c846233fa..a9d6c36195ed1f175cdb83b32f203b8695302f72 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c @@ -23,9 +23,38 @@ */ #include "priv.h" +static int +gm200_pmu_flcn_reset(struct nvkm_falcon *falcon) +{ + struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon); + + nvkm_falcon_wr32(falcon, 0x014, 0x0000ffff); + pmu->func->reset(pmu); + return nvkm_falcon_enable(falcon); +} + +const struct nvkm_falcon_func +gm200_pmu_flcn = { + .debug = 0xc08, + .fbif = 0xe00, + .load_imem = nvkm_falcon_v1_load_imem, + .load_dmem = nvkm_falcon_v1_load_dmem, + .read_dmem = nvkm_falcon_v1_read_dmem, + .bind_context = nvkm_falcon_v1_bind_context, + .wait_for_halt = nvkm_falcon_v1_wait_for_halt, + .clear_interrupt = nvkm_falcon_v1_clear_interrupt, + .set_start_addr = nvkm_falcon_v1_set_start_addr, + .start = nvkm_falcon_v1_start, + .enable = nvkm_falcon_v1_enable, + .disable = nvkm_falcon_v1_disable, + .reset = gm200_pmu_flcn_reset, + .cmdq = { 0x4a0, 0x4b0, 4 }, + .msgq = { 0x4c8, 0x4cc, 0 }, +}; + static const struct nvkm_pmu_func gm200_pmu = { - .flcn = &gt215_pmu_flcn, + .flcn = &gm200_pmu_flcn, .enabled = gf100_pmu_enabled, .reset = gf100_pmu_reset, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c index 8f6ed5373ea16e046a7748717639a304ac1327f9..7938722b4da17df0fdd580b7aeb64d890ef2cf04 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c @@ -211,7 +211,7 @@ gm20b_pmu_recv(struct nvkm_pmu *pmu) static const struct nvkm_pmu_func gm20b_pmu = { - .flcn = &gt215_pmu_flcn, + .flcn = &gm200_pmu_flcn, .enabled = gf100_pmu_enabled, .intr = gt215_pmu_intr, .recv = gm20b_pmu_recv, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c index 3d8ce14dba7bf1fb2b7a1d4c54dd9ced9f2f6ed0..3dfb3e8522f6a096aaa90fddcf5a79e2e05ec8a6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c @@ -39,7 +39,7 @@ gp102_pmu_enabled(struct nvkm_pmu *pmu) static const struct nvkm_pmu_func gp102_pmu = { - .flcn = &gt215_pmu_flcn, + .flcn = &gm200_pmu_flcn, .enabled = gp102_pmu_enabled, .reset = gp102_pmu_reset, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c index 
9c237c426599b543e38f39e3d2fd3888bb70e510..7f5f9d5448360a69b5d9a5f832690c88233c593d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c @@ -78,7 +78,7 @@ gp10b_pmu_acr = { static const struct nvkm_pmu_func gp10b_pmu = { - .flcn = &gt215_pmu_flcn, + .flcn = &gm200_pmu_flcn, .enabled = gf100_pmu_enabled, .intr = gt215_pmu_intr, .recv = gm20b_pmu_recv, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h index 276b6d778e532fc54b0049833ddb88d498c6f21e..b945ec320cd2ef15fa1e5749d6425c4c30d7dd4d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h @@ -44,6 +44,8 @@ void gf100_pmu_reset(struct nvkm_pmu *); void gk110_pmu_pgob(struct nvkm_pmu *, bool); +extern const struct nvkm_falcon_func gm200_pmu_flcn; + void gm20b_pmu_acr_bld_patch(struct nvkm_acr *, u32, s64); void gm20b_pmu_acr_bld_write(struct nvkm_acr *, u32, struct nvkm_acr_lsfw *); int gm20b_pmu_acr_boot(struct nvkm_falcon *); diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c index aea31622539146ebbf9bfbcb9f08a3c65b92e633..f194b62e290cae561b2c2b188701481f6aa4ee73 100644 --- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c +++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c @@ -484,6 +484,7 @@ static void innolux_panel_del(struct innolux_panel *innolux) static int innolux_panel_probe(struct mipi_dsi_device *dsi) { const struct panel_desc *desc; + struct innolux_panel *innolux; int err; desc = of_device_get_match_data(&dsi->dev); @@ -495,7 +496,14 @@ static int innolux_panel_probe(struct mipi_dsi_device *dsi) if (err < 0) return err; - return mipi_dsi_attach(dsi); + err = mipi_dsi_attach(dsi); + if (err < 0) { + innolux = mipi_dsi_get_drvdata(dsi); + innolux_panel_del(innolux); + return err; + } + + return 0; } static int innolux_panel_remove(struct mipi_dsi_device *dsi) diff --git a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c index 86e4213e8bb13f6e834c91bb755d599143f3adc4..daccb1fd5fdad82dcdfd8caacfe89e43a5bb5746 100644 --- a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c +++ b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c @@ -406,7 +406,13 @@ static int kingdisplay_panel_probe(struct mipi_dsi_device *dsi) if (err < 0) return err; - return mipi_dsi_attach(dsi); + err = mipi_dsi_attach(dsi); + if (err < 0) { + kingdisplay_panel_del(kingdisplay); + return err; + } + + return 0; } static int kingdisplay_panel_remove(struct mipi_dsi_device *dsi) diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index cc5ee1b3af84f27462ddcaf64b33d20ceab3c96a..12aa7877a625a389a418cd3e8f3f9835595bc86e 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -197,7 +197,8 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder, * so don't register a backlight device */ if ((rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) && - (rdev->pdev->device == 0x6741)) + (rdev->pdev->device == 0x6741) && + !dmi_match(DMI_PRODUCT_NAME, "iMac12,1")) return; if (!radeon_encoder->enc_priv) diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 8c0a572940e826aa05d49645f0a28b14cf8e285a..32070e94f6c4941120a0a31a7a1f4691278a7376 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -634,6 
+634,8 @@ void radeon_driver_lastclose_kms(struct drm_device *dev) int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) { struct radeon_device *rdev = dev->dev_private; + struct radeon_fpriv *fpriv; + struct radeon_vm *vm; int r; file_priv->driver_priv = NULL; @@ -646,48 +648,52 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) /* new gpu have virtual address space support */ if (rdev->family >= CHIP_CAYMAN) { - struct radeon_fpriv *fpriv; - struct radeon_vm *vm; fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); if (unlikely(!fpriv)) { r = -ENOMEM; - goto out_suspend; + goto err_suspend; } if (rdev->accel_working) { vm = &fpriv->vm; r = radeon_vm_init(rdev, vm); - if (r) { - kfree(fpriv); - goto out_suspend; - } + if (r) + goto err_fpriv; r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); - if (r) { - radeon_vm_fini(rdev, vm); - kfree(fpriv); - goto out_suspend; - } + if (r) + goto err_vm_fini; /* map the ib pool buffer read only into * virtual address space */ vm->ib_bo_va = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo); + if (!vm->ib_bo_va) { + r = -ENOMEM; + goto err_vm_fini; + } + r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va, RADEON_VA_IB_OFFSET, RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED); - if (r) { - radeon_vm_fini(rdev, vm); - kfree(fpriv); - goto out_suspend; - } + if (r) + goto err_vm_fini; } file_priv->driver_priv = fpriv; } -out_suspend: + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); + return 0; + +err_vm_fini: + radeon_vm_fini(rdev, vm); +err_fpriv: + kfree(fpriv); + +err_suspend: pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return r; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 675ac27eb2402ecc81f0cc2977504f4803e5bdf8..41a2f8d3e9927a4fc8b078f64b1f94eeaecaa73c 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -215,6 +215,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode; struct rcar_du_device *rcdu = rcrtc->dev; unsigned long mode_clock = mode->clock * 1000; + unsigned int hdse_offset; u32 dsmr; u32 escr; @@ -298,10 +299,15 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) | DSMR_DIPM_DISP | DSMR_CSPM; rcar_du_crtc_write(rcrtc, DSMR, dsmr); + hdse_offset = 19; + if (rcrtc->group->cmms_mask & BIT(rcrtc->index % 2)) + hdse_offset += 25; + /* Display timings */ - rcar_du_crtc_write(rcrtc, HDSR, mode->htotal - mode->hsync_start - 19); + rcar_du_crtc_write(rcrtc, HDSR, mode->htotal - mode->hsync_start - + hdse_offset); rcar_du_crtc_write(rcrtc, HDER, mode->htotal - mode->hsync_start + - mode->hdisplay - 19); + mode->hdisplay - hdse_offset); rcar_du_crtc_write(rcrtc, HSWR, mode->hsync_end - mode->hsync_start - 1); rcar_du_crtc_write(rcrtc, HCR, mode->htotal - 1); @@ -836,6 +842,7 @@ rcar_du_crtc_mode_valid(struct drm_crtc *crtc, struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); struct rcar_du_device *rcdu = rcrtc->dev; bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE; + unsigned int min_sync_porch; unsigned int vbp; if (interlaced && !rcar_du_has(rcdu, RCAR_DU_FEATURE_INTERLACED)) @@ -843,9 +850,14 @@ rcar_du_crtc_mode_valid(struct drm_crtc *crtc, /* * The hardware requires a minimum combined horizontal sync and back - * porch of 20 pixels and a minimum vertical back porch of 3 lines. 
+ * porch of 20 pixels (when CMM isn't used) or 45 pixels (when CMM is + * used), and a minimum vertical back porch of 3 lines. */ - if (mode->htotal - mode->hsync_start < 20) + min_sync_porch = 20; + if (rcrtc->group->cmms_mask & BIT(rcrtc->index % 2)) + min_sync_porch += 25; + + if (mode->htotal - mode->hsync_start < min_sync_porch) return MODE_HBLANK_NARROW; vbp = (mode->vtotal - mode->vsync_end) / (interlaced ? 2 : 1); diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c index d0c9610ad22025140f9e15c84a54e681eccc73ac..b0fb3c3cba5962a151263af9365677838737d23a 100644 --- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c @@ -243,6 +243,8 @@ struct dw_mipi_dsi_rockchip { struct dw_mipi_dsi *dmd; const struct rockchip_dw_dsi_chip_data *cdata; struct dw_mipi_dsi_plat_data pdata; + + bool dsi_bound; }; struct dphy_pll_parameter_map { @@ -753,10 +755,6 @@ static void dw_mipi_dsi_encoder_enable(struct drm_encoder *encoder) if (mux < 0) return; - pm_runtime_get_sync(dsi->dev); - if (dsi->slave) - pm_runtime_get_sync(dsi->slave->dev); - /* * For the RK3399, the clk of grf must be enabled before writing grf * register. And for RK3288 or other soc, this grf_clk must be NULL, @@ -775,20 +773,10 @@ static void dw_mipi_dsi_encoder_enable(struct drm_encoder *encoder) clk_disable_unprepare(dsi->grf_clk); } -static void dw_mipi_dsi_encoder_disable(struct drm_encoder *encoder) -{ - struct dw_mipi_dsi_rockchip *dsi = to_dsi(encoder); - - if (dsi->slave) - pm_runtime_put(dsi->slave->dev); - pm_runtime_put(dsi->dev); -} - static const struct drm_encoder_helper_funcs dw_mipi_dsi_encoder_helper_funcs = { .atomic_check = dw_mipi_dsi_encoder_atomic_check, .enable = dw_mipi_dsi_encoder_enable, - .disable = dw_mipi_dsi_encoder_disable, }; static int rockchip_dsi_drm_create_encoder(struct dw_mipi_dsi_rockchip *dsi, @@ -918,10 +906,14 @@ static int dw_mipi_dsi_rockchip_bind(struct device *dev, put_device(second); } + pm_runtime_get_sync(dsi->dev); + if (dsi->slave) + pm_runtime_get_sync(dsi->slave->dev); + ret = clk_prepare_enable(dsi->pllref_clk); if (ret) { DRM_DEV_ERROR(dev, "Failed to enable pllref_clk: %d\n", ret); - return ret; + goto out_pm_runtime; } /* @@ -933,7 +925,7 @@ static int dw_mipi_dsi_rockchip_bind(struct device *dev, ret = clk_prepare_enable(dsi->grf_clk); if (ret) { DRM_DEV_ERROR(dsi->dev, "Failed to enable grf_clk: %d\n", ret); - return ret; + goto out_pll_clk; } dw_mipi_dsi_rockchip_config(dsi); @@ -945,16 +937,27 @@ static int dw_mipi_dsi_rockchip_bind(struct device *dev, ret = rockchip_dsi_drm_create_encoder(dsi, drm_dev); if (ret) { DRM_DEV_ERROR(dev, "Failed to create drm encoder\n"); - return ret; + goto out_pll_clk; } ret = dw_mipi_dsi_bind(dsi->dmd, &dsi->encoder); if (ret) { DRM_DEV_ERROR(dev, "Failed to bind: %d\n", ret); - return ret; + goto out_pll_clk; } + dsi->dsi_bound = true; + return 0; + +out_pll_clk: + clk_disable_unprepare(dsi->pllref_clk); +out_pm_runtime: + pm_runtime_put(dsi->dev); + if (dsi->slave) + pm_runtime_put(dsi->slave->dev); + + return ret; } static void dw_mipi_dsi_rockchip_unbind(struct device *dev, @@ -966,9 +969,15 @@ static void dw_mipi_dsi_rockchip_unbind(struct device *dev, if (dsi->is_slave) return; + dsi->dsi_bound = false; + dw_mipi_dsi_unbind(dsi->dmd); clk_disable_unprepare(dsi->pllref_clk); + + pm_runtime_put(dsi->dev); + if (dsi->slave) + pm_runtime_put(dsi->slave->dev); } static const struct component_ops dw_mipi_dsi_rockchip_ops = { @@ 
-1026,6 +1035,36 @@ static const struct dw_mipi_dsi_host_ops dw_mipi_dsi_rockchip_host_ops = { .detach = dw_mipi_dsi_rockchip_host_detach, }; +static int __maybe_unused dw_mipi_dsi_rockchip_resume(struct device *dev) +{ + struct dw_mipi_dsi_rockchip *dsi = dev_get_drvdata(dev); + int ret; + + /* + * Re-configure DSI state, if we were previously initialized. We need + * to do this before rockchip_drm_drv tries to re-enable() any panels. + */ + if (dsi->dsi_bound) { + ret = clk_prepare_enable(dsi->grf_clk); + if (ret) { + DRM_DEV_ERROR(dsi->dev, "Failed to enable grf_clk: %d\n", ret); + return ret; + } + + dw_mipi_dsi_rockchip_config(dsi); + if (dsi->slave) + dw_mipi_dsi_rockchip_config(dsi->slave); + + clk_disable_unprepare(dsi->grf_clk); + } + + return 0; +} + +static const struct dev_pm_ops dw_mipi_dsi_rockchip_pm_ops = { + SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, dw_mipi_dsi_rockchip_resume) +}; + static int dw_mipi_dsi_rockchip_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -1126,14 +1165,10 @@ static int dw_mipi_dsi_rockchip_probe(struct platform_device *pdev) if (ret != -EPROBE_DEFER) DRM_DEV_ERROR(dev, "Failed to probe dw_mipi_dsi: %d\n", ret); - goto err_clkdisable; + return ret; } return 0; - -err_clkdisable: - clk_disable_unprepare(dsi->pllref_clk); - return ret; } static int dw_mipi_dsi_rockchip_remove(struct platform_device *pdev) @@ -1249,6 +1284,7 @@ struct platform_driver dw_mipi_dsi_rockchip_driver = { .remove = dw_mipi_dsi_rockchip_remove, .driver = { .of_match_table = dw_mipi_dsi_rockchip_dt_ids, + .pm = &dw_mipi_dsi_rockchip_pm_ops, .name = "dw-mipi-dsi-rockchip", }, }; diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index 23de359a1dec60116f29a22f045f663b2b7ec707..515e6f187dc777aa6abcdab46a45733c058d8b9a 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c @@ -529,13 +529,6 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master, return ret; } - ret = clk_prepare_enable(hdmi->vpll_clk); - if (ret) { - DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n", - ret); - return ret; - } - hdmi->phy = devm_phy_optional_get(dev, "hdmi"); if (IS_ERR(hdmi->phy)) { ret = PTR_ERR(hdmi->phy); @@ -544,6 +537,13 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master, return ret; } + ret = clk_prepare_enable(hdmi->vpll_clk); + if (ret) { + DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n", + ret); + return ret; + } + drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs); drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS); diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c index a6fe03c3748aad414911cddccc6062c712940432..39e1e1ebea92841dd0ca037da8d9975d50a1ab0d 100644 --- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c +++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c @@ -873,6 +873,7 @@ static const struct vop_win_phy rk3399_win01_data = { .enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0), .format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1), .rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12), + .x_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 21), .y_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 22), .act_info = VOP_REG(RK3288_WIN0_ACT_INFO, 0x1fff1fff, 0), .dsp_info = VOP_REG(RK3288_WIN0_DSP_INFO, 0x0fff0fff, 0), @@ -883,6 +884,7 @@ static const struct vop_win_phy rk3399_win01_data = { .uv_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 16), .src_alpha_ctl = 
VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 0), .dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xff, 0), + .channel = VOP_REG(RK3288_WIN0_CTRL2, 0xff, 0), }; /* @@ -893,11 +895,11 @@ static const struct vop_win_phy rk3399_win01_data = { static const struct vop_win_data rk3399_vop_win_data[] = { { .base = 0x00, .phy = &rk3399_win01_data, .type = DRM_PLANE_TYPE_PRIMARY }, - { .base = 0x40, .phy = &rk3288_win01_data, + { .base = 0x40, .phy = &rk3368_win01_data, .type = DRM_PLANE_TYPE_OVERLAY }, - { .base = 0x00, .phy = &rk3288_win23_data, + { .base = 0x00, .phy = &rk3368_win23_data, .type = DRM_PLANE_TYPE_OVERLAY }, - { .base = 0x50, .phy = &rk3288_win23_data, + { .base = 0x50, .phy = &rk3368_win23_data, .type = DRM_PLANE_TYPE_CURSOR }, }; diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c index b77f726303d89d186427e6d6285ab3563e367a20..ec0e4d8f0aade7e9f8f2b89ba687cdda1886f92f 100644 --- a/drivers/gpu/drm/tegra/vic.c +++ b/drivers/gpu/drm/tegra/vic.c @@ -5,6 +5,7 @@ #include <linux/clk.h> #include <linux/delay.h> +#include <linux/dma-mapping.h> #include <linux/host1x.h> #include <linux/iommu.h> #include <linux/module.h> @@ -265,10 +266,8 @@ static int vic_load_firmware(struct vic *vic) if (!client->group) { virt = dma_alloc_coherent(vic->dev, size, &iova, GFP_KERNEL); - - err = dma_mapping_error(vic->dev, iova); - if (err < 0) - return err; + if (!virt) + return -ENOMEM; } else { virt = tegra_drm_alloc(tegra, size, &iova); } diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index eb4b7df02ca034ab55c963e7c5d00430bd302b19..f673292eec9dbb6b5c050bfca006cbc4c6a151cc 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -789,6 +789,8 @@ int ttm_mem_evict_first(struct ttm_bo_device *bdev, ret = ttm_bo_evict(bo, ctx); if (locked) ttm_bo_unreserve(bo); + else + ttm_bo_move_to_lru_tail_unlocked(bo); ttm_bo_put(bo); return ret; diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c index d68d9bad76747015c446b3f5908014ca81dd3b70..c5ea880d17b29bc227d523e59a7bb07502ea4751 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_main.c +++ b/drivers/gpu/drm/vboxvideo/vbox_main.c @@ -123,8 +123,8 @@ int vbox_hw_init(struct vbox_private *vbox) /* Create guest-heap mem-pool use 2^4 = 16 byte chunks */ vbox->guest_pool = devm_gen_pool_create(vbox->ddev.dev, 4, -1, "vboxvideo-accel"); - if (!vbox->guest_pool) - return -ENOMEM; + if (IS_ERR(vbox->guest_pool)) + return PTR_ERR(vbox->guest_pool); ret = gen_pool_add_virt(vbox->guest_pool, (unsigned long)vbox->guest_heap, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 1523b51a7284c03928195c67063db5ab96762f8d..ad208a5f4ebe52e1f14c982bd0c75a2aa3e5fef0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -1088,15 +1088,14 @@ extern int vmw_execbuf_fence_commands(struct drm_file *file_priv, struct vmw_private *dev_priv, struct vmw_fence_obj **p_fence, uint32_t *p_handle); -extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, +extern int vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, struct vmw_fpriv *vmw_fp, int ret, struct drm_vmw_fence_rep __user *user_fence_rep, struct vmw_fence_obj *fence, uint32_t fence_handle, - int32_t out_fence_fd, - struct sync_file *sync_file); + int32_t out_fence_fd); bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd); /** diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 83e1b54eb8647d0a9cc3e0d8493172ed16d12349..739cbc77d8867cdb98fb4555467096c8eca4c6ea 100644 --- 
a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -3816,17 +3816,17 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv, * Also if copying fails, user-space will be unable to signal the fence object * so we wait for it immediately, and then unreference the user-space reference. */ -void +int vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, struct vmw_fpriv *vmw_fp, int ret, struct drm_vmw_fence_rep __user *user_fence_rep, struct vmw_fence_obj *fence, uint32_t fence_handle, - int32_t out_fence_fd, struct sync_file *sync_file) + int32_t out_fence_fd) { struct drm_vmw_fence_rep fence_rep; if (user_fence_rep == NULL) - return; + return 0; memset(&fence_rep, 0, sizeof(fence_rep)); @@ -3854,20 +3854,14 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, * handle. */ if (unlikely(ret != 0) && (fence_rep.error == 0)) { - if (sync_file) - fput(sync_file->file); - - if (fence_rep.fd != -1) { - put_unused_fd(fence_rep.fd); - fence_rep.fd = -1; - } - ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle, TTM_REF_USAGE); VMW_DEBUG_USER("Fence copy error. Syncing.\n"); (void) vmw_fence_obj_wait(fence, false, false, VMW_FENCE_WAIT_TIMEOUT); } + + return ret ? -EFAULT : 0; } /** @@ -4209,16 +4203,23 @@ int vmw_execbuf_process(struct drm_file *file_priv, (void) vmw_fence_obj_wait(fence, false, false, VMW_FENCE_WAIT_TIMEOUT); + } + } + + ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, + user_fence_rep, fence, handle, out_fence_fd); + + if (sync_file) { + if (ret) { + /* usercopy of fence failed, put the file object */ + fput(sync_file->file); + put_unused_fd(out_fence_fd); } else { /* Link the fence with the FD created earlier */ fd_install(out_fence_fd, sync_file->file); } } - vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, - user_fence_rep, fence, handle, out_fence_fd, - sync_file); - /* Don't unreference when handing fence out */ if (unlikely(out_fence != NULL)) { *out_fence = fence; @@ -4236,7 +4237,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, */ vmw_validation_unref_lists(&val_ctx); - return 0; + return ret; out_unlock_binding: mutex_unlock(&dev_priv->binding_mutex); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index 0f8d293971576a4f6d2faf78c6e24e8825457a58..8bc41ec97d71a29fb9bf804ca8fc41671e716e82 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -1171,7 +1171,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data, } vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence, - handle, -1, NULL); + handle, -1); vmw_fence_obj_unreference(&fence); return 0; out_no_create: diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index bc67f2b930e1f17e8e4356f09385b980133d02b8..415078e9ea39ec2b6923ce8f86d9b27d023011d9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -2481,7 +2481,7 @@ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv, if (file_priv) vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, user_fence_rep, fence, - handle, -1, NULL); + handle, -1); if (out_fence) *out_fence = fence; else diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index d0ebb70e2fdd6e56750db8a0772b8491f8b7bf54..a2c09dca4eef945e76a2bf68da9f0db52bb39263 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -18,6 +18,10 @@ #include <trace/events/host1x.h> #undef CREATE_TRACE_POINTS 
+#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) +#include <asm/dma-iommu.h> +#endif + #include "bus.h" #include "channel.h" #include "debug.h" @@ -232,6 +236,17 @@ static struct iommu_domain *host1x_iommu_attach(struct host1x *host) struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev); int err; +#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) + if (host->dev->archdata.mapping) { + struct dma_iommu_mapping *mapping = + to_dma_iommu_mapping(host->dev); + arm_iommu_detach_device(host->dev); + arm_iommu_release_mapping(mapping); + + domain = iommu_get_domain_for_dev(host->dev); + } +#endif + /* * We may not always want to enable IOMMU support (for example if the * host1x firewall is already enabled and we don't support addressing diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index 5c1d33cda863b2be27caaf278ee835d5b0d244e2..e5d2e7e9541b8296b421c2933ec299a46bc99fe7 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c @@ -415,7 +415,7 @@ static int apple_input_configured(struct hid_device *hdev, if ((asc->quirks & APPLE_HAS_FN) && !asc->fn_found) { hid_info(hdev, "Fn key not found (Apple Wireless Keyboard clone?), disabling Fn key handling\n"); - asc->quirks = 0; + asc->quirks &= ~APPLE_HAS_FN; } return 0; diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index f87d2205be421e4e48e6725097be8c17c52ae793..2c671990f18e7dc2068d1146bbced4995746fbae 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -1324,6 +1324,7 @@ #define USB_VENDOR_ID_UGTIZER 0x2179 #define USB_DEVICE_ID_UGTIZER_TABLET_GP0610 0x0053 #define USB_DEVICE_ID_UGTIZER_TABLET_GT5040 0x0077 +#define USB_DEVICE_ID_UGTIZER_TABLET_WP5540 0x0004 #define USB_VENDOR_ID_VIEWSONIC 0x0543 #define USB_DEVICE_ID_VIEWSONIC_PD1011 0xe621 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 580d378342c41d7d99dcf60587ace66ba0807258..eb53855898c8d225905f1bd3de5499c72ef001ef 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -1288,6 +1288,12 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct input = field->hidinput->input; + if (usage->type == EV_ABS && + (((*quirks & HID_QUIRK_X_INVERT) && usage->code == ABS_X) || + ((*quirks & HID_QUIRK_Y_INVERT) && usage->code == ABS_Y))) { + value = field->logical_maximum - value; + } + if (usage->hat_min < usage->hat_max || usage->hat_dir) { int hat_dir = usage->hat_dir; if (!hat_dir) diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index fd02b2821daf173e50aba9a9e7c3c68775b697d3..cc9aaea2d9abd796fcdf9e612f257a1d89f68ec8 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -188,6 +188,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60), HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_WP5540), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT }, diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c index 
dd05bed4ca53a9696409303d21e7494761b3dc57..38f9bbad81c1774508f6291e16b3159fe5e3844e 100644 --- a/drivers/hid/hid-uclogic-params.c +++ b/drivers/hid/hid-uclogic-params.c @@ -65,7 +65,7 @@ static int uclogic_params_get_str_desc(__u8 **pbuf, struct hid_device *hdev, __u8 idx, size_t len) { int rc; - struct usb_device *udev = hid_to_usb_dev(hdev); + struct usb_device *udev; __u8 *buf = NULL; /* Check arguments */ @@ -74,6 +74,8 @@ static int uclogic_params_get_str_desc(__u8 **pbuf, struct hid_device *hdev, goto cleanup; } + udev = hid_to_usb_dev(hdev); + buf = kmalloc(len, GFP_KERNEL); if (buf == NULL) { rc = -ENOMEM; @@ -449,7 +451,7 @@ static int uclogic_params_frame_init_v1_buttonpad( { int rc; bool found = false; - struct usb_device *usb_dev = hid_to_usb_dev(hdev); + struct usb_device *usb_dev; char *str_buf = NULL; const size_t str_len = 16; @@ -459,6 +461,8 @@ static int uclogic_params_frame_init_v1_buttonpad( goto cleanup; } + usb_dev = hid_to_usb_dev(hdev); + /* * Enable generic button mode */ @@ -705,9 +709,9 @@ static int uclogic_params_huion_init(struct uclogic_params *params, struct hid_device *hdev) { int rc; - struct usb_device *udev = hid_to_usb_dev(hdev); - struct usb_interface *iface = to_usb_interface(hdev->dev.parent); - __u8 bInterfaceNumber = iface->cur_altsetting->desc.bInterfaceNumber; + struct usb_device *udev; + struct usb_interface *iface; + __u8 bInterfaceNumber; bool found; /* The resulting parameters (noop) */ struct uclogic_params p = {0, }; @@ -721,6 +725,10 @@ static int uclogic_params_huion_init(struct uclogic_params *params, goto cleanup; } + udev = hid_to_usb_dev(hdev); + iface = to_usb_interface(hdev->dev.parent); + bInterfaceNumber = iface->cur_altsetting->desc.bInterfaceNumber; + /* If it's not a pen interface */ if (bInterfaceNumber != 0) { /* TODO: Consider marking the interface invalid */ @@ -832,10 +840,10 @@ int uclogic_params_init(struct uclogic_params *params, struct hid_device *hdev) { int rc; - struct usb_device *udev = hid_to_usb_dev(hdev); - __u8 bNumInterfaces = udev->config->desc.bNumInterfaces; - struct usb_interface *iface = to_usb_interface(hdev->dev.parent); - __u8 bInterfaceNumber = iface->cur_altsetting->desc.bInterfaceNumber; + struct usb_device *udev; + __u8 bNumInterfaces; + struct usb_interface *iface; + __u8 bInterfaceNumber; bool found; /* The resulting parameters (noop) */ struct uclogic_params p = {0, }; @@ -846,6 +854,11 @@ int uclogic_params_init(struct uclogic_params *params, goto cleanup; } + udev = hid_to_usb_dev(hdev); + bNumInterfaces = udev->config->desc.bNumInterfaces; + iface = to_usb_interface(hdev->dev.parent); + bInterfaceNumber = iface->cur_altsetting->desc.bInterfaceNumber; + /* * Set replacement report descriptor if the original matches the * specified size. Otherwise keep interface unchanged. 
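The hid-uclogic-params.c hunks above all apply one pattern: pointers derived from hdev (via hid_to_usb_dev(), to_usb_interface(), and the fields read through them) are no longer computed in the declaration block, but only after the NULL checks on the arguments have passed, so a bad hdev can be rejected before it is ever dereferenced. A minimal standalone C sketch of that ordering follows; the probe_tablet()/struct tablet/tablet_to_usb() names are invented for illustration and are not part of the driver.

	#include <errno.h>
	#include <stddef.h>

	struct usb_like { int iface_num; };
	struct tablet   { struct usb_like *parent; };

	/* Hypothetical helper mirroring hid_to_usb_dev(): it dereferences dev. */
	static struct usb_like *tablet_to_usb(struct tablet *dev)
	{
		return dev->parent;
	}

	int probe_tablet(struct tablet *dev)
	{
		struct usb_like *usb;		/* declared here, not initialized from dev */

		if (dev == NULL)		/* validate the argument first... */
			return -EINVAL;

		usb = tablet_to_usb(dev);	/* ...then derive dependent pointers */
		return usb->iface_num;
	}

Initializing usb at its declaration would call tablet_to_usb(dev) before the NULL check runs, which is exactly the ordering the patch removes from uclogic_params_get_str_desc(), uclogic_params_frame_init_v1_buttonpad(), uclogic_params_huion_init() and uclogic_params_init().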
diff --git a/drivers/hid/hid-vivaldi.c b/drivers/hid/hid-vivaldi.c index 72957a9f7117052fb7b7e9bdf0fd87d14018400e..576518e704ee6426484ee6aa9687992af7c5608a 100644 --- a/drivers/hid/hid-vivaldi.c +++ b/drivers/hid/hid-vivaldi.c @@ -74,10 +74,11 @@ static void vivaldi_feature_mapping(struct hid_device *hdev, struct hid_usage *usage) { struct vivaldi_data *drvdata = hid_get_drvdata(hdev); + struct hid_report *report = field->report; int fn_key; int ret; u32 report_len; - u8 *buf; + u8 *report_data, *buf; if (field->logical != HID_USAGE_FN_ROW_PHYSMAP || (usage->hid & HID_USAGE_PAGE) != HID_UP_ORDINAL) @@ -89,12 +90,24 @@ static void vivaldi_feature_mapping(struct hid_device *hdev, if (fn_key > drvdata->max_function_row_key) drvdata->max_function_row_key = fn_key; - buf = hid_alloc_report_buf(field->report, GFP_KERNEL); - if (!buf) + report_data = buf = hid_alloc_report_buf(report, GFP_KERNEL); + if (!report_data) return; - report_len = hid_report_len(field->report); - ret = hid_hw_raw_request(hdev, field->report->id, buf, + report_len = hid_report_len(report); + if (!report->id) { + /* + * hid_hw_raw_request() will stuff report ID (which will be 0) + * into the first byte of the buffer even for unnumbered + * reports, so we need to account for this to avoid getting + * -EOVERFLOW in return. + * Note that hid_alloc_report_buf() adds 7 bytes to the size + * so we can safely say that we have space for an extra byte. + */ + report_len++; + } + + ret = hid_hw_raw_request(hdev, report->id, report_data, report_len, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); if (ret < 0) { @@ -103,7 +116,16 @@ static void vivaldi_feature_mapping(struct hid_device *hdev, goto out; } - ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT, buf, + if (!report->id) { + /* + * Undo the damage from hid_hw_raw_request() for unnumbered + * reports. + */ + report_data++; + report_len--; + } + + ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT, report_data, report_len, 0); if (ret) { dev_warn(&hdev->dev, "failed to report feature %d\n", diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c index 8fe3efcb832715c4a978625387dc0bb0ea5b00a2..fc06d8bb42e0fe94e75167fe360c600283f80a4d 100644 --- a/drivers/hid/uhid.c +++ b/drivers/hid/uhid.c @@ -28,11 +28,22 @@ struct uhid_device { struct mutex devlock; + + /* This flag tracks whether the HID device is usable for commands from + * userspace. The flag is already set before hid_add_device(), which + * runs in workqueue context, to allow hid_add_device() to communicate + * with userspace. + * However, if hid_add_device() fails, the flag is cleared without + * holding devlock. + * We guarantee that if @running changes from true to false while you're + * holding @devlock, it's still fine to access @hid. + */ bool running; __u8 *rd_data; uint rd_size; + /* When this is NULL, userspace may use UHID_CREATE/UHID_CREATE2. */ struct hid_device *hid; struct uhid_event input_buf; @@ -63,9 +74,18 @@ static void uhid_device_add_worker(struct work_struct *work) if (ret) { hid_err(uhid->hid, "Cannot register HID device: error %d\n", ret); - hid_destroy_device(uhid->hid); - uhid->hid = NULL; + /* We used to call hid_destroy_device() here, but that's really + * messy to get right because we have to coordinate with + * concurrent writes from userspace that might be in the middle + * of using uhid->hid. + * Just leave uhid->hid as-is for now, and clean it up when + * userspace tries to close or reinitialize the uhid instance. 
+ * + * However, we do have to clear the ->running flag and do a + * wakeup to make sure userspace knows that the device is gone. + */ uhid->running = false; + wake_up_interruptible(&uhid->report_wait); } } @@ -474,7 +494,7 @@ static int uhid_dev_create2(struct uhid_device *uhid, void *rd_data; int ret; - if (uhid->running) + if (uhid->hid) return -EALREADY; rd_size = ev->u.create2.rd_size; @@ -556,7 +576,7 @@ static int uhid_dev_create(struct uhid_device *uhid, static int uhid_dev_destroy(struct uhid_device *uhid) { - if (!uhid->running) + if (!uhid->hid) return -EINVAL; uhid->running = false; @@ -565,6 +585,7 @@ static int uhid_dev_destroy(struct uhid_device *uhid) cancel_work_sync(&uhid->worker); hid_destroy_device(uhid->hid); + uhid->hid = NULL; kfree(uhid->rd_data); return 0; diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index c25274275258f92344acf355f56dfcd45836593d..d90bfa8b7313e4b05a6d697d7a14bc81ac952c45 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -2566,6 +2566,24 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac, } } +static bool wacom_wac_slot_is_active(struct input_dev *dev, int key) +{ + struct input_mt *mt = dev->mt; + struct input_mt_slot *s; + + if (!mt) + return false; + + for (s = mt->slots; s != mt->slots + mt->num_slots; s++) { + if (s->key == key && + input_mt_get_value(s, ABS_MT_TRACKING_ID) >= 0) { + return true; + } + } + + return false; +} + static void wacom_wac_finger_event(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value) { @@ -2613,9 +2631,14 @@ static void wacom_wac_finger_event(struct hid_device *hdev, } if (usage->usage_index + 1 == field->report_count) { - if (equivalent_usage == wacom_wac->hid_data.last_slot_field && - wacom_wac->hid_data.confidence) - wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input); + if (equivalent_usage == wacom_wac->hid_data.last_slot_field) { + bool touch_removed = wacom_wac_slot_is_active(wacom_wac->touch_input, + wacom_wac->hid_data.id) && !wacom_wac->hid_data.tipswitch; + + if (wacom_wac->hid_data.confidence || touch_removed) { + wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input); + } + } } } @@ -2631,6 +2654,10 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev, hid_data->confidence = true; + hid_data->cc_report = 0; + hid_data->cc_index = -1; + hid_data->cc_value_index = -1; + for (i = 0; i < report->maxfield; i++) { struct hid_field *field = report->field[i]; int j; @@ -2664,11 +2691,14 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev, hid_data->cc_index >= 0) { struct hid_field *field = report->field[hid_data->cc_index]; int value = field->value[hid_data->cc_value_index]; - if (value) + if (value) { hid_data->num_expected = value; + hid_data->num_received = 0; + } } else { hid_data->num_expected = wacom_wac->features.touch_max; + hid_data->num_received = 0; } } @@ -2692,6 +2722,7 @@ static void wacom_wac_finger_report(struct hid_device *hdev, input_sync(input); wacom_wac->hid_data.num_received = 0; + wacom_wac->hid_data.num_expected = 0; /* keep touch state for pen event */ wacom_wac->shared->touch_down = wacom_wac_finger_count_touches(wacom_wac); diff --git a/drivers/hsi/hsi_core.c b/drivers/hsi/hsi_core.c index a5f92e2889cb81c58e66048b25d145502ea2a1bb..a330f58d45fc637d502a9efc331b9d9bb7948963 100644 --- a/drivers/hsi/hsi_core.c +++ b/drivers/hsi/hsi_core.c @@ -102,6 +102,7 @@ struct hsi_client *hsi_new_client(struct hsi_port *port, if (device_register(&cl->device) < 0) { pr_err("hsi: 
failed to register client: %s\n", info->name); put_device(&cl->device); + goto err; } return cl; diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index a5a402e776c77f2640a1de5b09a13b3f0ff330e2..362da2a83b470bce767073e1a7d8b718575e96e0 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -1944,8 +1944,10 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel) kobj->kset = dev->channels_kset; ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL, "%u", relid); - if (ret) + if (ret) { + kobject_put(kobj); return ret; + } ret = sysfs_create_group(kobj, &vmbus_chan_group); @@ -1954,6 +1956,7 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel) * The calling functions' error handling paths will cleanup the * empty channel directory. */ + kobject_put(kobj); dev_err(device, "Unable to set up channel sysfs files\n"); return ret; } diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c index 87f401100466d3bd66082bb021d4ccf257ea16b3..10c7b6295b02e8b2819e2fa61cc09ce467d19e35 100644 --- a/drivers/hwmon/dell-smm-hwmon.c +++ b/drivers/hwmon/dell-smm-hwmon.c @@ -317,7 +317,7 @@ static int i8k_enable_fan_auto_mode(bool enable) } /* - * Set the fan speed (off, low, high). Returns the new fan status. + * Set the fan speed (off, low, high, ...). */ static int i8k_set_fan(int fan, int speed) { @@ -329,7 +329,7 @@ static int i8k_set_fan(int fan, int speed) speed = (speed < 0) ? 0 : ((speed > i8k_fan_max) ? i8k_fan_max : speed); regs.ebx = (fan & 0xff) | (speed << 8); - return i8k_smm(®s) ? : i8k_get_fan_status(fan); + return i8k_smm(®s); } static int i8k_get_temp_type(int sensor) @@ -443,7 +443,7 @@ static int i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg) { int val = 0; - int speed; + int speed, err; unsigned char buff[16]; int __user *argp = (int __user *)arg; @@ -504,7 +504,11 @@ i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg) if (copy_from_user(&speed, argp + 1, sizeof(int))) return -EFAULT; - val = i8k_set_fan(val, speed); + err = i8k_set_fan(val, speed); + if (err < 0) + return err; + + val = i8k_get_fan_status(val); break; default: diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c index e5a83f74926772ac138e3479945612669c5ec2bd..d649fea8299948a183b582669df670c728877927 100644 --- a/drivers/hwmon/hwmon.c +++ b/drivers/hwmon/hwmon.c @@ -178,12 +178,14 @@ static int hwmon_thermal_add_sensor(struct device *dev, int index) tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata, &hwmon_thermal_ops); - /* - * If CONFIG_THERMAL_OF is disabled, this returns -ENODEV, - * so ignore that error but forward any other error. 
- */ - if (IS_ERR(tzd) && (PTR_ERR(tzd) != -ENODEV)) - return PTR_ERR(tzd); + if (IS_ERR(tzd)) { + if (PTR_ERR(tzd) != -ENODEV) + return PTR_ERR(tzd); + dev_info(dev, "temp%d_input not attached to any thermal zone\n", + index + 1); + devm_kfree(dev, tdata); + return 0; + } err = devm_add_action(dev, hwmon_thermal_remove_sensor, &tdata->node); if (err) diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c index 959446b0137bcba40ab1c54d1c2f8c46efb368f5..a7142c32889c056bc25572cce925afbbe0546bff 100644 --- a/drivers/hwmon/lm90.c +++ b/drivers/hwmon/lm90.c @@ -373,7 +373,7 @@ static const struct lm90_params lm90_params[] = { .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_CRIT, .alert_alarms = 0x7c, - .max_convrate = 8, + .max_convrate = 7, }, [lm86] = { .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT @@ -394,12 +394,13 @@ static const struct lm90_params lm90_params[] = { .max_convrate = 9, }, [max6646] = { - .flags = LM90_HAVE_CRIT, + .flags = LM90_HAVE_CRIT | LM90_HAVE_BROKEN_ALERT, .alert_alarms = 0x7c, .max_convrate = 6, .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL, }, [max6654] = { + .flags = LM90_HAVE_BROKEN_ALERT, .alert_alarms = 0x7c, .max_convrate = 7, .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL, @@ -418,7 +419,7 @@ static const struct lm90_params lm90_params[] = { }, [max6680] = { .flags = LM90_HAVE_OFFSET | LM90_HAVE_CRIT - | LM90_HAVE_CRIT_ALRM_SWP, + | LM90_HAVE_CRIT_ALRM_SWP | LM90_HAVE_BROKEN_ALERT, .alert_alarms = 0x7c, .max_convrate = 7, }, diff --git a/drivers/hwmon/mr75203.c b/drivers/hwmon/mr75203.c index 18da5a25e89abfb950fe228ebe243d98ec93abb6..046523d47c29b75dfb045e95ad6a8ea86a42752e 100644 --- a/drivers/hwmon/mr75203.c +++ b/drivers/hwmon/mr75203.c @@ -93,7 +93,7 @@ #define VM_CH_REQ BIT(21) #define IP_TMR 0x05 -#define POWER_DELAY_CYCLE_256 0x80 +#define POWER_DELAY_CYCLE_256 0x100 #define POWER_DELAY_CYCLE_64 0x40 #define PVT_POLL_DELAY_US 20 diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c index ba766d24219ef0ea5cbfc3c433c1f4115b53ded5..44e2466f3c6744c21190f73475d8dcce106e9ddc 100644 --- a/drivers/i2c/busses/i2c-brcmstb.c +++ b/drivers/i2c/busses/i2c-brcmstb.c @@ -674,7 +674,7 @@ static int brcmstb_i2c_probe(struct platform_device *pdev) /* set the data in/out register size for compatible SoCs */ if (of_device_is_compatible(dev->device->of_node, - "brcmstb,brcmper-i2c")) + "brcm,brcmper-i2c")) dev->data_regsz = sizeof(u8); else dev->data_regsz = sizeof(u32); diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c index 55c83a7a24f368fa969618acfe813b2fb9e486e3..56c87ade0e89d29d877b97b2063097585dd596f1 100644 --- a/drivers/i2c/busses/i2c-designware-pcidrv.c +++ b/drivers/i2c/busses/i2c-designware-pcidrv.c @@ -37,10 +37,10 @@ enum dw_pci_ctl_id_t { }; struct dw_scl_sda_cfg { - u32 ss_hcnt; - u32 fs_hcnt; - u32 ss_lcnt; - u32 fs_lcnt; + u16 ss_hcnt; + u16 fs_hcnt; + u16 ss_lcnt; + u16 fs_lcnt; u32 sda_hold; }; diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index eab6fd6b890ebe3e7750a2333f223c886e6d250f..5618c1ff34dc3a88c6e65fbdf179c435b7d9512f 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -797,6 +797,11 @@ static int i801_block_transaction(struct i801_priv *priv, int result = 0; unsigned char hostc; + if (read_write == I2C_SMBUS_READ && command == I2C_SMBUS_BLOCK_DATA) + data->block[0] = I2C_SMBUS_BLOCK_MAX; + else if (data->block[0] < 1 || data->block[0] > I2C_SMBUS_BLOCK_MAX) + return 
-EPROTO; + if (command == I2C_SMBUS_I2C_BLOCK_DATA) { if (read_write == I2C_SMBUS_WRITE) { /* set I2C_EN bit in configuration register */ @@ -810,16 +815,6 @@ static int i801_block_transaction(struct i801_priv *priv, } } - if (read_write == I2C_SMBUS_WRITE - || command == I2C_SMBUS_I2C_BLOCK_DATA) { - if (data->block[0] < 1) - data->block[0] = 1; - if (data->block[0] > I2C_SMBUS_BLOCK_MAX) - data->block[0] = I2C_SMBUS_BLOCK_MAX; - } else { - data->block[0] = 32; /* max for SMBus block reads */ - } - /* Experience has shown that the block buffer can only be used for SMBus (not I2C) block transactions, even though the datasheet doesn't mention this limitation. */ diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c index af349661fd7694382a6d4221677715ade8e2b3d4..8de8296d258318437441ee7e8aa4529c15516c09 100644 --- a/drivers/i2c/busses/i2c-mpc.c +++ b/drivers/i2c/busses/i2c-mpc.c @@ -105,23 +105,30 @@ static irqreturn_t mpc_i2c_isr(int irq, void *dev_id) /* Sometimes 9th clock pulse isn't generated, and slave doesn't release * the bus, because it wants to send ACK. * Following sequence of enabling/disabling and sending start/stop generates - * the 9 pulses, so it's all OK. + * the 9 pulses, each with a START then ending with STOP, so it's all OK. */ static void mpc_i2c_fixup(struct mpc_i2c *i2c) { int k; - u32 delay_val = 1000000 / i2c->real_clk + 1; - - if (delay_val < 2) - delay_val = 2; + unsigned long flags; for (k = 9; k; k--) { writeccr(i2c, 0); - writeccr(i2c, CCR_MSTA | CCR_MTX | CCR_MEN); + writeb(0, i2c->base + MPC_I2C_SR); /* clear any status bits */ + writeccr(i2c, CCR_MEN | CCR_MSTA); /* START */ + readb(i2c->base + MPC_I2C_DR); /* init xfer */ + udelay(15); /* let it hit the bus */ + local_irq_save(flags); /* should not be delayed further */ + writeccr(i2c, CCR_MEN | CCR_MSTA | CCR_RSTA); /* delay SDA */ readb(i2c->base + MPC_I2C_DR); - writeccr(i2c, CCR_MEN); - udelay(delay_val << 1); + if (k != 1) + udelay(5); + local_irq_restore(flags); } + writeccr(i2c, CCR_MEN); /* Initiate STOP */ + readb(i2c->base + MPC_I2C_DR); + udelay(15); /* Let STOP propagate */ + writeccr(i2c, 0); } static int i2c_wait(struct mpc_i2c *i2c, unsigned timeout, int writing) diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c index 1c259b5188de8b2e2b7d6e202d088ee51c7f002c..09e599069a81d2133127c1ebba763bd84b174a4c 100644 --- a/drivers/i2c/busses/i2c-qcom-cci.c +++ b/drivers/i2c/busses/i2c-qcom-cci.c @@ -558,7 +558,7 @@ static int cci_probe(struct platform_device *pdev) cci->master[idx].adap.quirks = &cci->data->quirks; cci->master[idx].adap.algo = &cci_algo; cci->master[idx].adap.dev.parent = dev; - cci->master[idx].adap.dev.of_node = child; + cci->master[idx].adap.dev.of_node = of_node_get(child); cci->master[idx].master = idx; cci->master[idx].cci = cci; @@ -643,8 +643,10 @@ static int cci_probe(struct platform_device *pdev) continue; ret = i2c_add_adapter(&cci->master[i].adap); - if (ret < 0) + if (ret < 0) { + of_node_put(cci->master[i].adap.dev.of_node); goto error_i2c; + } } pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC); @@ -655,9 +657,11 @@ static int cci_probe(struct platform_device *pdev) return 0; error_i2c: - for (; i >= 0; i--) { - if (cci->master[i].cci) + for (--i ; i >= 0; i--) { + if (cci->master[i].cci) { i2c_del_adapter(&cci->master[i].adap); + of_node_put(cci->master[i].adap.dev.of_node); + } } error: disable_irq(cci->irq); @@ -673,8 +677,10 @@ static int cci_remove(struct platform_device *pdev) int i; for (i = 0; i < 
cci->data->num_masters; i++) { - if (cci->master[i].cci) + if (cci->master[i].cci) { i2c_del_adapter(&cci->master[i].adap); + of_node_put(cci->master[i].adap.dev.of_node); + } cci_halt(cci, i); } diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c index 48435865fdaf34387b563b9ce025e00fddedcc0b..792526462f1c9c4275edda27526cc7eb555e97a0 100644 --- a/drivers/iio/accel/bmc150-accel-core.c +++ b/drivers/iio/accel/bmc150-accel-core.c @@ -1648,11 +1648,14 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(dev, "Unable to register iio device\n"); - goto err_trigger_unregister; + goto err_pm_cleanup; } return 0; +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(dev); + pm_runtime_disable(dev); err_trigger_unregister: bmc150_accel_unregister_triggers(data, BMC150_ACCEL_TRIGGERS - 1); err_buffer_cleanup: diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c index 2eaf85b6e39f4c399ea971451649042fd82c6712..89e0a89d95d6bdadc34dcda832345ae95e4c89d8 100644 --- a/drivers/iio/accel/kxcjk-1013.c +++ b/drivers/iio/accel/kxcjk-1013.c @@ -1429,11 +1429,14 @@ static int kxcjk1013_probe(struct i2c_client *client, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(&client->dev, "unable to register iio device\n"); - goto err_buffer_cleanup; + goto err_pm_cleanup; } return 0; +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(&client->dev); + pm_runtime_disable(&client->dev); err_buffer_cleanup: iio_triggered_buffer_cleanup(indio_dev); err_trigger_unregister: diff --git a/drivers/iio/accel/mma9551.c b/drivers/iio/accel/mma9551.c index 08a2303cc9df3c08022bb32730faa41c01f49816..26421e8e8263958b1585d0b8d176a56bca0c4951 100644 --- a/drivers/iio/accel/mma9551.c +++ b/drivers/iio/accel/mma9551.c @@ -495,11 +495,14 @@ static int mma9551_probe(struct i2c_client *client, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(&client->dev, "unable to register iio device\n"); - goto out_poweroff; + goto err_pm_cleanup; } return 0; +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(&client->dev); + pm_runtime_disable(&client->dev); out_poweroff: mma9551_set_device_state(client, false); diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c index c15908faa38167b0d6ff68d53ceca8de0ed2d403..a23a7685d1f93c893da306c14760eeea03966e94 100644 --- a/drivers/iio/accel/mma9553.c +++ b/drivers/iio/accel/mma9553.c @@ -1134,12 +1134,15 @@ static int mma9553_probe(struct i2c_client *client, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(&client->dev, "unable to register iio device\n"); - goto out_poweroff; + goto err_pm_cleanup; } dev_dbg(&indio_dev->dev, "Registered device %s\n", name); return 0; +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(&client->dev); + pm_runtime_disable(&client->dev); out_poweroff: mma9551_set_device_state(client, false); return ret; diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c index 9c2401c5848ece566c7bc8944d2699c7f4cd20fa..bd350099503765cc2213470bbb137057be211924 100644 --- a/drivers/iio/adc/ad7124.c +++ b/drivers/iio/adc/ad7124.c @@ -74,7 +74,7 @@ #define AD7124_CONFIG_REF_SEL(x) FIELD_PREP(AD7124_CONFIG_REF_SEL_MSK, x) #define AD7124_CONFIG_PGA_MSK GENMASK(2, 0) #define AD7124_CONFIG_PGA(x) FIELD_PREP(AD7124_CONFIG_PGA_MSK, x) -#define AD7124_CONFIG_IN_BUFF_MSK GENMASK(7, 6) +#define AD7124_CONFIG_IN_BUFF_MSK GENMASK(6, 5) #define AD7124_CONFIG_IN_BUFF(x) FIELD_PREP(AD7124_CONFIG_IN_BUFF_MSK, 
x) /* AD7124_FILTER_X */ diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c index 42ea8bc7e78051c16bc8f1dcd7546393c99153d5..adc5ceaef8c93a373daef26dbaded4d4be1904ca 100644 --- a/drivers/iio/adc/men_z188_adc.c +++ b/drivers/iio/adc/men_z188_adc.c @@ -103,6 +103,7 @@ static int men_z188_probe(struct mcb_device *dev, struct z188_adc *adc; struct iio_dev *indio_dev; struct resource *mem; + int ret; indio_dev = devm_iio_device_alloc(&dev->dev, sizeof(struct z188_adc)); if (!indio_dev) @@ -128,8 +129,14 @@ static int men_z188_probe(struct mcb_device *dev, adc->mem = mem; mcb_set_drvdata(dev, indio_dev); - return iio_device_register(indio_dev); + ret = iio_device_register(indio_dev); + if (ret) + goto err_unmap; + + return 0; +err_unmap: + iounmap(adc->base); err: mcb_release_mem(mem); return -ENXIO; diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c index b64718daa2017c91707cc30763ba85d692616e68..c79cd88cd42314a9a4ed658c382524e2df57f849 100644 --- a/drivers/iio/adc/ti-adc081c.c +++ b/drivers/iio/adc/ti-adc081c.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -151,13 +152,16 @@ static int adc081c_probe(struct i2c_client *client, { struct iio_dev *iio; struct adc081c *adc; - struct adcxx1c_model *model; + const struct adcxx1c_model *model; int err; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA)) return -EOPNOTSUPP; - model = &adcxx1c_models[id->driver_data]; + if (dev_fwnode(&client->dev)) + model = device_get_match_data(&client->dev); + else + model = &adcxx1c_models[id->driver_data]; iio = devm_iio_device_alloc(&client->dev, sizeof(*adc)); if (!iio) @@ -224,10 +228,17 @@ static const struct i2c_device_id adc081c_id[] = { }; MODULE_DEVICE_TABLE(i2c, adc081c_id); +static const struct acpi_device_id adc081c_acpi_match[] = { + /* Used on some AAEON boards */ + { "ADC081C", (kernel_ulong_t)&adcxx1c_models[ADC081C] }, + { } +}; +MODULE_DEVICE_TABLE(acpi, adc081c_acpi_match); + static const struct of_device_id adc081c_of_match[] = { - { .compatible = "ti,adc081c" }, - { .compatible = "ti,adc101c" }, - { .compatible = "ti,adc121c" }, + { .compatible = "ti,adc081c", .data = &adcxx1c_models[ADC081C] }, + { .compatible = "ti,adc101c", .data = &adcxx1c_models[ADC101C] }, + { .compatible = "ti,adc121c", .data = &adcxx1c_models[ADC121C] }, { } }; MODULE_DEVICE_TABLE(of, adc081c_of_match); @@ -236,6 +247,7 @@ static struct i2c_driver adc081c_driver = { .driver = { .name = "adc081c", .of_match_table = adc081c_of_match, + .acpi_match_table = adc081c_acpi_match, }, .probe = adc081c_probe, .remove = adc081c_remove, diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c index 39fe0b1785920c5150005a7af38a702087cdc564..b6b90eebec0b99b290490ae5b7f46c713f06fac9 100644 --- a/drivers/iio/gyro/bmg160_core.c +++ b/drivers/iio/gyro/bmg160_core.c @@ -1170,11 +1170,14 @@ int bmg160_core_probe(struct device *dev, struct regmap *regmap, int irq, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(dev, "unable to register iio device\n"); - goto err_buffer_cleanup; + goto err_pm_cleanup; } return 0; +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(dev); + pm_runtime_disable(dev); err_buffer_cleanup: iio_triggered_buffer_cleanup(indio_dev); err_trigger_unregister: diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c index 61885e99d3fc14db2655e7ac955b1adb4dd7a618..89133315e6aaf07bf509916a857233e04275acc9 100644 --- a/drivers/iio/imu/kmx61.c +++ b/drivers/iio/imu/kmx61.c @@ -1392,7 +1392,7 
@@ static int kmx61_probe(struct i2c_client *client, ret = iio_device_register(data->acc_indio_dev); if (ret < 0) { dev_err(&client->dev, "Failed to register acc iio device\n"); - goto err_buffer_cleanup_mag; + goto err_pm_cleanup; } ret = iio_device_register(data->mag_indio_dev); @@ -1405,6 +1405,9 @@ static int kmx61_probe(struct i2c_client *client, err_iio_unregister_acc: iio_device_unregister(data->acc_indio_dev); +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(&client->dev); + pm_runtime_disable(&client->dev); err_buffer_cleanup_mag: if (client->irq > 0) iio_triggered_buffer_cleanup(data->mag_indio_dev); diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c index 558ca3843bb95fe858932ebf363b1ad4c6a915a0..2c528425b03b4c0583c156a12cab7745dbae4d72 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c @@ -1558,8 +1558,12 @@ static int st_lsm6dsx_read_oneshot(struct st_lsm6dsx_sensor *sensor, if (err < 0) return err; + /* + * we need to wait for sensor settling time before + * reading data in order to avoid corrupted samples + */ delay = 1000000000 / sensor->odr; - usleep_range(delay, 2 * delay); + usleep_range(3 * delay, 4 * delay); err = st_lsm6dsx_read_locked(hw, addr, &data, sizeof(data)); if (err < 0) diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c index 8eacfaf584cfd0690fb4020a6ff74cca8bb490eb..620537d0104d4b756fcaa17f8e1c93bb390ca444 100644 --- a/drivers/iio/magnetometer/bmc150_magn.c +++ b/drivers/iio/magnetometer/bmc150_magn.c @@ -941,13 +941,14 @@ int bmc150_magn_probe(struct device *dev, struct regmap *regmap, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(dev, "unable to register iio device\n"); - goto err_disable_runtime_pm; + goto err_pm_cleanup; } dev_dbg(dev, "Registered device %s\n", name); return 0; -err_disable_runtime_pm: +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(dev); pm_runtime_disable(dev); err_buffer_cleanup: iio_triggered_buffer_cleanup(indio_dev); diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 8e54184566f7fe5bb7c389bd8907c446aac23ac3..fbb0efbe25f84fee2f3e6defcb727fd48201c43e 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -68,8 +68,8 @@ static const char * const cma_events[] = { [RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit", }; -static void cma_set_mgid(struct rdma_id_private *id_priv, struct sockaddr *addr, - union ib_gid *mgid); +static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid, + enum ib_gid_type gid_type); const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event) { @@ -775,6 +775,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) unsigned int p; u16 pkey, index; enum ib_port_state port_state; + int ret; int i; cma_dev = NULL; @@ -793,9 +794,14 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) if (ib_get_cached_port_state(cur_dev->device, p, &port_state)) continue; - for (i = 0; !rdma_query_gid(cur_dev->device, - p, i, &gid); - i++) { + + for (i = 0; i < cur_dev->device->port_data[p].immutable.gid_tbl_len; + ++i) { + ret = rdma_query_gid(cur_dev->device, p, i, + &gid); + if (ret) + continue; + if (!memcmp(&gid, dgid, sizeof(gid))) { cma_dev = cur_dev; sgid = gid; @@ -1834,17 +1840,19 @@ static void destroy_mc(struct rdma_id_private *id_priv, if (dev_addr->bound_dev_if) ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if); - if (ndev) 
{ + if (ndev && !send_only) { + enum ib_gid_type gid_type; union ib_gid mgid; - cma_set_mgid(id_priv, (struct sockaddr *)&mc->addr, - &mgid); - - if (!send_only) - cma_igmp_send(ndev, &mgid, false); - - dev_put(ndev); + gid_type = id_priv->cma_dev->default_gid_type + [id_priv->id.port_num - + rdma_start_port( + id_priv->cma_dev->device)]; + cma_iboe_set_mgid((struct sockaddr *)&mc->addr, &mgid, + gid_type); + cma_igmp_send(ndev, &mgid, false); } + dev_put(ndev); cancel_work_sync(&mc->iboe_join.work); } @@ -3313,22 +3321,30 @@ err: static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, const struct sockaddr *dst_addr) { - if (!src_addr || !src_addr->sa_family) { - src_addr = (struct sockaddr *) &id->route.addr.src_addr; - src_addr->sa_family = dst_addr->sa_family; - if (IS_ENABLED(CONFIG_IPV6) && - dst_addr->sa_family == AF_INET6) { - struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr; - struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr; - src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; - if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL) - id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id; - } else if (dst_addr->sa_family == AF_IB) { - ((struct sockaddr_ib *) src_addr)->sib_pkey = - ((struct sockaddr_ib *) dst_addr)->sib_pkey; - } - } - return rdma_bind_addr(id, src_addr); + struct sockaddr_storage zero_sock = {}; + + if (src_addr && src_addr->sa_family) + return rdma_bind_addr(id, src_addr); + + /* + * When the src_addr is not specified, automatically supply an any addr + */ + zero_sock.ss_family = dst_addr->sa_family; + if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) { + struct sockaddr_in6 *src_addr6 = + (struct sockaddr_in6 *)&zero_sock; + struct sockaddr_in6 *dst_addr6 = + (struct sockaddr_in6 *)dst_addr; + + src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; + if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL) + id->route.addr.dev_addr.bound_dev_if = + dst_addr6->sin6_scope_id; + } else if (dst_addr->sa_family == AF_IB) { + ((struct sockaddr_ib *)&zero_sock)->sib_pkey = + ((struct sockaddr_ib *)dst_addr)->sib_pkey; + } + return rdma_bind_addr(id, (struct sockaddr *)&zero_sock); } /* diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 76b9c436edcd2f1ffaf7175d09133a26b0b93141..aa526c5ca0cf3789892cf82310e9708d810c1898 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -2411,7 +2411,8 @@ int ib_find_gid(struct ib_device *device, union ib_gid *gid, ++i) { ret = rdma_query_gid(device, port, i, &tmp_gid); if (ret) - return ret; + continue; + if (!memcmp(&tmp_gid, gid, sizeof *gid)) { *port_num = port; if (index) diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 2cc785c1970b4cce98f669b10be85b9470290dba..d12018c4c86e968f01ee8240c7f870d8f5f40167 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -95,6 +95,7 @@ struct ucma_context { u64 uid; struct list_head list; + struct list_head mc_list; struct work_struct close_work; }; @@ -105,6 +106,7 @@ struct ucma_multicast { u64 uid; u8 join_state; + struct list_head list; struct sockaddr_storage addr; }; @@ -198,6 +200,7 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file) INIT_WORK(&ctx->close_work, ucma_close_id); init_completion(&ctx->comp); + INIT_LIST_HEAD(&ctx->mc_list); /* So list_del() will work if we don't do ucma_finish_ctx() */ INIT_LIST_HEAD(&ctx->list); ctx->file = file; 
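For readers skimming the cma_bind_addr() rework above: the essence of that hunk is to bind through a stack-local, zero-initialised wildcard address of the destination's family instead of pre-filling id->route.addr.src_addr before resolution has succeeded. A plain-sockets analogue of the same idea is sketched below; the helper name bind_any_like() and the use of BSD sockets rather than RDMA CM IDs are illustrative assumptions, not part of this patch.

#include <netinet/in.h>
#include <sys/socket.h>

/* Illustrative sketch only: bind 'fd' to the wildcard address in the
 * same family as 'dst', carrying over the IPv6 link-local scope just
 * as the zero_sock handling in cma_bind_addr() does. */
static int bind_any_like(int fd, const struct sockaddr *dst)
{
	struct sockaddr_storage zero = { 0 };
	socklen_t len;

	zero.ss_family = dst->sa_family;
	if (dst->sa_family == AF_INET6) {
		((struct sockaddr_in6 *)&zero)->sin6_scope_id =
			((const struct sockaddr_in6 *)dst)->sin6_scope_id;
		len = sizeof(struct sockaddr_in6);
	} else {
		len = sizeof(struct sockaddr_in);
	}
	return bind(fd, (struct sockaddr *)&zero, len);
}

The point of the temporary wildcard address is that a failed resolve leaves the original (unbound) state untouched, which is exactly what the kernel hunk restores.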
@@ -484,19 +487,19 @@ err1: static void ucma_cleanup_multicast(struct ucma_context *ctx) { - struct ucma_multicast *mc; - unsigned long index; + struct ucma_multicast *mc, *tmp; - xa_for_each(&multicast_table, index, mc) { - if (mc->ctx != ctx) - continue; + xa_lock(&multicast_table); + list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) { + list_del(&mc->list); /* * At this point mc->ctx->ref is 0 so the mc cannot leave the * lock on the reader and this is enough serialization */ - xa_erase(&multicast_table, index); + __xa_erase(&multicast_table, mc->id); kfree(mc); } + xa_unlock(&multicast_table); } static void ucma_cleanup_mc_events(struct ucma_multicast *mc) @@ -1469,12 +1472,16 @@ static ssize_t ucma_process_join(struct ucma_file *file, mc->uid = cmd->uid; memcpy(&mc->addr, addr, cmd->addr_size); - if (xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b, + xa_lock(&multicast_table); + if (__xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b, GFP_KERNEL)) { ret = -ENOMEM; goto err_free_mc; } + list_add_tail(&mc->list, &ctx->mc_list); + xa_unlock(&multicast_table); + mutex_lock(&ctx->mutex); ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr, join_state, mc); @@ -1500,8 +1507,11 @@ err_leave_multicast: mutex_unlock(&ctx->mutex); ucma_cleanup_mc_events(mc); err_xa_erase: - xa_erase(&multicast_table, mc->id); + xa_lock(&multicast_table); + list_del(&mc->list); + __xa_erase(&multicast_table, mc->id); err_free_mc: + xa_unlock(&multicast_table); kfree(mc); err_put_ctx: ucma_put_ctx(ctx); @@ -1569,15 +1579,17 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file, mc = ERR_PTR(-EINVAL); else if (!refcount_inc_not_zero(&mc->ctx->ref)) mc = ERR_PTR(-ENXIO); - else - __xa_erase(&multicast_table, mc->id); - xa_unlock(&multicast_table); if (IS_ERR(mc)) { + xa_unlock(&multicast_table); ret = PTR_ERR(mc); goto out; } + list_del(&mc->list); + __xa_erase(&multicast_table, mc->id); + xa_unlock(&multicast_table); + mutex_lock(&mc->ctx->mutex); rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr); mutex_unlock(&mc->ctx->mutex); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 441eb421e5e59a7d088611b52ad28c3bc44c8c94..5759027914b01a5badc44866f89ab0d43d51cfa4 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -614,8 +614,6 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res, if (!cmdq->cmdq_bitmap) goto fail; - cmdq->bmap_size = bmap_size; - /* Allocate one extra to hold the QP1 entries */ rcfw->qp_tbl_size = qp_tbl_sz + 1; rcfw->qp_tbl = kcalloc(rcfw->qp_tbl_size, sizeof(struct bnxt_qplib_qp_node), @@ -663,8 +661,8 @@ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) iounmap(cmdq->cmdq_mbox.reg.bar_reg); iounmap(creq->creq_db.reg.bar_reg); - indx = find_first_bit(cmdq->cmdq_bitmap, cmdq->bmap_size); - if (indx != cmdq->bmap_size) + indx = find_first_bit(cmdq->cmdq_bitmap, rcfw->cmdq_depth); + if (indx != rcfw->cmdq_depth) dev_err(&rcfw->pdev->dev, "disabling RCFW with pending cmd-bit %lx\n", indx); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index 5f2f0a5a3560ff89902a04d3390f303c161e7e4a..6953f4e53dd2090e7b4e55e18b6572a8b9eb7971 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h @@ -150,7 +150,6 @@ struct bnxt_qplib_cmdq_ctx { wait_queue_head_t waitq; unsigned long flags; unsigned long *cmdq_bitmap; - u32 
bmap_size; u32 seq_num; }; diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 861e19fdfeb469fff7d75143cfbc80738ae70ecf..12e5461581cb455be2d8f19c26e46568fcc6cd7f 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -2469,6 +2469,7 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, memset(attr, 0, sizeof(*attr)); memset(init_attr, 0, sizeof(*init_attr)); attr->qp_state = to_ib_qp_state(qhp->attr.state); + attr->cur_qp_state = to_ib_qp_state(qhp->attr.state); init_attr->cap.max_send_wr = qhp->attr.sq_num_entries; init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries; init_attr->cap.max_send_sge = qhp->attr.sq_max_sges; diff --git a/drivers/infiniband/hw/hfi1/ipoib_main.c b/drivers/infiniband/hw/hfi1/ipoib_main.c index 9f71b9d706bd9efdf8b7b8c885788aabf155db0c..22299b0b7df0ed54c6d05e365e7d18c4c10a3fa1 100644 --- a/drivers/infiniband/hw/hfi1/ipoib_main.c +++ b/drivers/infiniband/hw/hfi1/ipoib_main.c @@ -185,12 +185,6 @@ static void hfi1_ipoib_netdev_dtor(struct net_device *dev) free_percpu(priv->netstats); } -static void hfi1_ipoib_free_rdma_netdev(struct net_device *dev) -{ - hfi1_ipoib_netdev_dtor(dev); - free_netdev(dev); -} - static void hfi1_ipoib_set_id(struct net_device *dev, int id) { struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev); @@ -227,24 +221,23 @@ static int hfi1_ipoib_setup_rn(struct ib_device *device, priv->port_num = port_num; priv->netdev_ops = netdev->netdev_ops; - netdev->netdev_ops = &hfi1_ipoib_netdev_ops; - ib_query_pkey(device, port_num, priv->pkey_index, &priv->pkey); rc = hfi1_ipoib_txreq_init(priv); if (rc) { dd_dev_err(dd, "IPoIB netdev TX init - failed(%d)\n", rc); - hfi1_ipoib_free_rdma_netdev(netdev); return rc; } rc = hfi1_ipoib_rxq_init(netdev); if (rc) { dd_dev_err(dd, "IPoIB netdev RX init - failed(%d)\n", rc); - hfi1_ipoib_free_rdma_netdev(netdev); + hfi1_ipoib_txreq_deinit(priv); return rc; } + netdev->netdev_ops = &hfi1_ipoib_netdev_ops; + netdev->priv_destructor = hfi1_ipoib_netdev_dtor; netdev->needs_free_netdev = true; diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index ba65823a5c0bbf4497fc283a4328b0cd2de215cd..1e8b3e4ef1b179b6bfb8bba9f9ef318456092ddb 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -279,6 +279,9 @@ static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device, static int hns_roce_query_pkey(struct ib_device *ib_dev, u8 port, u16 index, u16 *pkey) { + if (index > 0) + return -EINVAL; + *pkey = PKEY_ID; return 0; @@ -356,7 +359,7 @@ static int hns_roce_mmap(struct ib_ucontext *context, return rdma_user_mmap_io(context, vma, to_hr_ucontext(context)->uar.pfn, PAGE_SIZE, - pgprot_noncached(vma->vm_page_prot), + pgprot_device(vma->vm_page_prot), NULL); /* vm_pgoff: 1 -- TPTR */ diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 7b11aff8a5ea7cb0d37f1d09149468b10b4882d6..05c7200751e50a834ac948a9c690b0a9b7cf68f5 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -3273,7 +3273,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, case MLX4_DEV_EVENT_PORT_MGMT_CHANGE: ew = kmalloc(sizeof *ew, GFP_ATOMIC); if (!ew) - break; + return; INIT_WORK(&ew->work, handle_port_mgmt_change_event); memcpy(&ew->ib_eqe, eqe, sizeof *eqe); diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 
16d52836518943775e4d49b68380345929621425..eeb87f31cd2523f71bf5f141fa7f7c5d8ab9a909 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -1918,6 +1918,7 @@ static int qedr_create_user_qp(struct qedr_dev *dev, /* db offset was calculated in copy_qp_uresp, now set in the user q */ if (qedr_qp_has_sq(qp)) { qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset; + qp->sq.max_wr = attrs->cap.max_send_wr; rc = qedr_db_recovery_add(dev, qp->usq.db_addr, &qp->usq.db_rec_data->db_data, DB_REC_WIDTH_32B, @@ -1928,6 +1929,7 @@ static int qedr_create_user_qp(struct qedr_dev *dev, if (qedr_qp_has_rq(qp)) { qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset; + qp->rq.max_wr = attrs->cap.max_recv_wr; rc = qedr_db_recovery_add(dev, qp->urq.db_addr, &qp->urq.db_rec_data->db_data, DB_REC_WIDTH_32B, diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index ee48befc8978619b477cfa7325d66be1f622773f..09f0dbf941c067d3431ef8deec1f1a86c6a96de0 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -3124,6 +3124,8 @@ do_write: case IB_WR_ATOMIC_FETCH_AND_ADD: if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) goto inv_err; + if (unlikely(wqe->atomic_wr.remote_addr & (sizeof(u64) - 1))) + goto inv_err; if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), wqe->atomic_wr.remote_addr, wqe->atomic_wr.rkey, diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.c b/drivers/infiniband/sw/rxe/rxe_opcode.c index 0cb4b01fd9101e235bdb39052e5dc17c7b1ecd83..66ffb516bdaf0c1625b50d15d8686b0e9d9527c7 100644 --- a/drivers/infiniband/sw/rxe/rxe_opcode.c +++ b/drivers/infiniband/sw/rxe/rxe_opcode.c @@ -110,7 +110,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { } }, [IB_OPCODE_RC_SEND_MIDDLE] = { - .name = "IB_OPCODE_RC_SEND_MIDDLE]", + .name = "IB_OPCODE_RC_SEND_MIDDLE", .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK | RXE_MIDDLE_MASK, .length = RXE_BTH_BYTES, diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h index 368959ae9a8cc382bb0bf89f66bce731ddbf3f24..df03d84c6868ace3d5fe68c47e62cdd33e8b44d2 100644 --- a/drivers/infiniband/sw/siw/siw.h +++ b/drivers/infiniband/sw/siw/siw.h @@ -644,14 +644,9 @@ static inline struct siw_sqe *orq_get_current(struct siw_qp *qp) return &qp->orq[qp->orq_get % qp->attrs.orq_size]; } -static inline struct siw_sqe *orq_get_tail(struct siw_qp *qp) -{ - return &qp->orq[qp->orq_put % qp->attrs.orq_size]; -} - static inline struct siw_sqe *orq_get_free(struct siw_qp *qp) { - struct siw_sqe *orq_e = orq_get_tail(qp); + struct siw_sqe *orq_e = &qp->orq[qp->orq_put % qp->attrs.orq_size]; if (READ_ONCE(orq_e->flags) == 0) return orq_e; diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c index 60116f20653c776c3de38cbb3c954d9bdffbb112..875ea6f1b04a29034b6b41f8bfedaefc01af28c6 100644 --- a/drivers/infiniband/sw/siw/siw_qp_rx.c +++ b/drivers/infiniband/sw/siw/siw_qp_rx.c @@ -1153,11 +1153,12 @@ static int siw_check_tx_fence(struct siw_qp *qp) spin_lock_irqsave(&qp->orq_lock, flags); - rreq = orq_get_current(qp); - /* free current orq entry */ + rreq = orq_get_current(qp); WRITE_ONCE(rreq->flags, 0); + qp->orq_get++; + if (qp->tx_ctx.orq_fence) { if (unlikely(tx_waiting->wr_status != SIW_WR_QUEUED)) { pr_warn("siw: [QP %u]: fence resume: bad status %d\n", @@ -1165,10 +1166,12 @@ static int siw_check_tx_fence(struct siw_qp *qp) rv = -EPROTO; goto out; } - /* resume SQ processing */ + /* resume SQ processing, if 
possible */ if (tx_waiting->sqe.opcode == SIW_OP_READ || tx_waiting->sqe.opcode == SIW_OP_READ_LOCAL_INV) { - rreq = orq_get_tail(qp); + + /* SQ processing was stopped because of a full ORQ */ + rreq = orq_get_free(qp); if (unlikely(!rreq)) { pr_warn("siw: [QP %u]: no ORQE\n", qp_id(qp)); rv = -EPROTO; @@ -1181,15 +1184,14 @@ static int siw_check_tx_fence(struct siw_qp *qp) resume_tx = 1; } else if (siw_orq_empty(qp)) { + /* + * SQ processing was stopped by fenced work request. + * Resume since all previous Read's are now completed. + */ qp->tx_ctx.orq_fence = 0; resume_tx = 1; - } else { - pr_warn("siw: [QP %u]: fence resume: orq idx: %d:%d\n", - qp_id(qp), qp->orq_get, qp->orq_put); - rv = -EPROTO; } } - qp->orq_get++; out: spin_unlock_irqrestore(&qp->orq_lock, flags); diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c index 46fad202a380e6043a0d94066827ccf099e953a3..13634eda833de3d2729fba2b9ada55c721276da9 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c @@ -1328,6 +1328,12 @@ out_err: static void free_permits(struct rtrs_clt *clt) { + if (clt->permits_map) { + size_t sz = clt->queue_depth; + + wait_event(clt->permits_wait, + find_first_bit(clt->permits_map, sz) >= sz); + } kfree(clt->permits_map); clt->permits_map = NULL; kfree(clt->permits); @@ -2540,6 +2546,8 @@ static void rtrs_clt_dev_release(struct device *dev) { struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev); + mutex_destroy(&clt->paths_ev_mutex); + mutex_destroy(&clt->paths_mutex); kfree(clt); } @@ -2571,6 +2579,8 @@ static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num, return ERR_PTR(-ENOMEM); } + clt->dev.class = rtrs_clt_dev_class; + clt->dev.release = rtrs_clt_dev_release; uuid_gen(&clt->paths_uuid); INIT_LIST_HEAD_RCU(&clt->paths_list); clt->paths_num = paths_num; @@ -2588,64 +2598,51 @@ static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num, init_waitqueue_head(&clt->permits_wait); mutex_init(&clt->paths_ev_mutex); mutex_init(&clt->paths_mutex); + device_initialize(&clt->dev); - clt->dev.class = rtrs_clt_dev_class; - clt->dev.release = rtrs_clt_dev_release; err = dev_set_name(&clt->dev, "%s", sessname); if (err) - goto err; + goto err_put; + /* * Suppress user space notification until * sysfs files are created */ dev_set_uevent_suppress(&clt->dev, true); - err = device_register(&clt->dev); - if (err) { - put_device(&clt->dev); - goto err; - } + err = device_add(&clt->dev); + if (err) + goto err_put; clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj); if (!clt->kobj_paths) { err = -ENOMEM; - goto err_dev; + goto err_del; } err = rtrs_clt_create_sysfs_root_files(clt); if (err) { kobject_del(clt->kobj_paths); kobject_put(clt->kobj_paths); - goto err_dev; + goto err_del; } dev_set_uevent_suppress(&clt->dev, false); kobject_uevent(&clt->dev.kobj, KOBJ_ADD); return clt; -err_dev: - device_unregister(&clt->dev); -err: +err_del: + device_del(&clt->dev); +err_put: free_percpu(clt->pcpu_path); - kfree(clt); + put_device(&clt->dev); return ERR_PTR(err); } -static void wait_for_inflight_permits(struct rtrs_clt *clt) -{ - if (clt->permits_map) { - size_t sz = clt->queue_depth; - - wait_event(clt->permits_wait, - find_first_bit(clt->permits_map, sz) >= sz); - } -} - static void free_clt(struct rtrs_clt *clt) { - wait_for_inflight_permits(clt); - free_permits(clt); free_percpu(clt->pcpu_path); - mutex_destroy(&clt->paths_ev_mutex); - mutex_destroy(&clt->paths_mutex); - /* release 
callback will free clt in last put */ + + /* + * release callback will free clt and destroy mutexes in last put + */ device_unregister(&clt->dev); } @@ -2761,6 +2758,7 @@ void rtrs_clt_close(struct rtrs_clt *clt) rtrs_clt_destroy_sess_files(sess, NULL); kobject_put(&sess->kobj); } + free_permits(clt); free_clt(clt); } EXPORT_SYMBOL(rtrs_clt_close); diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 86d5c4c92b363c4fc6fb9832ca7da626496a7200..b4ccb333a834201e0e67cac4337383293cacd780 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -4045,9 +4045,11 @@ static void srp_remove_one(struct ib_device *device, void *client_data) spin_unlock(&host->target_lock); /* - * Wait for tl_err and target port removal tasks. + * srp_queue_remove_work() queues a call to + * srp_remove_target(). The latter function cancels + * target->tl_err_work so waiting for the remove works to + * finish is sufficient. */ - flush_workqueue(system_long_wq); flush_workqueue(srp_remove_wq); kfree(host); diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index 28de889aa516408a9b398449beda05706b0a8749..502e6532dd549477e851f03e9f43766ae27f6c24 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -805,16 +806,27 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu) { #ifdef CONFIG_IRQ_REMAP u32 status, i; + u64 entry; if (!iommu->ga_log) return -EINVAL; - status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); - /* Check if already running */ - if (status & (MMIO_STATUS_GALOG_RUN_MASK)) + status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); + if (WARN_ON(status & (MMIO_STATUS_GALOG_RUN_MASK))) return 0; + entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512; + memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET, + &entry, sizeof(entry)); + entry = (iommu_virt_to_phys(iommu->ga_log_tail) & + (BIT_ULL(52)-1)) & ~7ULL; + memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET, + &entry, sizeof(entry)); + writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); + writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET); + + iommu_feature_enable(iommu, CONTROL_GAINT_EN); iommu_feature_enable(iommu, CONTROL_GALOG_EN); @@ -822,19 +834,18 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu) status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); if (status & (MMIO_STATUS_GALOG_RUN_MASK)) break; + udelay(10); } - if (i >= LOOP_TIMEOUT) + if (WARN_ON(i >= LOOP_TIMEOUT)) return -EINVAL; #endif /* CONFIG_IRQ_REMAP */ return 0; } -#ifdef CONFIG_IRQ_REMAP static int iommu_init_ga_log(struct amd_iommu *iommu) { - u64 entry; - +#ifdef CONFIG_IRQ_REMAP if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) return 0; @@ -848,32 +859,13 @@ static int iommu_init_ga_log(struct amd_iommu *iommu) if (!iommu->ga_log_tail) goto err_out; - entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512; - memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET, - &entry, sizeof(entry)); - entry = (iommu_virt_to_phys(iommu->ga_log_tail) & - (BIT_ULL(52)-1)) & ~7ULL; - memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET, - &entry, sizeof(entry)); - writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); - writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET); - return 0; err_out: free_ga_log(iommu); return -EINVAL; -} -#endif /* CONFIG_IRQ_REMAP */ - -static int iommu_init_ga(struct amd_iommu *iommu) -{ - int ret = 0; - -#ifdef CONFIG_IRQ_REMAP - ret = 
iommu_init_ga_log(iommu); +#else + return 0; #endif /* CONFIG_IRQ_REMAP */ - - return ret; } static int __init alloc_cwwb_sem(struct amd_iommu *iommu) @@ -1860,7 +1852,7 @@ static int __init iommu_init_pci(struct amd_iommu *iommu) if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu)) return -ENOMEM; - ret = iommu_init_ga(iommu); + ret = iommu_init_ga_log(iommu); if (ret) return ret; diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c index aedaae4630bc8117ed9432daf2d7ecaffd28c773..b853888774e65511ae957b542e30139059c09c3e 100644 --- a/drivers/iommu/intel/irq_remapping.c +++ b/drivers/iommu/intel/irq_remapping.c @@ -576,9 +576,8 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu) fn, &intel_ir_domain_ops, iommu); if (!iommu->ir_domain) { - irq_domain_free_fwnode(fn); pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id); - goto out_free_bitmap; + goto out_free_fwnode; } iommu->ir_msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain, @@ -602,7 +601,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu) if (dmar_enable_qi(iommu)) { pr_err("Failed to enable queued invalidation\n"); - goto out_free_bitmap; + goto out_free_ir_domain; } } @@ -626,6 +625,14 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu) return 0; +out_free_ir_domain: + if (iommu->ir_msi_domain) + irq_domain_remove(iommu->ir_msi_domain); + iommu->ir_msi_domain = NULL; + irq_domain_remove(iommu->ir_domain); + iommu->ir_domain = NULL; +out_free_fwnode: + irq_domain_free_fwnode(fn); out_free_bitmap: bitmap_free(bitmap); out_free_pages: diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index a688f22cbe3b590cce25bf1ca4e490d4a8c8bdb0..3bcd3afe977839aa1d20c05ff13e9ba8ff167862 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -242,13 +242,17 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp, __GFP_ZERO | ARM_V7S_TABLE_GFP_DMA, get_order(size)); else if (lvl == 2) table = kmem_cache_zalloc(data->l2_tables, gfp); + + if (!table) + return NULL; + phys = virt_to_phys(table); if (phys != (arm_v7s_iopte)phys) { /* Doesn't fit in PTE */ dev_err(dev, "Page table does not fit in PTE: %pa", &phys); goto out_free; } - if (table && !cfg->coherent_walk) { + if (!cfg->coherent_walk) { dma = dma_map_single(dev, table, size, DMA_TO_DEVICE); if (dma_mapping_error(dev, dma)) goto out_free; diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index bcfbd0e44a4a07fb40b27aeb422b09a08adcfd63..e1cd31c0e3c195f944cac59576b2b36a88573bca 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -302,11 +302,12 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table, arm_lpae_iopte *ptep, arm_lpae_iopte curr, - struct io_pgtable_cfg *cfg) + struct arm_lpae_io_pgtable *data) { arm_lpae_iopte old, new; + struct io_pgtable_cfg *cfg = &data->iop.cfg; - new = __pa(table) | ARM_LPAE_PTE_TYPE_TABLE; + new = paddr_to_iopte(__pa(table), data) | ARM_LPAE_PTE_TYPE_TABLE; if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) new |= ARM_LPAE_PTE_NSTABLE; @@ -357,7 +358,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, if (!cptep) return -ENOMEM; - pte = arm_lpae_install_table(cptep, ptep, 0, cfg); + pte = arm_lpae_install_table(cptep, ptep, 0, data); if (pte) __arm_lpae_free_pages(cptep, tblsz, cfg); } else if (!cfg->coherent_walk && !(pte & 
ARM_LPAE_PTE_SW_SYNC)) { @@ -546,7 +547,7 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, __arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i]); } - pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg); + pte = arm_lpae_install_table(tablep, ptep, blk_pte, data); if (pte != blk_pte) { __arm_lpae_free_pages(tablep, tablesz, cfg); /* diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index bcf060b5cf85bed978bdf49a3c851b26d92b1967..9d65557dfb2ce2140f8fc624b5b913aa92c17894 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -185,9 +185,14 @@ static struct dev_iommu *dev_iommu_get(struct device *dev) static void dev_iommu_free(struct device *dev) { - iommu_fwspec_free(dev); - kfree(dev->iommu); + struct dev_iommu *param = dev->iommu; + dev->iommu = NULL; + if (param->fwspec) { + fwnode_handle_put(param->fwspec->iommu_fwnode); + kfree(param->fwspec); + } + kfree(param); } static int __iommu_probe_device(struct device *dev, struct list_head *group_list) diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index 30d969a4c5fde811ded13735a806eca79af45ce5..1164d1a42cbc5dff86d85cbb0f397c020e5e8b84 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -64,8 +64,7 @@ static void free_iova_flush_queue(struct iova_domain *iovad) if (!has_iova_flush_queue(iovad)) return; - if (timer_pending(&iovad->fq_timer)) - del_timer(&iovad->fq_timer); + del_timer_sync(&iovad->fq_timer); fq_destroy_all_entries(iovad); diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 1bdb7acf445f4aa78278f8bff608aa47b0d9a1b6..04d1b3963b6ba8aa10288cebd7f8ba405fe2cc44 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -915,6 +915,22 @@ static int __gic_update_rdist_properties(struct redist_region *region, { u64 typer = gic_read_typer(ptr + GICR_TYPER); + /* Boot-time cleanup */ + if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) { + u64 val; + + /* Deactivate any present vPE */ + val = gicr_read_vpendbaser(ptr + SZ_128K + GICR_VPENDBASER); + if (val & GICR_VPENDBASER_Valid) + gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast, + ptr + SZ_128K + GICR_VPENDBASER); + + /* Mark the VPE table as invalid */ + val = gicr_read_vpropbaser(ptr + SZ_128K + GICR_VPROPBASER); + val &= ~GICR_VPROPBASER_4_1_VALID; + gicr_write_vpropbaser(val, ptr + SZ_128K + GICR_VPROPBASER); + } + gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS); /* RVPEID implies some form of DirectLPI, no matter what the doc says... 
:-/ */ diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c index 926e55d838cb1a3d176ff605546bc0fa60f6d825..bd99ee0ae433d9d0a014831c61c3f7119409d326 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -400,3 +400,4 @@ out_free_priv: IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init); IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */ +IRQCHIP_DECLARE(thead_c900_plic, "thead,c900-plic", plic_init); /* for firmware driver */ diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 19a70f434029b53ce90161c46f14aa6a96a50cbf..6030cba5b0382fe4b9c18a0569a48994f4e9a148 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1894,8 +1894,10 @@ static struct mapped_device *alloc_dev(int minor) if (IS_ENABLED(CONFIG_DAX_DRIVER)) { md->dax_dev = alloc_dax(md, md->disk->disk_name, &dm_dax_ops, 0); - if (IS_ERR(md->dax_dev)) + if (IS_ERR(md->dax_dev)) { + md->dax_dev = NULL; goto bad; + } } add_disk_no_queue_reg(md->disk); diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c index ef6e78d45d5b8bfe8e63200611b97131bbebd45e..ee3e63aa864bfd9ffbf79282018025e781505dd8 100644 --- a/drivers/md/persistent-data/dm-btree.c +++ b/drivers/md/persistent-data/dm-btree.c @@ -83,14 +83,16 @@ void inc_children(struct dm_transaction_manager *tm, struct btree_node *n, } static int insert_at(size_t value_size, struct btree_node *node, unsigned index, - uint64_t key, void *value) - __dm_written_to_disk(value) + uint64_t key, void *value) + __dm_written_to_disk(value) { uint32_t nr_entries = le32_to_cpu(node->header.nr_entries); + uint32_t max_entries = le32_to_cpu(node->header.max_entries); __le64 key_le = cpu_to_le64(key); if (index > nr_entries || - index >= le32_to_cpu(node->header.max_entries)) { + index >= max_entries || + nr_entries >= max_entries) { DMERR("too many entries in btree node for insert"); __dm_unbless_for_disk(value); return -ENOMEM; diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c index a213bf11738fbbb10448d09abfb8d532679f011e..85853ab6297171c755114929712322fc682170c3 100644 --- a/drivers/md/persistent-data/dm-space-map-common.c +++ b/drivers/md/persistent-data/dm-space-map-common.c @@ -281,6 +281,11 @@ int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result) struct disk_index_entry ie_disk; struct dm_block *blk; + if (b >= ll->nr_blocks) { + DMERR_LIMIT("metadata block out of bounds"); + return -EINVAL; + } + b = do_div(index, ll->entries_per_block); r = ll->load_ie(ll, index, &ie_disk); if (r < 0) diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig index a6d073f2e036aa34e6a3fd0b14f3a6755544c1a8..d157af63be417ab40fc85eb60b04838e5c90f020 100644 --- a/drivers/media/Kconfig +++ b/drivers/media/Kconfig @@ -142,10 +142,10 @@ config MEDIA_TEST_SUPPORT prompt "Test drivers" if MEDIA_SUPPORT_FILTER default y if !MEDIA_SUPPORT_FILTER help - Those drivers should not be used on production Kernels, but - can be useful on debug ones. It enables several dummy drivers - that simulate a real hardware. Very useful to test userspace - applications and to validate if the subsystem core is doesn't + These drivers should not be used on production kernels, but + can be useful on debug ones. This option enables several dummy drivers + that simulate real hardware. Very useful to test userspace + applications and to validate if the subsystem core doesn't have regressions. 
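To make the reworded MEDIA_TEST_SUPPORT help text above concrete, a debug build might enable the virtual drivers it alludes to with a fragment like the following; the specific selection (vivid for V4L2, vidtv for DVB) is an assumed typical test setup, not something required by this hunk.

# Debug/test configuration sketch only, not for production kernels
CONFIG_MEDIA_SUPPORT=y
CONFIG_MEDIA_TEST_SUPPORT=y
CONFIG_V4L_TEST_DRIVERS=y
CONFIG_VIDEO_VIVID=m
CONFIG_DVB_TEST_DRIVERS=y
CONFIG_DVB_VIDTV=m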
Say Y if you want to use some virtual test driver. diff --git a/drivers/media/cec/core/cec-pin.c b/drivers/media/cec/core/cec-pin.c index f006bd8eec63c0808f8dd09fb3ced709ae3642a5..f8452a1f9fc6c13bd14a22ec1b2679d8b3b78712 100644 --- a/drivers/media/cec/core/cec-pin.c +++ b/drivers/media/cec/core/cec-pin.c @@ -1033,6 +1033,7 @@ static int cec_pin_thread_func(void *_adap) { struct cec_adapter *adap = _adap; struct cec_pin *pin = adap->pin; + bool irq_enabled = false; for (;;) { wait_event_interruptible(pin->kthread_waitq, @@ -1060,6 +1061,7 @@ static int cec_pin_thread_func(void *_adap) ns_to_ktime(pin->work_rx_msg.rx_ts)); msg->len = 0; } + if (pin->work_tx_status) { unsigned int tx_status = pin->work_tx_status; @@ -1083,27 +1085,39 @@ static int cec_pin_thread_func(void *_adap) switch (atomic_xchg(&pin->work_irq_change, CEC_PIN_IRQ_UNCHANGED)) { case CEC_PIN_IRQ_DISABLE: - pin->ops->disable_irq(adap); + if (irq_enabled) { + pin->ops->disable_irq(adap); + irq_enabled = false; + } cec_pin_high(pin); cec_pin_to_idle(pin); hrtimer_start(&pin->timer, ns_to_ktime(0), HRTIMER_MODE_REL); break; case CEC_PIN_IRQ_ENABLE: + if (irq_enabled) + break; pin->enable_irq_failed = !pin->ops->enable_irq(adap); if (pin->enable_irq_failed) { cec_pin_to_idle(pin); hrtimer_start(&pin->timer, ns_to_ktime(0), HRTIMER_MODE_REL); + } else { + irq_enabled = true; } break; default: break; } - if (kthread_should_stop()) break; } + if (pin->ops->disable_irq && irq_enabled) + pin->ops->disable_irq(adap); + hrtimer_cancel(&pin->timer); + cec_pin_read(pin); + cec_pin_to_idle(pin); + pin->state = CEC_ST_OFF; return 0; } @@ -1130,13 +1144,7 @@ static int cec_pin_adap_enable(struct cec_adapter *adap, bool enable) hrtimer_start(&pin->timer, ns_to_ktime(0), HRTIMER_MODE_REL); } else { - if (pin->ops->disable_irq) - pin->ops->disable_irq(adap); - hrtimer_cancel(&pin->timer); kthread_stop(pin->kthread); - cec_pin_read(pin); - cec_pin_to_idle(pin); - pin->state = CEC_ST_OFF; } return 0; } @@ -1157,11 +1165,8 @@ void cec_pin_start_timer(struct cec_pin *pin) if (pin->state != CEC_ST_RX_IRQ) return; - atomic_set(&pin->work_irq_change, CEC_PIN_IRQ_UNCHANGED); - pin->ops->disable_irq(pin->adap); - cec_pin_high(pin); - cec_pin_to_idle(pin); - hrtimer_start(&pin->timer, ns_to_ktime(0), HRTIMER_MODE_REL); + atomic_set(&pin->work_irq_change, CEC_PIN_IRQ_DISABLE); + wake_up_interruptible(&pin->kthread_waitq); } static int cec_pin_adap_transmit(struct cec_adapter *adap, u8 attempts, diff --git a/drivers/media/common/saa7146/saa7146_fops.c b/drivers/media/common/saa7146/saa7146_fops.c index d6531874faa650f0e8367ac8d37e0490e299b7db..8047e305f3d014c768c91e83e3ff491409ea8460 100644 --- a/drivers/media/common/saa7146/saa7146_fops.c +++ b/drivers/media/common/saa7146/saa7146_fops.c @@ -523,7 +523,7 @@ int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv) ERR("out of memory. 
aborting.\n"); kfree(vv); v4l2_ctrl_handler_free(hdl); - return -1; + return -ENOMEM; } saa7146_video_uops.init(dev,vv); diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c index 2f3a5996d3fc90a62e01f1e6c62876ace0c75ddb..fe626109ef4db62b381d5c0959fa0a693bea5d5e 100644 --- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c +++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c @@ -150,7 +150,7 @@ static void *vb2_dc_alloc(struct device *dev, unsigned long attrs, buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr, GFP_KERNEL | gfp_flags, buf->attrs); if (!buf->cookie) { - dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size); + dev_err(dev, "dma_alloc_coherent of size %lu failed\n", size); kfree(buf); return ERR_PTR(-ENOMEM); } @@ -196,9 +196,9 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma) vma->vm_ops->open(vma); - pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n", - __func__, (unsigned long)buf->dma_addr, vma->vm_start, - buf->size); + pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %lu\n", + __func__, (unsigned long)buf->dma_addr, vma->vm_start, + buf->size); return 0; } diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c index f14a872d126872c3fc695cbd3f8460c32d317b14..e58cb8434dafeea3323ea892028be6ffde3ca155 100644 --- a/drivers/media/dvb-core/dmxdev.c +++ b/drivers/media/dvb-core/dmxdev.c @@ -1413,7 +1413,7 @@ static const struct dvb_device dvbdev_dvr = { }; int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *dvb_adapter) { - int i; + int i, ret; if (dmxdev->demux->open(dmxdev->demux) < 0) return -EUSERS; @@ -1432,14 +1432,26 @@ int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *dvb_adapter) DMXDEV_STATE_FREE); } - dvb_register_device(dvb_adapter, &dmxdev->dvbdev, &dvbdev_demux, dmxdev, + ret = dvb_register_device(dvb_adapter, &dmxdev->dvbdev, &dvbdev_demux, dmxdev, DVB_DEVICE_DEMUX, dmxdev->filternum); - dvb_register_device(dvb_adapter, &dmxdev->dvr_dvbdev, &dvbdev_dvr, + if (ret < 0) + goto err_register_dvbdev; + + ret = dvb_register_device(dvb_adapter, &dmxdev->dvr_dvbdev, &dvbdev_dvr, dmxdev, DVB_DEVICE_DVR, dmxdev->filternum); + if (ret < 0) + goto err_register_dvr_dvbdev; dvb_ringbuffer_init(&dmxdev->dvr_buffer, NULL, 8192); return 0; + +err_register_dvr_dvbdev: + dvb_unregister_device(dmxdev->dvbdev); +err_register_dvbdev: + vfree(dmxdev->filter); + dmxdev->filter = NULL; + return ret; } EXPORT_SYMBOL(dvb_dmxdev_init); diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c index bb02354a48b81b4c6c6ff3c23558eb1df31afc34..d67f2dd997d06188bba43fb266829ffd298e8a82 100644 --- a/drivers/media/dvb-frontends/dib8000.c +++ b/drivers/media/dvb-frontends/dib8000.c @@ -4473,8 +4473,10 @@ static struct dvb_frontend *dib8000_init(struct i2c_adapter *i2c_adap, u8 i2c_ad state->timf_default = cfg->pll->timf; - if (dib8000_identify(&state->i2c) == 0) + if (dib8000_identify(&state->i2c) == 0) { + kfree(fe); goto error; + } dibx000_init_i2c_master(&state->i2c_master, DIB8000, state->i2c.adap, state->i2c.addr); diff --git a/drivers/media/pci/b2c2/flexcop-pci.c b/drivers/media/pci/b2c2/flexcop-pci.c index a9d9520a94c6d87437aa6e839099e569b28f91c8..c9e6c7d6637682cbf53c651a604b79cd9e0824df 100644 --- a/drivers/media/pci/b2c2/flexcop-pci.c +++ b/drivers/media/pci/b2c2/flexcop-pci.c @@ -185,6 +185,8 @@ static irqreturn_t flexcop_pci_isr(int irq, void *dev_id) dma_addr_t cur_addr = 
fc->read_ibi_reg(fc,dma1_008).dma_0x8.dma_cur_addr << 2; u32 cur_pos = cur_addr - fc_pci->dma[0].dma_addr0; + if (cur_pos > fc_pci->dma[0].size * 2) + goto error; deb_irq("%u irq: %08x cur_addr: %llx: cur_pos: %08x, last_cur_pos: %08x ", jiffies_to_usecs(jiffies - fc_pci->last_irq), @@ -225,6 +227,7 @@ static irqreturn_t flexcop_pci_isr(int irq, void *dev_id) ret = IRQ_NONE; } +error: spin_unlock_irqrestore(&fc_pci->irq_lock, flags); return ret; } diff --git a/drivers/media/pci/saa7146/hexium_gemini.c b/drivers/media/pci/saa7146/hexium_gemini.c index 2214c74bbbf1548b12b8122827a25c0b61a45756..3947701cd6c7e76835d3d8e58321a5a75c7d9c46 100644 --- a/drivers/media/pci/saa7146/hexium_gemini.c +++ b/drivers/media/pci/saa7146/hexium_gemini.c @@ -284,7 +284,12 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d hexium_set_input(hexium, 0); hexium->cur_input = 0; - saa7146_vv_init(dev, &vv_data); + ret = saa7146_vv_init(dev, &vv_data); + if (ret) { + i2c_del_adapter(&hexium->i2c_adapter); + kfree(hexium); + return ret; + } vv_data.vid_ops.vidioc_enum_input = vidioc_enum_input; vv_data.vid_ops.vidioc_g_input = vidioc_g_input; diff --git a/drivers/media/pci/saa7146/hexium_orion.c b/drivers/media/pci/saa7146/hexium_orion.c index 39d14c179d229da31c1d76a7c067b56a24c2883d..2eb4bee16b71f28175854cc636472c903f0f4228 100644 --- a/drivers/media/pci/saa7146/hexium_orion.c +++ b/drivers/media/pci/saa7146/hexium_orion.c @@ -355,10 +355,16 @@ static struct saa7146_ext_vv vv_data; static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data *info) { struct hexium *hexium = (struct hexium *) dev->ext_priv; + int ret; DEB_EE("\n"); - saa7146_vv_init(dev, &vv_data); + ret = saa7146_vv_init(dev, &vv_data); + if (ret) { + pr_err("Error in saa7146_vv_init()\n"); + return ret; + } + vv_data.vid_ops.vidioc_enum_input = vidioc_enum_input; vv_data.vid_ops.vidioc_g_input = vidioc_g_input; vv_data.vid_ops.vidioc_s_input = vidioc_s_input; diff --git a/drivers/media/pci/saa7146/mxb.c b/drivers/media/pci/saa7146/mxb.c index 73fc901ecf3db5ecd7312bb29af338886717244a..bf0b9b0914cd53cddf78c56eb4db2099302f7742 100644 --- a/drivers/media/pci/saa7146/mxb.c +++ b/drivers/media/pci/saa7146/mxb.c @@ -683,10 +683,16 @@ static struct saa7146_ext_vv vv_data; static int mxb_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data *info) { struct mxb *mxb; + int ret; DEB_EE("dev:%p\n", dev); - saa7146_vv_init(dev, &vv_data); + ret = saa7146_vv_init(dev, &vv_data); + if (ret) { + ERR("Error in saa7146_vv_init()"); + return ret; + } + if (mxb_probe(dev)) { saa7146_vv_release(dev); return -1; diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c index 7bb6babdcade049bf7e6a806d19026a687f15a31..debc7509c173c41e218bbebbebeeeb9f4fc7ca1e 100644 --- a/drivers/media/platform/aspeed-video.c +++ b/drivers/media/platform/aspeed-video.c @@ -500,6 +500,10 @@ static void aspeed_video_enable_mode_detect(struct aspeed_video *video) aspeed_video_update(video, VE_INTERRUPT_CTRL, 0, VE_INTERRUPT_MODE_DETECT); + /* Disable mode detect in order to re-trigger */ + aspeed_video_update(video, VE_SEQ_CTRL, + VE_SEQ_CTRL_TRIG_MODE_DET, 0); + /* Trigger mode detect */ aspeed_video_update(video, VE_SEQ_CTRL, 0, VE_SEQ_CTRL_TRIG_MODE_DET); } @@ -552,6 +556,8 @@ static void aspeed_video_irq_res_change(struct aspeed_video *video, ulong delay) set_bit(VIDEO_RES_CHANGE, &video->flags); clear_bit(VIDEO_FRAME_INPRG, &video->flags); + video->v4l2_input_status = V4L2_IN_ST_NO_SIGNAL; 
+ aspeed_video_off(video); aspeed_video_bufs_done(video, VB2_BUF_STATE_ERROR); @@ -786,10 +792,6 @@ static void aspeed_video_get_resolution(struct aspeed_video *video) return; } - /* Disable mode detect in order to re-trigger */ - aspeed_video_update(video, VE_SEQ_CTRL, - VE_SEQ_CTRL_TRIG_MODE_DET, 0); - aspeed_video_check_and_set_polarity(video); aspeed_video_enable_mode_detect(video); @@ -1337,7 +1339,6 @@ static void aspeed_video_resolution_work(struct work_struct *work) struct delayed_work *dwork = to_delayed_work(work); struct aspeed_video *video = container_of(dwork, struct aspeed_video, res_work); - u32 input_status = video->v4l2_input_status; aspeed_video_on(video); @@ -1350,8 +1351,7 @@ static void aspeed_video_resolution_work(struct work_struct *work) aspeed_video_get_resolution(video); if (video->detected_timings.width != video->active_timings.width || - video->detected_timings.height != video->active_timings.height || - input_status != video->v4l2_input_status) { + video->detected_timings.height != video->active_timings.height) { static const struct v4l2_event ev = { .type = V4L2_EVENT_SOURCE_CHANGE, .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION, diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c index 87a2c706f7477db4003ebcfe0c6578a34a4677b0..1eed69d29149f34c43fd155ff957c157623deac4 100644 --- a/drivers/media/platform/coda/coda-common.c +++ b/drivers/media/platform/coda/coda-common.c @@ -1537,11 +1537,13 @@ static void coda_pic_run_work(struct work_struct *work) if (!wait_for_completion_timeout(&ctx->completion, msecs_to_jiffies(1000))) { - dev_err(dev->dev, "CODA PIC_RUN timeout\n"); + if (ctx->use_bit) { + dev_err(dev->dev, "CODA PIC_RUN timeout\n"); - ctx->hold = true; + ctx->hold = true; - coda_hw_reset(ctx); + coda_hw_reset(ctx); + } if (ctx->ops->run_timeout) ctx->ops->run_timeout(ctx); diff --git a/drivers/media/platform/coda/coda-jpeg.c b/drivers/media/platform/coda/coda-jpeg.c index b11cfbe166dd3141568c10a16d02406385fa8f80..a72f4655e5ad56312d8fe4c113452f5b3baebe86 100644 --- a/drivers/media/platform/coda/coda-jpeg.c +++ b/drivers/media/platform/coda/coda-jpeg.c @@ -1127,7 +1127,8 @@ static int coda9_jpeg_prepare_encode(struct coda_ctx *ctx) coda_write(dev, 0, CODA9_REG_JPEG_GBU_BT_PTR); coda_write(dev, 0, CODA9_REG_JPEG_GBU_WD_PTR); coda_write(dev, 0, CODA9_REG_JPEG_GBU_BBSR); - coda_write(dev, 0, CODA9_REG_JPEG_BBC_STRM_CTRL); + coda_write(dev, BIT(31) | ((end_addr - start_addr - header_len) / 256), + CODA9_REG_JPEG_BBC_STRM_CTRL); coda_write(dev, 0, CODA9_REG_JPEG_GBU_CTRL); coda_write(dev, 0, CODA9_REG_JPEG_GBU_FF_RPTR); coda_write(dev, 127, CODA9_REG_JPEG_GBU_BBER); @@ -1257,6 +1258,23 @@ static void coda9_jpeg_finish_encode(struct coda_ctx *ctx) coda_hw_reset(ctx); } +static void coda9_jpeg_encode_timeout(struct coda_ctx *ctx) +{ + struct coda_dev *dev = ctx->dev; + u32 end_addr, wr_ptr; + + /* Handle missing BBC overflow interrupt via timeout */ + end_addr = coda_read(dev, CODA9_REG_JPEG_BBC_END_ADDR); + wr_ptr = coda_read(dev, CODA9_REG_JPEG_BBC_WR_PTR); + if (wr_ptr >= end_addr - 256) { + v4l2_err(&dev->v4l2_dev, "JPEG too large for capture buffer\n"); + coda9_jpeg_finish_encode(ctx); + return; + } + + coda_hw_reset(ctx); +} + static void coda9_jpeg_release(struct coda_ctx *ctx) { int i; @@ -1276,6 +1294,7 @@ const struct coda_context_ops coda9_jpeg_encode_ops = { .start_streaming = coda9_jpeg_start_encoding, .prepare_run = coda9_jpeg_prepare_encode, .finish_run = coda9_jpeg_finish_encode, + .run_timeout = 
coda9_jpeg_encode_timeout, .release = coda9_jpeg_release, }; diff --git a/drivers/media/platform/coda/imx-vdoa.c b/drivers/media/platform/coda/imx-vdoa.c index 8bc0d83718193f3028693cd566ab69133a3f274d..dd6e2e320264ef3eae9b70ea25691b6fb5c0396f 100644 --- a/drivers/media/platform/coda/imx-vdoa.c +++ b/drivers/media/platform/coda/imx-vdoa.c @@ -287,7 +287,11 @@ static int vdoa_probe(struct platform_device *pdev) struct resource *res; int ret; - dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(&pdev->dev, "DMA enable failed\n"); + return ret; + } vdoa = devm_kzalloc(&pdev->dev, sizeof(*vdoa), GFP_KERNEL); if (!vdoa) diff --git a/drivers/media/platform/imx-pxp.c b/drivers/media/platform/imx-pxp.c index 08d76eb05ed1a446d9827c13aa4a20d999fbb353..62356adebc39ed15c6855c92729ca102937ad943 100644 --- a/drivers/media/platform/imx-pxp.c +++ b/drivers/media/platform/imx-pxp.c @@ -1664,6 +1664,8 @@ static int pxp_probe(struct platform_device *pdev) if (irq < 0) return irq; + spin_lock_init(&dev->irqlock); + ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, pxp_irq_handler, IRQF_ONESHOT, dev_name(&pdev->dev), dev); if (ret < 0) { @@ -1681,8 +1683,6 @@ static int pxp_probe(struct platform_device *pdev) goto err_clk; } - spin_lock_init(&dev->irqlock); - ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev); if (ret) goto err_clk; diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c index 219c2c5b78efc3c1f7520d1989ffaa3f7d05cbed..5f93bc670edb20e56eef04967e881c07666f7d9d 100644 --- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c +++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c @@ -237,11 +237,11 @@ static int fops_vcodec_release(struct file *file) mtk_v4l2_debug(1, "[%d] encoder", ctx->id); mutex_lock(&dev->dev_mutex); + v4l2_m2m_ctx_release(ctx->m2m_ctx); mtk_vcodec_enc_release(ctx); v4l2_fh_del(&ctx->fh); v4l2_fh_exit(&ctx->fh); v4l2_ctrl_handler_free(&ctx->ctrl_hdl); - v4l2_m2m_ctx_release(ctx->m2m_ctx); list_del_init(&ctx->list); kfree(ctx); diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c index 58ddebbb84468a08363da2f45e72bfa03abe280f..62d11c6e41d60d2b18b068260160d2e4afc51189 100644 --- a/drivers/media/platform/qcom/venus/core.c +++ b/drivers/media/platform/qcom/venus/core.c @@ -222,7 +222,6 @@ static int venus_probe(struct platform_device *pdev) return -ENOMEM; core->dev = dev; - platform_set_drvdata(pdev, core); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); core->base = devm_ioremap_resource(dev, r); @@ -252,7 +251,7 @@ static int venus_probe(struct platform_device *pdev) return -ENODEV; if (core->pm_ops->core_get) { - ret = core->pm_ops->core_get(dev); + ret = core->pm_ops->core_get(core); if (ret) return ret; } @@ -277,6 +276,12 @@ static int venus_probe(struct platform_device *pdev) if (ret) goto err_core_put; + ret = v4l2_device_register(dev, &core->v4l2_dev); + if (ret) + goto err_core_deinit; + + platform_set_drvdata(pdev, core); + pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); @@ -289,11 +294,11 @@ static int venus_probe(struct platform_device *pdev) ret = venus_firmware_init(core); if (ret) - goto err_runtime_disable; + goto err_of_depopulate; ret = venus_boot(core); if (ret) - goto err_runtime_disable; + goto err_firmware_deinit; ret = hfi_core_resume(core, true); if (ret) @@ -311,10 +316,6 @@ static int venus_probe(struct platform_device *pdev) if 
(ret) goto err_venus_shutdown; - ret = v4l2_device_register(dev, &core->v4l2_dev); - if (ret) - goto err_core_deinit; - ret = pm_runtime_put_sync(dev); if (ret) { pm_runtime_get_noresume(dev); @@ -327,18 +328,22 @@ static int venus_probe(struct platform_device *pdev) err_dev_unregister: v4l2_device_unregister(&core->v4l2_dev); -err_core_deinit: - hfi_core_deinit(core, false); err_venus_shutdown: venus_shutdown(core); +err_firmware_deinit: + venus_firmware_deinit(core); +err_of_depopulate: + of_platform_depopulate(dev); err_runtime_disable: pm_runtime_put_noidle(dev); pm_runtime_set_suspended(dev); pm_runtime_disable(dev); hfi_destroy(core); +err_core_deinit: + hfi_core_deinit(core, false); err_core_put: if (core->pm_ops->core_put) - core->pm_ops->core_put(dev); + core->pm_ops->core_put(core); return ret; } @@ -364,11 +369,12 @@ static int venus_remove(struct platform_device *pdev) pm_runtime_disable(dev); if (pm_ops->core_put) - pm_ops->core_put(dev); + pm_ops->core_put(core); + + v4l2_device_unregister(&core->v4l2_dev); hfi_destroy(core); - v4l2_device_unregister(&core->v4l2_dev); mutex_destroy(&core->pm_lock); mutex_destroy(&core->lock); venus_dbgfs_deinit(core); @@ -387,7 +393,7 @@ static __maybe_unused int venus_runtime_suspend(struct device *dev) return ret; if (pm_ops->core_power) { - ret = pm_ops->core_power(dev, POWER_OFF); + ret = pm_ops->core_power(core, POWER_OFF); if (ret) return ret; } @@ -405,7 +411,8 @@ static __maybe_unused int venus_runtime_suspend(struct device *dev) err_video_path: icc_set_bw(core->cpucfg_path, kbps_to_icc(1000), 0); err_cpucfg_path: - pm_ops->core_power(dev, POWER_ON); + if (pm_ops->core_power) + pm_ops->core_power(core, POWER_ON); return ret; } @@ -425,7 +432,7 @@ static __maybe_unused int venus_runtime_resume(struct device *dev) return ret; if (pm_ops->core_power) { - ret = pm_ops->core_power(dev, POWER_ON); + ret = pm_ops->core_power(core, POWER_ON); if (ret) return ret; } diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h index 05c9fbd51f0c057586afbe588541c4a93c9e3487..f2a0ef9ee884e488ea7ac3eb6418e1d73b408a82 100644 --- a/drivers/media/platform/qcom/venus/core.h +++ b/drivers/media/platform/qcom/venus/core.h @@ -123,7 +123,6 @@ struct venus_caps { * @clks: an array of struct clk pointers * @vcodec0_clks: an array of vcodec0 struct clk pointers * @vcodec1_clks: an array of vcodec1 struct clk pointers - * @pd_dl_venus: pmdomain device-link for venus domain * @pmdomains: an array of pmdomains struct device pointers * @vdev_dec: a reference to video device structure for decoder instances * @vdev_enc: a reference to video device structure for encoder instances @@ -161,7 +160,6 @@ struct venus_core { struct icc_path *cpucfg_path; struct opp_table *opp_table; bool has_opp_table; - struct device_link *pd_dl_venus; struct device *pmdomains[VIDC_PMDOMAINS_NUM_MAX]; struct device_link *opp_dl_venus; struct device *opp_pmdomain; diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c index 2946547a0df4a445cfd105847849bf076584f8d2..710f9a2b132b07ca89ca33cc983854be37a9f811 100644 --- a/drivers/media/platform/qcom/venus/pm_helpers.c +++ b/drivers/media/platform/qcom/venus/pm_helpers.c @@ -147,14 +147,12 @@ static u32 load_per_type(struct venus_core *core, u32 session_type) struct venus_inst *inst = NULL; u32 mbs_per_sec = 0; - mutex_lock(&core->lock); list_for_each_entry(inst, &core->instances, list) { if (inst->session_type != session_type) continue; mbs_per_sec += 
load_per_instance(inst); } - mutex_unlock(&core->lock); return mbs_per_sec; } @@ -203,14 +201,12 @@ static int load_scale_bw(struct venus_core *core) struct venus_inst *inst = NULL; u32 mbs_per_sec, avg, peak, total_avg = 0, total_peak = 0; - mutex_lock(&core->lock); list_for_each_entry(inst, &core->instances, list) { mbs_per_sec = load_per_instance(inst); mbs_to_bw(inst, mbs_per_sec, &avg, &peak); total_avg += avg; total_peak += peak; } - mutex_unlock(&core->lock); /* * keep minimum bandwidth vote for "video-mem" path, @@ -237,8 +233,9 @@ static int load_scale_v1(struct venus_inst *inst) struct device *dev = core->dev; u32 mbs_per_sec; unsigned int i; - int ret; + int ret = 0; + mutex_lock(&core->lock); mbs_per_sec = load_per_type(core, VIDC_SESSION_TYPE_ENC) + load_per_type(core, VIDC_SESSION_TYPE_DEC); @@ -263,29 +260,28 @@ set_freq: if (ret) { dev_err(dev, "failed to set clock rate %lu (%d)\n", freq, ret); - return ret; + goto exit; } ret = load_scale_bw(core); if (ret) { dev_err(dev, "failed to set bandwidth (%d)\n", ret); - return ret; + goto exit; } - return 0; +exit: + mutex_unlock(&core->lock); + return ret; } -static int core_get_v1(struct device *dev) +static int core_get_v1(struct venus_core *core) { - struct venus_core *core = dev_get_drvdata(dev); - return core_clks_get(core); } -static int core_power_v1(struct device *dev, int on) +static int core_power_v1(struct venus_core *core, int on) { - struct venus_core *core = dev_get_drvdata(dev); int ret = 0; if (on == POWER_ON) @@ -752,12 +748,12 @@ static int venc_power_v4(struct device *dev, int on) return ret; } -static int vcodec_domains_get(struct device *dev) +static int vcodec_domains_get(struct venus_core *core) { int ret; struct opp_table *opp_table; struct device **opp_virt_dev; - struct venus_core *core = dev_get_drvdata(dev); + struct device *dev = core->dev; const struct venus_resources *res = core->res; struct device *pd; unsigned int i; @@ -773,13 +769,6 @@ static int vcodec_domains_get(struct device *dev) core->pmdomains[i] = pd; } - core->pd_dl_venus = device_link_add(dev, core->pmdomains[0], - DL_FLAG_PM_RUNTIME | - DL_FLAG_STATELESS | - DL_FLAG_RPM_ACTIVE); - if (!core->pd_dl_venus) - return -ENODEV; - skip_pmdomains: if (!core->has_opp_table) return 0; @@ -806,29 +795,23 @@ skip_pmdomains: opp_dl_add_err: dev_pm_opp_detach_genpd(core->opp_table); opp_attach_err: - if (core->pd_dl_venus) { - device_link_del(core->pd_dl_venus); - for (i = 0; i < res->vcodec_pmdomains_num; i++) { - if (IS_ERR_OR_NULL(core->pmdomains[i])) - continue; - dev_pm_domain_detach(core->pmdomains[i], true); - } + for (i = 0; i < res->vcodec_pmdomains_num; i++) { + if (IS_ERR_OR_NULL(core->pmdomains[i])) + continue; + dev_pm_domain_detach(core->pmdomains[i], true); } + return ret; } -static void vcodec_domains_put(struct device *dev) +static void vcodec_domains_put(struct venus_core *core) { - struct venus_core *core = dev_get_drvdata(dev); const struct venus_resources *res = core->res; unsigned int i; if (!res->vcodec_pmdomains_num) goto skip_pmdomains; - if (core->pd_dl_venus) - device_link_del(core->pd_dl_venus); - for (i = 0; i < res->vcodec_pmdomains_num; i++) { if (IS_ERR_OR_NULL(core->pmdomains[i])) continue; @@ -845,9 +828,9 @@ skip_pmdomains: dev_pm_opp_detach_genpd(core->opp_table); } -static int core_get_v4(struct device *dev) +static int core_get_v4(struct venus_core *core) { - struct venus_core *core = dev_get_drvdata(dev); + struct device *dev = core->dev; const struct venus_resources *res = core->res; int ret; @@ -886,7 
+869,7 @@ static int core_get_v4(struct device *dev) } } - ret = vcodec_domains_get(dev); + ret = vcodec_domains_get(core); if (ret) { if (core->has_opp_table) dev_pm_opp_of_remove_table(dev); @@ -897,14 +880,14 @@ static int core_get_v4(struct device *dev) return 0; } -static void core_put_v4(struct device *dev) +static void core_put_v4(struct venus_core *core) { - struct venus_core *core = dev_get_drvdata(dev); + struct device *dev = core->dev; if (legacy_binding) return; - vcodec_domains_put(dev); + vcodec_domains_put(core); if (core->has_opp_table) dev_pm_opp_of_remove_table(dev); @@ -913,19 +896,33 @@ static void core_put_v4(struct device *dev) } -static int core_power_v4(struct device *dev, int on) +static int core_power_v4(struct venus_core *core, int on) { - struct venus_core *core = dev_get_drvdata(dev); + struct device *dev = core->dev; + struct device *pmctrl = core->pmdomains[0]; int ret = 0; if (on == POWER_ON) { + if (pmctrl) { + ret = pm_runtime_get_sync(pmctrl); + if (ret < 0) { + pm_runtime_put_noidle(pmctrl); + return ret; + } + } + ret = core_clks_enable(core); + if (ret < 0 && pmctrl) + pm_runtime_put_sync(pmctrl); } else { /* Drop the performance state vote */ if (core->opp_pmdomain) dev_pm_opp_set_rate(dev, 0); core_clks_disable(core); + + if (pmctrl) + pm_runtime_put_sync(pmctrl); } return ret; @@ -962,13 +959,13 @@ static int load_scale_v4(struct venus_inst *inst) struct device *dev = core->dev; unsigned long freq = 0, freq_core1 = 0, freq_core2 = 0; unsigned long filled_len = 0; - int i, ret; + int i, ret = 0; for (i = 0; i < inst->num_input_bufs; i++) filled_len = max(filled_len, inst->payloads[i]); if (inst->session_type == VIDC_SESSION_TYPE_DEC && !filled_len) - return 0; + return ret; freq = calculate_inst_freq(inst, filled_len); inst->clk_data.freq = freq; @@ -984,7 +981,6 @@ static int load_scale_v4(struct venus_inst *inst) freq_core2 += inst->clk_data.freq; } } - mutex_unlock(&core->lock); freq = max(freq_core1, freq_core2); @@ -1008,17 +1004,19 @@ set_freq: if (ret) { dev_err(dev, "failed to set clock rate %lu (%d)\n", freq, ret); - return ret; + goto exit; } ret = load_scale_bw(core); if (ret) { dev_err(dev, "failed to set bandwidth (%d)\n", ret); - return ret; + goto exit; } - return 0; +exit: + mutex_unlock(&core->lock); + return ret; } static const struct venus_pm_ops pm_ops_v4 = { diff --git a/drivers/media/platform/qcom/venus/pm_helpers.h b/drivers/media/platform/qcom/venus/pm_helpers.h index aa2f6afa2354432fc0f58b6735f45356214f3579..a492c50c5543c5036a13cef7a72891b788b7e1c9 100644 --- a/drivers/media/platform/qcom/venus/pm_helpers.h +++ b/drivers/media/platform/qcom/venus/pm_helpers.h @@ -4,14 +4,15 @@ #define __VENUS_PM_HELPERS_H__ struct device; +struct venus_core; #define POWER_ON 1 #define POWER_OFF 0 struct venus_pm_ops { - int (*core_get)(struct device *dev); - void (*core_put)(struct device *dev); - int (*core_power)(struct device *dev, int on); + int (*core_get)(struct venus_core *core); + void (*core_put)(struct venus_core *core); + int (*core_power)(struct venus_core *core, int on); int (*vdec_get)(struct device *dev); void (*vdec_put)(struct device *dev); diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c index d2d87a204e9185af52430fd09d634c144a5ff6d2..5e8e48a721a049c9faabaa8c3e4d6770f99e6eaa 100644 --- a/drivers/media/platform/rcar-vin/rcar-csi2.c +++ b/drivers/media/platform/rcar-vin/rcar-csi2.c @@ -436,16 +436,23 @@ static int rcsi2_wait_phy_start(struct rcar_csi2 *priv, static int 
rcsi2_set_phypll(struct rcar_csi2 *priv, unsigned int mbps) { const struct rcsi2_mbps_reg *hsfreq; + const struct rcsi2_mbps_reg *hsfreq_prev = NULL; - for (hsfreq = priv->info->hsfreqrange; hsfreq->mbps != 0; hsfreq++) + for (hsfreq = priv->info->hsfreqrange; hsfreq->mbps != 0; hsfreq++) { if (hsfreq->mbps >= mbps) break; + hsfreq_prev = hsfreq; + } if (!hsfreq->mbps) { dev_err(priv->dev, "Unsupported PHY speed (%u Mbps)", mbps); return -ERANGE; } + if (hsfreq_prev && + ((mbps - hsfreq_prev->mbps) <= (hsfreq->mbps - mbps))) + hsfreq = hsfreq_prev; + rcsi2_write(priv, PHYPLL_REG, PHYPLL_HSFREQRANGE(hsfreq->reg)); return 0; @@ -969,10 +976,17 @@ static int rcsi2_phtw_write_mbps(struct rcar_csi2 *priv, unsigned int mbps, const struct rcsi2_mbps_reg *values, u16 code) { const struct rcsi2_mbps_reg *value; + const struct rcsi2_mbps_reg *prev_value = NULL; - for (value = values; value->mbps; value++) + for (value = values; value->mbps; value++) { if (value->mbps >= mbps) break; + prev_value = value; + } + + if (prev_value && + ((mbps - prev_value->mbps) <= (value->mbps - mbps))) + value = prev_value; if (!value->mbps) { dev_err(priv->dev, "Unsupported PHY speed (%u Mbps)", mbps); diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c index 3e7a3ae2a6b97045bdf9d653e5560802e02f8422..0bbe6f9f92062fdbd5bfe3fa43533a03959f1002 100644 --- a/drivers/media/platform/rcar-vin/rcar-v4l2.c +++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c @@ -175,20 +175,27 @@ static void rvin_format_align(struct rvin_dev *vin, struct v4l2_pix_format *pix) break; } - /* HW limit width to a multiple of 32 (2^5) for NV12/16 else 2 (2^1) */ + /* Hardware limits width alignment based on format. */ switch (pix->pixelformat) { + /* Multiple of 32 (2^5) for NV12/16. */ case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV16: walign = 5; break; - default: + /* Multiple of 2 (2^1) for YUV. */ + case V4L2_PIX_FMT_YUYV: + case V4L2_PIX_FMT_UYVY: walign = 1; break; + /* No multiple for RGB. 
*/ + default: + walign = 0; + break; } /* Limit to VIN capabilities */ - v4l_bound_align_image(&pix->width, 2, vin->info->max_width, walign, - &pix->height, 4, vin->info->max_height, 2, 0); + v4l_bound_align_image(&pix->width, 5, vin->info->max_width, walign, + &pix->height, 2, vin->info->max_height, 0, 0); pix->bytesperline = rvin_format_bytesperline(vin, pix); pix->sizeimage = rvin_format_sizeimage(pix); diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c index a972c0705ac79c9d90aa051610766d6b7becd872..76d39e2e877068e0a8fbe65c42f8de585cc95d9f 100644 --- a/drivers/media/radio/si470x/radio-si470x-i2c.c +++ b/drivers/media/radio/si470x/radio-si470x-i2c.c @@ -368,7 +368,7 @@ static int si470x_i2c_probe(struct i2c_client *client) if (radio->hdl.error) { retval = radio->hdl.error; dev_err(&client->dev, "couldn't register control\n"); - goto err_dev; + goto err_all; } /* video device initialization */ @@ -463,7 +463,6 @@ static int si470x_i2c_probe(struct i2c_client *client) return 0; err_all: v4l2_ctrl_handler_free(&radio->hdl); -err_dev: v4l2_device_unregister(&radio->v4l2_dev); err_initial: return retval; diff --git a/drivers/media/rc/igorplugusb.c b/drivers/media/rc/igorplugusb.c index effaa5751d6c98099cd6cb745ddbcb40411eb889..3e9988ee785f0cf4f6a3804c253a8defb53f3586 100644 --- a/drivers/media/rc/igorplugusb.c +++ b/drivers/media/rc/igorplugusb.c @@ -64,9 +64,11 @@ static void igorplugusb_irdata(struct igorplugusb *ir, unsigned len) if (start >= len) { dev_err(ir->dev, "receive overflow invalid: %u", overflow); } else { - if (overflow > 0) + if (overflow > 0) { dev_warn(ir->dev, "receive overflow, at least %u lost", overflow); + ir_raw_event_reset(ir->rc); + } do { rawir.duration = ir->buf_in[i] * 85; diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c index 8870c4e6c5f445d1c67a00a3271d7787cfcdbf27..dbb5a4f44bda50631da1a72b95d2e3ca79e402ba 100644 --- a/drivers/media/rc/mceusb.c +++ b/drivers/media/rc/mceusb.c @@ -1430,7 +1430,7 @@ static void mceusb_gen1_init(struct mceusb_dev *ir) */ ret = usb_control_msg(ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0), USB_REQ_SET_ADDRESS, USB_TYPE_VENDOR, 0, 0, - data, USB_CTRL_MSG_SZ, HZ * 3); + data, USB_CTRL_MSG_SZ, 3000); dev_dbg(dev, "set address - ret = %d", ret); dev_dbg(dev, "set address - data[0] = %d, data[1] = %d", data[0], data[1]); @@ -1438,20 +1438,20 @@ static void mceusb_gen1_init(struct mceusb_dev *ir) /* set feature: bit rate 38400 bps */ ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0), USB_REQ_SET_FEATURE, USB_TYPE_VENDOR, - 0xc04e, 0x0000, NULL, 0, HZ * 3); + 0xc04e, 0x0000, NULL, 0, 3000); dev_dbg(dev, "set feature - ret = %d", ret); /* bRequest 4: set char length to 8 bits */ ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0), 4, USB_TYPE_VENDOR, - 0x0808, 0x0000, NULL, 0, HZ * 3); + 0x0808, 0x0000, NULL, 0, 3000); dev_dbg(dev, "set char length - retB = %d", ret); /* bRequest 2: set handshaking to use DTR/DSR */ ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0), 2, USB_TYPE_VENDOR, - 0x0000, 0x0100, NULL, 0, HZ * 3); + 0x0000, 0x0100, NULL, 0, 3000); dev_dbg(dev, "set handshake - retC = %d", ret); /* device resume */ diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c index 2cf3377ec63a7c0b8b61c17e0ce68d61ffecdae8..a61f9820ade951c943580c909d64380b32ec9779 100644 --- a/drivers/media/rc/redrat3.c +++ b/drivers/media/rc/redrat3.c @@ -404,7 +404,7 @@ static int redrat3_send_cmd(int cmd, struct redrat3_dev *rr3) 
udev = rr3->udev; res = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), cmd, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, - 0x0000, 0x0000, data, sizeof(u8), HZ * 10); + 0x0000, 0x0000, data, sizeof(u8), 10000); if (res < 0) { dev_err(rr3->dev, "%s: Error sending rr3 cmd res %d, data %d", @@ -480,7 +480,7 @@ static u32 redrat3_get_timeout(struct redrat3_dev *rr3) pipe = usb_rcvctrlpipe(rr3->udev, 0); ret = usb_control_msg(rr3->udev, pipe, RR3_GET_IR_PARAM, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, - RR3_IR_IO_SIG_TIMEOUT, 0, tmp, len, HZ * 5); + RR3_IR_IO_SIG_TIMEOUT, 0, tmp, len, 5000); if (ret != len) dev_warn(rr3->dev, "Failed to read timeout from hardware\n"); else { @@ -510,7 +510,7 @@ static int redrat3_set_timeout(struct rc_dev *rc_dev, unsigned int timeoutus) ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), RR3_SET_IR_PARAM, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, RR3_IR_IO_SIG_TIMEOUT, 0, timeout, sizeof(*timeout), - HZ * 25); + 25000); dev_dbg(dev, "set ir parm timeout %d ret 0x%02x\n", be32_to_cpu(*timeout), ret); @@ -542,32 +542,32 @@ static void redrat3_reset(struct redrat3_dev *rr3) *val = 0x01; rc = usb_control_msg(udev, rxpipe, RR3_RESET, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, - RR3_CPUCS_REG_ADDR, 0, val, len, HZ * 25); + RR3_CPUCS_REG_ADDR, 0, val, len, 25000); dev_dbg(dev, "reset returned 0x%02x\n", rc); *val = length_fuzz; rc = usb_control_msg(udev, txpipe, RR3_SET_IR_PARAM, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, - RR3_IR_IO_LENGTH_FUZZ, 0, val, len, HZ * 25); + RR3_IR_IO_LENGTH_FUZZ, 0, val, len, 25000); dev_dbg(dev, "set ir parm len fuzz %d rc 0x%02x\n", *val, rc); *val = (65536 - (minimum_pause * 2000)) / 256; rc = usb_control_msg(udev, txpipe, RR3_SET_IR_PARAM, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, - RR3_IR_IO_MIN_PAUSE, 0, val, len, HZ * 25); + RR3_IR_IO_MIN_PAUSE, 0, val, len, 25000); dev_dbg(dev, "set ir parm min pause %d rc 0x%02x\n", *val, rc); *val = periods_measure_carrier; rc = usb_control_msg(udev, txpipe, RR3_SET_IR_PARAM, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, - RR3_IR_IO_PERIODS_MF, 0, val, len, HZ * 25); + RR3_IR_IO_PERIODS_MF, 0, val, len, 25000); dev_dbg(dev, "set ir parm periods measure carrier %d rc 0x%02x", *val, rc); *val = RR3_DRIVER_MAXLENS; rc = usb_control_msg(udev, txpipe, RR3_SET_IR_PARAM, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, - RR3_IR_IO_MAX_LENGTHS, 0, val, len, HZ * 25); + RR3_IR_IO_MAX_LENGTHS, 0, val, len, 25000); dev_dbg(dev, "set ir parm max lens %d rc 0x%02x\n", *val, rc); kfree(val); @@ -585,7 +585,7 @@ static void redrat3_get_firmware_rev(struct redrat3_dev *rr3) rc = usb_control_msg(rr3->udev, usb_rcvctrlpipe(rr3->udev, 0), RR3_FW_VERSION, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, - 0, 0, buffer, RR3_FW_VERSION_LEN, HZ * 5); + 0, 0, buffer, RR3_FW_VERSION_LEN, 5000); if (rc >= 0) dev_info(rr3->dev, "Firmware rev: %s", buffer); @@ -825,14 +825,14 @@ static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf, pipe = usb_sndbulkpipe(rr3->udev, rr3->ep_out->bEndpointAddress); ret = usb_bulk_msg(rr3->udev, pipe, irdata, - sendbuf_len, &ret_len, 10 * HZ); + sendbuf_len, &ret_len, 10000); dev_dbg(dev, "sent %d bytes, (ret %d)\n", ret_len, ret); /* now tell the hardware to transmit what we sent it */ pipe = usb_rcvctrlpipe(rr3->udev, 0); ret = usb_control_msg(rr3->udev, pipe, RR3_TX_SEND_SIGNAL, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, - 0, 0, irdata, 2, HZ * 10); + 0, 0, irdata, 2, 10000); if (ret < 0) dev_err(dev, "Error: control 
msg send failed, rc %d\n", ret); diff --git a/drivers/media/tuners/msi001.c b/drivers/media/tuners/msi001.c index 78e6fd600d8ef77ec60fd1e55aa96feaaf90827f..44247049a31903f5cbb9159d9be8ae934d5fb9ae 100644 --- a/drivers/media/tuners/msi001.c +++ b/drivers/media/tuners/msi001.c @@ -442,6 +442,13 @@ static int msi001_probe(struct spi_device *spi) V4L2_CID_RF_TUNER_BANDWIDTH_AUTO, 0, 1, 1, 1); dev->bandwidth = v4l2_ctrl_new_std(&dev->hdl, &msi001_ctrl_ops, V4L2_CID_RF_TUNER_BANDWIDTH, 200000, 8000000, 1, 200000); + if (dev->hdl.error) { + ret = dev->hdl.error; + dev_err(&spi->dev, "Could not initialize controls\n"); + /* control init failed, free handler */ + goto err_ctrl_handler_free; + } + v4l2_ctrl_auto_cluster(2, &dev->bandwidth_auto, 0, false); dev->lna_gain = v4l2_ctrl_new_std(&dev->hdl, &msi001_ctrl_ops, V4L2_CID_RF_TUNER_LNA_GAIN, 0, 1, 1, 1); diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c index fefb2625f6558e340488586395607781f2becf2c..75ddf7ed1faff25f255d45b58ef5ee1e68f59109 100644 --- a/drivers/media/tuners/si2157.c +++ b/drivers/media/tuners/si2157.c @@ -90,7 +90,7 @@ static int si2157_init(struct dvb_frontend *fe) dev_dbg(&client->dev, "\n"); /* Try to get Xtal trim property, to verify tuner still running */ - memcpy(cmd.args, "\x15\x00\x04\x02", 4); + memcpy(cmd.args, "\x15\x00\x02\x04", 4); cmd.wlen = 4; cmd.rlen = 4; ret = si2157_cmd_execute(client, &cmd); diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c index e731243267e4904b8ceed06f004a0e62352e44cf..a2563c25408086d013c4a7a9306d913343aa1eda 100644 --- a/drivers/media/usb/b2c2/flexcop-usb.c +++ b/drivers/media/usb/b2c2/flexcop-usb.c @@ -87,7 +87,7 @@ static int flexcop_usb_readwrite_dw(struct flexcop_device *fc, u16 wRegOffsPCI, 0, fc_usb->data, sizeof(u32), - B2C2_WAIT_FOR_OPERATION_RDW * HZ); + B2C2_WAIT_FOR_OPERATION_RDW); if (ret != sizeof(u32)) { err("error while %s dword from %d (%d).", read ? 
"reading" : @@ -155,7 +155,7 @@ static int flexcop_usb_v8_memory_req(struct flexcop_usb *fc_usb, wIndex, fc_usb->data, buflen, - nWaitTime * HZ); + nWaitTime); if (ret != buflen) ret = -EIO; @@ -249,13 +249,13 @@ static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c, /* DKT 020208 - add this to support special case of DiSEqC */ case USB_FUNC_I2C_CHECKWRITE: pipe = B2C2_USB_CTRL_PIPE_OUT; - nWaitTime = 2; + nWaitTime = 2000; request_type |= USB_DIR_OUT; break; case USB_FUNC_I2C_READ: case USB_FUNC_I2C_REPEATREAD: pipe = B2C2_USB_CTRL_PIPE_IN; - nWaitTime = 2; + nWaitTime = 2000; request_type |= USB_DIR_IN; break; default: @@ -282,7 +282,7 @@ static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c, wIndex, fc_usb->data, buflen, - nWaitTime * HZ); + nWaitTime); if (ret != buflen) ret = -EIO; diff --git a/drivers/media/usb/b2c2/flexcop-usb.h b/drivers/media/usb/b2c2/flexcop-usb.h index 2f230bf72252be0d022e003e032784b0b5a9dce1..c7cca1a5ee59d0e420180a5d1c8325cf5351b21b 100644 --- a/drivers/media/usb/b2c2/flexcop-usb.h +++ b/drivers/media/usb/b2c2/flexcop-usb.h @@ -91,13 +91,13 @@ typedef enum { UTILITY_SRAM_TESTVERIFY = 0x16, } flexcop_usb_utility_function_t; -#define B2C2_WAIT_FOR_OPERATION_RW (1*HZ) -#define B2C2_WAIT_FOR_OPERATION_RDW (3*HZ) -#define B2C2_WAIT_FOR_OPERATION_WDW (1*HZ) +#define B2C2_WAIT_FOR_OPERATION_RW 1000 +#define B2C2_WAIT_FOR_OPERATION_RDW 3000 +#define B2C2_WAIT_FOR_OPERATION_WDW 1000 -#define B2C2_WAIT_FOR_OPERATION_V8READ (3*HZ) -#define B2C2_WAIT_FOR_OPERATION_V8WRITE (3*HZ) -#define B2C2_WAIT_FOR_OPERATION_V8FLASH (3*HZ) +#define B2C2_WAIT_FOR_OPERATION_V8READ 3000 +#define B2C2_WAIT_FOR_OPERATION_V8WRITE 3000 +#define B2C2_WAIT_FOR_OPERATION_V8FLASH 3000 typedef enum { V8_MEMORY_PAGE_DVB_CI = 0x20, diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c index 76aac06f9fb8e2cb5b85ada5a77be10900e37dfc..cba03b286473858851310c678f047ffc772f6e8f 100644 --- a/drivers/media/usb/cpia2/cpia2_usb.c +++ b/drivers/media/usb/cpia2/cpia2_usb.c @@ -550,7 +550,7 @@ static int write_packet(struct usb_device *udev, 0, /* index */ buf, /* buffer */ size, - HZ); + 1000); kfree(buf); return ret; @@ -582,7 +582,7 @@ static int read_packet(struct usb_device *udev, 0, /* index */ buf, /* buffer */ size, - HZ); + 1000); if (ret >= 0) memcpy(registers, buf, size); diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c index 70219b3e85666de2772509149396b85b4fa95a5c..7ea8f68b0f458418276bffd1c595cf8c2e094dc3 100644 --- a/drivers/media/usb/dvb-usb/dib0700_core.c +++ b/drivers/media/usb/dvb-usb/dib0700_core.c @@ -618,8 +618,6 @@ int dib0700_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) deb_info("the endpoint number (%i) is not correct, use the adapter id instead", adap->fe_adap[0].stream.props.endpoint); if (onoff) st->channel_state |= 1 << (adap->id); - else - st->channel_state |= 1 << ~(adap->id); } else { if (onoff) st->channel_state |= 1 << (adap->fe_adap[0].stream.props.endpoint-2); diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c index a27a6844032528437561278a5816d0fe6513a4e7..aa929db56db1f4721799aae7b3da1afc9e95215a 100644 --- a/drivers/media/usb/dvb-usb/dw2102.c +++ b/drivers/media/usb/dvb-usb/dw2102.c @@ -2148,46 +2148,153 @@ static struct dvb_usb_device_properties s6x0_properties = { } }; -static const struct dvb_usb_device_description d1100 = { - "Prof 1100 USB ", - {&dw2102_table[PROF_1100], NULL}, - {NULL}, -}; +static struct 
dvb_usb_device_properties p1100_properties = { + .caps = DVB_USB_IS_AN_I2C_ADAPTER, + .usb_ctrl = DEVICE_SPECIFIC, + .size_of_priv = sizeof(struct dw2102_state), + .firmware = P1100_FIRMWARE, + .no_reconnect = 1, -static const struct dvb_usb_device_description d660 = { - "TeVii S660 USB", - {&dw2102_table[TEVII_S660], NULL}, - {NULL}, -}; + .i2c_algo = &s6x0_i2c_algo, + .rc.core = { + .rc_interval = 150, + .rc_codes = RC_MAP_TBS_NEC, + .module_name = "dw2102", + .allowed_protos = RC_PROTO_BIT_NEC, + .rc_query = prof_rc_query, + }, -static const struct dvb_usb_device_description d480_1 = { - "TeVii S480.1 USB", - {&dw2102_table[TEVII_S480_1], NULL}, - {NULL}, + .generic_bulk_ctrl_endpoint = 0x81, + .num_adapters = 1, + .download_firmware = dw2102_load_firmware, + .read_mac_address = s6x0_read_mac_address, + .adapter = { + { + .num_frontends = 1, + .fe = {{ + .frontend_attach = stv0288_frontend_attach, + .stream = { + .type = USB_BULK, + .count = 8, + .endpoint = 0x82, + .u = { + .bulk = { + .buffersize = 4096, + } + } + }, + } }, + } + }, + .num_device_descs = 1, + .devices = { + {"Prof 1100 USB ", + {&dw2102_table[PROF_1100], NULL}, + {NULL}, + }, + } }; -static const struct dvb_usb_device_description d480_2 = { - "TeVii S480.2 USB", - {&dw2102_table[TEVII_S480_2], NULL}, - {NULL}, -}; +static struct dvb_usb_device_properties s660_properties = { + .caps = DVB_USB_IS_AN_I2C_ADAPTER, + .usb_ctrl = DEVICE_SPECIFIC, + .size_of_priv = sizeof(struct dw2102_state), + .firmware = S660_FIRMWARE, + .no_reconnect = 1, -static const struct dvb_usb_device_description d7500 = { - "Prof 7500 USB DVB-S2", - {&dw2102_table[PROF_7500], NULL}, - {NULL}, -}; + .i2c_algo = &s6x0_i2c_algo, + .rc.core = { + .rc_interval = 150, + .rc_codes = RC_MAP_TEVII_NEC, + .module_name = "dw2102", + .allowed_protos = RC_PROTO_BIT_NEC, + .rc_query = dw2102_rc_query, + }, -static const struct dvb_usb_device_description d421 = { - "TeVii S421 PCI", - {&dw2102_table[TEVII_S421], NULL}, - {NULL}, + .generic_bulk_ctrl_endpoint = 0x81, + .num_adapters = 1, + .download_firmware = dw2102_load_firmware, + .read_mac_address = s6x0_read_mac_address, + .adapter = { + { + .num_frontends = 1, + .fe = {{ + .frontend_attach = ds3000_frontend_attach, + .stream = { + .type = USB_BULK, + .count = 8, + .endpoint = 0x82, + .u = { + .bulk = { + .buffersize = 4096, + } + } + }, + } }, + } + }, + .num_device_descs = 3, + .devices = { + {"TeVii S660 USB", + {&dw2102_table[TEVII_S660], NULL}, + {NULL}, + }, + {"TeVii S480.1 USB", + {&dw2102_table[TEVII_S480_1], NULL}, + {NULL}, + }, + {"TeVii S480.2 USB", + {&dw2102_table[TEVII_S480_2], NULL}, + {NULL}, + }, + } }; -static const struct dvb_usb_device_description d632 = { - "TeVii S632 USB", - {&dw2102_table[TEVII_S632], NULL}, - {NULL}, +static struct dvb_usb_device_properties p7500_properties = { + .caps = DVB_USB_IS_AN_I2C_ADAPTER, + .usb_ctrl = DEVICE_SPECIFIC, + .size_of_priv = sizeof(struct dw2102_state), + .firmware = P7500_FIRMWARE, + .no_reconnect = 1, + + .i2c_algo = &s6x0_i2c_algo, + .rc.core = { + .rc_interval = 150, + .rc_codes = RC_MAP_TBS_NEC, + .module_name = "dw2102", + .allowed_protos = RC_PROTO_BIT_NEC, + .rc_query = prof_rc_query, + }, + + .generic_bulk_ctrl_endpoint = 0x81, + .num_adapters = 1, + .download_firmware = dw2102_load_firmware, + .read_mac_address = s6x0_read_mac_address, + .adapter = { + { + .num_frontends = 1, + .fe = {{ + .frontend_attach = prof_7500_frontend_attach, + .stream = { + .type = USB_BULK, + .count = 8, + .endpoint = 0x82, + .u = { + .bulk = { + .buffersize 
= 4096, + } + } + }, + } }, + } + }, + .num_device_descs = 1, + .devices = { + {"Prof 7500 USB DVB-S2", + {&dw2102_table[PROF_7500], NULL}, + {NULL}, + }, + } }; static struct dvb_usb_device_properties su3000_properties = { @@ -2267,6 +2374,59 @@ static struct dvb_usb_device_properties su3000_properties = { } }; +static struct dvb_usb_device_properties s421_properties = { + .caps = DVB_USB_IS_AN_I2C_ADAPTER, + .usb_ctrl = DEVICE_SPECIFIC, + .size_of_priv = sizeof(struct dw2102_state), + .power_ctrl = su3000_power_ctrl, + .num_adapters = 1, + .identify_state = su3000_identify_state, + .i2c_algo = &su3000_i2c_algo, + + .rc.core = { + .rc_interval = 150, + .rc_codes = RC_MAP_SU3000, + .module_name = "dw2102", + .allowed_protos = RC_PROTO_BIT_RC5, + .rc_query = su3000_rc_query, + }, + + .read_mac_address = su3000_read_mac_address, + + .generic_bulk_ctrl_endpoint = 0x01, + + .adapter = { + { + .num_frontends = 1, + .fe = {{ + .streaming_ctrl = su3000_streaming_ctrl, + .frontend_attach = m88rs2000_frontend_attach, + .stream = { + .type = USB_BULK, + .count = 8, + .endpoint = 0x82, + .u = { + .bulk = { + .buffersize = 4096, + } + } + } + } }, + } + }, + .num_device_descs = 2, + .devices = { + { "TeVii S421 PCI", + { &dw2102_table[TEVII_S421], NULL }, + { NULL }, + }, + { "TeVii S632 USB", + { &dw2102_table[TEVII_S632], NULL }, + { NULL }, + }, + } +}; + static struct dvb_usb_device_properties t220_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, @@ -2384,101 +2544,33 @@ static struct dvb_usb_device_properties tt_s2_4600_properties = { static int dw2102_probe(struct usb_interface *intf, const struct usb_device_id *id) { - int retval = -ENOMEM; - struct dvb_usb_device_properties *p1100; - struct dvb_usb_device_properties *s660; - struct dvb_usb_device_properties *p7500; - struct dvb_usb_device_properties *s421; - - p1100 = kmemdup(&s6x0_properties, - sizeof(struct dvb_usb_device_properties), GFP_KERNEL); - if (!p1100) - goto err0; - - /* copy default structure */ - /* fill only different fields */ - p1100->firmware = P1100_FIRMWARE; - p1100->devices[0] = d1100; - p1100->rc.core.rc_query = prof_rc_query; - p1100->rc.core.rc_codes = RC_MAP_TBS_NEC; - p1100->adapter->fe[0].frontend_attach = stv0288_frontend_attach; - - s660 = kmemdup(&s6x0_properties, - sizeof(struct dvb_usb_device_properties), GFP_KERNEL); - if (!s660) - goto err1; - - s660->firmware = S660_FIRMWARE; - s660->num_device_descs = 3; - s660->devices[0] = d660; - s660->devices[1] = d480_1; - s660->devices[2] = d480_2; - s660->adapter->fe[0].frontend_attach = ds3000_frontend_attach; - - p7500 = kmemdup(&s6x0_properties, - sizeof(struct dvb_usb_device_properties), GFP_KERNEL); - if (!p7500) - goto err2; - - p7500->firmware = P7500_FIRMWARE; - p7500->devices[0] = d7500; - p7500->rc.core.rc_query = prof_rc_query; - p7500->rc.core.rc_codes = RC_MAP_TBS_NEC; - p7500->adapter->fe[0].frontend_attach = prof_7500_frontend_attach; - - - s421 = kmemdup(&su3000_properties, - sizeof(struct dvb_usb_device_properties), GFP_KERNEL); - if (!s421) - goto err3; - - s421->num_device_descs = 2; - s421->devices[0] = d421; - s421->devices[1] = d632; - s421->adapter->fe[0].frontend_attach = m88rs2000_frontend_attach; - - if (0 == dvb_usb_device_init(intf, &dw2102_properties, - THIS_MODULE, NULL, adapter_nr) || - 0 == dvb_usb_device_init(intf, &dw2104_properties, - THIS_MODULE, NULL, adapter_nr) || - 0 == dvb_usb_device_init(intf, &dw3101_properties, - THIS_MODULE, NULL, adapter_nr) || - 0 == dvb_usb_device_init(intf, 
&s6x0_properties, - THIS_MODULE, NULL, adapter_nr) || - 0 == dvb_usb_device_init(intf, p1100, - THIS_MODULE, NULL, adapter_nr) || - 0 == dvb_usb_device_init(intf, s660, - THIS_MODULE, NULL, adapter_nr) || - 0 == dvb_usb_device_init(intf, p7500, - THIS_MODULE, NULL, adapter_nr) || - 0 == dvb_usb_device_init(intf, s421, - THIS_MODULE, NULL, adapter_nr) || - 0 == dvb_usb_device_init(intf, &su3000_properties, - THIS_MODULE, NULL, adapter_nr) || - 0 == dvb_usb_device_init(intf, &t220_properties, - THIS_MODULE, NULL, adapter_nr) || - 0 == dvb_usb_device_init(intf, &tt_s2_4600_properties, - THIS_MODULE, NULL, adapter_nr)) { - - /* clean up copied properties */ - kfree(s421); - kfree(p7500); - kfree(s660); - kfree(p1100); + if (!(dvb_usb_device_init(intf, &dw2102_properties, + THIS_MODULE, NULL, adapter_nr) && + dvb_usb_device_init(intf, &dw2104_properties, + THIS_MODULE, NULL, adapter_nr) && + dvb_usb_device_init(intf, &dw3101_properties, + THIS_MODULE, NULL, adapter_nr) && + dvb_usb_device_init(intf, &s6x0_properties, + THIS_MODULE, NULL, adapter_nr) && + dvb_usb_device_init(intf, &p1100_properties, + THIS_MODULE, NULL, adapter_nr) && + dvb_usb_device_init(intf, &s660_properties, + THIS_MODULE, NULL, adapter_nr) && + dvb_usb_device_init(intf, &p7500_properties, + THIS_MODULE, NULL, adapter_nr) && + dvb_usb_device_init(intf, &s421_properties, + THIS_MODULE, NULL, adapter_nr) && + dvb_usb_device_init(intf, &su3000_properties, + THIS_MODULE, NULL, adapter_nr) && + dvb_usb_device_init(intf, &t220_properties, + THIS_MODULE, NULL, adapter_nr) && + dvb_usb_device_init(intf, &tt_s2_4600_properties, + THIS_MODULE, NULL, adapter_nr))) { return 0; } - retval = -ENODEV; - kfree(s421); -err3: - kfree(p7500); -err2: - kfree(s660); -err1: - kfree(p1100); -err0: - return retval; + return -ENODEV; } static void dw2102_disconnect(struct usb_interface *intf) diff --git a/drivers/media/usb/dvb-usb/m920x.c b/drivers/media/usb/dvb-usb/m920x.c index 4bb5b82599a790c615422fe360ce20f4bdf37a9d..691e05833db1959e27858445e7e3594f5b4df7fc 100644 --- a/drivers/media/usb/dvb-usb/m920x.c +++ b/drivers/media/usb/dvb-usb/m920x.c @@ -274,6 +274,13 @@ static int m920x_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int nu /* Should check for ack here, if we knew how. */ } if (msg[i].flags & I2C_M_RD) { + char *read = kmalloc(1, GFP_KERNEL); + if (!read) { + ret = -ENOMEM; + kfree(read); + goto unlock; + } + for (j = 0; j < msg[i].len; j++) { /* Last byte of transaction? * Send STOP, otherwise send ACK. */ @@ -281,9 +288,12 @@ static int m920x_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int nu if ((ret = m920x_read(d->udev, M9206_I2C, 0x0, 0x20 | stop, - &msg[i].buf[j], 1)) != 0) + read, 1)) != 0) goto unlock; + msg[i].buf[j] = read[0]; } + + kfree(read); } else { for (j = 0; j < msg[i].len; j++) { /* Last byte of transaction? Then send STOP. 
*/ diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c index cf45cc566cbe2e7a2a6fde6422fc0c134b392b09..87e375562dbb2b1df019c6ea305a222a298b01df 100644 --- a/drivers/media/usb/em28xx/em28xx-cards.c +++ b/drivers/media/usb/em28xx/em28xx-cards.c @@ -3575,8 +3575,10 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev, if (dev->is_audio_only) { retval = em28xx_audio_setup(dev); - if (retval) - return -ENODEV; + if (retval) { + retval = -ENODEV; + goto err_deinit_media; + } em28xx_init_extension(dev); return 0; @@ -3595,7 +3597,7 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev, dev_err(&dev->intf->dev, "%s: em28xx_i2c_register bus 0 - error [%d]!\n", __func__, retval); - return retval; + goto err_deinit_media; } /* register i2c bus 1 */ @@ -3611,9 +3613,7 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev, "%s: em28xx_i2c_register bus 1 - error [%d]!\n", __func__, retval); - em28xx_i2c_unregister(dev, 0); - - return retval; + goto err_unreg_i2c; } } @@ -3621,6 +3621,12 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev, em28xx_card_setup(dev); return 0; + +err_unreg_i2c: + em28xx_i2c_unregister(dev, 0); +err_deinit_media: + em28xx_unregister_media_device(dev); + return retval; } static int em28xx_duplicate_dev(struct em28xx *dev) diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c index af9216278024f7c4ddb13e22fc817210c1cfed80..308bc029099d9da5de834f76e8e7b1b8fc4831f5 100644 --- a/drivers/media/usb/em28xx/em28xx-core.c +++ b/drivers/media/usb/em28xx/em28xx-core.c @@ -89,7 +89,7 @@ int em28xx_read_reg_req_len(struct em28xx *dev, u8 req, u16 reg, mutex_lock(&dev->ctrl_urb_lock); ret = usb_control_msg(udev, pipe, req, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - 0x0000, reg, dev->urb_buf, len, HZ); + 0x0000, reg, dev->urb_buf, len, 1000); if (ret < 0) { em28xx_regdbg("(pipe 0x%08x): IN: %02x %02x %02x %02x %02x %02x %02x %02x failed with error %i\n", pipe, @@ -158,7 +158,7 @@ int em28xx_write_regs_req(struct em28xx *dev, u8 req, u16 reg, char *buf, memcpy(dev->urb_buf, buf, len); ret = usb_control_msg(udev, pipe, req, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - 0x0000, reg, dev->urb_buf, len, HZ); + 0x0000, reg, dev->urb_buf, len, 1000); mutex_unlock(&dev->ctrl_urb_lock); if (ret < 0) { diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c index d38dee1792e41f21b2f21758fe393e92b5a061d4..3915d551d59e7b2f564a2d222d5ee9d21cf1028b 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c @@ -1467,7 +1467,7 @@ static int pvr2_upload_firmware1(struct pvr2_hdw *hdw) for (address = 0; address < fwsize; address += 0x800) { memcpy(fw_ptr, fw_entry->data + address, 0x800); ret += usb_control_msg(hdw->usb_dev, pipe, 0xa0, 0x40, address, - 0, fw_ptr, 0x800, HZ); + 0, fw_ptr, 0x800, 1000); } trace_firmware("Upload done, releasing device's CPU"); @@ -1605,7 +1605,7 @@ int pvr2_upload_firmware2(struct pvr2_hdw *hdw) ((u32 *)fw_ptr)[icnt] = swab32(((u32 *)fw_ptr)[icnt]); ret |= usb_bulk_msg(hdw->usb_dev, pipe, fw_ptr,bcnt, - &actual_length, HZ); + &actual_length, 1000); ret |= (actual_length != bcnt); if (ret) break; fw_done += bcnt; @@ -3438,7 +3438,7 @@ void pvr2_hdw_cpufw_set_enabled(struct pvr2_hdw *hdw, 0xa0,0xc0, address,0, hdw->fw_buffer+address, - 0x800,HZ); + 0x800,1000); if (ret < 0) break; } @@ -3977,7 +3977,7 @@ void 
pvr2_hdw_cpureset_assert(struct pvr2_hdw *hdw,int val) /* Write the CPUCS register on the 8051. The lsb of the register is the reset bit; a 1 asserts reset while a 0 clears it. */ pipe = usb_sndctrlpipe(hdw->usb_dev, 0); - ret = usb_control_msg(hdw->usb_dev,pipe,0xa0,0x40,0xe600,0,da,1,HZ); + ret = usb_control_msg(hdw->usb_dev,pipe,0xa0,0x40,0xe600,0,da,1,1000); if (ret < 0) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, "cpureset_assert(%d) error=%d",val,ret); diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c index 4af55e2478be19ef0cd85c723bdb8e3596455426..cb15eb32d2a6be0bb6a360e842bfa134c7a73ba5 100644 --- a/drivers/media/usb/s2255/s2255drv.c +++ b/drivers/media/usb/s2255/s2255drv.c @@ -1884,7 +1884,7 @@ static long s2255_vendor_req(struct s2255_dev *dev, unsigned char Request, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, Value, Index, buf, - TransferBufferLength, HZ * 5); + TransferBufferLength, USB_CTRL_SET_TIMEOUT); if (r >= 0) memcpy(TransferBuffer, buf, TransferBufferLength); @@ -1893,7 +1893,7 @@ static long s2255_vendor_req(struct s2255_dev *dev, unsigned char Request, r = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), Request, USB_TYPE_VENDOR | USB_RECIP_DEVICE, Value, Index, buf, - TransferBufferLength, HZ * 5); + TransferBufferLength, USB_CTRL_SET_TIMEOUT); } kfree(buf); return r; diff --git a/drivers/media/usb/stk1160/stk1160-core.c b/drivers/media/usb/stk1160/stk1160-core.c index b4f8bc5db138917baa34d827cc209554c15ef43e..4e1698f7881876010ff05f18b39610d863fccaa6 100644 --- a/drivers/media/usb/stk1160/stk1160-core.c +++ b/drivers/media/usb/stk1160/stk1160-core.c @@ -65,7 +65,7 @@ int stk1160_read_reg(struct stk1160 *dev, u16 reg, u8 *value) return -ENOMEM; ret = usb_control_msg(dev->udev, pipe, 0x00, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - 0x00, reg, buf, sizeof(u8), HZ); + 0x00, reg, buf, sizeof(u8), 1000); if (ret < 0) { stk1160_err("read failed on reg 0x%x (%d)\n", reg, ret); @@ -85,7 +85,7 @@ int stk1160_write_reg(struct stk1160 *dev, u16 reg, u16 value) ret = usb_control_msg(dev->udev, pipe, 0x01, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - value, reg, NULL, 0, HZ); + value, reg, NULL, 0, 1000); if (ret < 0) { stk1160_err("write failed on reg 0x%x (%d)\n", reg, ret); diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c index b8477fa93b7d7733c8226634b282a8545ee45f71..f6373d678d256361b14024406688680af7d3a91e 100644 --- a/drivers/media/usb/uvc/uvc_video.c +++ b/drivers/media/usb/uvc/uvc_video.c @@ -1915,6 +1915,10 @@ static int uvc_video_start_transfer(struct uvc_streaming *stream, if (ep == NULL) return -EIO; + /* Reject broken descriptors. */ + if (usb_endpoint_maxp(&ep->desc) == 0) + return -EIO; + ret = uvc_init_video_bulk(stream, ep, gfp_flags); } diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h index a3dfacf069c44dede79b361f4c4e0d21002932b7..c884020b287843270872f88b36e0856ff0f18f0d 100644 --- a/drivers/media/usb/uvc/uvcvideo.h +++ b/drivers/media/usb/uvc/uvcvideo.h @@ -183,7 +183,7 @@ /* Maximum status buffer size in bytes of interrupt URB. 
*/ #define UVC_MAX_STATUS_SIZE 16 -#define UVC_CTRL_CONTROL_TIMEOUT 500 +#define UVC_CTRL_CONTROL_TIMEOUT 5000 #define UVC_CTRL_STREAMING_TIMEOUT 5000 /* Maximum allowed number of control mappings per device */ diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index 3204a2fab361c8485821fa086338b9801849b483..2ee902f64ddc79ac001429434f54cb986385291f 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -2133,6 +2133,7 @@ static int v4l_prepare_buf(const struct v4l2_ioctl_ops *ops, static int v4l_g_parm(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { + struct video_device *vfd = video_devdata(file); struct v4l2_streamparm *p = arg; v4l2_std_id std; int ret = check_fmt(file, p->type); @@ -2144,7 +2145,8 @@ static int v4l_g_parm(const struct v4l2_ioctl_ops *ops, if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE && p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) return -EINVAL; - p->parm.capture.readbuffers = 2; + if (vfd->device_caps & V4L2_CAP_READWRITE) + p->parm.capture.readbuffers = 2; ret = ops->vidioc_g_std(file, fh, &std); if (ret == 0) v4l2_video_std_frame_period(std, &p->parm.capture.timeperframe); diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c index a760ab08256ff47aeffe8a7e3fa4e88a99683022..9019121a80f53ded8c553ae1fa49723ca09dd1cf 100644 --- a/drivers/memory/renesas-rpc-if.c +++ b/drivers/memory/renesas-rpc-if.c @@ -245,7 +245,7 @@ int rpcif_sw_init(struct rpcif *rpc, struct device *dev) res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap"); rpc->dirmap = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(rpc->dirmap)) - rpc->dirmap = NULL; + return PTR_ERR(rpc->dirmap); rpc->size = resource_size(res); rpc->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); diff --git a/drivers/mfd/atmel-flexcom.c b/drivers/mfd/atmel-flexcom.c index d2f5c073fdf31553d4d107c7d605f0fe155671d8..559eb4d352b68b21c69fe3ad5002b37ac7cd6c22 100644 --- a/drivers/mfd/atmel-flexcom.c +++ b/drivers/mfd/atmel-flexcom.c @@ -87,8 +87,7 @@ static const struct of_device_id atmel_flexcom_of_match[] = { }; MODULE_DEVICE_TABLE(of, atmel_flexcom_of_match); -#ifdef CONFIG_PM_SLEEP -static int atmel_flexcom_resume(struct device *dev) +static int __maybe_unused atmel_flexcom_resume_noirq(struct device *dev) { struct atmel_flexcom *ddata = dev_get_drvdata(dev); int err; @@ -105,16 +104,16 @@ static int atmel_flexcom_resume(struct device *dev) return 0; } -#endif -static SIMPLE_DEV_PM_OPS(atmel_flexcom_pm_ops, NULL, - atmel_flexcom_resume); +static const struct dev_pm_ops atmel_flexcom_pm_ops = { + .resume_noirq = atmel_flexcom_resume_noirq, +}; static struct platform_driver atmel_flexcom_driver = { .probe = atmel_flexcom_probe, .driver = { .name = "atmel_flexcom", - .pm = &atmel_flexcom_pm_ops, + .pm = pm_ptr(&atmel_flexcom_pm_ops), .of_match_table = atmel_flexcom_of_match, }, }; diff --git a/drivers/misc/eeprom/ee1004.c b/drivers/misc/eeprom/ee1004.c index 252e15ba65e111c0595dc2683dec25620cfb5f3a..d9f90332aaf65ecadf5b28e8c4191980d9c39f7e 100644 --- a/drivers/misc/eeprom/ee1004.c +++ b/drivers/misc/eeprom/ee1004.c @@ -82,6 +82,9 @@ static ssize_t ee1004_eeprom_read(struct i2c_client *client, char *buf, if (unlikely(offset + count > EE1004_PAGE_SIZE)) count = EE1004_PAGE_SIZE - offset; + if (count > I2C_SMBUS_BLOCK_MAX) + count = I2C_SMBUS_BLOCK_MAX; + status = i2c_smbus_read_i2c_block_data_or_emulated(client, offset, count, buf); dev_dbg(&client->dev, "read %zu@%d --> %d\n", 
count, offset, status); diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index ef49ac8d910192dc242e3d7219e727e16af588bf..d0471fec37fbb30d6519c7b7749ddea8058ef523 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -1284,7 +1284,14 @@ static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp) } if (copy_to_user(argp, &bp, sizeof(bp))) { - dma_buf_put(buf->dmabuf); + /* + * The usercopy failed, but we can't do much about it, as + * dma_buf_fd() already called fd_install() and made the + * file descriptor accessible for the current process. It + * might already be closed and dmabuf no longer valid when + * we reach this point. Therefore "leak" the fd and rely on + * the process exit path to do any required cleanup. + */ return -EFAULT; } diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c index 5eaf74447ca1e48764b446512e42d538579874eb..556bb7d705f532c7465917bc35de01187d263912 100644 --- a/drivers/misc/lattice-ecp3-config.c +++ b/drivers/misc/lattice-ecp3-config.c @@ -76,12 +76,12 @@ static void firmware_load(const struct firmware *fw, void *context) if (fw == NULL) { dev_err(&spi->dev, "Cannot load firmware, aborting\n"); - return; + goto out; } if (fw->size == 0) { dev_err(&spi->dev, "Error: Firmware size is 0!\n"); - return; + goto out; } /* Fill dummy data (24 stuffing bits for commands) */ @@ -103,7 +103,7 @@ static void firmware_load(const struct firmware *fw, void *context) dev_err(&spi->dev, "Error: No supported FPGA detected (JEDEC_ID=%08x)!\n", jedec_id); - return; + goto out; } dev_info(&spi->dev, "FPGA %s detected\n", ecp3_dev[i].name); @@ -116,7 +116,7 @@ static void firmware_load(const struct firmware *fw, void *context) buffer = kzalloc(fw->size + 8, GFP_KERNEL); if (!buffer) { dev_err(&spi->dev, "Error: Can't allocate memory!\n"); - return; + goto out; } /* @@ -155,7 +155,7 @@ static void firmware_load(const struct firmware *fw, void *context) "Error: Timeout waiting for FPGA to clear (status=%08x)!\n", status); kfree(buffer); - return; + goto out; } dev_info(&spi->dev, "Configuring the FPGA...\n"); @@ -181,7 +181,7 @@ static void firmware_load(const struct firmware *fw, void *context) release_firmware(fw); kfree(buffer); - +out: complete(&data->fw_loaded); } diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile index 30c8ac24635d4cb14ef9f8534687f9611ce09ce7..4405fb2bc7a0004b4346f3faf649eb8cf5c4586c 100644 --- a/drivers/misc/lkdtm/Makefile +++ b/drivers/misc/lkdtm/Makefile @@ -16,7 +16,7 @@ KCOV_INSTRUMENT_rodata.o := n OBJCOPYFLAGS := OBJCOPYFLAGS_rodata_objcopy.o := \ - --rename-section .noinstr.text=.rodata,alloc,readonly,load + --rename-section .noinstr.text=.rodata,alloc,readonly,load,contents targets += rodata.o rodata_objcopy.o $(obj)/rodata_objcopy.o: $(obj)/rodata.o FORCE $(call if_changed,objcopy) diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index da599f075bc2f7e71e7577bb7db0d68660da3fe8..2eae0a77b2f79d30d428e9e962b7c74246a20040 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -1649,31 +1649,31 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req) struct mmc_card *card = mq->card; struct mmc_host *host = card->host; blk_status_t error = BLK_STS_OK; - int retries = 0; do { u32 status; int err; + int retries = 0; - mmc_blk_rw_rq_prep(mqrq, card, 1, mq); + while (retries++ <= MMC_READ_SINGLE_RETRIES) { + mmc_blk_rw_rq_prep(mqrq, card, 1, mq); - mmc_wait_for_req(host, mrq); + mmc_wait_for_req(host, mrq); - err = 
mmc_send_status(card, &status); - if (err) - goto error_exit; - - if (!mmc_host_is_spi(host) && - !mmc_ready_for_data(status)) { - err = mmc_blk_fix_state(card, req); + err = mmc_send_status(card, &status); if (err) goto error_exit; - } - if (mrq->cmd->error && retries++ < MMC_READ_SINGLE_RETRIES) - continue; + if (!mmc_host_is_spi(host) && + !mmc_ready_for_data(status)) { + err = mmc_blk_fix_state(card, req); + if (err) + goto error_exit; + } - retries = 0; + if (!mrq->cmd->error) + break; + } if (mrq->cmd->error || mrq->data->error || diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index 1b0853a82189aceefcd70a2e491fc0ac1ddcf183..99a4ce68d82f1e2c753745a86027526c475dd317 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -708,6 +708,8 @@ try_again: if (host->ops->init_card) host->ops->init_card(host, card); + card->ocr = ocr_card; + /* * If the host and card support UHS-I mode request the card * to switch to 1.8V signaling level. No 1.8v signalling if @@ -820,7 +822,7 @@ try_again: goto mismatch; } } - card->ocr = ocr_card; + mmc_fixup_device(card, sdio_fixup_methods); if (card->type == MMC_TYPE_SD_COMBO) { diff --git a/drivers/mmc/host/meson-mx-sdhc-mmc.c b/drivers/mmc/host/meson-mx-sdhc-mmc.c index 8fdd0bbbfa21fb0ee77a769446f0769f04749ec4..28aa78aa08f3f2752d3da3b2db2a51e98ab80e6a 100644 --- a/drivers/mmc/host/meson-mx-sdhc-mmc.c +++ b/drivers/mmc/host/meson-mx-sdhc-mmc.c @@ -854,6 +854,11 @@ static int meson_mx_sdhc_probe(struct platform_device *pdev) goto err_disable_pclk; irq = platform_get_irq(pdev, 0); + if (irq < 0) { + ret = irq; + goto err_disable_pclk; + } + ret = devm_request_threaded_irq(dev, irq, meson_mx_sdhc_irq, meson_mx_sdhc_irq_thread, IRQF_ONESHOT, NULL, host); diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c index 1c5299cd0cbe1333dc394e0c432a102a884616fd..264aae2a2b0cfa62bf68b6984399cc15a83fc305 100644 --- a/drivers/mmc/host/meson-mx-sdio.c +++ b/drivers/mmc/host/meson-mx-sdio.c @@ -663,6 +663,11 @@ static int meson_mx_mmc_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); + if (irq < 0) { + ret = irq; + goto error_free_mmc; + } + ret = devm_request_threaded_irq(host->controller_dev, irq, meson_mx_mmc_irq, meson_mx_mmc_irq_thread, IRQF_ONESHOT, diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c index 7697068ad9695144efacf999f4f4b2941f91275b..ea67a7ef2390c834d2118f58d610be5a557ef859 100644 --- a/drivers/mmc/host/moxart-mmc.c +++ b/drivers/mmc/host/moxart-mmc.c @@ -708,12 +708,12 @@ static int moxart_remove(struct platform_device *pdev) if (!IS_ERR_OR_NULL(host->dma_chan_rx)) dma_release_channel(host->dma_chan_rx); mmc_remove_host(mmc); - mmc_free_host(mmc); writel(0, host->base + REG_INTERRUPT_MASK); writel(0, host->base + REG_POWER_CONTROL); writel(readl(host->base + REG_CLOCK_CONTROL) | CLK_OFF, host->base + REG_CLOCK_CONTROL); + mmc_free_host(mmc); return 0; } diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index ab5ab969f711deecdbca38e27c758b2f2b416bd4..343648fcbc31f1b228b3f8ca4a3cea6fbd8207c6 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -524,12 +524,16 @@ static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask) static int esdhc_of_enable_dma(struct sdhci_host *host) { + int ret; u32 value; struct device *dev = mmc_dev(host->mmc); if (of_device_is_compatible(dev->of_node, "fsl,ls1043a-esdhc") || - of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc")) - 
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)); + of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc")) { + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)); + if (ret) + return ret; + } value = sdhci_readl(host, ESDHC_DMA_SYSCTL); diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig index aef14990e5f7cfc6c0e115f66a6047e03e4edf9a..19726ebd973d0bbde3d118688401baedc1169f76 100644 --- a/drivers/mtd/chips/Kconfig +++ b/drivers/mtd/chips/Kconfig @@ -55,12 +55,14 @@ choice LITTLE_ENDIAN_BYTE, if the bytes are reversed. config MTD_CFI_NOSWAP + depends on !ARCH_IXP4XX || CPU_BIG_ENDIAN bool "NO" config MTD_CFI_BE_BYTE_SWAP bool "BIG_ENDIAN_BYTE" config MTD_CFI_LE_BYTE_SWAP + depends on !ARCH_IXP4XX bool "LITTLE_ENDIAN_BYTE" endchoice diff --git a/drivers/mtd/hyperbus/rpc-if.c b/drivers/mtd/hyperbus/rpc-if.c index ecb050ba95cdff52e22469672b60e366ca265abf..dc164c18f8429fe5d632b264084af0bc9b7e574c 100644 --- a/drivers/mtd/hyperbus/rpc-if.c +++ b/drivers/mtd/hyperbus/rpc-if.c @@ -124,7 +124,9 @@ static int rpcif_hb_probe(struct platform_device *pdev) if (!hyperbus) return -ENOMEM; - rpcif_sw_init(&hyperbus->rpc, pdev->dev.parent); + error = rpcif_sw_init(&hyperbus->rpc, pdev->dev.parent); + if (error) + return error; platform_set_drvdata(pdev, hyperbus); @@ -150,9 +152,9 @@ static int rpcif_hb_remove(struct platform_device *pdev) { struct rpcif_hyperbus *hyperbus = platform_get_drvdata(pdev); int error = hyperbus_unregister_device(&hyperbus->hbdev); - struct rpcif *rpc = dev_get_drvdata(pdev->dev.parent); - rpcif_disable_rpm(rpc); + rpcif_disable_rpm(&hyperbus->rpc); + return error; } diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig index 6650acbc961e972089aa97fdf56f90b987075a71..fc0aaa03c524272ed065837c438b277c921b4fb0 100644 --- a/drivers/mtd/maps/Kconfig +++ b/drivers/mtd/maps/Kconfig @@ -325,7 +325,7 @@ config MTD_DC21285 config MTD_IXP4XX tristate "CFI Flash device mapped on Intel IXP4xx based systems" - depends on MTD_CFI && MTD_COMPLEX_MAPPINGS && ARCH_IXP4XX + depends on MTD_CFI && MTD_COMPLEX_MAPPINGS && ARCH_IXP4XX && MTD_CFI_ADV_OPTIONS help This enables MTD access to flash devices on platforms based on Intel's IXP4xx family of network processors such as the diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 95d47422bbf2039f9e92bc7d09e08c548d7e3b0a..5725818fa199f72c056e9f1bb5be10eb2126e93a 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -313,7 +313,7 @@ static int __mtd_del_partition(struct mtd_info *mtd) if (err) return err; - list_del(&child->part.node); + list_del(&mtd->part.node); free_partition(mtd); return 0; diff --git a/drivers/mtd/nand/bbt.c b/drivers/mtd/nand/bbt.c index 044adf91385465ca73e54f53238fb68b03c8a604..64af6898131d656401a42135855c22e183df4109 100644 --- a/drivers/mtd/nand/bbt.c +++ b/drivers/mtd/nand/bbt.c @@ -123,7 +123,7 @@ int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry, unsigned int rbits = bits_per_block + offs - BITS_PER_LONG; pos[1] &= ~GENMASK(rbits - 1, 0); - pos[1] |= val >> rbits; + pos[1] |= val >> (bits_per_block - rbits); } return 0; diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index 909b14cc8e55cf880ebd0e040774e85a7102161c..580b91cbd18defc7a085cc9dad965c4b9b7159f4 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -2062,7 +2062,7 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip, mtd->oobsize / trans, 
host->hwcfg.sector_size_1k); - if (!ret) { + if (ret != -EBADMSG) { *err_addr = brcmnand_get_uncorrecc_addr(ctrl); if (*err_addr) diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c index f8c36d19ab47f36aba5d2c1696bd795058bb084b..bfd3f440aca574931e30e22e96e4a6e891ea0f4b 100644 --- a/drivers/mtd/nand/raw/davinci_nand.c +++ b/drivers/mtd/nand/raw/davinci_nand.c @@ -372,17 +372,15 @@ correct: } /** - * nand_read_page_hwecc_oob_first - hw ecc, read oob first + * nand_davinci_read_page_hwecc_oob_first - Hardware ECC page read with ECC + * data read from OOB area * @chip: nand chip info structure * @buf: buffer to store read data * @oob_required: caller requires OOB data read to chip->oob_poi * @page: page number to read * - * Hardware ECC for large page chips, require OOB to be read first. For this - * ECC mode, the write_page method is re-used from ECC_HW. These methods - * read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support with - * multiple ECC steps, follows the "infix ECC" scheme and reads/writes ECC from - * the data area, by overwriting the NAND manufacturer bad block markings. + * Hardware ECC for large page chips, which requires the ECC data to be + * extracted from the OOB before the actual data is read. */ static int nand_davinci_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf, @@ -394,7 +392,6 @@ static int nand_davinci_read_page_hwecc_oob_first(struct nand_chip *chip, int eccsteps = chip->ecc.steps; uint8_t *p = buf; uint8_t *ecc_code = chip->ecc.code_buf; - uint8_t *ecc_calc = chip->ecc.calc_buf; unsigned int max_bitflips = 0; /* Read the OOB area first */ @@ -402,7 +399,8 @@ static int nand_davinci_read_page_hwecc_oob_first(struct nand_chip *chip, if (ret) return ret; - ret = nand_read_page_op(chip, page, 0, NULL, 0); + /* Move read cursor to start of page */ + ret = nand_change_read_column_op(chip, 0, NULL, 0, false); if (ret) return ret; @@ -420,8 +418,6 @@ static int nand_davinci_read_page_hwecc_oob_first(struct nand_chip *chip, if (ret) return ret; - chip->ecc.calculate(chip, p, &ecc_calc[i]); - stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL); if (stat == -EBADMSG && (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) { diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index a6658567d55c0a637c8b484abc5f938d49728d2c..cb7631145700a56f700fa861d4ed42f88817fc50 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -711,14 +711,32 @@ static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this, (use_half_period ? BM_GPMI_CTRL1_HALF_PERIOD : 0); } -static void gpmi_nfc_apply_timings(struct gpmi_nand_data *this) +static int gpmi_nfc_apply_timings(struct gpmi_nand_data *this) { struct gpmi_nfc_hardware_timing *hw = &this->hw; struct resources *r = &this->resources; void __iomem *gpmi_regs = r->gpmi_regs; unsigned int dll_wait_time_us; + int ret; - clk_set_rate(r->clock[0], hw->clk_rate); + /* Clock dividers do NOT guarantee a clean clock signal on its output + * during the change of the divide factor on i.MX6Q/UL/SX. On i.MX7/8, + * all clock dividers provide these guarantee. 
+ */ + if (GPMI_IS_MX6Q(this) || GPMI_IS_MX6SX(this)) + clk_disable_unprepare(r->clock[0]); + + ret = clk_set_rate(r->clock[0], hw->clk_rate); + if (ret) { + dev_err(this->dev, "cannot set clock rate to %lu Hz: %d\n", hw->clk_rate, ret); + return ret; + } + + if (GPMI_IS_MX6Q(this) || GPMI_IS_MX6SX(this)) { + ret = clk_prepare_enable(r->clock[0]); + if (ret) + return ret; + } writel(hw->timing0, gpmi_regs + HW_GPMI_TIMING0); writel(hw->timing1, gpmi_regs + HW_GPMI_TIMING1); @@ -737,6 +755,8 @@ static void gpmi_nfc_apply_timings(struct gpmi_nand_data *this) /* Wait for the DLL to settle. */ udelay(dll_wait_time_us); + + return 0; } static int gpmi_setup_interface(struct nand_chip *chip, int chipnr, @@ -1032,15 +1052,6 @@ static int gpmi_get_clks(struct gpmi_nand_data *this) r->clock[i] = clk; } - if (GPMI_IS_MX6(this)) - /* - * Set the default value for the gpmi clock. - * - * If you want to use the ONFI nand which is in the - * Synchronous Mode, you should change the clock as you need. - */ - clk_set_rate(r->clock[0], 22000000); - return 0; err_clock: @@ -2278,7 +2289,9 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip, */ if (this->hw.must_apply_timings) { this->hw.must_apply_timings = false; - gpmi_nfc_apply_timings(this); + ret = gpmi_nfc_apply_timings(this); + if (ret) + goto out_pm; } dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs); @@ -2407,6 +2420,7 @@ unmap: this->bch = false; +out_pm: pm_runtime_mark_last_busy(this->dev); pm_runtime_put_autosuspend(this->dev); diff --git a/drivers/mtd/nand/raw/mpc5121_nfc.c b/drivers/mtd/nand/raw/mpc5121_nfc.c index cb293c50acb876fe565adfd93aec2f024e857e44..5b9271b9c32655b4d4d8f2d972c3d478d7b3b6d9 100644 --- a/drivers/mtd/nand/raw/mpc5121_nfc.c +++ b/drivers/mtd/nand/raw/mpc5121_nfc.c @@ -291,7 +291,6 @@ static int ads5121_chipselect_init(struct mtd_info *mtd) /* Control chips select signal on ADS5121 board */ static void ads5121_select_chip(struct nand_chip *nand, int chip) { - struct mtd_info *mtd = nand_to_mtd(nand); struct mpc5121_nfc_prv *prv = nand_get_controller_data(nand); u8 v; diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c index b99d2e9d1e2c49a86157c8e06d08df067928a87e..bb181e18c7c52b80ca2fe7fff6c03f0f70324a3e 100644 --- a/drivers/mtd/nand/raw/qcom_nandc.c +++ b/drivers/mtd/nand/raw/qcom_nandc.c @@ -2,7 +2,6 @@ /* * Copyright (c) 2016, The Linux Foundation. All rights reserved. 
*/ - #include #include #include @@ -2968,10 +2967,6 @@ static int qcom_nandc_probe(struct platform_device *pdev) if (!nandc->base_dma) return -ENXIO; - ret = qcom_nandc_alloc(nandc); - if (ret) - goto err_nandc_alloc; - ret = clk_prepare_enable(nandc->core_clk); if (ret) goto err_core_clk; @@ -2980,6 +2975,10 @@ static int qcom_nandc_probe(struct platform_device *pdev) if (ret) goto err_aon_clk; + ret = qcom_nandc_alloc(nandc); + if (ret) + goto err_nandc_alloc; + ret = qcom_nandc_setup(nandc); if (ret) goto err_setup; @@ -2991,15 +2990,14 @@ static int qcom_nandc_probe(struct platform_device *pdev) return 0; err_setup: + qcom_nandc_unalloc(nandc); +err_nandc_alloc: clk_disable_unprepare(nandc->aon_clk); err_aon_clk: clk_disable_unprepare(nandc->core_clk); err_core_clk: - qcom_nandc_unalloc(nandc); -err_nandc_alloc: dma_unmap_resource(dev, res->start, resource_size(res), DMA_BIDIRECTIONAL, 0); - return ret; } diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index aa001b16765ae152e666fb1fa0d2ebd4ef8784e6..c2cef7ba2671969c74737d26ec3b6ea992d3b694 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -223,7 +223,7 @@ static inline int __check_agg_selection_timer(struct port *port) if (bond == NULL) return 0; - return BOND_AD_INFO(bond).agg_select_timer ? 1 : 0; + return atomic_read(&BOND_AD_INFO(bond).agg_select_timer) ? 1 : 0; } /** @@ -1003,8 +1003,8 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr) if (port->aggregator && port->aggregator->is_active && !__port_is_enabled(port)) { - __enable_port(port); + *update_slave_arr = true; } } break; @@ -1760,6 +1760,7 @@ static void ad_agg_selection_logic(struct aggregator *agg, port = port->next_port_in_aggregator) { __enable_port(port); } + *update_slave_arr = true; } } @@ -1975,7 +1976,7 @@ static void ad_marker_response_received(struct bond_marker *marker, */ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout) { - BOND_AD_INFO(bond).agg_select_timer = timeout; + atomic_set(&BOND_AD_INFO(bond).agg_select_timer, timeout); } /** @@ -2258,6 +2259,28 @@ void bond_3ad_update_ad_actor_settings(struct bonding *bond) spin_unlock_bh(&bond->mode_lock); } +/** + * bond_agg_timer_advance - advance agg_select_timer + * @bond: bonding structure + * + * Return true when agg_select_timer reaches 0. + */ +static bool bond_agg_timer_advance(struct bonding *bond) +{ + int val, nval; + + while (1) { + val = atomic_read(&BOND_AD_INFO(bond).agg_select_timer); + if (!val) + return false; + nval = val - 1; + if (atomic_cmpxchg(&BOND_AD_INFO(bond).agg_select_timer, + val, nval) == val) + break; + } + return nval == 0; +} + /** * bond_3ad_state_machine_handler - handle state machines timeout * @work: work context to fetch bonding struct to work on from @@ -2293,9 +2316,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work) if (!bond_has_slaves(bond)) goto re_arm; - /* check if agg_select_timer timer after initialize is timed out */ - if (BOND_AD_INFO(bond).agg_select_timer && - !(--BOND_AD_INFO(bond).agg_select_timer)) { + if (bond_agg_timer_advance(bond)) { slave = bond_first_slave_rcu(bond); port = slave ? 
&(SLAVE_AD_INFO(slave)->port) : NULL; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 645c7cabcbe4ddabaf9faafab26fce4455d1e340..cbeb69bca0bba2e1dc879aa9d2fd175596959411 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1061,9 +1061,6 @@ static bool bond_should_notify_peers(struct bonding *bond) slave = rcu_dereference(bond->curr_active_slave); rcu_read_unlock(); - netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n", - slave ? slave->dev->name : "NULL"); - if (!slave || !bond->send_peer_notif || bond->send_peer_notif % max(1, bond->params.peer_notif_delay) != 0 || @@ -1071,6 +1068,9 @@ static bool bond_should_notify_peers(struct bonding *bond) test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state)) return false; + netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n", + slave ? slave->dev->name : "NULL"); + return true; } @@ -2272,10 +2272,9 @@ static int __bond_release_one(struct net_device *bond_dev, bond_select_active_slave(bond); } - if (!bond_has_slaves(bond)) { - bond_set_carrier(bond); + bond_set_carrier(bond); + if (!bond_has_slaves(bond)) eth_hw_addr_random(bond_dev); - } unblock_netpoll_tx(); synchronize_rcu(); @@ -4562,25 +4561,39 @@ static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb, struct bonding *bond = netdev_priv(bond_dev); struct slave *slave = NULL; struct list_head *iter; + bool xmit_suc = false; + bool skb_used = false; bond_for_each_slave_rcu(bond, slave, iter) { - if (bond_is_last_slave(bond, slave)) - break; - if (bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) { - struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); + struct sk_buff *skb2; + + if (!(bond_slave_is_up(slave) && slave->link == BOND_LINK_UP)) + continue; + if (bond_is_last_slave(bond, slave)) { + skb2 = skb; + skb_used = true; + } else { + skb2 = skb_clone(skb, GFP_ATOMIC); if (!skb2) { net_err_ratelimited("%s: Error: %s: skb_clone() failed\n", bond_dev->name, __func__); continue; } - bond_dev_queue_xmit(bond, skb2, slave->dev); } + + if (bond_dev_queue_xmit(bond, skb2, slave->dev) == NETDEV_TX_OK) + xmit_suc = true; } - if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) - return bond_dev_queue_xmit(bond, skb, slave->dev); - return bond_tx_drop(bond_dev, skb); + if (!skb_used) + dev_kfree_skb_any(skb); + + if (xmit_suc) + return NETDEV_TX_OK; + + atomic_long_inc(&bond_dev->tx_dropped); + return NET_XMIT_DROP; } /*------------------------- Device initialization ---------------------------*/ diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c index 2e93ee79237395e310b5f16e0ef15aa77e427111..e5c939b63fa65889e34e274aaee1426a349b056e 100644 --- a/drivers/net/can/softing/softing_cs.c +++ b/drivers/net/can/softing/softing_cs.c @@ -293,7 +293,7 @@ static int softingcs_probe(struct pcmcia_device *pcmcia) return 0; platform_failed: - kfree(dev); + platform_device_put(pdev); mem_failed: pcmcia_bad: pcmcia_failed: diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c index ccd649a8e37bdc23de9449c42db7cdda2e714fd4..bad69a4abec1043e5d62e87725ccca07627eb8dd 100644 --- a/drivers/net/can/softing/softing_fw.c +++ b/drivers/net/can/softing/softing_fw.c @@ -565,18 +565,19 @@ int softing_startstop(struct net_device *dev, int up) if (ret < 0) goto failed; } - /* enable_error_frame */ - /* + + /* enable_error_frame + * * Error reporting is switched off at the moment since * the receiving of them is not yet 100% verified * This 
should be enabled sooner or later - * - if (error_reporting) { + */ + if (0 && error_reporting) { ret = softing_fct_cmd(card, 51, "enable_error_frame"); if (ret < 0) goto failed; } - */ + /* initialize interface */ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 2]); iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 4]); diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c index 4e13f6dfb91a2eecf603c8abc19dfc9e2e77e6e9..abe00a085f6fcd872481441120aa020471c6fe80 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c @@ -1288,7 +1288,7 @@ mcp251xfd_tef_obj_read(const struct mcp251xfd_priv *priv, len > tx_ring->obj_num || offset + len > tx_ring->obj_num)) { netdev_err(priv->ndev, - "Trying to read to many TEF objects (max=%d, offset=%d, len=%d).\n", + "Trying to read too many TEF objects (max=%d, offset=%d, len=%d).\n", tx_ring->obj_num, offset, len); return -ERANGE; } @@ -2497,7 +2497,7 @@ static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv) if (!mcp251xfd_is_251X(priv) && priv->devtype_data.model != devtype_data->model) { netdev_info(ndev, - "Detected %s, but firmware specifies a %s. Fixing up.", + "Detected %s, but firmware specifies a %s. Fixing up.\n", __mcp251xfd_get_model_str(devtype_data->model), mcp251xfd_get_model_str(priv)); } @@ -2534,7 +2534,7 @@ static int mcp251xfd_register_check_rx_int(struct mcp251xfd_priv *priv) return 0; netdev_info(priv->ndev, - "RX_INT active after softreset, disabling RX_INT support."); + "RX_INT active after softreset, disabling RX_INT support.\n"); devm_gpiod_put(&priv->spi->dev, priv->rx_int); priv->rx_int = NULL; diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 48d746e18f30219e49ed04c24165a789d0bc71fc..375998263af7a2abb70f5abca7d86c8619541346 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -1762,7 +1762,12 @@ static int xcan_probe(struct platform_device *pdev) spin_lock_init(&priv->tx_lock); /* Get IRQ for the device */ - ndev->irq = platform_get_irq(pdev, 0); + ret = platform_get_irq(pdev, 0); + if (ret < 0) + goto err_free; + + ndev->irq = ret; + ndev->flags |= IFF_ECHO; /* We support local echo */ platform_set_drvdata(pdev, ndev); diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 2451f61a38e4ae3582c41fe6a313e012d87031ef..9e32ea9c116473d532784e58606f64b6839974f2 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -36,6 +36,7 @@ config NET_DSA_MT7530 tristate "MediaTek MT753x and MT7621 Ethernet switch support" depends on NET_DSA select NET_DSA_TAG_MTK + select MEDIATEK_GE_PHY help This enables support for the MediaTek MT7530, MT7531, and MT7621 Ethernet switch chips. 
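
For reference, a minimal sketch (not taken from any hunk above, and using placeholder foo_* names) of the IRQ-lookup error handling that the xcan_probe(), meson_mx_sdhc_probe(), meson_mx_mmc_probe() and sni_82596_probe() changes in this series converge on: platform_get_irq() returns a negative errno on failure, so the result is checked and propagated before it is ever stored in the device structure, instead of registering a device with a bogus IRQ number.

/*
 * Illustrative sketch only, not part of the series. The foo_* names and
 * the bare alloc_etherdev() usage are placeholders; real probe routines
 * carry additional setup between the allocation and registration steps.
 */
#include <linux/platform_device.h>
#include <linux/etherdevice.h>

static int foo_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	int irq, ret;

	ndev = alloc_etherdev(0);
	if (!ndev)
		return -ENOMEM;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		/* Propagate the errno; do not fall through with irq == 0 */
		ret = irq;
		goto err_free;
	}
	ndev->irq = irq;

	platform_set_drvdata(pdev, ndev);
	return 0;

err_free:
	free_netdev(ndev);
	return ret;
}
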
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 690e9d9495e7593aa2eda07ff1dae9c2049fef3a..08a675a5328d718091ee6189e186d6dd06fec394 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -504,7 +504,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds) get_device(&priv->master_mii_bus->dev); priv->master_mii_dn = dn; - priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev); + priv->slave_mii_bus = mdiobus_alloc(); if (!priv->slave_mii_bus) { of_node_put(dn); return -ENOMEM; @@ -564,8 +564,10 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds) } err = mdiobus_register(priv->slave_mii_bus); - if (err && dn) + if (err && dn) { + mdiobus_free(priv->slave_mii_bus); of_node_put(dn); + } return err; } @@ -573,6 +575,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds) static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv) { mdiobus_unregister(priv->slave_mii_bus); + mdiobus_free(priv->slave_mii_bus); of_node_put(priv->master_mii_dn); } diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index dcf1fc89451f28f98f501536b1ce334f717b4d49..2044d440d7de4cc15c841c4f331a62d6d2cebc70 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -1305,7 +1305,7 @@ static int lan9303_probe_reset_gpio(struct lan9303 *chip, struct device_node *np) { chip->reset_gpio = devm_gpiod_get_optional(chip->dev, "reset", - GPIOD_OUT_LOW); + GPIOD_OUT_HIGH); if (IS_ERR(chip->reset_gpio)) return PTR_ERR(chip->reset_gpio); diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index 4d23a7aba79615c6bdd360a63d8511c6dbe36d63..80ef7ea779545c3db1d0bfb64256a30aaac82a96 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -495,8 +495,9 @@ static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg) static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np) { struct dsa_switch *ds = priv->ds; + int err; - ds->slave_mii_bus = devm_mdiobus_alloc(priv->dev); + ds->slave_mii_bus = mdiobus_alloc(); if (!ds->slave_mii_bus) return -ENOMEM; @@ -509,7 +510,11 @@ static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np) ds->slave_mii_bus->parent = priv->dev; ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask; - return of_mdiobus_register(ds->slave_mii_bus, mdio_np); + err = of_mdiobus_register(ds->slave_mii_bus, mdio_np); + if (err) + mdiobus_free(ds->slave_mii_bus); + + return err; } static int gswip_pce_table_entry_read(struct gswip_priv *priv, @@ -2086,8 +2091,10 @@ disable_switch: gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB); dsa_unregister_switch(priv->ds); mdio_bus: - if (mdio_np) + if (mdio_np) { mdiobus_unregister(priv->ds->slave_mii_bus); + mdiobus_free(priv->ds->slave_mii_bus); + } put_mdio_node: of_node_put(mdio_np); for (i = 0; i < priv->num_gphy_fw; i++) @@ -2108,6 +2115,7 @@ static int gswip_remove(struct platform_device *pdev) if (priv->ds->slave_mii_bus) { mdiobus_unregister(priv->ds->slave_mii_bus); of_node_put(priv->ds->slave_mii_bus->dev.of_node); + mdiobus_free(priv->ds->slave_mii_bus); } for (i = 0; i < priv->num_gphy_fw; i++) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index afc5500ef8ed9227176b7cda75a80d395dcd43e7..1992be77522ac0db4046e74b5e29badbba1718d9 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3072,7 +3072,7 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip, return err; } - bus = 
devm_mdiobus_alloc_size(chip->dev, sizeof(*mdio_bus)); + bus = mdiobus_alloc_size(sizeof(*mdio_bus)); if (!bus) return -ENOMEM; @@ -3097,14 +3097,14 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip, if (!external) { err = mv88e6xxx_g2_irq_mdio_setup(chip, bus); if (err) - return err; + goto out; } err = of_mdiobus_register(bus, np); if (err) { dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err); mv88e6xxx_g2_irq_mdio_free(chip, bus); - return err; + goto out; } if (external) @@ -3113,21 +3113,26 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip, list_add(&mdio_bus->list, &chip->mdios); return 0; + +out: + mdiobus_free(bus); + return err; } static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip) { - struct mv88e6xxx_mdio_bus *mdio_bus; + struct mv88e6xxx_mdio_bus *mdio_bus, *p; struct mii_bus *bus; - list_for_each_entry(mdio_bus, &chip->mdios, list) { + list_for_each_entry_safe(mdio_bus, p, &chip->mdios, list) { bus = mdio_bus->bus; if (!mdio_bus->external) mv88e6xxx_g2_irq_mdio_free(chip, bus); mdiobus_unregister(bus); + mdiobus_free(bus); } } diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 2e5bbdca5ea4756df5089804bce7970735aba5ff..cd8d9b0e0edb322f519698fb193564372d9a46e1 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -1050,7 +1050,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot) return PTR_ERR(hw); } - bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv)); + bus = mdiobus_alloc_size(sizeof(*mdio_priv)); if (!bus) return -ENOMEM; @@ -1070,6 +1070,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot) rc = mdiobus_register(bus); if (rc < 0) { dev_err(dev, "failed to register MDIO bus\n"); + mdiobus_free(bus); return rc; } @@ -1119,6 +1120,7 @@ static void vsc9959_mdio_bus_free(struct ocelot *ocelot) lynx_pcs_destroy(pcs); } mdiobus_unregister(felix->imdio); + mdiobus_free(felix->imdio); } static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port, diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c index 661745932a5392004231fcd06929ac947223a483..c33bdcf7efc587e4d5d20c45801e9b0c21a511c8 100644 --- a/drivers/net/dsa/qca/ar9331.c +++ b/drivers/net/dsa/qca/ar9331.c @@ -289,7 +289,7 @@ static int ar9331_sw_mbus_init(struct ar9331_sw_priv *priv) if (!mnp) return -ENODEV; - ret = of_mdiobus_register(mbus, mnp); + ret = devm_of_mdiobus_register(dev, mbus, mnp); of_node_put(mnp); if (ret) return ret; @@ -856,7 +856,6 @@ static void ar9331_sw_remove(struct mdio_device *mdiodev) struct ar9331_sw_priv *priv = dev_get_drvdata(&mdiodev->dev); irq_domain_remove(priv->irqdomain); - mdiobus_unregister(priv->mbus); dsa_unregister_switch(&priv->ds); reset_control_assert(priv->sw_reset); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 395eb0b52680211eac42a3baea57e3e21091b539..a816b30bca04c7641bc3142df5a814b02ea27b3b 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -721,7 +721,9 @@ static void xgbe_stop_timers(struct xgbe_prv_data *pdata) if (!channel->tx_ring) break; + /* Deactivate the Tx timer */ del_timer_sync(&channel->tx_timer); + channel->tx_timer_active = 0; } } @@ -2557,6 +2559,14 @@ read_again: buf2_len = xgbe_rx_buf2_len(rdata, packet, len); len += buf2_len; + if (buf2_len > rdata->rx.buf.dma_len) { + /* Hardware inconsistency within the descriptors + * that has resulted in a length 
underflow. + */ + error = 1; + goto skip_data; + } + if (!skb) { skb = xgbe_create_skb(pdata, napi, rdata, buf1_len); @@ -2586,8 +2596,10 @@ skip_data: if (!last || context_next) goto read_again; - if (!skb) + if (!skb || error) { + dev_kfree_skb(skb); goto next_packet; + } /* Be sure we don't exceed the configured MTU */ max_len = netdev->mtu + ETH_HLEN; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c index 90cb55eb546658456f7ace869a079171d169d041..014513ce00a14b06149f5f82257359b11c2d34f5 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c @@ -418,6 +418,9 @@ static void xgbe_pci_remove(struct pci_dev *pdev) pci_free_irq_vectors(pdata->pcidev); + /* Disable all interrupts in the hardware */ + XP_IOWRITE(pdata, XP_INT_EN, 0x0); + xgbe_free_pdata(pdata); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index d04994840b87d5cf8f406bdc80556f77c9ebe501..bb3ba614fb17497d62566d41026997db23473a26 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1850,6 +1850,14 @@ struct bnx2x { /* Vxlan/Geneve related information */ u16 udp_tunnel_ports[BNX2X_UDP_PORT_MAX]; + +#define FW_CAP_INVALIDATE_VF_FP_HSI BIT(0) + u32 fw_cap; + + u32 fw_major; + u32 fw_minor; + u32 fw_rev; + u32 fw_eng; }; /* Tx queues may be less or equal to Rx queues */ @@ -2526,5 +2534,6 @@ void bnx2x_register_phc(struct bnx2x *bp); * Meant for implicit re-load flows. */ int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp); - +int bnx2x_init_firmware(struct bnx2x *bp); +void bnx2x_release_firmware(struct bnx2x *bp); #endif /* bnx2x.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index b5d954cb409ae4de9e6e6d70dede933a2154bbfa..41ebbb2c7d3ac3c8d1aa172d9a2a874e6f51b8d7 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -2364,10 +2364,8 @@ int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err) if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP && load_code != FW_MSG_CODE_DRV_LOAD_COMMON) { /* build my FW version dword */ - u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) + - (BCM_5710_FW_MINOR_VERSION << 8) + - (BCM_5710_FW_REVISION_VERSION << 16) + - (BCM_5710_FW_ENGINEERING_VERSION << 24); + u32 my_fw = (bp->fw_major) + (bp->fw_minor << 8) + + (bp->fw_rev << 16) + (bp->fw_eng << 24); /* read loaded FW from chip */ u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h index 3f8435208bf498171a48eb0b4336295b40aec549..a84d015da5dfa6005ce846349915038d5dcd8826 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h @@ -241,6 +241,8 @@ IRO[221].m2)) #define XSTORM_VF_TO_PF_OFFSET(funcId) \ (IRO[48].base + ((funcId) * IRO[48].m1)) +#define XSTORM_ETH_FUNCTION_INFO_FP_HSI_VALID_E2_OFFSET(fid) \ + (IRO[386].base + ((fid) * IRO[386].m1)) #define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0 /* eth hsi version */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 622fadc50316ee9c37af97a45e266ab7fb187b63..611efee758340bd9dc5338ce68409bab179bf650 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -3024,7 
+3024,8 @@ struct afex_stats { #define BCM_5710_FW_MAJOR_VERSION 7 #define BCM_5710_FW_MINOR_VERSION 13 -#define BCM_5710_FW_REVISION_VERSION 15 +#define BCM_5710_FW_REVISION_VERSION 21 +#define BCM_5710_FW_REVISION_VERSION_V15 15 #define BCM_5710_FW_ENGINEERING_VERSION 0 #define BCM_5710_FW_COMPILE_FLAGS 1 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 28069b29086257e922ef0ebf78cb9f5084ec3759..7fa271db41b0763c719b7ea223c79e166e1d72eb 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -74,9 +74,19 @@ __stringify(BCM_5710_FW_MINOR_VERSION) "." \ __stringify(BCM_5710_FW_REVISION_VERSION) "." \ __stringify(BCM_5710_FW_ENGINEERING_VERSION) + +#define FW_FILE_VERSION_V15 \ + __stringify(BCM_5710_FW_MAJOR_VERSION) "." \ + __stringify(BCM_5710_FW_MINOR_VERSION) "." \ + __stringify(BCM_5710_FW_REVISION_VERSION_V15) "." \ + __stringify(BCM_5710_FW_ENGINEERING_VERSION) + #define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw" #define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw" #define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw" +#define FW_FILE_NAME_E1_V15 "bnx2x/bnx2x-e1-" FW_FILE_VERSION_V15 ".fw" +#define FW_FILE_NAME_E1H_V15 "bnx2x/bnx2x-e1h-" FW_FILE_VERSION_V15 ".fw" +#define FW_FILE_NAME_E2_V15 "bnx2x/bnx2x-e2-" FW_FILE_VERSION_V15 ".fw" /* Time in jiffies before concluding the transmitter is hung */ #define TX_TIMEOUT (5*HZ) @@ -90,6 +100,9 @@ MODULE_LICENSE("GPL"); MODULE_FIRMWARE(FW_FILE_NAME_E1); MODULE_FIRMWARE(FW_FILE_NAME_E1H); MODULE_FIRMWARE(FW_FILE_NAME_E2); +MODULE_FIRMWARE(FW_FILE_NAME_E1_V15); +MODULE_FIRMWARE(FW_FILE_NAME_E1H_V15); +MODULE_FIRMWARE(FW_FILE_NAME_E2_V15); int bnx2x_num_queues; module_param_named(num_queues, bnx2x_num_queues, int, 0444); @@ -747,9 +760,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp) CHIP_IS_E1(bp) ? "everest1" : CHIP_IS_E1H(bp) ? "everest1h" : CHIP_IS_E2(bp) ? "everest2" : "everest3", - BCM_5710_FW_MAJOR_VERSION, - BCM_5710_FW_MINOR_VERSION, - BCM_5710_FW_REVISION_VERSION); + bp->fw_major, bp->fw_minor, bp->fw_rev); return rc; } @@ -12355,6 +12366,15 @@ static int bnx2x_init_bp(struct bnx2x *bp) bnx2x_read_fwinfo(bp); + if (IS_PF(bp)) { + rc = bnx2x_init_firmware(bp); + + if (rc) { + bnx2x_free_mem_bp(bp); + return rc; + } + } + func = BP_FUNC(bp); /* need to reset chip if undi was active */ @@ -12367,6 +12387,7 @@ static int bnx2x_init_bp(struct bnx2x *bp) rc = bnx2x_prev_unload(bp); if (rc) { + bnx2x_release_firmware(bp); bnx2x_free_mem_bp(bp); return rc; } @@ -13366,16 +13387,11 @@ static int bnx2x_check_firmware(struct bnx2x *bp) /* Check FW version */ offset = be32_to_cpu(fw_hdr->fw_version.offset); fw_ver = firmware->data + offset; - if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) || - (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) || - (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) || - (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) { + if (fw_ver[0] != bp->fw_major || fw_ver[1] != bp->fw_minor || + fw_ver[2] != bp->fw_rev || fw_ver[3] != bp->fw_eng) { BNX2X_ERR("Bad FW version:%d.%d.%d.%d. 
Should be %d.%d.%d.%d\n", - fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3], - BCM_5710_FW_MAJOR_VERSION, - BCM_5710_FW_MINOR_VERSION, - BCM_5710_FW_REVISION_VERSION, - BCM_5710_FW_ENGINEERING_VERSION); + fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3], + bp->fw_major, bp->fw_minor, bp->fw_rev, bp->fw_eng); return -EINVAL; } @@ -13453,34 +13469,51 @@ do { \ (u8 *)bp->arr, len); \ } while (0) -static int bnx2x_init_firmware(struct bnx2x *bp) +int bnx2x_init_firmware(struct bnx2x *bp) { - const char *fw_file_name; + const char *fw_file_name, *fw_file_name_v15; struct bnx2x_fw_file_hdr *fw_hdr; int rc; if (bp->firmware) return 0; - if (CHIP_IS_E1(bp)) + if (CHIP_IS_E1(bp)) { fw_file_name = FW_FILE_NAME_E1; - else if (CHIP_IS_E1H(bp)) + fw_file_name_v15 = FW_FILE_NAME_E1_V15; + } else if (CHIP_IS_E1H(bp)) { fw_file_name = FW_FILE_NAME_E1H; - else if (!CHIP_IS_E1x(bp)) + fw_file_name_v15 = FW_FILE_NAME_E1H_V15; + } else if (!CHIP_IS_E1x(bp)) { fw_file_name = FW_FILE_NAME_E2; - else { + fw_file_name_v15 = FW_FILE_NAME_E2_V15; + } else { BNX2X_ERR("Unsupported chip revision\n"); return -EINVAL; } + BNX2X_DEV_INFO("Loading %s\n", fw_file_name); rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev); if (rc) { - BNX2X_ERR("Can't load firmware file %s\n", - fw_file_name); - goto request_firmware_exit; + BNX2X_DEV_INFO("Trying to load older fw %s\n", fw_file_name_v15); + + /* try to load prev version */ + rc = request_firmware(&bp->firmware, fw_file_name_v15, &bp->pdev->dev); + + if (rc) + goto request_firmware_exit; + + bp->fw_rev = BCM_5710_FW_REVISION_VERSION_V15; + } else { + bp->fw_cap |= FW_CAP_INVALIDATE_VF_FP_HSI; + bp->fw_rev = BCM_5710_FW_REVISION_VERSION; } + bp->fw_major = BCM_5710_FW_MAJOR_VERSION; + bp->fw_minor = BCM_5710_FW_MINOR_VERSION; + bp->fw_eng = BCM_5710_FW_ENGINEERING_VERSION; + rc = bnx2x_check_firmware(bp); if (rc) { BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name); @@ -13536,7 +13569,7 @@ request_firmware_exit: return rc; } -static void bnx2x_release_firmware(struct bnx2x *bp) +void bnx2x_release_firmware(struct bnx2x *bp) { kfree(bp->init_ops_offsets); kfree(bp->init_ops); @@ -14053,6 +14086,7 @@ static int bnx2x_init_one(struct pci_dev *pdev, return 0; init_one_freemem: + bnx2x_release_firmware(bp); bnx2x_free_mem_bp(bp); init_one_exit: diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 03eb0179ec0085eb07a2b82b1293664e5419279a..08437eaacbb966cf3909fd21d14ae2fe15e11a5e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -758,9 +758,18 @@ static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf) void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid) { + u16 abs_fid; + + abs_fid = FW_VF_HANDLE(abs_vfid); + /* set the VF-PF association in the FW */ - storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp)); - storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1); + storm_memset_vf_to_pf(bp, abs_fid, BP_FUNC(bp)); + storm_memset_func_en(bp, abs_fid, 1); + + /* Invalidate fp_hsi version for vfs */ + if (bp->fw_cap & FW_CAP_INVALIDATE_VF_FP_HSI) + REG_WR8(bp, BAR_XSTRORM_INTMEM + + XSTORM_ETH_FUNCTION_INFO_FP_HSI_VALID_E2_OFFSET(abs_fid), 0); /* clear vf errors*/ bnx2x_vf_semi_clear_err(bp, abs_vfid); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 6f9196ff2ac4f56b0c73303ca618b460160f4447..98087b278d1f4965227f37ad19157c8191fa2e9f 100644 
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1926,6 +1926,9 @@ static int bnxt_get_fecparam(struct net_device *dev, case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE: fec->active_fec |= ETHTOOL_FEC_LLRS; break; + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE: + fec->active_fec |= ETHTOOL_FEC_OFF; + break; } return 0; } diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 8da6f867b6d226ddf0edf95a66a90fd2ff00ba17..bd1f419bc47ae83a217ed23aeb6f310a27ba711b 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -3978,10 +3978,12 @@ static int bcmgenet_probe(struct platform_device *pdev) /* Request the WOL interrupt and advertise suspend if available */ priv->wol_irq_disabled = true; - err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0, - dev->name, priv); - if (!err) - device_set_wakeup_capable(&pdev->dev, 1); + if (priv->wol_irq > 0) { + err = devm_request_irq(&pdev->dev, priv->wol_irq, + bcmgenet_wol_isr, 0, dev->name, priv); + if (!err) + device_set_wakeup_capable(&pdev->dev, 1); + } /* Set the needed headroom to account for any possible * features enabling/disabling at runtime diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 1e8bf6b9834bba6cb0454faa38ff7c5d6db540de..2af464ac250ac73f11eee87d91db38c776cdb66a 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -4534,7 +4534,7 @@ static int macb_probe(struct platform_device *pdev) #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) { - dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44)); bp->hw_dma_cap |= HW_DMA_CAP_64B; } #endif diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c index d04a6c1634452034217e9dc4ab8771bf02899b86..da8d10475a08e6e23c466b6f62637f7c840e86bd 100644 --- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c +++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c @@ -32,6 +32,7 @@ #include #include +#include #include #include @@ -99,7 +100,7 @@ cxgb_find_route(struct cxgb4_lld_info *lldi, rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip, peer_port, local_port, IPPROTO_TCP, - tos, 0); + tos & ~INET_ECN_MASK, 0); if (IS_ERR(rt)) return NULL; n = dst_neigh_lookup(&rt->dst, &peer_ip); diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 8df6f081f2447fbb3cdb8ddd88c14db01bb3c9bb..d11fcfd927c0b5633928e5c5f1babe352253a244 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -305,21 +305,21 @@ static void gmac_speed_set(struct net_device *netdev) switch (phydev->speed) { case 1000: status.bits.speed = GMAC_SPEED_1000; - if (phydev->interface == PHY_INTERFACE_MODE_RGMII) + if (phy_interface_mode_is_rgmii(phydev->interface)) status.bits.mii_rmii = GMAC_PHY_RGMII_1000; netdev_dbg(netdev, "connect %s to RGMII @ 1Gbit\n", phydev_name(phydev)); break; case 100: status.bits.speed = GMAC_SPEED_100; - if (phydev->interface == PHY_INTERFACE_MODE_RGMII) + if (phy_interface_mode_is_rgmii(phydev->interface)) status.bits.mii_rmii = GMAC_PHY_RGMII_100_10; netdev_dbg(netdev, "connect %s to RGMII @ 100 Mbit\n", phydev_name(phydev)); break; case 10: status.bits.speed = 
GMAC_SPEED_10; - if (phydev->interface == PHY_INTERFACE_MODE_RGMII) + if (phy_interface_mode_is_rgmii(phydev->interface)) status.bits.mii_rmii = GMAC_PHY_RGMII_100_10; netdev_dbg(netdev, "connect %s to RGMII @ 10 Mbit\n", phydev_name(phydev)); @@ -389,6 +389,9 @@ static int gmac_setup_phy(struct net_device *netdev) status.bits.mii_rmii = GMAC_PHY_GMII; break; case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_TXID: + case PHY_INTERFACE_MODE_RGMII_RXID: netdev_dbg(netdev, "RGMII: set GMAC0 and GMAC1 to MII/RGMII mode\n"); status.bits.mii_rmii = GMAC_PHY_RGMII_100_10; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index f06d88c471d0f0c8f8fd7e526f1a7423fd55f5a6..d89ddc165ec24f757aca4be47e8bfcfdd2803b57 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -4225,7 +4225,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) } INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp); - + mutex_init(&priv->onestep_tstamp_lock); skb_queue_head_init(&priv->tx_skbs); /* Obtain a MC portal */ @@ -4405,12 +4405,12 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) #ifdef CONFIG_DEBUG_FS dpaa2_dbg_remove(priv); #endif + + unregister_netdev(net_dev); rtnl_lock(); dpaa2_eth_disconnect_mac(priv); rtnl_unlock(); - unregister_netdev(net_dev); - dpaa2_eth_dl_port_del(priv); dpaa2_eth_dl_traps_unregister(priv); dpaa2_eth_dl_unregister(priv); diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 901749a7a318b7e001c7b31c9c6e41707588bb69..6eeccc11b76efc4381aa38b83461e5410ff957c9 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -94,14 +94,17 @@ static void mac_exception(void *handle, enum fman_mac_exceptions ex) __func__, ex); } -static void set_fman_mac_params(struct mac_device *mac_dev, - struct fman_mac_params *params) +static int set_fman_mac_params(struct mac_device *mac_dev, + struct fman_mac_params *params) { struct mac_priv_s *priv = mac_dev->priv; params->base_addr = (typeof(params->base_addr)) devm_ioremap(priv->dev, mac_dev->res->start, resource_size(mac_dev->res)); + if (!params->base_addr) + return -ENOMEM; + memcpy(¶ms->addr, mac_dev->addr, sizeof(mac_dev->addr)); params->max_speed = priv->max_speed; params->phy_if = mac_dev->phy_if; @@ -112,6 +115,8 @@ static void set_fman_mac_params(struct mac_device *mac_dev, params->event_cb = mac_exception; params->dev_id = mac_dev; params->internal_phy_node = priv->internal_phy_node; + + return 0; } static int tgec_initialization(struct mac_device *mac_dev) @@ -123,7 +128,9 @@ static int tgec_initialization(struct mac_device *mac_dev) priv = mac_dev->priv; - set_fman_mac_params(mac_dev, ¶ms); + err = set_fman_mac_params(mac_dev, ¶ms); + if (err) + goto _return; mac_dev->fman_mac = tgec_config(¶ms); if (!mac_dev->fman_mac) { @@ -169,7 +176,9 @@ static int dtsec_initialization(struct mac_device *mac_dev) priv = mac_dev->priv; - set_fman_mac_params(mac_dev, ¶ms); + err = set_fman_mac_params(mac_dev, ¶ms); + if (err) + goto _return; mac_dev->fman_mac = dtsec_config(¶ms); if (!mac_dev->fman_mac) { @@ -218,7 +227,9 @@ static int memac_initialization(struct mac_device *mac_dev) priv = mac_dev->priv; - set_fman_mac_params(mac_dev, ¶ms); + err = set_fman_mac_params(mac_dev, ¶ms); + if (err) + goto _return; if (priv->max_speed == SPEED_10000) params.phy_if = 
PHY_INTERFACE_MODE_XGMII; diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c index bfa2826c55454db2586f8dfde2799989db243a46..b7984a772e12d61916d0a8d4a8513631e82a2c5d 100644 --- a/drivers/net/ethernet/freescale/xgmac_mdio.c +++ b/drivers/net/ethernet/freescale/xgmac_mdio.c @@ -49,6 +49,7 @@ struct tgec_mdio_controller { struct mdio_fsl_priv { struct tgec_mdio_controller __iomem *mdio_base; bool is_little_endian; + bool has_a009885; bool has_a011043; }; @@ -184,10 +185,10 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum) { struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv; struct tgec_mdio_controller __iomem *regs = priv->mdio_base; + unsigned long flags; uint16_t dev_addr; uint32_t mdio_stat; uint32_t mdio_ctl; - uint16_t value; int ret; bool endian = priv->is_little_endian; @@ -219,12 +220,18 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum) return ret; } + if (priv->has_a009885) + /* Once the operation completes, i.e. MDIO_STAT_BSY clears, we + * must read back the data register within 16 MDC cycles. + */ + local_irq_save(flags); + /* Initiate the read */ xgmac_write32(mdio_ctl | MDIO_CTL_READ, ®s->mdio_ctl, endian); ret = xgmac_wait_until_done(&bus->dev, regs, endian); if (ret) - return ret; + goto irq_restore; /* Return all Fs if nothing was there */ if ((xgmac_read32(®s->mdio_stat, endian) & MDIO_STAT_RD_ER) && @@ -232,13 +239,17 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum) dev_dbg(&bus->dev, "Error while reading PHY%d reg at %d.%hhu\n", phy_id, dev_addr, regnum); - return 0xffff; + ret = 0xffff; + } else { + ret = xgmac_read32(®s->mdio_data, endian) & 0xffff; + dev_dbg(&bus->dev, "read %04x\n", ret); } - value = xgmac_read32(®s->mdio_data, endian) & 0xffff; - dev_dbg(&bus->dev, "read %04x\n", value); +irq_restore: + if (priv->has_a009885) + local_irq_restore(flags); - return value; + return ret; } static int xgmac_mdio_probe(struct platform_device *pdev) @@ -282,6 +293,8 @@ static int xgmac_mdio_probe(struct platform_device *pdev) priv->is_little_endian = device_property_read_bool(&pdev->dev, "little-endian"); + priv->has_a009885 = device_property_read_bool(&pdev->dev, + "fsl,erratum-a009885"); priv->has_a011043 = device_property_read_bool(&pdev->dev, "fsl,erratum-a011043"); @@ -307,9 +320,10 @@ err_ioremap: static int xgmac_mdio_remove(struct platform_device *pdev) { struct mii_bus *bus = platform_get_drvdata(pdev); + struct mdio_fsl_priv *priv = bus->priv; mdiobus_unregister(bus); - iounmap(bus->priv); + iounmap(priv->mdio_base); mdiobus_free(bus); return 0; diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c index 6009d76e41fc4f26e6b3481aa3d533d76441de0e..67f2b9a61463ad38ef806f406b7bbd9fed6bb596 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.c +++ b/drivers/net/ethernet/google/gve/gve_adminq.c @@ -141,7 +141,7 @@ static int gve_adminq_parse_err(struct gve_priv *priv, u32 status) */ static int gve_adminq_kick_and_wait(struct gve_priv *priv) { - u32 tail, head; + int tail, head; int i; tail = ioread32be(&priv->reg_bar0->adminq_event_counter); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 6e7da1dc2e8c30bd8593393d20d19339152c24af..d6580e942724d9214bec07a7359dab6d99d64d79 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ 
b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -2382,8 +2382,7 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) break; } - if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) - hclgevf_enable_vector(&hdev->misc_vector, true); + hclgevf_enable_vector(&hdev->misc_vector, true); return IRQ_HANDLED; } diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c index 27937c5d795673a53d79aee82a91898732102d52..daec9ce04531be2531a24d5b44bc83ad5dae3a2c 100644 --- a/drivers/net/ethernet/i825xx/sni_82596.c +++ b/drivers/net/ethernet/i825xx/sni_82596.c @@ -117,9 +117,10 @@ static int sni_82596_probe(struct platform_device *dev) netdevice->dev_addr[5] = readb(eth_addr + 0x06); iounmap(eth_addr); - if (!netdevice->irq) { + if (netdevice->irq < 0) { printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n", __FILE__, netdevice->base_addr); + retval = netdevice->irq; goto probe_failed; } diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 4f99d97638248c0d32cecc7b1cb14f837cdcef45..c7be7ab131b197dc34eaf26562c34a643f0bc2be 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -3401,11 +3401,25 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry) struct device *dev = &adapter->vdev->dev; union ibmvnic_crq crq; int max_entries; + int cap_reqs; + + /* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on + * the PROMISC flag). Initialize this count upfront. When the tasklet + * receives a response to all of these, it will send the next protocol + * message (QUERY_IP_OFFLOAD). + */ + if (!(adapter->netdev->flags & IFF_PROMISC) || + adapter->promisc_supported) + cap_reqs = 7; + else + cap_reqs = 6; if (!retry) { /* Sub-CRQ entries are 32 byte long */ int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4); + atomic_set(&adapter->running_cap_crqs, cap_reqs); + if (adapter->min_tx_entries_per_subcrq > entries_page || adapter->min_rx_add_entries_per_subcrq > entries_page) { dev_err(dev, "Fatal, invalid entries per sub-crq\n"); @@ -3466,44 +3480,45 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry) adapter->opt_rx_comp_queues; adapter->req_rx_add_queues = adapter->max_rx_add_queues; + } else { + atomic_add(cap_reqs, &adapter->running_cap_crqs); } - memset(&crq, 0, sizeof(crq)); crq.request_capability.first = IBMVNIC_CRQ_CMD; crq.request_capability.cmd = REQUEST_CAPABILITY; crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES); crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues); - atomic_inc(&adapter->running_cap_crqs); + cap_reqs--; ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES); crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues); - atomic_inc(&adapter->running_cap_crqs); + cap_reqs--; ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES); crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues); - atomic_inc(&adapter->running_cap_crqs); + cap_reqs--; ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ); crq.request_capability.number = cpu_to_be64(adapter->req_tx_entries_per_subcrq); - atomic_inc(&adapter->running_cap_crqs); + cap_reqs--; ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ); crq.request_capability.number = 
cpu_to_be64(adapter->req_rx_add_entries_per_subcrq); - atomic_inc(&adapter->running_cap_crqs); + cap_reqs--; ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_MTU); crq.request_capability.number = cpu_to_be64(adapter->req_mtu); - atomic_inc(&adapter->running_cap_crqs); + cap_reqs--; ibmvnic_send_crq(adapter, &crq); if (adapter->netdev->flags & IFF_PROMISC) { @@ -3511,16 +3526,21 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry) crq.request_capability.capability = cpu_to_be16(PROMISC_REQUESTED); crq.request_capability.number = cpu_to_be64(1); - atomic_inc(&adapter->running_cap_crqs); + cap_reqs--; ibmvnic_send_crq(adapter, &crq); } } else { crq.request_capability.capability = cpu_to_be16(PROMISC_REQUESTED); crq.request_capability.number = cpu_to_be64(0); - atomic_inc(&adapter->running_cap_crqs); + cap_reqs--; ibmvnic_send_crq(adapter, &crq); } + + /* Keep at end to catch any discrepancy between expected and actual + * CRQs sent. + */ + WARN_ON(cap_reqs != 0); } static int pending_scrq(struct ibmvnic_adapter *adapter, @@ -3953,118 +3973,132 @@ static void send_query_map(struct ibmvnic_adapter *adapter) static void send_query_cap(struct ibmvnic_adapter *adapter) { union ibmvnic_crq crq; + int cap_reqs; + + /* We send out 25 QUERY_CAPABILITY CRQs below. Initialize this count + * upfront. When the tasklet receives a response to all of these, it + * can send out the next protocol messaage (REQUEST_CAPABILITY). + */ + cap_reqs = 25; + + atomic_set(&adapter->running_cap_crqs, cap_reqs); - atomic_set(&adapter->running_cap_crqs, 0); memset(&crq, 0, sizeof(crq)); crq.query_capability.first = IBMVNIC_CRQ_CMD; crq.query_capability.cmd = QUERY_CAPABILITY; crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED); - 
atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(MIN_MTU); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(MAX_MTU); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ); - atomic_inc(&adapter->running_cap_crqs); + ibmvnic_send_crq(adapter, &crq); + cap_reqs--; + + /* Keep at end to catch any discrepancy between expected and actual + * CRQs sent. 
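The running_cap_crqs handling above boils down to one pattern: publish the expected number of in-flight requests before the first send, decrement a local counter per send, and warn on any drift. A minimal sketch of that pattern only, with hypothetical names (my_adapter, send_one), not code from the driver:

	struct my_adapter {
		atomic_t running_reqs;	/* decremented by the completion handler */
	};

	static void send_one(struct my_adapter *a, int which);	/* assumed to exist */

	static void send_fixed_batch(struct my_adapter *a)
	{
		int expected = 3;	/* must equal the number of send_one() calls below */
		int i;

		/* publish the total before the first send, as send_query_cap() now does */
		atomic_set(&a->running_reqs, expected);

		for (i = 0; i < 3; i++) {
			send_one(a, i);
			expected--;
		}

		/* any drift between the published total and the actual sends is a bug */
		WARN_ON(expected != 0);
	}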
+ */ + WARN_ON(cap_reqs != 0); } static void send_query_ip_offload(struct ibmvnic_adapter *adapter) @@ -4369,6 +4403,8 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq, char *name; atomic_dec(&adapter->running_cap_crqs); + netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n", + atomic_read(&adapter->running_cap_crqs)); switch (be16_to_cpu(crq->request_capability_rsp.capability)) { case REQ_TX_QUEUES: req_value = &adapter->req_tx_queues; @@ -5039,12 +5075,6 @@ static void ibmvnic_tasklet(struct tasklet_struct *t) ibmvnic_handle_crq(crq, adapter); crq->generic.first = 0; } - - /* remain in tasklet until all - * capabilities responses are received - */ - if (!adapter->wait_capability) - done = true; } /* if capabilities CRQ's were sent in this tasklet, the following * tasklet must wait until all responses are received diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 5b83d1bc0e74d38ea1c1c7ccd6c2214bda402a39..effdc3361266f11932d50925d35bd0ecd93dd7b4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -172,7 +172,6 @@ enum i40e_interrupt_policy { struct i40e_lump_tracking { u16 num_entries; - u16 search_hint; u16 list[0]; #define I40E_PILE_VALID_BIT 0x8000 #define I40E_IWARP_IRQ_PILE_ID (I40E_PILE_VALID_BIT - 2) @@ -755,12 +754,12 @@ struct i40e_vsi { struct rtnl_link_stats64 net_stats_offsets; struct i40e_eth_stats eth_stats; struct i40e_eth_stats eth_stats_offsets; - u32 tx_restart; - u32 tx_busy; + u64 tx_restart; + u64 tx_busy; u64 tx_linearize; u64 tx_force_wb; - u32 rx_buf_failed; - u32 rx_page_failed; + u64 rx_buf_failed; + u64 rx_page_failed; /* These are containers of ring pointers, allocated at run-time */ struct i40e_ring **rx_rings; diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 714b578b2b49c5948b476319fb80e7b269d28bd3..1114a15a9ce3c88783e17e84389d19d8916cb8b5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -240,7 +240,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) (unsigned long int)vsi->net_stats_offsets.rx_compressed, (unsigned long int)vsi->net_stats_offsets.tx_compressed); dev_info(&pf->pdev->dev, - " tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n", + " tx_restart = %llu, tx_busy = %llu, rx_buf_failed = %llu, rx_page_failed = %llu\n", vsi->tx_restart, vsi->tx_busy, vsi->rx_buf_failed, vsi->rx_page_failed); rcu_read_lock(); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index f888a443a067bcbaa773f2aef770aca78336b911..bd18a780a0008de16a0f70428654389f2634734c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -195,10 +195,6 @@ int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem) * @id: an owner id to stick on the items assigned * * Returns the base item index of the lump, or negative for error - * - * The search_hint trick and lack of advanced fit-finding only work - * because we're highly likely to have all the same size lump requests. - * Linear search time and any fragmentation should be minimal. 
**/ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile, u16 needed, u16 id) @@ -213,8 +209,21 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile, return -EINVAL; } - /* start the linear search with an imperfect hint */ - i = pile->search_hint; + /* Allocate last queue in the pile for FDIR VSI queue + * so it doesn't fragment the qp_pile + */ + if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) { + if (pile->list[pile->num_entries - 1] & I40E_PILE_VALID_BIT) { + dev_err(&pf->pdev->dev, + "Cannot allocate queue %d for I40E_VSI_FDIR\n", + pile->num_entries - 1); + return -ENOMEM; + } + pile->list[pile->num_entries - 1] = id | I40E_PILE_VALID_BIT; + return pile->num_entries - 1; + } + + i = 0; while (i < pile->num_entries) { /* skip already allocated entries */ if (pile->list[i] & I40E_PILE_VALID_BIT) { @@ -233,7 +242,6 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile, for (j = 0; j < needed; j++) pile->list[i+j] = id | I40E_PILE_VALID_BIT; ret = i; - pile->search_hint = i + j; break; } @@ -256,7 +264,7 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id) { int valid_id = (id | I40E_PILE_VALID_BIT); int count = 0; - int i; + u16 i; if (!pile || index >= pile->num_entries) return -EINVAL; @@ -268,8 +276,6 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id) count++; } - if (count && index < pile->search_hint) - pile->search_hint = index; return count; } @@ -771,9 +777,9 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) struct rtnl_link_stats64 *ns; /* netdev stats */ struct i40e_eth_stats *oes; struct i40e_eth_stats *es; /* device's eth stats */ - u32 tx_restart, tx_busy; + u64 tx_restart, tx_busy; struct i40e_ring *p; - u32 rx_page, rx_buf; + u64 rx_page, rx_buf; u64 bytes, packets; unsigned int start; u64 tx_linearize; @@ -10130,15 +10136,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) } i40e_get_oem_version(&pf->hw); - if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) && - ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) || - hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) { - /* The following delay is necessary for 4.33 firmware and older - * to recover after EMP reset. 200 ms should suffice but we - * put here 300 ms to be sure that FW is ready to operate - * after reset. - */ - mdelay(300); + if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) { + /* The following delay is necessary for firmware update. 
*/ + mdelay(1000); } /* re-verify the eeprom if we just had an EMP reset */ @@ -11327,7 +11327,6 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf) return -ENOMEM; pf->irq_pile->num_entries = vectors; - pf->irq_pile->search_hint = 0; /* track first vector for misc interrupts, ignore return */ (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1); @@ -12130,7 +12129,6 @@ static int i40e_sw_init(struct i40e_pf *pf) goto sw_init_done; } pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp; - pf->qp_pile->search_hint = 0; pf->tx_timeout_recovery_level = 1; diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h index 564df22f3f4639263b94bf7cd0374dbc7961cb85..8335f151ceefc9a0c9ee94d46fcdcfaabf7713a3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_register.h +++ b/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -279,6 +279,9 @@ #define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ #define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1 #define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT) +#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30 +#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT) +#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ #define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ #define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0 #define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11 diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 65c4c4fd359fa62d636dfc27089d7508b27be79d..f71b7334e295582471e773ac66ceff24e012c794 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1323,6 +1323,32 @@ static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf, return aq_ret; } +/** + * i40e_sync_vfr_reset + * @hw: pointer to hw struct + * @vf_id: VF identifier + * + * Before trigger hardware reset, we need to know if no other process has + * reserved the hardware for any reset operations. This check is done by + * examining the status of the RSTAT1 register used to signal the reset. + **/ +static int i40e_sync_vfr_reset(struct i40e_hw *hw, int vf_id) +{ + u32 reg; + int i; + + for (i = 0; i < I40E_VFR_WAIT_COUNT; i++) { + reg = rd32(hw, I40E_VFINT_ICR0_ENA(vf_id)) & + I40E_VFINT_ICR0_ADMINQ_MASK; + if (reg) + return 0; + + usleep_range(100, 200); + } + + return -EAGAIN; +} + /** * i40e_trigger_vf_reset * @vf: pointer to the VF structure @@ -1337,9 +1363,11 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr) struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; u32 reg, reg_idx, bit_idx; + bool vf_active; + u32 radq; /* warn the VF */ - clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); + vf_active = test_and_clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); /* Disable VF's configuration API during reset. The flag is re-enabled * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI. @@ -1353,7 +1381,19 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr) * just need to clean up, so don't hit the VFRTRIG register. 
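The new i40e_sync_vfr_reset() above is a bounded register poll. For comparison, the same wait can be expressed with the generic iopoll helper; this is a sketch only, which takes a raw __iomem pointer instead of going through rd32() and returns -ETIMEDOUT rather than -EAGAIN:

	#include <linux/iopoll.h>

	static int poll_adminq_bit(void __iomem *icr0_ena_reg)
	{
		u32 val;

		/* ~100us per poll, 20ms budget, roughly matching the
		 * I40E_VFR_WAIT_COUNT iterations of usleep_range(100, 200) above
		 */
		return readl_poll_timeout(icr0_ena_reg, val,
					  val & I40E_VFINT_ICR0_ADMINQ_MASK,
					  100, 20000);
	}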
*/ if (!flr) { - /* reset VF using VPGEN_VFRTRIG reg */ + /* Sync VFR reset before trigger next one */ + radq = rd32(hw, I40E_VFINT_ICR0_ENA(vf->vf_id)) & + I40E_VFINT_ICR0_ADMINQ_MASK; + if (vf_active && !radq) + /* waiting for finish reset by virtual driver */ + if (i40e_sync_vfr_reset(hw, vf->vf_id)) + dev_info(&pf->pdev->dev, + "Reset VF %d never finished\n", + vf->vf_id); + + /* Reset VF using VPGEN_VFRTRIG reg. It is also setting + * in progress state in rstat1 register. + */ reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); @@ -2563,6 +2603,59 @@ error_param: aq_ret); } +/** + * i40e_check_enough_queue - find big enough queue number + * @vf: pointer to the VF info + * @needed: the number of items needed + * + * Returns the base item index of the queue, or negative for error + **/ +static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed) +{ + unsigned int i, cur_queues, more, pool_size; + struct i40e_lump_tracking *pile; + struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi; + + vsi = pf->vsi[vf->lan_vsi_idx]; + cur_queues = vsi->alloc_queue_pairs; + + /* if current allocated queues are enough for need */ + if (cur_queues >= needed) + return vsi->base_queue; + + pile = pf->qp_pile; + if (cur_queues > 0) { + /* if the allocated queues are not zero + * just check if there are enough queues for more + * behind the allocated queues. + */ + more = needed - cur_queues; + for (i = vsi->base_queue + cur_queues; + i < pile->num_entries; i++) { + if (pile->list[i] & I40E_PILE_VALID_BIT) + break; + + if (more-- == 1) + /* there is enough */ + return vsi->base_queue; + } + } + + pool_size = 0; + for (i = 0; i < pile->num_entries; i++) { + if (pile->list[i] & I40E_PILE_VALID_BIT) { + pool_size = 0; + continue; + } + if (needed <= ++pool_size) + /* there is enough */ + return i; + } + + return -ENOMEM; +} + /** * i40e_vc_request_queues_msg * @vf: pointer to the VF info @@ -2597,6 +2690,12 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg) req_pairs - cur_pairs, pf->queues_left); vfres->num_queue_pairs = pf->queues_left + cur_pairs; + } else if (i40e_check_enough_queue(vf, req_pairs) < 0) { + dev_warn(&pf->pdev->dev, + "VF %d requested %d more queues, but there is not enough for it.\n", + vf->vf_id, + req_pairs - cur_pairs); + vfres->num_queue_pairs = cur_pairs; } else { /* successful request */ vf->num_req_queues = req_pairs; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 49575a640a84c5e6c3abeaaae05427818b06fd87..03c42fd0fea19326916d302d67a7585456dc0a7c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -19,6 +19,7 @@ #define I40E_MAX_VF_PROMISC_FLAGS 3 #define I40E_VF_STATE_WAIT_COUNT 20 +#define I40E_VFR_WAIT_COUNT 100 /* Various queue ctrls */ enum i40e_queue_ctrl { diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 64714757bd4f4f790c9d7ffb47a57086927ea9ce..2b0d0373ab2c64d6261a2f622537d63efaf842b0 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -3032,7 +3032,8 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw)) { struct ice_link_default_override_tlv tlv; - if (ice_get_link_default_override(&tlv, pi)) + status = 
ice_get_link_default_override(&tlv, pi); + if (status) goto out; if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) && diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index c0ee0541e53fc14c92ef083b6accea03bc156d49..847e1ef8e10641a13c835eca849a4b7a4f1af679 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -507,6 +507,7 @@ struct ice_tx_ctx_desc { (0x3FFFFULL << ICE_TXD_CTX_QW1_TSO_LEN_S) #define ICE_TXD_CTX_QW1_MSS_S 50 +#define ICE_TXD_CTX_MIN_MSS 64 enum ice_tx_ctx_desc_cmd_bits { ICE_TX_CTX_DESC_TSO = 0x01, diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 4c7d1720113a0a939061f7a959b05ce7ed845ad6..fb4656902634c624d3b24855a2a5767d6b9482a0 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -6787,6 +6787,7 @@ ice_features_check(struct sk_buff *skb, struct net_device __always_unused *netdev, netdev_features_t features) { + bool gso = skb_is_gso(skb); size_t len; /* No point in doing any of this if neither checksum nor GSO are @@ -6799,24 +6800,32 @@ ice_features_check(struct sk_buff *skb, /* We cannot support GSO if the MSS is going to be less than * 64 bytes. If it is then we need to drop support for GSO. */ - if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) + if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS)) features &= ~NETIF_F_GSO_MASK; - len = skb_network_header(skb) - skb->data; + len = skb_network_offset(skb); if (len > ICE_TXD_MACLEN_MAX || len & 0x1) goto out_rm_features; - len = skb_transport_header(skb) - skb_network_header(skb); + len = skb_network_header_len(skb); if (len > ICE_TXD_IPLEN_MAX || len & 0x1) goto out_rm_features; if (skb->encapsulation) { - len = skb_inner_network_header(skb) - skb_transport_header(skb); - if (len > ICE_TXD_L4LEN_MAX || len & 0x1) - goto out_rm_features; + /* this must work for VXLAN frames AND IPIP/SIT frames, and in + * the case of IPIP frames, the transport header pointer is + * after the inner header! So check to make sure that this + * is a GRE or UDP_TUNNEL frame before doing that math. 
+ */ + if (gso && (skb_shinfo(skb)->gso_type & + (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) { + len = skb_inner_network_header(skb) - + skb_transport_header(skb); + if (len > ICE_TXD_L4LEN_MAX || len & 0x1) + goto out_rm_features; + } - len = skb_inner_transport_header(skb) - - skb_inner_network_header(skb); + len = skb_inner_network_header_len(skb); if (len > ICE_TXD_IPLEN_MAX || len & 0x1) goto out_rm_features; } diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index a7d0a459969a2ec7c77a58597ec931c87ce94436..2d6ac61d7a3e62fe3d8a0b6355eac8d9c3923895 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1992,14 +1992,15 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter, if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX) return; - set_ring_build_skb_enabled(rx_ring); + if (PAGE_SIZE < 8192) + if (max_frame > IXGBEVF_MAX_FRAME_BUILD_SKB) + set_ring_uses_large_buffer(rx_ring); - if (PAGE_SIZE < 8192) { - if (max_frame <= IXGBEVF_MAX_FRAME_BUILD_SKB) - return; + /* 82599 can't rely on RXDCTL.RLPML to restrict the size of the frame */ + if (adapter->hw.mac.type == ixgbe_mac_82599_vf && !ring_uses_large_buffer(rx_ring)) + return; - set_ring_uses_large_buffer(rx_ring); - } + set_ring_build_skb_enabled(rx_ring); } /** diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 044a5b1196acbc2515b788cedeb4d16dbee79879..161174be51c31932b99af4725ca63fb2b67f9502 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -386,7 +386,12 @@ static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf, dst_mdev->msg_size = mbox_hdr->msg_size; dst_mdev->num_msgs = num_msgs; err = otx2_sync_mbox_msg(dst_mbox); - if (err) { + /* Error code -EIO indicate there is a communication failure + * to the AF. Rest of the error codes indicate that AF processed + * VF messages and set the error codes in response messages + * (if any) so simply forward responses to VF. + */ + if (err == -EIO) { dev_warn(pf->dev, "AF not responding to VF%d messages\n", vf); /* restore PF mbase and exit */ diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index a2d3f04a9ff222fe4ebccd4607873e39ae0ca33d..7d7dc0754a3a1335df5458fb50b15344c274d600 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -215,7 +215,7 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode, phylink_config); struct mtk_eth *eth = mac->hw; u32 mcr_cur, mcr_new, sid, i; - int val, ge_mode, err; + int val, ge_mode, err = 0; /* MT76x8 has no hardware settings between for the MAC */ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) && diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 2e55e0088871540e1f139ab970660e5f184f8ec1..6af0dd847169177ef5c9ca52e04dc0b38c2f2b65 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -147,8 +147,12 @@ static void cmd_ent_put(struct mlx5_cmd_work_ent *ent) if (!refcount_dec_and_test(&ent->refcnt)) return; - if (ent->idx >= 0) - cmd_free_index(ent->cmd, ent->idx); + if (ent->idx >= 0) { + struct mlx5_cmd *cmd = ent->cmd; + + cmd_free_index(cmd, ent->idx); + up(ent->page_queue ? 
&cmd->pages_sem : &cmd->sem); + } cmd_free_ent(ent); } @@ -883,25 +887,6 @@ static bool opcode_allowed(struct mlx5_cmd *cmd, u16 opcode) return cmd->allowed_opcode == opcode; } -static int cmd_alloc_index_retry(struct mlx5_cmd *cmd) -{ - unsigned long alloc_end = jiffies + msecs_to_jiffies(1000); - int idx; - -retry: - idx = cmd_alloc_index(cmd); - if (idx < 0 && time_before(jiffies, alloc_end)) { - /* Index allocation can fail on heavy load of commands. This is a temporary - * situation as the current command already holds the semaphore, meaning that - * another command completion is being handled and it is expected to release - * the entry index soon. - */ - cpu_relax(); - goto retry; - } - return idx; -} - bool mlx5_cmd_is_down(struct mlx5_core_dev *dev) { return pci_channel_offline(dev->pdev) || @@ -926,7 +911,7 @@ static void cmd_work_handler(struct work_struct *work) sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; down(sem); if (!ent->page_queue) { - alloc_ret = cmd_alloc_index_retry(cmd); + alloc_ret = cmd_alloc_index(cmd); if (alloc_ret < 0) { mlx5_core_err_rl(dev, "failed to allocate command entry\n"); if (ent->callback) { @@ -1582,8 +1567,6 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force vector = vec & 0xffffffff; for (i = 0; i < (1 << cmd->log_sz); i++) { if (test_bit(i, &vector)) { - struct semaphore *sem; - ent = cmd->ent_arr[i]; /* if we already completed the command, ignore it */ @@ -1606,10 +1589,6 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) cmd_ent_put(ent); - if (ent->page_queue) - sem = &cmd->pages_sem; - else - sem = &cmd->sem; ent->ts2 = ktime_get_ns(); memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out)); dump_command(dev, ent, 0); @@ -1663,7 +1642,6 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force */ complete(&ent->done); } - up(sem); } } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c index 9c076aa20306a7d2bd116b85dfac000bcafaa3f0..b6f5c1bcdbcd493f4ec4ea9bae6eb904454ccce5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c @@ -183,18 +183,7 @@ void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw, static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev) { - struct mlx5e_rep_priv *rpriv; - struct mlx5e_priv *priv; - - /* A given netdev is not a representor or not a slave of LAG configuration */ - if (!mlx5e_eswitch_rep(netdev) || !netif_is_lag_port(netdev)) - return false; - - priv = netdev_priv(netdev); - rpriv = priv->ppriv; - - /* Egress acl forward to vport is supported only non-uplink representor */ - return rpriv->rep->vport != MLX5_VPORT_UPLINK; + return netif_is_lag_port(netdev) && mlx5e_eswitch_vf_rep(netdev); } static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *ptr) @@ -210,9 +199,6 @@ static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *pt u16 fwd_vport_num; int err; - if (!mlx5e_rep_is_lag_netdev(netdev)) - return; - info = ptr; lag_info = info->lower_state_info; /* This is not an event of a representor becoming active slave */ @@ -266,9 +252,6 @@ static void mlx5e_rep_changeupper_event(struct net_device *netdev, void *ptr) struct net_device *lag_dev; struct mlx5e_priv *priv; - if (!mlx5e_rep_is_lag_netdev(netdev)) - return; - priv = netdev_priv(netdev); rpriv = priv->ppriv; 
lag_dev = info->upper_dev; @@ -293,6 +276,19 @@ static int mlx5e_rep_esw_bond_netevent(struct notifier_block *nb, unsigned long event, void *ptr) { struct net_device *netdev = netdev_notifier_info_to_dev(ptr); + struct mlx5e_rep_priv *rpriv; + struct mlx5e_rep_bond *bond; + struct mlx5e_priv *priv; + + if (!mlx5e_rep_is_lag_netdev(netdev)) + return NOTIFY_DONE; + + bond = container_of(nb, struct mlx5e_rep_bond, nb); + priv = netdev_priv(netdev); + rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch, REP_ETH); + /* Verify VF representor is on the same device of the bond handling the netevent. */ + if (rpriv->uplink_priv.bond != bond) + return NOTIFY_DONE; switch (event) { case NETDEV_CHANGELOWERSTATE: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c index 71e8d66fa1509434f300c4447036fce13f281666..6692bc8333f73fa1723d0b2deede69a9dcb2889e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c @@ -11,13 +11,13 @@ static int mlx5e_xsk_map_pool(struct mlx5e_priv *priv, { struct device *dev = mlx5_core_dma_dev(priv->mdev); - return xsk_pool_dma_map(pool, dev, 0); + return xsk_pool_dma_map(pool, dev, DMA_ATTR_SKIP_CPU_SYNC); } static void mlx5e_xsk_unmap_pool(struct mlx5e_priv *priv, struct xsk_buff_pool *pool) { - return xsk_pool_dma_unmap(pool, 0); + return xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC); } static int mlx5e_xsk_get_pools(struct mlx5e_xsk *xsk) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index bc7c1962f9e66565ee186da96fe09b82f971c832..6a1b1363ac16a8d0049ade5da6ad5ccf8a955221 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1746,7 +1746,7 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev, if (size_read < 0) { netdev_err(priv->netdev, "%s: mlx5_query_eeprom failed:0x%x\n", __func__, size_read); - return 0; + return size_read; } i += size_read; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 2f6c3a5813ed18820ab3c620959194f41b140f5b..16e98ac47624c7c7496f8418c1db082c6f5d7add 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -5024,9 +5024,13 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) } if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) { - netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; - netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL; - netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL; + netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM; + netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; } if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_GRE)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 117a5934145372db73f3de0a2ecfd5b733e7f764..b8637547800f98042433b588f1a1f7c7a935542f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -276,8 +276,8 @@ static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq, if (unlikely(!dma_info->page)) return -ENOMEM; 
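DMA_ATTR_SKIP_CPU_SYNC, used by the mlx5e mappings in these hunks (the XSK pool map above and the page map that follows), hands cache maintenance to the driver: CPU access must be bracketed with the dma_sync_single_for_cpu()/for_device() helpers, and the unmap must pass the same attribute. A sketch with placeholder dev/page/len, not the driver's code:

	#include <linux/dma-mapping.h>

	static dma_addr_t map_rx_page(struct device *dev, struct page *page)
	{
		dma_addr_t addr;

		addr = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE,
					  DMA_ATTR_SKIP_CPU_SYNC);
		return dma_mapping_error(dev, addr) ? DMA_MAPPING_ERROR : addr;
	}

	static void rx_page_cpu_access(struct device *dev, dma_addr_t addr, size_t len)
	{
		/* the maintenance a plain dma_map_page()/dma_unmap_page() would imply */
		dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
		/* ... read or copy the received data ... */
		dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
	}

	static void unmap_rx_page(struct device *dev, dma_addr_t addr)
	{
		/* attribute must match the one used at map time */
		dma_unmap_page_attrs(dev, addr, PAGE_SIZE, DMA_FROM_DEVICE,
				     DMA_ATTR_SKIP_CPU_SYNC);
	}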
- dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0, - PAGE_SIZE, rq->buff.map_dir); + dma_info->addr = dma_map_page_attrs(rq->pdev, dma_info->page, 0, PAGE_SIZE, + rq->buff.map_dir, DMA_ATTR_SKIP_CPU_SYNC); if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) { page_pool_recycle_direct(rq->page_pool, dma_info->page); dma_info->page = NULL; @@ -298,7 +298,8 @@ static inline int mlx5e_page_alloc(struct mlx5e_rq *rq, void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info) { - dma_unmap_page(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir); + dma_unmap_page_attrs(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir, + DMA_ATTR_SKIP_CPU_SYNC); } void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, @@ -984,7 +985,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, } /* True when explicitly set via priv flag, or XDP prog is loaded */ - if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)) + if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state) || + get_cqe_tls_offload(cqe)) goto csum_unnecessary; /* CQE csum doesn't cover padding octets in short ethernet diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index e06b1ba7d23498bd72cb7e895e2365d4fe2c4b14..ccc7dd3e738a48ede918113f769a0bed7b84300b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -2037,10 +2037,6 @@ esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw) if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source)) return false; - if (mlx5_core_is_ecpf_esw_manager(esw->dev) || - mlx5_ecpf_vport_exists(esw->dev)) - return false; - return true; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 0ff034b0866e2e9da7334fee35531f7f9a1830a3..55772f0cbbf8fef911b7e233605b0bd9564fd286 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -2034,6 +2034,8 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle) fte->node.del_hw_func = NULL; up_write_ref_node(&fte->node, false); tree_put_node(&fte->node, false); + } else { + up_write_ref_node(&fte->node, false); } kfree(handle); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c index ee710ce0079504466f34fd9910c481a0537fadd9..9b472e793ee361acf907286584842d993c8db075 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c @@ -131,7 +131,7 @@ static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev) { struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; - del_timer(&fw_reset->timer); + del_timer_sync(&fw_reset->timer); } static void mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c index 15c3a9058e72876fda3ac796fa78bf98dbc42255..0f0d250bbc1508f05661267a084faf136541ae8e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c @@ -265,10 +265,8 @@ static int mlx5_lag_fib_event(struct notifier_block *nb, fen_info = container_of(info, struct fib_entry_notifier_info, info); fi = fen_info->fi; - if (fi->nh) { - NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is 
not supported"); - return notifier_from_errno(-EINVAL); - } + if (fi->nh) + return NOTIFY_DONE; fib_dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev; if (fib_dev != ldev->pf[MLX5_LAG_P1].netdev && fib_dev != ldev->pf[MLX5_LAG_P2].netdev) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c index 947f346bdc2d6ef373b6d1fd7ee1a824b84934fc..77c6287c90d55063cb61b49d8c2b298312f27bc3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c @@ -292,7 +292,7 @@ static int create_chain_restore(struct fs_chain *chain) { struct mlx5_eswitch *esw = chain->chains->dev->priv.eswitch; - char modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)]; + u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; struct mlx5_fs_chains *chains = chain->chains; enum mlx5e_tc_attr_to_reg chain_to_reg; struct mlx5_modify_hdr *mod_hdr; diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h index 5ffdfb532cb7f9e4a16f1423283648e8f85bce1a..91f68fb0b420ae5eee729cf3bdb094b20fec4618 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h +++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h @@ -905,6 +905,18 @@ static inline int mlxsw_cmd_sw2hw_rdq(struct mlxsw_core *mlxsw_core, */ MLXSW_ITEM32(cmd_mbox, sw2hw_dq, cq, 0x00, 24, 8); +enum mlxsw_cmd_mbox_sw2hw_dq_sdq_lp { + MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_WQE, + MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_IGNORE_WQE, +}; + +/* cmd_mbox_sw2hw_dq_sdq_lp + * SDQ local Processing + * 0: local processing by wqe.lp + * 1: local processing (ignoring wqe.lp) + */ +MLXSW_ITEM32(cmd_mbox, sw2hw_dq, sdq_lp, 0x00, 23, 1); + /* cmd_mbox_sw2hw_dq_sdq_tclass * SDQ: CPU Egress TClass * RDQ: Reserved diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index ffaeda75eec4226d52f9baf32fe20556e4d5c6e7..dbb16ce25bdf32b92d8466adbd42801e33af17d8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -285,6 +285,7 @@ static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, struct mlxsw_pci_queue *q) { int tclass; + int lp; int i; int err; @@ -292,9 +293,12 @@ static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, q->consumer_counter = 0; tclass = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_PCI_SDQ_EMAD_TC : MLXSW_PCI_SDQ_CTL_TC; + lp = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_IGNORE_WQE : + MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_WQE; /* Set CQ of same number of this SDQ. 
*/ mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num); + mlxsw_cmd_mbox_sw2hw_dq_sdq_lp_set(mbox, lp); mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, tclass); mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */ for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) { @@ -1599,7 +1603,7 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb, wqe = elem_info->elem; mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */ - mlxsw_pci_wqe_lp_set(wqe, !!tx_info->is_emad); + mlxsw_pci_wqe_lp_set(wqe, 0); mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET); err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data, @@ -1900,6 +1904,7 @@ int mlxsw_pci_driver_register(struct pci_driver *pci_driver) { pci_driver->probe = mlxsw_pci_probe; pci_driver->remove = mlxsw_pci_remove; + pci_driver->shutdown = mlxsw_pci_remove; return pci_register_driver(pci_driver); } EXPORT_SYMBOL(mlxsw_pci_driver_register); diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 52401915828a1287d70edf567f01974117d69907..a06466ecca12a90055016c3e1f6adafbea314f42 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -848,12 +848,11 @@ void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data) } EXPORT_SYMBOL(ocelot_get_strings); +/* Caller must hold &ocelot->stats_lock */ static void ocelot_update_stats(struct ocelot *ocelot) { int i, j; - mutex_lock(&ocelot->stats_lock); - for (i = 0; i < ocelot->num_phys_ports; i++) { /* Configure the port to read the stats from */ ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(i), SYS_STAT_CFG); @@ -872,8 +871,6 @@ static void ocelot_update_stats(struct ocelot *ocelot) ~(u64)U32_MAX) + val; } } - - mutex_unlock(&ocelot->stats_lock); } static void ocelot_check_stats_work(struct work_struct *work) @@ -882,7 +879,9 @@ static void ocelot_check_stats_work(struct work_struct *work) struct ocelot *ocelot = container_of(del_work, struct ocelot, stats_work); + mutex_lock(&ocelot->stats_lock); ocelot_update_stats(ocelot); + mutex_unlock(&ocelot->stats_lock); queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work, OCELOT_STATS_CHECK_DELAY); @@ -892,12 +891,16 @@ void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data) { int i; + mutex_lock(&ocelot->stats_lock); + /* check and update now */ ocelot_update_stats(ocelot); /* Copy all counters */ for (i = 0; i < ocelot->num_stats; i++) *data++ = ocelot->stats[port * ocelot->num_stats + i]; + + mutex_unlock(&ocelot->stats_lock); } EXPORT_SYMBOL(ocelot_get_ethtool_stats); diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index 3655503352928d9a7d368003ad530df20c3b1015..217e8333de6c66154cbe0d6b0af078b3200083ac 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -462,13 +462,6 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress, return -EOPNOTSUPP; } - if (filter->block_id == VCAP_IS1 && - !is_zero_ether_addr(match.mask->dst)) { - NL_SET_ERR_MSG_MOD(extack, - "Key type S1_NORMAL cannot match on destination MAC"); - return -EOPNOTSUPP; - } - /* The hw support mac matches only for MAC_ETYPE key, * therefore if other matches(port, tcp flags, etc) are added * then just bail out @@ -483,6 +476,14 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress, return -EOPNOTSUPP; flow_rule_match_eth_addrs(rule, &match); + + if (filter->block_id == VCAP_IS1 && + !is_zero_ether_addr(match.mask->dst)) { + 
NL_SET_ERR_MSG_MOD(extack, + "Key type S1_NORMAL cannot match on destination MAC"); + return -EOPNOTSUPP; + } + filter->key_type = OCELOT_VCAP_KEY_ETYPE; ether_addr_copy(filter->key.etype.dmac.value, match.key->dst); diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index d19c02e991145bed556f3500c6007e253057881e..088ceac07b80556be5b85ea5b1ad6ccb4f05391a 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -922,8 +922,8 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev, int port, bool mod) { struct nfp_flower_priv *priv = app->priv; - int ida_idx = NFP_MAX_MAC_INDEX, err; struct nfp_tun_offloaded_mac *entry; + int ida_idx = -1, err; u16 nfp_mac_idx = 0; entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr); @@ -997,7 +997,7 @@ err_remove_hash: err_free_entry: kfree(entry); err_free_ida: - if (ida_idx != NFP_MAX_MAC_INDEX) + if (ida_idx != -1) ida_simple_remove(&priv->tun.mac_off_ids, ida_idx); return err; @@ -1011,6 +1011,7 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev, struct nfp_flower_repr_priv *repr_priv; struct nfp_tun_offloaded_mac *entry; struct nfp_repr *repr; + u16 nfp_mac_idx; int ida_idx; entry = nfp_tunnel_lookup_offloaded_macs(app, mac); @@ -1029,8 +1030,6 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev, entry->bridge_count--; if (!entry->bridge_count && entry->ref_count) { - u16 nfp_mac_idx; - nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT; if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx, false)) { @@ -1046,7 +1045,6 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev, /* If MAC is now used by 1 repr set the offloaded MAC index to port. */ if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) { - u16 nfp_mac_idx; int port, err; repr_priv = list_first_entry(&entry->repr_list, @@ -1074,8 +1072,14 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev, WARN_ON_ONCE(rhashtable_remove_fast(&priv->tun.offloaded_macs, &entry->ht_node, offloaded_macs_params)); + + if (nfp_flower_is_supported_bridge(netdev)) + nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT; + else + nfp_mac_idx = entry->index; + /* If MAC has global ID then extract and free the ida entry. 
*/ - if (nfp_tunnel_is_mac_idx_global(entry->index)) { + if (nfp_tunnel_is_mac_idx_global(nfp_mac_idx)) { ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index); ida_simple_remove(&priv->tun.mac_off_ids, ida_idx); } diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c index 7072b249c8bd60ebb302db31df468635d841acc6..8157666209798ffc73a1c97d2905d7be16c5584f 100644 --- a/drivers/net/ethernet/rocker/rocker_ofdpa.c +++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c @@ -2795,7 +2795,8 @@ static void ofdpa_fib4_abort(struct rocker *rocker) if (!ofdpa_port) continue; nh->fib_nh_flags &= ~RTNH_F_OFFLOAD; - ofdpa_flow_tbl_del(ofdpa_port, OFDPA_OP_FLAG_REMOVE, + ofdpa_flow_tbl_del(ofdpa_port, + OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT, flow_entry); } spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 9f5ccf1a0a54015c09687297ec6b269cad68b37e..cad6588840d8b1678697330af987ef96944f577f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -734,7 +734,7 @@ static int sun8i_dwmac_reset(struct stmmac_priv *priv) if (err) { dev_err(priv->device, "EMAC reset timeout\n"); - return -EFAULT; + return err; } return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h index e5dbd0bc257e7a1d8136bca183f972be4f75740b..82889c363c7773cec228762ee688eada68c7850a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h @@ -130,6 +130,7 @@ #define NUM_DWMAC100_DMA_REGS 9 #define NUM_DWMAC1000_DMA_REGS 23 +#define NUM_DWMAC4_DMA_REGS 27 void dwmac_enable_dma_transmission(void __iomem *ioaddr); void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 9e54f953634b7d7656b46cfece6e357e4c5b6cf9..0c0f01f490057c1dc3da829882c1720674796933 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -21,10 +21,18 @@ #include "dwxgmac2.h" #define REG_SPACE_SIZE 0x1060 +#define GMAC4_REG_SPACE_SIZE 0x116C #define MAC100_ETHTOOL_NAME "st_mac100" #define GMAC_ETHTOOL_NAME "st_gmac" #define XGMAC_ETHTOOL_NAME "st_xgmac" +/* Same as DMA_CHAN_BASE_ADDR defined in dwmac4_dma.h + * + * It is here because dwmac_dma.h and dwmac4_dam.h can not be included at the + * same time due to the conflicting macro names. 
+ */ +#define GMAC4_DMA_CHAN_BASE_ADDR 0x00001100 + #define ETHTOOL_DMA_OFFSET 55 struct stmmac_stats { @@ -413,6 +421,8 @@ static int stmmac_ethtool_get_regs_len(struct net_device *dev) if (priv->plat->has_xgmac) return XGMAC_REGSIZE * 4; + else if (priv->plat->has_gmac4) + return GMAC4_REG_SPACE_SIZE; return REG_SPACE_SIZE; } @@ -425,8 +435,13 @@ static void stmmac_ethtool_gregs(struct net_device *dev, stmmac_dump_mac_regs(priv, priv->hw, reg_space); stmmac_dump_dma_regs(priv, priv->ioaddr, reg_space); - if (!priv->plat->has_xgmac) { - /* Copy DMA registers to where ethtool expects them */ + /* Copy DMA registers to where ethtool expects them */ + if (priv->plat->has_gmac4) { + /* GMAC4 dumps its DMA registers at its DMA_CHAN_BASE_ADDR */ + memcpy(®_space[ETHTOOL_DMA_OFFSET], + ®_space[GMAC4_DMA_CHAN_BASE_ADDR / 4], + NUM_DWMAC4_DMA_REGS * 4); + } else if (!priv->plat->has_xgmac) { memcpy(®_space[ETHTOOL_DMA_OFFSET], ®_space[DMA_BUS_MODE / 4], NUM_DWMAC1000_DMA_REGS * 4); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index d291612eeafb96b753da3a6e6b42fc2b16bc0d1e..07b1b8374cd26b8d9f2a52aa1a2d80d39b0b0c7f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -142,15 +142,20 @@ static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec, static void get_systime(void __iomem *ioaddr, u64 *systime) { - u64 ns; - - /* Get the TSSS value */ - ns = readl(ioaddr + PTP_STNSR); - /* Get the TSS and convert sec time value to nanosecond */ - ns += readl(ioaddr + PTP_STSR) * 1000000000ULL; + u64 ns, sec0, sec1; + + /* Get the TSS value */ + sec1 = readl_relaxed(ioaddr + PTP_STSR); + do { + sec0 = sec1; + /* Get the TSSS value */ + ns = readl_relaxed(ioaddr + PTP_STNSR); + /* Get the TSS value */ + sec1 = readl_relaxed(ioaddr + PTP_STSR); + } while (sec0 != sec1); if (systime) - *systime = ns; + *systime = ns + (sec1 * 1000000000ULL); } const struct stmmac_hwtimestamp stmmac_ptp = { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index a8c5492cb39bef3e7f818b4a33f489c72a392bb6..6d8a839fab22e7348fb8ede26973c2a584208f6c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -816,8 +816,6 @@ static int stmmac_init_ptp(struct stmmac_priv *priv) priv->hwts_tx_en = 0; priv->hwts_rx_en = 0; - stmmac_ptp_register(priv); - return 0; } @@ -2691,7 +2689,7 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv) /** * stmmac_hw_setup - setup mac in a usable state. * @dev : pointer to the device structure. - * @init_ptp: initialize PTP if set + * @ptp_register: register PTP if set * Description: * this is the main function to setup the HW in a usable state because the * dma engine is reset, the core registers are configured (e.g. AXI, @@ -2701,7 +2699,7 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv) * 0 on success and an appropriate (-)ve integer as defined in errno.h * file on failure. 
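The reworked get_systime() in stmmac_hwtstamp.c above uses the usual rollover-safe read for a split seconds/nanoseconds counter: read the high word, read the low word, then re-read the high word and retry if it changed, so a seconds rollover between the two reads cannot yield a time that is off by almost a second. Generic sketch, with placeholder register pointers:

	#include <linux/io.h>

	static u64 read_split_time(void __iomem *reg_sec, void __iomem *reg_ns)
	{
		u32 sec0, sec1, ns;

		sec1 = readl_relaxed(reg_sec);
		do {
			sec0 = sec1;
			ns = readl_relaxed(reg_ns);
			sec1 = readl_relaxed(reg_sec);	/* retry if seconds rolled over mid-read */
		} while (sec0 != sec1);

		return (u64)sec1 * 1000000000ULL + ns;
	}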
*/ -static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) +static int stmmac_hw_setup(struct net_device *dev, bool ptp_register) { struct stmmac_priv *priv = netdev_priv(dev); u32 rx_cnt = priv->plat->rx_queues_to_use; @@ -2757,13 +2755,13 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) stmmac_mmc_setup(priv); - if (init_ptp) { - ret = stmmac_init_ptp(priv); - if (ret == -EOPNOTSUPP) - netdev_warn(priv->dev, "PTP not supported by HW\n"); - else if (ret) - netdev_warn(priv->dev, "PTP init failed\n"); - } + ret = stmmac_init_ptp(priv); + if (ret == -EOPNOTSUPP) + netdev_warn(priv->dev, "PTP not supported by HW\n"); + else if (ret) + netdev_warn(priv->dev, "PTP init failed\n"); + else if (ptp_register) + stmmac_ptp_register(priv); priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS; diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c index 424e644724e4608acfe6f2f631ac71e5b7d22f57..e74f2e95a46eb8ab8f01384f0718ebbc4b7a6504 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.c +++ b/drivers/net/ethernet/ti/cpsw_priv.c @@ -1144,7 +1144,7 @@ int cpsw_fill_rx_channels(struct cpsw_priv *priv) static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw, int size) { - struct page_pool_params pp_params; + struct page_pool_params pp_params = {}; struct page_pool *pool; pp_params.order = 0; diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 650ffb93796f1a50a77ea41056d1e9ad4289dfa3..130f4b707bdc4455face8f93cc98710d4abfe246 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1421,6 +1421,8 @@ static int temac_probe(struct platform_device *pdev) lp->indirect_lock = devm_kmalloc(&pdev->dev, sizeof(*lp->indirect_lock), GFP_KERNEL); + if (!lp->indirect_lock) + return -ENOMEM; spin_lock_init(lp->indirect_lock); } diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 69c79cc24e6e4c019bc13f5382971da81715446e..0baf85122f5acebf183d1b2c2b5dc1ec1c4d7ee4 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -41,8 +41,9 @@ #include "xilinx_axienet.h" /* Descriptors defines for Tx and Rx DMA */ -#define TX_BD_NUM_DEFAULT 64 +#define TX_BD_NUM_DEFAULT 128 #define RX_BD_NUM_DEFAULT 1024 +#define TX_BD_NUM_MIN (MAX_SKB_FRAGS + 1) #define TX_BD_NUM_MAX 4096 #define RX_BD_NUM_MAX 4096 @@ -496,7 +497,8 @@ static void axienet_setoptions(struct net_device *ndev, u32 options) static int __axienet_device_reset(struct axienet_local *lp) { - u32 timeout; + u32 value; + int ret; /* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset * process of Axi DMA takes a while to complete as all pending @@ -506,15 +508,23 @@ static int __axienet_device_reset(struct axienet_local *lp) * they both reset the entire DMA core, so only one needs to be used. 
*/ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK); - timeout = DELAY_OF_ONE_MILLISEC; - while (axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET) & - XAXIDMA_CR_RESET_MASK) { - udelay(1); - if (--timeout == 0) { - netdev_err(lp->ndev, "%s: DMA reset timeout!\n", - __func__); - return -ETIMEDOUT; - } + ret = read_poll_timeout(axienet_dma_in32, value, + !(value & XAXIDMA_CR_RESET_MASK), + DELAY_OF_ONE_MILLISEC, 50000, false, lp, + XAXIDMA_TX_CR_OFFSET); + if (ret) { + dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__); + return ret; + } + + /* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */ + ret = read_poll_timeout(axienet_ior, value, + value & XAE_INT_PHYRSTCMPLT_MASK, + DELAY_OF_ONE_MILLISEC, 50000, false, lp, + XAE_IS_OFFSET); + if (ret) { + dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__); + return ret; } return 0; @@ -623,6 +633,8 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd, if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK)) break; + /* Ensure we see complete descriptor update */ + dma_rmb(); phys = desc_get_phys_addr(lp, cur_p); dma_unmap_single(ndev->dev.parent, phys, (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK), @@ -631,13 +643,15 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd, if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) dev_consume_skb_irq(cur_p->skb); - cur_p->cntrl = 0; cur_p->app0 = 0; cur_p->app1 = 0; cur_p->app2 = 0; cur_p->app4 = 0; - cur_p->status = 0; cur_p->skb = NULL; + /* ensure our transmit path and device don't prematurely see status cleared */ + wmb(); + cur_p->cntrl = 0; + cur_p->status = 0; if (sizep) *sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; @@ -646,6 +660,32 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd, return i; } +/** + * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy + * @lp: Pointer to the axienet_local structure + * @num_frag: The number of BDs to check for + * + * Return: 0, on success + * NETDEV_TX_BUSY, if any of the descriptors are not free + * + * This function is invoked before BDs are allocated and transmission starts. + * This function returns 0 if a BD or group of BDs can be allocated for + * transmission. If the BD or any of the BDs are not free the function + * returns a busy status. This is invoked from axienet_start_xmit. + */ +static inline int axienet_check_tx_bd_space(struct axienet_local *lp, + int num_frag) +{ + struct axidma_bd *cur_p; + + /* Ensure we see all descriptor updates from device or TX IRQ path */ + rmb(); + cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num]; + if (cur_p->cntrl) + return NETDEV_TX_BUSY; + return 0; +} + /** * axienet_start_xmit_done - Invoked once a transmit is completed by the * Axi DMA Tx channel. @@ -675,30 +715,8 @@ static void axienet_start_xmit_done(struct net_device *ndev) /* Matches barrier in axienet_start_xmit */ smp_mb(); - netif_wake_queue(ndev); -} - -/** - * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy - * @lp: Pointer to the axienet_local structure - * @num_frag: The number of BDs to check for - * - * Return: 0, on success - * NETDEV_TX_BUSY, if any of the descriptors are not free - * - * This function is invoked before BDs are allocated and transmission starts. - * This function returns 0 if a BD or group of BDs can be allocated for - * transmission. If the BD or any of the BDs are not free the function - * returns a busy status. 
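The axienet start_xmit/xmit_done rework in this hunk settles on the standard stop/wake handshake: the transmit path stops the queue whenever the next worst-case packet (MAX_SKB_FRAGS + 1 descriptors) might not fit, and both sides issue a barrier before re-checking so a wake-up cannot be lost. A condensed sketch with a hypothetical ring structure (my_priv, free_descs), not the driver's descriptor logic:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	#define DESCS_PER_PKT	(MAX_SKB_FRAGS + 1)

	struct my_priv {
		struct net_device *ndev;
		unsigned int free_descs;	/* grown by the completion path */
	};

	static void my_xmit_tail(struct my_priv *p)	/* end of ndo_start_xmit */
	{
		if (p->free_descs < DESCS_PER_PKT) {
			netif_stop_queue(p->ndev);
			smp_mb();	/* pairs with the barrier in my_tx_done() */
			if (p->free_descs >= DESCS_PER_PKT)	/* freed meanwhile? */
				netif_wake_queue(p->ndev);
		}
	}

	static void my_tx_done(struct my_priv *p)	/* completion path, after reclaim */
	{
		smp_mb();	/* make reclaimed descriptors visible before the check */
		if (p->free_descs >= DESCS_PER_PKT)
			netif_wake_queue(p->ndev);
	}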
This is invoked from axienet_start_xmit. - */ -static inline int axienet_check_tx_bd_space(struct axienet_local *lp, - int num_frag) -{ - struct axidma_bd *cur_p; - cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num]; - if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK) - return NETDEV_TX_BUSY; - return 0; + if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) + netif_wake_queue(ndev); } /** @@ -730,20 +748,15 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) num_frag = skb_shinfo(skb)->nr_frags; cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; - if (axienet_check_tx_bd_space(lp, num_frag)) { - if (netif_queue_stopped(ndev)) - return NETDEV_TX_BUSY; - + if (axienet_check_tx_bd_space(lp, num_frag + 1)) { + /* Should not happen as last start_xmit call should have + * checked for sufficient space and queue should only be + * woken when sufficient space is available. + */ netif_stop_queue(ndev); - - /* Matches barrier in axienet_start_xmit_done */ - smp_mb(); - - /* Space might have just been freed - check again */ - if (axienet_check_tx_bd_space(lp, num_frag)) - return NETDEV_TX_BUSY; - - netif_wake_queue(ndev); + if (net_ratelimit()) + netdev_warn(ndev, "TX ring unexpectedly full\n"); + return NETDEV_TX_BUSY; } if (skb->ip_summed == CHECKSUM_PARTIAL) { @@ -804,6 +817,18 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) if (++lp->tx_bd_tail >= lp->tx_bd_num) lp->tx_bd_tail = 0; + /* Stop queue if next transmit may not have space */ + if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) { + netif_stop_queue(ndev); + + /* Matches barrier in axienet_start_xmit_done */ + smp_mb(); + + /* Space might have just been freed - check again */ + if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) + netif_wake_queue(ndev); + } + return NETDEV_TX_OK; } @@ -834,6 +859,8 @@ static void axienet_recv(struct net_device *ndev) tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci; + /* Ensure we see complete descriptor update */ + dma_rmb(); phys = desc_get_phys_addr(lp, cur_p); dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size, DMA_FROM_DEVICE); @@ -1355,7 +1382,8 @@ static int axienet_ethtools_set_ringparam(struct net_device *ndev, if (ering->rx_pending > RX_BD_NUM_MAX || ering->rx_mini_pending || ering->rx_jumbo_pending || - ering->rx_pending > TX_BD_NUM_MAX) + ering->tx_pending < TX_BD_NUM_MIN || + ering->tx_pending > TX_BD_NUM_MAX) return -EINVAL; if (netif_running(ndev)) @@ -2015,6 +2043,11 @@ static int axienet_probe(struct platform_device *pdev) lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD; lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD; + /* Reset core now that clocks are enabled, prior to accessing MDIO */ + ret = __axienet_device_reset(lp); + if (ret) + goto cleanup_clk; + lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); if (lp->phy_node) { ret = axienet_mdio_setup(lp); diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c index 5ab53e9942f3052b16f9e1f867e63901341b67b1..5d30b3e1806ab91fe39f0fa95586e79591035b28 100644 --- a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c @@ -951,9 +951,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) sizeof(struct yamdrv_ioctl_mcs)); if (IS_ERR(ym)) return PTR_ERR(ym); - if (ym->cmd != SIOCYAMSMCS) - return -EINVAL; - if (ym->bitrate > YAM_MAXBITRATE) { + if (ym->cmd != SIOCYAMSMCS || ym->bitrate > YAM_MAXBITRATE) { kfree(ym); return -EINVAL; } diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 
7d67f41387f5577959839f902128b1777b4b51ee..4f5ef8a9a9a8796db21f270bbb707eb5204e10f1 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -100,6 +100,7 @@ struct at86rf230_local { unsigned long cal_timeout; bool is_tx; bool is_tx_from_off; + bool was_tx; u8 tx_retry; struct sk_buff *tx_skb; struct at86rf230_state_change tx; @@ -343,7 +344,11 @@ at86rf230_async_error_recover_complete(void *context) if (ctx->free) kfree(ctx); - ieee802154_wake_queue(lp->hw); + if (lp->was_tx) { + lp->was_tx = 0; + dev_kfree_skb_any(lp->tx_skb); + ieee802154_wake_queue(lp->hw); + } } static void @@ -352,7 +357,11 @@ at86rf230_async_error_recover(void *context) struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; - lp->is_tx = 0; + if (lp->is_tx) { + lp->was_tx = 1; + lp->is_tx = 0; + } + at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, at86rf230_async_error_recover_complete); } diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index 4eb64709d44cb09865d23d9e34ea5d53a9cfe6b4..fd9f33c833fa367afd5a5a69e8c165f37ab3d70f 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -1771,6 +1771,7 @@ static int ca8210_async_xmit_complete( status ); if (status != MAC_TRANSACTION_OVERFLOW) { + dev_kfree_skb_any(priv->tx_skb); ieee802154_wake_queue(priv->hw); return 0; } @@ -2976,8 +2977,8 @@ static void ca8210_hw_setup(struct ieee802154_hw *ca8210_hw) ca8210_hw->phy->cca.opt = NL802154_CCA_OPT_ENERGY_CARRIER_AND; ca8210_hw->phy->cca_ed_level = -9800; ca8210_hw->phy->symbol_duration = 16; - ca8210_hw->phy->lifs_period = 40; - ca8210_hw->phy->sifs_period = 12; + ca8210_hw->phy->lifs_period = 40 * ca8210_hw->phy->symbol_duration; + ca8210_hw->phy->sifs_period = 12 * ca8210_hw->phy->symbol_duration; ca8210_hw->flags = IEEE802154_HW_AFILT | IEEE802154_HW_OMIT_CKSUM | diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c index 080b15fc00601ef832c5c1090c8872c211037e4c..97981cf7661ad759d901ff667174fe63bfea0a1f 100644 --- a/drivers/net/ieee802154/mac802154_hwsim.c +++ b/drivers/net/ieee802154/mac802154_hwsim.c @@ -786,6 +786,7 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev, goto err_pib; } + pib->channel = 13; rcu_assign_pointer(phy->pib, pib); phy->idx = idx; INIT_LIST_HEAD(&phy->edges); diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c index 8dc04e2590b18b1b41239c4ebf8ac1382942a8d5..383231b85464252a9dfc6a20f938247e5b546959 100644 --- a/drivers/net/ieee802154/mcr20a.c +++ b/drivers/net/ieee802154/mcr20a.c @@ -976,8 +976,8 @@ static void mcr20a_hw_setup(struct mcr20a_local *lp) dev_dbg(printdev(lp), "%s\n", __func__); phy->symbol_duration = 16; - phy->lifs_period = 40; - phy->sifs_period = 12; + phy->lifs_period = 40 * phy->symbol_duration; + phy->sifs_period = 12 * phy->symbol_duration; hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT | diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index a37aae00e128fed63dc334232a253c7b2668ad41..621648ce750b7efd60c328152d27cef3f90c0f5c 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -901,27 +901,35 @@ static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count) struct gsi *gsi; u32 backlog; - if (!endpoint->replenish_enabled) { + if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags)) { if (count) atomic_add(count, &endpoint->replenish_saved); return; } + /* If already 
active, just update the backlog */ + if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags)) { + if (count) + atomic_add(count, &endpoint->replenish_backlog); + return; + } while (atomic_dec_not_zero(&endpoint->replenish_backlog)) if (ipa_endpoint_replenish_one(endpoint)) goto try_again_later; + + clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags); + if (count) atomic_add(count, &endpoint->replenish_backlog); return; try_again_later: - /* The last one didn't succeed, so fix the backlog */ - backlog = atomic_inc_return(&endpoint->replenish_backlog); + clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags); - if (count) - atomic_add(count, &endpoint->replenish_backlog); + /* The last one didn't succeed, so fix the backlog */ + backlog = atomic_add_return(count + 1, &endpoint->replenish_backlog); /* Whenever a receive buffer transaction completes we'll try to * replenish again. It's unlikely, but if we fail to supply even @@ -941,7 +949,7 @@ static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint) u32 max_backlog; u32 saved; - endpoint->replenish_enabled = true; + set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags); while ((saved = atomic_xchg(&endpoint->replenish_saved, 0))) atomic_add(saved, &endpoint->replenish_backlog); @@ -955,7 +963,7 @@ static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint) { u32 backlog; - endpoint->replenish_enabled = false; + clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags); while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0))) atomic_add(backlog, &endpoint->replenish_saved); } @@ -1472,7 +1480,8 @@ static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint) /* RX transactions require a single TRE, so the maximum * backlog is the same as the maximum outstanding TREs. 
*/ - endpoint->replenish_enabled = false; + clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags); + clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags); atomic_set(&endpoint->replenish_saved, gsi_channel_tre_max(gsi, endpoint->channel_id)); atomic_set(&endpoint->replenish_backlog, 0); diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h index 58a245de488e86298f2bf5eb515032cf3f00a6e9..823c4a1296587c225f65239e624fe1911b707a52 100644 --- a/drivers/net/ipa/ipa_endpoint.h +++ b/drivers/net/ipa/ipa_endpoint.h @@ -39,6 +39,19 @@ enum ipa_endpoint_name { #define IPA_ENDPOINT_MAX 32 /* Max supported by driver */ +/** + * enum ipa_replenish_flag: RX buffer replenish flags + * + * @IPA_REPLENISH_ENABLED: Whether receive buffer replenishing is enabled + * @IPA_REPLENISH_ACTIVE: Whether replenishing is underway + * @IPA_REPLENISH_COUNT: Number of defined replenish flags + */ +enum ipa_replenish_flag { + IPA_REPLENISH_ENABLED, + IPA_REPLENISH_ACTIVE, + IPA_REPLENISH_COUNT, /* Number of flags (must be last) */ +}; + /** * struct ipa_endpoint - IPA endpoint information * @channel_id: EP's GSI channel @@ -60,7 +73,7 @@ struct ipa_endpoint { struct net_device *netdev; /* Receive buffer replenishing for RX endpoints */ - bool replenish_enabled; + DECLARE_BITMAP(replenish_flags, IPA_REPLENISH_COUNT); u32 replenish_ready; atomic_t replenish_saved; atomic_t replenish_backlog; diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index c601d3df272202bd52d23888c59312cbf2e87b4e..789a124809e3c2b118bb376b9b011eabfabf64b1 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -3869,6 +3869,18 @@ static void macsec_common_dellink(struct net_device *dev, struct list_head *head struct macsec_dev *macsec = macsec_priv(dev); struct net_device *real_dev = macsec->real_dev; + /* If h/w offloading is available, propagate to the device */ + if (macsec_is_offloaded(macsec)) { + const struct macsec_ops *ops; + struct macsec_context ctx; + + ops = macsec_get_ops(netdev_priv(dev), &ctx); + if (ops) { + ctx.secy = &macsec->secy; + macsec_offload(ops->mdo_del_secy, &ctx); + } + } + unregister_netdevice_queue(dev, head); list_del_rcu(&macsec->secys); macsec_del_dev(macsec); @@ -3883,18 +3895,6 @@ static void macsec_dellink(struct net_device *dev, struct list_head *head) struct net_device *real_dev = macsec->real_dev; struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); - /* If h/w offloading is available, propagate to the device */ - if (macsec_is_offloaded(macsec)) { - const struct macsec_ops *ops; - struct macsec_context ctx; - - ops = macsec_get_ops(netdev_priv(dev), &ctx); - if (ops) { - ctx.secy = &macsec->secy; - macsec_offload(ops->mdo_del_secy, &ctx); - } - } - macsec_common_dellink(dev, head); if (list_empty(&rxd->secys)) { @@ -4017,6 +4017,15 @@ static int macsec_newlink(struct net *net, struct net_device *dev, !macsec_check_offload(macsec->offload, macsec)) return -EOPNOTSUPP; + /* send_sci must be set to true when transmit sci explicitly is set */ + if ((data && data[IFLA_MACSEC_SCI]) && + (data && data[IFLA_MACSEC_INC_SCI])) { + u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]); + + if (!send_sci) + return -EINVAL; + } + if (data && data[IFLA_MACSEC_ICV_LEN]) icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); mtu = real_dev->mtu - icv_len - macsec_extra_len(true); diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c index 966c3b4ad59d14e74bc113680e0fbff231d296d2..e2273588c75b6f3dd212269611d7f41a8769ed16 100644 --- 
a/drivers/net/mdio/mdio-aspeed.c +++ b/drivers/net/mdio/mdio-aspeed.c @@ -148,6 +148,7 @@ static const struct of_device_id aspeed_mdio_of_match[] = { { .compatible = "aspeed,ast2600-mdio", }, { }, }; +MODULE_DEVICE_TABLE(of, aspeed_mdio_of_match); static struct platform_driver aspeed_mdio_driver = { .driver = { diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 8b0ac38742d0626aa865bf63bad4d0735336f93c..992459dcaf399dc76e2d021a375e45b24ab6e6cc 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -807,6 +807,7 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM54616S", /* PHY_GBIT_FEATURES */ + .soft_reset = genphy_soft_reset, .config_init = bcm54xx_config_init, .config_aneg = bcm54616s_config_aneg, .ack_interrupt = bcm_phy_ack_intr, diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 91616182c311f9733dc63bf2ff86f48caad4706f..cb9d1852a75c8c0f23d68583f3309230bb84c8e8 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -515,9 +515,9 @@ static int m88e1121_config_aneg_rgmii_delays(struct phy_device *phydev) else mscr = 0; - return phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE, - MII_88E1121_PHY_MSCR_REG, - MII_88E1121_PHY_MSCR_DELAY_MASK, mscr); + return phy_modify_paged_changed(phydev, MII_MARVELL_MSCR_PAGE, + MII_88E1121_PHY_MSCR_REG, + MII_88E1121_PHY_MSCR_DELAY_MASK, mscr); } static int m88e1121_config_aneg(struct phy_device *phydev) @@ -531,11 +531,13 @@ static int m88e1121_config_aneg(struct phy_device *phydev) return err; } + changed = err; + err = marvell_set_polarity(phydev, phydev->mdix_ctrl); if (err < 0) return err; - changed = err; + changed |= err; err = genphy_config_aneg(phydev); if (err < 0) @@ -1059,16 +1061,15 @@ static int m88e1118_config_aneg(struct phy_device *phydev) { int err; - err = genphy_soft_reset(phydev); + err = marvell_set_polarity(phydev, phydev->mdix_ctrl); if (err < 0) return err; - err = marvell_set_polarity(phydev, phydev->mdix_ctrl); + err = genphy_config_aneg(phydev); if (err < 0) return err; - err = genphy_config_aneg(phydev); - return 0; + return genphy_soft_reset(phydev); } static int m88e1118_config_init(struct phy_device *phydev) @@ -1090,6 +1091,12 @@ static int m88e1118_config_init(struct phy_device *phydev) if (err < 0) return err; + if (phy_interface_is_rgmii(phydev)) { + err = m88e1121_config_aneg_rgmii_delays(phydev); + if (err < 0) + return err; + } + /* Adjust LED Control */ if (phydev->dev_flags & MARVELL_PHY_M1118_DNS323_LEDS) err = phy_write(phydev, 0x10, 0x1100); diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 2645ca35103c9c56058ab21ab4a16a775a95158d..c416ab1d2b008899d85955435298acb31d14ca4f 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -588,7 +588,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) mdiobus_setup_mdiodev_from_board_info(bus, mdiobus_create_device); bus->state = MDIOBUS_REGISTERED; - pr_info("%s: probed\n", bus->name); + dev_dbg(&bus->dev, "probed\n"); return 0; error: diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c index 8d333d3084ed32a7c345656555b2b59376bb8f98..cccb83dae673ba1db8db6ea226fb51df49dd9c9a 100644 --- a/drivers/net/phy/phy-core.c +++ b/drivers/net/phy/phy-core.c @@ -161,11 +161,11 @@ static const struct phy_setting settings[] = { PHY_SETTING( 2500, FULL, 2500baseT_Full ), PHY_SETTING( 2500, FULL, 2500baseX_Full ), /* 1G */ - PHY_SETTING( 1000, FULL, 1000baseKX_Full ), 
PHY_SETTING( 1000, FULL, 1000baseT_Full ), PHY_SETTING( 1000, HALF, 1000baseT_Half ), PHY_SETTING( 1000, FULL, 1000baseT1_Full ), PHY_SETTING( 1000, FULL, 1000baseX_Full ), + PHY_SETTING( 1000, FULL, 1000baseKX_Full ), /* 100M */ PHY_SETTING( 100, FULL, 100baseT_Full ), PHY_SETTING( 100, FULL, 100baseT1_Full ), diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 85f3cde5ffd09d9bc05956b47f23339b9c1f891c..d2f6d8107595a673d11b17e42ed98dcf2c27bfdc 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1682,6 +1682,9 @@ void phy_detach(struct phy_device *phydev) phy_driver_is_genphy_10g(phydev)) device_release_driver(&phydev->mdio.dev); + /* Assert the reset signal */ + phy_device_reset(phydev, 1); + /* * The phydev might go away on the put_device() below, so avoid * a use-after-free bug by reading the underlying bus first. @@ -1693,9 +1696,6 @@ void phy_detach(struct phy_device *phydev) ndev_owner = dev->dev.parent->driver->owner; if (ndev_owner != bus->owner) module_put(bus->owner); - - /* Assert the reset signal */ - phy_device_reset(phydev, 1); } EXPORT_SYMBOL(phy_detach); diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index 4cf874fb5c5b414bc76b190d1d355a75df5aff76..a05d8372669c15b179fcd6abb55e794b9ce4909c 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -609,6 +609,11 @@ struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle *fwnode) else if (ret < 0) return ERR_PTR(ret); + if (!fwnode_device_is_available(ref.fwnode)) { + fwnode_handle_put(ref.fwnode); + return NULL; + } + bus = sfp_bus_get(ref.fwnode); fwnode_handle_put(ref.fwnode); if (!bus) diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 32c34c728c7a1a7076dc0c0b00476259fc292172..efffa65f8214313efde976e20ed95d85f191359e 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -1589,17 +1589,20 @@ static int sfp_sm_probe_for_phy(struct sfp *sfp) static int sfp_module_parse_power(struct sfp *sfp) { u32 power_mW = 1000; + bool supports_a2; if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_POWER_DECL)) power_mW = 1500; if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_HIGH_POWER_LEVEL)) power_mW = 2000; + supports_a2 = sfp->id.ext.sff8472_compliance != + SFP_SFF8472_COMPLIANCE_NONE || + sfp->id.ext.diagmon & SFP_DIAGMON_DDM; + if (power_mW > sfp->max_power_mW) { /* Module power specification exceeds the allowed maximum. */ - if (sfp->id.ext.sff8472_compliance == - SFP_SFF8472_COMPLIANCE_NONE && - !(sfp->id.ext.diagmon & SFP_DIAGMON_DDM)) { + if (!supports_a2) { /* The module appears not to implement bus address * 0xa2, so assume that the module powers up in the * indicated mode. @@ -1616,11 +1619,25 @@ static int sfp_module_parse_power(struct sfp *sfp) } } + if (power_mW <= 1000) { + /* Modules below 1W do not require a power change sequence */ + sfp->module_power_mW = power_mW; + return 0; + } + + if (!supports_a2) { + /* The module power level is below the host maximum and the + * module appears not to implement bus address 0xa2, so assume + * that the module powers up in the indicated mode. + */ + return 0; + } + /* If the module requires a higher power mode, but also requires * an address change sequence, warn the user that the module may * not be functional. 
*/ - if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE && power_mW > 1000) { + if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE) { dev_warn(sfp->dev, "Address Change Sequence not supported but module requires %u.%uW, module may not be functional\n", power_mW / 1000, (power_mW / 100) % 10); diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 33b2e0fb68bbbae1f2c27564e425764672a14a3c..2b9815ec4a622ff9b03805ae06f5daec40fa151e 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -69,6 +69,8 @@ #define MPHDRLEN 6 /* multilink protocol header length */ #define MPHDRLEN_SSN 4 /* ditto with short sequence numbers */ +#define PPP_PROTO_LEN 2 + /* * An instance of /dev/ppp can be associated with either a ppp * interface unit or a ppp channel. In both cases, file->private_data @@ -496,6 +498,9 @@ static ssize_t ppp_write(struct file *file, const char __user *buf, if (!pf) return -ENXIO; + /* All PPP packets should start with the 2-byte protocol */ + if (count < PPP_PROTO_LEN) + return -EINVAL; ret = -ENOMEM; skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL); if (!skb) @@ -1632,7 +1637,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb) } ++ppp->stats64.tx_packets; - ppp->stats64.tx_bytes += skb->len - 2; + ppp->stats64.tx_bytes += skb->len - PPP_PROTO_LEN; switch (proto) { case PPP_IP: diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index b77b0a33d697dd3295b44a5543707db21649d627..0b0cbcee1920bdabe037345450c89e22e3853fa2 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1467,58 +1467,68 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb) u16 hdr_off; u32 *pkt_hdr; - /* This check is no longer done by usbnet */ - if (skb->len < dev->net->hard_header_len) + /* At the end of the SKB, there's a header telling us how many packets + * are bundled into this buffer and where we can find an array of + * per-packet metadata (which contains elements encoded into u16). + */ + if (skb->len < 4) return 0; - skb_trim(skb, skb->len - 4); rx_hdr = get_unaligned_le32(skb_tail_pointer(skb)); - pkt_cnt = (u16)rx_hdr; hdr_off = (u16)(rx_hdr >> 16); + + if (pkt_cnt == 0) + return 0; + + /* Make sure that the bounds of the metadata array are inside the SKB + * (and in front of the counter at the end). 
+ */ + if (pkt_cnt * 2 + hdr_off > skb->len) + return 0; pkt_hdr = (u32 *)(skb->data + hdr_off); - while (pkt_cnt--) { + /* Packets must not overlap the metadata array */ + skb_trim(skb, hdr_off); + + for (; ; pkt_cnt--, pkt_hdr++) { u16 pkt_len; le32_to_cpus(pkt_hdr); pkt_len = (*pkt_hdr >> 16) & 0x1fff; - /* Check CRC or runt packet */ - if ((*pkt_hdr & AX_RXHDR_CRC_ERR) || - (*pkt_hdr & AX_RXHDR_DROP_ERR)) { - skb_pull(skb, (pkt_len + 7) & 0xFFF8); - pkt_hdr++; - continue; - } - - if (pkt_cnt == 0) { - skb->len = pkt_len; - /* Skip IP alignment pseudo header */ - skb_pull(skb, 2); - skb_set_tail_pointer(skb, skb->len); - skb->truesize = pkt_len + sizeof(struct sk_buff); - ax88179_rx_checksum(skb, pkt_hdr); - return 1; - } + if (pkt_len > skb->len) + return 0; - ax_skb = skb_clone(skb, GFP_ATOMIC); - if (ax_skb) { + /* Check CRC or runt packet */ + if (((*pkt_hdr & (AX_RXHDR_CRC_ERR | AX_RXHDR_DROP_ERR)) == 0) && + pkt_len >= 2 + ETH_HLEN) { + bool last = (pkt_cnt == 0); + + if (last) { + ax_skb = skb; + } else { + ax_skb = skb_clone(skb, GFP_ATOMIC); + if (!ax_skb) + return 0; + } ax_skb->len = pkt_len; /* Skip IP alignment pseudo header */ skb_pull(ax_skb, 2); skb_set_tail_pointer(ax_skb, ax_skb->len); ax_skb->truesize = pkt_len + sizeof(struct sk_buff); ax88179_rx_checksum(ax_skb, pkt_hdr); + + if (last) + return 1; + usbnet_skb_return(dev, ax_skb); - } else { - return 0; } - skb_pull(skb, (pkt_len + 7) & 0xFFF8); - pkt_hdr++; + /* Trim this packet away from the SKB */ + if (!skb_pull(skb, (pkt_len + 7) & 0xFFF8)) + return 0; } - return 1; } static struct sk_buff * diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 6aaa0675c28a397c30991375f22256e9209bb2a3..43ddbe61dc58e140015e43317efcd7893fdb1693 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -570,6 +570,11 @@ static const struct usb_device_id products[] = { .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \ .bInterfaceProtocol = USB_CDC_PROTO_NONE +#define ZAURUS_FAKE_INTERFACE \ + .bInterfaceClass = USB_CLASS_COMM, \ + .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \ + .bInterfaceProtocol = USB_CDC_PROTO_NONE + /* SA-1100 based Sharp Zaurus ("collie"), or compatible; * wire-incompatible with true CDC Ethernet implementations. * (And, it seems, needlessly so...) 
@@ -623,6 +628,13 @@ static const struct usb_device_id products[] = { .idProduct = 0x9032, /* SL-6000 */ ZAURUS_MASTER_INTERFACE, .driver_info = 0, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, + .idProduct = 0x9032, /* SL-6000 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = 0, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index eaaa5aee58251e5469dde979bddef20edf5cac85..ab91fa5b0194db57041c8f92ec9ba4ea62632a25 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -1702,10 +1702,10 @@ int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in) { struct sk_buff *skb; struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; - int len; + unsigned int len; int nframes; int x; - int offset; + unsigned int offset; union { struct usb_cdc_ncm_ndp16 *ndp16; struct usb_cdc_ncm_ndp32 *ndp32; @@ -1777,8 +1777,8 @@ next_ndp: break; } - /* sanity checking */ - if (((offset + len) > skb_in->len) || + /* sanity checking - watch out for integer wrap*/ + if ((offset > skb_in->len) || (len > skb_in->len - offset) || (len > ctx->rx_max) || (len < ETH_HLEN)) { netif_dbg(dev, rx_err, dev->net, "invalid frame detected (ignored) offset[%u]=%u, length=%u, skb=%p\n", diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 207e59e74935aaf4b23add136b611d3138a6349b..06d9f19ca142a69a7c70d4200cd0d7455cbb7d4e 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@ -121,7 +121,7 @@ static int ipheth_alloc_urbs(struct ipheth_device *iphone) if (tx_buf == NULL) goto free_rx_urb; - rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE, + rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN, GFP_KERNEL, &rx_urb->transfer_dma); if (rx_buf == NULL) goto free_tx_buf; @@ -146,7 +146,7 @@ error_nomem: static void ipheth_free_urbs(struct ipheth_device *iphone) { - usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->rx_buf, + usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN, iphone->rx_buf, iphone->rx_urb->transfer_dma); usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->tx_buf, iphone->tx_urb->transfer_dma); @@ -317,7 +317,7 @@ static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags) usb_fill_bulk_urb(dev->rx_urb, udev, usb_rcvbulkpipe(udev, dev->bulk_in), - dev->rx_buf, IPHETH_BUF_SIZE, + dev->rx_buf, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN, ipheth_rcvbulk_callback, dev); dev->rx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c index 09bfa6a4dfbc17ecd40bc1fd37f23a8e88b75afe..7e40e2e2f37230c43ac875997485e23741abb420 100644 --- a/drivers/net/usb/mcs7830.c +++ b/drivers/net/usb/mcs7830.c @@ -108,8 +108,16 @@ static const char driver_name[] = "MOSCHIP usb-ethernet driver"; static int mcs7830_get_reg(struct usbnet *dev, u16 index, u16 size, void *data) { - return usbnet_read_cmd(dev, MCS7830_RD_BREQ, MCS7830_RD_BMREQ, - 0x0000, index, data, size); + int ret; + + ret = usbnet_read_cmd(dev, MCS7830_RD_BREQ, MCS7830_RD_BMREQ, + 0x0000, index, data, size); + if (ret < 0) + return ret; + else if (ret < size) + return -ENODATA; + + return ret; } static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, const void *data) diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 6e033ba717030e9c26a366b21f0bb74e2c078bc3..597766d14563e07242d76a5ae19b2321d0f5f3c6 100644 --- 
a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1333,6 +1333,8 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */ {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */ {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/ + {QMI_FIXED_INTF(0x413c, 0x81e4, 0)}, /* Dell Wireless 5829e with eSIM support*/ + {QMI_FIXED_INTF(0x413c, 0x81e6, 0)}, /* Dell Wireless 5829e */ {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */ {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c index e04c8054c2cf3c7c3f5c3a065b48e99d299b5501..fce6713e970badda0337a49a518c07d88ecd5fa9 100644 --- a/drivers/net/usb/sr9700.c +++ b/drivers/net/usb/sr9700.c @@ -410,7 +410,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb) /* ignore the CRC length */ len = (skb->data[1] | (skb->data[2] << 8)) - 4; - if (len > ETH_FRAME_LEN) + if (len > ETH_FRAME_LEN || len > skb->len) return 0; /* the last packet of current skb */ diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c index 8e717a0b559b3aac42e94c0ea1c776737c79527b..7984f2157d222dbe2971702221dac46b2a370721 100644 --- a/drivers/net/usb/zaurus.c +++ b/drivers/net/usb/zaurus.c @@ -256,6 +256,11 @@ static const struct usb_device_id products [] = { .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \ .bInterfaceProtocol = USB_CDC_PROTO_NONE +#define ZAURUS_FAKE_INTERFACE \ + .bInterfaceClass = USB_CLASS_COMM, \ + .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \ + .bInterfaceProtocol = USB_CDC_PROTO_NONE + /* SA-1100 based Sharp Zaurus ("collie"), or compatible. 
*/ { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO @@ -313,6 +318,13 @@ static const struct usb_device_id products [] = { .idProduct = 0x9032, /* SL-6000 */ ZAURUS_MASTER_INTERFACE, .driver_info = ZAURUS_PXA_INFO, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, + .idProduct = 0x9032, /* SL-6000 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = (unsigned long)&bogus_mdlm_info, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, diff --git a/drivers/net/veth.c b/drivers/net/veth.c index aef66f8eecee1c1cc151d506126464a907f9f67f..f7e3eb309a26ede481f7d257bb4e10e94f53a331 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -256,9 +256,10 @@ static void __veth_xdp_flush(struct veth_rq *rq) { /* Write ptr_ring before reading rx_notify_masked */ smp_mb(); - if (!rq->rx_notify_masked) { - rq->rx_notify_masked = true; - napi_schedule(&rq->xdp_napi); + if (!READ_ONCE(rq->rx_notify_masked) && + napi_schedule_prep(&rq->xdp_napi)) { + WRITE_ONCE(rq->rx_notify_masked, true); + __napi_schedule(&rq->xdp_napi); } } @@ -852,8 +853,10 @@ static int veth_poll(struct napi_struct *napi, int budget) /* Write rx_notify_masked before reading ptr_ring */ smp_store_mb(rq->rx_notify_masked, false); if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) { - rq->rx_notify_masked = true; - napi_schedule(&rq->xdp_napi); + if (napi_schedule_prep(&rq->xdp_napi)) { + WRITE_ONCE(rq->rx_notify_masked, true); + __napi_schedule(&rq->xdp_napi); + } } } diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c index 49cc4b7ed5163a2de35cd84c6987e813573aae90..1baec4b412c8dc7f0c3b1edbe4c1f445e5074704 100644 --- a/drivers/net/wireless/ath/ar5523/ar5523.c +++ b/drivers/net/wireless/ath/ar5523/ar5523.c @@ -153,6 +153,10 @@ static void ar5523_cmd_rx_cb(struct urb *urb) ar5523_err(ar, "Invalid reply to WDCMSG_TARGET_START"); return; } + if (!cmd->odata) { + ar5523_err(ar, "Unexpected WDCMSG_TARGET_START reply"); + return; + } memcpy(cmd->odata, hdr + 1, sizeof(u32)); cmd->olen = sizeof(u32); cmd->res = 0; diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index d73ad60b571c2c41653cf8723f998a010bcebec8..d0967bb1f38718a9efbc2280fdec3e0f3c92e2f6 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -89,6 +89,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = true, }, { @@ -123,6 +124,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = true, }, { @@ -158,6 +160,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, }, { @@ -187,6 +190,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .num_wds_entries = 0x20, .uart_pin_workaround = true, .tx_stats_over_pktlog = false, + .credit_size_workaround = false, .bmi_large_size_download = true, .supports_peer_stats_info = true, }, @@ -222,6 +226,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, + 
.credit_size_workaround = false, .tx_stats_over_pktlog = false, }, { @@ -256,6 +261,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, }, { @@ -290,6 +296,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, }, { @@ -327,6 +334,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = true, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, .supports_peer_stats_info = true, }, @@ -368,6 +376,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, }, { @@ -415,6 +424,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, }, { @@ -459,6 +469,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, }, { @@ -493,6 +504,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, }, { @@ -529,6 +541,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = true, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, }, { @@ -557,6 +570,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .ast_skid_limit = 0x10, .num_wds_entries = 0x20, .uart_pin_workaround = true, + .credit_size_workaround = true, }, { .id = QCA4019_HW_1_0_DEV_VERSION, @@ -597,6 +611,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, }, { @@ -624,6 +639,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = true, .hw_filter_reset_required = false, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, }, }; @@ -697,6 +713,7 @@ static void ath10k_send_suspend_complete(struct ath10k *ar) static int ath10k_init_sdio(struct ath10k *ar, enum ath10k_firmware_mode mode) { + bool mtu_workaround = ar->hw_params.credit_size_workaround; int ret; u32 param = 0; @@ -714,7 +731,7 @@ static int ath10k_init_sdio(struct ath10k *ar, enum ath10k_firmware_mode mode) param |= HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_SET; - if (mode == ATH10K_FIRMWARE_MODE_NORMAL) + if (mode == ATH10K_FIRMWARE_MODE_NORMAL && !mtu_workaround) param |= HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE; else param &= ~HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE; diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index 1fc0a312ab587337c54ecf3eb7d9f11b648fd653..5f67da47036cf1be24ec705c8d56ef03f796cd4e 100644 --- 
a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -147,6 +147,9 @@ void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt) htt->num_pending_tx--; if (htt->num_pending_tx == htt->max_num_pending_tx - 1) ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL); + + if (htt->num_pending_tx == 0) + wake_up(&htt->empty_tx_wq); } int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt) diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index c6ded21f5ed69bbb5a846115a12593a14291dfe4..d3ef83ad577dada610f557a757389518383d11a7 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -618,6 +618,9 @@ struct ath10k_hw_params { */ bool uart_pin_workaround; + /* Workaround for the credit size calculation */ + bool credit_size_workaround; + /* tx stats support over pktlog */ bool tx_stats_over_pktlog; diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index aefe1f7f906c07d5eacaf918e069f187e5bbb59f..f51f1cf2c6a40eec433e5d415aec40ab165a3206 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -82,8 +82,6 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt, flags = skb_cb->flags; ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id); ath10k_htt_tx_dec_pending(htt); - if (htt->num_pending_tx == 0) - wake_up(&htt->empty_tx_wq); spin_unlock_bh(&htt->tx_lock); rcu_read_lock(); diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c index 430723c64adce6d03db67ca2196348d5801affef..9ff6e685331424df67c8ea1ae10d1bdacb1c7988 100644 --- a/drivers/net/wireless/ath/ath11k/ahb.c +++ b/drivers/net/wireless/ath/ath11k/ahb.c @@ -175,8 +175,11 @@ static void __ath11k_ahb_ext_irq_disable(struct ath11k_base *ab) ath11k_ahb_ext_grp_disable(irq_grp); - napi_synchronize(&irq_grp->napi); - napi_disable(&irq_grp->napi); + if (irq_grp->napi_enabled) { + napi_synchronize(&irq_grp->napi); + napi_disable(&irq_grp->napi); + irq_grp->napi_enabled = false; + } } } @@ -206,13 +209,13 @@ static void ath11k_ahb_clearbit32(struct ath11k_base *ab, u8 bit, u32 offset) static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id) { - const struct ce_pipe_config *ce_config; + const struct ce_attr *ce_attr; - ce_config = &ab->hw_params.target_ce_config[ce_id]; - if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_OUT) + ce_attr = &ab->hw_params.host_ce_config[ce_id]; + if (ce_attr->src_nentries) ath11k_ahb_setbit32(ab, ce_id, CE_HOST_IE_ADDRESS); - if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_IN) { + if (ce_attr->dest_nentries) { ath11k_ahb_setbit32(ab, ce_id, CE_HOST_IE_2_ADDRESS); ath11k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT, CE_HOST_IE_3_ADDRESS); @@ -221,13 +224,13 @@ static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id) static void ath11k_ahb_ce_irq_disable(struct ath11k_base *ab, u16 ce_id) { - const struct ce_pipe_config *ce_config; + const struct ce_attr *ce_attr; - ce_config = &ab->hw_params.target_ce_config[ce_id]; - if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_OUT) + ce_attr = &ab->hw_params.host_ce_config[ce_id]; + if (ce_attr->src_nentries) ath11k_ahb_clearbit32(ab, ce_id, CE_HOST_IE_ADDRESS); - if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_IN) { + if (ce_attr->dest_nentries) { ath11k_ahb_clearbit32(ab, ce_id, CE_HOST_IE_2_ADDRESS); ath11k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT, CE_HOST_IE_3_ADDRESS); @@ -300,7 +303,10 @@ static void 
ath11k_ahb_ext_irq_enable(struct ath11k_base *ab) for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; - napi_enable(&irq_grp->napi); + if (!irq_grp->napi_enabled) { + napi_enable(&irq_grp->napi); + irq_grp->napi_enabled = true; + } ath11k_ahb_ext_grp_enable(irq_grp); } } diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index c8e36251068c9861bb75521be380f0dabfd2bed4..d2f2898d17b498967e646289bdd61d772d62f528 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -124,6 +124,7 @@ struct ath11k_ext_irq_grp { u32 num_irq; u32 grp_id; u64 timestamp; + bool napi_enabled; struct napi_struct napi; struct net_device napi_ndev; }; @@ -687,7 +688,6 @@ struct ath11k_base { u32 wlan_init_status; int irq_num[ATH11K_IRQ_NUM_MAX]; struct ath11k_ext_irq_grp ext_irq_grp[ATH11K_EXT_IRQ_GRP_NUM_MAX]; - struct napi_struct *napi; struct ath11k_targ_cap target_caps; u32 ext_service_bitmap[WMI_SERVICE_EXT_BM_SIZE]; bool pdevs_macaddr_valid; diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h index ee8db812589b3cdecdfda17ba796e36b75faa0a9..c4972233149f40aaac7a4377852e95f3bdecd892 100644 --- a/drivers/net/wireless/ath/ath11k/dp.h +++ b/drivers/net/wireless/ath/ath11k/dp.h @@ -514,7 +514,8 @@ struct htt_ppdu_stats_cfg_cmd { } __packed; #define HTT_PPDU_STATS_CFG_MSG_TYPE GENMASK(7, 0) -#define HTT_PPDU_STATS_CFG_PDEV_ID GENMASK(15, 8) +#define HTT_PPDU_STATS_CFG_SOC_STATS BIT(8) +#define HTT_PPDU_STATS_CFG_PDEV_ID GENMASK(15, 9) #define HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK GENMASK(31, 16) enum htt_ppdu_stats_tag_type { diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c index 21dfd08d3debb6b489e3647aeb2d02ec0d47dc0a..092eee735da29d2bf5a465a264d13fafb9bd90fa 100644 --- a/drivers/net/wireless/ath/ath11k/dp_tx.c +++ b/drivers/net/wireless/ath/ath11k/dp_tx.c @@ -894,7 +894,7 @@ int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask) cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG); - pdev_mask = 1 << (i + 1); + pdev_mask = 1 << (ar->pdev_idx + i); cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask); cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK, mask); diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c index 9904c0eb758754ebbe076ff116e4a973bd75443a..f3b9108ab6bd073e695c40851f3fff4520e64695 100644 --- a/drivers/net/wireless/ath/ath11k/hal.c +++ b/drivers/net/wireless/ath/ath11k/hal.c @@ -991,6 +991,7 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type, srng->msi_data = params->msi_data; srng->initialized = 1; spin_lock_init(&srng->lock); + lockdep_set_class(&srng->lock, hal->srng_key + ring_id); for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) { srng->hwreg_base[i] = srng_config->reg_start[i] + @@ -1237,6 +1238,24 @@ static int ath11k_hal_srng_create_config(struct ath11k_base *ab) return 0; } +static void ath11k_hal_register_srng_key(struct ath11k_base *ab) +{ + struct ath11k_hal *hal = &ab->hal; + u32 ring_id; + + for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++) + lockdep_register_key(hal->srng_key + ring_id); +} + +static void ath11k_hal_unregister_srng_key(struct ath11k_base *ab) +{ + struct ath11k_hal *hal = &ab->hal; + u32 ring_id; + + for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++) + lockdep_unregister_key(hal->srng_key + ring_id); +} + int 
ath11k_hal_srng_init(struct ath11k_base *ab) { struct ath11k_hal *hal = &ab->hal; @@ -1256,6 +1275,8 @@ int ath11k_hal_srng_init(struct ath11k_base *ab) if (ret) goto err_free_cont_rdp; + ath11k_hal_register_srng_key(ab); + return 0; err_free_cont_rdp: @@ -1270,6 +1291,7 @@ void ath11k_hal_srng_deinit(struct ath11k_base *ab) { struct ath11k_hal *hal = &ab->hal; + ath11k_hal_unregister_srng_key(ab); ath11k_hal_free_cont_rdp(ab); ath11k_hal_free_cont_wrp(ab); kfree(hal->srng_config); diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h index 1f1b29cd0aa399f078a8c9a020ff81e7461736d6..5fbfded8d546c4193daa9d8dbb7a10963a1f2d9d 100644 --- a/drivers/net/wireless/ath/ath11k/hal.h +++ b/drivers/net/wireless/ath/ath11k/hal.h @@ -888,6 +888,8 @@ struct ath11k_hal { /* shadow register configuration */ u32 shadow_reg_addr[HAL_SHADOW_NUM_REGS]; int num_shadow_reg_configured; + + struct lock_class_key srng_key[HAL_SRNG_RING_ID_MAX]; }; u32 ath11k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid); diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c index 66331da3501291f08039073604089c32e10bfa86..f6282e8702923f27e7ff3a1c12a514886517d332 100644 --- a/drivers/net/wireless/ath/ath11k/hw.c +++ b/drivers/net/wireless/ath/ath11k/hw.c @@ -246,8 +246,6 @@ const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074 = { const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390 = { .tx = { ATH11K_TX_RING_MASK_0, - ATH11K_TX_RING_MASK_1, - ATH11K_TX_RING_MASK_2, }, .rx_mon_status = { 0, 0, 0, 0, diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 0924bc8b35205f1619ff4bf3de3d8240a82f4365..cc9122f420243069939f08cd12276fdb7e091a0c 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -792,11 +793,15 @@ static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif) if (cfg80211_find_ie(WLAN_EID_RSN, ies, (skb_tail_pointer(bcn) - ies))) arvif->rsnie_present = true; + else + arvif->rsnie_present = false; if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WPA, ies, (skb_tail_pointer(bcn) - ies))) arvif->wpaie_present = true; + else + arvif->wpaie_present = false; ret = ath11k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn); @@ -2316,9 +2321,12 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw, arg.scan_id = ATH11K_SCAN_ID; if (req->ie_len) { + arg.extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL); + if (!arg.extraie.ptr) { + ret = -ENOMEM; + goto exit; + } arg.extraie.len = req->ie_len; - arg.extraie.ptr = kzalloc(req->ie_len, GFP_KERNEL); - memcpy(arg.extraie.ptr, req->ie, req->ie_len); } if (req->n_ssids) { @@ -2395,9 +2403,7 @@ static int ath11k_install_key(struct ath11k_vif *arvif, return 0; if (cmd == DISABLE_KEY) { - /* TODO: Check if FW expects value other than NONE for del */ - /* arg.key_cipher = WMI_CIPHER_NONE; */ - arg.key_len = 0; + arg.key_cipher = WMI_CIPHER_NONE; arg.key_data = NULL; goto install; } @@ -2529,7 +2535,7 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, /* flush the fragments cache during key (re)install to * ensure all frags in the new frag list belong to the same key. 
*/ - if (peer && cmd == SET_KEY) + if (peer && sta && cmd == SET_KEY) ath11k_peer_frags_flush(ar, peer); spin_unlock_bh(&ab->base_lock); @@ -3878,23 +3884,32 @@ static int __ath11k_set_antenna(struct ath11k *ar, u32 tx_ant, u32 rx_ant) return 0; } -int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx) +static void ath11k_mac_tx_mgmt_free(struct ath11k *ar, int buf_id) { - struct sk_buff *msdu = skb; + struct sk_buff *msdu; struct ieee80211_tx_info *info; - struct ath11k *ar = ctx; - struct ath11k_base *ab = ar->ab; spin_lock_bh(&ar->txmgmt_idr_lock); - idr_remove(&ar->txmgmt_idr, buf_id); + msdu = idr_remove(&ar->txmgmt_idr, buf_id); spin_unlock_bh(&ar->txmgmt_idr_lock); - dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len, + + if (!msdu) + return; + + dma_unmap_single(ar->ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len, DMA_TO_DEVICE); info = IEEE80211_SKB_CB(msdu); memset(&info->status, 0, sizeof(info->status)); ieee80211_free_txskb(ar->hw, msdu); +} + +int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx) +{ + struct ath11k *ar = ctx; + + ath11k_mac_tx_mgmt_free(ar, buf_id); return 0; } @@ -3903,17 +3918,10 @@ static int ath11k_mac_vif_txmgmt_idr_remove(int buf_id, void *skb, void *ctx) { struct ieee80211_vif *vif = ctx; struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB((struct sk_buff *)skb); - struct sk_buff *msdu = skb; struct ath11k *ar = skb_cb->ar; - struct ath11k_base *ab = ar->ab; - if (skb_cb->vif == vif) { - spin_lock_bh(&ar->txmgmt_idr_lock); - idr_remove(&ar->txmgmt_idr, buf_id); - spin_unlock_bh(&ar->txmgmt_idr_lock); - dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, - DMA_TO_DEVICE); - } + if (skb_cb->vif == vif) + ath11k_mac_tx_mgmt_free(ar, buf_id); return 0; } @@ -3928,6 +3936,8 @@ static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif, int buf_id; int ret; + ATH11K_SKB_CB(skb)->ar = ar; + spin_lock_bh(&ar->txmgmt_idr_lock); buf_id = idr_alloc(&ar->txmgmt_idr, skb, 0, ATH11K_TX_MGMT_NUM_PENDING_MAX, GFP_ATOMIC); diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c index d7eb6b7160bb4ff204c4a2c29819f73b90b92cd5..105e344240c100f1d9e31de57e27b467750cd485 100644 --- a/drivers/net/wireless/ath/ath11k/pci.c +++ b/drivers/net/wireless/ath/ath11k/pci.c @@ -416,8 +416,11 @@ static void __ath11k_pci_ext_irq_disable(struct ath11k_base *sc) ath11k_pci_ext_grp_disable(irq_grp); - napi_synchronize(&irq_grp->napi); - napi_disable(&irq_grp->napi); + if (irq_grp->napi_enabled) { + napi_synchronize(&irq_grp->napi); + napi_disable(&irq_grp->napi); + irq_grp->napi_enabled = false; + } } } @@ -436,7 +439,10 @@ static void ath11k_pci_ext_irq_enable(struct ath11k_base *ab) for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; - napi_enable(&irq_grp->napi); + if (!irq_grp->napi_enabled) { + napi_enable(&irq_grp->napi); + irq_grp->napi_enabled = true; + } ath11k_pci_ext_grp_enable(irq_grp); } } diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c index b8f9f3440887932d9e092850d4b866487bdad83e..e34311516b958a121464f36716f642a6c85d52ba 100644 --- a/drivers/net/wireless/ath/ath11k/reg.c +++ b/drivers/net/wireless/ath/ath11k/reg.c @@ -456,6 +456,9 @@ ath11k_reg_adjust_bw(u16 start_freq, u16 end_freq, u16 max_bw) { u16 bw; + if (end_freq <= start_freq) + return 0; + bw = end_freq - start_freq; bw = min_t(u16, bw, max_bw); @@ -463,8 +466,10 @@ ath11k_reg_adjust_bw(u16 start_freq, u16 end_freq, u16 max_bw) 
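/*
 * Illustrative sketch, not part of the patch: what the ath11k_reg_adjust_bw()
 * hunk below achieves, reduced to the branches visible in this excerpt (the
 * 80/160 MHz handling is omitted; u16 and min_t() come from the usual kernel
 * headers). A range that is empty, inverted, or narrower than 20 MHz now
 * collapses to zero bandwidth, so the weather-radar splitting in the next
 * hunk can skip emitting a rule for it via its "if (bw > 0)" checks.
 */
static u16 sketch_adjust_bw(u16 start_freq, u16 end_freq, u16 max_bw)
{
	u16 bw;

	if (end_freq <= start_freq)	/* empty or inverted range */
		return 0;

	bw = min_t(u16, end_freq - start_freq, max_bw);

	if (bw >= 40 && bw < 80)
		bw = 40;
	else if (bw >= 20 && bw < 40)
		bw = 20;
	else if (bw < 20)		/* too narrow to be a usable rule */
		bw = 0;

	return bw;
}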
bw = 80; else if (bw >= 40 && bw < 80) bw = 40; - else if (bw < 40) + else if (bw >= 20 && bw < 40) bw = 20; + else + bw = 0; return bw; } @@ -488,73 +493,77 @@ ath11k_reg_update_weather_radar_band(struct ath11k_base *ab, struct cur_reg_rule *reg_rule, u8 *rule_idx, u32 flags, u16 max_bw) { + u32 start_freq; u32 end_freq; u16 bw; u8 i; i = *rule_idx; + /* there might be situations when even the input rule must be dropped */ + i--; + + /* frequencies below weather radar */ bw = ath11k_reg_adjust_bw(reg_rule->start_freq, ETSI_WEATHER_RADAR_BAND_LOW, max_bw); + if (bw > 0) { + i++; - ath11k_reg_update_rule(regd->reg_rules + i, reg_rule->start_freq, - ETSI_WEATHER_RADAR_BAND_LOW, bw, - reg_rule->ant_gain, reg_rule->reg_power, - flags); + ath11k_reg_update_rule(regd->reg_rules + i, + reg_rule->start_freq, + ETSI_WEATHER_RADAR_BAND_LOW, bw, + reg_rule->ant_gain, reg_rule->reg_power, + flags); - ath11k_dbg(ab, ATH11K_DBG_REG, - "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n", - i + 1, reg_rule->start_freq, ETSI_WEATHER_RADAR_BAND_LOW, - bw, reg_rule->ant_gain, reg_rule->reg_power, - regd->reg_rules[i].dfs_cac_ms, - flags); - - if (reg_rule->end_freq > ETSI_WEATHER_RADAR_BAND_HIGH) - end_freq = ETSI_WEATHER_RADAR_BAND_HIGH; - else - end_freq = reg_rule->end_freq; + ath11k_dbg(ab, ATH11K_DBG_REG, + "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n", + i + 1, reg_rule->start_freq, + ETSI_WEATHER_RADAR_BAND_LOW, bw, reg_rule->ant_gain, + reg_rule->reg_power, regd->reg_rules[i].dfs_cac_ms, + flags); + } - bw = ath11k_reg_adjust_bw(ETSI_WEATHER_RADAR_BAND_LOW, end_freq, - max_bw); + /* weather radar frequencies */ + start_freq = max_t(u32, reg_rule->start_freq, + ETSI_WEATHER_RADAR_BAND_LOW); + end_freq = min_t(u32, reg_rule->end_freq, ETSI_WEATHER_RADAR_BAND_HIGH); - i++; + bw = ath11k_reg_adjust_bw(start_freq, end_freq, max_bw); + if (bw > 0) { + i++; - ath11k_reg_update_rule(regd->reg_rules + i, - ETSI_WEATHER_RADAR_BAND_LOW, end_freq, bw, - reg_rule->ant_gain, reg_rule->reg_power, - flags); + ath11k_reg_update_rule(regd->reg_rules + i, start_freq, + end_freq, bw, reg_rule->ant_gain, + reg_rule->reg_power, flags); - regd->reg_rules[i].dfs_cac_ms = ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT; + regd->reg_rules[i].dfs_cac_ms = ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT; - ath11k_dbg(ab, ATH11K_DBG_REG, - "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n", - i + 1, ETSI_WEATHER_RADAR_BAND_LOW, end_freq, - bw, reg_rule->ant_gain, reg_rule->reg_power, - regd->reg_rules[i].dfs_cac_ms, - flags); - - if (end_freq == reg_rule->end_freq) { - regd->n_reg_rules--; - *rule_idx = i; - return; + ath11k_dbg(ab, ATH11K_DBG_REG, + "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n", + i + 1, start_freq, end_freq, bw, + reg_rule->ant_gain, reg_rule->reg_power, + regd->reg_rules[i].dfs_cac_ms, flags); } + /* frequencies above weather radar */ bw = ath11k_reg_adjust_bw(ETSI_WEATHER_RADAR_BAND_HIGH, reg_rule->end_freq, max_bw); + if (bw > 0) { + i++; - i++; - - ath11k_reg_update_rule(regd->reg_rules + i, ETSI_WEATHER_RADAR_BAND_HIGH, - reg_rule->end_freq, bw, - reg_rule->ant_gain, reg_rule->reg_power, - flags); + ath11k_reg_update_rule(regd->reg_rules + i, + ETSI_WEATHER_RADAR_BAND_HIGH, + reg_rule->end_freq, bw, + reg_rule->ant_gain, reg_rule->reg_power, + flags); - ath11k_dbg(ab, ATH11K_DBG_REG, - "\t%d. 
(%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n", - i + 1, ETSI_WEATHER_RADAR_BAND_HIGH, reg_rule->end_freq, - bw, reg_rule->ant_gain, reg_rule->reg_power, - regd->reg_rules[i].dfs_cac_ms, - flags); + ath11k_dbg(ab, ATH11K_DBG_REG, + "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n", + i + 1, ETSI_WEATHER_RADAR_BAND_HIGH, + reg_rule->end_freq, bw, reg_rule->ant_gain, + reg_rule->reg_power, regd->reg_rules[i].dfs_cac_ms, + flags); + } *rule_idx = i; } diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index e84127165d85860f6618f6c64f23ada6569b2c8f..53846dc9a5c5abe92de8f591eed7ed0a48593eb3 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -1665,7 +1665,8 @@ int ath11k_wmi_vdev_install_key(struct ath11k *ar, tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd)); tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | FIELD_PREP(WMI_TLV_LEN, key_len_aligned); - memcpy(tlv->value, (u8 *)arg->key_data, key_len_aligned); + if (arg->key_data) + memcpy(tlv->value, (u8 *)arg->key_data, key_len_aligned); ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID); if (ret) { @@ -5421,7 +5422,7 @@ static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *sk ar = ab->pdevs[pdev_idx].ar; kfree(ab->new_regd[pdev_idx]); ab->new_regd[pdev_idx] = regd; - ieee80211_queue_work(ar->hw, &ar->regd_update_work); + queue_work(ab->workqueue, &ar->regd_update_work); } else { /* This regd would be applied during mac registration and is * held constant throughout for regd intersection purpose diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 860da13bfb6ac91c55925babbd8150458d4b97a5..f06eec99de688c87e42fb8379d339b7a7aa1d4d8 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -590,6 +590,13 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev, return; } + if (pkt_len > 2 * MAX_RX_BUF_SIZE) { + dev_err(&hif_dev->udev->dev, + "ath9k_htc: invalid pkt_len (%x)\n", pkt_len); + RX_STAT_INC(skb_dropped); + return; + } + pad_len = 4 - (pkt_len & 0x3); if (pad_len == 4) pad_len = 0; diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c index cf4eb0fb2815182b42a3ded5f269c72776597aa8..6c62ffc799a2b5e07d5632461888cf2887664177 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.c +++ b/drivers/net/wireless/ath/wcn36xx/dxe.c @@ -272,6 +272,21 @@ static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch) return 0; } +static void wcn36xx_dxe_disable_ch_int(struct wcn36xx *wcn, u16 wcn_ch) +{ + int reg_data = 0; + + wcn36xx_dxe_read_register(wcn, + WCN36XX_DXE_INT_MASK_REG, + &reg_data); + + reg_data &= ~wcn_ch; + + wcn36xx_dxe_write_register(wcn, + WCN36XX_DXE_INT_MASK_REG, + (int)reg_data); +} + static int wcn36xx_dxe_fill_skb(struct device *dev, struct wcn36xx_dxe_ctl *ctl, gfp_t gfp) @@ -869,7 +884,6 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn) WCN36XX_DXE_WQ_TX_L); wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data); - wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L); /***************************************/ /* Init descriptors for TX HIGH channel */ /***************************************/ @@ -893,9 +907,6 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn) wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data); - /* Enable channel interrupts */ - wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H); - /***************************************/ /* Init descriptors for RX 
LOW channel */ /***************************************/ @@ -905,7 +916,6 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn) goto out_err_rxl_ch; } - /* For RX we need to preallocated buffers */ wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch); @@ -928,9 +938,6 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn) WCN36XX_DXE_REG_CTL_RX_L, WCN36XX_DXE_CH_DEFAULT_CTL_RX_L); - /* Enable channel interrupts */ - wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L); - /***************************************/ /* Init descriptors for RX HIGH channel */ /***************************************/ @@ -962,15 +969,18 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn) WCN36XX_DXE_REG_CTL_RX_H, WCN36XX_DXE_CH_DEFAULT_CTL_RX_H); - /* Enable channel interrupts */ - wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H); - ret = wcn36xx_dxe_request_irqs(wcn); if (ret < 0) goto out_err_irq; timer_setup(&wcn->tx_ack_timer, wcn36xx_dxe_tx_timer, 0); + /* Enable channel interrupts */ + wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L); + wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H); + wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L); + wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H); + return 0; out_err_irq: @@ -987,6 +997,14 @@ out_err_txh_ch: void wcn36xx_dxe_deinit(struct wcn36xx *wcn) { + int reg_data = 0; + + /* Disable channel interrupts */ + wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H); + wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L); + wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H); + wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L); + free_irq(wcn->tx_irq, wcn); free_irq(wcn->rx_irq, wcn); del_timer(&wcn->tx_ack_timer); @@ -996,6 +1014,15 @@ void wcn36xx_dxe_deinit(struct wcn36xx *wcn) wcn->tx_ack_skb = NULL; } + /* Put the DXE block into reset before freeing memory */ + reg_data = WCN36XX_DXE_REG_RESET; + wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data); + wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch); wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch); + + wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_l_ch); + wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_h_ch); + wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_l_ch); + wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_h_ch); } diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 629ddfd74da1a06fe3761ebcdd1bb68a0af46d0f..9aaf6f74733336cbe842625821667068813323e9 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -397,6 +397,7 @@ static void wcn36xx_change_opchannel(struct wcn36xx *wcn, int ch) static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed) { struct wcn36xx *wcn = hw->priv; + int ret; wcn36xx_dbg(WCN36XX_DBG_MAC, "mac config changed 0x%08x\n", changed); @@ -412,17 +413,31 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed) * want to receive/transmit regular data packets, then * simply stop the scan session and exit PS mode. */ - wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN, - wcn->sw_scan_vif); - wcn->sw_scan_channel = 0; + if (wcn->sw_scan_channel) + wcn36xx_smd_end_scan(wcn, wcn->sw_scan_channel); + if (wcn->sw_scan_init) { + wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN, + wcn->sw_scan_vif); + } } else if (wcn->sw_scan) { /* A scan is ongoing, do not change the operating * channel, but start a scan session on the channel. 
*/ - wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN, - wcn->sw_scan_vif); + if (wcn->sw_scan_channel) + wcn36xx_smd_end_scan(wcn, wcn->sw_scan_channel); + if (!wcn->sw_scan_init) { + /* This can fail if we are unable to notify the + * operating channel. + */ + ret = wcn36xx_smd_init_scan(wcn, + HAL_SYS_MODE_SCAN, + wcn->sw_scan_vif); + if (ret) { + mutex_unlock(&wcn->conf_mutex); + return -EIO; + } + } wcn36xx_smd_start_scan(wcn, ch); - wcn->sw_scan_channel = ch; } else { wcn36xx_change_opchannel(wcn, ch); } @@ -709,7 +724,12 @@ static void wcn36xx_sw_scan_complete(struct ieee80211_hw *hw, struct wcn36xx *wcn = hw->priv; /* ensure that any scan session is finished */ - wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN, wcn->sw_scan_vif); + if (wcn->sw_scan_channel) + wcn36xx_smd_end_scan(wcn, wcn->sw_scan_channel); + if (wcn->sw_scan_init) { + wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN, + wcn->sw_scan_vif); + } wcn->sw_scan = false; wcn->sw_scan_opchannel = 0; } diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index 3793907ace92e0e1e73cc45184cfd6dfe9b11bc3..7f00cb6f5e16b3dbb0aa281587a1d6a8756c053b 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -730,6 +730,7 @@ int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode, wcn36xx_err("hal_init_scan response failed err=%d\n", ret); goto out; } + wcn->sw_scan_init = true; out: mutex_unlock(&wcn->hal_mutex); return ret; @@ -760,6 +761,7 @@ int wcn36xx_smd_start_scan(struct wcn36xx *wcn, u8 scan_channel) wcn36xx_err("hal_start_scan response failed err=%d\n", ret); goto out; } + wcn->sw_scan_channel = scan_channel; out: mutex_unlock(&wcn->hal_mutex); return ret; @@ -790,6 +792,7 @@ int wcn36xx_smd_end_scan(struct wcn36xx *wcn, u8 scan_channel) wcn36xx_err("hal_end_scan response failed err=%d\n", ret); goto out; } + wcn->sw_scan_channel = 0; out: mutex_unlock(&wcn->hal_mutex); return ret; @@ -831,6 +834,7 @@ int wcn36xx_smd_finish_scan(struct wcn36xx *wcn, wcn36xx_err("hal_finish_scan response failed err=%d\n", ret); goto out; } + wcn->sw_scan_init = false; out: mutex_unlock(&wcn->hal_mutex); return ret; @@ -2603,7 +2607,7 @@ static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn, wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n", tmp->bss_index); vif = wcn36xx_priv_to_vif(tmp); - ieee80211_connection_loss(vif); + ieee80211_beacon_loss(vif); } return 0; } @@ -2618,7 +2622,7 @@ static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn, wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n", rsp->bss_index); vif = wcn36xx_priv_to_vif(tmp); - ieee80211_connection_loss(vif); + ieee80211_beacon_loss(vif); return 0; } } diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c index bbd7194c82e27ef6982cbb7d548ab55a2edce1f1..f33e7228a1010f4e81b3211f7b33fd319960718f 100644 --- a/drivers/net/wireless/ath/wcn36xx/txrx.c +++ b/drivers/net/wireless/ath/wcn36xx/txrx.c @@ -237,7 +237,6 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb) const struct wcn36xx_rate *rate; struct ieee80211_hdr *hdr; struct wcn36xx_rx_bd *bd; - struct ieee80211_supported_band *sband; u16 fc, sn; /* @@ -259,8 +258,6 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb) fc = __le16_to_cpu(hdr->frame_control); sn = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)); - status.freq = WCN36XX_CENTER_FREQ(wcn); - status.band = WCN36XX_BAND(wcn); status.mactime = 10; status.signal = -get_rssi0(bd); 
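/*
 * Illustrative sketch, not part of the patch: the frequency decode that the
 * wcn36xx_rx_skb() hunk below moves ahead of the rate lookup, so that the
 * later 5 GHz legacy-rate fixup sees the final status.band. Field names match
 * the driver's buffer descriptor; the channel map is passed in here only to
 * keep the sketch self-contained (the driver uses its static ab_rx_ch_map
 * table), and the mac80211/cfg80211 headers provide the rx-status and band
 * helpers.
 */
static void sketch_fill_scan_freq(struct ieee80211_rx_status *status,
				  const struct wcn36xx_rx_bd *bd,
				  const u8 *ch_map, size_t map_len)
{
	/* the channel number is split across two fields, low four bits in rx_ch */
	u8 hwch = (bd->reserved0 << 4) + bd->rx_ch;

	if (bd->rf_band != 1 && hwch >= 1 && hwch <= map_len) {
		status->band = NL80211_BAND_5GHZ;
		status->freq = ieee80211_channel_to_frequency(ch_map[hwch - 1],
							      status->band);
	} else {
		status->band = NL80211_BAND_2GHZ;
		status->freq = ieee80211_channel_to_frequency(hwch, status->band);
	}
}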
status.antenna = 1; @@ -272,18 +269,36 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb) wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x\n", status.flag); + if (bd->scan_learn) { + /* If packet originates from hardware scanning, extract the + * band/channel from bd descriptor. + */ + u8 hwch = (bd->reserved0 << 4) + bd->rx_ch; + + if (bd->rf_band != 1 && hwch <= sizeof(ab_rx_ch_map) && hwch >= 1) { + status.band = NL80211_BAND_5GHZ; + status.freq = ieee80211_channel_to_frequency(ab_rx_ch_map[hwch - 1], + status.band); + } else { + status.band = NL80211_BAND_2GHZ; + status.freq = ieee80211_channel_to_frequency(hwch, status.band); + } + } else { + status.band = WCN36XX_BAND(wcn); + status.freq = WCN36XX_CENTER_FREQ(wcn); + } + if (bd->rate_id < ARRAY_SIZE(wcn36xx_rate_table)) { rate = &wcn36xx_rate_table[bd->rate_id]; status.encoding = rate->encoding; status.enc_flags = rate->encoding_flags; status.bw = rate->bw; status.rate_idx = rate->mcs_or_legacy_index; - sband = wcn->hw->wiphy->bands[status.band]; status.nss = 1; if (status.band == NL80211_BAND_5GHZ && status.encoding == RX_ENC_LEGACY && - status.rate_idx >= sband->n_bitrates) { + status.rate_idx >= 4) { /* no dsss rates in 5Ghz rates table */ status.rate_idx -= 4; } @@ -298,22 +313,6 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb) ieee80211_is_probe_resp(hdr->frame_control)) status.boottime_ns = ktime_get_boottime_ns(); - if (bd->scan_learn) { - /* If packet originates from hardware scanning, extract the - * band/channel from bd descriptor. - */ - u8 hwch = (bd->reserved0 << 4) + bd->rx_ch; - - if (bd->rf_band != 1 && hwch <= sizeof(ab_rx_ch_map) && hwch >= 1) { - status.band = NL80211_BAND_5GHZ; - status.freq = ieee80211_channel_to_frequency(ab_rx_ch_map[hwch - 1], - status.band); - } else { - status.band = NL80211_BAND_2GHZ; - status.freq = ieee80211_channel_to_frequency(hwch, status.band); - } - } - memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); if (ieee80211_is_beacon(hdr->frame_control)) { diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h index 9b4dee2fc6483435a3a9ae17661a50d7f2f395d6..5c40d0bdee2451245f0442da5077c5eed34699cc 100644 --- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h +++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h @@ -231,6 +231,7 @@ struct wcn36xx { struct cfg80211_scan_request *scan_req; bool sw_scan; u8 sw_scan_opchannel; + bool sw_scan_init; u8 sw_scan_channel; struct ieee80211_vif *sw_scan_vif; struct mutex scan_lock; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index be214f39f52bec3207f3d9dacbab714341c6e2fa..ab84ac3f8f03f382a3fa60edf344e4a6d1a60891 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -185,6 +185,9 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv) for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) iwl_free_fw_img(drv, drv->fw.img + i); + + /* clear the data for the aborted load case */ + memset(&drv->fw, 0, sizeof(drv->fw)); } static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc, @@ -1365,6 +1368,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) int i; bool load_module = false; bool usniffer_images = false; + bool failure = true; fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH; fw->ucode_capa.standard_phy_calibration_size = @@ -1625,15 +1629,9 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) *
else from proceeding if the module fails to load * or hangs loading. */ - if (load_module) { + if (load_module) request_module("%s", op->name); -#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR - if (err) - IWL_ERR(drv, - "failed to load module %s (error %d), is dynamic loading enabled?\n", - op->name, err); -#endif - } + failure = false; goto free; try_again: @@ -1648,7 +1646,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) out_unbind: complete(&drv->request_firmware_complete); device_release_driver(drv->trans->dev); + /* drv has just been freed by the release */ + failure = false; free: + if (failure) + iwl_dealloc_ucode(drv); + if (pieces) { for (i = 0; i < ARRAY_SIZE(pieces->img); i++) kfree(pieces->img[i].sec); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c index a0ce761d0c59b423ec150878615aa79a81eadfa4..b1335fe3b01a29ced68eba4c6f9ca4522ce8c180 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c @@ -967,7 +967,7 @@ static void iwl_mvm_ftm_rtt_smoothing(struct iwl_mvm *mvm, overshoot = IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT; alpha = IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA; - rtt_avg = (alpha * rtt + (100 - alpha) * resp->rtt_avg) / 100; + rtt_avg = div_s64(alpha * rtt + (100 - alpha) * resp->rtt_avg, 100); IWL_DEBUG_INFO(mvm, "%pM: prev rtt_avg=%lld, new rtt_avg=%lld, rtt=%lld\n", diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 81cc85a97eb205d88cc0d1ec4a9b2f39ce72ea99..922a7ea0cd24e892109d2d4db11e4a0aec5f05c9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -1739,6 +1739,7 @@ static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm) struct iwl_mvm_mc_iter_data iter_data = { .mvm = mvm, }; + int ret; lockdep_assert_held(&mvm->mutex); @@ -1748,6 +1749,22 @@ static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm) ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_mc_iface_iterator, &iter_data); + + /* + * Send a (synchronous) echo command so that we wait for the + * multiple asynchronous MCAST_FILTER_CMD commands sent by + * the interface iterator. Otherwise, we might get here over + * and over again (by userspace just sending a lot of these) + * and the CPU can send them faster than the firmware can + * process them. + * Note that the CPU is still faster - but with this we'll + * actually send fewer commands overall because the CPU will + * not schedule the work in mac80211 as frequently if it's + * still running when rescheduled (possibly multiple times). 
+ */ + ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL); + if (ret) + IWL_ERR(mvm, "Failed to synchronize multicast groups update\n"); } static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 838734fec50234ed253e752e86b9b39ea82c6804..86b3fb321dfdd82b0871bfb7920eac724c78e09e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -177,12 +177,39 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb, struct iwl_rx_mpdu_desc *desc = (void *)pkt->data; unsigned int headlen, fraglen, pad_len = 0; unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); + u8 mic_crc_len = u8_get_bits(desc->mac_flags1, + IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_MASK) << 1; if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) { len -= 2; pad_len = 2; } + /* + * For non monitor interface strip the bytes the RADA might not have + * removed. As monitor interface cannot exist with other interfaces + * this removal is safe. + */ + if (mic_crc_len && !ieee80211_hw_check(mvm->hw, RX_INCLUDES_FCS)) { + u32 pkt_flags = le32_to_cpu(pkt->len_n_flags); + + /* + * If RADA was not enabled then decryption was not performed so + * the MIC cannot be removed. + */ + if (!(pkt_flags & FH_RSCSR_RADA_EN)) { + if (WARN_ON(crypt_len > mic_crc_len)) + return -EINVAL; + + mic_crc_len -= crypt_len; + } + + if (WARN_ON(mic_crc_len > len)) + return -EINVAL; + + len -= mic_crc_len; + } + /* If frame is small enough to fit in skb->head, pull it completely. * If not, only pull ieee80211_hdr (including crypto if present, and * an additional 8 bytes for SNAP/ethertype, see below) so that diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index a5d90e028833c7e420a2c85e2ffddce7980b903a..46255d2c555b6d97dbcd3b5213af86c63234c364 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -2157,7 +2157,7 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type) return -EIO; } -#define SCAN_TIMEOUT 20000 +#define SCAN_TIMEOUT 30000 void iwl_mvm_scan_timeout_wk(struct work_struct *work) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index 394598b14a17335ca0cae9c1057edb8cdde73cc0..3f081cdea09caf0012fbecdd460e3be5f8121f38 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -98,14 +98,13 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk) struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk); /* - * Clear the ROC_RUNNING /ROC_AUX_RUNNING status bit. + * Clear the ROC_RUNNING status bit. * This will cause the TX path to drop offchannel transmissions. * That would also be done by mac80211, but it is racy, in particular * in the case that the time event actually completed in the firmware * (which is handled in iwl_mvm_te_handle_notif). */ clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status); - clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status); synchronize_net(); @@ -131,9 +130,19 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk) mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif); iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true); } - } else { + } + + /* + * Clear the ROC_AUX_RUNNING status bit. + * This will cause the TX path to drop offchannel transmissions. 
+ * That would also be done by mac80211, but it is racy, in particular + * in the case that the time event actually completed in the firmware + * (which is handled in iwl_mvm_te_handle_notif). + */ + if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) { /* do the same in case of hot spot 2.0 */ iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true); + /* In newer version of this command an aux station is added only * in cases of dedicated tx queue and need to be removed in end * of use */ @@ -1157,15 +1166,10 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm, cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)), .action = cpu_to_le32(FW_CTXT_ACTION_ADD), + .conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC), .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)), }; - /* The time_event_data.id field is reused to save session - * protection's configuration. - */ - mvmvif->time_event_data.id = SESSION_PROTECT_CONF_ASSOC; - cmd.conf_id = cpu_to_le32(mvmvif->time_event_data.id); - lockdep_assert_held(&mvm->mutex); spin_lock_bh(&mvm->time_event_lock); @@ -1179,6 +1183,11 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm, } iwl_mvm_te_clear_data(mvm, te_data); + /* + * The time_event_data.id field is reused to save session + * protection's configuration. + */ + te_data->id = le32_to_cpu(cmd.conf_id); te_data->duration = le32_to_cpu(cmd.duration_tu); spin_unlock_bh(&mvm->time_event_lock); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 2c13fa8f282009a74ee5ec9d502fc8d4f5ab5396..6aedf5762571df54ab615a66ce944e1efc580aca 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -2260,7 +2260,12 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) } } - if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) { + /* + * In some rare cases when the HW is in a bad state, we may + * get this interrupt too early, when prph_info is still NULL. + * So make sure that it's not NULL to prevent crashing. 
+ */ + if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP && trans_pcie->prph_info) { u32 sleep_notif = le32_to_cpu(trans_pcie->prph_info->sleep_notif); if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND || diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index b031e9304983c40ed32abf549576923e99150304..b2991582189c20f1b1c5c60e072d3561d40ef2ae 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -320,8 +320,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, /* This may fail if AMT took ownership of the device */ if (iwl_pcie_prepare_card_hw(trans)) { IWL_WARN(trans, "Exit HW not ready\n"); - ret = -EIO; - goto out; + return -EIO; } iwl_enable_rfkill_int(trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 082768ec8aa808b46c41e5df5a8bdd8c6f77cd00..daec61a60fec5f39f5fc4d375568058f7e315593 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1313,8 +1313,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, /* This may fail if AMT took ownership of the device */ if (iwl_pcie_prepare_card_hw(trans)) { IWL_WARN(trans, "Exit HW not ready\n"); - ret = -EIO; - goto out; + return -EIO; } iwl_enable_rfkill_int(trans); diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c index 9181221a2434d89f318d58b87d9c4dc011dca146..0136df00ff6a65454e0b4aecb9f7575c569780a4 100644 --- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c @@ -1148,6 +1148,7 @@ int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, return 0; err_free_tfds: dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr); + txq->tfds = NULL; error: if (txq->entries && cmd_queue) for (i = 0; i < slots_num; i++) diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c index bc79ca4cb803cc987cc0b9d33316950cd7f07520..753458628f86a18707b0b20acd9923bacd8706a2 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_event.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c @@ -364,10 +364,12 @@ static void mwifiex_process_uap_tx_pause(struct mwifiex_private *priv, sta_ptr = mwifiex_get_sta_entry(priv, tp->peermac); if (sta_ptr && sta_ptr->tx_pause != tp->tx_pause) { sta_ptr->tx_pause = tp->tx_pause; + spin_unlock_bh(&priv->sta_list_spinlock); mwifiex_update_ralist_tx_pause(priv, tp->peermac, tp->tx_pause); + } else { + spin_unlock_bh(&priv->sta_list_spinlock); } - spin_unlock_bh(&priv->sta_list_spinlock); } } @@ -399,11 +401,13 @@ static void mwifiex_process_sta_tx_pause(struct mwifiex_private *priv, sta_ptr = mwifiex_get_sta_entry(priv, tp->peermac); if (sta_ptr && sta_ptr->tx_pause != tp->tx_pause) { sta_ptr->tx_pause = tp->tx_pause; + spin_unlock_bh(&priv->sta_list_spinlock); mwifiex_update_ralist_tx_pause(priv, tp->peermac, tp->tx_pause); + } else { + spin_unlock_bh(&priv->sta_list_spinlock); } - spin_unlock_bh(&priv->sta_list_spinlock); } } } diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c index 9736aa0ab7fd432506ac17eda4129cafd19b8913..8f01fcbe93961a03a8c78924440ba7382489aea4 100644 --- a/drivers/net/wireless/marvell/mwifiex/usb.c +++ b/drivers/net/wireless/marvell/mwifiex/usb.c @@ -130,7 +130,8 @@ static int 
mwifiex_usb_recv(struct mwifiex_adapter *adapter, default: mwifiex_dbg(adapter, ERROR, "unknown recv_type %#x\n", recv_type); - return -1; + ret = -1; + goto exit_restore_skb; } break; case MWIFIEX_USB_EP_DATA: diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c index 6312fddd9c00a2b37dee0951810098472e72c053..eaba661133280272cb370fb647b04fefcc7d1c96 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c @@ -1000,6 +1000,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw) _initpabias(hw); rtl92c_dm_init(hw); exit: + local_irq_disable(); local_irq_restore(flags); return err; } diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index 565efd8806247edbdf94b1fed4f134afe6cd290a..2ef1416899f03858b38ea838052c999cb65352d2 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -1652,7 +1652,7 @@ int rtw_core_init(struct rtw_dev *rtwdev) /* default rx filter setting */ rtwdev->hal.rcr = BIT_APP_FCS | BIT_APP_MIC | BIT_APP_ICV | - BIT_HTC_LOC_CTRL | BIT_APP_PHYSTS | + BIT_PKTCTL_DLEN | BIT_HTC_LOC_CTRL | BIT_APP_PHYSTS | BIT_AB | BIT_AM | BIT_APM; ret = rtw_load_firmware(rtwdev, RTW_NORMAL_FW); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.h b/drivers/net/wireless/realtek/rtw88/rtw8821c.h index bd01e82b6bcd0bf6c2c3bd9d6c0b38f091dfbd5a..8d1e8ff71d7efc3d0fe13e6fb4346b26565e39fe 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.h +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.h @@ -131,7 +131,7 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data) #define WLAN_TX_FUNC_CFG2 0x30 #define WLAN_MAC_OPT_NORM_FUNC1 0x98 #define WLAN_MAC_OPT_LB_FUNC1 0x80 -#define WLAN_MAC_OPT_FUNC2 0x30810041 +#define WLAN_MAC_OPT_FUNC2 0xb0810041 #define WLAN_SIFS_CFG (WLAN_SIFS_CCK_CONT_TX | \ (WLAN_SIFS_OFDM_CONT_TX << BIT_SHIFT_SIFS_OFDM_CTX) | \ diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c index 22d0dd640ac94fa8a06848ba23b33056748c5505..dbfd67c3f598cac92bbbcb5a2f49218e14a93621 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c @@ -204,7 +204,7 @@ static void rtw8822b_phy_set_param(struct rtw_dev *rtwdev) #define WLAN_TX_FUNC_CFG2 0x30 #define WLAN_MAC_OPT_NORM_FUNC1 0x98 #define WLAN_MAC_OPT_LB_FUNC1 0x80 -#define WLAN_MAC_OPT_FUNC2 0x30810041 +#define WLAN_MAC_OPT_FUNC2 0xb0810041 #define WLAN_SIFS_CFG (WLAN_SIFS_CCK_CONT_TX | \ (WLAN_SIFS_OFDM_CONT_TX << BIT_SHIFT_SIFS_OFDM_CTX) | \ diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c index 79ad6232dce8303353548ce52c5b4f2dde9018c8..cee586335552d82621c54fcb080cc9a2cc522b92 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c @@ -1248,7 +1248,7 @@ static void rtw8822c_phy_set_param(struct rtw_dev *rtwdev) #define WLAN_TX_FUNC_CFG2 0x30 #define WLAN_MAC_OPT_NORM_FUNC1 0x98 #define WLAN_MAC_OPT_LB_FUNC1 0x80 -#define WLAN_MAC_OPT_FUNC2 0x30810041 +#define WLAN_MAC_OPT_FUNC2 0xb0810041 #define WLAN_MAC_INT_MIG_CFG 0x33330000 #define WLAN_SIFS_CFG (WLAN_SIFS_CCK_CONT_TX | \ diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c index 8c638cfeac52f0524b0b31aa8ff792b5e59e7b7a..fe8aed58ac0888d088820da525347dfcd6648194 100644 --- 
a/drivers/net/wireless/rsi/rsi_91x_main.c +++ b/drivers/net/wireless/rsi/rsi_91x_main.c @@ -23,6 +23,7 @@ #include "rsi_common.h" #include "rsi_coex.h" #include "rsi_hal.h" +#include "rsi_usb.h" u32 rsi_zone_enabled = /* INFO_ZONE | INIT_ZONE | @@ -168,6 +169,9 @@ int rsi_read_pkt(struct rsi_common *common, u8 *rx_pkt, s32 rcv_pkt_len) frame_desc = &rx_pkt[index]; actual_length = *(u16 *)&frame_desc[0]; offset = *(u16 *)&frame_desc[2]; + if (!rcv_pkt_len && offset > + RSI_MAX_RX_USB_PKT_SIZE - FRAME_DESC_SZ) + goto fail; queueno = rsi_get_queueno(frame_desc, offset); length = rsi_get_length(frame_desc, offset); diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index d881df9ebd0c315ce662fe665dc3670798741513..11388a14696211b3bcc112eb0af0b7ffa70ac6e7 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -269,8 +269,12 @@ static void rsi_rx_done_handler(struct urb *urb) struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)rx_cb->data; int status = -EINVAL; + if (!rx_cb->rx_skb) + return; + if (urb->status) { dev_kfree_skb(rx_cb->rx_skb); + rx_cb->rx_skb = NULL; return; } @@ -294,8 +298,10 @@ out: if (rsi_rx_urb_submit(dev->priv, rx_cb->ep_num, GFP_ATOMIC)) rsi_dbg(ERR_ZONE, "%s: Failed in urb submission", __func__); - if (status) + if (status) { dev_kfree_skb(rx_cb->rx_skb); + rx_cb->rx_skb = NULL; + } } static void rsi_rx_urb_kill(struct rsi_hw *adapter, u8 ep_num) @@ -322,7 +328,6 @@ static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num, gfp_t mem_flags) struct sk_buff *skb; u8 dword_align_bytes = 0; -#define RSI_MAX_RX_USB_PKT_SIZE 3000 skb = dev_alloc_skb(RSI_MAX_RX_USB_PKT_SIZE); if (!skb) return -ENOMEM; diff --git a/drivers/net/wireless/rsi/rsi_usb.h b/drivers/net/wireless/rsi/rsi_usb.h index 8702f434b5699978549144b03203dacf5e016790..ad88f8c70a35174ea969f37464df307e0794508d 100644 --- a/drivers/net/wireless/rsi/rsi_usb.h +++ b/drivers/net/wireless/rsi/rsi_usb.h @@ -44,6 +44,8 @@ #define RSI_USB_BUF_SIZE 4096 #define RSI_USB_CTRL_BUF_SIZE 0x04 +#define RSI_MAX_RX_USB_PKT_SIZE 3000 + struct rx_usb_ctrl_block { u8 *data; struct urb *rx_urb; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 99b5152482fe420f26015676285cdcf2b8059e19..71c85c99e86c6c13dd0c1f79d872b32433a51d9b 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -4259,7 +4259,14 @@ static void nvme_async_event_work(struct work_struct *work) container_of(work, struct nvme_ctrl, async_event_work); nvme_aen_uevent(ctrl); - ctrl->ops->submit_async_event(ctrl); + + /* + * The transport drivers must guarantee AER submission here is safe by + * flushing ctrl async_event_work after changing the controller state + * from LIVE and before freeing the admin queue. 
+ */ + if (ctrl->state == NVME_CTRL_LIVE) + ctrl->ops->submit_async_event(ctrl); } static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl) diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h index a9c1e3b4585ec3f9721c7e76fd031e8250fbd265..78467cb3f343e7dad0321e059264ca6e1bfb3cfe 100644 --- a/drivers/nvme/host/fabrics.h +++ b/drivers/nvme/host/fabrics.h @@ -153,6 +153,7 @@ nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl, struct nvmf_ctrl_options *opts) { if (ctrl->state == NVME_CTRL_DELETING || + ctrl->state == NVME_CTRL_DELETING_NOIO || ctrl->state == NVME_CTRL_DEAD || strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) || strcmp(opts->host->nqn, ctrl->opts->host->nqn) || diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 1b85349f57af0f278f10874c4149a4ff50254af5..97afeb898b2531bf627f460c4acbf4a612fcc37e 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -3198,7 +3198,8 @@ static const struct pci_device_id nvme_id_table[] = { NVME_QUIRK_DEALLOCATE_ZEROES, }, { PCI_VDEVICE(INTEL, 0x0a54), /* Intel P4500/P4600 */ .driver_data = NVME_QUIRK_STRIPE_SIZE | - NVME_QUIRK_DEALLOCATE_ZEROES, }, + NVME_QUIRK_DEALLOCATE_ZEROES | + NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_VDEVICE(INTEL, 0x0a55), /* Dell Express Flash P4600 */ .driver_data = NVME_QUIRK_STRIPE_SIZE | NVME_QUIRK_DEALLOCATE_ZEROES, }, diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 1b90563818434486bcff9d0f7d7ee15395b525ed..8eacc9bd58f5acfe6ce034e4869d75ef545fd71b 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -1200,6 +1200,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work) struct nvme_rdma_ctrl, err_work); nvme_stop_keep_alive(&ctrl->ctrl); + flush_work(&ctrl->ctrl.async_event_work); nvme_rdma_teardown_io_queues(ctrl, false); nvme_start_queues(&ctrl->ctrl); nvme_rdma_teardown_admin_queue(ctrl, false); diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index e99d43989418738738524c631b343b084cb9f8d7..6105894a218a56575b7325c2fef80226fbde32bf 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -904,7 +904,15 @@ static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue) static void nvme_tcp_fail_request(struct nvme_tcp_request *req) { - nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR); + if (nvme_tcp_async_req(req)) { + union nvme_result res = {}; + + nvme_complete_async_event(&req->queue->ctrl->ctrl, + cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res); + } else { + nvme_tcp_end_request(blk_mq_rq_from_pdu(req), + NVME_SC_HOST_PATH_ERROR); + } } static int nvme_tcp_try_send_data(struct nvme_tcp_request *req) @@ -2069,6 +2077,7 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work) struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl; nvme_stop_keep_alive(ctrl); + flush_work(&ctrl->async_event_work); nvme_tcp_teardown_io_queues(ctrl, false); /* unquiesce to fail fast pending requests */ nvme_start_queues(ctrl); diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index 6b170083cd248e5f61ef69ed69ef5fd328a6ddf2..21d89d80d083834ad0b6c7d0189fc68f236118cc 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -222,6 +222,8 @@ static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj, struct device *dev = kobj_to_dev(kobj); struct nvmem_device *nvmem = to_nvmem_device(dev); + attr->size = nvmem->size; + return nvmem_bin_attr_get_umode(nvmem); } diff --git a/drivers/of/base.c b/drivers/of/base.c index 
161a23631472d94d1a01b6b7752303153561f329..a44a0e7ba2510767b5cfd3a8c484c991da024f6e 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -1328,9 +1328,14 @@ int of_phandle_iterator_next(struct of_phandle_iterator *it) * property data length */ if (it->cur + count > it->list_end) { - pr_err("%pOF: %s = %d found %d\n", - it->parent, it->cells_name, - count, it->cell_count); + if (it->cells_name) + pr_err("%pOF: %s = %d found %td\n", + it->parent, it->cells_name, + count, it->list_end - it->cur); + else + pr_err("%pOF: phandle %s needs %d, found %td\n", + it->parent, of_node_full_name(it->node), + count, it->list_end - it->cur); goto err; } } diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index 1d4b0b7d0cc10c71c5071439f54eaf61ce1afdb1..5407bbdb6439519e9a2521a7aff31c2789fa2df4 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -910,11 +910,18 @@ static void __init of_unittest_dma_ranges_one(const char *path, if (!rc) { phys_addr_t paddr; dma_addr_t dma_addr; - struct device dev_bogus; + struct device *dev_bogus; - dev_bogus.dma_range_map = map; - paddr = dma_to_phys(&dev_bogus, expect_dma_addr); - dma_addr = phys_to_dma(&dev_bogus, expect_paddr); + dev_bogus = kzalloc(sizeof(struct device), GFP_KERNEL); + if (!dev_bogus) { + unittest(0, "kzalloc() failed\n"); + kfree(map); + return; + } + + dev_bogus->dma_range_map = map; + paddr = dma_to_phys(dev_bogus, expect_dma_addr); + dma_addr = phys_to_dma(dev_bogus, expect_paddr); unittest(paddr == expect_paddr, "of_dma_get_range: wrong phys addr %pap (expecting %llx) on node %pOF\n", @@ -924,6 +931,7 @@ static void __init of_unittest_dma_ranges_one(const char *path, &dma_addr, expect_dma_addr, np); kfree(map); + kfree(dev_bogus); } of_node_put(np); #endif @@ -933,8 +941,9 @@ static void __init of_unittest_parse_dma_ranges(void) { of_unittest_dma_ranges_one("/testcase-data/address-tests/device@70000000", 0x0, 0x20000000); - of_unittest_dma_ranges_one("/testcase-data/address-tests/bus@80000000/device@1000", - 0x100000000, 0x20000000); + if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) + of_unittest_dma_ranges_one("/testcase-data/address-tests/bus@80000000/device@1000", + 0x100000000, 0x20000000); of_unittest_dma_ranges_one("/testcase-data/address-tests/pci@90000000", 0x80000000, 0x20000000); } diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index b5f9ee81a46c1e2753d5f9150bf696279e882fff..b916fab9b161816bf032ba5b47ab58dc07cc4692 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -1003,7 +1003,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, ioc->usg_calls++; #endif - while(sg_dma_len(sglist) && nents--) { + while (nents && sg_dma_len(sglist)) { #ifdef CCIO_COLLECT_STATS ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT; @@ -1011,6 +1011,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, ccio_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction, 0); ++sglist; + nents--; } DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents); diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c index e090978518f1a651195cad3e654fc6bde4900094..4760f82def6ec5738f1aada43b4323a200065098 100644 --- a/drivers/parisc/pdc_stable.c +++ b/drivers/parisc/pdc_stable.c @@ -979,8 +979,10 @@ pdcs_register_pathentries(void) entry->kobj.kset = paths_kset; err = kobject_init_and_add(&entry->kobj, &ktype_pdcspath, NULL, "%s", entry->name); - if (err) + if (err) { + kobject_put(&entry->kobj); return err; + } /* kobject is now 
registered */ write_lock(&entry->rw_lock); diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index dce4cdf786cdb19a607a79dc35a804067577d41d..228c58060e9b3810f7ceb9dc1939628ec6eb804e 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -1047,7 +1047,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, spin_unlock_irqrestore(&ioc->res_lock, flags); #endif - while (sg_dma_len(sglist) && nents--) { + while (nents && sg_dma_len(sglist)) { sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction, 0); @@ -1056,6 +1056,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, ioc->usingle_calls--; /* kluge since call is unmap_sg() */ #endif ++sglist; + nents--; } DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents); diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index 0f6a6685ab5b5400b2cba8acd26cd6c97b375ce5..f30144c8c0bd2c6ae5adf93a7837c10e80d08e97 100644 --- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c @@ -879,7 +879,6 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge, return PCI_BRIDGE_EMUL_HANDLED; } - case PCI_CAP_LIST_ID: case PCI_EXP_DEVCAP: case PCI_EXP_DEVCTL: *value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg); @@ -960,6 +959,9 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie) /* Support interrupt A for MSI feature */ bridge->conf.intpin = PCIE_CORE_INT_A_ASSERT_ENABLE; + /* Aardvark HW provides PCIe Capability structure in version 2 */ + bridge->pcie_conf.cap = cpu_to_le16(2); + /* Indicates supports for Completion Retry Status */ bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS); diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index ad3e3cde1c20d2c9b6787b346fe43612738f6ec4..a070e69bb49cd40da45ddb46b857a7290a2cde76 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -1841,8 +1841,17 @@ static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus) if (!hv_dev) continue; - if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY) - set_dev_node(&dev->dev, hv_dev->desc.virtual_numa_node); + if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY && + hv_dev->desc.virtual_numa_node < num_possible_nodes()) + /* + * The kernel may boot with some NUMA nodes offline + * (e.g. in a KDUMP kernel) or with NUMA disabled via + * "numa=off". In those cases, adjust the host provided + * NUMA node to a valid NUMA node used by the kernel. 
+ */ + set_dev_node(&dev->dev, + numa_map_to_online_node( + hv_dev->desc.virtual_numa_node)); put_pcichild(hv_dev); } diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c index ed13e81cd691d461708f2d7b798037fee15f8449..2dc6890dbcaa263471714d7989b4185b90e6b738 100644 --- a/drivers/pci/controller/pci-mvebu.c +++ b/drivers/pci/controller/pci-mvebu.c @@ -573,6 +573,8 @@ static struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = { static void mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port) { struct pci_bridge_emul *bridge = &port->bridge; + u32 pcie_cap = mvebu_readl(port, PCIE_CAP_PCIEXP); + u8 pcie_cap_ver = ((pcie_cap >> 16) & PCI_EXP_FLAGS_VERS); bridge->conf.vendor = PCI_VENDOR_ID_MARVELL; bridge->conf.device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16; @@ -585,6 +587,12 @@ static void mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port) bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32; } + /* + * Older mvebu hardware provides PCIe Capability structure only in + * version 1. New hardware provides it in version 2. + */ + bridge->pcie_conf.cap = cpu_to_le16(pcie_cap_ver); + bridge->has_pcie = true; bridge->data = port; bridge->ops = &mvebu_pci_bridge_emul_ops; diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c index c33b385ac918e4eeb7805aaf9007b42be4f58398..b651b6f4446917855d92875df4c6f93ce55aed10 100644 --- a/drivers/pci/controller/pci-xgene.c +++ b/drivers/pci/controller/pci-xgene.c @@ -467,7 +467,7 @@ static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size) return 1; } - if ((size > SZ_1K) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 0))) { + if ((size > SZ_1K) && (size < SZ_4G) && !(*ib_reg_mask & (1 << 0))) { *ib_reg_mask |= (1 << 0); return 0; } diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 4fd200d8b0a9d921f49a3290a3eb2fcaf4c3dbd6..f1f789fe0637a6345db2ca1d9a437223f9c96a90 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -72,6 +72,8 @@ extern int pciehp_poll_time; * @reset_lock: prevents access to the Data Link Layer Link Active bit in the * Link Status register and to the Presence Detect State bit in the Slot * Status register during a slot reset which may cause them to flap + * @depth: Number of additional hotplug ports in the path to the root bus, + * used as lock subclass for @reset_lock * @ist_running: flag to keep user request waiting while IRQ thread is running * @request_result: result of last user request submitted to the IRQ thread * @requester: wait queue to wake up on completion of user request, @@ -103,6 +105,7 @@ struct controller { struct hotplug_slot hotplug_slot; /* hotplug core interface */ struct rw_semaphore reset_lock; + unsigned int depth; unsigned int ist_running; int request_result; wait_queue_head_t requester; diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index ad3393930ecb4d91dc21ef7a5abf575662c76e7c..e7fe4b42f0394e360b20dbefd886a1509541bc29 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -166,7 +166,7 @@ static void pciehp_check_presence(struct controller *ctrl) { int occupied; - down_read(&ctrl->reset_lock); + down_read_nested(&ctrl->reset_lock, ctrl->depth); mutex_lock(&ctrl->state_lock); occupied = pciehp_card_present_or_link_active(ctrl); diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 9d06939736c0f0634670bf346df2138905854d71..30708af975adc6383fc496088c09bccd475eebce 100644 --- 
a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -583,7 +583,7 @@ static void pciehp_ignore_dpc_link_change(struct controller *ctrl, * the corresponding link change may have been ignored above. * Synthesize it to ensure that it is acted on. */ - down_read(&ctrl->reset_lock); + down_read_nested(&ctrl->reset_lock, ctrl->depth); if (!pciehp_check_link_active(ctrl)) pciehp_request(ctrl, PCI_EXP_SLTSTA_DLLSC); up_read(&ctrl->reset_lock); @@ -642,6 +642,8 @@ read_status: */ if (ctrl->power_fault_detected) status &= ~PCI_EXP_SLTSTA_PFD; + else if (status & PCI_EXP_SLTSTA_PFD) + ctrl->power_fault_detected = true; events |= status; if (!events) { @@ -651,7 +653,7 @@ read_status: } if (status) { - pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events); + pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, status); /* * In MSI mode, all event bits must be zero before the port @@ -725,8 +727,7 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id) } /* Check Power Fault Detected */ - if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) { - ctrl->power_fault_detected = 1; + if (events & PCI_EXP_SLTSTA_PFD) { ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(ctrl)); pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, PCI_EXP_SLTCTL_ATTN_IND_ON); @@ -746,7 +747,7 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id) * Disable requests have higher priority than Presence Detect Changed * or Data Link Layer State Changed events. */ - down_read(&ctrl->reset_lock); + down_read_nested(&ctrl->reset_lock, ctrl->depth); if (events & DISABLE_SLOT) pciehp_handle_disable_request(ctrl); else if (events & (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC)) @@ -880,7 +881,7 @@ int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, int probe) if (probe) return 0; - down_write(&ctrl->reset_lock); + down_write_nested(&ctrl->reset_lock, ctrl->depth); if (!ATTN_BUTTN(ctrl)) { ctrl_mask |= PCI_EXP_SLTCTL_PDCE; @@ -936,6 +937,20 @@ static inline void dbg_ctrl(struct controller *ctrl) #define FLAG(x, y) (((x) & (y)) ? '+' : '-') +static inline int pcie_hotplug_depth(struct pci_dev *dev) +{ + struct pci_bus *bus = dev->bus; + int depth = 0; + + while (bus->parent) { + bus = bus->parent; + if (bus->self && bus->self->is_hotplug_bridge) + depth++; + } + + return depth; +} + struct controller *pcie_init(struct pcie_device *dev) { struct controller *ctrl; @@ -949,6 +964,7 @@ struct controller *pcie_init(struct pcie_device *dev) return NULL; ctrl->pcie = dev; + ctrl->depth = pcie_hotplug_depth(dev->port); pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap); if (pdev->hotplug_user_indicators) diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 57314fec2261b7dbf7f47d31e2e839f8701b736f..3da69b26e674366a4d996742c3cff610eab8c859 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -1291,19 +1291,24 @@ EXPORT_SYMBOL(pci_free_irq_vectors); /** * pci_irq_vector - return Linux IRQ number of a device vector - * @dev: PCI device to operate on - * @nr: device-relative interrupt vector index (0-based). + * @dev: PCI device to operate on + * @nr: Interrupt vector index (0-based) + * + * @nr has the following meanings depending on the interrupt mode: + * MSI-X: The index in the MSI-X vector table + * MSI: The index of the enabled MSI vectors + * INTx: Must be 0 + * + * Return: The Linux interrupt number or -EINVAL if @nr is out of range. 
*/ int pci_irq_vector(struct pci_dev *dev, unsigned int nr) { if (dev->msix_enabled) { struct msi_desc *entry; - int i = 0; for_each_pci_msi_entry(entry, dev) { - if (i == nr) + if (entry->msi_attrib.entry_nr == nr) return entry->irq; - i++; } WARN_ON_ONCE(1); return -EINVAL; @@ -1327,17 +1332,22 @@ EXPORT_SYMBOL(pci_irq_vector); * pci_irq_get_affinity - return the affinity of a particular MSI vector * @dev: PCI device to operate on * @nr: device-relative interrupt vector index (0-based). + * + * @nr has the following meanings depending on the interrupt mode: + * MSI-X: The index in the MSI-X vector table + * MSI: The index of the enabled MSI vectors + * INTx: Must be 0 + * + * Return: A cpumask pointer or NULL if @nr is out of range */ const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr) { if (dev->msix_enabled) { struct msi_desc *entry; - int i = 0; for_each_pci_msi_entry(entry, dev) { - if (i == nr) + if (entry->msi_attrib.entry_nr == nr) return &entry->affinity->mask; - i++; } WARN_ON_ONCE(1); return NULL; diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c index db97cddfc85e1c7e731bbb0f667dc2427b323f6c..37504c2cce9b89165da60b384b895e19ab89aa56 100644 --- a/drivers/pci/pci-bridge-emul.c +++ b/drivers/pci/pci-bridge-emul.c @@ -139,8 +139,13 @@ struct pci_bridge_reg_behavior pci_regs_behavior[PCI_STD_HEADER_SIZEOF / 4] = { .ro = GENMASK(7, 0), }, + /* + * If expansion ROM is unsupported then ROM Base Address register must + * be implemented as a read-only register that returns 0 when read, same + * as for unused Base Address registers. + */ [PCI_ROM_ADDRESS1 / 4] = { - .rw = GENMASK(31, 11) | BIT(0), + .ro = ~0, }, /* @@ -171,41 +176,55 @@ struct pci_bridge_reg_behavior pcie_cap_regs_behavior[PCI_CAP_PCIE_SIZEOF / 4] = [PCI_CAP_LIST_ID / 4] = { /* * Capability ID, Next Capability Pointer and - * Capabilities register are all read-only. + * bits [14:0] of Capabilities register are all read-only. + * Bit 15 of Capabilities register is reserved. */ - .ro = ~0, + .ro = GENMASK(30, 0), }, [PCI_EXP_DEVCAP / 4] = { - .ro = ~0, + /* + * Bits [31:29] and [17:16] are reserved. + * Bits [27:18] are reserved for non-upstream ports. + * Bits 28 and [14:6] are reserved for non-endpoint devices. + * Other bits are read-only. + */ + .ro = BIT(15) | GENMASK(5, 0), }, [PCI_EXP_DEVCTL / 4] = { - /* Device control register is RW */ - .rw = GENMASK(15, 0), + /* + * Device control register is RW, except bit 15 which is + * reserved for non-endpoints or non-PCIe-to-PCI/X bridges. + */ + .rw = GENMASK(14, 0), /* * Device status register has bits 6 and [3:0] W1C, [5:4] RO, - * the rest is reserved + * the rest is reserved. Also bit 6 is reserved for non-upstream + * ports. */ - .w1c = (BIT(6) | GENMASK(3, 0)) << 16, + .w1c = GENMASK(3, 0) << 16, .ro = GENMASK(5, 4) << 16, }, [PCI_EXP_LNKCAP / 4] = { - /* All bits are RO, except bit 23 which is reserved */ - .ro = lower_32_bits(~BIT(23)), + /* + * All bits are RO, except bit 23 which is reserved and + * bit 18 which is reserved for non-upstream ports. + */ + .ro = lower_32_bits(~(BIT(23) | PCI_EXP_LNKCAP_CLKPM)), }, [PCI_EXP_LNKCTL / 4] = { /* * Link control has bits [15:14], [11:3] and [1:0] RW, the - * rest is reserved. + * rest is reserved. Bit 8 is reserved for non-upstream ports. * * Link status has bits [13:0] RO, and bits [15:14] * W1C. 
*/ - .rw = GENMASK(15, 14) | GENMASK(11, 3) | GENMASK(1, 0), + .rw = GENMASK(15, 14) | GENMASK(11, 9) | GENMASK(7, 3) | GENMASK(1, 0), .ro = GENMASK(13, 0) << 16, .w1c = GENMASK(15, 14) << 16, }, @@ -277,11 +296,9 @@ int pci_bridge_emul_init(struct pci_bridge_emul *bridge, if (bridge->has_pcie) { bridge->conf.capabilities_pointer = PCI_CAP_PCIE_START; + bridge->conf.status |= cpu_to_le16(PCI_STATUS_CAP_LIST); bridge->pcie_conf.cap_id = PCI_CAP_ID_EXP; - /* Set PCIe v2, root port, slot support */ - bridge->pcie_conf.cap = - cpu_to_le16(PCI_EXP_TYPE_ROOT_PORT << 4 | 2 | - PCI_EXP_FLAGS_SLOT); + bridge->pcie_conf.cap |= cpu_to_le16(PCI_EXP_TYPE_ROOT_PORT << 4); bridge->pcie_cap_regs_behavior = kmemdup(pcie_cap_regs_behavior, sizeof(pcie_cap_regs_behavior), @@ -290,6 +307,27 @@ int pci_bridge_emul_init(struct pci_bridge_emul *bridge, kfree(bridge->pci_regs_behavior); return -ENOMEM; } + /* These bits are applicable only for PCI and reserved on PCIe */ + bridge->pci_regs_behavior[PCI_CACHE_LINE_SIZE / 4].ro &= + ~GENMASK(15, 8); + bridge->pci_regs_behavior[PCI_COMMAND / 4].ro &= + ~((PCI_COMMAND_SPECIAL | PCI_COMMAND_INVALIDATE | + PCI_COMMAND_VGA_PALETTE | PCI_COMMAND_WAIT | + PCI_COMMAND_FAST_BACK) | + (PCI_STATUS_66MHZ | PCI_STATUS_FAST_BACK | + PCI_STATUS_DEVSEL_MASK) << 16); + bridge->pci_regs_behavior[PCI_PRIMARY_BUS / 4].ro &= + ~GENMASK(31, 24); + bridge->pci_regs_behavior[PCI_IO_BASE / 4].ro &= + ~((PCI_STATUS_66MHZ | PCI_STATUS_FAST_BACK | + PCI_STATUS_DEVSEL_MASK) << 16); + bridge->pci_regs_behavior[PCI_INTERRUPT_LINE / 4].rw &= + ~((PCI_BRIDGE_CTL_MASTER_ABORT | + BIT(8) | BIT(9) | BIT(11)) << 16); + bridge->pci_regs_behavior[PCI_INTERRUPT_LINE / 4].ro &= + ~((PCI_BRIDGE_CTL_FAST_BACK) << 16); + bridge->pci_regs_behavior[PCI_INTERRUPT_LINE / 4].w1c &= + ~(BIT(10) << 16); } if (flags & PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR) { diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index bb863ddb59bfcf47ec7bfabccb631c5fe278a735..95fcc735c88e774568bda782313108bf026c1d12 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -4077,6 +4077,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9120, quirk_dma_func1_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123, quirk_dma_func1_alias); +/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c136 */ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9125, + quirk_dma_func1_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128, quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */ diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c index e211e2619680ce7a1e841878503c532d3b63e7ea..f70197154a36256ce59dcaf31d22faa09cb5fcaf 100644 --- a/drivers/pcmcia/cs.c +++ b/drivers/pcmcia/cs.c @@ -666,18 +666,16 @@ static int pccardd(void *__skt) if (events || sysfs_events) continue; + set_current_state(TASK_INTERRUPTIBLE); if (kthread_should_stop()) break; - set_current_state(TASK_INTERRUPTIBLE); - schedule(); - /* make sure we are running */ - __set_current_state(TASK_RUNNING); - try_to_freeze(); } + /* make sure we are running before we exit */ + __set_current_state(TASK_RUNNING); /* shut down socket, if a device is still present */ if (skt->state & SOCKET_PRESENT) { diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c index 3b05760e69d6213ecb48ae659d27ce6ebdc5a00a..69a6e9a5d6d269be87c84ea1611443c8e3ea657f 100644 --- a/drivers/pcmcia/rsrc_nonstatic.c +++ b/drivers/pcmcia/rsrc_nonstatic.c @@ -690,6 +690,9 @@ static struct resource 
*__nonstatic_find_io_region(struct pcmcia_socket *s, unsigned long min = base; int ret; + if (!res) + return NULL; + data.mask = align - 1; data.offset = base & data.mask; data.map = &s_data->io_db; @@ -809,6 +812,9 @@ static struct resource *nonstatic_find_mem_region(u_long base, u_long num, unsigned long min, max; int ret, i, j; + if (!res) + return NULL; + low = low || !(s->features & SS_CAP_PAGE_REGS); data.mask = align - 1; diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c index 99fbc7e4138be7e1262b6e413fbaeda208365623..b901a0d4e2a80af9e3a4b2aef4cadb072066b74e 100644 --- a/drivers/phy/broadcom/phy-brcm-usb.c +++ b/drivers/phy/broadcom/phy-brcm-usb.c @@ -17,6 +17,7 @@ #include #include #include +#include #include "phy-brcm-usb-init.h" @@ -69,12 +70,35 @@ struct brcm_usb_phy_data { int init_count; int wake_irq; struct brcm_usb_phy phys[BRCM_USB_PHY_ID_MAX]; + struct notifier_block pm_notifier; + bool pm_active; }; static s8 *node_reg_names[BRCM_REGS_MAX] = { "crtl", "xhci_ec", "xhci_gbl", "usb_phy", "usb_mdio", "bdc_ec" }; +static int brcm_pm_notifier(struct notifier_block *notifier, + unsigned long pm_event, + void *unused) +{ + struct brcm_usb_phy_data *priv = + container_of(notifier, struct brcm_usb_phy_data, pm_notifier); + + switch (pm_event) { + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: + priv->pm_active = true; + break; + case PM_POST_RESTORE: + case PM_POST_HIBERNATION: + case PM_POST_SUSPEND: + priv->pm_active = false; + break; + } + return NOTIFY_DONE; +} + static irqreturn_t brcm_usb_phy_wake_isr(int irq, void *dev_id) { struct phy *gphy = dev_id; @@ -90,6 +114,9 @@ static int brcm_usb_phy_init(struct phy *gphy) struct brcm_usb_phy_data *priv = container_of(phy, struct brcm_usb_phy_data, phys[phy->id]); + if (priv->pm_active) + return 0; + /* * Use a lock to make sure a second caller waits until * the base phy is inited before using it. @@ -119,6 +146,9 @@ static int brcm_usb_phy_exit(struct phy *gphy) struct brcm_usb_phy_data *priv = container_of(phy, struct brcm_usb_phy_data, phys[phy->id]); + if (priv->pm_active) + return 0; + dev_dbg(&gphy->dev, "EXIT\n"); if (phy->id == BRCM_USB_PHY_2_0) brcm_usb_uninit_eohci(&priv->ini); @@ -484,6 +514,9 @@ static int brcm_usb_phy_probe(struct platform_device *pdev) if (err) return err; + priv->pm_notifier.notifier_call = brcm_pm_notifier; + register_pm_notifier(&priv->pm_notifier); + mutex_init(&priv->mutex); /* make sure invert settings are correct */ @@ -524,7 +557,10 @@ static int brcm_usb_phy_probe(struct platform_device *pdev) static int brcm_usb_phy_remove(struct platform_device *pdev) { + struct brcm_usb_phy_data *priv = dev_get_drvdata(&pdev->dev); + sysfs_remove_group(&pdev->dev.kobj, &brcm_usb_phy_group); + unregister_pm_notifier(&priv->pm_notifier); return 0; } @@ -535,6 +571,7 @@ static int brcm_usb_phy_suspend(struct device *dev) struct brcm_usb_phy_data *priv = dev_get_drvdata(dev); if (priv->init_count) { + dev_dbg(dev, "SUSPEND\n"); priv->ini.wake_enabled = device_may_wakeup(dev); if (priv->phys[BRCM_USB_PHY_3_0].inited) brcm_usb_uninit_xhci(&priv->ini); @@ -574,6 +611,7 @@ static int brcm_usb_phy_resume(struct device *dev) * Uninitialize anything that wasn't previously initialized. 
*/ if (priv->init_count) { + dev_dbg(dev, "RESUME\n"); if (priv->wake_irq >= 0) disable_irq_wake(priv->wake_irq); brcm_usb_init_common(&priv->ini); diff --git a/drivers/phy/socionext/phy-uniphier-usb3ss.c b/drivers/phy/socionext/phy-uniphier-usb3ss.c index 6700645bcbe6bfd6abeeb4432618b5a6c2596055..3b5ffc16a6947c8a32e4a8e341f395ebe1e5fe5d 100644 --- a/drivers/phy/socionext/phy-uniphier-usb3ss.c +++ b/drivers/phy/socionext/phy-uniphier-usb3ss.c @@ -22,11 +22,13 @@ #include #define SSPHY_TESTI 0x0 -#define SSPHY_TESTO 0x4 #define TESTI_DAT_MASK GENMASK(13, 6) #define TESTI_ADR_MASK GENMASK(5, 1) #define TESTI_WR_EN BIT(0) +#define SSPHY_TESTO 0x4 +#define TESTO_DAT_MASK GENMASK(7, 0) + #define PHY_F(regno, msb, lsb) { (regno), (msb), (lsb) } #define CDR_CPD_TRIM PHY_F(7, 3, 0) /* RxPLL charge pump current */ @@ -84,12 +86,12 @@ static void uniphier_u3ssphy_set_param(struct uniphier_u3ssphy_priv *priv, val = FIELD_PREP(TESTI_DAT_MASK, 1); val |= FIELD_PREP(TESTI_ADR_MASK, p->field.reg_no); uniphier_u3ssphy_testio_write(priv, val); - val = readl(priv->base + SSPHY_TESTO); + val = readl(priv->base + SSPHY_TESTO) & TESTO_DAT_MASK; /* update value */ - val &= ~FIELD_PREP(TESTI_DAT_MASK, field_mask); + val &= ~field_mask; data = field_mask & (p->value << p->field.lsb); - val = FIELD_PREP(TESTI_DAT_MASK, data); + val = FIELD_PREP(TESTI_DAT_MASK, data | val); val |= FIELD_PREP(TESTI_ADR_MASK, p->field.reg_no); uniphier_u3ssphy_testio_write(priv, val); uniphier_u3ssphy_testio_write(priv, val | TESTI_WR_EN); diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c index dceac7714872195e73f73cd04eb7d3dda117c42f..5536b8f4bfd13daaf109bc7138da32d19bac11fb 100644 --- a/drivers/phy/ti/phy-j721e-wiz.c +++ b/drivers/phy/ti/phy-j721e-wiz.c @@ -177,6 +177,7 @@ static const struct clk_div_table clk_div_table[] = { { .val = 1, .div = 2, }, { .val = 2, .div = 4, }, { .val = 3, .div = 8, }, + { /* sentinel */ }, }; static struct wiz_clk_div_sel clk_div_sel[] = { diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c index 2b0f921b6ee3de1c93fb1e96a7c3b13909668283..b8ccac6f314672857b1128b05434778f3aa75631 100644 --- a/drivers/phy/xilinx/phy-zynqmp.c +++ b/drivers/phy/xilinx/phy-zynqmp.c @@ -134,7 +134,8 @@ #define PROT_BUS_WIDTH_10 0x0 #define PROT_BUS_WIDTH_20 0x1 #define PROT_BUS_WIDTH_40 0x2 -#define PROT_BUS_WIDTH_SHIFT 2 +#define PROT_BUS_WIDTH_SHIFT(n) ((n) * 2) +#define PROT_BUS_WIDTH_MASK(n) GENMASK((n) * 2 + 1, (n) * 2) /* Number of GT lanes */ #define NUM_LANES 4 @@ -443,12 +444,12 @@ static void xpsgtr_phy_init_sata(struct xpsgtr_phy *gtr_phy) static void xpsgtr_phy_init_sgmii(struct xpsgtr_phy *gtr_phy) { struct xpsgtr_dev *gtr_dev = gtr_phy->dev; + u32 mask = PROT_BUS_WIDTH_MASK(gtr_phy->lane); + u32 val = PROT_BUS_WIDTH_10 << PROT_BUS_WIDTH_SHIFT(gtr_phy->lane); /* Set SGMII protocol TX and RX bus width to 10 bits. 
*/ - xpsgtr_write(gtr_dev, TX_PROT_BUS_WIDTH, - PROT_BUS_WIDTH_10 << (gtr_phy->lane * PROT_BUS_WIDTH_SHIFT)); - xpsgtr_write(gtr_dev, RX_PROT_BUS_WIDTH, - PROT_BUS_WIDTH_10 << (gtr_phy->lane * PROT_BUS_WIDTH_SHIFT)); + xpsgtr_clr_set(gtr_dev, TX_PROT_BUS_WIDTH, mask, val); + xpsgtr_clr_set(gtr_dev, RX_PROT_BUS_WIDTH, mask, val); xpsgtr_bypass_scrambler_8b10b(gtr_phy); } diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c index c5d88157f4b2899ae559bddf433ce4d98ea59a97..a94a9c82f0e5717c25a6206bd3cdf912b6d8ea69 100644 --- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c +++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c @@ -1264,16 +1264,18 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev) sizeof(*girq->parents), GFP_KERNEL); if (!girq->parents) { - pinctrl_remove_gpio_range(pc->pctl_dev, &pc->gpio_range); - return -ENOMEM; + err = -ENOMEM; + goto out_remove; } if (is_7211) { pc->wake_irq = devm_kcalloc(dev, BCM2835_NUM_IRQS, sizeof(*pc->wake_irq), GFP_KERNEL); - if (!pc->wake_irq) - return -ENOMEM; + if (!pc->wake_irq) { + err = -ENOMEM; + goto out_remove; + } } /* @@ -1301,8 +1303,10 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev) len = strlen(dev_name(pc->dev)) + 16; name = devm_kzalloc(pc->dev, len, GFP_KERNEL); - if (!name) - return -ENOMEM; + if (!name) { + err = -ENOMEM; + goto out_remove; + } snprintf(name, len, "%s:bank%d", dev_name(pc->dev), i); @@ -1321,11 +1325,14 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev) err = devm_gpiochip_add_data(dev, &pc->gpio_chip, pc); if (err) { dev_err(dev, "could not add GPIO chip\n"); - pinctrl_remove_gpio_range(pc->pctl_dev, &pc->gpio_range); - return err; + goto out_remove; } return 0; + +out_remove: + pinctrl_remove_gpio_range(pc->pctl_dev, &pc->gpio_range); + return err; } static struct platform_driver bcm2835_pinctrl_driver = { diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index b6ef1911c1dd16a3060a1e95d08e21f41d3fe8d3..348c670a7b07d331acdcfd3ca58f5167379bdf28 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -441,8 +441,8 @@ static void intel_gpio_set_gpio_mode(void __iomem *padcfg0) value &= ~PADCFG0_PMODE_MASK; value |= PADCFG0_PMODE_GPIO; - /* Disable input and output buffers */ - value |= PADCFG0_GPIORXDIS; + /* Disable TX buffer and enable RX (this will be input) */ + value &= ~PADCFG0_GPIORXDIS; value |= PADCFG0_GPIOTXDIS; /* Disable SCI/SMI/NMI generation */ @@ -487,9 +487,6 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, intel_gpio_set_gpio_mode(padcfg0); - /* Disable TX buffer and enable RX (this will be input) */ - __intel_gpio_set_direction(padcfg0, true); - raw_spin_unlock_irqrestore(&pctrl->lock, flags); return 0; @@ -1105,9 +1102,6 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned int type) intel_gpio_set_gpio_mode(reg); - /* Disable TX buffer and enable RX (this will be input) */ - __intel_gpio_set_direction(reg, true); - value = readl(reg); value &= ~(PADCFG0_RXEVCFG_MASK | PADCFG0_RXINV); @@ -1207,6 +1201,39 @@ static irqreturn_t intel_gpio_irq(int irq, void *data) return IRQ_RETVAL(ret); } +static void intel_gpio_irq_init(struct intel_pinctrl *pctrl) +{ + int i; + + for (i = 0; i < pctrl->ncommunities; i++) { + const struct intel_community *community; + void __iomem *base; + unsigned int gpp; + + community = &pctrl->communities[i]; + base = community->regs; + + for (gpp = 0; gpp < community->ngpps; gpp++) { + /* Mask and 
clear all interrupts */ + writel(0, base + community->ie_offset + gpp * 4); + writel(0xffff, base + community->is_offset + gpp * 4); + } + } +} + +static int intel_gpio_irq_init_hw(struct gpio_chip *gc) +{ + struct intel_pinctrl *pctrl = gpiochip_get_data(gc); + + /* + * Make sure the interrupt lines are in a proper state before + * further configuration. + */ + intel_gpio_irq_init(pctrl); + + return 0; +} + static int intel_gpio_add_community_ranges(struct intel_pinctrl *pctrl, const struct intel_community *community) { @@ -1311,6 +1338,7 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq) girq->num_parents = 0; girq->default_type = IRQ_TYPE_NONE; girq->handler = handle_bad_irq; + girq->init_hw = intel_gpio_irq_init_hw; ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl); if (ret) { @@ -1640,26 +1668,6 @@ int intel_pinctrl_suspend_noirq(struct device *dev) } EXPORT_SYMBOL_GPL(intel_pinctrl_suspend_noirq); -static void intel_gpio_irq_init(struct intel_pinctrl *pctrl) -{ - size_t i; - - for (i = 0; i < pctrl->ncommunities; i++) { - const struct intel_community *community; - void __iomem *base; - unsigned int gpp; - - community = &pctrl->communities[i]; - base = community->regs; - - for (gpp = 0; gpp < community->ngpps; gpp++) { - /* Mask and clear all interrupts */ - writel(0, base + community->ie_offset + gpp * 4); - writel(0xffff, base + community->is_offset + gpp * 4); - } - } -} - static bool intel_gpio_update_reg(void __iomem *reg, u32 mask, u32 value) { u32 curr, updated; diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_common.c b/drivers/platform/x86/intel_speed_select_if/isst_if_common.c index 0c2aa22c7a12eaf36d9670dc450868fbc64b4543..407afafc7e83f0592e7e96a992974c462e43af42 100644 --- a/drivers/platform/x86/intel_speed_select_if/isst_if_common.c +++ b/drivers/platform/x86/intel_speed_select_if/isst_if_common.c @@ -532,7 +532,10 @@ static long isst_if_def_ioctl(struct file *file, unsigned int cmd, return ret; } -static DEFINE_MUTEX(punit_misc_dev_lock); +/* Lock to prevent module registration when already opened by user space */ +static DEFINE_MUTEX(punit_misc_dev_open_lock); +/* Lock to allow one shared misc device for all ISST interfaces */ +static DEFINE_MUTEX(punit_misc_dev_reg_lock); static int misc_usage_count; static int misc_device_ret; static int misc_device_open; @@ -542,7 +545,7 @@ static int isst_if_open(struct inode *inode, struct file *file) int i, ret = 0; /* Fail open, if a module is going away */ - mutex_lock(&punit_misc_dev_lock); + mutex_lock(&punit_misc_dev_open_lock); for (i = 0; i < ISST_IF_DEV_MAX; ++i) { struct isst_if_cmd_cb *cb = &punit_callbacks[i]; @@ -564,7 +567,7 @@ static int isst_if_open(struct inode *inode, struct file *file) } else { misc_device_open++; } - mutex_unlock(&punit_misc_dev_lock); + mutex_unlock(&punit_misc_dev_open_lock); return ret; } @@ -573,7 +576,7 @@ static int isst_if_relase(struct inode *inode, struct file *f) { int i; - mutex_lock(&punit_misc_dev_lock); + mutex_lock(&punit_misc_dev_open_lock); misc_device_open--; for (i = 0; i < ISST_IF_DEV_MAX; ++i) { struct isst_if_cmd_cb *cb = &punit_callbacks[i]; @@ -581,7 +584,7 @@ static int isst_if_relase(struct inode *inode, struct file *f) if (cb->registered) module_put(cb->owner); } - mutex_unlock(&punit_misc_dev_lock); + mutex_unlock(&punit_misc_dev_open_lock); return 0; } @@ -598,6 +601,43 @@ static struct miscdevice isst_if_char_driver = { .fops = &isst_if_char_driver_ops, }; +static int isst_misc_reg(void) +{ + 
mutex_lock(&punit_misc_dev_reg_lock); + if (misc_device_ret) + goto unlock_exit; + + if (!misc_usage_count) { + misc_device_ret = isst_if_cpu_info_init(); + if (misc_device_ret) + goto unlock_exit; + + misc_device_ret = misc_register(&isst_if_char_driver); + if (misc_device_ret) { + isst_if_cpu_info_exit(); + goto unlock_exit; + } + } + misc_usage_count++; + +unlock_exit: + mutex_unlock(&punit_misc_dev_reg_lock); + + return misc_device_ret; +} + +static void isst_misc_unreg(void) +{ + mutex_lock(&punit_misc_dev_reg_lock); + if (misc_usage_count) + misc_usage_count--; + if (!misc_usage_count && !misc_device_ret) { + misc_deregister(&isst_if_char_driver); + isst_if_cpu_info_exit(); + } + mutex_unlock(&punit_misc_dev_reg_lock); +} + /** * isst_if_cdev_register() - Register callback for IOCTL * @device_type: The device type this callback handling. @@ -615,38 +655,31 @@ static struct miscdevice isst_if_char_driver = { */ int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb) { - if (misc_device_ret) - return misc_device_ret; + int ret; if (device_type >= ISST_IF_DEV_MAX) return -EINVAL; - mutex_lock(&punit_misc_dev_lock); + mutex_lock(&punit_misc_dev_open_lock); + /* Device is already open, we don't want to add new callbacks */ if (misc_device_open) { - mutex_unlock(&punit_misc_dev_lock); + mutex_unlock(&punit_misc_dev_open_lock); return -EAGAIN; } - if (!misc_usage_count) { - int ret; - - misc_device_ret = misc_register(&isst_if_char_driver); - if (misc_device_ret) - goto unlock_exit; - - ret = isst_if_cpu_info_init(); - if (ret) { - misc_deregister(&isst_if_char_driver); - misc_device_ret = ret; - goto unlock_exit; - } - } memcpy(&punit_callbacks[device_type], cb, sizeof(*cb)); punit_callbacks[device_type].registered = 1; - misc_usage_count++; -unlock_exit: - mutex_unlock(&punit_misc_dev_lock); + mutex_unlock(&punit_misc_dev_open_lock); - return misc_device_ret; + ret = isst_misc_reg(); + if (ret) { + /* + * No need of mutex as the misc device register failed + * as no one can open device yet. Hence no contention. + */ + punit_callbacks[device_type].registered = 0; + return ret; + } + return 0; } EXPORT_SYMBOL_GPL(isst_if_cdev_register); @@ -661,16 +694,12 @@ EXPORT_SYMBOL_GPL(isst_if_cdev_register); */ void isst_if_cdev_unregister(int device_type) { - mutex_lock(&punit_misc_dev_lock); - misc_usage_count--; + isst_misc_unreg(); + mutex_lock(&punit_misc_dev_open_lock); punit_callbacks[device_type].registered = 0; if (device_type == ISST_IF_DEV_MBOX) isst_delete_hash(); - if (!misc_usage_count && !misc_device_ret) { - misc_deregister(&isst_if_char_driver); - isst_if_cpu_info_exit(); - } - mutex_unlock(&punit_misc_dev_lock); + mutex_unlock(&punit_misc_dev_open_lock); } EXPORT_SYMBOL_GPL(isst_if_cdev_unregister); diff --git a/drivers/platform/x86/surface3_power.c b/drivers/platform/x86/surface3_power.c index cc4f9cba68563c137c1c71785e8d6c26479ef2c6..01aacf1bee0749d63836c3553337871754c633a4 100644 --- a/drivers/platform/x86/surface3_power.c +++ b/drivers/platform/x86/surface3_power.c @@ -233,14 +233,21 @@ static int mshw0011_bix(struct mshw0011_data *cdata, struct bix *bix) } bix->last_full_charg_capacity = ret; - /* get serial number */ + /* + * Get serial number, on some devices (with unofficial replacement + * battery?) reading any of the serial number range addresses gets + * nacked in this case just leave the serial number empty. 
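The isst_if_common.c rework above splits the old single mutex into an open lock and a registration lock, and hides misc_register()/misc_deregister() behind a usage count so several client modules can share one character device. A compact sketch of that usage-counted pattern, with example_* names standing in for the driver's own symbols:

        #include <linux/miscdevice.h>
        #include <linux/mutex.h>

        static DEFINE_MUTEX(example_reg_lock);
        static int example_usage_count;
        static struct miscdevice example_miscdev;       /* .name/.fops set elsewhere */

        static int example_misc_get(void)
        {
                int ret = 0;

                mutex_lock(&example_reg_lock);
                if (!example_usage_count) {
                        /* First client: create the shared /dev node. */
                        ret = misc_register(&example_miscdev);
                        if (ret)
                                goto unlock;
                }
                example_usage_count++;
        unlock:
                mutex_unlock(&example_reg_lock);
                return ret;
        }

        static void example_misc_put(void)
        {
                mutex_lock(&example_reg_lock);
                if (example_usage_count && !--example_usage_count)
                        misc_deregister(&example_miscdev);      /* last client gone */
                mutex_unlock(&example_reg_lock);
        }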
+ */ ret = i2c_smbus_read_i2c_block_data(client, MSHW0011_BAT0_REG_SERIAL_NO, sizeof(buf), buf); - if (ret != sizeof(buf)) { + if (ret == -EREMOTEIO) { + /* no serial number available */ + } else if (ret != sizeof(buf)) { dev_err(&client->dev, "Error reading serial no: %d\n", ret); return ret; + } else { + snprintf(bix->serial, ARRAY_SIZE(bix->serial), "%3pE%6pE", buf + 7, buf); } - snprintf(bix->serial, ARRAY_SIZE(bix->serial), "%3pE%6pE", buf + 7, buf); /* get cycle count */ ret = i2c_smbus_read_word_data(client, MSHW0011_BAT0_REG_CYCLE_CNT); diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c index 59b7e90cd587569f4feef93cf43223caed96bf87..ab6a9369649dba450c19fd14324f45b68838b3b3 100644 --- a/drivers/platform/x86/touchscreen_dmi.c +++ b/drivers/platform/x86/touchscreen_dmi.c @@ -756,6 +756,21 @@ static const struct ts_dmi_data predia_basic_data = { .properties = predia_basic_props, }; +static const struct property_entry rwc_nanote_p8_props[] = { + PROPERTY_ENTRY_U32("touchscreen-min-y", 46), + PROPERTY_ENTRY_U32("touchscreen-size-x", 1728), + PROPERTY_ENTRY_U32("touchscreen-size-y", 1140), + PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-rwc-nanote-p8.fw"), + PROPERTY_ENTRY_U32("silead,max-fingers", 10), + { } +}; + +static const struct ts_dmi_data rwc_nanote_p8_data = { + .acpi_name = "MSSL1680:00", + .properties = rwc_nanote_p8_props, +}; + static const struct property_entry schneider_sct101ctm_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1715), PROPERTY_ENTRY_U32("touchscreen-size-y", 1140), @@ -1326,6 +1341,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = { DMI_EXACT_MATCH(DMI_BOARD_NAME, "0E57"), }, }, + { + /* RWC NANOTE P8 */ + .driver_data = (void *)&rwc_nanote_p8_data, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Default string"), + DMI_MATCH(DMI_PRODUCT_NAME, "AY07J"), + DMI_MATCH(DMI_PRODUCT_SKU, "0001") + }, + }, { /* Schneider SCT101CTM */ .driver_data = (void *)&schneider_sct101ctm_data, diff --git a/drivers/power/reset/mt6323-poweroff.c b/drivers/power/reset/mt6323-poweroff.c index 0532803e6cbc40225f8151d1716279d899d166a5..d90e76fcb938384a6f3e85acc920bb5394799b42 100644 --- a/drivers/power/reset/mt6323-poweroff.c +++ b/drivers/power/reset/mt6323-poweroff.c @@ -57,6 +57,9 @@ static int mt6323_pwrc_probe(struct platform_device *pdev) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; + pwrc->base = res->start; pwrc->regmap = mt6397_chip->regmap; pwrc->dev = &pdev->dev; diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c index bb944ee5fe3b174bcdc1d3f12e444e1611ebf270..03e146e98abd5eadb82b213824fba5a252f700b8 100644 --- a/drivers/regulator/qcom_smd-regulator.c +++ b/drivers/regulator/qcom_smd-regulator.c @@ -9,6 +9,7 @@ #include #include #include +#include #include struct qcom_rpm_reg { @@ -1107,52 +1108,91 @@ static const struct of_device_id rpm_of_match[] = { }; MODULE_DEVICE_TABLE(of, rpm_of_match); -static int rpm_reg_probe(struct platform_device *pdev) +/** + * rpm_regulator_init_vreg() - initialize all attributes of a qcom_smd-regulator + * @vreg: Pointer to the individual qcom_smd-regulator resource + * @dev: Pointer to the top level qcom_smd-regulator PMIC device + * @node: Pointer to the individual qcom_smd-regulator resource + * device node + * @rpm: Pointer to the rpm bus node + * @pmic_rpm_data: Pointer to a null-terminated array of qcom_smd-regulator + * resources 
defined for the top level PMIC device + * + * Return: 0 on success, errno on failure + */ +static int rpm_regulator_init_vreg(struct qcom_rpm_reg *vreg, struct device *dev, + struct device_node *node, struct qcom_smd_rpm *rpm, + const struct rpm_regulator_data *pmic_rpm_data) { - const struct rpm_regulator_data *reg; - const struct of_device_id *match; - struct regulator_config config = { }; + struct regulator_config config = {}; + const struct rpm_regulator_data *rpm_data; struct regulator_dev *rdev; + int ret; + + for (rpm_data = pmic_rpm_data; rpm_data->name; rpm_data++) + if (of_node_name_eq(node, rpm_data->name)) + break; + + if (!rpm_data->name) { + dev_err(dev, "Unknown regulator %pOFn\n", node); + return -EINVAL; + } + + vreg->dev = dev; + vreg->rpm = rpm; + vreg->type = rpm_data->type; + vreg->id = rpm_data->id; + + memcpy(&vreg->desc, rpm_data->desc, sizeof(vreg->desc)); + vreg->desc.name = rpm_data->name; + vreg->desc.supply_name = rpm_data->supply; + vreg->desc.owner = THIS_MODULE; + vreg->desc.type = REGULATOR_VOLTAGE; + vreg->desc.of_match = rpm_data->name; + + config.dev = dev; + config.of_node = node; + config.driver_data = vreg; + + rdev = devm_regulator_register(dev, &vreg->desc, &config); + if (IS_ERR(rdev)) { + ret = PTR_ERR(rdev); + dev_err(dev, "%pOFn: devm_regulator_register() failed, ret=%d\n", node, ret); + return ret; + } + + return 0; +} + +static int rpm_reg_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + const struct rpm_regulator_data *vreg_data; + struct device_node *node; struct qcom_rpm_reg *vreg; struct qcom_smd_rpm *rpm; + int ret; rpm = dev_get_drvdata(pdev->dev.parent); if (!rpm) { - dev_err(&pdev->dev, "unable to retrieve handle to rpm\n"); + dev_err(&pdev->dev, "Unable to retrieve handle to rpm\n"); return -ENODEV; } - match = of_match_device(rpm_of_match, &pdev->dev); - if (!match) { - dev_err(&pdev->dev, "failed to match device\n"); + vreg_data = of_device_get_match_data(dev); + if (!vreg_data) return -ENODEV; - } - for (reg = match->data; reg->name; reg++) { + for_each_available_child_of_node(dev->of_node, node) { vreg = devm_kzalloc(&pdev->dev, sizeof(*vreg), GFP_KERNEL); if (!vreg) return -ENOMEM; - vreg->dev = &pdev->dev; - vreg->type = reg->type; - vreg->id = reg->id; - vreg->rpm = rpm; - - memcpy(&vreg->desc, reg->desc, sizeof(vreg->desc)); - - vreg->desc.id = -1; - vreg->desc.owner = THIS_MODULE; - vreg->desc.type = REGULATOR_VOLTAGE; - vreg->desc.name = reg->name; - vreg->desc.supply_name = reg->supply; - vreg->desc.of_match = reg->name; - - config.dev = &pdev->dev; - config.driver_data = vreg; - rdev = devm_regulator_register(&pdev->dev, &vreg->desc, &config); - if (IS_ERR(rdev)) { - dev_err(&pdev->dev, "failed to register %s\n", reg->name); - return PTR_ERR(rdev); + ret = rpm_regulator_init_vreg(vreg, dev, node, rpm, vreg_data); + + if (ret < 0) { + of_node_put(node); + return ret; } } diff --git a/drivers/remoteproc/qcom_pil_info.c b/drivers/remoteproc/qcom_pil_info.c index 7c007dd7b2000d65d118c8b493ddbca593659e5a..aca21560e20b8ae07fc9c1048b311fad67ff4d7f 100644 --- a/drivers/remoteproc/qcom_pil_info.c +++ b/drivers/remoteproc/qcom_pil_info.c @@ -104,7 +104,7 @@ int qcom_pil_info_store(const char *image, phys_addr_t base, size_t size) return -ENOMEM; found_unused: - memcpy_toio(entry, image, PIL_RELOC_NAME_LEN); + memcpy_toio(entry, image, strnlen(image, PIL_RELOC_NAME_LEN)); found_existing: /* Use two writel() as base is only aligned to 4 bytes on odd entries */ writel(base, entry + PIL_RELOC_NAME_LEN); diff 
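The qcom_smd-regulator probe above now walks the available child nodes and registers one regulator per node, dropping the node reference before any early return. A minimal sketch of that loop, where register_one() is a hypothetical stand-in for the per-node init helper:

        #include <linux/of.h>
        #include <linux/device.h>

        static int example_register_children(struct device *dev)
        {
                struct device_node *node;
                int ret;

                for_each_available_child_of_node(dev->of_node, node) {
                        ret = register_one(dev, node);  /* hypothetical helper */
                        if (ret) {
                                /* The iterator holds a reference on 'node';
                                 * drop it when leaving the loop early. */
                                of_node_put(node);
                                return ret;
                        }
                }

                return 0;
        }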
--git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c index 4bbbacdbf3bb7702f185f007b638daf83378d489..be90d77c5168d2784a595e9836fdf1031a4f5326 100644 --- a/drivers/rpmsg/rpmsg_char.c +++ b/drivers/rpmsg/rpmsg_char.c @@ -92,7 +92,7 @@ static int rpmsg_eptdev_destroy(struct device *dev, void *data) /* wake up any blocked readers */ wake_up_interruptible(&eptdev->readq); - device_del(&eptdev->dev); + cdev_device_del(&eptdev->cdev, &eptdev->dev); put_device(&eptdev->dev); return 0; @@ -332,7 +332,6 @@ static void rpmsg_eptdev_release_device(struct device *dev) ida_simple_remove(&rpmsg_ept_ida, dev->id); ida_simple_remove(&rpmsg_minor_ida, MINOR(eptdev->dev.devt)); - cdev_del(&eptdev->cdev); kfree(eptdev); } @@ -377,19 +376,13 @@ static int rpmsg_eptdev_create(struct rpmsg_ctrldev *ctrldev, dev->id = ret; dev_set_name(dev, "rpmsg%d", ret); - ret = cdev_add(&eptdev->cdev, dev->devt, 1); + ret = cdev_device_add(&eptdev->cdev, &eptdev->dev); if (ret) goto free_ept_ida; /* We can now rely on the release function for cleanup */ dev->release = rpmsg_eptdev_release_device; - ret = device_add(dev); - if (ret) { - dev_err(dev, "device_add failed: %d\n", ret); - put_device(dev); - } - return ret; free_ept_ida: @@ -458,7 +451,6 @@ static void rpmsg_ctrldev_release_device(struct device *dev) ida_simple_remove(&rpmsg_ctrl_ida, dev->id); ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt)); - cdev_del(&ctrldev->cdev); kfree(ctrldev); } @@ -493,19 +485,13 @@ static int rpmsg_chrdev_probe(struct rpmsg_device *rpdev) dev->id = ret; dev_set_name(&ctrldev->dev, "rpmsg_ctrl%d", ret); - ret = cdev_add(&ctrldev->cdev, dev->devt, 1); + ret = cdev_device_add(&ctrldev->cdev, &ctrldev->dev); if (ret) goto free_ctrl_ida; /* We can now rely on the release function for cleanup */ dev->release = rpmsg_ctrldev_release_device; - ret = device_add(dev); - if (ret) { - dev_err(&rpdev->dev, "device_add failed: %d\n", ret); - put_device(dev); - } - dev_set_drvdata(&rpdev->dev, ctrldev); return ret; @@ -531,7 +517,7 @@ static void rpmsg_chrdev_remove(struct rpmsg_device *rpdev) if (ret) dev_warn(&rpdev->dev, "failed to nuke endpoints: %d\n", ret); - device_del(&ctrldev->dev); + cdev_device_del(&ctrldev->cdev, &ctrldev->dev); put_device(&ctrldev->dev); } diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c index 91de940896e3d03170b8f0661ac0d5abd1883711..028ca5961bc2afff4f9a183f7473925b202ecada 100644 --- a/drivers/rpmsg/rpmsg_core.c +++ b/drivers/rpmsg/rpmsg_core.c @@ -473,13 +473,25 @@ static int rpmsg_dev_probe(struct device *dev) err = rpdrv->probe(rpdev); if (err) { dev_err(dev, "%s: failed: %d\n", __func__, err); - if (ept) - rpmsg_destroy_ept(ept); - goto out; + goto destroy_ept; } - if (ept && rpdev->ops->announce_create) + if (ept && rpdev->ops->announce_create) { err = rpdev->ops->announce_create(rpdev); + if (err) { + dev_err(dev, "failed to announce creation\n"); + goto remove_rpdev; + } + } + + return 0; + +remove_rpdev: + if (rpdrv->remove) + rpdrv->remove(rpdev); +destroy_ept: + if (ept) + rpmsg_destroy_ept(ept); out: return err; } diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index c633319cdb91319d8af218ec4eda30503e59ec4e..58c6382a2807caa293ed30da83ab0d4809c15463 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -463,7 +463,10 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t) min = t->time.tm_min; sec = t->time.tm_sec; + spin_lock_irq(&rtc_lock); rtc_control = CMOS_READ(RTC_CONTROL); + spin_unlock_irq(&rtc_lock); + if (!(rtc_control & 
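The rpmsg_char conversion above folds the separate cdev_add()/device_add() steps, and their mirrored teardown, into cdev_device_add()/cdev_device_del(), which register and remove the two objects as a pair. A sketch of that pattern, assuming a container whose embedded device and cdev were already prepared with device_initialize() and cdev_init() and whose dev.devt is assigned:

        #include <linux/cdev.h>
        #include <linux/device.h>

        struct example_ept {
                struct device dev;      /* assumed: device_initialize() done, devt set */
                struct cdev cdev;       /* assumed: cdev_init() done */
        };

        static int example_add(struct example_ept *ept)
        {
                /* Adds the cdev, then the device; on failure nothing is
                 * left half-registered. */
                return cdev_device_add(&ept->cdev, &ept->dev);
        }

        static void example_del(struct example_ept *ept)
        {
                /* Mirror of example_add(): removes device and cdev together. */
                cdev_device_del(&ept->cdev, &ept->dev);
                put_device(&ept->dev);  /* final reference drop runs release() */
        }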
RTC_DM_BINARY) || RTC_ALWAYS_BCD) { /* Writing 0xff means "don't care" or "match all". */ mon = (mon <= 12) ? bin2bcd(mon) : 0xff; diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c index 2ecd8752b088b6cfe1bdb97fd57511d72778c562..5add637c9ad23ea969379bfb14b71cd6c22c77a4 100644 --- a/drivers/rtc/rtc-mc146818-lib.c +++ b/drivers/rtc/rtc-mc146818-lib.c @@ -83,7 +83,7 @@ unsigned int mc146818_get_time(struct rtc_time *time) time->tm_year += real_year - 72; #endif - if (century > 20) + if (century > 19) time->tm_year += (century - 19) * 100; /* diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c index d2f1d8f754bf3a3797b404ef0dc6cc9fca5d26c9..cf8119b6d32044b343d63e71f669190cc38ecfa1 100644 --- a/drivers/rtc/rtc-pxa.c +++ b/drivers/rtc/rtc-pxa.c @@ -330,6 +330,10 @@ static int __init pxa_rtc_probe(struct platform_device *pdev) if (sa1100_rtc->irq_alarm < 0) return -ENXIO; + sa1100_rtc->rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(sa1100_rtc->rtc)) + return PTR_ERR(sa1100_rtc->rtc); + pxa_rtc->base = devm_ioremap(dev, pxa_rtc->ress->start, resource_size(pxa_rtc->ress)); if (!pxa_rtc->base) { diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index d24cafe02708fa941346510c6fd8aac21f1110e3..511bf8e0a436c2eb40f2601c6a68edf1724ea887 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -521,6 +521,8 @@ static void zfcp_fc_adisc_handler(void *data) goto out; } + /* re-init to undo drop from zfcp_fc_adisc() */ + port->d_id = ntoh24(adisc_resp->adisc_port_id); /* port is good, unblock rport without going through erp */ zfcp_scsi_schedule_rport_register(port); out: @@ -534,6 +536,7 @@ static int zfcp_fc_adisc(struct zfcp_port *port) struct zfcp_fc_req *fc_req; struct zfcp_adapter *adapter = port->adapter; struct Scsi_Host *shost = adapter->scsi_host; + u32 d_id; int ret; fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC); @@ -558,7 +561,15 @@ static int zfcp_fc_adisc(struct zfcp_port *port) fc_req->u.adisc.req.adisc_cmd = ELS_ADISC; hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost)); - ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els, + d_id = port->d_id; /* remember as destination for send els below */ + /* + * Force fresh GID_PN lookup on next port recovery. + * Must happen after request setup and before sending request, + * to prevent race with port->d_id re-init in zfcp_fc_adisc_handler(). 
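The rtc-cmos hunk above takes the global rtc_lock around the previously unlocked CMOS_READ(RTC_CONTROL); the CMOS index/data port pair is shared state, so accesses have to be serialised. The same access pattern in isolation:

        #include <linux/mc146818rtc.h>
        #include <linux/spinlock.h>

        static unsigned char example_read_rtc_control(void)
        {
                unsigned char rtc_control;

                /* CMOS_READ() touches the shared index/data ports, so the
                 * global rtc_lock must be held around the access. */
                spin_lock_irq(&rtc_lock);
                rtc_control = CMOS_READ(RTC_CONTROL);
                spin_unlock_irq(&rtc_lock);

                return rtc_control;
        }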
+ */ + port->d_id = 0; + + ret = zfcp_fsf_send_els(adapter, d_id, &fc_req->ct_els, ZFCP_FC_CTELS_TMO); if (ret) kmem_cache_free(zfcp_fc_req_cache, fc_req); diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 6890bbe04a8c14d431b0cfb4990067c43e01060f..8f47bf83694f6b033ba4031684f12d93fed167a7 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -80,7 +80,7 @@ static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba); static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba); static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface, struct device *parent, int npiv); -static void bnx2fc_destroy_work(struct work_struct *work); +static void bnx2fc_port_destroy(struct fcoe_port *port); static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev); static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device @@ -506,7 +506,8 @@ static int bnx2fc_l2_rcv_thread(void *arg) static void bnx2fc_recv_frame(struct sk_buff *skb) { - u32 fr_len; + u64 crc_err; + u32 fr_len, fr_crc; struct fc_lport *lport; struct fcoe_rcv_info *fr; struct fc_stats *stats; @@ -540,6 +541,11 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) skb_pull(skb, sizeof(struct fcoe_hdr)); fr_len = skb->len - sizeof(struct fcoe_crc_eof); + stats = per_cpu_ptr(lport->stats, get_cpu()); + stats->RxFrames++; + stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; + put_cpu(); + fp = (struct fc_frame *)skb; fc_frame_init(fp); fr_dev(fp) = lport; @@ -622,16 +628,15 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) return; } - stats = per_cpu_ptr(lport->stats, smp_processor_id()); - stats->RxFrames++; - stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; + fr_crc = le32_to_cpu(fr_crc(fp)); - if (le32_to_cpu(fr_crc(fp)) != - ~crc32(~0, skb->data, fr_len)) { - if (stats->InvalidCRCCount < 5) + if (unlikely(fr_crc != ~crc32(~0, skb->data, fr_len))) { + stats = per_cpu_ptr(lport->stats, get_cpu()); + crc_err = (stats->InvalidCRCCount++); + put_cpu(); + if (crc_err < 5) printk(KERN_WARNING PFX "dropping frame with " "CRC error\n"); - stats->InvalidCRCCount++; kfree_skb(skb); return; } @@ -905,9 +910,6 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event, __bnx2fc_destroy(interface); } mutex_unlock(&bnx2fc_dev_lock); - - /* Ensure ALL destroy work has been completed before return */ - flush_workqueue(bnx2fc_wq); return; default: @@ -1213,8 +1215,8 @@ static int bnx2fc_vport_destroy(struct fc_vport *vport) mutex_unlock(&n_port->lp_mutex); bnx2fc_free_vport(interface->hba, port->lport); bnx2fc_port_shutdown(port->lport); + bnx2fc_port_destroy(port); bnx2fc_interface_put(interface); - queue_work(bnx2fc_wq, &port->destroy_work); return 0; } @@ -1523,7 +1525,6 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface, port->lport = lport; port->priv = interface; port->get_netdev = bnx2fc_netdev; - INIT_WORK(&port->destroy_work, bnx2fc_destroy_work); /* Configure fcoe_port */ rc = bnx2fc_lport_config(lport); @@ -1651,8 +1652,8 @@ static void __bnx2fc_destroy(struct bnx2fc_interface *interface) bnx2fc_interface_cleanup(interface); bnx2fc_stop(interface); list_del(&interface->list); + bnx2fc_port_destroy(port); bnx2fc_interface_put(interface); - queue_work(bnx2fc_wq, &port->destroy_work); } /** @@ -1692,15 +1693,12 @@ netdev_err: return rc; } -static void bnx2fc_destroy_work(struct work_struct *work) +static void bnx2fc_port_destroy(struct fcoe_port *port) { - struct fcoe_port *port; struct fc_lport *lport; - 
port = container_of(work, struct fcoe_port, destroy_work); lport = port->lport; - - BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n"); + BNX2FC_HBA_DBG(lport, "Entered %s, destroying lport %p\n", __func__, lport); bnx2fc_if_destroy(lport); } @@ -2554,9 +2552,6 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev) __bnx2fc_destroy(interface); mutex_unlock(&bnx2fc_dev_lock); - /* Ensure ALL destroy work has been completed before return */ - flush_workqueue(bnx2fc_wq); - bnx2fc_ulp_stop(hba); /* unregister cnic device */ if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic)) diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 93e507677bdcb35934d8a563cb0d5d36df224657..03bc472f302a290607cfde8ac7cddb3bd6389776 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -374,6 +374,7 @@ struct lpfc_vport { #define FC_VPORT_LOGO_RCVD 0x200 /* LOGO received on vport */ #define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */ #define FC_LOGO_RCVD_DID_CHNG 0x800 /* FDISC on phys port detect DID chng*/ +#define FC_PT2PT_NO_NVME 0x1000 /* Don't send NVME PRLI */ #define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */ #define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */ #define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */ @@ -763,7 +764,6 @@ struct lpfc_hba { #define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */ #define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */ #define HBA_IOQ_FLUSH 0x8000 /* FCP/NVME I/O queues being flushed */ -#define HBA_FW_DUMP_OP 0x10000 /* Skips fn reset before FW dump */ #define HBA_RECOVERABLE_UE 0x20000 /* Firmware supports recoverable UE */ #define HBA_FORCED_LINK_SPEED 0x40000 /* * Firmware supports Forced Link Speed @@ -772,6 +772,7 @@ struct lpfc_hba { #define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */ #define HBA_DEFER_FLOGI 0x800000 /* Defer FLOGI till read_sparm cmpl */ + struct completion *fw_dump_cmpl; /* cmpl event tracker for fw_dump */ uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ struct lpfc_dmabuf slim2p; @@ -898,6 +899,16 @@ struct lpfc_hba { uint32_t cfg_hostmem_hgp; uint32_t cfg_log_verbose; uint32_t cfg_enable_fc4_type; +#define LPFC_ENABLE_FCP 1 +#define LPFC_ENABLE_NVME 2 +#define LPFC_ENABLE_BOTH 3 +#if (IS_ENABLED(CONFIG_NVME_FC)) +#define LPFC_MAX_ENBL_FC4_TYPE LPFC_ENABLE_BOTH +#define LPFC_DEF_ENBL_FC4_TYPE LPFC_ENABLE_BOTH +#else +#define LPFC_MAX_ENBL_FC4_TYPE LPFC_ENABLE_FCP +#define LPFC_DEF_ENBL_FC4_TYPE LPFC_ENABLE_FCP +#endif uint32_t cfg_aer_support; uint32_t cfg_sriov_nr_virtfn; uint32_t cfg_request_firmware_upgrade; @@ -918,9 +929,6 @@ struct lpfc_hba { uint32_t cfg_ras_fwlog_func; uint32_t cfg_enable_bbcr; /* Enable BB Credit Recovery */ uint32_t cfg_enable_dpp; /* Enable Direct Packet Push */ -#define LPFC_ENABLE_FCP 1 -#define LPFC_ENABLE_NVME 2 -#define LPFC_ENABLE_BOTH 3 uint32_t cfg_enable_pbde; struct nvmet_fc_target_port *targetport; lpfc_vpd_t vpd; /* vital product data */ diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 2c59a5bf353907237c648794608b5510c909e4d0..f0d1ced6301624f71aa274f56dc26cb77f1191c0 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1142,6 +1142,9 @@ lpfc_issue_lip(struct Scsi_Host *shost) pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK; pmboxq->u.mb.mbxOwner = OWN_HOST; + if ((vport->fc_flag & FC_PT2PT) && (vport->fc_flag & FC_PT2PT_NO_NVME)) + vport->fc_flag &= ~FC_PT2PT_NO_NVME; + mbxstatus = lpfc_sli_issue_mbox_wait(phba, 
pmboxq, LPFC_MBOX_TMO * 2); if ((mbxstatus == MBX_SUCCESS) && @@ -1536,25 +1539,25 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode) before_fc_flag = phba->pport->fc_flag; sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn; - /* Disable SR-IOV virtual functions if enabled */ - if (phba->cfg_sriov_nr_virtfn) { - pci_disable_sriov(pdev); - phba->cfg_sriov_nr_virtfn = 0; - } + if (opcode == LPFC_FW_DUMP) { + init_completion(&online_compl); + phba->fw_dump_cmpl = &online_compl; + } else { + /* Disable SR-IOV virtual functions if enabled */ + if (phba->cfg_sriov_nr_virtfn) { + pci_disable_sriov(pdev); + phba->cfg_sriov_nr_virtfn = 0; + } - if (opcode == LPFC_FW_DUMP) - phba->hba_flag |= HBA_FW_DUMP_OP; + status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); - status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); + if (status != 0) + return status; - if (status != 0) { - phba->hba_flag &= ~HBA_FW_DUMP_OP; - return status; + /* wait for the device to be quiesced before firmware reset */ + msleep(100); } - /* wait for the device to be quiesced before firmware reset */ - msleep(100); - reg_val = readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET); @@ -1583,24 +1586,42 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode) lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "3153 Fail to perform the requested " "access: x%x\n", reg_val); + if (phba->fw_dump_cmpl) + phba->fw_dump_cmpl = NULL; return rc; } /* keep the original port state */ - if (before_fc_flag & FC_OFFLINE_MODE) - goto out; - - init_completion(&online_compl); - job_posted = lpfc_workq_post_event(phba, &status, &online_compl, - LPFC_EVT_ONLINE); - if (!job_posted) + if (before_fc_flag & FC_OFFLINE_MODE) { + if (phba->fw_dump_cmpl) + phba->fw_dump_cmpl = NULL; goto out; + } - wait_for_completion(&online_compl); + /* Firmware dump will trigger an HA_ERATT event, and + * lpfc_handle_eratt_s4 routine already handles bringing the port back + * online. + */ + if (opcode == LPFC_FW_DUMP) { + wait_for_completion(phba->fw_dump_cmpl); + } else { + init_completion(&online_compl); + job_posted = lpfc_workq_post_event(phba, &status, &online_compl, + LPFC_EVT_ONLINE); + if (!job_posted) + goto out; + wait_for_completion(&online_compl); + } out: /* in any case, restore the virtual functions enabled as before */ if (sriov_nr_virtfn) { + /* If fw_dump was performed, first disable to clean up */ + if (opcode == LPFC_FW_DUMP) { + pci_disable_sriov(pdev); + phba->cfg_sriov_nr_virtfn = 0; + } + sriov_err = lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn); if (!sriov_err) @@ -3779,8 +3800,8 @@ LPFC_ATTR_R(nvmet_mrq_post, * 3 - register both FCP and NVME * Supported values are [1,3]. 
Default value is 3 */ -LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH, - LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH, +LPFC_ATTR_R(enable_fc4_type, LPFC_DEF_ENBL_FC4_TYPE, + LPFC_ENABLE_FCP, LPFC_MAX_ENBL_FC4_TYPE, "Enable FC4 Protocol support - FCP / NVME"); /* diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 3d9889b3d5c8a6ce487d4137501f9bb2ccbcee0b..387b0cd1ea18f8624c49ddd521a4a1db64ece118 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1067,7 +1067,8 @@ stop_rr_fcf_flogi: /* FLOGI failed, so there is no fabric */ spin_lock_irq(shost->host_lock); - vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); + vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP | + FC_PT2PT_NO_NVME); spin_unlock_irq(shost->host_lock); /* If private loop, then allow max outstanding els to be @@ -3945,6 +3946,23 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Added for Vendor specifc support * Just keep retrying for these Rsn / Exp codes */ + if ((vport->fc_flag & FC_PT2PT) && + cmd == ELS_CMD_NVMEPRLI) { + switch (stat.un.b.lsRjtRsnCode) { + case LSRJT_UNABLE_TPC: + case LSRJT_INVALID_CMD: + case LSRJT_LOGICAL_ERR: + case LSRJT_CMD_UNSUPPORTED: + lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, + "0168 NVME PRLI LS_RJT " + "reason %x port doesn't " + "support NVME, disabling NVME\n", + stat.un.b.lsRjtRsnCode); + retry = 0; + vport->fc_flag |= FC_PT2PT_NO_NVME; + goto out_retry; + } + } switch (stat.un.b.lsRjtRsnCode) { case LSRJT_UNABLE_TPC: /* The driver has a VALID PLOGI but the rport has diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index f4a672e5497161e1cb4666244e0c4a05e2937ffb..68ff233f936e5bde73c1718e48a3c9ebbea09161 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -635,10 +635,16 @@ lpfc_work_done(struct lpfc_hba *phba) if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) lpfc_sli4_post_async_mbox(phba); - if (ha_copy & HA_ERATT) + if (ha_copy & HA_ERATT) { /* Handle the error attention event */ lpfc_handle_eratt(phba); + if (phba->fw_dump_cmpl) { + complete(phba->fw_dump_cmpl); + phba->fw_dump_cmpl = NULL; + } + } + if (ha_copy & HA_MBATT) lpfc_sli_handle_mb_event(phba); diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 37612299a34a14ca4825cee107e19b3e04e9035c..1149bfc42fe6442e955912d28062c9bb312280b8 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1998,7 +1998,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) } if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) { - lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "3143 Port Down: Firmware Update " "Detected\n"); en_rn_msg = false; diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 6afcb1426e35724f48d61219e8776110204d2b70..e33f752318c198657b6d534c26f0c0fea883fe3f 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -2010,8 +2010,9 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, * is configured try it. 
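For the firmware-dump path above, lpfc now parks a completion in phba->fw_dump_cmpl and lets the error-attention worker signal it once recovery has run, instead of forcing the port offline first. A simplified sketch of that handshake (no locking shown; the example_* names are placeholders, not the driver's):

        #include <linux/completion.h>

        struct example_hba {
                struct completion *dump_cmpl;   /* set only while a dump is pending */
        };

        static int example_trigger_dump(struct example_hba *hba)
        {
                DECLARE_COMPLETION_ONSTACK(done);

                hba->dump_cmpl = &done;         /* publish before kicking off the dump */
                /* ... write the register that starts the firmware dump ... */

                wait_for_completion(&done);     /* error handler signals us */
                return 0;
        }

        /* Called from the error-handling/worker path once recovery is done. */
        static void example_handle_error(struct example_hba *hba)
        {
                if (hba->dump_cmpl) {
                        complete(hba->dump_cmpl);
                        hba->dump_cmpl = NULL;
                }
        }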
*/ ndlp->nlp_fc4_type |= NLP_FC4_FCP; - if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || - (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { + if ((!(vport->fc_flag & FC_PT2PT_NO_NVME)) && + (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH || + vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { ndlp->nlp_fc4_type |= NLP_FC4_NVME; /* We need to update the localport also */ lpfc_nvme_update_localport(vport); diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 06a23718a7c7feb630addf1fd8e24cde908f18d8..a50f870c5f7250efc2106df130fc8cd509185f56 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -4629,12 +4629,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba) phba->fcf.fcf_flag = 0; spin_unlock_irq(&phba->hbalock); - /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */ - if (phba->hba_flag & HBA_FW_DUMP_OP) { - phba->hba_flag &= ~HBA_FW_DUMP_OP; - return rc; - } - /* Now physically reset the device */ lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0389 Performing PCI function reset!\n"); @@ -7378,6 +7372,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) struct lpfc_vport *vport = phba->pport; struct lpfc_dmabuf *mp; struct lpfc_rqb *rqbp; + u32 flg; /* Perform a PCI function reset to start from clean */ rc = lpfc_pci_function_reset(phba); @@ -7391,7 +7386,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) else { spin_lock_irq(&phba->hbalock); phba->sli.sli_flag |= LPFC_SLI_ACTIVE; + flg = phba->sli.sli_flag; spin_unlock_irq(&phba->hbalock); + /* Allow a little time after setting SLI_ACTIVE for any polled + * MBX commands to complete via BSG. + */ + for (i = 0; i < 50 && (flg & LPFC_SLI_MBOX_ACTIVE); i++) { + msleep(20); + spin_lock_irq(&phba->hbalock); + flg = phba->sli.sli_flag; + spin_unlock_irq(&phba->hbalock); + } } lpfc_sli4_dip(phba); @@ -8928,7 +8933,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, "(%d):2541 Mailbox command x%x " "(x%x/x%x) failure: " "mqe_sta: x%x mcqe_sta: x%x/x%x " - "Data: x%x x%x\n,", + "Data: x%x x%x\n", mboxq->vport ? mboxq->vport->vpi : 0, mboxq->u.mb.mbxCommand, lpfc_sli_config_mbox_subsys_get(phba, @@ -8962,7 +8967,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, "(%d):2597 Sync Mailbox command " "x%x (x%x/x%x) failure: " "mqe_sta: x%x mcqe_sta: x%x/x%x " - "Data: x%x x%x\n,", + "Data: x%x x%x\n", mboxq->vport ? 
mboxq->vport->vpi : 0, mboxq->u.mb.mbxCommand, lpfc_sli_config_mbox_subsys_get(phba, @@ -12408,6 +12413,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba) uint32_t uerr_sta_hi, uerr_sta_lo; uint32_t if_type, portsmphr; struct lpfc_register portstat_reg; + u32 logmask; /* * For now, use the SLI4 device internal unrecoverable error @@ -12458,7 +12464,12 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba) readl(phba->sli4_hba.u.if_type2.ERR1regaddr); phba->work_status[1] = readl(phba->sli4_hba.u.if_type2.ERR2regaddr); - lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + logmask = LOG_TRACE_EVENT; + if (phba->work_status[0] == + SLIPORT_ERR1_REG_ERR_CODE_2 && + phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART) + logmask = LOG_SLI; + lpfc_printf_log(phba, KERN_ERR, logmask, "2885 Port Status Event: " "port status reg 0x%x, " "port smphr reg 0x%x, " diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c index 78c41bbf67562c03c514ff342568071041ed7ddd..e6a6678967e52e0b4a181d3510b8b56f5fa4ff1f 100644 --- a/drivers/scsi/myrs.c +++ b/drivers/scsi/myrs.c @@ -2272,7 +2272,8 @@ static void myrs_cleanup(struct myrs_hba *cs) myrs_unmap(cs); if (cs->mmio_base) { - cs->disable_intr(cs); + if (cs->disable_intr) + cs->disable_intr(cs); iounmap(cs->mmio_base); cs->mmio_base = NULL; } diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index 5d751628a6340942d86e4464b32140ce3b5b1439..9b318958d78cc393a2a33c5db0b73d401e9d6aae 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -1323,7 +1323,9 @@ int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, int q_index = circularQ - pm8001_ha->inbnd_q_tbl; int rv = -1; - WARN_ON(q_index >= PM8001_MAX_INB_NUM); + if (WARN_ON(q_index >= pm8001_ha->max_q_num)) + return -EINVAL; + spin_lock_irqsave(&circularQ->iq_lock, flags); rv = pm8001_mpi_msg_free_get(circularQ, pm8001_ha->iomb_size, &pMessage); diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index c3bb58885033b33889a40761bfffd0f80283b9c4..75ac4d86d9c4bc43995cab5410319bac0c0a6816 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -753,8 +753,13 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev, res = -TMF_RESP_FUNC_FAILED; /* Even TMF timed out, return direct. 
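The pm8001_mpi_build_cmd() fix above relies on WARN_ON() evaluating to its condition, so an out-of-range queue index still leaves a backtrace in the log but the function now bails out instead of indexing past the queue table. The same idiom in isolation:

        #include <linux/bug.h>
        #include <linux/errno.h>

        static int example_build_cmd(unsigned int q_index, unsigned int max_q_num)
        {
                /* WARN_ON() returns the condition, so it can gate an early
                 * bail-out while still reporting the unexpected value. */
                if (WARN_ON(q_index >= max_q_num))
                        return -EINVAL;

                /* ... safe to index the inbound queue array here ... */
                return 0;
        }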
*/ if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { + struct pm8001_ccb_info *ccb = task->lldd_task; + pm8001_dbg(pm8001_ha, FAIL, "TMF task[%x]timeout.\n", tmf->tmf); + + if (ccb) + ccb->task = NULL; goto ex_err; } diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index a203a4fc2674a0b6aac60be64d0fcf7a81580737..2a3ce4680734be48f141037bdccbc954337df186 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -2133,9 +2133,9 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb) pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", t, status, ts->resp, ts->stat); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); if (t->slow_task) complete(&t->slow_task->completion); - pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); } else { spin_unlock_irqrestore(&t->task_state_lock, flags); pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); @@ -2726,9 +2726,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", t, status, ts->resp, ts->stat); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); if (t->slow_task) complete(&t->slow_task->completion); - pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); } else { spin_unlock_irqrestore(&t->task_state_lock, flags); pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); @@ -4057,10 +4057,22 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec) unsigned long flags; u32 regval; + /* + * Fatal errors are programmed to be signalled in irq vector + * pm8001_ha->max_q_num - 1 through pm8001_ha->main_cfg_tbl.pm80xx_tbl. + * fatal_err_interrupt + */ if (vec == (pm8001_ha->max_q_num - 1)) { + u32 mipsall_ready; + + if (pm8001_ha->chip_id == chip_8008 || + pm8001_ha->chip_id == chip_8009) + mipsall_ready = SCRATCH_PAD_MIPSALL_READY_8PORT; + else + mipsall_ready = SCRATCH_PAD_MIPSALL_READY_16PORT; + regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); - if ((regval & SCRATCH_PAD_MIPSALL_READY) != - SCRATCH_PAD_MIPSALL_READY) { + if ((regval & mipsall_ready) != mipsall_ready) { pm8001_ha->controller_fatal_error = true; pm8001_dbg(pm8001_ha, FAIL, "Firmware Fatal error! 
Regval:0x%x\n", diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h index 701951a0f715bb18f943df9b3771d875a330eb24..0dfe9034f7e7f69b58bee99d0c3294de3ca2a0f2 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.h +++ b/drivers/scsi/pm8001/pm80xx_hwi.h @@ -1391,8 +1391,12 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t; #define SCRATCH_PAD_BOOT_LOAD_SUCCESS 0x0 #define SCRATCH_PAD_IOP0_READY 0xC00 #define SCRATCH_PAD_IOP1_READY 0x3000 -#define SCRATCH_PAD_MIPSALL_READY (SCRATCH_PAD_IOP1_READY | \ +#define SCRATCH_PAD_MIPSALL_READY_16PORT (SCRATCH_PAD_IOP1_READY | \ SCRATCH_PAD_IOP0_READY | \ + SCRATCH_PAD_ILA_READY | \ + SCRATCH_PAD_RAAE_READY) +#define SCRATCH_PAD_MIPSALL_READY_8PORT (SCRATCH_PAD_IOP0_READY | \ + SCRATCH_PAD_ILA_READY | \ SCRATCH_PAD_RAAE_READY) /* boot loader state */ diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c index 63f99f4eeed972bb8d149d599050bc876fde7db1..472374d83cede79f44e41c65d9511ec3c758820b 100644 --- a/drivers/scsi/qedf/qedf_io.c +++ b/drivers/scsi/qedf/qedf_io.c @@ -2268,6 +2268,7 @@ process_els: io_req->tm_flags == FCP_TMF_TGT_RESET) { clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags); io_req->sc_cmd = NULL; + kref_put(&io_req->refcount, qedf_release_cmd); complete(&io_req->tm_done); } diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index c63dcc39f76c2ae5df87e67f7b847f490c81bce8..e64457f53da86da662c9b7ee4b3ccfa844b04c70 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -1859,6 +1859,7 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled) vport_qedf->cmd_mgr = base_qedf->cmd_mgr; init_completion(&vport_qedf->flogi_compl); INIT_LIST_HEAD(&vport_qedf->fcports); + INIT_DELAYED_WORK(&vport_qedf->stag_work, qedf_stag_change_work); rc = qedf_vport_libfc_config(vport, vn_port); if (rc) { diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c index c19ea7ab54cbd2b7daf96e19550454fd75ffaf34..d9a18124cfc9d4604930d431a9f2cca118f9ffc1 100644 --- a/drivers/scsi/scsi_debugfs.c +++ b/drivers/scsi/scsi_debugfs.c @@ -10,6 +10,7 @@ static const char *const scsi_cmd_flags[] = { SCSI_CMD_FLAG_NAME(TAGGED), SCSI_CMD_FLAG_NAME(UNCHECKED_ISA_DMA), SCSI_CMD_FLAG_NAME(INITIALIZED), + SCSI_CMD_FLAG_NAME(LAST), }; #undef SCSI_CMD_FLAG_NAME diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c index 3717eea37ecb386fd772e6dd90182b3674d41ceb..e91a0a5bc7a3ec1b8fb4d99c7a4c934517a39363 100644 --- a/drivers/scsi/scsi_pm.c +++ b/drivers/scsi/scsi_pm.c @@ -262,7 +262,7 @@ static int sdev_runtime_resume(struct device *dev) blk_pre_runtime_resume(sdev->request_queue); if (pm && pm->runtime_resume) err = pm->runtime_resume(dev); - blk_post_runtime_resume(sdev->request_queue, err); + blk_post_runtime_resume(sdev->request_queue); return err; } diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 4cb4ab9c6137ee7d4c5ba509b37438c76c644aef..464418413ced0ec7b6ecabcbd4f0a8448017a0cb 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -917,7 +917,7 @@ static void get_capabilities(struct scsi_cd *cd) /* allocate transfer buffer */ - buffer = kmalloc(512, GFP_KERNEL | GFP_DMA); + buffer = kmalloc(512, GFP_KERNEL); if (!buffer) { sr_printk(KERN_ERR, cd, "out of memory.\n"); return; diff --git a/drivers/scsi/sr_vendor.c b/drivers/scsi/sr_vendor.c index 1f988a1b9166f49f83384e7525b35f37861580a4..a61635326ae0ac07e7a042b18ceb708ebd243de6 100644 --- a/drivers/scsi/sr_vendor.c +++ b/drivers/scsi/sr_vendor.c @@ -131,7 +131,7 @@ int 
sr_set_blocklength(Scsi_CD *cd, int blocklength) if (cd->vendor == VENDOR_TOSHIBA) density = (blocklength > 2048) ? 0x81 : 0x83; - buffer = kmalloc(512, GFP_KERNEL | GFP_DMA); + buffer = kmalloc(512, GFP_KERNEL); if (!buffer) return -ENOMEM; @@ -179,7 +179,7 @@ int sr_cd_check(struct cdrom_device_info *cdi) if (cd->cdi.mask & CDC_MULTI_SESSION) return 0; - buffer = kmalloc(512, GFP_KERNEL | GFP_DMA); + buffer = kmalloc(512, GFP_KERNEL); if (!buffer) return -ENOMEM; diff --git a/drivers/scsi/ufs/tc-dwc-g210-pci.c b/drivers/scsi/ufs/tc-dwc-g210-pci.c index 67a6a61154b7184771bbe2447f19a84f69ac4b18..4e471484539d24abd57030bf2e8799c4d546f9e9 100644 --- a/drivers/scsi/ufs/tc-dwc-g210-pci.c +++ b/drivers/scsi/ufs/tc-dwc-g210-pci.c @@ -135,7 +135,6 @@ tc_dwc_g210_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return err; } - pci_set_drvdata(pdev, hba); pm_runtime_put_noidle(&pdev->dev); pm_runtime_allow(&pdev->dev); diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c index fadd566025b86ab4ffe18aefd366823ea6f1ae98..4bf8ec88676ee54a08e65c3408c66b0a051f951e 100644 --- a/drivers/scsi/ufs/ufshcd-pci.c +++ b/drivers/scsi/ufs/ufshcd-pci.c @@ -347,8 +347,6 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return err; } - pci_set_drvdata(pdev, hba); - hba->vops = (struct ufs_hba_variant_ops *)id->driver_data; err = ufshcd_init(hba, mmio_base, pdev->irq); diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c index 8c92d1bde64be902b23cb1fb03049fec56f70dde..0f2430fb398db6b780b70c87e03c66ca20c17174 100644 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c @@ -92,6 +92,11 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba) clki->min_freq = clkfreq[i]; clki->max_freq = clkfreq[i+1]; clki->name = devm_kstrdup(dev, name, GFP_KERNEL); + if (!clki->name) { + ret = -ENOMEM; + goto out; + } + if (!strcmp(name, "ref_clk")) clki->keep_link_active = true; dev_dbg(dev, "%s: min %u max %u name %s\n", "freq-table-hz", @@ -128,6 +133,8 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name, return -ENOMEM; vreg->name = devm_kstrdup(dev, name, GFP_KERNEL); + if (!vreg->name) + return -ENOMEM; snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name); if (of_property_read_u32(np, prop_name, &vreg->max_uA)) { @@ -412,8 +419,6 @@ int ufshcd_pltfrm_init(struct platform_device *pdev, goto dealloc_host; } - platform_set_drvdata(pdev, hba); - pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index e3a9a02cadf5a7240216a633e447b11257628987..bf302776340ce1b5a324410b81e34c36c5467e9e 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -9085,6 +9085,13 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) struct device *dev = hba->dev; char eh_wq_name[sizeof("ufs_eh_wq_00")]; + /* + * dev_set_drvdata() must be called before any callbacks are registered + * that use dev_get_drvdata() (frequency scaling, clock scaling, hwmon, + * sysfs). 
+ */ + dev_set_drvdata(dev, hba); + if (!mmio_base) { dev_err(hba->dev, "Invalid memory reference for mmio_base is NULL\n"); diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h index 6795e1f0e8f8c50aec6d0bf26d5b9bc1b64d576b..1d999228efc85104127f6d56b4cd6b1cc8f684e0 100644 --- a/drivers/scsi/ufs/ufshci.h +++ b/drivers/scsi/ufs/ufshci.h @@ -138,7 +138,8 @@ enum { #define INT_FATAL_ERRORS (DEVICE_FATAL_ERROR |\ CONTROLLER_FATAL_ERROR |\ SYSTEM_BUS_FATAL_ERROR |\ - CRYPTO_ENGINE_FATAL_ERROR) + CRYPTO_ENGINE_FATAL_ERROR |\ + UIC_LINK_LOST) /* HCS - Host Controller Status 30h */ #define DEVICE_PRESENT 0x1 diff --git a/drivers/soc/aspeed/aspeed-lpc-ctrl.c b/drivers/soc/aspeed/aspeed-lpc-ctrl.c index 040c7dc1d479242d6843e6eb56fa0901b297c82c..71b555c715d2e68d8f08bafa2daddb870fdbc5e5 100644 --- a/drivers/soc/aspeed/aspeed-lpc-ctrl.c +++ b/drivers/soc/aspeed/aspeed-lpc-ctrl.c @@ -251,10 +251,9 @@ static int aspeed_lpc_ctrl_probe(struct platform_device *pdev) } lpc_ctrl->clk = devm_clk_get(dev, NULL); - if (IS_ERR(lpc_ctrl->clk)) { - dev_err(dev, "couldn't get clock\n"); - return PTR_ERR(lpc_ctrl->clk); - } + if (IS_ERR(lpc_ctrl->clk)) + return dev_err_probe(dev, PTR_ERR(lpc_ctrl->clk), + "couldn't get clock\n"); rc = clk_prepare_enable(lpc_ctrl->clk); if (rc) { dev_err(dev, "couldn't enable clock\n"); diff --git a/drivers/soc/qcom/cpr.c b/drivers/soc/qcom/cpr.c index b24cc77d1889fb75a404cd100975ac97164f7750..6298561bc29c9dfd30739416c73e87585ae5d611 100644 --- a/drivers/soc/qcom/cpr.c +++ b/drivers/soc/qcom/cpr.c @@ -1043,7 +1043,7 @@ static int cpr_interpolate(const struct corner *corner, int step_volt, return corner->uV; temp = f_diff * (uV_high - uV_low); - do_div(temp, f_high - f_low); + temp = div64_ul(temp, f_high - f_low); /* * max_volt_scale has units of uV/MHz while freq values diff --git a/drivers/soc/ti/pruss.c b/drivers/soc/ti/pruss.c index cc0b4ad7a3d34fb06654f0f04c2ef37c12d496a7..30695172a508f610d9571f841aa0461701f905db 100644 --- a/drivers/soc/ti/pruss.c +++ b/drivers/soc/ti/pruss.c @@ -131,7 +131,7 @@ static int pruss_clk_init(struct pruss *pruss, struct device_node *cfg_node) clks_np = of_get_child_by_name(cfg_node, "clocks"); if (!clks_np) { - dev_err(dev, "%pOF is missing its 'clocks' node\n", clks_np); + dev_err(dev, "%pOF is missing its 'clocks' node\n", cfg_node); return -ENODEV; } diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c index 3c0ae6dbc43e212b451fc670e88a739b12963eba..4a80f043b7b17375e6f66ac92336452e40f9431a 100644 --- a/drivers/spi/spi-bcm-qspi.c +++ b/drivers/spi/spi-bcm-qspi.c @@ -551,7 +551,7 @@ static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs) u32 rd = 0; u32 wr = 0; - if (qspi->base[CHIP_SELECT]) { + if (cs >= 0 && qspi->base[CHIP_SELECT]) { rd = bcm_qspi_read(qspi, CHIP_SELECT, 0); wr = (rd & ~0xff) | (1 << cs); if (rd == wr) diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c index c208efeadd1847a4be807ce4dc886851f031dc35..0bc7daa7afc83d0d20424800845bb16f1c2c473f 100644 --- a/drivers/spi/spi-meson-spicc.c +++ b/drivers/spi/spi-meson-spicc.c @@ -693,6 +693,11 @@ static int meson_spicc_probe(struct platform_device *pdev) writel_relaxed(0, spicc->base + SPICC_INTREG); irq = platform_get_irq(pdev, 0); + if (irq < 0) { + ret = irq; + goto out_master; + } + ret = devm_request_irq(&pdev->dev, irq, meson_spicc_irq, 0, NULL, spicc); if (ret) { diff --git a/drivers/spi/spi-meson-spifc.c b/drivers/spi/spi-meson-spifc.c index 
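The ufshcd_init() change above publishes the driver data before any of the later registrations, because frequency scaling, hwmon and the sysfs groups all look it up through dev_get_drvdata(). A sketch of that ordering with a hypothetical example_priv structure and attribute:

        #include <linux/device.h>
        #include <linux/sysfs.h>

        struct example_priv {
                int value;
        };

        static ssize_t value_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
        {
                struct example_priv *p = dev_get_drvdata(dev);  /* set in example_init() */

                return sysfs_emit(buf, "%d\n", p->value);
        }
        static DEVICE_ATTR_RO(value);

        static int example_init(struct device *dev, struct example_priv *p)
        {
                /* Publish the private data first: the attribute created
                 * below may be read as soon as it exists. */
                dev_set_drvdata(dev, p);

                return device_create_file(dev, &dev_attr_value);
        }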
8eca6f24cb799b461c6e63f3e190b87f2c600c56..c8ed7815c4ba6d84b18da6adc73111d44ce0f5f2 100644 --- a/drivers/spi/spi-meson-spifc.c +++ b/drivers/spi/spi-meson-spifc.c @@ -349,6 +349,7 @@ static int meson_spifc_probe(struct platform_device *pdev) return 0; out_clk: clk_disable_unprepare(spifc->clk); + pm_runtime_disable(spifc->dev); out_err: spi_master_put(master); return ret; diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c index 83e56ee62649d445ea08b05b99a54059e17264f7..92a09dfb99a8e7a124d8be3a0823af4da977cb5f 100644 --- a/drivers/spi/spi-mt65xx.c +++ b/drivers/spi/spi-mt65xx.c @@ -540,7 +540,7 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id) else mdata->state = MTK_SPI_IDLE; - if (!master->can_dma(master, master->cur_msg->spi, trans)) { + if (!master->can_dma(master, NULL, trans)) { if (trans->rx_buf) { cnt = mdata->xfer_len / 4; ioread32_rep(mdata->base + SPI_RX_DATA_REG, diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c index 6a9ef8ee3cc9028f73c3bcf0a1f4ab3a8252373e..ad0088e3947237a8c70c37c9de385b67b8e2febb 100644 --- a/drivers/spi/spi-uniphier.c +++ b/drivers/spi/spi-uniphier.c @@ -726,7 +726,7 @@ static int uniphier_spi_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "failed to get TX DMA capacities: %d\n", ret); - goto out_disable_clk; + goto out_release_dma; } dma_tx_burst = caps.max_burst; } @@ -735,7 +735,7 @@ static int uniphier_spi_probe(struct platform_device *pdev) if (IS_ERR_OR_NULL(master->dma_rx)) { if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) { ret = -EPROBE_DEFER; - goto out_disable_clk; + goto out_release_dma; } master->dma_rx = NULL; dma_rx_burst = INT_MAX; @@ -744,7 +744,7 @@ static int uniphier_spi_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "failed to get RX DMA capacities: %d\n", ret); - goto out_disable_clk; + goto out_release_dma; } dma_rx_burst = caps.max_burst; } @@ -753,10 +753,20 @@ static int uniphier_spi_probe(struct platform_device *pdev) ret = devm_spi_register_master(&pdev->dev, master); if (ret) - goto out_disable_clk; + goto out_release_dma; return 0; +out_release_dma: + if (!IS_ERR_OR_NULL(master->dma_rx)) { + dma_release_channel(master->dma_rx); + master->dma_rx = NULL; + } + if (!IS_ERR_OR_NULL(master->dma_tx)) { + dma_release_channel(master->dma_tx); + master->dma_tx = NULL; + } + out_disable_clk: clk_disable_unprepare(priv->clk); @@ -767,12 +777,13 @@ out_master_put: static int uniphier_spi_remove(struct platform_device *pdev) { - struct uniphier_spi_priv *priv = platform_get_drvdata(pdev); + struct spi_master *master = platform_get_drvdata(pdev); + struct uniphier_spi_priv *priv = spi_master_get_devdata(master); - if (priv->master->dma_tx) - dma_release_channel(priv->master->dma_tx); - if (priv->master->dma_rx) - dma_release_channel(priv->master->dma_rx); + if (master->dma_tx) + dma_release_channel(master->dma_tx); + if (master->dma_rx) + dma_release_channel(master->dma_rx); clk_disable_unprepare(priv->clk); diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c index b635835729d66401501b99a31782760ddee54763..13c0b15fe17649490b7075c0d5119e4d012b7413 100644 --- a/drivers/spi/spi-zynq-qspi.c +++ b/drivers/spi/spi-zynq-qspi.c @@ -570,6 +570,9 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem, if (op->dummy.nbytes) { tmpbuf = kzalloc(op->dummy.nbytes, GFP_KERNEL); + if (!tmpbuf) + return -ENOMEM; + memset(tmpbuf, 0xff, op->dummy.nbytes); reinit_completion(&xqspi->data_completion); xqspi->txbuf = tmpbuf; diff --git 
a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h index f4f5ff0dc0c027562d3539715d21946be77add88..c6ec045a447c9e42c7b4526095bdb8a4f974ca2a 100644 --- a/drivers/staging/fbtft/fbtft.h +++ b/drivers/staging/fbtft/fbtft.h @@ -343,7 +343,10 @@ static int __init fbtft_driver_module_init(void) \ ret = spi_register_driver(&fbtft_driver_spi_driver); \ if (ret < 0) \ return ret; \ - return platform_driver_register(&fbtft_driver_platform_driver); \ + ret = platform_driver_register(&fbtft_driver_platform_driver); \ + if (ret < 0) \ + spi_unregister_driver(&fbtft_driver_spi_driver); \ + return ret; \ } \ \ static void __exit fbtft_driver_module_exit(void) \ diff --git a/drivers/staging/greybus/audio_topology.c b/drivers/staging/greybus/audio_topology.c index 2bb8e7b60e8d551d535b2023bc607bc059beb3b7..e1579f356af5c1f49cadc9f2a5790ed9f49de1d7 100644 --- a/drivers/staging/greybus/audio_topology.c +++ b/drivers/staging/greybus/audio_topology.c @@ -147,6 +147,9 @@ static const char **gb_generate_enum_strings(struct gbaudio_module_info *gb, items = le32_to_cpu(gbenum->items); strings = devm_kcalloc(gb->dev, items, sizeof(char *), GFP_KERNEL); + if (!strings) + return NULL; + data = gbenum->names; for (i = 0; i < items; i++) { @@ -655,6 +658,8 @@ static int gbaudio_tplg_create_enum_kctl(struct gbaudio_module_info *gb, /* since count=1, and reg is dummy */ gbe->items = le32_to_cpu(gb_enum->items); gbe->texts = gb_generate_enum_strings(gb, gb_enum); + if (!gbe->texts) + return -ENOMEM; /* debug enum info */ dev_dbg(gb->dev, "Max:%d, name_length:%d\n", gbe->items, @@ -862,6 +867,8 @@ static int gbaudio_tplg_create_enum_ctl(struct gbaudio_module_info *gb, /* since count=1, and reg is dummy */ gbe->items = le32_to_cpu(gb_enum->items); gbe->texts = gb_generate_enum_strings(gb, gb_enum); + if (!gbe->texts) + return -ENOMEM; /* debug enum info */ dev_dbg(gb->dev, "Max:%d, name_length:%d\n", gbe->items, @@ -1072,6 +1079,10 @@ static int gbaudio_tplg_create_widget(struct gbaudio_module_info *module, csize += le16_to_cpu(gbenum->names_length); control->texts = (const char * const *) gb_generate_enum_strings(module, gbenum); + if (!control->texts) { + ret = -ENOMEM; + goto error; + } control->items = le32_to_cpu(gbenum->items); } else { csize = sizeof(struct gb_audio_control); @@ -1181,6 +1192,10 @@ static int gbaudio_tplg_process_kcontrols(struct gbaudio_module_info *module, csize += le16_to_cpu(gbenum->names_length); control->texts = (const char * const *) gb_generate_enum_strings(module, gbenum); + if (!control->texts) { + ret = -ENOMEM; + goto error; + } control->items = le32_to_cpu(gbenum->items); } else { csize = sizeof(struct gb_audio_control); diff --git a/drivers/staging/media/atomisp/i2c/ov2680.h b/drivers/staging/media/atomisp/i2c/ov2680.h index 49920245e0647ba231fafc9206720c6441c8fede..cafb798a71abeb76d784bb93c9d09a4f0c0cca08 100644 --- a/drivers/staging/media/atomisp/i2c/ov2680.h +++ b/drivers/staging/media/atomisp/i2c/ov2680.h @@ -289,8 +289,6 @@ static struct ov2680_reg const ov2680_global_setting[] = { */ static struct ov2680_reg const ov2680_QCIF_30fps[] = { {0x3086, 0x01}, - {0x3501, 0x24}, - {0x3502, 0x40}, {0x370a, 0x23}, {0x3801, 0xa0}, {0x3802, 0x00}, @@ -334,8 +332,6 @@ static struct ov2680_reg const ov2680_QCIF_30fps[] = { */ static struct ov2680_reg const ov2680_CIF_30fps[] = { {0x3086, 0x01}, - {0x3501, 0x24}, - {0x3502, 0x40}, {0x370a, 0x23}, {0x3801, 0xa0}, {0x3802, 0x00}, @@ -377,8 +373,6 @@ static struct ov2680_reg const ov2680_CIF_30fps[] = { */ static struct ov2680_reg const 
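The fbtft module-init macro above gains a rollback: if platform_driver_register() fails after spi_register_driver() succeeded, the SPI driver is unregistered again so the module never stays half-registered. The shape of that init/exit pair, using example_* drivers defined only for illustration:

        #include <linux/module.h>
        #include <linux/platform_device.h>
        #include <linux/spi/spi.h>

        static struct spi_driver example_spi_driver = {
                .driver = { .name = "example-spi" },    /* probe/id_table omitted */
        };

        static struct platform_driver example_platform_driver = {
                .driver = { .name = "example-platform" },
        };

        static int __init example_module_init(void)
        {
                int ret;

                ret = spi_register_driver(&example_spi_driver);
                if (ret < 0)
                        return ret;

                ret = platform_driver_register(&example_platform_driver);
                if (ret < 0)
                        spi_unregister_driver(&example_spi_driver);     /* roll back */

                return ret;
        }

        static void __exit example_module_exit(void)
        {
                platform_driver_unregister(&example_platform_driver);
                spi_unregister_driver(&example_spi_driver);
        }

        module_init(example_module_init);
        module_exit(example_module_exit);
        MODULE_LICENSE("GPL");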
ov2680_QVGA_30fps[] = { {0x3086, 0x01}, - {0x3501, 0x24}, - {0x3502, 0x40}, {0x370a, 0x23}, {0x3801, 0xa0}, {0x3802, 0x00}, @@ -420,8 +414,6 @@ static struct ov2680_reg const ov2680_QVGA_30fps[] = { */ static struct ov2680_reg const ov2680_656x496_30fps[] = { {0x3086, 0x01}, - {0x3501, 0x24}, - {0x3502, 0x40}, {0x370a, 0x23}, {0x3801, 0xa0}, {0x3802, 0x00}, @@ -463,8 +455,6 @@ static struct ov2680_reg const ov2680_656x496_30fps[] = { */ static struct ov2680_reg const ov2680_720x592_30fps[] = { {0x3086, 0x01}, - {0x3501, 0x26}, - {0x3502, 0x40}, {0x370a, 0x23}, {0x3801, 0x00}, // X_ADDR_START; {0x3802, 0x00}, @@ -508,8 +498,6 @@ static struct ov2680_reg const ov2680_720x592_30fps[] = { */ static struct ov2680_reg const ov2680_800x600_30fps[] = { {0x3086, 0x01}, - {0x3501, 0x26}, - {0x3502, 0x40}, {0x370a, 0x23}, {0x3801, 0x00}, {0x3802, 0x00}, @@ -551,8 +539,6 @@ static struct ov2680_reg const ov2680_800x600_30fps[] = { */ static struct ov2680_reg const ov2680_720p_30fps[] = { {0x3086, 0x00}, - {0x3501, 0x48}, - {0x3502, 0xe0}, {0x370a, 0x21}, {0x3801, 0xa0}, {0x3802, 0x00}, @@ -594,8 +580,6 @@ static struct ov2680_reg const ov2680_720p_30fps[] = { */ static struct ov2680_reg const ov2680_1296x976_30fps[] = { {0x3086, 0x00}, - {0x3501, 0x48}, - {0x3502, 0xe0}, {0x370a, 0x21}, {0x3801, 0xa0}, {0x3802, 0x00}, @@ -637,8 +621,6 @@ static struct ov2680_reg const ov2680_1296x976_30fps[] = { */ static struct ov2680_reg const ov2680_1456x1096_30fps[] = { {0x3086, 0x00}, - {0x3501, 0x48}, - {0x3502, 0xe0}, {0x370a, 0x21}, {0x3801, 0x90}, {0x3802, 0x00}, @@ -682,8 +664,6 @@ static struct ov2680_reg const ov2680_1456x1096_30fps[] = { static struct ov2680_reg const ov2680_1616x916_30fps[] = { {0x3086, 0x00}, - {0x3501, 0x48}, - {0x3502, 0xe0}, {0x370a, 0x21}, {0x3801, 0x00}, {0x3802, 0x00}, @@ -726,8 +706,6 @@ static struct ov2680_reg const ov2680_1616x916_30fps[] = { #if 0 static struct ov2680_reg const ov2680_1616x1082_30fps[] = { {0x3086, 0x00}, - {0x3501, 0x48}, - {0x3502, 0xe0}, {0x370a, 0x21}, {0x3801, 0x00}, {0x3802, 0x00}, @@ -769,8 +747,6 @@ static struct ov2680_reg const ov2680_1616x1082_30fps[] = { */ static struct ov2680_reg const ov2680_1616x1216_30fps[] = { {0x3086, 0x00}, - {0x3501, 0x48}, - {0x3502, 0xe0}, {0x370a, 0x21}, {0x3801, 0x00}, {0x3802, 0x00}, diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp_cmd.c index 592ea990d4ca4014b72a7666d5be077e7b888280..90d50a693ce57243784d3c9f1354473d3978945e 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_cmd.c +++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.c @@ -1138,9 +1138,10 @@ void atomisp_buf_done(struct atomisp_sub_device *asd, int error, asd->frame_status[vb->i] = ATOMISP_FRAME_STATUS_OK; } - } else + } else { asd->frame_status[vb->i] = ATOMISP_FRAME_STATUS_OK; + } } else { asd->frame_status[vb->i] = ATOMISP_FRAME_STATUS_OK; } @@ -1714,6 +1715,12 @@ void atomisp_wdt_refresh_pipe(struct atomisp_video_pipe *pipe, { unsigned long next; + if (!pipe->asd) { + dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, pipe->vdev.name); + return; + } + if (delay != ATOMISP_WDT_KEEP_CURRENT_DELAY) pipe->wdt_duration = delay; @@ -1776,6 +1783,12 @@ void atomisp_wdt_refresh(struct atomisp_sub_device *asd, unsigned int delay) /* ISP2401 */ void atomisp_wdt_stop_pipe(struct atomisp_video_pipe *pipe, bool sync) { + if (!pipe->asd) { + dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, pipe->vdev.name); + return; + } + if (!atomisp_is_wdt_running(pipe)) 
return; @@ -4108,6 +4121,12 @@ void atomisp_handle_parameter_and_buffer(struct atomisp_video_pipe *pipe) unsigned long irqflags; bool need_to_enqueue_buffer = false; + if (!asd) { + dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, pipe->vdev.name); + return; + } + if (atomisp_is_vf_pipe(pipe)) return; @@ -4195,6 +4214,12 @@ int atomisp_set_parameters(struct video_device *vdev, struct atomisp_css_params *css_param = &asd->params.css_param; int ret; + if (!asd) { + dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) { dev_err(asd->isp->dev, "%s: internal error!\n", __func__); return -EINVAL; @@ -4855,6 +4880,12 @@ int atomisp_try_fmt(struct video_device *vdev, struct v4l2_format *f, int source_pad = atomisp_subdev_source_pad(vdev); int ret; + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + if (!isp->inputs[asd->input_curr].camera) return -EINVAL; @@ -4945,9 +4976,9 @@ atomisp_try_fmt_file(struct atomisp_device *isp, struct v4l2_format *f) depth = get_pixel_depth(pixelformat); - if (field == V4L2_FIELD_ANY) + if (field == V4L2_FIELD_ANY) { field = V4L2_FIELD_NONE; - else if (field != V4L2_FIELD_NONE) { + } else if (field != V4L2_FIELD_NONE) { dev_err(isp->dev, "Wrong output field\n"); return -EINVAL; } @@ -5201,6 +5232,12 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev, const struct atomisp_in_fmt_conv *fc; int ret, i; + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + v4l2_fh_init(&fh.vfh, vdev); isp_sink_crop = atomisp_subdev_get_rect( @@ -5512,6 +5549,7 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, unsigned int dvs_env_w, unsigned int dvs_env_h) { struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; + struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); const struct atomisp_format_bridge *format; struct v4l2_subdev_pad_config pad_cfg; struct v4l2_subdev_format vformat = { @@ -5527,6 +5565,12 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, struct v4l2_subdev_fh fh; int ret; + if (!asd) { + dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + v4l2_fh_init(&fh.vfh, vdev); stream_index = atomisp_source_pad_to_stream_id(asd, source_pad); @@ -5617,6 +5661,12 @@ int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f) struct v4l2_subdev_fh fh; int ret; + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + if (source_pad >= ATOMISP_SUBDEV_PADS_NUM) return -EINVAL; @@ -6050,6 +6100,12 @@ int atomisp_set_fmt_file(struct video_device *vdev, struct v4l2_format *f) struct v4l2_subdev_fh fh; int ret; + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + v4l2_fh_init(&fh.vfh, vdev); dev_dbg(isp->dev, "setting fmt %ux%u 0x%x for file inject\n", @@ -6374,6 +6430,12 @@ bool atomisp_is_vf_pipe(struct atomisp_video_pipe *pipe) { struct atomisp_sub_device *asd = pipe->asd; + if (!asd) { + dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, pipe->vdev.name); + return false; + } + if (pipe == &asd->video_out_vf) return true; @@ -6587,17 +6649,23 @@ static int atomisp_get_pipe_id(struct atomisp_video_pipe *pipe) { struct atomisp_sub_device *asd = 
pipe->asd; - if (ATOMISP_USE_YUVPP(asd)) + if (!asd) { + dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, pipe->vdev.name); + return -EINVAL; + } + + if (ATOMISP_USE_YUVPP(asd)) { return IA_CSS_PIPE_ID_YUVPP; - else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) + } else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) { return IA_CSS_PIPE_ID_VIDEO; - else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) + } else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) { return IA_CSS_PIPE_ID_CAPTURE; - else if (pipe == &asd->video_out_video_capture) + } else if (pipe == &asd->video_out_video_capture) { return IA_CSS_PIPE_ID_VIDEO; - else if (pipe == &asd->video_out_vf) + } else if (pipe == &asd->video_out_vf) { return IA_CSS_PIPE_ID_CAPTURE; - else if (pipe == &asd->video_out_preview) { + } else if (pipe == &asd->video_out_preview) { if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) return IA_CSS_PIPE_ID_VIDEO; else @@ -6624,6 +6692,12 @@ int atomisp_get_invalid_frame_num(struct video_device *vdev, struct ia_css_pipe_info p_info; int ret; + if (!asd) { + dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + if (asd->isp->inputs[asd->input_curr].camera_caps-> sensor[asd->sensor_curr].stream_num > 1) { /* External ISP */ diff --git a/drivers/staging/media/atomisp/pci/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp_fops.c index f1e6b25978534143b450da34c817f5c6bb866bac..b751df31cc24cd4181f0750ccbe97155779c5ef7 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_fops.c +++ b/drivers/staging/media/atomisp/pci/atomisp_fops.c @@ -877,6 +877,11 @@ done: else pipe->users++; rt_mutex_unlock(&isp->mutex); + + /* Ensure that a mode is set */ + if (asd) + v4l2_ctrl_s_ctrl(asd->run_mode, pipe->default_run_mode); + return 0; css_error: @@ -1171,6 +1176,12 @@ static int atomisp_mmap(struct file *file, struct vm_area_struct *vma) u32 origin_size, new_size; int ret; + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + if (!(vma->vm_flags & (VM_WRITE | VM_READ))) return -EACCES; diff --git a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c index 135994d44802c70b3b6415cf38cd787261888f5e..34480ca164746fe58d1f141ba1dbc7ff4fbcd223 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c +++ b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c @@ -481,7 +481,7 @@ fail: static u8 gmin_get_pmic_id_and_addr(struct device *dev) { - struct i2c_client *power; + struct i2c_client *power = NULL; static u8 pmic_i2c_addr; if (pmic_id) diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c index 9da82855552dec0c65b80c1aeb35e12940b5c4a9..8a0648fd7c8135bbb371dc7f1c3b236548ce3e03 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c +++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c @@ -646,6 +646,12 @@ static int atomisp_g_input(struct file *file, void *fh, unsigned int *input) struct atomisp_device *isp = video_get_drvdata(vdev); struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + rt_mutex_lock(&isp->mutex); *input = asd->input_curr; rt_mutex_unlock(&isp->mutex); @@ -665,6 +671,12 @@ static int atomisp_s_input(struct file *file, void *fh, unsigned int input) struct 
v4l2_subdev *motor; int ret; + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + rt_mutex_lock(&isp->mutex); if (input >= ATOM_ISP_MAX_INPUTS || input >= isp->input_cnt) { dev_dbg(isp->dev, "input_cnt: %d\n", isp->input_cnt); @@ -761,18 +773,33 @@ static int atomisp_enum_fmt_cap(struct file *file, void *fh, struct video_device *vdev = video_devdata(file); struct atomisp_device *isp = video_get_drvdata(vdev); struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; - struct v4l2_subdev_mbus_code_enum code = { 0 }; + struct v4l2_subdev_mbus_code_enum code = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + }; + struct v4l2_subdev *camera; unsigned int i, fi = 0; int rval; + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + + camera = isp->inputs[asd->input_curr].camera; + if(!camera) { + dev_err(isp->dev, "%s(): camera is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + rt_mutex_lock(&isp->mutex); - rval = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, pad, - enum_mbus_code, NULL, &code); + + rval = v4l2_subdev_call(camera, pad, enum_mbus_code, NULL, &code); if (rval == -ENOIOCTLCMD) { dev_warn(isp->dev, - "enum_mbus_code pad op not supported. Please fix your sensor driver!\n"); - // rval = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, - // video, enum_mbus_fmt, 0, &code.code); + "enum_mbus_code pad op not supported by %s. Please fix your sensor driver!\n", + camera->name); } rt_mutex_unlock(&isp->mutex); @@ -802,6 +829,8 @@ static int atomisp_enum_fmt_cap(struct file *file, void *fh, f->pixelformat = format->pixelformat; return 0; } + dev_err(isp->dev, "%s(): format for code %x not found.\n", + __func__, code.code); return -EINVAL; } @@ -834,6 +863,72 @@ static int atomisp_g_fmt_file(struct file *file, void *fh, return 0; } +static int atomisp_adjust_fmt(struct v4l2_format *f) +{ + const struct atomisp_format_bridge *format_bridge; + u32 padded_width; + + format_bridge = atomisp_get_format_bridge(f->fmt.pix.pixelformat); + + padded_width = f->fmt.pix.width + pad_w; + + if (format_bridge->planar) { + f->fmt.pix.bytesperline = padded_width; + f->fmt.pix.sizeimage = PAGE_ALIGN(f->fmt.pix.height * + DIV_ROUND_UP(format_bridge->depth * + padded_width, 8)); + } else { + f->fmt.pix.bytesperline = DIV_ROUND_UP(format_bridge->depth * + padded_width, 8); + f->fmt.pix.sizeimage = PAGE_ALIGN(f->fmt.pix.height * f->fmt.pix.bytesperline); + } + + if (f->fmt.pix.field == V4L2_FIELD_ANY) + f->fmt.pix.field = V4L2_FIELD_NONE; + + format_bridge = atomisp_get_format_bridge(f->fmt.pix.pixelformat); + if (!format_bridge) + return -EINVAL; + + /* Currently, raw formats are broken!!! 
*/ + if (format_bridge->sh_fmt == IA_CSS_FRAME_FORMAT_RAW) { + f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420; + + format_bridge = atomisp_get_format_bridge(f->fmt.pix.pixelformat); + if (!format_bridge) + return -EINVAL; + } + + padded_width = f->fmt.pix.width + pad_w; + + if (format_bridge->planar) { + f->fmt.pix.bytesperline = padded_width; + f->fmt.pix.sizeimage = PAGE_ALIGN(f->fmt.pix.height * + DIV_ROUND_UP(format_bridge->depth * + padded_width, 8)); + } else { + f->fmt.pix.bytesperline = DIV_ROUND_UP(format_bridge->depth * + padded_width, 8); + f->fmt.pix.sizeimage = PAGE_ALIGN(f->fmt.pix.height * f->fmt.pix.bytesperline); + } + + if (f->fmt.pix.field == V4L2_FIELD_ANY) + f->fmt.pix.field = V4L2_FIELD_NONE; + + /* + * FIXME: do we need to setup this differently, depending on the + * sensor or the pipeline? + */ + f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709; + f->fmt.pix.ycbcr_enc = V4L2_YCBCR_ENC_709; + f->fmt.pix.xfer_func = V4L2_XFER_FUNC_709; + + f->fmt.pix.width -= pad_w; + f->fmt.pix.height -= pad_h; + + return 0; +} + /* This function looks up the closest available resolution. */ static int atomisp_try_fmt_cap(struct file *file, void *fh, struct v4l2_format *f) @@ -845,7 +940,11 @@ static int atomisp_try_fmt_cap(struct file *file, void *fh, rt_mutex_lock(&isp->mutex); ret = atomisp_try_fmt(vdev, f, NULL); rt_mutex_unlock(&isp->mutex); - return ret; + + if (ret) + return ret; + + return atomisp_adjust_fmt(f); } static int atomisp_s_fmt_cap(struct file *file, void *fh, @@ -1027,6 +1126,12 @@ int __atomisp_reqbufs(struct file *file, void *fh, u16 stream_id = atomisp_source_pad_to_stream_id(asd, source_pad); int ret = 0, i = 0; + if (!asd) { + dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + if (req->count == 0) { mutex_lock(&pipe->capq.vb_lock); if (!list_empty(&pipe->capq.stream)) @@ -1154,6 +1259,12 @@ static int atomisp_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf) u32 pgnr; int ret = 0; + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + rt_mutex_lock(&isp->mutex); if (isp->isp_fatal_error) { ret = -EIO; @@ -1389,6 +1500,12 @@ static int atomisp_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf) struct atomisp_device *isp = video_get_drvdata(vdev); int ret = 0; + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + rt_mutex_lock(&isp->mutex); if (isp->isp_fatal_error) { @@ -1640,6 +1757,12 @@ static int atomisp_streamon(struct file *file, void *fh, int ret = 0; unsigned long irqflags; + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + dev_dbg(isp->dev, "Start stream on pad %d for asd%d\n", atomisp_subdev_source_pad(vdev), asd->index); @@ -1901,6 +2024,12 @@ int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type) unsigned long flags; bool first_streamoff = false; + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + dev_dbg(isp->dev, "Stop stream on pad %d for asd%d\n", atomisp_subdev_source_pad(vdev), asd->index); @@ -2150,6 +2279,12 @@ static int atomisp_g_ctrl(struct file *file, void *fh, struct atomisp_device *isp = video_get_drvdata(vdev); int i, ret = -EINVAL; + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + for (i = 0; i < ctrls_num; 
i++) { if (ci_v4l2_controls[i].id == control->id) { ret = 0; @@ -2229,6 +2364,12 @@ static int atomisp_s_ctrl(struct file *file, void *fh, struct atomisp_device *isp = video_get_drvdata(vdev); int i, ret = -EINVAL; + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + for (i = 0; i < ctrls_num; i++) { if (ci_v4l2_controls[i].id == control->id) { ret = 0; @@ -2310,6 +2451,12 @@ static int atomisp_queryctl(struct file *file, void *fh, struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; struct atomisp_device *isp = video_get_drvdata(vdev); + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + switch (qc->id) { case V4L2_CID_FOCUS_ABSOLUTE: case V4L2_CID_FOCUS_RELATIVE: @@ -2355,6 +2502,12 @@ static int atomisp_camera_g_ext_ctrls(struct file *file, void *fh, int i; int ret = 0; + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + if (!IS_ISP2401) motor = isp->inputs[asd->input_curr].motor; else @@ -2466,6 +2619,12 @@ static int atomisp_camera_s_ext_ctrls(struct file *file, void *fh, int i; int ret = 0; + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + if (!IS_ISP2401) motor = isp->inputs[asd->input_curr].motor; else @@ -2591,6 +2750,12 @@ static int atomisp_g_parm(struct file *file, void *fh, struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; struct atomisp_device *isp = video_get_drvdata(vdev); + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { dev_err(isp->dev, "unsupported v4l2 buf type\n"); return -EINVAL; @@ -2613,6 +2778,12 @@ static int atomisp_s_parm(struct file *file, void *fh, int rval; int fps; + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { dev_err(isp->dev, "unsupported v4l2 buf type\n"); return -EINVAL; diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.c b/drivers/staging/media/atomisp/pci/atomisp_subdev.c index dcc2dd981ca6077c5fcbafaa41d0e8944c8711f1..628e85799274d6cf50c5787e92c00a543cbe384a 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_subdev.c +++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.c @@ -1178,23 +1178,28 @@ static int isp_subdev_init_entities(struct atomisp_sub_device *asd) atomisp_init_acc_pipe(asd, &asd->video_acc); - ret = atomisp_video_init(&asd->video_in, "MEMORY"); + ret = atomisp_video_init(&asd->video_in, "MEMORY", + ATOMISP_RUN_MODE_SDV); if (ret < 0) return ret; - ret = atomisp_video_init(&asd->video_out_capture, "CAPTURE"); + ret = atomisp_video_init(&asd->video_out_capture, "CAPTURE", + ATOMISP_RUN_MODE_STILL_CAPTURE); if (ret < 0) return ret; - ret = atomisp_video_init(&asd->video_out_vf, "VIEWFINDER"); + ret = atomisp_video_init(&asd->video_out_vf, "VIEWFINDER", + ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE); if (ret < 0) return ret; - ret = atomisp_video_init(&asd->video_out_preview, "PREVIEW"); + ret = atomisp_video_init(&asd->video_out_preview, "PREVIEW", + ATOMISP_RUN_MODE_PREVIEW); if (ret < 0) return ret; - ret = atomisp_video_init(&asd->video_out_video_capture, "VIDEO"); + ret = atomisp_video_init(&asd->video_out_video_capture, "VIDEO", + ATOMISP_RUN_MODE_VIDEO); if (ret < 0) return ret; diff --git 
a/drivers/staging/media/atomisp/pci/atomisp_subdev.h b/drivers/staging/media/atomisp/pci/atomisp_subdev.h index 330a77eed8aa65501491c2bd63e6720a4ac2b66a..12215d7406169066e3d3b232dd4752f4216142f1 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_subdev.h +++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.h @@ -81,6 +81,9 @@ struct atomisp_video_pipe { /* the link list to store per_frame parameters */ struct list_head per_frame_params; + /* Store here the initial run mode */ + unsigned int default_run_mode; + unsigned int buffers_in_css; /* irq_lock is used to protect video buffer state change operations and diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c index fa1bd99cd6f17758ac3626378aecc6ac9a54f4f2..8aeea74cfd06bb0244c45ae9c726a79bb35024d5 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c +++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c @@ -447,7 +447,8 @@ const struct atomisp_dfs_config dfs_config_cht_soc = { .dfs_table_size = ARRAY_SIZE(dfs_rules_cht_soc), }; -int atomisp_video_init(struct atomisp_video_pipe *video, const char *name) +int atomisp_video_init(struct atomisp_video_pipe *video, const char *name, + unsigned int run_mode) { int ret; const char *direction; @@ -478,6 +479,7 @@ int atomisp_video_init(struct atomisp_video_pipe *video, const char *name) "ATOMISP ISP %s %s", name, direction); video->vdev.release = video_device_release_empty; video_set_drvdata(&video->vdev, video->isp); + video->default_run_mode = run_mode; return 0; } @@ -711,15 +713,15 @@ static int atomisp_mrfld_power(struct atomisp_device *isp, bool enable) dev_dbg(isp->dev, "IUNIT power-%s.\n", enable ? "on" : "off"); - /*WA:Enable DVFS*/ + /* WA for P-Unit, if DVFS enabled, ISP timeout observed */ if (IS_CHT && enable) - punit_ddr_dvfs_enable(true); + punit_ddr_dvfs_enable(false); /* * FIXME:WA for ECS28A, with this sleep, CTS * android.hardware.camera2.cts.CameraDeviceTest#testCameraDeviceAbort * PASS, no impact on other platforms - */ + */ if (IS_BYT && enable) msleep(10); @@ -727,7 +729,7 @@ static int atomisp_mrfld_power(struct atomisp_device *isp, bool enable) iosf_mbi_modify(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSPM0, val, MRFLD_ISPSSPM0_ISPSSC_MASK); - /*WA:Enable DVFS*/ + /* WA:Enable DVFS */ if (IS_CHT && !enable) punit_ddr_dvfs_enable(true); @@ -1182,6 +1184,7 @@ static void atomisp_unregister_entities(struct atomisp_device *isp) v4l2_device_unregister(&isp->v4l2_dev); media_device_unregister(&isp->media_dev); + media_device_cleanup(&isp->media_dev); } static int atomisp_register_entities(struct atomisp_device *isp) diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.h b/drivers/staging/media/atomisp/pci/atomisp_v4l2.h index 81bb356b81720a359c8996acfce0f42a89b1abc1..72611b8286a4aec66728b175447bb7673be57cb3 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.h +++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.h @@ -27,7 +27,8 @@ struct v4l2_device; struct atomisp_device; struct firmware; -int atomisp_video_init(struct atomisp_video_pipe *video, const char *name); +int atomisp_video_init(struct atomisp_video_pipe *video, const char *name, + unsigned int run_mode); void atomisp_acc_init(struct atomisp_acc_pipe *video, const char *name); void atomisp_video_unregister(struct atomisp_video_pipe *video); void atomisp_acc_unregister(struct atomisp_acc_pipe *video); diff --git a/drivers/staging/media/atomisp/pci/sh_css.c b/drivers/staging/media/atomisp/pci/sh_css.c index 
ddee04c8248d0433552b0c41cdce24b1cb84393c..54a18921fbd15d8dd0a6ae41c0d09f2cf81c0e4a 100644 --- a/drivers/staging/media/atomisp/pci/sh_css.c +++ b/drivers/staging/media/atomisp/pci/sh_css.c @@ -527,6 +527,7 @@ ia_css_stream_input_format_bits_per_pixel(struct ia_css_stream *stream) return bpp; } +/* TODO: move define to proper file in tools */ #define GP_ISEL_TPG_MODE 0x90058 #if !defined(ISP2401) @@ -579,12 +580,8 @@ sh_css_config_input_network(struct ia_css_stream *stream) { vblank_cycles = vblank_lines * (width + hblank_cycles); sh_css_sp_configure_sync_gen(width, height, hblank_cycles, vblank_cycles); - if (!IS_ISP2401) { - if (pipe->stream->config.mode == IA_CSS_INPUT_MODE_TPG) { - /* TODO: move define to proper file in tools */ - ia_css_device_store_uint32(GP_ISEL_TPG_MODE, 0); - } - } + if (pipe->stream->config.mode == IA_CSS_INPUT_MODE_TPG) + ia_css_device_store_uint32(GP_ISEL_TPG_MODE, 0); } ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "sh_css_config_input_network() leave:\n"); @@ -1019,16 +1016,14 @@ static bool sh_css_translate_stream_cfg_to_isys_stream_descr( * ia_css_isys_stream_capture_indication() instead of * ia_css_pipeline_sp_wait_for_isys_stream_N() as isp processing of * capture takes longer than getting an ISYS frame - * - * Only 2401 relevant ?? */ -#if 0 // FIXME: NOT USED on Yocto Aero - isys_stream_descr->polling_mode - = early_polling ? INPUT_SYSTEM_POLL_ON_CAPTURE_REQUEST - : INPUT_SYSTEM_POLL_ON_WAIT_FOR_FRAME; - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "sh_css_translate_stream_cfg_to_isys_stream_descr() leave:\n"); -#endif + if (IS_ISP2401) { + isys_stream_descr->polling_mode + = early_polling ? INPUT_SYSTEM_POLL_ON_CAPTURE_REQUEST + : INPUT_SYSTEM_POLL_ON_WAIT_FOR_FRAME; + ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, + "sh_css_translate_stream_cfg_to_isys_stream_descr() leave:\n"); + } return rc; } @@ -1451,7 +1446,7 @@ static void start_pipe( assert(me); /* all callers are in this file and call with non null argument */ - if (!IS_ISP2401) { + if (IS_ISP2401) { coord = &me->config.internal_frame_origin_bqs_on_sctbl; params = me->stream->isp_params_configs; } diff --git a/drivers/staging/media/atomisp/pci/sh_css_mipi.c b/drivers/staging/media/atomisp/pci/sh_css_mipi.c index d5ae7f0b5864bb7acde86e6b8adb62c660960f5f..651eda0469b23eed8d090752f819d7c7657e19dc 100644 --- a/drivers/staging/media/atomisp/pci/sh_css_mipi.c +++ b/drivers/staging/media/atomisp/pci/sh_css_mipi.c @@ -389,17 +389,17 @@ static bool buffers_needed(struct ia_css_pipe *pipe) { if (!IS_ISP2401) { if (pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) - return false; - else return true; + else + return false; } if (pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR || pipe->stream->config.mode == IA_CSS_INPUT_MODE_TPG || pipe->stream->config.mode == IA_CSS_INPUT_MODE_PRBS) - return false; + return true; - return true; + return false; } int @@ -439,14 +439,17 @@ allocate_mipi_frames(struct ia_css_pipe *pipe, return 0; /* AM TODO: Check */ } - if (!IS_ISP2401) + if (!IS_ISP2401) { port = (unsigned int)pipe->stream->config.source.port.port; - else - err = ia_css_mipi_is_source_port_valid(pipe, &port); + } else { + /* Returns true if port is valid. 
So, invert it */ + err = !ia_css_mipi_is_source_port_valid(pipe, &port); + } assert(port < N_CSI_PORTS); - if (port >= N_CSI_PORTS || err) { + if ((!IS_ISP2401 && port >= N_CSI_PORTS) || + (IS_ISP2401 && err)) { ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "allocate_mipi_frames(%p) exit: error: port is not correct (port=%d).\n", pipe, port); @@ -571,14 +574,17 @@ free_mipi_frames(struct ia_css_pipe *pipe) { return err; } - if (!IS_ISP2401) + if (!IS_ISP2401) { port = (unsigned int)pipe->stream->config.source.port.port; - else - err = ia_css_mipi_is_source_port_valid(pipe, &port); + } else { + /* Returns true if port is valid. So, invert it */ + err = !ia_css_mipi_is_source_port_valid(pipe, &port); + } assert(port < N_CSI_PORTS); - if (port >= N_CSI_PORTS || err) { + if ((!IS_ISP2401 && port >= N_CSI_PORTS) || + (IS_ISP2401 && err)) { ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "free_mipi_frames(%p, %d) exit: error: pipe port is not correct.\n", pipe, port); @@ -683,14 +689,17 @@ send_mipi_frames(struct ia_css_pipe *pipe) { /* TODO: AM: maybe this should be returning an error. */ } - if (!IS_ISP2401) + if (!IS_ISP2401) { port = (unsigned int)pipe->stream->config.source.port.port; - else - err = ia_css_mipi_is_source_port_valid(pipe, &port); + } else { + /* Returns true if port is valid. So, invert it */ + err = !ia_css_mipi_is_source_port_valid(pipe, &port); + } assert(port < N_CSI_PORTS); - if (port >= N_CSI_PORTS || err) { + if ((!IS_ISP2401 && port >= N_CSI_PORTS) || + (IS_ISP2401 && err)) { IA_CSS_ERROR("send_mipi_frames(%p) exit: invalid port specified (port=%d).\n", pipe, port); return err; diff --git a/drivers/staging/media/atomisp/pci/sh_css_params.c b/drivers/staging/media/atomisp/pci/sh_css_params.c index 24fc497bd4915eb2e138f839dcb4d25bfa348c02..8d6514c45eeb63ea8e5cfea0e9dd0234a9462a09 100644 --- a/drivers/staging/media/atomisp/pci/sh_css_params.c +++ b/drivers/staging/media/atomisp/pci/sh_css_params.c @@ -2437,7 +2437,7 @@ sh_css_create_isp_params(struct ia_css_stream *stream, unsigned int i; struct sh_css_ddr_address_map *ddr_ptrs; struct sh_css_ddr_address_map_size *ddr_ptrs_size; - int err = 0; + int err; size_t params_size; struct ia_css_isp_parameters *params = kvmalloc(sizeof(struct ia_css_isp_parameters), GFP_KERNEL); @@ -2482,7 +2482,11 @@ sh_css_create_isp_params(struct ia_css_stream *stream, succ &= (ddr_ptrs->macc_tbl != mmgr_NULL); *isp_params_out = params; - return err; + + if (!succ) + return -ENOMEM; + + return 0; } static bool diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c index 7749ca9a8ebbfe7a432bdf4a9b4976b0eebf4a80..bc97ec0a7e4af193a22e4fd5b0855552e79b8666 100644 --- a/drivers/staging/media/hantro/hantro_drv.c +++ b/drivers/staging/media/hantro/hantro_drv.c @@ -829,7 +829,7 @@ static int hantro_probe(struct platform_device *pdev) ret = clk_bulk_prepare(vpu->variant->num_clocks, vpu->clocks); if (ret) { dev_err(&pdev->dev, "Failed to prepare clocks\n"); - return ret; + goto err_pm_disable; } ret = v4l2_device_register(&pdev->dev, &vpu->v4l2_dev); @@ -885,6 +885,7 @@ err_v4l2_unreg: v4l2_device_unregister(&vpu->v4l2_dev); err_clk_unprepare: clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks); +err_pm_disable: pm_runtime_dont_use_autosuspend(vpu->dev); pm_runtime_disable(vpu->dev); return ret; diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h index 4cabaf21c1ca051b28296b27f1aff8322d0955cc..367db4acc78523ae8b198346a9ff626cee0ead5c 100644 --- 
a/drivers/staging/rtl8192e/rtllib.h +++ b/drivers/staging/rtl8192e/rtllib.h @@ -1982,7 +1982,7 @@ void rtllib_softmac_xmit(struct rtllib_txb *txb, struct rtllib_device *ieee); void rtllib_stop_send_beacons(struct rtllib_device *ieee); void notify_wx_assoc_event(struct rtllib_device *ieee); void rtllib_start_ibss(struct rtllib_device *ieee); -void rtllib_softmac_init(struct rtllib_device *ieee); +int rtllib_softmac_init(struct rtllib_device *ieee); void rtllib_softmac_free(struct rtllib_device *ieee); void rtllib_disassociate(struct rtllib_device *ieee); void rtllib_stop_scan(struct rtllib_device *ieee); diff --git a/drivers/staging/rtl8192e/rtllib_module.c b/drivers/staging/rtl8192e/rtllib_module.c index 64d9feee1f392d33fb575336289570e565caa300..f00ac94b2639b4ca467c693cfe5be87771c5813b 100644 --- a/drivers/staging/rtl8192e/rtllib_module.c +++ b/drivers/staging/rtl8192e/rtllib_module.c @@ -88,7 +88,7 @@ struct net_device *alloc_rtllib(int sizeof_priv) err = rtllib_networks_allocate(ieee); if (err) { pr_err("Unable to allocate beacon storage: %d\n", err); - goto failed; + goto free_netdev; } rtllib_networks_initialize(ieee); @@ -121,11 +121,13 @@ struct net_device *alloc_rtllib(int sizeof_priv) ieee->hwsec_active = 0; memset(ieee->swcamtable, 0, sizeof(struct sw_cam_table) * 32); - rtllib_softmac_init(ieee); + err = rtllib_softmac_init(ieee); + if (err) + goto free_crypt_info; ieee->pHTInfo = kzalloc(sizeof(struct rt_hi_throughput), GFP_KERNEL); if (!ieee->pHTInfo) - return NULL; + goto free_softmac; HTUpdateDefaultSetting(ieee); HTInitializeHTInfo(ieee); @@ -141,8 +143,14 @@ struct net_device *alloc_rtllib(int sizeof_priv) return dev; - failed: +free_softmac: + rtllib_softmac_free(ieee); +free_crypt_info: + lib80211_crypt_info_free(&ieee->crypt_info); + rtllib_networks_free(ieee); +free_netdev: free_netdev(dev); + return NULL; } EXPORT_SYMBOL(alloc_rtllib); diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c index 2c752ba5a802a275fe36e1be366eb9974ac86a18..e8e72f79ca00790671ef024b59ab359f1db6620b 100644 --- a/drivers/staging/rtl8192e/rtllib_softmac.c +++ b/drivers/staging/rtl8192e/rtllib_softmac.c @@ -2953,7 +2953,7 @@ void rtllib_start_protocol(struct rtllib_device *ieee) } } -void rtllib_softmac_init(struct rtllib_device *ieee) +int rtllib_softmac_init(struct rtllib_device *ieee) { int i; @@ -2964,7 +2964,8 @@ void rtllib_softmac_init(struct rtllib_device *ieee) ieee->seq_ctrl[i] = 0; ieee->dot11d_info = kzalloc(sizeof(struct rt_dot11d_info), GFP_ATOMIC); if (!ieee->dot11d_info) - netdev_err(ieee->dev, "Can't alloc memory for DOT11D\n"); + return -ENOMEM; + ieee->LinkDetectInfo.SlotIndex = 0; ieee->LinkDetectInfo.SlotNum = 2; ieee->LinkDetectInfo.NumRecvBcnInPeriod = 0; @@ -3030,6 +3031,7 @@ void rtllib_softmac_init(struct rtllib_device *ieee) tasklet_setup(&ieee->ps_task, rtllib_sta_ps); + return 0; } void rtllib_softmac_free(struct rtllib_device *ieee) diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c index 8075f60fd02c337a5fcb0efc8ad74a066737cfb3..2d5cf1714ae05de28eb17ba66db9b5be12338e98 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.c +++ b/drivers/target/iscsi/iscsi_target_tpg.c @@ -443,6 +443,9 @@ static bool iscsit_tpg_check_network_portal( break; } spin_unlock(&tpg->tpg_np_lock); + + if (match) + break; } spin_unlock(&tiqn->tiqn_tpg_lock); diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c index 
f255a96ae5a48ca6323b0d932348cdb86fe6379f..6ea80add7378fce0de69adecad0b611d38ce0247 100644 --- a/drivers/tee/optee/core.c +++ b/drivers/tee/optee/core.c @@ -588,6 +588,7 @@ static int optee_remove(struct platform_device *pdev) /* Unregister OP-TEE specific client devices on TEE bus */ optee_unregister_devices(); + teedev_close_context(optee->ctx); /* * Ask OP-TEE to free all cached shared memory objects to decrease * reference counters and also avoid wild pointers in secure world @@ -633,6 +634,7 @@ static int optee_probe(struct platform_device *pdev) struct optee *optee = NULL; void *memremaped_shm = NULL; struct tee_device *teedev; + struct tee_context *ctx; u32 sec_caps; int rc; @@ -719,6 +721,12 @@ static int optee_probe(struct platform_device *pdev) optee_supp_init(&optee->supp); optee->memremaped_shm = memremaped_shm; optee->pool = pool; + ctx = teedev_open(optee->teedev); + if (IS_ERR(ctx)) { + rc = PTR_ERR(ctx); + goto err; + } + optee->ctx = ctx; /* * Ensure that there are no pre-existing shm objects before enabling diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h index f6bb4a763ba94e80f0463cc42957702ef47f1b93..ea09533e30cdef5b214043e801241618084cc38f 100644 --- a/drivers/tee/optee/optee_private.h +++ b/drivers/tee/optee/optee_private.h @@ -70,6 +70,7 @@ struct optee_supp { * struct optee - main service struct * @supp_teedev: supplicant device * @teedev: client device + * @ctx: driver internal TEE context * @invoke_fn: function to issue smc or hvc * @call_queue: queue of threads waiting to call @invoke_fn * @wait_queue: queue of threads from secure world waiting for a @@ -87,6 +88,7 @@ struct optee { struct tee_device *supp_teedev; struct tee_device *teedev; optee_invoke_fn *invoke_fn; + struct tee_context *ctx; struct optee_call_queue call_queue; struct optee_wait_queue wait_queue; struct optee_supp supp; diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c index 9dbdd783d6f2d3b15c4bff071bc153f147ebcd58..f1e0332b0f6e8609d024f535fffb19252b8a2777 100644 --- a/drivers/tee/optee/rpc.c +++ b/drivers/tee/optee/rpc.c @@ -284,6 +284,7 @@ static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz) } static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx, + struct optee *optee, struct optee_msg_arg *arg, struct optee_call_ctx *call_ctx) { @@ -313,7 +314,8 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx, shm = cmd_alloc_suppl(ctx, sz); break; case OPTEE_MSG_RPC_SHM_TYPE_KERNEL: - shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV); + shm = tee_shm_alloc(optee->ctx, sz, + TEE_SHM_MAPPED | TEE_SHM_PRIV); break; default: arg->ret = TEEC_ERROR_BAD_PARAMETERS; @@ -470,7 +472,7 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee, break; case OPTEE_MSG_RPC_CMD_SHM_ALLOC: free_pages_list(call_ctx); - handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx); + handle_rpc_func_cmd_shm_alloc(ctx, optee, arg, call_ctx); break; case OPTEE_MSG_RPC_CMD_SHM_FREE: handle_rpc_func_cmd_shm_free(ctx, arg); @@ -501,7 +503,7 @@ void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param, switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) { case OPTEE_SMC_RPC_FUNC_ALLOC: - shm = tee_shm_alloc(ctx, param->a1, + shm = tee_shm_alloc(optee->ctx, param->a1, TEE_SHM_MAPPED | TEE_SHM_PRIV); if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) { reg_pair_from_64(&param->a1, &param->a2, pa); diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c index 
6ade4a5c4840789829fbd28b7825aad3742d9dc5..e07f997cf8dd3b5132be4d20a21245e092301376 100644 --- a/drivers/tee/tee_core.c +++ b/drivers/tee/tee_core.c @@ -43,7 +43,7 @@ static DEFINE_SPINLOCK(driver_lock); static struct class *tee_class; static dev_t tee_devt; -static struct tee_context *teedev_open(struct tee_device *teedev) +struct tee_context *teedev_open(struct tee_device *teedev) { int rc; struct tee_context *ctx; @@ -70,6 +70,7 @@ err: return ERR_PTR(rc); } +EXPORT_SYMBOL_GPL(teedev_open); void teedev_ctx_get(struct tee_context *ctx) { @@ -96,11 +97,14 @@ void teedev_ctx_put(struct tee_context *ctx) kref_put(&ctx->refcount, teedev_ctx_release); } -static void teedev_close_context(struct tee_context *ctx) +void teedev_close_context(struct tee_context *ctx) { - tee_device_put(ctx->teedev); + struct tee_device *teedev = ctx->teedev; + teedev_ctx_put(ctx); + tee_device_put(teedev); } +EXPORT_SYMBOL_GPL(teedev_close_context); static int tee_open(struct inode *inode, struct file *filp) { diff --git a/drivers/thermal/imx8mm_thermal.c b/drivers/thermal/imx8mm_thermal.c index a1e4f9bb4cb01e7eb17538d1597759c8ec1ae557..0f4cabd2a8c6247f6ccd152cbec2f306d13efcb7 100644 --- a/drivers/thermal/imx8mm_thermal.c +++ b/drivers/thermal/imx8mm_thermal.c @@ -21,6 +21,7 @@ #define TPS 0x4 #define TRITSR 0x20 /* TMU immediate temp */ +#define TER_ADC_PD BIT(30) #define TER_EN BIT(31) #define TRITSR_TEMP0_VAL_MASK 0xff #define TRITSR_TEMP1_VAL_MASK 0xff0000 @@ -113,6 +114,8 @@ static void imx8mm_tmu_enable(struct imx8mm_tmu *tmu, bool enable) val = readl_relaxed(tmu->base + TER); val = enable ? (val | TER_EN) : (val & ~TER_EN); + if (tmu->socdata->version == TMU_VER2) + val = enable ? (val & ~TER_ADC_PD) : (val | TER_ADC_PD); writel_relaxed(val, tmu->base + TER); } diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c index 2c7473d86a59b013b43872777dfa5d7011152e6c..16663373b6829999971f5210f27193131fbf3a37 100644 --- a/drivers/thermal/imx_thermal.c +++ b/drivers/thermal/imx_thermal.c @@ -15,6 +15,7 @@ #include #include #include +#include #define REG_SET 0x4 #define REG_CLR 0x8 @@ -194,6 +195,7 @@ static struct thermal_soc_data thermal_imx7d_data = { }; struct imx_thermal_data { + struct device *dev; struct cpufreq_policy *policy; struct thermal_zone_device *tz; struct thermal_cooling_device *cdev; @@ -252,44 +254,15 @@ static int imx_get_temp(struct thermal_zone_device *tz, int *temp) const struct thermal_soc_data *soc_data = data->socdata; struct regmap *map = data->tempmon; unsigned int n_meas; - bool wait, run_measurement; u32 val; + int ret; - run_measurement = !data->irq_enabled; - if (!run_measurement) { - /* Check if a measurement is currently in progress */ - regmap_read(map, soc_data->temp_data, &val); - wait = !(val & soc_data->temp_valid_mask); - } else { - /* - * Every time we measure the temperature, we will power on the - * temperature sensor, enable measurements, take a reading, - * disable measurements, power off the temperature sensor. - */ - regmap_write(map, soc_data->sensor_ctrl + REG_CLR, - soc_data->power_down_mask); - regmap_write(map, soc_data->sensor_ctrl + REG_SET, - soc_data->measure_temp_mask); - - wait = true; - } - - /* - * According to the temp sensor designers, it may require up to ~17us - * to complete a measurement. 
- */ - if (wait) - usleep_range(20, 50); + ret = pm_runtime_resume_and_get(data->dev); + if (ret < 0) + return ret; regmap_read(map, soc_data->temp_data, &val); - if (run_measurement) { - regmap_write(map, soc_data->sensor_ctrl + REG_CLR, - soc_data->measure_temp_mask); - regmap_write(map, soc_data->sensor_ctrl + REG_SET, - soc_data->power_down_mask); - } - if ((val & soc_data->temp_valid_mask) == 0) { dev_dbg(&tz->device, "temp measurement never finished\n"); return -EAGAIN; @@ -328,6 +301,8 @@ static int imx_get_temp(struct thermal_zone_device *tz, int *temp) enable_irq(data->irq); } + pm_runtime_put(data->dev); + return 0; } @@ -335,24 +310,16 @@ static int imx_change_mode(struct thermal_zone_device *tz, enum thermal_device_mode mode) { struct imx_thermal_data *data = tz->devdata; - struct regmap *map = data->tempmon; - const struct thermal_soc_data *soc_data = data->socdata; if (mode == THERMAL_DEVICE_ENABLED) { - regmap_write(map, soc_data->sensor_ctrl + REG_CLR, - soc_data->power_down_mask); - regmap_write(map, soc_data->sensor_ctrl + REG_SET, - soc_data->measure_temp_mask); + pm_runtime_get(data->dev); if (!data->irq_enabled) { data->irq_enabled = true; enable_irq(data->irq); } } else { - regmap_write(map, soc_data->sensor_ctrl + REG_CLR, - soc_data->measure_temp_mask); - regmap_write(map, soc_data->sensor_ctrl + REG_SET, - soc_data->power_down_mask); + pm_runtime_put(data->dev); if (data->irq_enabled) { disable_irq(data->irq); @@ -393,6 +360,11 @@ static int imx_set_trip_temp(struct thermal_zone_device *tz, int trip, int temp) { struct imx_thermal_data *data = tz->devdata; + int ret; + + ret = pm_runtime_resume_and_get(data->dev); + if (ret < 0) + return ret; /* do not allow changing critical threshold */ if (trip == IMX_TRIP_CRITICAL) @@ -406,6 +378,8 @@ static int imx_set_trip_temp(struct thermal_zone_device *tz, int trip, imx_set_alarm_temp(data, temp); + pm_runtime_put(data->dev); + return 0; } @@ -681,6 +655,8 @@ static int imx_thermal_probe(struct platform_device *pdev) if (!data) return -ENOMEM; + data->dev = &pdev->dev; + map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "fsl,tempmon"); if (IS_ERR(map)) { ret = PTR_ERR(map); @@ -800,6 +776,16 @@ static int imx_thermal_probe(struct platform_device *pdev) data->socdata->power_down_mask); regmap_write(map, data->socdata->sensor_ctrl + REG_SET, data->socdata->measure_temp_mask); + /* After power up, we need a delay before first access can be done. 
*/ + usleep_range(20, 50); + + /* the core was configured and enabled just before */ + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(data->dev); + + ret = pm_runtime_resume_and_get(data->dev); + if (ret < 0) + goto disable_runtime_pm; data->irq_enabled = true; ret = thermal_zone_device_enable(data->tz); @@ -814,10 +800,15 @@ static int imx_thermal_probe(struct platform_device *pdev) goto thermal_zone_unregister; } + pm_runtime_put(data->dev); + return 0; thermal_zone_unregister: thermal_zone_device_unregister(data->tz); +disable_runtime_pm: + pm_runtime_put_noidle(data->dev); + pm_runtime_disable(data->dev); clk_disable: clk_disable_unprepare(data->thermal_clk); legacy_cleanup: @@ -829,13 +820,9 @@ legacy_cleanup: static int imx_thermal_remove(struct platform_device *pdev) { struct imx_thermal_data *data = platform_get_drvdata(pdev); - struct regmap *map = data->tempmon; - /* Disable measurements */ - regmap_write(map, data->socdata->sensor_ctrl + REG_SET, - data->socdata->power_down_mask); - if (!IS_ERR(data->thermal_clk)) - clk_disable_unprepare(data->thermal_clk); + pm_runtime_put_noidle(data->dev); + pm_runtime_disable(data->dev); thermal_zone_device_unregister(data->tz); imx_thermal_unregister_legacy_cooling(data); @@ -858,29 +845,79 @@ static int __maybe_unused imx_thermal_suspend(struct device *dev) ret = thermal_zone_device_disable(data->tz); if (ret) return ret; + + return pm_runtime_force_suspend(data->dev); +} + +static int __maybe_unused imx_thermal_resume(struct device *dev) +{ + struct imx_thermal_data *data = dev_get_drvdata(dev); + int ret; + + ret = pm_runtime_force_resume(data->dev); + if (ret) + return ret; + /* Enabled thermal sensor after resume */ + return thermal_zone_device_enable(data->tz); +} + +static int __maybe_unused imx_thermal_runtime_suspend(struct device *dev) +{ + struct imx_thermal_data *data = dev_get_drvdata(dev); + const struct thermal_soc_data *socdata = data->socdata; + struct regmap *map = data->tempmon; + int ret; + + ret = regmap_write(map, socdata->sensor_ctrl + REG_CLR, + socdata->measure_temp_mask); + if (ret) + return ret; + + ret = regmap_write(map, socdata->sensor_ctrl + REG_SET, + socdata->power_down_mask); + if (ret) + return ret; + clk_disable_unprepare(data->thermal_clk); return 0; } -static int __maybe_unused imx_thermal_resume(struct device *dev) +static int __maybe_unused imx_thermal_runtime_resume(struct device *dev) { struct imx_thermal_data *data = dev_get_drvdata(dev); + const struct thermal_soc_data *socdata = data->socdata; + struct regmap *map = data->tempmon; int ret; ret = clk_prepare_enable(data->thermal_clk); if (ret) return ret; - /* Enabled thermal sensor after resume */ - ret = thermal_zone_device_enable(data->tz); + + ret = regmap_write(map, socdata->sensor_ctrl + REG_CLR, + socdata->power_down_mask); + if (ret) + return ret; + + ret = regmap_write(map, socdata->sensor_ctrl + REG_SET, + socdata->measure_temp_mask); if (ret) return ret; + /* + * According to the temp sensor designers, it may require up to ~17us + * to complete a measurement. 
+ */ + usleep_range(20, 50); + return 0; } -static SIMPLE_DEV_PM_OPS(imx_thermal_pm_ops, - imx_thermal_suspend, imx_thermal_resume); +static const struct dev_pm_ops imx_thermal_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(imx_thermal_suspend, imx_thermal_resume) + SET_RUNTIME_PM_OPS(imx_thermal_runtime_suspend, + imx_thermal_runtime_resume, NULL) +}; static struct platform_driver imx_thermal = { .driver = { diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c index 0966551cbaaa0aad9259c39ee6a91b745a7af406..793d7b58fc6501b80b77bb5b42ee6439f9955e69 100644 --- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c @@ -402,6 +402,10 @@ static void int3400_notify(acpi_handle handle, thermal_prop[3] = kasprintf(GFP_KERNEL, "EVENT=%d", therm_event); thermal_prop[4] = NULL; kobject_uevent_env(&priv->thermal->device.kobj, KOBJ_CHANGE, thermal_prop); + kfree(thermal_prop[0]); + kfree(thermal_prop[1]); + kfree(thermal_prop[2]); + kfree(thermal_prop[3]); } static int int3400_thermal_get_temp(struct thermal_zone_device *thermal, diff --git a/drivers/thunderbolt/acpi.c b/drivers/thunderbolt/acpi.c index b5442f979b4d0170950d9ded3d3dbe9e3066d6da..6355fdf7d71a3eef91b5422401bbe176bfb8e4e0 100644 --- a/drivers/thunderbolt/acpi.c +++ b/drivers/thunderbolt/acpi.c @@ -7,6 +7,7 @@ */ #include +#include #include "tb.h" @@ -74,8 +75,18 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data, pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM))) { const struct device_link *link; + /* + * Make them both active first to make sure the NHI does + * not runtime suspend before the consumer. The + * pm_runtime_put() below then allows the consumer to + * runtime suspend again (which then allows NHI runtime + * suspend too now that the device link is established). 
+ */ + pm_runtime_get_sync(&pdev->dev); + link = device_link_add(&pdev->dev, &nhi->pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER | + DL_FLAG_RPM_ACTIVE | DL_FLAG_PM_RUNTIME); if (link) { dev_dbg(&nhi->pdev->dev, "created link from %s\n", @@ -84,6 +95,8 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data, dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n", dev_name(&pdev->dev)); } + + pm_runtime_put(&pdev->dev); } out_put: diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index d76880ae68c83be740ee77277955ed43986df9af..05562b3cca451a9bf5549a9edb6c3e9530653797 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -317,6 +317,7 @@ static struct tty_driver *gsm_tty_driver; #define GSM1_ESCAPE_BITS 0x20 #define XON 0x11 #define XOFF 0x13 +#define ISO_IEC_646_MASK 0x7F static const struct tty_port_operations gsm_port_ops; @@ -433,7 +434,7 @@ static u8 gsm_encode_modem(const struct gsm_dlci *dlci) modembits |= MDM_RTR; if (dlci->modem_tx & TIOCM_RI) modembits |= MDM_IC; - if (dlci->modem_tx & TIOCM_CD) + if (dlci->modem_tx & TIOCM_CD || dlci->gsm->initiator) modembits |= MDM_DV; return modembits; } @@ -526,7 +527,8 @@ static int gsm_stuff_frame(const u8 *input, u8 *output, int len) int olen = 0; while (len--) { if (*input == GSM1_SOF || *input == GSM1_ESCAPE - || *input == XON || *input == XOFF) { + || (*input & ISO_IEC_646_MASK) == XON + || (*input & ISO_IEC_646_MASK) == XOFF) { *output++ = GSM1_ESCAPE; *output++ = *input++ ^ GSM1_ESCAPE_BITS; olen++; @@ -1424,6 +1426,9 @@ static void gsm_dlci_close(struct gsm_dlci *dlci) if (dlci->addr != 0) { tty_port_tty_hangup(&dlci->port, false); kfifo_reset(&dlci->fifo); + /* Ensure that gsmtty_open() can return. */ + tty_port_set_initialized(&dlci->port, 0); + wake_up_interruptible(&dlci->port.open_wait); } else dlci->gsm->dead = true; wake_up(&dlci->gsm->event); @@ -1483,7 +1488,7 @@ static void gsm_dlci_t1(struct timer_list *t) dlci->mode = DLCI_MODE_ADM; gsm_dlci_open(dlci); } else { - gsm_dlci_close(dlci); + gsm_dlci_begin_close(dlci); /* prevent half open link */ } break; @@ -1717,7 +1722,12 @@ static void gsm_dlci_release(struct gsm_dlci *dlci) gsm_destroy_network(dlci); mutex_unlock(&dlci->mutex); - tty_hangup(tty); + /* We cannot use tty_hangup() because in tty_kref_put() the tty + * driver assumes that the hangup queue is free and reuses it to + * queue release_one_tty() -> NULL pointer panic in + * process_one_work(). 
+ */ + tty_vhangup(tty); tty_port_tty_set(&dlci->port, NULL); tty_kref_put(tty); @@ -3171,9 +3181,9 @@ static void gsmtty_throttle(struct tty_struct *tty) if (dlci->state == DLCI_CLOSED) return; if (C_CRTSCTS(tty)) - dlci->modem_tx &= ~TIOCM_DTR; + dlci->modem_tx &= ~TIOCM_RTS; dlci->throttled = true; - /* Send an MSC with DTR cleared */ + /* Send an MSC with RTS cleared */ gsmtty_modem_update(dlci, 0); } @@ -3183,9 +3193,9 @@ static void gsmtty_unthrottle(struct tty_struct *tty) if (dlci->state == DLCI_CLOSED) return; if (C_CRTSCTS(tty)) - dlci->modem_tx |= TIOCM_DTR; + dlci->modem_tx |= TIOCM_RTS; dlci->throttled = false; - /* Send an MSC with DTR set */ + /* Send an MSC with RTS set */ gsmtty_modem_update(dlci, 0); } diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index e4f4b2186bcecce1467d37c1221ac1353610a882..58190135efb7dd3088f9895cdceb1e01a646d7c8 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -1372,7 +1372,7 @@ handle_newline: put_tty_queue(c, ldata); smp_store_release(&ldata->canon_head, ldata->read_head); kill_fasync(&tty->fasync, SIGIO, POLL_IN); - wake_up_interruptible_poll(&tty->read_wait, EPOLLIN); + wake_up_interruptible_poll(&tty->read_wait, EPOLLIN | EPOLLRDNORM); return 0; } } @@ -1653,7 +1653,7 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp, if (read_cnt(ldata)) { kill_fasync(&tty->fasync, SIGIO, POLL_IN); - wake_up_interruptible_poll(&tty->read_wait, EPOLLIN); + wake_up_interruptible_poll(&tty->read_wait, EPOLLIN | EPOLLRDNORM); } } @@ -2024,7 +2024,7 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty, return false; canon_head = smp_load_acquire(&ldata->canon_head); - n = min(*nr + 1, canon_head - ldata->read_tail); + n = min(*nr, canon_head - ldata->read_tail); tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1); size = min_t(size_t, tail + n, N_TTY_BUF_SIZE); @@ -2046,10 +2046,8 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty, n += N_TTY_BUF_SIZE; c = n + found; - if (!found || read_buf(ldata, eol) != __DISABLED_CHAR) { - c = min(*nr, c); + if (!found || read_buf(ldata, eol) != __DISABLED_CHAR) n = c; - } n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu tail:%zu more:%zu\n", __func__, eol, found, n, c, tail, more); diff --git a/drivers/tty/serial/8250/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c index 673cda3d011d0c64c0c71a19dfa3052447d21c4f..948d0a1c6ae8edc91eefaadb5037f4a311d12509 100644 --- a/drivers/tty/serial/8250/8250_gsc.c +++ b/drivers/tty/serial/8250/8250_gsc.c @@ -26,7 +26,7 @@ static int __init serial_init_chip(struct parisc_device *dev) unsigned long address; int err; -#ifdef CONFIG_64BIT +#if defined(CONFIG_64BIT) && defined(CONFIG_IOSAPIC) if (!dev->irq && (dev->id.sversion == 0xad)) dev->irq = iosapic_serial_irq(dev); #endif diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c index 65e9045dafe6d16a38260a4ee4dc23dc1026ab28..5595c63c46eaf33bc4fd4d0ef0360a8f0b3b6ea7 100644 --- a/drivers/tty/serial/8250/8250_of.c +++ b/drivers/tty/serial/8250/8250_of.c @@ -83,8 +83,17 @@ static int of_platform_serial_setup(struct platform_device *ofdev, port->mapsize = resource_size(&resource); /* Check for shifted address mapping */ - if (of_property_read_u32(np, "reg-offset", &prop) == 0) + if (of_property_read_u32(np, "reg-offset", &prop) == 0) { + if (prop >= port->mapsize) { + dev_warn(&ofdev->dev, "reg-offset %u exceeds region size %pa\n", + prop, &port->mapsize); + ret = -EINVAL; + goto err_unprepare; + } + port->mapbase += prop; + port->mapsize -= prop; + } 
port->iotype = UPIO_MEM; if (of_property_read_u32(np, "reg-io-width", &prop) == 0) { diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 019328d644d8b5852b05bb9c8a6c0efc204cf6e3..3a985e953b8e904f69da97f163af86010e85c544 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -5171,8 +5171,30 @@ static const struct pci_device_id serial_pci_tbl[] = { { PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */ pbn_b2_4_115200 }, + /* Brainboxes Devices */ /* - * BrainBoxes UC-260 + * Brainboxes UC-101 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0BA1, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + /* + * Brainboxes UC-235/246 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0AA1, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_1_115200 }, + /* + * Brainboxes UC-257 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0861, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + /* + * Brainboxes UC-260/271/701/756 */ { PCI_VENDOR_ID_INTASHIELD, 0x0D21, PCI_ANY_ID, PCI_ANY_ID, @@ -5180,7 +5202,81 @@ static const struct pci_device_id serial_pci_tbl[] = { pbn_b2_4_115200 }, { PCI_VENDOR_ID_INTASHIELD, 0x0E34, PCI_ANY_ID, PCI_ANY_ID, - PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00, + PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00, + pbn_b2_4_115200 }, + /* + * Brainboxes UC-268 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0841, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_4_115200 }, + /* + * Brainboxes UC-275/279 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0881, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_8_115200 }, + /* + * Brainboxes UC-302 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x08E1, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + /* + * Brainboxes UC-310 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x08C1, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + /* + * Brainboxes UC-313 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x08A3, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + /* + * Brainboxes UC-320/324 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0A61, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_1_115200 }, + /* + * Brainboxes UC-346 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0B02, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_4_115200 }, + /* + * Brainboxes UC-357 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0A81, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0A83, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + /* + * Brainboxes UC-368 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0C41, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_4_115200 }, + /* + * Brainboxes UC-420/431 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0921, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, pbn_b2_4_115200 }, /* * Perle PCI-RAS cards diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c index 3284f34e9dfe145036d60a16ba63351eb84ed15f..75d61e038a77580ecaceebb529b00ba915f98465 100644 --- a/drivers/tty/serial/amba-pl010.c +++ b/drivers/tty/serial/amba-pl010.c @@ -448,14 +448,11 @@ pl010_set_termios(struct uart_port *port, struct ktermios *termios, if ((termios->c_cflag & CREAD) == 0) uap->port.ignore_status_mask |= UART_DUMMY_RSR_RX; - /* first, disable everything */ old_cr = readb(uap->port.membase + UART010_CR) & ~UART010_CR_MSIE; if (UART_ENABLE_MS(port, termios->c_cflag)) old_cr |= UART010_CR_MSIE; - writel(0, uap->port.membase + UART010_CR); - /* Set baud rate */ quot -= 1; writel((quot & 0xf00) >> 8, uap->port.membase + UART010_LCRM); diff --git a/drivers/tty/serial/amba-pl011.c 
b/drivers/tty/serial/amba-pl011.c index 4cf696fc41765a094ab0f941a1e88a7ad0966053..8e451958288be6b007e6140218aa42e5684acbee 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -2136,32 +2136,13 @@ static const char *pl011_type(struct uart_port *port) return uap->port.type == PORT_AMBA ? uap->type : NULL; } -/* - * Release the memory region(s) being used by 'port' - */ -static void pl011_release_port(struct uart_port *port) -{ - release_mem_region(port->mapbase, SZ_4K); -} - -/* - * Request the memory region(s) being used by 'port' - */ -static int pl011_request_port(struct uart_port *port) -{ - return request_mem_region(port->mapbase, SZ_4K, "uart-pl011") - != NULL ? 0 : -EBUSY; -} - /* * Configure/autoconfigure the port. */ static void pl011_config_port(struct uart_port *port, int flags) { - if (flags & UART_CONFIG_TYPE) { + if (flags & UART_CONFIG_TYPE) port->type = PORT_AMBA; - pl011_request_port(port); - } } /* @@ -2176,6 +2157,8 @@ static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser) ret = -EINVAL; if (ser->baud_base < 9600) ret = -EINVAL; + if (port->mapbase != (unsigned long) ser->iomem_base) + ret = -EINVAL; return ret; } @@ -2195,8 +2178,6 @@ static const struct uart_ops amba_pl011_pops = { .flush_buffer = pl011_dma_flush_buffer, .set_termios = pl011_set_termios, .type = pl011_type, - .release_port = pl011_release_port, - .request_port = pl011_request_port, .config_port = pl011_config_port, .verify_port = pl011_verify_port, #ifdef CONFIG_CONSOLE_POLL @@ -2226,8 +2207,6 @@ static const struct uart_ops sbsa_uart_pops = { .shutdown = sbsa_uart_shutdown, .set_termios = sbsa_uart_set_termios, .type = pl011_type, - .release_port = pl011_release_port, - .request_port = pl011_request_port, .config_port = pl011_config_port, .verify_port = pl011_verify_port, #ifdef CONFIG_CONSOLE_POLL diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index a24e5c2b30bc956ab1f828b8fb702125e3259cd5..602065bfc9bb8766e9c16038dd10fabc90f5ae89 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -1004,6 +1004,13 @@ static void atmel_tx_dma(struct uart_port *port) desc->callback = atmel_complete_tx_dma; desc->callback_param = atmel_port; atmel_port->cookie_tx = dmaengine_submit(desc); + if (dma_submit_error(atmel_port->cookie_tx)) { + dev_err(port->dev, "dma_submit_error %d\n", + atmel_port->cookie_tx); + return; + } + + dma_async_issue_pending(chan); } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) @@ -1264,6 +1271,13 @@ static int atmel_prepare_rx_dma(struct uart_port *port) desc->callback_param = port; atmel_port->desc_rx = desc; atmel_port->cookie_rx = dmaengine_submit(desc); + if (dma_submit_error(atmel_port->cookie_rx)) { + dev_err(port->dev, "dma_submit_error %d\n", + atmel_port->cookie_rx); + goto chan_err; + } + + dma_async_issue_pending(atmel_port->chan_rx); return 0; diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 28cc328ddb6ebb3dd391c44349fb75f610ff7cc4..93cd8ad57f385c595e0d842b4f003e3cf9c55d87 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -508,18 +508,21 @@ static void imx_uart_stop_tx(struct uart_port *port) static void imx_uart_stop_rx(struct uart_port *port) { struct imx_port *sport = (struct imx_port *)port; - u32 ucr1, ucr2; + u32 ucr1, ucr2, ucr4; ucr1 = imx_uart_readl(sport, UCR1); ucr2 = imx_uart_readl(sport, UCR2); + ucr4 = imx_uart_readl(sport, UCR4); if (sport->dma_is_enabled) { ucr1 &= ~(UCR1_RXDMAEN | 
UCR1_ATDMAEN); } else { ucr1 &= ~UCR1_RRDYEN; ucr2 &= ~UCR2_ATEN; + ucr4 &= ~UCR4_OREN; } imx_uart_writel(sport, ucr1, UCR1); + imx_uart_writel(sport, ucr4, UCR4); ucr2 &= ~UCR2_RXEN; imx_uart_writel(sport, ucr2, UCR2); @@ -1576,7 +1579,7 @@ static void imx_uart_shutdown(struct uart_port *port) imx_uart_writel(sport, ucr1, UCR1); ucr4 = imx_uart_readl(sport, UCR4); - ucr4 &= ~(UCR4_OREN | UCR4_TCEN); + ucr4 &= ~UCR4_TCEN; imx_uart_writel(sport, ucr4, UCR4); spin_unlock_irqrestore(&sport->port.lock, flags); diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index 8e3b83fac33cc8e8b376c1b18a67eaf373d32382..5fcac9eba277475a970a74378aab64b5157c499c 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -737,12 +737,15 @@ static irqreturn_t sc16is7xx_irq(int irq, void *dev_id) static void sc16is7xx_tx_proc(struct kthread_work *ws) { struct uart_port *port = &(to_sc16is7xx_one(ws, tx_work)->port); + struct sc16is7xx_port *s = dev_get_drvdata(port->dev); if ((port->rs485.flags & SER_RS485_ENABLED) && (port->rs485.delay_rts_before_send > 0)) msleep(port->rs485.delay_rts_before_send); + mutex_lock(&s->efr_lock); sc16is7xx_handle_tx(port); + mutex_unlock(&s->efr_lock); } static void sc16is7xx_reconf_rs485(struct uart_port *port) diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 046bedca7b8f550f79bca2010bc6768aae6186d6..be0d9922e320e04a0c609cbece0e66a8b698b1a0 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -162,7 +162,7 @@ static void uart_port_dtr_rts(struct uart_port *uport, int raise) int RTS_after_send = !!(uport->rs485.flags & SER_RS485_RTS_AFTER_SEND); if (raise) { - if (rs485_on && !RTS_after_send) { + if (rs485_on && RTS_after_send) { uart_set_mctrl(uport, TIOCM_DTR); uart_clear_mctrl(uport, TIOCM_RTS); } else { @@ -171,7 +171,7 @@ static void uart_port_dtr_rts(struct uart_port *uport, int raise) } else { unsigned int clear = TIOCM_DTR; - clear |= (!rs485_on || !RTS_after_send) ? TIOCM_RTS : 0; + clear |= (!rs485_on || RTS_after_send) ? 
TIOCM_RTS : 0; uart_clear_mctrl(uport, clear); } } @@ -2414,7 +2414,8 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state, * We probably don't need a spinlock around this, but */ spin_lock_irqsave(&port->lock, flags); - port->ops->set_mctrl(port, port->mctrl & TIOCM_DTR); + port->mctrl &= TIOCM_DTR; + port->ops->set_mctrl(port, port->mctrl); spin_unlock_irqrestore(&port->lock, flags); /* diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c index 844059861f9e18057001610d9b16df591189ac52..0eadf0547175c5150fc3c730cd4b7638f5ad420a 100644 --- a/drivers/tty/serial/stm32-usart.c +++ b/drivers/tty/serial/stm32-usart.c @@ -574,7 +574,7 @@ static void stm32_usart_start_tx(struct uart_port *port) struct serial_rs485 *rs485conf = &port->rs485; struct circ_buf *xmit = &port->state->xmit; - if (uart_circ_empty(xmit)) + if (uart_circ_empty(xmit) && !port->x_char) return; if (rs485conf->flags & SER_RS485_ENABLED) { diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c index 7081ab322b4028e1284d11391c88741f30470650..48923cd8c07d14d9de69e1ff88476423ea0607f8 100644 --- a/drivers/tty/serial/uartlite.c +++ b/drivers/tty/serial/uartlite.c @@ -615,7 +615,7 @@ static struct uart_driver ulite_uart_driver = { * * Returns: 0 on success, <0 otherwise */ -static int ulite_assign(struct device *dev, int id, u32 base, int irq, +static int ulite_assign(struct device *dev, int id, phys_addr_t base, int irq, struct uartlite_data *pdata) { struct uart_port *port; diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c index 90e4fcd3dc39a932888fa18497b184c2fdc6c0d1..a9c6ea8986af08dcc004011321b67135df86cd62 100644 --- a/drivers/tty/vt/vt_ioctl.c +++ b/drivers/tty/vt/vt_ioctl.c @@ -699,8 +699,8 @@ static int vt_setactivate(struct vt_setactivate __user *sa) if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES) return -ENXIO; - vsa.console = array_index_nospec(vsa.console, MAX_NR_CONSOLES + 1); vsa.console--; + vsa.console = array_index_nospec(vsa.console, MAX_NR_CONSOLES); console_lock(); ret = vc_allocate(vsa.console); if (ret) { @@ -945,6 +945,7 @@ int vt_ioctl(struct tty_struct *tty, return -ENXIO; arg--; + arg = array_index_nospec(arg, MAX_NR_CONSOLES); console_lock(); ret = vc_allocate(arg); console_unlock(); diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c index a18d7c4222ddfedb62c3e1605190239601c1a1d5..3c705f1bead8c17acec460896f48f6cf13011274 100644 --- a/drivers/usb/common/ulpi.c +++ b/drivers/usb/common/ulpi.c @@ -39,8 +39,11 @@ static int ulpi_match(struct device *dev, struct device_driver *driver) struct ulpi *ulpi = to_ulpi_dev(dev); const struct ulpi_device_id *id; - /* Some ULPI devices don't have a vendor id so rely on OF match */ - if (ulpi->id.vendor == 0) + /* + * Some ULPI devices don't have a vendor id + * or provide an id_table so rely on OF match. 
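The ulpi_match() change above makes bus matching fall back to the firmware (OF) description whenever the device reports no vendor ID or the driver ships no ID table. A minimal user-space sketch of that matching rule follows; the toy_* types, the of_match() helper, and the compatible strings are all invented for illustration and are not the kernel's ULPI API.

    /* Build: cc -std=c11 ulpi_match_sketch.c */
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    struct toy_id { unsigned short vendor, product; };
    struct toy_device { struct toy_id id; const char *of_compatible; };
    struct toy_driver { const struct toy_id *id_table; const char *of_compatible; };

    /* Stand-in for of_driver_match_device(): compare firmware descriptions. */
    static bool of_match(const struct toy_device *dev, const struct toy_driver *drv)
    {
        return dev->of_compatible && drv->of_compatible &&
               strcmp(dev->of_compatible, drv->of_compatible) == 0;
    }

    static bool toy_match(const struct toy_device *dev, const struct toy_driver *drv)
    {
        const struct toy_id *id;

        /* No vendor ID, or no ID table at all: only OF data can decide. */
        if (dev->id.vendor == 0 || !drv->id_table)
            return of_match(dev, drv);

        for (id = drv->id_table; id->vendor; id++)
            if (id->vendor == dev->id.vendor && id->product == dev->id.product)
                return true;

        return false;
    }

    int main(void)
    {
        static const struct toy_id table[] = { { 0x1234, 0x0001 }, { 0, 0 } };
        struct toy_driver drv = { .id_table = table, .of_compatible = "acme,phy" };
        struct toy_device dev = { .id = { 0, 0 }, .of_compatible = "acme,phy" };

        printf("%d\n", toy_match(&dev, &drv)); /* 1: falls back to OF match */
        return 0;
    }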
+ */ + if (ulpi->id.vendor == 0 || !drv->id_table) return of_driver_match_device(dev, driver); for (id = drv->id_table; id->vendor; id++) @@ -129,6 +132,7 @@ static const struct attribute_group *ulpi_dev_attr_groups[] = { static void ulpi_dev_release(struct device *dev) { + of_node_put(dev->of_node); kfree(to_ulpi_dev(dev)); } @@ -246,12 +250,16 @@ static int ulpi_register(struct device *dev, struct ulpi *ulpi) return ret; ret = ulpi_read_id(ulpi); - if (ret) + if (ret) { + of_node_put(ulpi->dev.of_node); return ret; + } ret = device_register(&ulpi->dev); - if (ret) + if (ret) { + put_device(&ulpi->dev); return ret; + } dev_dbg(&ulpi->dev, "registered ULPI PHY: vendor %04x, product %04x\n", ulpi->id.vendor, ulpi->id.product); @@ -298,7 +306,6 @@ EXPORT_SYMBOL_GPL(ulpi_register_interface); */ void ulpi_unregister_interface(struct ulpi *ulpi) { - of_node_put(ulpi->dev.of_node); device_unregister(&ulpi->dev); } EXPORT_SYMBOL_GPL(ulpi_unregister_interface); diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index f161c603278c919bc9d631991eda9336e52d0a45..92357f32d2c17f912bbb3614085bd4a3231b8539 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1562,6 +1562,13 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags) urb->hcpriv = NULL; INIT_LIST_HEAD(&urb->urb_list); atomic_dec(&urb->use_count); + /* + * Order the write of urb->use_count above before the read + * of urb->reject below. Pairs with the memory barriers in + * usb_kill_urb() and usb_poison_urb(). + */ + smp_mb__after_atomic(); + atomic_dec(&urb->dev->urbnum); if (atomic_read(&urb->reject)) wake_up(&usb_kill_urb_queue); @@ -1666,6 +1673,13 @@ static void __usb_hcd_giveback_urb(struct urb *urb) usb_anchor_resume_wakeups(anchor); atomic_dec(&urb->use_count); + /* + * Order the write of urb->use_count above before the read + * of urb->reject below. Pairs with the memory barriers in + * usb_kill_urb() and usb_poison_urb(). + */ + smp_mb__after_atomic(); + if (unlikely(atomic_read(&urb->reject))) wake_up(&usb_kill_urb_queue); usb_put_urb(urb); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 24f7010c1ee6d567362c4402f86df0e9bec5aa39..6ab15e05a6f9e003a67ee2700a9682f48d4a03c6 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -1109,7 +1109,10 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) } else { hub_power_on(hub, true); } - } + /* Give some time on remote wakeup to let links to transit to U0 */ + } else if (hub_is_superspeed(hub->hdev)) + msleep(20); + init2: /* diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c index 357b149b20d3a1cafdbb1d36aba1a157984166f0..9c285026f827675d099e7f3c38a39b1d6b07e47e 100644 --- a/drivers/usb/core/urb.c +++ b/drivers/usb/core/urb.c @@ -706,6 +706,12 @@ void usb_kill_urb(struct urb *urb) if (!(urb && urb->dev && urb->ep)) return; atomic_inc(&urb->reject); + /* + * Order the write of urb->reject above before the read + * of urb->use_count below. Pairs with the barriers in + * __usb_hcd_giveback_urb() and usb_hcd_submit_urb(). + */ + smp_mb__after_atomic(); usb_hcd_unlink_urb(urb, -ENOENT); wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0); @@ -747,6 +753,12 @@ void usb_poison_urb(struct urb *urb) if (!urb) return; atomic_inc(&urb->reject); + /* + * Order the write of urb->reject above before the read + * of urb->use_count below. Pairs with the barriers in + * __usb_hcd_giveback_urb() and usb_hcd_submit_urb(). 
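The usb_kill_urb()/usb_poison_urb() hunks above pair a full barrier after incrementing urb->reject with the barriers added after the atomic_dec() of urb->use_count in the HCD paths, so that at least one side always observes the other's update. The sketch below models that store-then-load-on-the-other-variable pattern with C11 seq_cst atomics in user space; it is an analogy to the kernel primitives, not the kernel code itself, and the function names are invented.

    /* Build: cc -std=c11 urb_barrier_sketch.c */
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int reject;          /* stand-in for urb->reject    */
    static atomic_int use_count = 1;   /* stand-in for urb->use_count */

    /* Kill path: publish the kill request, then check if the URB is busy.
     * The implicit seq_cst ordering plays the role of smp_mb__after_atomic(). */
    static int killer_sees_busy(void)
    {
        atomic_fetch_add(&reject, 1);        /* store reject ...        */
        return atomic_load(&use_count) != 0; /* ... then load use_count */
    }

    /* Completion path: drop the reference, then check whether a killer
     * is waiting and therefore must be woken. */
    static int giveback_must_wake(void)
    {
        atomic_fetch_sub(&use_count, 1);     /* store use_count ...  */
        return atomic_load(&reject) != 0;    /* ... then load reject */
    }

    int main(void)
    {
        /* With full ordering on both sides, the two paths cannot both miss
         * each other's store, so a kill request is never lost. */
        printf("killer sees busy:   %d\n", killer_sees_busy());
        printf("giveback must wake: %d\n", giveback_must_wake());
        return 0;
    }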
+ */ + smp_mb__after_atomic(); if (!urb->dev || !urb->ep) return; diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index 641e4251cb7f10d501e059532092918686eaf108..03d16a08261d88e9f1ad91660cb363ed11155b65 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h @@ -1406,6 +1406,7 @@ void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg); void dwc2_hsotg_disconnect(struct dwc2_hsotg *dwc2); int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode); #define dwc2_is_device_connected(hsotg) (hsotg->connected) +#define dwc2_is_device_enabled(hsotg) (hsotg->enabled) int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg); int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup); int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg); @@ -1434,6 +1435,7 @@ static inline int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode) { return 0; } #define dwc2_is_device_connected(hsotg) (0) +#define dwc2_is_device_enabled(hsotg) (0) static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg) { return 0; } static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, diff --git a/drivers/usb/dwc2/drd.c b/drivers/usb/dwc2/drd.c index aa6eb76f64ddc0ea78b39bf53451005fa7bc87a1..36f2c38416e5ec4c2b942c0a49db3571ee9d04fd 100644 --- a/drivers/usb/dwc2/drd.c +++ b/drivers/usb/dwc2/drd.c @@ -109,8 +109,10 @@ static int dwc2_drd_role_sw_set(struct usb_role_switch *sw, enum usb_role role) already = dwc2_ovr_avalid(hsotg, true); } else if (role == USB_ROLE_DEVICE) { already = dwc2_ovr_bvalid(hsotg, true); - /* This clear DCTL.SFTDISCON bit */ - dwc2_hsotg_core_connect(hsotg); + if (dwc2_is_device_enabled(hsotg)) { + /* This clear DCTL.SFTDISCON bit */ + dwc2_hsotg_core_connect(hsotg); + } } else { if (dwc2_is_device_mode(hsotg)) { if (!dwc2_ovr_bvalid(hsotg, false)) diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 449f19c3633c23cde59fcb2d55739f34ccf9b17a..ec54971063f8f499d77fe5e1c842b268a68d6f0a 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -5032,7 +5032,7 @@ int dwc2_hsotg_suspend(struct dwc2_hsotg *hsotg) hsotg->gadget.speed = USB_SPEED_UNKNOWN; spin_unlock_irqrestore(&hsotg->lock, flags); - for (ep = 0; ep < hsotg->num_of_eps; ep++) { + for (ep = 1; ep < hsotg->num_of_eps; ep++) { if (hsotg->eps_in[ep]) dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep); if (hsotg->eps_out[ep]) diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 17117870f6cea1989f9df85b841f5f8b2c671e4d..98df8d52c765c9b4147a3d572907b12d40678bb3 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -81,8 +81,8 @@ static const struct acpi_gpio_mapping acpi_dwc3_byt_gpios[] = { static struct gpiod_lookup_table platform_bytcr_gpios = { .dev_id = "0000:00:16.0", .table = { - GPIO_LOOKUP("INT33FC:00", 54, "reset", GPIO_ACTIVE_HIGH), - GPIO_LOOKUP("INT33FC:02", 14, "cs", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("INT33FC:00", 54, "cs", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("INT33FC:02", 14, "reset", GPIO_ACTIVE_HIGH), {} }, }; diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c index 2a29e2f681fe6f0e8285362d41db0ac3cb034927..504f8af4d0f80ba319276d9c8347a6c0d5b2f38c 100644 --- a/drivers/usb/dwc3/dwc3-qcom.c +++ b/drivers/usb/dwc3/dwc3-qcom.c @@ -764,9 +764,12 @@ static int dwc3_qcom_probe(struct platform_device *pdev) if (qcom->acpi_pdata->is_urs) { qcom->urs_usb = dwc3_qcom_create_urs_usb_platdev(dev); - if (!qcom->urs_usb) { + if 
(IS_ERR_OR_NULL(qcom->urs_usb)) { dev_err(dev, "failed to create URS USB platdev\n"); - return -ENODEV; + if (!qcom->urs_usb) + return -ENODEV; + else + return PTR_ERR(qcom->urs_usb); } } } diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index e9a87e1f495081f12ef6263c38c53a0bd3c25f27..b68fe48ac57923a66598b39ad0392344a75fbec6 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -1072,6 +1072,19 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb, if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable) trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id); + /* + * As per data book 4.2.3.2TRB Control Bit Rules section + * + * The controller autonomously checks the HWO field of a TRB to determine if the + * entire TRB is valid. Therefore, software must ensure that the rest of the TRB + * is valid before setting the HWO field to '1'. In most systems, this means that + * software must update the fourth DWORD of a TRB last. + * + * However there is a possibility of CPU re-ordering here which can cause + * controller to observe the HWO bit set prematurely. + * Add a write memory barrier to prevent CPU re-ordering. + */ + wmb(); trb->ctrl |= DWC3_TRB_CTRL_HWO; dwc3_ep_inc_enq(dep); @@ -3762,9 +3775,11 @@ static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt) unsigned long flags; irqreturn_t ret = IRQ_NONE; + local_bh_disable(); spin_lock_irqsave(&dwc->lock, flags); ret = dwc3_process_event_buf(evt); spin_unlock_irqrestore(&dwc->lock, flags); + local_bh_enable(); return ret; } diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 8bec0cbf844ed944b90f3338f937cd07400c2208..a980799900e71c69545b799ce143719344b0de94 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -1944,6 +1944,9 @@ unknown: if (w_index != 0x5 || (w_value >> 8)) break; interface = w_value & 0xFF; + if (interface >= MAX_CONFIG_INTERFACES || + !os_desc_cfg->interface[interface]) + break; buf[6] = w_index; count = count_ext_prop(os_desc_cfg, interface); diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index cbb7947f366f939b900fc1918622f530933cb3e1..bb0d92837f6770f804310b25faaa2bd7d5453c4a 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -614,7 +614,7 @@ static int ffs_ep0_open(struct inode *inode, struct file *file) file->private_data = ffs; ffs_data_opened(ffs); - return 0; + return stream_open(inode, file); } static int ffs_ep0_release(struct inode *inode, struct file *file) @@ -1152,7 +1152,7 @@ ffs_epfile_open(struct inode *inode, struct file *file) file->private_data = epfile; ffs_data_opened(epfile->ffs); - return 0; + return stream_open(inode, file); } static int ffs_aio_cancel(struct kiocb *kiocb) @@ -1710,16 +1710,24 @@ static void ffs_data_put(struct ffs_data *ffs) static void ffs_data_closed(struct ffs_data *ffs) { + struct ffs_epfile *epfiles; + unsigned long flags; + ENTER(); if (atomic_dec_and_test(&ffs->opened)) { if (ffs->no_disconnect) { ffs->state = FFS_DEACTIVATED; - if (ffs->epfiles) { - ffs_epfiles_destroy(ffs->epfiles, - ffs->eps_count); - ffs->epfiles = NULL; - } + spin_lock_irqsave(&ffs->eps_lock, flags); + epfiles = ffs->epfiles; + ffs->epfiles = NULL; + spin_unlock_irqrestore(&ffs->eps_lock, + flags); + + if (epfiles) + ffs_epfiles_destroy(epfiles, + ffs->eps_count); + if (ffs->setup_state == FFS_SETUP_PENDING) __ffs_ep0_stall(ffs); } else { @@ -1766,14 +1774,27 @@ static struct ffs_data 
*ffs_data_new(const char *dev_name) static void ffs_data_clear(struct ffs_data *ffs) { + struct ffs_epfile *epfiles; + unsigned long flags; + ENTER(); ffs_closed(ffs); BUG_ON(ffs->gadget); - if (ffs->epfiles) { - ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count); + spin_lock_irqsave(&ffs->eps_lock, flags); + epfiles = ffs->epfiles; + ffs->epfiles = NULL; + spin_unlock_irqrestore(&ffs->eps_lock, flags); + + /* + * potential race possible between ffs_func_eps_disable + * & ffs_epfile_release therefore maintaining a local + * copy of epfile will save us from use-after-free. + */ + if (epfiles) { + ffs_epfiles_destroy(epfiles, ffs->eps_count); ffs->epfiles = NULL; } @@ -1921,12 +1942,15 @@ static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count) static void ffs_func_eps_disable(struct ffs_function *func) { - struct ffs_ep *ep = func->eps; - struct ffs_epfile *epfile = func->ffs->epfiles; - unsigned count = func->ffs->eps_count; + struct ffs_ep *ep; + struct ffs_epfile *epfile; + unsigned short count; unsigned long flags; spin_lock_irqsave(&func->ffs->eps_lock, flags); + count = func->ffs->eps_count; + epfile = func->ffs->epfiles; + ep = func->eps; while (count--) { /* pending requests get nuked */ if (likely(ep->ep)) @@ -1944,14 +1968,18 @@ static void ffs_func_eps_disable(struct ffs_function *func) static int ffs_func_eps_enable(struct ffs_function *func) { - struct ffs_data *ffs = func->ffs; - struct ffs_ep *ep = func->eps; - struct ffs_epfile *epfile = ffs->epfiles; - unsigned count = ffs->eps_count; + struct ffs_data *ffs; + struct ffs_ep *ep; + struct ffs_epfile *epfile; + unsigned short count; unsigned long flags; int ret = 0; spin_lock_irqsave(&func->ffs->eps_lock, flags); + ffs = func->ffs; + ep = func->eps; + epfile = ffs->epfiles; + count = ffs->eps_count; while(count--) { ep->ep->driver_data = ep; diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c index 282737e4609ce85cc3ee16b491de22f99f5b65dd..2c65a9bb3c81bc52a5cd073b758b90deba5c714f 100644 --- a/drivers/usb/gadget/function/f_sourcesink.c +++ b/drivers/usb/gadget/function/f_sourcesink.c @@ -583,6 +583,7 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in, if (is_iso) { switch (speed) { + case USB_SPEED_SUPER_PLUS: case USB_SPEED_SUPER: size = ss->isoc_maxpacket * (ss->isoc_mult + 1) * diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c index dd960cea642f3ae0fc5b5080734138ba481762cf..11cc6056b5902ddbdd28bfc670d176138f5c0177 100644 --- a/drivers/usb/gadget/function/f_uac2.c +++ b/drivers/usb/gadget/function/f_uac2.c @@ -176,7 +176,7 @@ static struct uac2_input_terminal_descriptor io_in_it_desc = { .bDescriptorSubtype = UAC_INPUT_TERMINAL, /* .bTerminalID = DYNAMIC */ - .wTerminalType = cpu_to_le16(UAC_INPUT_TERMINAL_UNDEFINED), + .wTerminalType = cpu_to_le16(UAC_INPUT_TERMINAL_MICROPHONE), .bAssocTerminal = 0, /* .bCSourceID = DYNAMIC */ .iChannelNames = 0, @@ -204,7 +204,7 @@ static struct uac2_output_terminal_descriptor io_out_ot_desc = { .bDescriptorSubtype = UAC_OUTPUT_TERMINAL, /* .bTerminalID = DYNAMIC */ - .wTerminalType = cpu_to_le16(UAC_OUTPUT_TERMINAL_UNDEFINED), + .wTerminalType = cpu_to_le16(UAC_OUTPUT_TERMINAL_SPEAKER), .bAssocTerminal = 0, /* .bSourceID = DYNAMIC */ /* .bCSourceID = DYNAMIC */ diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c index 64de9f1b874c55a3fdb4dddb49ecbdc4eec0aa1b..0f14c5291af0742af2b33695711912210297b490 100644 --- 
a/drivers/usb/gadget/function/rndis.c +++ b/drivers/usb/gadget/function/rndis.c @@ -637,14 +637,17 @@ static int rndis_set_response(struct rndis_params *params, rndis_set_cmplt_type *resp; rndis_resp_t *r; + BufLength = le32_to_cpu(buf->InformationBufferLength); + BufOffset = le32_to_cpu(buf->InformationBufferOffset); + if ((BufLength > RNDIS_MAX_TOTAL_SIZE) || + (BufOffset + 8 >= RNDIS_MAX_TOTAL_SIZE)) + return -EINVAL; + r = rndis_add_response(params, sizeof(rndis_set_cmplt_type)); if (!r) return -ENOMEM; resp = (rndis_set_cmplt_type *)r->buf; - BufLength = le32_to_cpu(buf->InformationBufferLength); - BufOffset = le32_to_cpu(buf->InformationBufferOffset); - #ifdef VERBOSE_DEBUG pr_debug("%s: Length: %d\n", __func__, BufLength); pr_debug("%s: Offset: %d\n", __func__, BufOffset); @@ -919,6 +922,7 @@ struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v) params->resp_avail = resp_avail; params->v = v; INIT_LIST_HEAD(¶ms->resp_queue); + spin_lock_init(¶ms->resp_lock); pr_debug("%s: configNr = %d\n", __func__, i); return params; @@ -1012,12 +1016,14 @@ void rndis_free_response(struct rndis_params *params, u8 *buf) { rndis_resp_t *r, *n; + spin_lock(¶ms->resp_lock); list_for_each_entry_safe(r, n, ¶ms->resp_queue, list) { if (r->buf == buf) { list_del(&r->list); kfree(r); } } + spin_unlock(¶ms->resp_lock); } EXPORT_SYMBOL_GPL(rndis_free_response); @@ -1027,14 +1033,17 @@ u8 *rndis_get_next_response(struct rndis_params *params, u32 *length) if (!length) return NULL; + spin_lock(¶ms->resp_lock); list_for_each_entry_safe(r, n, ¶ms->resp_queue, list) { if (!r->send) { r->send = 1; *length = r->length; + spin_unlock(¶ms->resp_lock); return r->buf; } } + spin_unlock(¶ms->resp_lock); return NULL; } EXPORT_SYMBOL_GPL(rndis_get_next_response); @@ -1051,7 +1060,9 @@ static rndis_resp_t *rndis_add_response(struct rndis_params *params, u32 length) r->length = length; r->send = 0; + spin_lock(¶ms->resp_lock); list_add_tail(&r->list, ¶ms->resp_queue); + spin_unlock(¶ms->resp_lock); return r; } diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h index f6167f7fea82b59da4b0eb40c0fd7effc06bec5c..6206b8b7490f64b0e11dcc71aa2d99691d5d1ac3 100644 --- a/drivers/usb/gadget/function/rndis.h +++ b/drivers/usb/gadget/function/rndis.h @@ -174,6 +174,7 @@ typedef struct rndis_params { void (*resp_avail)(void *v); void *v; struct list_head resp_queue; + spinlock_t resp_lock; } rndis_params; /* RNDIS Message parser and other useless functions */ diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c index 062dfac3039968d3f09868141aed177f91d8e5d4..33efa6915b91dc0745f1e0d12816c935c2f72822 100644 --- a/drivers/usb/gadget/legacy/raw_gadget.c +++ b/drivers/usb/gadget/legacy/raw_gadget.c @@ -1003,7 +1003,7 @@ static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io, ret = -EBUSY; goto out_unlock; } - if ((in && !ep->ep->caps.dir_in) || (!in && ep->ep->caps.dir_in)) { + if (in != usb_endpoint_dir_in(ep->ep->desc)) { dev_dbg(&dev->gadget->dev, "fail, wrong direction\n"); ret = -EINVAL; goto out_unlock; diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index 57d417a7c3e0a687e9fbc11b604b7bdb93aedc8f..601829a6b4badd7f0fd98805edcb0659f28f4a0f 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c @@ -2378,6 +2378,8 @@ static void handle_ext_role_switch_states(struct device *dev, switch (role) { case USB_ROLE_NONE: usb3->connection_state = 
USB_ROLE_NONE; + if (cur_role == USB_ROLE_HOST) + device_release_driver(host); if (usb3->driver) usb3_disconnect(usb3); usb3_vbus_out(usb3, false); diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c index d5e9d20c097d205054378fb79796296b969d4043..096f56a09e6a2ae53d1ac8d525574766608adee6 100644 --- a/drivers/usb/gadget/udc/udc-xilinx.c +++ b/drivers/usb/gadget/udc/udc-xilinx.c @@ -1612,6 +1612,8 @@ static void xudc_getstatus(struct xusb_udc *udc) break; case USB_RECIP_ENDPOINT: epnum = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK; + if (epnum >= XUSB_MAX_ENDPOINTS) + goto stall; target_ep = &udc->ep[epnum]; epcfgreg = udc->read_fn(udc->addr + target_ep->offset); halt = epcfgreg & XUSB_EP_CFG_STALL_MASK; @@ -1679,6 +1681,10 @@ static void xudc_set_clear_feature(struct xusb_udc *udc) case USB_RECIP_ENDPOINT: if (!udc->setup.wValue) { endpoint = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK; + if (endpoint >= XUSB_MAX_ENDPOINTS) { + xudc_ep0_stall(udc); + return; + } target_ep = &udc->ep[endpoint]; outinbit = udc->setup.wIndex & USB_ENDPOINT_DIR_MASK; outinbit = outinbit >> 7; diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c index 70dbd95c3f063a43db2035df59b152e66ea45984..be9e9db7cad1048f75aab2c01eb994a5171a6577 100644 --- a/drivers/usb/host/uhci-platform.c +++ b/drivers/usb/host/uhci-platform.c @@ -113,7 +113,8 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev) num_ports); } if (of_device_is_compatible(np, "aspeed,ast2400-uhci") || - of_device_is_compatible(np, "aspeed,ast2500-uhci")) { + of_device_is_compatible(np, "aspeed,ast2500-uhci") || + of_device_is_compatible(np, "aspeed,ast2600-uhci")) { uhci->is_aspeed = 1; dev_info(&pdev->dev, "Enabled Aspeed implementation workarounds\n"); diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index c1edcc9b13cece07c9dc0e00c27a7f0848f4875c..dc570ce4e8319ad09d6088e3dc2ee557ccba5c78 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -437,6 +437,9 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev) struct xhci_hcd *xhci = hcd_to_xhci(hcd); int ret; + if (pm_runtime_suspended(dev)) + pm_runtime_resume(dev); + ret = xhci_priv_suspend_quirk(hcd); if (ret) return ret; diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index b062a978bb1d3dcc050d7f9a610a24c96b5fae0e..37d340339b045048999cceb07c8cb6c0f3c37f5a 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -1091,6 +1091,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) int retval = 0; bool comp_timer_running = false; bool pending_portevent = false; + bool reinit_xhc = false; if (!hcd->state) return 0; @@ -1107,10 +1108,11 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); spin_lock_irq(&xhci->lock); - if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend) - hibernated = true; - if (!hibernated) { + if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend) + reinit_xhc = true; + + if (!reinit_xhc) { /* * Some controllers might lose power during suspend, so wait * for controller not ready bit to clear, just as in xHC init. 
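The xhci_resume() rework above folds the existing reasons to reinitialize the controller (hibernation, the XHCI_RESET_ON_RESUME quirk, broken_suspend) into a single reinit_xhc flag, and additionally forces reinitialization when USBSTS reports a Save/Restore Error or Host Controller Error. A condensed, stand-alone model of that decision is sketched below; the bit positions and the needs_reinit() name are assumptions chosen for the example, not taken from the driver.

    /* Build: cc -std=c11 xhci_reinit_sketch.c */
    #include <stdbool.h>
    #include <stdio.h>

    #define STS_SRE (1u << 10) /* Save/Restore Error (bit chosen for the example) */
    #define STS_HCE (1u << 12) /* Host Controller Error (bit chosen for the example) */

    static bool needs_reinit(bool hibernated, bool reset_on_resume_quirk,
                             bool broken_suspend, unsigned int usbsts)
    {
        /* The pre-existing conditions collapse into one flag ... */
        bool reinit = hibernated || reset_on_resume_quirk || broken_suspend;

        /* ... and even a "clean" resume is abandoned if the controller
         * reports an error in its status register. */
        if (usbsts & (STS_SRE | STS_HCE))
            reinit = true;

        return reinit;
    }

    int main(void)
    {
        printf("%d\n", needs_reinit(false, false, false, STS_HCE)); /* 1 */
        printf("%d\n", needs_reinit(false, false, false, 0));       /* 0 */
        return 0;
    }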
@@ -1143,12 +1145,17 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) spin_unlock_irq(&xhci->lock); return -ETIMEDOUT; } - temp = readl(&xhci->op_regs->status); } - /* If restore operation fails, re-initialize the HC during resume */ - if ((temp & STS_SRE) || hibernated) { + temp = readl(&xhci->op_regs->status); + /* re-initialize the HC on Restore Error, or Host Controller Error */ + if (temp & (STS_SRE | STS_HCE)) { + reinit_xhc = true; + xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp); + } + + if (reinit_xhc) { if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !(xhci_all_ports_seen_u0(xhci))) { del_timer_sync(&xhci->comp_mode_recovery_timer); @@ -1577,9 +1584,12 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag struct urb_priv *urb_priv; int num_tds; - if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, - true, true, __func__) <= 0) + if (!urb) return -EINVAL; + ret = xhci_check_args(hcd, urb->dev, urb->ep, + true, true, __func__); + if (ret <= 0) + return ret ? ret : -EINVAL; slot_id = urb->dev->slot_id; ep_index = xhci_get_endpoint_index(&urb->ep->desc); @@ -3379,7 +3389,7 @@ static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, return -EINVAL; ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__); if (ret <= 0) - return -EINVAL; + return ret ? ret : -EINVAL; if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) { xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion" " descriptor for ep 0x%x does not support streams\n", diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c index 8a3d9c0c8d8bc54304b5d46dc801ea1476af64f1..157b31d354ac23b14576f7f00cdb07d5d8827671 100644 --- a/drivers/usb/misc/ftdi-elan.c +++ b/drivers/usb/misc/ftdi-elan.c @@ -202,6 +202,7 @@ static void ftdi_elan_delete(struct kref *kref) mutex_unlock(&ftdi_module_lock); kfree(ftdi->bulk_in_buffer); ftdi->bulk_in_buffer = NULL; + kfree(ftdi); } static void ftdi_elan_put_kref(struct usb_ftdi *ftdi) diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index f26861246f653073448b9ccf311834d2f0611fe7..a2a38fc76ca53c602b94bba123fd9776d03a7a57 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -81,10 +81,10 @@ #define CH341_QUIRK_SIMULATE_BREAK BIT(1) static const struct usb_device_id id_table[] = { - { USB_DEVICE(0x1a86, 0x5512) }, { USB_DEVICE(0x1a86, 0x5523) }, { USB_DEVICE(0x1a86, 0x7522) }, { USB_DEVICE(0x1a86, 0x7523) }, + { USB_DEVICE(0x2184, 0x0057) }, { USB_DEVICE(0x4348, 0x5523) }, { USB_DEVICE(0x9986, 0x7523) }, { }, diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index f906c1308f9f9c136599d041b0f6d711b8f844ee..7ac668023da872f844a2e6764aee50f4187c9865 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -55,6 +55,7 @@ static void cp210x_enable_event_mode(struct usb_serial_port *port); static void cp210x_disable_event_mode(struct usb_serial_port *port); static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x0404, 0x034C) }, /* NCR Retail IO Box */ { USB_DEVICE(0x045B, 0x0053) }, /* Renesas RX610 RX-Stick */ { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */ { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ @@ -72,6 +73,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */ { USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */ { USB_DEVICE(0x0FDE, 0xCA05) }, /* OWL Wireless Electricity Monitor CM-160 */ + { 
USB_DEVICE(0x106F, 0x0003) }, /* CPI / Money Controls Bulk Coin Recycler */ { USB_DEVICE(0x10A6, 0xAA26) }, /* Knock-off DCU-11 cable */ { USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */ { USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */ diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index dfcf79bdfddce2effb15b979cbfd5102a270ccba..b74621dc2a65888024a3502515baae552109e59c 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -969,6 +969,7 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_023_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_034_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_101_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_159_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_1_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_2_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_3_PID) }, @@ -977,12 +978,14 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_6_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_7_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_8_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_235_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_257_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_1_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_2_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_3_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_4_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_313_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_320_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_324_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_1_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_2_PID) }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 755858ca20bacf7702efc2d42c68b135cbc984f3..d1a9564697a4becf6f32b52461eef0fe36c1bbc0 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -1506,6 +1506,9 @@ #define BRAINBOXES_VX_023_PID 0x1003 /* VX-023 ExpressCard 1 Port RS422/485 */ #define BRAINBOXES_VX_034_PID 0x1004 /* VX-034 ExpressCard 2 Port RS422/485 */ #define BRAINBOXES_US_101_PID 0x1011 /* US-101 1xRS232 */ +#define BRAINBOXES_US_159_PID 0x1021 /* US-159 1xRS232 */ +#define BRAINBOXES_US_235_PID 0x1017 /* US-235 1xRS232 */ +#define BRAINBOXES_US_320_PID 0x1019 /* US-320 1xRS422/485 */ #define BRAINBOXES_US_324_PID 0x1013 /* US-324 1xRS422/485 1Mbaud */ #define BRAINBOXES_US_606_1_PID 0x2001 /* US-606 6 Port RS232 Serial Port 1 and 2 */ #define BRAINBOXES_US_606_2_PID 0x2002 /* US-606 6 Port RS232 Serial Port 3 and 4 */ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 21b1488fe446128b56ee835735b230e11c97ac18..b878f4c87fee8e5d1eafc3cf5e46ce785a71e753 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -198,6 +198,8 @@ static void option_instat_callback(struct urb *urb); #define DELL_PRODUCT_5821E 0x81d7 #define DELL_PRODUCT_5821E_ESIM 0x81e0 +#define DELL_PRODUCT_5829E_ESIM 0x81e4 +#define DELL_PRODUCT_5829E 0x81e6 #define KYOCERA_VENDOR_ID 0x0c88 #define KYOCERA_PRODUCT_KPC650 0x17da @@ -1063,6 +1065,10 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E_ESIM), .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, + { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E), + 
.driver_info = RSVD(0) | RSVD(6) }, + { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E_ESIM), + .driver_info = RSVD(0) | RSVD(6) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, @@ -1273,10 +1279,16 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(2) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff), /* Telit LE910-S1 (ECM) */ .driver_info = NCTRL(2) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701a, 0xff), /* Telit LE910R1 (RNDIS) */ + .driver_info = NCTRL(2) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701b, 0xff), /* Telit LE910R1 (ECM) */ + .driver_info = NCTRL(2) }, { USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */ .driver_info = NCTRL(0) | ZLP }, { USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */ .driver_info = NCTRL(0) | ZLP }, + { USB_DEVICE(TELIT_VENDOR_ID, 0x9201), /* Telit LE910R1 flashing device */ + .driver_info = NCTRL(0) | ZLP }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), .driver_info = RSVD(1) }, @@ -1649,6 +1661,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(2) }, { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */ + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1485, 0xff, 0xff, 0xff), /* ZTE MF286D */ + .driver_info = RSVD(5) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) }, diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 29191d33c0e3edf43c4a1be70adc03e902b3ce53..1a05e3dcfec8a10652ec437bc0d80ce38c401d50 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -2301,6 +2301,16 @@ UNUSUAL_DEV( 0x2027, 0xa001, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init, US_FL_SCM_MULT_TARG ), +/* + * Reported by DocMAX + * and Thomas Weißschuh + */ +UNUSUAL_DEV( 0x2109, 0x0715, 0x9999, 0x9999, + "VIA Labs, Inc.", + "VL817 SATA Bridge", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_UAS), + UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001, "ST", "2A", diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index 721d9c4ddc81f9df7de71c93b1309614aac0d3c7..8333c80b5f7c119628c36e11575b3882e52d7507 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -4164,7 +4164,8 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port) case SNK_TRYWAIT_DEBOUNCE: break; case SNK_ATTACH_WAIT: - tcpm_set_state(port, SNK_UNATTACHED, 0); + case SNK_DEBOUNCED: + /* Do nothing, as TCPM is still waiting for vbus to reaach VSAFE5V to connect */ break; case SNK_NEGOTIATE_CAPABILITIES: diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c index bff96d64dddffee6857125ad6040f5d67a873847..6db7c8ddd51cd0aac16d89b86e83963fe39af70b 100644 --- a/drivers/usb/typec/ucsi/ucsi_ccg.c +++ b/drivers/usb/typec/ucsi/ucsi_ccg.c @@ -325,7 +325,7 @@ static int ucsi_ccg_init(struct ucsi_ccg *uc) if (status 
< 0) return status; - if (!data) + if (!(data & DEV_INT)) return 0; status = ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data)); diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index fbdc9468818d32a5a44783e4f57804ece5e9be32..65d6f8fd81e706a25e12937d5d81bfb78bdd2535 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -812,8 +812,6 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque MLX5_SET(virtio_q, vq_ctx, umem_3_id, mvq->umem3.id); MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem3.size); MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn); - if (MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, eth_frame_offload_type)) - MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0, 1); err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); if (err) diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 5cd1ee66d23266931202d3229720905b83a2d318..c282fc0d04bd1b96e76c2cb95929bb3ec7675bef 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -573,16 +573,18 @@ err: return ret; } -static int vhost_vsock_stop(struct vhost_vsock *vsock) +static int vhost_vsock_stop(struct vhost_vsock *vsock, bool check_owner) { size_t i; - int ret; + int ret = 0; mutex_lock(&vsock->dev.mutex); - ret = vhost_dev_check_owner(&vsock->dev); - if (ret) - goto err; + if (check_owner) { + ret = vhost_dev_check_owner(&vsock->dev); + if (ret) + goto err; + } for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { struct vhost_virtqueue *vq = &vsock->vqs[i]; @@ -697,7 +699,12 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file) * inefficient. Room for improvement here. */ vsock_for_each_connected_socket(vhost_vsock_reset_orphans); - vhost_vsock_stop(vsock); + /* Don't check the owner, because we are in the release path, so we + * need to stop the vsock device in any case. + * vhost_vsock_stop() can not fail in this case, so we don't need to + * check the return code. 
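The vhost_vsock_stop() change above threads a check_owner flag through the function: the ioctl path keeps the ownership check, while the file-release path stops the device unconditionally and can ignore the (now always zero) return value. A minimal user-space sketch of that pattern follows; the toy_* names and the ownership test are invented and are not the vhost API.

    /* Build: cc -std=c11 stop_owner_sketch.c */
    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct toy_dev {
        long owner;    /* stand-in for vhost_dev ownership */
        bool running;
    };

    static int toy_check_owner(const struct toy_dev *dev, long caller)
    {
        return dev->owner == caller ? 0 : -EPERM;
    }

    static int toy_stop(struct toy_dev *dev, long caller, bool check_owner)
    {
        int ret = 0;

        if (check_owner) {
            ret = toy_check_owner(dev, caller);
            if (ret)
                return ret;   /* ioctl path: refuse foreign callers */
        }

        dev->running = false; /* release path always reaches this point */
        return ret;
    }

    int main(void)
    {
        struct toy_dev d = { .owner = 42, .running = true };

        printf("ioctl by wrong owner: %d\n", toy_stop(&d, 7, true));  /* -EPERM */
        printf("release path:         %d\n", toy_stop(&d, 7, false)); /* 0 */
        return 0;
    }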
+ */ + vhost_vsock_stop(vsock, false); vhost_vsock_flush(vsock); vhost_dev_stop(&vsock->dev); @@ -801,7 +808,7 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl, if (start) return vhost_vsock_start(vsock); else - return vhost_vsock_stop(vsock); + return vhost_vsock_stop(vsock, true); case VHOST_GET_FEATURES: features = VHOST_VSOCK_FEATURES; if (copy_to_user(argp, &features, sizeof(features))) diff --git a/drivers/video/backlight/qcom-wled.c b/drivers/video/backlight/qcom-wled.c index cd11c5776438184c6bc466b1a0632d623940acc4..486d35da0150724e124d477b31153da2911c2fd9 100644 --- a/drivers/video/backlight/qcom-wled.c +++ b/drivers/video/backlight/qcom-wled.c @@ -231,14 +231,14 @@ struct wled { static int wled3_set_brightness(struct wled *wled, u16 brightness) { int rc, i; - u8 v[2]; + __le16 v; - v[0] = brightness & 0xff; - v[1] = (brightness >> 8) & 0xf; + v = cpu_to_le16(brightness & WLED3_SINK_REG_BRIGHT_MAX); for (i = 0; i < wled->cfg.num_strings; ++i) { rc = regmap_bulk_write(wled->regmap, wled->ctrl_addr + - WLED3_SINK_REG_BRIGHT(i), v, 2); + WLED3_SINK_REG_BRIGHT(wled->cfg.enabled_strings[i]), + &v, sizeof(v)); if (rc < 0) return rc; } @@ -250,18 +250,18 @@ static int wled4_set_brightness(struct wled *wled, u16 brightness) { int rc, i; u16 low_limit = wled->max_brightness * 4 / 1000; - u8 v[2]; + __le16 v; /* WLED4's lower limit of operation is 0.4% */ if (brightness > 0 && brightness < low_limit) brightness = low_limit; - v[0] = brightness & 0xff; - v[1] = (brightness >> 8) & 0xf; + v = cpu_to_le16(brightness & WLED3_SINK_REG_BRIGHT_MAX); for (i = 0; i < wled->cfg.num_strings; ++i) { rc = regmap_bulk_write(wled->regmap, wled->sink_addr + - WLED4_SINK_REG_BRIGHT(i), v, 2); + WLED4_SINK_REG_BRIGHT(wled->cfg.enabled_strings[i]), + &v, sizeof(v)); if (rc < 0) return rc; } @@ -273,21 +273,20 @@ static int wled5_set_brightness(struct wled *wled, u16 brightness) { int rc, offset; u16 low_limit = wled->max_brightness * 1 / 1000; - u8 v[2]; + __le16 v; /* WLED5's lower limit is 0.1% */ if (brightness < low_limit) brightness = low_limit; - v[0] = brightness & 0xff; - v[1] = (brightness >> 8) & 0x7f; + v = cpu_to_le16(brightness & WLED5_SINK_REG_BRIGHT_MAX_15B); offset = (wled->cfg.mod_sel == MOD_A) ? 
WLED5_SINK_REG_MOD_A_BRIGHTNESS_LSB : WLED5_SINK_REG_MOD_B_BRIGHTNESS_LSB; rc = regmap_bulk_write(wled->regmap, wled->sink_addr + offset, - v, 2); + &v, sizeof(v)); return rc; } @@ -572,7 +571,7 @@ unlock_mutex: static void wled_auto_string_detection(struct wled *wled) { - int rc = 0, i, delay_time_us; + int rc = 0, i, j, delay_time_us; u32 sink_config = 0; u8 sink_test = 0, sink_valid = 0, val; bool fault_set; @@ -619,14 +618,15 @@ static void wled_auto_string_detection(struct wled *wled) /* Iterate through the strings one by one */ for (i = 0; i < wled->cfg.num_strings; i++) { - sink_test = BIT((WLED4_SINK_REG_CURR_SINK_SHFT + i)); + j = wled->cfg.enabled_strings[i]; + sink_test = BIT((WLED4_SINK_REG_CURR_SINK_SHFT + j)); /* Enable feedback control */ rc = regmap_write(wled->regmap, wled->ctrl_addr + - WLED3_CTRL_REG_FEEDBACK_CONTROL, i + 1); + WLED3_CTRL_REG_FEEDBACK_CONTROL, j + 1); if (rc < 0) { dev_err(wled->dev, "Failed to enable feedback for SINK %d rc = %d\n", - i + 1, rc); + j + 1, rc); goto failed_detect; } @@ -635,7 +635,7 @@ static void wled_auto_string_detection(struct wled *wled) WLED4_SINK_REG_CURR_SINK, sink_test); if (rc < 0) { dev_err(wled->dev, "Failed to configure SINK %d rc=%d\n", - i + 1, rc); + j + 1, rc); goto failed_detect; } @@ -662,7 +662,7 @@ static void wled_auto_string_detection(struct wled *wled) if (fault_set) dev_dbg(wled->dev, "WLED OVP fault detected with SINK %d\n", - i + 1); + j + 1); else sink_valid |= sink_test; @@ -702,15 +702,16 @@ static void wled_auto_string_detection(struct wled *wled) /* Enable valid sinks */ if (wled->version == 4) { for (i = 0; i < wled->cfg.num_strings; i++) { + j = wled->cfg.enabled_strings[i]; if (sink_config & - BIT(WLED4_SINK_REG_CURR_SINK_SHFT + i)) + BIT(WLED4_SINK_REG_CURR_SINK_SHFT + j)) val = WLED4_SINK_REG_STR_MOD_MASK; else /* Disable modulator_en for unused sink */ val = 0; rc = regmap_write(wled->regmap, wled->sink_addr + - WLED4_SINK_REG_STR_MOD_EN(i), val); + WLED4_SINK_REG_STR_MOD_EN(j), val); if (rc < 0) { dev_err(wled->dev, "Failed to configure MODULATOR_EN rc=%d\n", rc); @@ -1256,21 +1257,6 @@ static const struct wled_var_cfg wled5_ovp_cfg = { .size = 16, }; -static u32 wled3_num_strings_values_fn(u32 idx) -{ - return idx + 1; -} - -static const struct wled_var_cfg wled3_num_strings_cfg = { - .fn = wled3_num_strings_values_fn, - .size = 3, -}; - -static const struct wled_var_cfg wled4_num_strings_cfg = { - .fn = wled3_num_strings_values_fn, - .size = 4, -}; - static u32 wled3_switch_freq_values_fn(u32 idx) { return 19200 / (2 * (1 + idx)); @@ -1344,11 +1330,6 @@ static int wled_configure(struct wled *wled) .val_ptr = &cfg->switch_freq, .cfg = &wled3_switch_freq_cfg, }, - { - .name = "qcom,num-strings", - .val_ptr = &cfg->num_strings, - .cfg = &wled3_num_strings_cfg, - }, }; const struct wled_u32_opts wled4_opts[] = { @@ -1372,11 +1353,6 @@ static int wled_configure(struct wled *wled) .val_ptr = &cfg->switch_freq, .cfg = &wled3_switch_freq_cfg, }, - { - .name = "qcom,num-strings", - .val_ptr = &cfg->num_strings, - .cfg = &wled4_num_strings_cfg, - }, }; const struct wled_u32_opts wled5_opts[] = { @@ -1400,11 +1376,6 @@ static int wled_configure(struct wled *wled) .val_ptr = &cfg->switch_freq, .cfg = &wled3_switch_freq_cfg, }, - { - .name = "qcom,num-strings", - .val_ptr = &cfg->num_strings, - .cfg = &wled4_num_strings_cfg, - }, { .name = "qcom,modulator-sel", .val_ptr = &cfg->mod_sel, @@ -1523,16 +1494,57 @@ static int wled_configure(struct wled *wled) *bool_opts[i].val_ptr = true; } - cfg->num_strings = 
cfg->num_strings + 1; - string_len = of_property_count_elems_of_size(dev->of_node, "qcom,enabled-strings", sizeof(u32)); - if (string_len > 0) - of_property_read_u32_array(dev->of_node, + if (string_len > 0) { + if (string_len > wled->max_string_count) { + dev_err(dev, "Cannot have more than %d strings\n", + wled->max_string_count); + return -EINVAL; + } + + rc = of_property_read_u32_array(dev->of_node, "qcom,enabled-strings", wled->cfg.enabled_strings, - sizeof(u32)); + string_len); + if (rc) { + dev_err(dev, "Failed to read %d elements from qcom,enabled-strings: %d\n", + string_len, rc); + return rc; + } + + for (i = 0; i < string_len; ++i) { + if (wled->cfg.enabled_strings[i] >= wled->max_string_count) { + dev_err(dev, + "qcom,enabled-strings index %d at %d is out of bounds\n", + wled->cfg.enabled_strings[i], i); + return -EINVAL; + } + } + + cfg->num_strings = string_len; + } + + rc = of_property_read_u32(dev->of_node, "qcom,num-strings", &val); + if (!rc) { + if (val < 1 || val > wled->max_string_count) { + dev_err(dev, "qcom,num-strings must be between 1 and %d\n", + wled->max_string_count); + return -EINVAL; + } + + if (string_len > 0) { + dev_warn(dev, "Only one of qcom,num-strings or qcom,enabled-strings" + " should be set\n"); + if (val > string_len) { + dev_err(dev, "qcom,num-strings exceeds qcom,enabled-strings\n"); + return -EINVAL; + } + } + + cfg->num_strings = val; + } return 0; } diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig index ee33b8ec62bb259b2368fa97dc7294c751fbe059..47c4939577725059a905459d818f9c4bad9c0203 100644 --- a/drivers/video/console/Kconfig +++ b/drivers/video/console/Kconfig @@ -78,6 +78,26 @@ config FRAMEBUFFER_CONSOLE help Low-level framebuffer-based console driver. +config FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION + bool "Enable legacy fbcon hardware acceleration code" + depends on FRAMEBUFFER_CONSOLE + default y if PARISC + default n + help + This option enables the fbcon (framebuffer text-based) hardware + acceleration for graphics drivers which were written for the fbdev + graphics interface. + + On modern machines, on mainstream machines (like x86-64) or when + using a modern Linux distribution those fbdev drivers usually aren't used. + So enabling this option wouldn't have any effect, which is why you want + to disable this option on such newer machines. + + If you compile this kernel for older machines which still require the + fbdev drivers, you may want to say Y. + + If unsure, select n. 
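The qcom-wled wled_configure() hunks above rework how the "qcom,enabled-strings" and "qcom,num-strings" properties are validated: every listed index must fit the controller's maximum string count, and an explicit count may not exceed the enabled-strings list. The sketch below condenses that validation into a stand-alone function; plain arrays stand in for the device-tree reads and all names are invented, so it is an illustration of the checks rather than the driver code.

    /* Build: cc -std=c11 wled_strings_sketch.c */
    #include <stdio.h>

    #define MAX_STRING_COUNT 4 /* stand-in for wled->max_string_count */

    /* Returns the effective number of strings, or -1 on invalid properties. */
    static int validate_strings(const unsigned int *enabled, int enabled_len,
                                int num_strings_prop)
    {
        int i;

        if (enabled_len > MAX_STRING_COUNT)
            return -1;                        /* too many list entries */

        for (i = 0; i < enabled_len; i++)
            if (enabled[i] >= MAX_STRING_COUNT)
                return -1;                    /* index out of bounds */

        if (num_strings_prop > 0) {
            if (num_strings_prop > MAX_STRING_COUNT)
                return -1;                    /* count itself too large */
            if (enabled_len > 0 && num_strings_prop > enabled_len)
                return -1;                    /* properties disagree */
            return num_strings_prop;
        }

        return enabled_len;
    }

    int main(void)
    {
        unsigned int ok[] = { 0, 2 };
        unsigned int bad[] = { 0, 7 };

        printf("%d\n", validate_strings(ok, 2, 2));  /*  2 */
        printf("%d\n", validate_strings(bad, 2, 0)); /* -1 */
        return 0;
    }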
+ config FRAMEBUFFER_CONSOLE_DETECT_PRIMARY bool "Map the console to the primary display device" depends on FRAMEBUFFER_CONSOLE diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 42c72d051158fa47451187d0450c30a1113c5017..f102519ccefb43be0fb7806e7d0c716c51d1fdb1 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -1033,7 +1033,7 @@ static void fbcon_init(struct vc_data *vc, int init) struct vc_data *svc = *default_mode; struct fbcon_display *t, *p = &fb_display[vc->vc_num]; int logo = 1, new_rows, new_cols, rows, cols, charcnt = 256; - int ret; + int cap, ret; if (WARN_ON(info_idx == -1)) return; @@ -1042,6 +1042,7 @@ static void fbcon_init(struct vc_data *vc, int init) con2fb_map[vc->vc_num] = info_idx; info = registered_fb[con2fb_map[vc->vc_num]]; + cap = info->flags; if (logo_shown < 0 && console_loglevel <= CONSOLE_LOGLEVEL_QUIET) logo_shown = FBCON_LOGO_DONTSHOW; @@ -1146,13 +1147,13 @@ static void fbcon_init(struct vc_data *vc, int init) ops->graphics = 0; - /* - * No more hw acceleration for fbcon. - * - * FIXME: Garbage collect all the now dead code after sufficient time - * has passed. - */ - p->scrollmode = SCROLL_REDRAW; +#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION + if ((cap & FBINFO_HWACCEL_COPYAREA) && + !(cap & FBINFO_HWACCEL_DISABLED)) + p->scrollmode = SCROLL_MOVE; + else /* default to something safe */ + p->scrollmode = SCROLL_REDRAW; +#endif /* * ++guenther: console.c:vc_allocate() relies on initializing @@ -1718,7 +1719,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, count = vc->vc_rows; if (logo_shown >= 0) goto redraw_up; - switch (p->scrollmode) { + switch (fb_scrollmode(p)) { case SCROLL_MOVE: fbcon_redraw_blit(vc, info, p, t, b - t - count, count); @@ -1808,7 +1809,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, count = vc->vc_rows; if (logo_shown >= 0) goto redraw_down; - switch (p->scrollmode) { + switch (fb_scrollmode(p)) { case SCROLL_MOVE: fbcon_redraw_blit(vc, info, p, b - 1, b - t - count, -count); @@ -1959,6 +1960,48 @@ static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, height, width); } +static void updatescrollmode_accel(struct fbcon_display *p, + struct fb_info *info, + struct vc_data *vc) +{ +#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION + struct fbcon_ops *ops = info->fbcon_par; + int cap = info->flags; + u16 t = 0; + int ypan = FBCON_SWAP(ops->rotate, info->fix.ypanstep, + info->fix.xpanstep); + int ywrap = FBCON_SWAP(ops->rotate, info->fix.ywrapstep, t); + int yres = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); + int vyres = FBCON_SWAP(ops->rotate, info->var.yres_virtual, + info->var.xres_virtual); + int good_pan = (cap & FBINFO_HWACCEL_YPAN) && + divides(ypan, vc->vc_font.height) && vyres > yres; + int good_wrap = (cap & FBINFO_HWACCEL_YWRAP) && + divides(ywrap, vc->vc_font.height) && + divides(vc->vc_font.height, vyres) && + divides(vc->vc_font.height, yres); + int reading_fast = cap & FBINFO_READS_FAST; + int fast_copyarea = (cap & FBINFO_HWACCEL_COPYAREA) && + !(cap & FBINFO_HWACCEL_DISABLED); + int fast_imageblit = (cap & FBINFO_HWACCEL_IMAGEBLIT) && + !(cap & FBINFO_HWACCEL_DISABLED); + + if (good_wrap || good_pan) { + if (reading_fast || fast_copyarea) + p->scrollmode = good_wrap ? + SCROLL_WRAP_MOVE : SCROLL_PAN_MOVE; + else + p->scrollmode = good_wrap ? 
SCROLL_REDRAW : + SCROLL_PAN_REDRAW; + } else { + if (reading_fast || (fast_copyarea && !fast_imageblit)) + p->scrollmode = SCROLL_MOVE; + else + p->scrollmode = SCROLL_REDRAW; + } +#endif +} + static void updatescrollmode(struct fbcon_display *p, struct fb_info *info, struct vc_data *vc) @@ -1974,6 +2017,9 @@ static void updatescrollmode(struct fbcon_display *p, p->vrows -= (yres - (fh * vc->vc_rows)) / fh; if ((yres % fh) && (vyres % fh < yres % fh)) p->vrows--; + + /* update scrollmode in case hardware acceleration is used */ + updatescrollmode_accel(p, info, vc); } #define PITCH(w) (((w) + 7) >> 3) @@ -2134,7 +2180,7 @@ static int fbcon_switch(struct vc_data *vc) updatescrollmode(p, info, vc); - switch (p->scrollmode) { + switch (fb_scrollmode(p)) { case SCROLL_WRAP_MOVE: scrollback_phys_max = p->vrows - vc->vc_rows; break; diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h index 9315b360c898136bb2a65bd348aa16f57dcbecea..0f16cbc99e6a4f483018a46928f9e957917f9179 100644 --- a/drivers/video/fbdev/core/fbcon.h +++ b/drivers/video/fbdev/core/fbcon.h @@ -29,7 +29,9 @@ struct fbcon_display { /* Filled in by the low-level console driver */ const u_char *fontdata; int userfont; /* != 0 if fontdata kmalloc()ed */ - u_short scrollmode; /* Scroll Method */ +#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION + u_short scrollmode; /* Scroll Method, use fb_scrollmode() */ +#endif u_short inverse; /* != 0 text black on white as default */ short yscroll; /* Hardware scrolling */ int vrows; /* number of virtual rows */ @@ -208,6 +210,17 @@ static inline int attr_col_ec(int shift, struct vc_data *vc, #define SCROLL_REDRAW 0x004 #define SCROLL_PAN_REDRAW 0x005 +static inline u_short fb_scrollmode(struct fbcon_display *fb) +{ +#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION + return fb->scrollmode; +#else + /* hardcoded to SCROLL_REDRAW if acceleration was disabled. */ + return SCROLL_REDRAW; +#endif +} + + #ifdef CONFIG_FB_TILEBLITTING extern void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info); #endif diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c index bbd869efd03bc94ae9387d4355eee28a280fb9c2..f75b24c32d497e27fdb6c56b512b68e92349b9a2 100644 --- a/drivers/video/fbdev/core/fbcon_ccw.c +++ b/drivers/video/fbdev/core/fbcon_ccw.c @@ -65,7 +65,7 @@ static void ccw_bmove(struct vc_data *vc, struct fb_info *info, int sy, { struct fbcon_ops *ops = info->fbcon_par; struct fb_copyarea area; - u32 vyres = GETVYRES(ops->p->scrollmode, info); + u32 vyres = GETVYRES(ops->p, info); area.sx = sy * vc->vc_font.height; area.sy = vyres - ((sx + width) * vc->vc_font.width); @@ -83,7 +83,7 @@ static void ccw_clear(struct vc_data *vc, struct fb_info *info, int sy, struct fbcon_ops *ops = info->fbcon_par; struct fb_fillrect region; int bgshift = (vc->vc_hi_font_mask) ? 
13 : 12; - u32 vyres = GETVYRES(ops->p->scrollmode, info); + u32 vyres = GETVYRES(ops->p, info); region.color = attr_bgcol_ec(bgshift,vc,info); region.dx = sy * vc->vc_font.height; @@ -140,7 +140,7 @@ static void ccw_putcs(struct vc_data *vc, struct fb_info *info, u32 cnt, pitch, size; u32 attribute = get_attribute(info, scr_readw(s)); u8 *dst, *buf = NULL; - u32 vyres = GETVYRES(ops->p->scrollmode, info); + u32 vyres = GETVYRES(ops->p, info); if (!ops->fontbuffer) return; @@ -229,7 +229,7 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode, int attribute, use_sw = vc->vc_cursor_type & CUR_SW; int err = 1, dx, dy; char *src; - u32 vyres = GETVYRES(ops->p->scrollmode, info); + u32 vyres = GETVYRES(ops->p, info); if (!ops->fontbuffer) return; @@ -387,7 +387,7 @@ static int ccw_update_start(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; u32 yoffset; - u32 vyres = GETVYRES(ops->p->scrollmode, info); + u32 vyres = GETVYRES(ops->p, info); int err; yoffset = (vyres - info->var.yres) - ops->var.xoffset; diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c index a34cbe8e98744b12ffee9e45fa834f2d7aab8a4f..cf03dc62f35d37869fd6aac1bb679285527a9f97 100644 --- a/drivers/video/fbdev/core/fbcon_cw.c +++ b/drivers/video/fbdev/core/fbcon_cw.c @@ -50,7 +50,7 @@ static void cw_bmove(struct vc_data *vc, struct fb_info *info, int sy, { struct fbcon_ops *ops = info->fbcon_par; struct fb_copyarea area; - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vxres = GETVXRES(ops->p, info); area.sx = vxres - ((sy + height) * vc->vc_font.height); area.sy = sx * vc->vc_font.width; @@ -68,7 +68,7 @@ static void cw_clear(struct vc_data *vc, struct fb_info *info, int sy, struct fbcon_ops *ops = info->fbcon_par; struct fb_fillrect region; int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vxres = GETVXRES(ops->p, info); region.color = attr_bgcol_ec(bgshift,vc,info); region.dx = vxres - ((sy + height) * vc->vc_font.height); @@ -125,7 +125,7 @@ static void cw_putcs(struct vc_data *vc, struct fb_info *info, u32 cnt, pitch, size; u32 attribute = get_attribute(info, scr_readw(s)); u8 *dst, *buf = NULL; - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vxres = GETVXRES(ops->p, info); if (!ops->fontbuffer) return; @@ -212,7 +212,7 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode, int attribute, use_sw = vc->vc_cursor_type & CUR_SW; int err = 1, dx, dy; char *src; - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vxres = GETVXRES(ops->p, info); if (!ops->fontbuffer) return; @@ -369,7 +369,7 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode, static int cw_update_start(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vxres = GETVXRES(ops->p, info); u32 xoffset; int err; diff --git a/drivers/video/fbdev/core/fbcon_rotate.h b/drivers/video/fbdev/core/fbcon_rotate.h index e233444cda664783d055b372929cafd2f4ac2e89..01cbe303b8a29592d3236add1f8b46db85d2b74f 100644 --- a/drivers/video/fbdev/core/fbcon_rotate.h +++ b/drivers/video/fbdev/core/fbcon_rotate.h @@ -12,11 +12,11 @@ #define _FBCON_ROTATE_H #define GETVYRES(s,i) ({ \ - (s == SCROLL_REDRAW || s == SCROLL_MOVE) ? \ + (fb_scrollmode(s) == SCROLL_REDRAW || fb_scrollmode(s) == SCROLL_MOVE) ? 
\ (i)->var.yres : (i)->var.yres_virtual; }) #define GETVXRES(s,i) ({ \ - (s == SCROLL_REDRAW || s == SCROLL_MOVE || !(i)->fix.xpanstep) ? \ + (fb_scrollmode(s) == SCROLL_REDRAW || fb_scrollmode(s) == SCROLL_MOVE || !(i)->fix.xpanstep) ? \ (i)->var.xres : (i)->var.xres_virtual; }) diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c index 199cbc7abe35315d5adb9c79aaa29ca20eedcd32..c5d2da731d68629b2a2fc4ce15fcdc5d654ced21 100644 --- a/drivers/video/fbdev/core/fbcon_ud.c +++ b/drivers/video/fbdev/core/fbcon_ud.c @@ -50,8 +50,8 @@ static void ud_bmove(struct vc_data *vc, struct fb_info *info, int sy, { struct fbcon_ops *ops = info->fbcon_par; struct fb_copyarea area; - u32 vyres = GETVYRES(ops->p->scrollmode, info); - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vyres = GETVYRES(ops->p, info); + u32 vxres = GETVXRES(ops->p, info); area.sy = vyres - ((sy + height) * vc->vc_font.height); area.sx = vxres - ((sx + width) * vc->vc_font.width); @@ -69,8 +69,8 @@ static void ud_clear(struct vc_data *vc, struct fb_info *info, int sy, struct fbcon_ops *ops = info->fbcon_par; struct fb_fillrect region; int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; - u32 vyres = GETVYRES(ops->p->scrollmode, info); - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vyres = GETVYRES(ops->p, info); + u32 vxres = GETVXRES(ops->p, info); region.color = attr_bgcol_ec(bgshift,vc,info); region.dy = vyres - ((sy + height) * vc->vc_font.height); @@ -162,8 +162,8 @@ static void ud_putcs(struct vc_data *vc, struct fb_info *info, u32 mod = vc->vc_font.width % 8, cnt, pitch, size; u32 attribute = get_attribute(info, scr_readw(s)); u8 *dst, *buf = NULL; - u32 vyres = GETVYRES(ops->p->scrollmode, info); - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vyres = GETVYRES(ops->p, info); + u32 vxres = GETVXRES(ops->p, info); if (!ops->fontbuffer) return; @@ -259,8 +259,8 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode, int attribute, use_sw = vc->vc_cursor_type & CUR_SW; int err = 1, dx, dy; char *src; - u32 vyres = GETVYRES(ops->p->scrollmode, info); - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vyres = GETVYRES(ops->p, info); + u32 vxres = GETVXRES(ops->p, info); if (!ops->fontbuffer) return; @@ -410,8 +410,8 @@ static int ud_update_start(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; int xoffset, yoffset; - u32 vyres = GETVYRES(ops->p->scrollmode, info); - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vyres = GETVYRES(ops->p, info); + u32 vxres = GETVXRES(ops->p, info); int err; xoffset = vxres - info->var.xres - ops->var.xoffset; diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c index 4dc9077dd2ac04d07401e210b13fcb052b0f6ab1..3c309ab20887473d3f7f1b97fc1fbf62d9349856 100644 --- a/drivers/video/fbdev/hyperv_fb.c +++ b/drivers/video/fbdev/hyperv_fb.c @@ -286,8 +286,6 @@ struct hvfb_par { static uint screen_width = HVFB_WIDTH; static uint screen_height = HVFB_HEIGHT; -static uint screen_width_max = HVFB_WIDTH; -static uint screen_height_max = HVFB_HEIGHT; static uint screen_depth; static uint screen_fb_size; static uint dio_fb_size; /* FB size for deferred IO */ @@ -581,7 +579,6 @@ static int synthvid_get_supported_resolution(struct hv_device *hdev) int ret = 0; unsigned long t; u8 index; - int i; memset(msg, 0, sizeof(struct synthvid_msg)); msg->vid_hdr.type = SYNTHVID_RESOLUTION_REQUEST; @@ -612,13 +609,6 @@ static int synthvid_get_supported_resolution(struct hv_device *hdev) goto out; } - for 
(i = 0; i < msg->resolution_resp.resolution_count; i++) { - screen_width_max = max_t(unsigned int, screen_width_max, - msg->resolution_resp.supported_resolution[i].width); - screen_height_max = max_t(unsigned int, screen_height_max, - msg->resolution_resp.supported_resolution[i].height); - } - screen_width = msg->resolution_resp.supported_resolution[index].width; screen_height = @@ -940,7 +930,7 @@ static void hvfb_get_option(struct fb_info *info) if (x < HVFB_WIDTH_MIN || y < HVFB_HEIGHT_MIN || (synthvid_ver_ge(par->synthvid_version, SYNTHVID_VERSION_WIN10) && - (x > screen_width_max || y > screen_height_max)) || + (x * y * screen_depth / 8 > screen_fb_size)) || (par->synthvid_version == SYNTHVID_VERSION_WIN8 && x * y * screen_depth / 8 > SYNTHVID_FB_SIZE_WIN8) || (par->synthvid_version == SYNTHVID_VERSION_WIN7 && @@ -1193,8 +1183,8 @@ static int hvfb_probe(struct hv_device *hdev, } hvfb_get_option(info); - pr_info("Screen resolution: %dx%d, Color depth: %d\n", - screen_width, screen_height, screen_depth); + pr_info("Screen resolution: %dx%d, Color depth: %d, Frame buffer size: %d\n", + screen_width, screen_height, screen_depth, screen_fb_size); ret = hvfb_getmem(hdev, info); if (ret) { diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c index 1e8a38a7967d890a9e44fb245e2c6ce11f7b0f5c..5c6e9dc88060ba30187bb5727a1c519c32d8d09e 100644 --- a/drivers/video/fbdev/vga16fb.c +++ b/drivers/video/fbdev/vga16fb.c @@ -184,6 +184,25 @@ static inline void setindex(int index) vga_io_w(VGA_GFX_I, index); } +/* Check if the video mode is supported by the driver */ +static inline int check_mode_supported(void) +{ + /* non-x86 architectures treat orig_video_isVGA as a boolean flag */ +#if defined(CONFIG_X86) + /* only EGA and VGA in 16 color graphic mode are supported */ + if (screen_info.orig_video_isVGA != VIDEO_TYPE_EGAC && + screen_info.orig_video_isVGA != VIDEO_TYPE_VGAC) + return -ENODEV; + + if (screen_info.orig_video_mode != 0x0D && /* 320x200/4 (EGA) */ + screen_info.orig_video_mode != 0x0E && /* 640x200/4 (EGA) */ + screen_info.orig_video_mode != 0x10 && /* 640x350/4 (EGA) */ + screen_info.orig_video_mode != 0x12) /* 640x480/4 (VGA) */ + return -ENODEV; +#endif + return 0; +} + static void vga16fb_pan_var(struct fb_info *info, struct fb_var_screeninfo *var) { @@ -1422,6 +1441,11 @@ static int __init vga16fb_init(void) vga16fb_setup(option); #endif + + ret = check_mode_supported(); + if (ret) + return ret; + ret = platform_driver_register(&vga16fb_driver); if (!ret) { diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index cce75d3b3ba05da8a9dba8c1208a2eaf86dce261..3cc2a4ee7152c6fb280f21b848707743581e5042 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -1124,8 +1124,10 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq, if (virtqueue_use_indirect(_vq, total_sg)) { err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs, in_sgs, data, gfp); - if (err != -ENOMEM) + if (err != -ENOMEM) { + END_USE(vq); return err; + } /* fall back on direct */ } diff --git a/drivers/w1/slaves/w1_ds28e04.c b/drivers/w1/slaves/w1_ds28e04.c index e4f336111edc6feb51cf0c602b19585bed1b87a5..6cef6e2edb892d710f58b5ee8c606b28554dde7f 100644 --- a/drivers/w1/slaves/w1_ds28e04.c +++ b/drivers/w1/slaves/w1_ds28e04.c @@ -32,7 +32,7 @@ static int w1_strong_pullup = 1; module_param_named(strong_pullup, w1_strong_pullup, int, 0); /* enable/disable CRC checking on DS28E04-100 memory accesses */ -static char w1_enable_crccheck = 1; +static 
bool w1_enable_crccheck = true; #define W1_EEPROM_SIZE 512 #define W1_PAGE_COUNT 16 @@ -339,32 +339,18 @@ static BIN_ATTR_RW(pio, 1); static ssize_t crccheck_show(struct device *dev, struct device_attribute *attr, char *buf) { - if (put_user(w1_enable_crccheck + 0x30, buf)) - return -EFAULT; - - return sizeof(w1_enable_crccheck); + return sysfs_emit(buf, "%d\n", w1_enable_crccheck); } static ssize_t crccheck_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - char val; - - if (count != 1 || !buf) - return -EINVAL; + int err = kstrtobool(buf, &w1_enable_crccheck); - if (get_user(val, buf)) - return -EFAULT; + if (err) + return err; - /* convert to decimal */ - val = val - 0x30; - if (val != 0 && val != 1) - return -EINVAL; - - /* set the new value */ - w1_enable_crccheck = val; - - return sizeof(w1_enable_crccheck); + return count; } static DEVICE_ATTR_RW(crccheck); diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index b9651f797676c47b036c6f7c173ee9130383bcc4..54778aadf618da4a5612404bd4f45851b47aa2df 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -240,13 +240,13 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map) if (!refcount_dec_and_test(&map->users)) return; + if (map->pages && !use_ptemod) + unmap_grant_pages(map, 0, map->count); + if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) { notify_remote_via_evtchn(map->notify.event); evtchn_put(map->notify.event); } - - if (map->pages && !use_ptemod) - unmap_grant_pages(map, 0, map->count); gntdev_free_map(map); } diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c index 72b67d810b8c202ac8323c700361920c96dd193c..a13ef836fe4e1b13b7162b0ce6fc5c630e048c70 100644 --- a/fs/9p/vfs_inode_dotl.c +++ b/fs/9p/vfs_inode_dotl.c @@ -541,7 +541,10 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr) { int retval; struct p9_fid *fid = NULL; - struct p9_iattr_dotl p9attr; + struct p9_iattr_dotl p9attr = { + .uid = INVALID_UID, + .gid = INVALID_GID, + }; struct inode *inode = d_inode(dentry); p9_debug(P9_DEBUG_VFS, "\n"); @@ -551,14 +554,22 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr) return retval; p9attr.valid = v9fs_mapped_iattr_valid(iattr->ia_valid); - p9attr.mode = iattr->ia_mode; - p9attr.uid = iattr->ia_uid; - p9attr.gid = iattr->ia_gid; - p9attr.size = iattr->ia_size; - p9attr.atime_sec = iattr->ia_atime.tv_sec; - p9attr.atime_nsec = iattr->ia_atime.tv_nsec; - p9attr.mtime_sec = iattr->ia_mtime.tv_sec; - p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec; + if (iattr->ia_valid & ATTR_MODE) + p9attr.mode = iattr->ia_mode; + if (iattr->ia_valid & ATTR_UID) + p9attr.uid = iattr->ia_uid; + if (iattr->ia_valid & ATTR_GID) + p9attr.gid = iattr->ia_gid; + if (iattr->ia_valid & ATTR_SIZE) + p9attr.size = iattr->ia_size; + if (iattr->ia_valid & ATTR_ATIME_SET) { + p9attr.atime_sec = iattr->ia_atime.tv_sec; + p9attr.atime_nsec = iattr->ia_atime.tv_nsec; + } + if (iattr->ia_valid & ATTR_MTIME_SET) { + p9attr.mtime_sec = iattr->ia_mtime.tv_sec; + p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec; + } if (iattr->ia_valid & ATTR_FILE) { fid = iattr->ia_file->private_data; diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index 6e447bdaf9ec8b381465dfbaeb73a240b6af4416..baff31a147e7dfa802c6a8823dce91837443daf5 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -1213,7 +1213,12 @@ again: ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0); if (ret < 0) goto out; - BUG_ON(ret == 0); + if (ret == 0) { + /* 
This shouldn't happen, indicates a bug or fs corruption. */ + ASSERT(ret != 0); + ret = -EUCLEAN; + goto out; + } #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS if (trans && likely(trans->type != __TRANS_DUMMY) && @@ -1361,10 +1366,18 @@ again: goto out; if (!ret && extent_item_pos) { /* - * we've recorded that parent, so we must extend - * its inode list here + * We've recorded that parent, so we must extend + * its inode list here. + * + * However if there was corruption we may not + * have found an eie, return an error in this + * case. */ - BUG_ON(!eie); + ASSERT(eie); + if (!eie) { + ret = -EUCLEAN; + goto out; + } while (eie->next) eie = eie->next; eie->next = ref->inode_list; diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 519cf145f9bd176596bef40a03a17a6b3e81b07e..5addd1e36a8ee1699b5f8be4a4b48c36aeabe282 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -2589,12 +2589,9 @@ static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root, { struct btrfs_fs_info *fs_info = root->fs_info; struct extent_buffer *b; - int root_lock; + int root_lock = 0; int level = 0; - /* We try very hard to do read locks on the root */ - root_lock = BTRFS_READ_LOCK; - if (p->search_commit_root) { /* * The commit roots are read only so we always do read locks, @@ -2632,6 +2629,9 @@ static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root, goto out; } + /* We try very hard to do read locks on the root */ + root_lock = BTRFS_READ_LOCK; + /* * If the level is set to maximum, we can skip trying to get the read * lock. @@ -2658,6 +2658,17 @@ static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root, level = btrfs_header_level(b); out: + /* + * The root may have failed to write out at some point, and thus is no + * longer valid, return an error in this case. + */ + if (!extent_buffer_uptodate(b)) { + if (root_lock) + btrfs_tree_unlock_rw(b, root_lock); + free_extent_buffer(b); + return ERR_PTR(-EIO); + } + p->nodes[level] = b; if (!p->skip_locking) p->locks[level] = root_lock; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index ff3f0638cdb907af56c04cc32a710ff01da00cd2..1d9262a35473ca7f9d696bc69d9fc0dcec43f01d 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -10094,9 +10094,19 @@ static int btrfs_add_swap_extent(struct swap_info_struct *sis, struct btrfs_swap_info *bsi) { unsigned long nr_pages; + unsigned long max_pages; u64 first_ppage, first_ppage_reported, next_ppage; int ret; + /* + * Our swapfile may have had its size extended after the swap header was + * written. In that case activating the swapfile should not go beyond + * the max size set in the swap header. 
+ */ + if (bsi->nr_pages >= sis->max) + return 0; + + max_pages = sis->max - bsi->nr_pages; first_ppage = ALIGN(bsi->block_start, PAGE_SIZE) >> PAGE_SHIFT; next_ppage = ALIGN_DOWN(bsi->block_start + bsi->block_len, PAGE_SIZE) >> PAGE_SHIFT; @@ -10104,6 +10114,7 @@ static int btrfs_add_swap_extent(struct swap_info_struct *sis, if (first_ppage >= next_ppage) return 0; nr_pages = next_ppage - first_ppage; + nr_pages = min(nr_pages, max_pages); first_ppage_reported = first_ppage; if (bsi->start == 0) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 040db0dfba264315572a4abe3933e4213e4137e8..b5e9bfe884c4b7eec01517fdb708eb2fa13636b7 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -3103,10 +3103,8 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file, inode_lock(inode); err = btrfs_delete_subvolume(dir, dentry); inode_unlock(inode); - if (!err) { - fsnotify_rmdir(dir, dentry); - d_delete(dentry); - } + if (!err) + d_delete_notify(dir, dentry); out_dput: dput(dentry); diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 4bac32a274ceb0b6d4602574e512170be06b1f94..e39a12037b40362dec3ffb9e853cf72b7c6c5521 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -941,6 +941,14 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info) int ret = 0; int slot; + /* + * We need to have subvol_sem write locked, to prevent races between + * concurrent tasks trying to enable quotas, because we will unlock + * and relock qgroup_ioctl_lock before setting fs_info->quota_root + * and before setting BTRFS_FS_QUOTA_ENABLED. + */ + lockdep_assert_held_write(&fs_info->subvol_sem); + mutex_lock(&fs_info->qgroup_ioctl_lock); if (fs_info->quota_root) goto out; @@ -1118,8 +1126,19 @@ out_add_root: goto out_free_path; } + mutex_unlock(&fs_info->qgroup_ioctl_lock); + /* + * Commit the transaction while not holding qgroup_ioctl_lock, to avoid + * a deadlock with tasks concurrently doing other qgroup operations, such + * adding/removing qgroups or adding/deleting qgroup relations for example, + * because all qgroup operations first start or join a transaction and then + * lock the qgroup_ioctl_lock mutex. + * We are safe from a concurrent task trying to enable quotas, by calling + * this function, since we are serialized by fs_info->subvol_sem. + */ ret = btrfs_commit_transaction(trans); trans = NULL; + mutex_lock(&fs_info->qgroup_ioctl_lock); if (ret) goto out_free_path; @@ -1167,9 +1186,24 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info) struct btrfs_trans_handle *trans = NULL; int ret = 0; + /* + * We need to have subvol_sem write locked, to prevent races between + * concurrent tasks trying to disable quotas, because we will unlock + * and relock qgroup_ioctl_lock across BTRFS_FS_QUOTA_ENABLED changes. + */ + lockdep_assert_held_write(&fs_info->subvol_sem); + mutex_lock(&fs_info->qgroup_ioctl_lock); if (!fs_info->quota_root) goto out; + + /* + * Request qgroup rescan worker to complete and wait for it. This wait + * must be done before transaction start for quota disable since it may + * deadlock with transaction by the qgroup rescan worker. 
+ */ + clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); + btrfs_qgroup_wait_for_completion(fs_info, false); mutex_unlock(&fs_info->qgroup_ioctl_lock); /* @@ -1187,14 +1221,13 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info) if (IS_ERR(trans)) { ret = PTR_ERR(trans); trans = NULL; + set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); goto out; } if (!fs_info->quota_root) goto out; - clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); - btrfs_qgroup_wait_for_completion(fs_info, false); spin_lock(&fs_info->qgroup_lock); quota_root = fs_info->quota_root; fs_info->quota_root = NULL; @@ -3371,6 +3404,9 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid, btrfs_warn(fs_info, "qgroup rescan init failed, qgroup is not enabled"); ret = -EINVAL; + } else if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) { + /* Quota disable is in progress */ + ret = -EBUSY; } if (ret) { diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 10f020ab1186f1fd049ed9cf80060bf33801ca1e..6b80dee17f49d99219bdd86f5fc130b1bb098a42 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -5006,6 +5006,10 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len) lock_page(page); if (!PageUptodate(page)) { unlock_page(page); + btrfs_err(fs_info, + "send: IO error at offset %llu for inode %llu root %llu", + page_offset(page), sctx->cur_ino, + sctx->send_root->root_key.objectid); put_page(page); ret = -EIO; break; diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index d4a3a56726aa8cba77739a45e8f591d420370838..32f1b15b25dcc34b027b3f4883e36b12e4793e4c 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -947,6 +947,7 @@ static int check_dev_item(struct extent_buffer *leaf, struct btrfs_key *key, int slot) { struct btrfs_dev_item *ditem; + const u32 item_size = btrfs_item_size_nr(leaf, slot); if (key->objectid != BTRFS_DEV_ITEMS_OBJECTID) { dev_item_err(leaf, slot, @@ -954,6 +955,13 @@ static int check_dev_item(struct extent_buffer *leaf, key->objectid, BTRFS_DEV_ITEMS_OBJECTID); return -EUCLEAN; } + + if (unlikely(item_size != sizeof(*ditem))) { + dev_item_err(leaf, slot, "invalid item size: has %u expect %zu", + item_size, sizeof(*ditem)); + return -EUCLEAN; + } + ditem = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item); if (btrfs_device_id(leaf, ditem) != key->offset) { dev_item_err(leaf, slot, @@ -989,6 +997,7 @@ static int check_inode_item(struct extent_buffer *leaf, struct btrfs_inode_item *iitem; u64 super_gen = btrfs_super_generation(fs_info->super_copy); u32 valid_mask = (S_IFMT | S_ISUID | S_ISGID | S_ISVTX | 0777); + const u32 item_size = btrfs_item_size_nr(leaf, slot); u32 mode; int ret; @@ -996,6 +1005,12 @@ static int check_inode_item(struct extent_buffer *leaf, if (ret < 0) return ret; + if (unlikely(item_size != sizeof(*iitem))) { + generic_err(leaf, slot, "invalid item size: has %u expect %zu", + item_size, sizeof(*iitem)); + return -EUCLEAN; + } + iitem = btrfs_item_ptr(leaf, slot, struct btrfs_inode_item); /* Here we use super block generation + 1 to handle log tree */ diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 8ed881fd7440d13292cbd17dee2fa688b68e2d3e..450050801f3b6ef3a1fd094aaeddd0d5d11d8e06 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -577,6 +577,7 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry, struct ceph_inode_info *ci = ceph_inode(dir); struct inode *inode; struct timespec64 now; + struct ceph_string *pool_ns; struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb); struct 
ceph_vino vino = { .ino = req->r_deleg_ino, .snap = CEPH_NOSNAP }; @@ -626,6 +627,12 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry, in.max_size = cpu_to_le64(lo->stripe_unit); ceph_file_layout_to_legacy(lo, &in.layout); + /* lo is private, so pool_ns can't change */ + pool_ns = rcu_dereference_raw(lo->pool_ns); + if (pool_ns) { + iinfo.pool_ns_len = pool_ns->len; + iinfo.pool_ns_data = pool_ns->str; + } down_read(&mdsc->snap_rwsem); ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session, @@ -743,8 +750,10 @@ retry: restore_deleg_ino(dir, req->r_deleg_ino); ceph_mdsc_put_request(req); try_async = false; + ceph_put_string(rcu_dereference_raw(lo.pool_ns)); goto retry; } + ceph_put_string(rcu_dereference_raw(lo.pool_ns)); goto out_req; } } diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index b0983e2a4e2c72d214d46abf66cc2ba0e4f1d4b8..5ad27e484014fdf2cce3df1895089ae4955fe269 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -36,6 +36,14 @@ */ DEFINE_SPINLOCK(configfs_dirent_lock); +/* + * All of link_obj/unlink_obj/link_group/unlink_group require that + * subsys->su_mutex is held. + * But parent configfs_subsystem is NULL when config_item is root. + * Use this mutex when config_item is root. + */ +static DEFINE_MUTEX(configfs_subsystem_mutex); + static void configfs_d_iput(struct dentry * dentry, struct inode * inode) { @@ -1805,8 +1813,8 @@ void configfs_unregister_group(struct config_group *group) configfs_detach_group(&group->cg_item); d_inode(dentry)->i_flags |= S_DEAD; dont_mount(dentry); + d_drop(dentry); fsnotify_rmdir(d_inode(parent), dentry); - d_delete(dentry); inode_unlock(d_inode(parent)); dput(dentry); @@ -1884,7 +1892,9 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys) group->cg_item.ci_name = group->cg_item.ci_namebuf; sd = root->d_fsdata; + mutex_lock(&configfs_subsystem_mutex); link_group(to_config_group(sd->s_element), group); + mutex_unlock(&configfs_subsystem_mutex); inode_lock_nested(d_inode(root), I_MUTEX_PARENT); @@ -1909,7 +1919,9 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys) inode_unlock(d_inode(root)); if (err) { + mutex_lock(&configfs_subsystem_mutex); unlink_group(group); + mutex_unlock(&configfs_subsystem_mutex); configfs_release_fs(); } put_fragment(frag); @@ -1947,16 +1959,18 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys) configfs_detach_group(&group->cg_item); d_inode(dentry)->i_flags |= S_DEAD; dont_mount(dentry); - fsnotify_rmdir(d_inode(root), dentry); inode_unlock(d_inode(dentry)); - d_delete(dentry); + d_drop(dentry); + fsnotify_rmdir(d_inode(root), dentry); inode_unlock(d_inode(root)); dput(dentry); + mutex_lock(&configfs_subsystem_mutex); unlink_group(group); + mutex_unlock(&configfs_subsystem_mutex); configfs_release_fs(); } diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c index 3aa5eb9ce498e46b89ef40e7cc900378de3bb29a..96059af28f5084827dceda7aaac2bb8094fe36cc 100644 --- a/fs/debugfs/file.c +++ b/fs/debugfs/file.c @@ -147,7 +147,7 @@ static int debugfs_locked_down(struct inode *inode, struct file *filp, const struct file_operations *real_fops) { - if ((inode->i_mode & 07777) == 0444 && + if ((inode->i_mode & 07777 & ~0444) == 0 && !(filp->f_mode & FMODE_WRITE) && !real_fops->unlocked_ioctl && !real_fops->compat_ioctl && diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index 42e5a766d33c7d1fea3ca9c05be73639c38c421e..4f25015aa5342a332c76ad7d931a507784eea764 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -621,8 +621,8 @@ 
void devpts_pty_kill(struct dentry *dentry) dentry->d_fsdata = NULL; drop_nlink(dentry->d_inode); - fsnotify_unlink(d_inode(dentry->d_parent), dentry); d_drop(dentry); + fsnotify_unlink(d_inode(dentry->d_parent), dentry); dput(dentry); /* d_alloc_name() in devpts_pty_new() */ } diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index 002123efc6b052346ee8c7093789fbbb08929bd1..1e9d8999b9390a32c4dcbf217a5b1be93d183885 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -3975,6 +3975,14 @@ static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms) int from = ms->m_header.h_nodeid; int error = 0; + /* currently mixing of user/kernel locks are not supported */ + if (ms->m_flags & DLM_IFL_USER && ~lkb->lkb_flags & DLM_IFL_USER) { + log_error(lkb->lkb_resource->res_ls, + "got user dlm message for a kernel lock"); + error = -EINVAL; + goto out; + } + switch (ms->m_type) { case DLM_MSG_CONVERT: case DLM_MSG_UNLOCK: @@ -4003,6 +4011,7 @@ static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms) error = -EINVAL; } +out: if (error) log_error(lkb->lkb_resource->res_ls, "ignore invalid message %d from %d %x %x %x %d", diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 0c78fdfb1f6faaec81253a6535055cef23a0acc8..68b765369c928fcda16b0a92f61910a96d9b6dd1 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -471,8 +471,8 @@ int dlm_lowcomms_connect_node(int nodeid) static void lowcomms_error_report(struct sock *sk) { struct connection *con; - struct sockaddr_storage saddr; void (*orig_report)(struct sock *) = NULL; + struct inet_sock *inet; read_lock_bh(&sk->sk_callback_lock); con = sock2con(sk); @@ -480,34 +480,33 @@ static void lowcomms_error_report(struct sock *sk) goto out; orig_report = listen_sock.sk_error_report; - if (con->sock == NULL || - kernel_getpeername(con->sock, (struct sockaddr *)&saddr) < 0) { - printk_ratelimited(KERN_ERR "dlm: node %d: socket error " - "sending to node %d, port %d, " - "sk_err=%d/%d\n", dlm_our_nodeid(), - con->nodeid, dlm_config.ci_tcp_port, - sk->sk_err, sk->sk_err_soft); - } else if (saddr.ss_family == AF_INET) { - struct sockaddr_in *sin4 = (struct sockaddr_in *)&saddr; + inet = inet_sk(sk); + switch (sk->sk_family) { + case AF_INET: printk_ratelimited(KERN_ERR "dlm: node %d: socket error " - "sending to node %d at %pI4, port %d, " + "sending to node %d at %pI4, dport %d, " "sk_err=%d/%d\n", dlm_our_nodeid(), - con->nodeid, &sin4->sin_addr.s_addr, - dlm_config.ci_tcp_port, sk->sk_err, + con->nodeid, &inet->inet_daddr, + ntohs(inet->inet_dport), sk->sk_err, sk->sk_err_soft); - } else { - struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&saddr; - + break; +#if IS_ENABLED(CONFIG_IPV6) + case AF_INET6: printk_ratelimited(KERN_ERR "dlm: node %d: socket error " - "sending to node %d at %u.%u.%u.%u, " - "port %d, sk_err=%d/%d\n", dlm_our_nodeid(), - con->nodeid, sin6->sin6_addr.s6_addr32[0], - sin6->sin6_addr.s6_addr32[1], - sin6->sin6_addr.s6_addr32[2], - sin6->sin6_addr.s6_addr32[3], - dlm_config.ci_tcp_port, sk->sk_err, + "sending to node %d at %pI6c, " + "dport %d, sk_err=%d/%d\n", dlm_our_nodeid(), + con->nodeid, &sk->sk_v6_daddr, + ntohs(inet->inet_dport), sk->sk_err, sk->sk_err_soft); + break; +#endif + default: + printk_ratelimited(KERN_ERR "dlm: node %d: socket error " + "invalid socket family %d set, " + "sk_err=%d/%d\n", dlm_our_nodeid(), + sk->sk_family, sk->sk_err, sk->sk_err_soft); + goto out; } out: read_unlock_bh(&sk->sk_callback_lock); diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 
115a77b96e5e144780fe31f584a7b4fac5aca23c..455eb349c76f85d3a4a93371b98b6431508ea9ad 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -2778,6 +2778,10 @@ bool ext4_fc_replay_check_excluded(struct super_block *sb, ext4_fsblk_t block); void ext4_fc_replay_cleanup(struct super_block *sb); int ext4_fc_commit(journal_t *journal, tid_t commit_tid); int __init ext4_fc_init_dentry_cache(void); +void ext4_fc_destroy_dentry_cache(void); +int ext4_fc_record_regions(struct super_block *sb, int ino, + ext4_lblk_t lblk, ext4_fsblk_t pblk, + int len, int replay); /* mballoc.c */ extern const struct seq_operations ext4_mb_seq_groups_ops; diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c index 0fd0c42a4f7db23689f98bbc21bef44ca2d1896a..6ff7b4020df8ad18899f88495fb112ae90db8428 100644 --- a/fs/ext4/ext4_jbd2.c +++ b/fs/ext4/ext4_jbd2.c @@ -162,6 +162,8 @@ int __ext4_journal_ensure_credits(handle_t *handle, int check_cred, { if (!ext4_handle_valid(handle)) return 0; + if (is_handle_aborted(handle)) + return -EROFS; if (jbd2_handle_buffer_credits(handle) >= check_cred && handle->h_revoke_credits >= revoke_cred) return 0; diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index b8c9df6ab67f5fd2a7c7a6edb00f31f5512d2e11..0fda3051760d1fe6ae976ffc2b1cb5d62fae74ed 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -4638,8 +4638,6 @@ static long ext4_zero_range(struct file *file, loff_t offset, ret = ext4_mark_inode_dirty(handle, inode); if (unlikely(ret)) goto out_handle; - ext4_fc_track_range(handle, inode, offset >> inode->i_sb->s_blocksize_bits, - (offset + len - 1) >> inode->i_sb->s_blocksize_bits); /* Zero out partial block at the edges of the range */ ret = ext4_zero_partial_blocks(handle, inode, offset, len); if (ret >= 0) @@ -6090,11 +6088,15 @@ int ext4_ext_clear_bb(struct inode *inode) ext4_mb_mark_bb(inode->i_sb, path[j].p_block, 1, 0); + ext4_fc_record_regions(inode->i_sb, inode->i_ino, + 0, path[j].p_block, 1, 1); } ext4_ext_drop_refs(path); kfree(path); } ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, 0); + ext4_fc_record_regions(inode->i_sb, inode->i_ino, + map.m_lblk, map.m_pblk, map.m_len, 1); } cur = cur + map.m_len; } diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c index 08ca690f928bd3857ed8fb7e617dbe014c6ea8ac..501e60713010e57a0f84ad3b4335b9a43d23d497 100644 --- a/fs/ext4/fast_commit.c +++ b/fs/ext4/fast_commit.c @@ -1388,14 +1388,15 @@ static int ext4_fc_record_modified_inode(struct super_block *sb, int ino) if (state->fc_modified_inodes[i] == ino) return 0; if (state->fc_modified_inodes_used == state->fc_modified_inodes_size) { - state->fc_modified_inodes_size += - EXT4_FC_REPLAY_REALLOC_INCREMENT; state->fc_modified_inodes = krealloc( - state->fc_modified_inodes, sizeof(int) * - state->fc_modified_inodes_size, - GFP_KERNEL); + state->fc_modified_inodes, + sizeof(int) * (state->fc_modified_inodes_size + + EXT4_FC_REPLAY_REALLOC_INCREMENT), + GFP_KERNEL); if (!state->fc_modified_inodes) return -ENOMEM; + state->fc_modified_inodes_size += + EXT4_FC_REPLAY_REALLOC_INCREMENT; } state->fc_modified_inodes[state->fc_modified_inodes_used++] = ino; return 0; @@ -1427,7 +1428,9 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl, } inode = NULL; - ext4_fc_record_modified_inode(sb, ino); + ret = ext4_fc_record_modified_inode(sb, ino); + if (ret) + goto out; raw_fc_inode = (struct ext4_inode *) (val + offsetof(struct ext4_fc_inode, fc_raw_inode)); @@ -1558,16 +1561,23 @@ out: } /* - * Record physical disk regions which are in use as per fast commit 
area. Our - * simple replay phase allocator excludes these regions from allocation. + * Record physical disk regions which are in use as per fast commit area, + * and used by inodes during replay phase. Our simple replay phase + * allocator excludes these regions from allocation. */ -static int ext4_fc_record_regions(struct super_block *sb, int ino, - ext4_lblk_t lblk, ext4_fsblk_t pblk, int len) +int ext4_fc_record_regions(struct super_block *sb, int ino, + ext4_lblk_t lblk, ext4_fsblk_t pblk, int len, int replay) { struct ext4_fc_replay_state *state; struct ext4_fc_alloc_region *region; state = &EXT4_SB(sb)->s_fc_replay_state; + /* + * during replay phase, the fc_regions_valid may not same as + * fc_regions_used, update it when do new additions. + */ + if (replay && state->fc_regions_used != state->fc_regions_valid) + state->fc_regions_used = state->fc_regions_valid; if (state->fc_regions_used == state->fc_regions_size) { state->fc_regions_size += EXT4_FC_REPLAY_REALLOC_INCREMENT; @@ -1585,6 +1595,9 @@ static int ext4_fc_record_regions(struct super_block *sb, int ino, region->pblk = pblk; region->len = len; + if (replay) + state->fc_regions_valid++; + return 0; } @@ -1616,6 +1629,8 @@ static int ext4_fc_replay_add_range(struct super_block *sb, } ret = ext4_fc_record_modified_inode(sb, inode->i_ino); + if (ret) + goto out; start = le32_to_cpu(ex->ee_block); start_pblk = ext4_ext_pblock(ex); @@ -1633,18 +1648,14 @@ static int ext4_fc_replay_add_range(struct super_block *sb, map.m_pblk = 0; ret = ext4_map_blocks(NULL, inode, &map, 0); - if (ret < 0) { - iput(inode); - return 0; - } + if (ret < 0) + goto out; if (ret == 0) { /* Range is not mapped */ path = ext4_find_extent(inode, cur, NULL, 0); - if (IS_ERR(path)) { - iput(inode); - return 0; - } + if (IS_ERR(path)) + goto out; memset(&newex, 0, sizeof(newex)); newex.ee_block = cpu_to_le32(cur); ext4_ext_store_pblock( @@ -1658,10 +1669,8 @@ static int ext4_fc_replay_add_range(struct super_block *sb, up_write((&EXT4_I(inode)->i_data_sem)); ext4_ext_drop_refs(path); kfree(path); - if (ret) { - iput(inode); - return 0; - } + if (ret) + goto out; goto next; } @@ -1674,10 +1683,8 @@ static int ext4_fc_replay_add_range(struct super_block *sb, ret = ext4_ext_replay_update_ex(inode, cur, map.m_len, ext4_ext_is_unwritten(ex), start_pblk + cur - start); - if (ret) { - iput(inode); - return 0; - } + if (ret) + goto out; /* * Mark the old blocks as free since they aren't used * anymore. We maintain an array of all the modified @@ -1697,10 +1704,8 @@ static int ext4_fc_replay_add_range(struct super_block *sb, ext4_ext_is_unwritten(ex), map.m_pblk); ret = ext4_ext_replay_update_ex(inode, cur, map.m_len, ext4_ext_is_unwritten(ex), map.m_pblk); - if (ret) { - iput(inode); - return 0; - } + if (ret) + goto out; /* * We may have split the extent tree while toggling the state. * Try to shrink the extent tree now. 
@@ -1712,6 +1717,7 @@ next: } ext4_ext_replay_shrink_inode(inode, i_size_read(inode) >> sb->s_blocksize_bits); +out: iput(inode); return 0; } @@ -1741,6 +1747,8 @@ ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl, } ret = ext4_fc_record_modified_inode(sb, inode->i_ino); + if (ret) + goto out; jbd_debug(1, "DEL_RANGE, inode %ld, lblk %d, len %d\n", inode->i_ino, le32_to_cpu(lrange.fc_lblk), @@ -1750,10 +1758,8 @@ ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl, map.m_len = remaining; ret = ext4_map_blocks(NULL, inode, &map, 0); - if (ret < 0) { - iput(inode); - return 0; - } + if (ret < 0) + goto out; if (ret > 0) { remaining -= ret; cur += ret; @@ -1764,16 +1770,18 @@ ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl, } } - ret = ext4_punch_hole(inode, - le32_to_cpu(lrange.fc_lblk) << sb->s_blocksize_bits, - le32_to_cpu(lrange.fc_len) << sb->s_blocksize_bits); + down_write(&EXT4_I(inode)->i_data_sem); + ret = ext4_ext_remove_space(inode, le32_to_cpu(lrange.fc_lblk), + le32_to_cpu(lrange.fc_lblk) + + le32_to_cpu(lrange.fc_len) - 1); + up_write(&EXT4_I(inode)->i_data_sem); if (ret) - jbd_debug(1, "ext4_punch_hole returned %d", ret); + goto out; ext4_ext_replay_shrink_inode(inode, i_size_read(inode) >> sb->s_blocksize_bits); ext4_mark_inode_dirty(NULL, inode); +out: iput(inode); - return 0; } @@ -1951,7 +1959,7 @@ static int ext4_fc_replay_scan(journal_t *journal, ret = ext4_fc_record_regions(sb, le32_to_cpu(ext.fc_ino), le32_to_cpu(ex->ee_block), ext4_ext_pblock(ex), - ext4_ext_get_actual_len(ex)); + ext4_ext_get_actual_len(ex), 0); if (ret < 0) break; ret = JBD2_FC_REPLAY_CONTINUE; @@ -2166,3 +2174,8 @@ int __init ext4_fc_init_dentry_cache(void) return 0; } + +void ext4_fc_destroy_dentry_cache(void) +{ + kmem_cache_destroy(ext4_fc_dentry_cachep); +} diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index a96b688a0410fbf97579b37deb4c8af07e8be685..ae1f0c57f54d26dad4109472ab943324100a17a3 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -1120,7 +1120,15 @@ static void ext4_restore_inline_data(handle_t *handle, struct inode *inode, struct ext4_iloc *iloc, void *buf, int inline_size) { - ext4_create_inline_data(handle, inode, inline_size); + int ret; + + ret = ext4_create_inline_data(handle, inode, inline_size); + if (ret) { + ext4_msg(inode->i_sb, KERN_EMERG, + "error restoring inline_data for inode -- potential data loss! 
(inode %lu, error %d)", + inode->i_ino, ret); + return; + } ext4_write_inline_data(inode, iloc, buf, 0, inline_size); ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA); } diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 317aa1b90fb955321523061ace3d6f295793f962..d59474a541897af1b4a6185c34c3cc686e7d3a8c 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -741,10 +741,11 @@ out_sem: if (ret) return ret; } - ext4_fc_track_range(handle, inode, map->m_lblk, - map->m_lblk + map->m_len - 1); } - + if (retval > 0 && (map->m_flags & EXT4_MAP_UNWRITTEN || + map->m_flags & EXT4_MAP_MAPPED)) + ext4_fc_track_range(handle, inode, map->m_lblk, + map->m_lblk + map->m_len - 1); if (retval < 0) ext_debug(inode, "failed with err %d\n", retval); return retval; @@ -4445,7 +4446,7 @@ has_buffer: static int __ext4_get_inode_loc_noinmem(struct inode *inode, struct ext4_iloc *iloc) { - ext4_fsblk_t err_blk; + ext4_fsblk_t err_blk = 0; int ret; ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, iloc, 0, @@ -4460,7 +4461,7 @@ static int __ext4_get_inode_loc_noinmem(struct inode *inode, int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) { - ext4_fsblk_t err_blk; + ext4_fsblk_t err_blk = 0; int ret; /* We have all inode data except xattrs in memory here. */ @@ -5467,8 +5468,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) ext4_fc_track_range(handle, inode, (attr->ia_size > 0 ? attr->ia_size - 1 : 0) >> inode->i_sb->s_blocksize_bits, - (oldsize > 0 ? oldsize - 1 : 0) >> - inode->i_sb->s_blocksize_bits); + EXT_MAX_BLOCKS - 1); else ext4_fc_track_range( handle, inode, diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index cb54ea6461fd87a8950986924a296752b7cd01ee..413bf3d2f784437945dec5e36ad9649feda031da 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -1123,8 +1123,6 @@ resizefs_out: sizeof(range))) return -EFAULT; - range.minlen = max((unsigned int)range.minlen, - q->limits.discard_granularity); ret = ext4_trim_fs(sb, &range); if (ret < 0) return ret; diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index d7cb7d719ee5845924146bd92c5837b4042de266..110c25824a67fd1ead6c89dfdec1e614a2a07671 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -4234,7 +4234,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b, */ static noinline_for_stack int ext4_mb_discard_group_preallocations(struct super_block *sb, - ext4_group_t group, int needed) + ext4_group_t group, int *busy) { struct ext4_group_info *grp = ext4_get_group_info(sb, group); struct buffer_head *bitmap_bh = NULL; @@ -4242,8 +4242,7 @@ ext4_mb_discard_group_preallocations(struct super_block *sb, struct list_head list; struct ext4_buddy e4b; int err; - int busy = 0; - int free, free_total = 0; + int free = 0; mb_debug(sb, "discard preallocation for group %u\n", group); if (list_empty(&grp->bb_prealloc_list)) @@ -4266,19 +4265,14 @@ ext4_mb_discard_group_preallocations(struct super_block *sb, goto out_dbg; } - if (needed == 0) - needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1; - INIT_LIST_HEAD(&list); -repeat: - free = 0; ext4_lock_group(sb, group); list_for_each_entry_safe(pa, tmp, &grp->bb_prealloc_list, pa_group_list) { spin_lock(&pa->pa_lock); if (atomic_read(&pa->pa_count)) { spin_unlock(&pa->pa_lock); - busy = 1; + *busy = 1; continue; } if (pa->pa_deleted) { @@ -4318,22 +4312,13 @@ repeat: call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); } - free_total += free; - - /* if we still need more blocks and some PAs were used, try again */ - if (free_total < needed && busy) { - ext4_unlock_group(sb, group); - cond_resched(); 
- busy = 0; - goto repeat; - } ext4_unlock_group(sb, group); ext4_mb_unload_buddy(&e4b); put_bh(bitmap_bh); out_dbg: mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n", - free_total, group, grp->bb_free); - return free_total; + free, group, grp->bb_free); + return free; } /* @@ -4875,13 +4860,24 @@ static int ext4_mb_discard_preallocations(struct super_block *sb, int needed) { ext4_group_t i, ngroups = ext4_get_groups_count(sb); int ret; - int freed = 0; + int freed = 0, busy = 0; + int retry = 0; trace_ext4_mb_discard_preallocations(sb, needed); + + if (needed == 0) + needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1; + repeat: for (i = 0; i < ngroups && needed > 0; i++) { - ret = ext4_mb_discard_group_preallocations(sb, i, needed); + ret = ext4_mb_discard_group_preallocations(sb, i, &busy); freed += ret; needed -= ret; + cond_resched(); + } + + if (needed > 0 && busy && ++retry < 3) { + busy = 0; + goto repeat; } return freed; @@ -5177,7 +5173,8 @@ static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle, struct super_block *sb = ar->inode->i_sb; ext4_group_t group; ext4_grpblk_t blkoff; - int i = sb->s_blocksize; + ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb); + ext4_grpblk_t i = 0; ext4_fsblk_t goal, block; struct ext4_super_block *es = EXT4_SB(sb)->s_es; @@ -5199,19 +5196,26 @@ static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle, ext4_get_group_no_and_offset(sb, max(ext4_group_first_block_no(sb, group), goal), NULL, &blkoff); - i = mb_find_next_zero_bit(bitmap_bh->b_data, sb->s_blocksize, + while (1) { + i = mb_find_next_zero_bit(bitmap_bh->b_data, max, blkoff); + if (i >= max) + break; + if (ext4_fc_replay_check_excluded(sb, + ext4_group_first_block_no(sb, group) + i)) { + blkoff = i + 1; + } else + break; + } brelse(bitmap_bh); - if (i >= sb->s_blocksize) - continue; - if (ext4_fc_replay_check_excluded(sb, - ext4_group_first_block_no(sb, group) + i)) - continue; - break; + if (i < max) + break; } - if (group >= ext4_get_groups_count(sb) && i >= sb->s_blocksize) + if (group >= ext4_get_groups_count(sb) || i >= max) { + *errp = -ENOSPC; return 0; + } block = ext4_group_first_block_no(sb, group) + i; ext4_mb_mark_bb(sb, block, 1, 1); @@ -5815,6 +5819,7 @@ out: */ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) { + struct request_queue *q = bdev_get_queue(sb->s_bdev); struct ext4_group_info *grp; ext4_group_t group, first_group, last_group; ext4_grpblk_t cnt = 0, first_cluster, last_cluster; @@ -5833,6 +5838,13 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) start >= max_blks || range->len < sb->s_blocksize) return -EINVAL; + /* No point to try to trim less than discard granularity */ + if (range->minlen < q->limits.discard_granularity) { + minlen = EXT4_NUM_B2C(EXT4_SB(sb), + q->limits.discard_granularity >> sb->s_blocksize_bits); + if (minlen > EXT4_CLUSTERS_PER_GROUP(sb)) + goto out; + } if (end >= max_blks) end = max_blks - 1; if (end <= first_data_blk) diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c index c5e3fc998211acfe9449e8be42ef2c6cd96542d7..49912814f3d8dda93f50b98f664f586a81c8d072 100644 --- a/fs/ext4/migrate.c +++ b/fs/ext4/migrate.c @@ -437,12 +437,12 @@ int ext4_ext_migrate(struct inode *inode) percpu_down_write(&sbi->s_writepages_rwsem); /* - * Worst case we can touch the allocation bitmaps, a bgd - * block, and a block to link in the orphan list. We do need - * need to worry about credits for modifying the quota inode. 
+ * Worst case we can touch the allocation bitmaps and a block + * group descriptor block. We do need need to worry about + * credits for modifying the quota inode. */ handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, - 4 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb)); + 3 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb)); if (IS_ERR(handle)) { retval = PTR_ERR(handle); @@ -459,6 +459,13 @@ int ext4_ext_migrate(struct inode *inode) ext4_journal_stop(handle); goto out_unlock; } + /* + * Use the correct seed for checksum (i.e. the seed from 'inode'). This + * is so that the metadata blocks will have the correct checksum after + * the migration. + */ + ei = EXT4_I(inode); + EXT4_I(tmp_inode)->i_csum_seed = ei->i_csum_seed; i_size_write(tmp_inode, i_size_read(inode)); /* * Set the i_nlink to zero so it will be deleted later @@ -467,7 +474,6 @@ int ext4_ext_migrate(struct inode *inode) clear_nlink(tmp_inode); ext4_ext_tree_init(handle, tmp_inode); - ext4_orphan_add(handle, tmp_inode); ext4_journal_stop(handle); /* @@ -492,17 +498,10 @@ int ext4_ext_migrate(struct inode *inode) handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1); if (IS_ERR(handle)) { - /* - * It is impossible to update on-disk structures without - * a handle, so just rollback in-core changes and live other - * work to orphan_list_cleanup() - */ - ext4_orphan_del(NULL, tmp_inode); retval = PTR_ERR(handle); goto out_tmp_inode; } - ei = EXT4_I(inode); i_data = ei->i_data; memset(&lb, 0, sizeof(lb)); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index b1af6588bad011940a5117ea9f4234fa1ed4549c..9e210bc85c817e03aab29c88aee0968bc5779353 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -6341,10 +6341,7 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id, lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA); err = dquot_quota_on(sb, type, format_id, path); - if (err) { - lockdep_set_quota_inode(path->dentry->d_inode, - I_DATA_SEM_NORMAL); - } else { + if (!err) { struct inode *inode = d_inode(path->dentry); handle_t *handle; @@ -6364,7 +6361,12 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id, ext4_journal_stop(handle); unlock_inode: inode_unlock(inode); + if (err) + dquot_quota_off(sb, type); } + if (err) + lockdep_set_quota_inode(path->dentry->d_inode, + I_DATA_SEM_NORMAL); return err; } @@ -6427,8 +6429,19 @@ static int ext4_enable_quotas(struct super_block *sb) "Failed to enable quota tracking " "(type=%d, err=%d). 
Please run " "e2fsck to fix.", type, err); - for (type--; type >= 0; type--) + for (type--; type >= 0; type--) { + struct inode *inode; + + inode = sb_dqopt(sb)->files[type]; + if (inode) + inode = igrab(inode); dquot_quota_off(sb, type); + if (inode) { + lockdep_set_quota_inode(inode, + I_DATA_SEM_NORMAL); + iput(inode); + } + } return err; } @@ -6532,7 +6545,7 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type, struct buffer_head *bh; handle_t *handle = journal_current_handle(); - if (EXT4_SB(sb)->s_journal && !handle) { + if (!handle) { ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)" " cancelled because transaction is not started", (unsigned long long)off, (unsigned long long)len); @@ -6716,6 +6729,7 @@ static int __init ext4_init_fs(void) out: unregister_as_ext2(); unregister_as_ext3(); + ext4_fc_destroy_dentry_cache(); out05: destroy_inodecache(); out1: @@ -6742,6 +6756,7 @@ static void __exit ext4_exit_fs(void) unregister_as_ext2(); unregister_as_ext3(); unregister_filesystem(&ext4_fs_type); + ext4_fc_destroy_dentry_cache(); destroy_inodecache(); ext4_exit_mballoc(); ext4_exit_sysfs(); diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c index 30987ea011f1a57c4c1c1dabd7fad7b4d95587c0..ec542e8c46cc981ed6f59e0442fdf8c7daf36251 100644 --- a/fs/f2fs/compress.c +++ b/fs/f2fs/compress.c @@ -1362,25 +1362,38 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc, enum iostat_type io_type) { struct address_space *mapping = cc->inode->i_mapping; - int _submitted, compr_blocks, ret; - int i = -1, err = 0; + int _submitted, compr_blocks, ret, i; compr_blocks = f2fs_compressed_blocks(cc); - if (compr_blocks < 0) { - err = compr_blocks; - goto out_err; + + for (i = 0; i < cc->cluster_size; i++) { + if (!cc->rpages[i]) + continue; + + redirty_page_for_writepage(wbc, cc->rpages[i]); + unlock_page(cc->rpages[i]); } + if (compr_blocks < 0) + return compr_blocks; + for (i = 0; i < cc->cluster_size; i++) { if (!cc->rpages[i]) continue; retry_write: + lock_page(cc->rpages[i]); + if (cc->rpages[i]->mapping != mapping) { +continue_unlock: unlock_page(cc->rpages[i]); continue; } - BUG_ON(!PageLocked(cc->rpages[i])); + if (!PageDirty(cc->rpages[i])) + goto continue_unlock; + + if (!clear_page_dirty_for_io(cc->rpages[i])) + goto continue_unlock; ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted, NULL, NULL, wbc, io_type, @@ -1395,26 +1408,15 @@ retry_write: * avoid deadlock caused by cluster update race * from foreground operation. 
*/ - if (IS_NOQUOTA(cc->inode)) { - err = 0; - goto out_err; - } + if (IS_NOQUOTA(cc->inode)) + return 0; ret = 0; cond_resched(); congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT); - lock_page(cc->rpages[i]); - - if (!PageDirty(cc->rpages[i])) { - unlock_page(cc->rpages[i]); - continue; - } - - clear_page_dirty_for_io(cc->rpages[i]); goto retry_write; } - err = ret; - goto out_err; + return ret; } *submitted += _submitted; @@ -1423,14 +1425,6 @@ retry_write: f2fs_balance_fs(F2FS_M_SB(mapping), true); return 0; -out_err: - for (++i; i < cc->cluster_size; i++) { - if (!cc->rpages[i]) - continue; - redirty_page_for_writepage(wbc, cc->rpages[i]); - unlock_page(cc->rpages[i]); - } - return err; } int f2fs_write_multi_pages(struct compress_ctx *cc, diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index bc488a7d01903180a723f4a7f5fa8abf532fe004..6c4bf22a3e83e6c1e7e9763144370ac6be0b04a1 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -955,6 +955,7 @@ struct f2fs_sm_info { unsigned int segment_count; /* total # of segments */ unsigned int main_segments; /* # of segments in main area */ unsigned int reserved_segments; /* # of reserved segments */ + unsigned int additional_reserved_segments;/* reserved segs for IO align feature */ unsigned int ovp_segments; /* # of overprovision segments */ /* a threshold to reclaim prefree segments */ @@ -1984,6 +1985,11 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi, if (!__allow_reserved_blocks(sbi, inode, true)) avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks; + + if (F2FS_IO_ALIGNED(sbi)) + avail_user_block_count -= sbi->blocks_per_seg * + SM_I(sbi)->additional_reserved_segments; + if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { if (avail_user_block_count > sbi->unusable_block_count) avail_user_block_count -= sbi->unusable_block_count; @@ -2229,6 +2235,11 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi, if (!__allow_reserved_blocks(sbi, inode, false)) valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks; + + if (F2FS_IO_ALIGNED(sbi)) + valid_block_count += sbi->blocks_per_seg * + SM_I(sbi)->additional_reserved_segments; + user_block_count = sbi->user_block_count; if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) user_block_count -= sbi->unusable_block_count; diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 72f227f6ebad094c1712efae3eeedb0e2d47d7ba..6b240b71d2e835ce18541bc4d945a24694f94eda 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -998,6 +998,9 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, set_sbi_flag(sbi, SBI_NEED_FSCK); } + if (f2fs_check_nid_range(sbi, dni->ino)) + return false; + *nofs = ofs_of_node(node_page); source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node); f2fs_put_page(node_page, 1); diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h index 1bf33fc27b8f83b69630c1266c67e36a45318627..beef833a6960489fe5e4be6f166cd6df65ac31c4 100644 --- a/fs/f2fs/segment.h +++ b/fs/f2fs/segment.h @@ -539,7 +539,8 @@ static inline unsigned int free_segments(struct f2fs_sb_info *sbi) static inline unsigned int reserved_segments(struct f2fs_sb_info *sbi) { - return SM_I(sbi)->reserved_segments; + return SM_I(sbi)->reserved_segments + + SM_I(sbi)->additional_reserved_segments; } static inline unsigned int free_sections(struct f2fs_sb_info *sbi) diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index b7287b722e9e113c27c4b7797ec522dc6782a61b..af98abb17c272a1b1d2b1c32547da694916211a9 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -289,6 +289,46 @@ static inline 
void limit_reserve_root(struct f2fs_sb_info *sbi) F2FS_OPTION(sbi).s_resgid)); } +static inline int adjust_reserved_segment(struct f2fs_sb_info *sbi) +{ + unsigned int sec_blks = sbi->blocks_per_seg * sbi->segs_per_sec; + unsigned int avg_vblocks; + unsigned int wanted_reserved_segments; + block_t avail_user_block_count; + + if (!F2FS_IO_ALIGNED(sbi)) + return 0; + + /* average valid block count in section in worst case */ + avg_vblocks = sec_blks / F2FS_IO_SIZE(sbi); + + /* + * we need enough free space when migrating one section in worst case + */ + wanted_reserved_segments = (F2FS_IO_SIZE(sbi) / avg_vblocks) * + reserved_segments(sbi); + wanted_reserved_segments -= reserved_segments(sbi); + + avail_user_block_count = sbi->user_block_count - + sbi->current_reserved_blocks - + F2FS_OPTION(sbi).root_reserved_blocks; + + if (wanted_reserved_segments * sbi->blocks_per_seg > + avail_user_block_count) { + f2fs_err(sbi, "IO align feature can't grab additional reserved segment: %u, available segments: %u", + wanted_reserved_segments, + avail_user_block_count >> sbi->log_blocks_per_seg); + return -ENOSPC; + } + + SM_I(sbi)->additional_reserved_segments = wanted_reserved_segments; + + f2fs_info(sbi, "IO align feature needs additional reserved segment: %u", + wanted_reserved_segments); + + return 0; +} + static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi) { if (!F2FS_OPTION(sbi).unusable_cap_perc) @@ -3736,6 +3776,10 @@ try_onemore: goto free_nm; } + err = adjust_reserved_segment(sbi); + if (err) + goto free_nm; + /* For write statistics */ if (sb->s_bdev->bd_part) sbi->sectors_written_start = diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c index b8850c81068a0e861413041c0c78482f35dcd839..7ffd4bb398b0cbd0b24fb2222c77d26830051e2f 100644 --- a/fs/f2fs/sysfs.c +++ b/fs/f2fs/sysfs.c @@ -330,7 +330,9 @@ out: if (a->struct_type == RESERVED_BLOCKS) { spin_lock(&sbi->stat_lock); if (t > (unsigned long)(sbi->user_block_count - - F2FS_OPTION(sbi).root_reserved_blocks)) { + F2FS_OPTION(sbi).root_reserved_blocks - + sbi->blocks_per_seg * + SM_I(sbi)->additional_reserved_segments)) { spin_unlock(&sbi->stat_lock); return -EINVAL; } diff --git a/fs/file.c b/fs/file.c index 9d02352fa18c3e7dca4e4e338733e94b623eba0d..79a76d04c7c337cde54001ab5bef231a52240a63 100644 --- a/fs/file.c +++ b/fs/file.c @@ -817,28 +817,68 @@ void do_close_on_exec(struct files_struct *files) spin_unlock(&files->file_lock); } -static struct file *__fget_files(struct files_struct *files, unsigned int fd, - fmode_t mask, unsigned int refs) +static inline struct file *__fget_files_rcu(struct files_struct *files, + unsigned int fd, fmode_t mask, unsigned int refs) { - struct file *file; + for (;;) { + struct file *file; + struct fdtable *fdt = rcu_dereference_raw(files->fdt); + struct file __rcu **fdentry; - rcu_read_lock(); -loop: - file = fcheck_files(files, fd); - if (file) { - /* File object ref couldn't be taken. - * dup2() atomicity guarantee is the reason - * we loop to catch the new file (or NULL pointer) + if (unlikely(fd >= fdt->max_fds)) + return NULL; + + fdentry = fdt->fd + array_index_nospec(fd, fdt->max_fds); + file = rcu_dereference_raw(*fdentry); + if (unlikely(!file)) + return NULL; + + if (unlikely(file->f_mode & mask)) + return NULL; + + /* + * Ok, we have a file pointer. However, because we do + * this all locklessly under RCU, we may be racing with + * that file being closed. + * + * Such a race can take two forms: + * + * (a) the file ref already went down to zero, + * and get_file_rcu_many() fails. 
Just try + * again: */ - if (file->f_mode & mask) - file = NULL; - else if (!get_file_rcu_many(file, refs)) - goto loop; - else if (__fcheck_files(files, fd) != file) { + if (unlikely(!get_file_rcu_many(file, refs))) + continue; + + /* + * (b) the file table entry has changed under us. + * Note that we don't need to re-check the 'fdt->fd' + * pointer having changed, because it always goes + * hand-in-hand with 'fdt'. + * + * If so, we need to put our refs and try again. + */ + if (unlikely(rcu_dereference_raw(files->fdt) != fdt) || + unlikely(rcu_dereference_raw(*fdentry) != file)) { fput_many(file, refs); - goto loop; + continue; } + + /* + * Ok, we have a ref to the file, and checked that it + * still exists. + */ + return file; } +} + +static struct file *__fget_files(struct files_struct *files, unsigned int fd, + fmode_t mask, unsigned int refs) +{ + struct file *file; + + rcu_read_lock(); + file = __fget_files_rcu(files, fd, mask, refs); rcu_read_unlock(); return file; diff --git a/fs/fs_context.c b/fs/fs_context.c index 2834d1afa6e8053e93c6e78122be8d58107a36c9..740322dff4a30a5acbaebbffe5f57064912e88da 100644 --- a/fs/fs_context.c +++ b/fs/fs_context.c @@ -231,7 +231,7 @@ static struct fs_context *alloc_fs_context(struct file_system_type *fs_type, struct fs_context *fc; int ret = -ENOMEM; - fc = kzalloc(sizeof(struct fs_context), GFP_KERNEL); + fc = kzalloc(sizeof(struct fs_context), GFP_KERNEL_ACCOUNT); if (!fc) return ERR_PTR(-ENOMEM); @@ -530,7 +530,7 @@ static int legacy_parse_param(struct fs_context *fc, struct fs_parameter *param) param->key); } - if (len > PAGE_SIZE - 2 - size) + if (size + len + 2 > PAGE_SIZE) return invalf(fc, "VFS: Legacy: Cumulative options too large"); if (strchr(param->key, ',') || (param->type == fs_value_is_string && @@ -631,7 +631,7 @@ const struct fs_context_operations legacy_fs_context_ops = { */ static int legacy_init_fs_context(struct fs_context *fc) { - fc->fs_private = kzalloc(sizeof(struct legacy_fs_context), GFP_KERNEL); + fc->fs_private = kzalloc(sizeof(struct legacy_fs_context), GFP_KERNEL_ACCOUNT); if (!fc->fs_private) return -ENOMEM; fc->ops = &legacy_fs_context_ops; diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 4dd70b53df81a922c536fd926328f952a0ba8dff..e81d1c3eb7e11efb06a39ae91ec8c97a9a8dcee4 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -3251,7 +3251,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter) static int fuse_writeback_range(struct inode *inode, loff_t start, loff_t end) { - int err = filemap_write_and_wait_range(inode->i_mapping, start, -1); + int err = filemap_write_and_wait_range(inode->i_mapping, start, LLONG_MAX); if (!err) fuse_sync_writes(inode); diff --git a/fs/io_uring.c b/fs/io_uring.c index 104dff9c7131495e0a676b4c4e3c5c2816eeb6c4..019cbde8c3d67dc269b49d14bfb0f95fb5e751d1 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -4058,6 +4058,7 @@ static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head) } else { list_add_tail(&buf->list, &(*head)->list); } + cond_resched(); } return i ? 
i : -ENOMEM; diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 188f79d76988174c764c7dd41a9a163efd4245fb..b748329bb0bab13b8145874d6a70f18e9537c581 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -2795,6 +2795,7 @@ struct journal_head *jbd2_journal_grab_journal_head(struct buffer_head *bh) jbd_unlock_bh_journal_head(bh); return jh; } +EXPORT_SYMBOL(jbd2_journal_grab_journal_head); static void __journal_remove_journal_head(struct buffer_head *bh) { @@ -2847,6 +2848,7 @@ void jbd2_journal_put_journal_head(struct journal_head *jh) jbd_unlock_bh_journal_head(bh); } } +EXPORT_SYMBOL(jbd2_journal_put_journal_head); /* * Initialize jbd inode head diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c index 4fc8cd698d1a4ac0b4f13b694a0c63c214c276ce..bd7d58d27bfc63574918095b14f14fcf13cf1b7c 100644 --- a/fs/jffs2/file.c +++ b/fs/jffs2/file.c @@ -136,20 +136,15 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, struct page *pg; struct inode *inode = mapping->host; struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); + struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); pgoff_t index = pos >> PAGE_SHIFT; uint32_t pageofs = index << PAGE_SHIFT; int ret = 0; - pg = grab_cache_page_write_begin(mapping, index, flags); - if (!pg) - return -ENOMEM; - *pagep = pg; - jffs2_dbg(1, "%s()\n", __func__); if (pageofs > inode->i_size) { /* Make new hole frag from old EOF to new page */ - struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); struct jffs2_raw_inode ri; struct jffs2_full_dnode *fn; uint32_t alloc_len; @@ -160,7 +155,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); if (ret) - goto out_page; + goto out_err; mutex_lock(&f->sem); memset(&ri, 0, sizeof(ri)); @@ -190,7 +185,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, ret = PTR_ERR(fn); jffs2_complete_reservation(c); mutex_unlock(&f->sem); - goto out_page; + goto out_err; } ret = jffs2_add_full_dnode_to_inode(c, f, fn); if (f->metadata) { @@ -205,13 +200,26 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, jffs2_free_full_dnode(fn); jffs2_complete_reservation(c); mutex_unlock(&f->sem); - goto out_page; + goto out_err; } jffs2_complete_reservation(c); inode->i_size = pageofs; mutex_unlock(&f->sem); } + /* + * While getting a page and reading data in, lock c->alloc_sem until + * the page is Uptodate. Otherwise GC task may attempt to read the same + * page in read_cache_page(), which causes a deadlock. + */ + mutex_lock(&c->alloc_sem); + pg = grab_cache_page_write_begin(mapping, index, flags); + if (!pg) { + ret = -ENOMEM; + goto release_sem; + } + *pagep = pg; + /* * Read in the page if it wasn't already present. Cannot optimize away * the whole page write case until jffs2_write_end can handle the @@ -221,15 +229,17 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, mutex_lock(&f->sem); ret = jffs2_do_readpage_nolock(inode, pg); mutex_unlock(&f->sem); - if (ret) - goto out_page; + if (ret) { + unlock_page(pg); + put_page(pg); + goto release_sem; + } } jffs2_dbg(1, "end write_begin(). 
pg->flags %lx\n", pg->flags); - return ret; -out_page: - unlock_page(pg); - put_page(pg); +release_sem: + mutex_unlock(&c->alloc_sem); +out_err: return ret; } diff --git a/fs/namei.c b/fs/namei.c index 4c9d0c36545d3d850d96dec993290b28389b012a..72f354b62dd5da19104293c90b24f37f41050b18 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -3709,13 +3709,12 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry) dentry->d_inode->i_flags |= S_DEAD; dont_mount(dentry); detach_mounts(dentry); - fsnotify_rmdir(dir, dentry); out: inode_unlock(dentry->d_inode); dput(dentry); if (!error) - d_delete(dentry); + d_delete_notify(dir, dentry); return error; } EXPORT_SYMBOL(vfs_rmdir); @@ -3825,7 +3824,6 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry, struct inode **delegate if (!error) { dont_mount(dentry); detach_mounts(dentry); - fsnotify_unlink(dir, dentry); } } } @@ -3833,9 +3831,11 @@ out: inode_unlock(target); /* We don't d_delete() NFS sillyrenamed files--they still exist. */ - if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) { + if (!error && dentry->d_flags & DCACHE_NFSFS_RENAMED) { + fsnotify_unlink(dir, dentry); + } else if (!error) { fsnotify_link_count(target); - d_delete(dentry); + d_delete_notify(dir, dentry); } return error; diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h index 6a2033131c068e82e9934c6baba20c243dfad62f..ccd4f245cae240b812c3a7e6d44e2de32de819c5 100644 --- a/fs/nfs/callback.h +++ b/fs/nfs/callback.h @@ -170,7 +170,7 @@ struct cb_devicenotifyitem { }; struct cb_devicenotifyargs { - int ndevs; + uint32_t ndevs; struct cb_devicenotifyitem *devs; }; diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index be546ece383f5af783f6bdbd5c75a696c04ec823..b44219ce60b869c5c6556ab6053ee5e47c12b170 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -353,7 +353,7 @@ __be32 nfs4_callback_devicenotify(void *argp, void *resp, struct cb_process_state *cps) { struct cb_devicenotifyargs *args = argp; - int i; + uint32_t i; __be32 res = 0; struct nfs_client *clp = cps->clp; struct nfs_server *server = NULL; diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index 79ff172eb1c81ac0bc2b89740157b6163ca6aa8b..1725079a05276275c4b9d90d0af3bdc591079bc4 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -259,11 +259,9 @@ __be32 decode_devicenotify_args(struct svc_rqst *rqstp, void *argp) { struct cb_devicenotifyargs *args = argp; + uint32_t tmp, n, i; __be32 *p; __be32 status = 0; - u32 tmp; - int n, i; - args->ndevs = 0; /* Num of device notifications */ p = xdr_inline_decode(xdr, sizeof(uint32_t)); @@ -272,7 +270,7 @@ __be32 decode_devicenotify_args(struct svc_rqst *rqstp, goto out; } n = ntohl(*p++); - if (n <= 0) + if (n == 0) goto out; if (n > ULONG_MAX / sizeof(*args->devs)) { status = htonl(NFS4ERR_BADXDR); @@ -331,19 +329,21 @@ __be32 decode_devicenotify_args(struct svc_rqst *rqstp, dev->cbd_immediate = 0; } - args->ndevs++; - dprintk("%s: type %d layout 0x%x immediate %d\n", __func__, dev->cbd_notify_type, dev->cbd_layout_type, dev->cbd_immediate); } + args->ndevs = n; + dprintk("%s: ndevs %d\n", __func__, args->ndevs); + return 0; +err: + kfree(args->devs); out: + args->devs = NULL; + args->ndevs = 0; dprintk("%s: status %d ndevs %d\n", __func__, ntohl(status), args->ndevs); return status; -err: - kfree(args->devs); - goto out; } static __be32 decode_sessionid(struct xdr_stream *xdr, diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 723d425796cca39f07bda563fec4a1d5a3b5b33d..818ff8b1b99da7429cfef13b18c547ed23930221 100644 --- 
a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -177,6 +177,7 @@ struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init) INIT_LIST_HEAD(&clp->cl_superblocks); clp->cl_rpcclient = ERR_PTR(-EINVAL); + clp->cl_flags = cl_init->init_flags; clp->cl_proto = cl_init->proto; clp->cl_nconnect = cl_init->nconnect; clp->cl_net = get_net(cl_init->net); @@ -426,7 +427,6 @@ struct nfs_client *nfs_get_client(const struct nfs_client_initdata *cl_init) list_add_tail(&new->cl_share_link, &nn->nfs_client_list); spin_unlock(&nn->nfs_client_lock); - new->cl_flags = cl_init->init_flags; return rpc_ops->init_client(new, cl_init); } diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 8b963c72dd3b1aab88569d918144f6a4b3b267ad..2ad56ff4752c7f8a85f270539d0bdc43ad7bb393 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1777,6 +1777,24 @@ out: no_open: res = nfs_lookup(dir, dentry, lookup_flags); + if (!res) { + inode = d_inode(dentry); + if ((lookup_flags & LOOKUP_DIRECTORY) && inode && + !(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) + res = ERR_PTR(-ENOTDIR); + else if (inode && S_ISREG(inode->i_mode)) + res = ERR_PTR(-EOPENSTALE); + } else if (!IS_ERR(res)) { + inode = d_inode(res); + if ((lookup_flags & LOOKUP_DIRECTORY) && inode && + !(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) { + dput(res); + res = ERR_PTR(-ENOTDIR); + } else if (inode && S_ISREG(inode->i_mode)) { + dput(res); + res = ERR_PTR(-EOPENSTALE); + } + } if (switched) { d_lookup_done(dentry); if (!res) @@ -2174,6 +2192,8 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) trace_nfs_link_enter(inode, dir, dentry); d_drop(dentry); + if (S_ISREG(inode->i_mode)) + nfs_sync_inode(inode); error = NFS_PROTO(dir)->link(inode, dir, &dentry->d_name); if (error == 0) { ihold(inode); @@ -2262,6 +2282,8 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry, } } + if (S_ISREG(old_inode->i_mode)) + nfs_sync_inode(old_inode); task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, NULL); if (IS_ERR(task)) { error = PTR_ERR(task); @@ -2467,7 +2489,7 @@ static struct nfs_access_entry *nfs_access_search_rbtree(struct inode *inode, co return NULL; } -static int nfs_access_get_cached_locked(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res, bool may_block) +static int nfs_access_get_cached_locked(struct inode *inode, const struct cred *cred, u32 *mask, bool may_block) { struct nfs_inode *nfsi = NFS_I(inode); struct nfs_access_entry *cache; @@ -2497,8 +2519,7 @@ static int nfs_access_get_cached_locked(struct inode *inode, const struct cred * spin_lock(&inode->i_lock); retry = false; } - res->cred = cache->cred; - res->mask = cache->mask; + *mask = cache->mask; list_move_tail(&cache->lru, &nfsi->access_cache_entry_lru); err = 0; out: @@ -2510,7 +2531,7 @@ out_zap: return -ENOENT; } -static int nfs_access_get_cached_rcu(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res) +static int nfs_access_get_cached_rcu(struct inode *inode, const struct cred *cred, u32 *mask) { /* Only check the most recently returned cache entry, * but do it without locking. 
@@ -2532,22 +2553,21 @@ static int nfs_access_get_cached_rcu(struct inode *inode, const struct cred *cre goto out; if (nfs_check_cache_invalid(inode, NFS_INO_INVALID_ACCESS)) goto out; - res->cred = cache->cred; - res->mask = cache->mask; + *mask = cache->mask; err = 0; out: rcu_read_unlock(); return err; } -int nfs_access_get_cached(struct inode *inode, const struct cred *cred, struct -nfs_access_entry *res, bool may_block) +int nfs_access_get_cached(struct inode *inode, const struct cred *cred, + u32 *mask, bool may_block) { int status; - status = nfs_access_get_cached_rcu(inode, cred, res); + status = nfs_access_get_cached_rcu(inode, cred, mask); if (status != 0) - status = nfs_access_get_cached_locked(inode, cred, res, + status = nfs_access_get_cached_locked(inode, cred, mask, may_block); return status; @@ -2668,7 +2688,7 @@ static int nfs_do_access(struct inode *inode, const struct cred *cred, int mask) trace_nfs_access_enter(inode); - status = nfs_access_get_cached(inode, cred, &cache, may_block); + status = nfs_access_get_cached(inode, cred, &cache.mask, may_block); if (status == 0) goto out_cached; diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 21addb78523d229666eff8cbd9a97247b160ba66..f27ecc2e490f23ee62aaa15493df9f1025d96614 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -195,6 +195,18 @@ bool nfs_check_cache_invalid(struct inode *inode, unsigned long flags) } EXPORT_SYMBOL_GPL(nfs_check_cache_invalid); +#ifdef CONFIG_NFS_V4_2 +static bool nfs_has_xattr_cache(const struct nfs_inode *nfsi) +{ + return nfsi->xattr_cache != NULL; +} +#else +static bool nfs_has_xattr_cache(const struct nfs_inode *nfsi) +{ + return false; +} +#endif + static void nfs_set_cache_invalid(struct inode *inode, unsigned long flags) { struct nfs_inode *nfsi = NFS_I(inode); @@ -210,6 +222,8 @@ static void nfs_set_cache_invalid(struct inode *inode, unsigned long flags) } else if (flags & NFS_INO_REVAL_PAGECACHE) flags |= NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE; + if (!nfs_has_xattr_cache(nfsi)) + flags &= ~NFS_INO_INVALID_XATTR; if (inode->i_mapping->nrpages == 0) flags &= ~(NFS_INO_INVALID_DATA|NFS_INO_DATA_INVAL_DEFER); nfsi->cache_validity |= flags; @@ -807,12 +821,9 @@ int nfs_getattr(const struct path *path, struct kstat *stat, } /* Flush out writes to the server in order to update c/mtime. */ - if ((request_mask & (STATX_CTIME|STATX_MTIME)) && - S_ISREG(inode->i_mode)) { - err = filemap_write_and_wait(inode->i_mapping); - if (err) - goto out; - } + if ((request_mask & (STATX_CTIME | STATX_MTIME)) && + S_ISREG(inode->i_mode)) + filemap_write_and_wait(inode->i_mapping); /* * We may force a getattr if the user cares about atime. 
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 3e344bec3647b09889b113c6b568d6316b41140d..6d916563356ef372d594a482a4b2246888d5f090 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -281,7 +281,8 @@ struct rpc_clnt *nfs4_negotiate_security(struct rpc_clnt *, struct inode *, int nfs4_submount(struct fs_context *, struct nfs_server *); int nfs4_replace_transport(struct nfs_server *server, const struct nfs4_fs_locations *locations); - +size_t nfs_parse_server_name(char *string, size_t len, struct sockaddr *sa, + size_t salen, struct net *net, int port); /* nfs4proc.c */ extern int nfs4_handle_exception(struct nfs_server *, int, struct nfs4_exception *); extern int nfs4_async_handle_error(struct rpc_task *task, diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index 6d74f2e2de4615a14528f5696b7bdcc05cffcfc7..0e6437b08a3a502bdb3bbbb792294d59fb051b2a 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -1330,8 +1330,11 @@ int nfs4_update_server(struct nfs_server *server, const char *hostname, } nfs_put_client(clp); - if (server->nfs_client->cl_hostname == NULL) + if (server->nfs_client->cl_hostname == NULL) { server->nfs_client->cl_hostname = kstrdup(hostname, GFP_KERNEL); + if (server->nfs_client->cl_hostname == NULL) + return -ENOMEM; + } nfs_server_insert_lists(server); return nfs_probe_destination(server); diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c index 873342308dc0d9a131d93665b338e3eb1a49d6e9..3680c8da510c9787bcbae896a292a2cfec039469 100644 --- a/fs/nfs/nfs4namespace.c +++ b/fs/nfs/nfs4namespace.c @@ -164,16 +164,21 @@ static int nfs4_validate_fspath(struct dentry *dentry, return 0; } -static size_t nfs_parse_server_name(char *string, size_t len, - struct sockaddr *sa, size_t salen, struct net *net) +size_t nfs_parse_server_name(char *string, size_t len, struct sockaddr *sa, + size_t salen, struct net *net, int port) { ssize_t ret; ret = rpc_pton(net, string, len, sa, salen); if (ret == 0) { - ret = nfs_dns_resolve_name(net, string, len, sa, salen); - if (ret < 0) - ret = 0; + ret = rpc_uaddr2sockaddr(net, string, len, sa, salen); + if (ret == 0) { + ret = nfs_dns_resolve_name(net, string, len, sa, salen); + if (ret < 0) + ret = 0; + } + } else if (port) { + rpc_set_port(sa, port); } return ret; } @@ -328,7 +333,7 @@ static int try_location(struct fs_context *fc, nfs_parse_server_name(buf->data, buf->len, &ctx->nfs_server.address, sizeof(ctx->nfs_server._address), - fc->net_ns); + fc->net_ns, 0); if (ctx->nfs_server.addrlen == 0) continue; @@ -496,7 +501,7 @@ static int nfs4_try_replacing_one_location(struct nfs_server *server, continue; salen = nfs_parse_server_name(buf->data, buf->len, - sap, addr_bufsize, net); + sap, addr_bufsize, net, 0); if (salen == 0) continue; rpc_set_port(sap, NFS_PORT); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 3106bd28b1132a754e5c8daf280724817d080c0b..d222a980164b79e5869c9585f2aa9a2c9952d5e9 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -7597,7 +7597,7 @@ static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler, const char *key, const void *buf, size_t buflen, int flags) { - struct nfs_access_entry cache; + u32 mask; int ret; if (!nfs_server_capable(inode, NFS_CAP_XATTR)) @@ -7612,8 +7612,8 @@ static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler, * do a cached access check for the XA* flags to possibly avoid * doing an RPC and getting EACCES back. 
*/ - if (!nfs_access_get_cached(inode, current_cred(), &cache, true)) { - if (!(cache.mask & NFS_ACCESS_XAWRITE)) + if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { + if (!(mask & NFS_ACCESS_XAWRITE)) return -EACCES; } @@ -7634,14 +7634,14 @@ static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler, struct dentry *unused, struct inode *inode, const char *key, void *buf, size_t buflen) { - struct nfs_access_entry cache; + u32 mask; ssize_t ret; if (!nfs_server_capable(inode, NFS_CAP_XATTR)) return -EOPNOTSUPP; - if (!nfs_access_get_cached(inode, current_cred(), &cache, true)) { - if (!(cache.mask & NFS_ACCESS_XAREAD)) + if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { + if (!(mask & NFS_ACCESS_XAREAD)) return -EACCES; } @@ -7666,13 +7666,13 @@ nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len) ssize_t ret, size; char *buf; size_t buflen; - struct nfs_access_entry cache; + u32 mask; if (!nfs_server_capable(inode, NFS_CAP_XATTR)) return 0; - if (!nfs_access_get_cached(inode, current_cred(), &cache, true)) { - if (!(cache.mask & NFS_ACCESS_XALIST)) + if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { + if (!(mask & NFS_ACCESS_XALIST)) return 0; } diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 4bf10792cb5b1049790ec0d0584f263fb3bac96c..cbeec29e9f21a69cc53a0833799954cd1a45fb11 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -2104,6 +2104,9 @@ static int nfs4_try_migration(struct nfs_server *server, const struct cred *cred } result = -NFS4ERR_NXIO; + if (!locations->nlocations) + goto out; + if (!(locations->fattr.valid & NFS_ATTR_FATTR_V4_LOCATIONS)) { dprintk("<-- %s: No fs_locations data, migration skipped\n", __func__); diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index c16b93df1bc14222d7dd4393704f7d090cbd6d74..e2f0e3446e22a99ad6d06ec9af112fe696840595 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -3680,8 +3680,6 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st if (unlikely(!p)) goto out_eio; n = be32_to_cpup(p); - if (n <= 0) - goto out_eio; for (res->nlocations = 0; res->nlocations < n; res->nlocations++) { u32 m; struct nfs4_fs_location *loc; @@ -4184,10 +4182,11 @@ static int decode_attr_security_label(struct xdr_stream *xdr, uint32_t *bitmap, } else printk(KERN_WARNING "%s: label too long (%u)!\n", __func__, len); + if (label && label->label) + dprintk("%s: label=%.*s, len=%d, PI=%d, LFS=%d\n", + __func__, label->len, (char *)label->label, + label->len, label->pi, label->lfs); } - if (label && label->label) - dprintk("%s: label=%s, len=%d, PI=%d, LFS=%d\n", __func__, - (char *)label->label, label->len, label->pi, label->lfs); return status; } diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c index a633044b0dc1f7b9022a627c0debca97df9e9e4d..981a4e4c9a3cf31cff0987c940cf6e6188cb7431 100644 --- a/fs/nfsd/nfs3proc.c +++ b/fs/nfsd/nfs3proc.c @@ -183,6 +183,11 @@ nfsd3_proc_write(struct svc_rqst *rqstp) (unsigned long long) argp->offset, argp->stable? 
" stable" : ""); + resp->status = nfserr_fbig; + if (argp->offset > (u64)OFFSET_MAX || + argp->offset + argp->len > (u64)OFFSET_MAX) + return rpc_success; + fh_copy(&resp->fh, &argp->fh); resp->committed = argp->stable; nvecs = svc_fill_write_vector(rqstp, rqstp->rq_arg.pages, diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 00440337efc1fd42f9523a6d14e7315e91ecbc32..7850d141c7621d7a6382d3b2fdca382f05b9456e 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -1008,8 +1008,9 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, unsigned long cnt; int nvecs; - if (write->wr_offset >= OFFSET_MAX) - return nfserr_inval; + if (write->wr_offset > (u64)OFFSET_MAX || + write->wr_offset + write->wr_buflen > (u64)OFFSET_MAX) + return nfserr_fbig; cnt = write->wr_buflen; trace_nfsd_write_start(rqstp, &cstate->current_fh, diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 210147960c52ecb59406fb2726e38b0667e1539f..d01d7929753efec03f8e942d0b61e320e12c85b9 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -4047,8 +4047,10 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp, status = nfserr_clid_inuse; if (client_has_state(old) && !same_creds(&unconf->cl_cred, - &old->cl_cred)) + &old->cl_cred)) { + old = NULL; goto out; + } status = mark_client_expired_locked(old); if (status) { old = NULL; diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index a8f954bbde4f5a466b97e67d4a75fa3b7e364dc1..dedec4771ecc2cd1d569184d19db07b356b7964d 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -1247,7 +1247,8 @@ static void nfsdfs_remove_file(struct inode *dir, struct dentry *dentry) clear_ncl(d_inode(dentry)); dget(dentry); ret = simple_unlink(dir, dentry); - d_delete(dentry); + d_drop(dentry); + fsnotify_unlink(dir, dentry); dput(dentry); WARN_ON_ONCE(ret); } @@ -1336,8 +1337,8 @@ void nfsd_client_rmdir(struct dentry *dentry) dget(dentry); ret = simple_rmdir(dir, dentry); WARN_ON_ONCE(ret); + d_drop(dentry); fsnotify_rmdir(dir, dentry); - d_delete(dentry); dput(dentry); inode_unlock(dir); } diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h index c8ca73d69ad04f51bdedb325ceb4f43b44757fdb..a952f4a9b2a68012d4ff1e04b7190b946c8a2ce3 100644 --- a/fs/nfsd/trace.h +++ b/fs/nfsd/trace.h @@ -175,14 +175,14 @@ TRACE_EVENT(nfsd_export_update, DECLARE_EVENT_CLASS(nfsd_io_class, TP_PROTO(struct svc_rqst *rqstp, struct svc_fh *fhp, - loff_t offset, - unsigned long len), + u64 offset, + u32 len), TP_ARGS(rqstp, fhp, offset, len), TP_STRUCT__entry( __field(u32, xid) __field(u32, fh_hash) - __field(loff_t, offset) - __field(unsigned long, len) + __field(u64, offset) + __field(u32, len) ), TP_fast_assign( __entry->xid = be32_to_cpu(rqstp->rq_xid); @@ -190,7 +190,7 @@ DECLARE_EVENT_CLASS(nfsd_io_class, __entry->offset = offset; __entry->len = len; ), - TP_printk("xid=0x%08x fh_hash=0x%08x offset=%lld len=%lu", + TP_printk("xid=0x%08x fh_hash=0x%08x offset=%llu len=%u", __entry->xid, __entry->fh_hash, __entry->offset, __entry->len) ) @@ -199,8 +199,8 @@ DECLARE_EVENT_CLASS(nfsd_io_class, DEFINE_EVENT(nfsd_io_class, nfsd_##name, \ TP_PROTO(struct svc_rqst *rqstp, \ struct svc_fh *fhp, \ - loff_t offset, \ - unsigned long len), \ + u64 offset, \ + u32 len), \ TP_ARGS(rqstp, fhp, offset, len)) DEFINE_NFSD_IO_EVENT(read_start); diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index 086b6bacbad176819cc0950a2f2bc0499eed49ec..18e014fa06480eae000d20fe7c16226b24b116a0 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ 
b/fs/notify/fanotify/fanotify_user.c @@ -366,9 +366,6 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, if (fanotify_is_perm_event(event->mask)) FANOTIFY_PERM(event)->fd = fd; - if (f) - fd_install(fd, f); - /* Event info records order is: dir fid + name, child fid */ if (fanotify_event_dir_fh_len(event)) { info_type = info->name_len ? FAN_EVENT_INFO_TYPE_DFID_NAME : @@ -432,6 +429,9 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, count -= ret; } + if (f) + fd_install(fd, f); + return metadata.event_len; out_close_fd: diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index e7d04adb6cb874be6fed6096d8eaaed009360300..4f48003e4327138b67d30466649dc44f869ac584 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c @@ -1253,26 +1253,23 @@ static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh, { struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data; struct journal_head *jh; - int ret = 1; + int ret; if (ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap)) return 0; - if (!buffer_jbd(bg_bh)) + jh = jbd2_journal_grab_journal_head(bg_bh); + if (!jh) return 1; - jbd_lock_bh_journal_head(bg_bh); - if (buffer_jbd(bg_bh)) { - jh = bh2jh(bg_bh); - spin_lock(&jh->b_state_lock); - bg = (struct ocfs2_group_desc *) jh->b_committed_data; - if (bg) - ret = !ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap); - else - ret = 1; - spin_unlock(&jh->b_state_lock); - } - jbd_unlock_bh_journal_head(bg_bh); + spin_lock(&jh->b_state_lock); + bg = (struct ocfs2_group_desc *) jh->b_committed_data; + if (bg) + ret = !ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap); + else + ret = 1; + spin_unlock(&jh->b_state_lock); + jbd2_journal_put_journal_head(jh); return ret; } diff --git a/fs/orangefs/orangefs-bufmap.c b/fs/orangefs/orangefs-bufmap.c index 538e839590ef5c1d489aea7f6d94a7e9d1be821b..b501dc07f92224474f2cf1a27194e492429a019f 100644 --- a/fs/orangefs/orangefs-bufmap.c +++ b/fs/orangefs/orangefs-bufmap.c @@ -176,7 +176,7 @@ orangefs_bufmap_free(struct orangefs_bufmap *bufmap) { kfree(bufmap->page_array); kfree(bufmap->desc_array); - kfree(bufmap->buffer_index_array); + bitmap_free(bufmap->buffer_index_array); kfree(bufmap); } @@ -226,8 +226,7 @@ orangefs_bufmap_alloc(struct ORANGEFS_dev_map_desc *user_desc) bufmap->desc_size = user_desc->size; bufmap->desc_shift = ilog2(bufmap->desc_size); - bufmap->buffer_index_array = - kzalloc(DIV_ROUND_UP(bufmap->desc_count, BITS_PER_LONG), GFP_KERNEL); + bufmap->buffer_index_array = bitmap_zalloc(bufmap->desc_count, GFP_KERNEL); if (!bufmap->buffer_index_array) goto out_free_bufmap; @@ -250,7 +249,7 @@ orangefs_bufmap_alloc(struct ORANGEFS_dev_map_desc *user_desc) out_free_desc_array: kfree(bufmap->desc_array); out_free_index_array: - kfree(bufmap->buffer_index_array); + bitmap_free(bufmap->buffer_index_array); out_free_bufmap: kfree(bufmap); out: diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 3931f60e421f76378fddfba004880f2de740bf02..ba98371e9d16404e9a34c537a445509013c1b11e 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -430,7 +430,8 @@ static void smaps_page_accumulate(struct mem_size_stats *mss, } static void smaps_account(struct mem_size_stats *mss, struct page *page, - bool compound, bool young, bool dirty, bool locked) + bool compound, bool young, bool dirty, bool locked, + bool migration) { int i, nr = compound ? 
compound_nr(page) : 1; unsigned long size = nr * PAGE_SIZE; @@ -457,8 +458,15 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page, * page_count(page) == 1 guarantees the page is mapped exactly once. * If any subpage of the compound page mapped with PTE it would elevate * page_count(). + * + * The page_mapcount() is called to get a snapshot of the mapcount. + * Without holding the page lock this snapshot can be slightly wrong as + * we cannot always read the mapcount atomically. It is not safe to + * call page_mapcount() even with PTL held if the page is not mapped, + * especially for migration entries. Treat regular migration entries + * as mapcount == 1. */ - if (page_count(page) == 1) { + if ((page_count(page) == 1) || migration) { smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty, locked, true); return; @@ -495,6 +503,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr, struct vm_area_struct *vma = walk->vma; bool locked = !!(vma->vm_flags & VM_LOCKED); struct page *page = NULL; + bool migration = false; if (pte_present(*pte)) { page = vm_normal_page(vma, addr, *pte); @@ -514,9 +523,10 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr, } else { mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT; } - } else if (is_migration_entry(swpent)) + } else if (is_migration_entry(swpent)) { + migration = true; page = migration_entry_to_page(swpent); - else if (is_device_private_entry(swpent)) + } else if (is_device_private_entry(swpent)) page = device_private_entry_to_page(swpent); } else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap && pte_none(*pte))) { @@ -530,7 +540,8 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr, if (!page) return; - smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked); + smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), + locked, migration); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE @@ -541,6 +552,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, struct vm_area_struct *vma = walk->vma; bool locked = !!(vma->vm_flags & VM_LOCKED); struct page *page = NULL; + bool migration = false; if (pmd_present(*pmd)) { /* FOLL_DUMP will return -EFAULT on huge zero page */ @@ -548,8 +560,10 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, } else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) { swp_entry_t entry = pmd_to_swp_entry(*pmd); - if (is_migration_entry(entry)) + if (is_migration_entry(entry)) { + migration = true; page = migration_entry_to_page(entry); + } } if (IS_ERR_OR_NULL(page)) return; @@ -561,7 +575,9 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, /* pass */; else mss->file_thp += HPAGE_PMD_SIZE; - smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked); + + smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), + locked, migration); } #else static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, @@ -1366,6 +1382,7 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm, { u64 frame = 0, flags = 0; struct page *page = NULL; + bool migration = false; if (pte_present(pte)) { if (pm->show_pfn) @@ -1383,8 +1400,10 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm, frame = swp_type(entry) | (swp_offset(entry) << MAX_SWAPFILES_SHIFT); flags |= PM_SWAP; - if (is_migration_entry(entry)) + if (is_migration_entry(entry)) { + migration = true; page = migration_entry_to_page(entry); + } if (is_device_private_entry(entry)) page = 
device_private_entry_to_page(entry); @@ -1392,7 +1411,7 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm, if (page && !PageAnon(page)) flags |= PM_FILE; - if (page && page_mapcount(page) == 1) + if (page && !migration && page_mapcount(page) == 1) flags |= PM_MMAP_EXCLUSIVE; if (vma->vm_flags & VM_SOFTDIRTY) flags |= PM_SOFT_DIRTY; @@ -1408,8 +1427,9 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, spinlock_t *ptl; pte_t *pte, *orig_pte; int err = 0; - #ifdef CONFIG_TRANSPARENT_HUGEPAGE + bool migration = false; + ptl = pmd_trans_huge_lock(pmdp, vma); if (ptl) { u64 flags = 0, frame = 0; @@ -1444,11 +1464,12 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, if (pmd_swp_soft_dirty(pmd)) flags |= PM_SOFT_DIRTY; VM_BUG_ON(!is_pmd_migration_entry(pmd)); + migration = is_migration_entry(entry); page = migration_entry_to_page(entry); } #endif - if (page && page_mapcount(page) == 1) + if (page && !migration && page_mapcount(page) == 1) flags |= PM_MMAP_EXCLUSIVE; for (; addr != end; addr += PAGE_SIZE) { diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 4f13734637660056e299d4ed04ceb38fb04cb580..09fb8459bb5cea2ef07ae1f58b935c2fa06250f6 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -692,9 +692,14 @@ int dquot_quota_sync(struct super_block *sb, int type) /* This is not very clever (and fast) but currently I don't know about * any other simple way of getting quota data to disk and we must get * them there for userspace to be visible... */ - if (sb->s_op->sync_fs) - sb->s_op->sync_fs(sb, 1); - sync_blockdev(sb->s_bdev); + if (sb->s_op->sync_fs) { + ret = sb->s_op->sync_fs(sb, 1); + if (ret) + return ret; + } + ret = sync_blockdev(sb->s_bdev); + if (ret) + return ret; /* * Now when everything is written we can discard the pagecache so diff --git a/fs/select.c b/fs/select.c index 945896d0ac9e7624db36d48169ebc757d4f18adb..5edffee1162c2423821e2282153a5ff1f3c985eb 100644 --- a/fs/select.c +++ b/fs/select.c @@ -458,9 +458,11 @@ get_max: return max; } -#define POLLIN_SET (EPOLLRDNORM | EPOLLRDBAND | EPOLLIN | EPOLLHUP | EPOLLERR) -#define POLLOUT_SET (EPOLLWRBAND | EPOLLWRNORM | EPOLLOUT | EPOLLERR) -#define POLLEX_SET (EPOLLPRI) +#define POLLIN_SET (EPOLLRDNORM | EPOLLRDBAND | EPOLLIN | EPOLLHUP | EPOLLERR |\ + EPOLLNVAL) +#define POLLOUT_SET (EPOLLWRBAND | EPOLLWRNORM | EPOLLOUT | EPOLLERR |\ + EPOLLNVAL) +#define POLLEX_SET (EPOLLPRI | EPOLLNVAL) static inline void wait_key_set(poll_table *wait, unsigned long in, unsigned long out, unsigned long bit, @@ -527,6 +529,7 @@ static int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time) break; if (!(bit & all_bits)) continue; + mask = EPOLLNVAL; f = fdget(i); if (f.file) { wait_key_set(wait, in, out, bit, @@ -534,34 +537,34 @@ static int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time) mask = vfs_poll(f.file, wait); fdput(f); - if ((mask & POLLIN_SET) && (in & bit)) { - res_in |= bit; - retval++; - wait->_qproc = NULL; - } - if ((mask & POLLOUT_SET) && (out & bit)) { - res_out |= bit; - retval++; - wait->_qproc = NULL; - } - if ((mask & POLLEX_SET) && (ex & bit)) { - res_ex |= bit; - retval++; - wait->_qproc = NULL; - } - /* got something, stop busy polling */ - if (retval) { - can_busy_loop = false; - busy_flag = 0; - - /* - * only remember a returned - * POLL_BUSY_LOOP if we asked for it - */ - } else if (busy_flag & mask) - can_busy_loop = true; - } + if ((mask & POLLIN_SET) && (in & bit)) { + res_in |= bit; + retval++; + 
wait->_qproc = NULL; + } + if ((mask & POLLOUT_SET) && (out & bit)) { + res_out |= bit; + retval++; + wait->_qproc = NULL; + } + if ((mask & POLLEX_SET) && (ex & bit)) { + res_ex |= bit; + retval++; + wait->_qproc = NULL; + } + /* got something, stop busy polling */ + if (retval) { + can_busy_loop = false; + busy_flag = 0; + + /* + * only remember a returned + * POLL_BUSY_LOOP if we asked for it + */ + } else if (busy_flag & mask) + can_busy_loop = true; + } if (res_in) *rinp = res_in; diff --git a/fs/super.c b/fs/super.c index 98bb0629ee108e87ff8876457b535442addf695e..bae3fe80f852e73de6e55f75454bcff169590bea 100644 --- a/fs/super.c +++ b/fs/super.c @@ -1472,8 +1472,8 @@ struct dentry *mount_nodev(struct file_system_type *fs_type, } EXPORT_SYMBOL(mount_nodev); -static int reconfigure_single(struct super_block *s, - int flags, void *data) +int reconfigure_single(struct super_block *s, + int flags, void *data) { struct fs_context *fc; int ret; @@ -1667,11 +1667,9 @@ static void lockdep_sb_freeze_acquire(struct super_block *sb) percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_); } -static void sb_freeze_unlock(struct super_block *sb) +static void sb_freeze_unlock(struct super_block *sb, int level) { - int level; - - for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--) + for (level--; level >= 0; level--) percpu_up_write(sb->s_writers.rw_sem + level); } @@ -1742,7 +1740,14 @@ int freeze_super(struct super_block *sb) sb_wait_write(sb, SB_FREEZE_PAGEFAULT); /* All writers are done so after syncing there won't be dirty data */ - sync_filesystem(sb); + ret = sync_filesystem(sb); + if (ret) { + sb->s_writers.frozen = SB_UNFROZEN; + sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT); + wake_up(&sb->s_writers.wait_unfrozen); + deactivate_locked_super(sb); + return ret; + } /* Now wait for internal filesystem counter */ sb->s_writers.frozen = SB_FREEZE_FS; @@ -1754,7 +1759,7 @@ int freeze_super(struct super_block *sb) printk(KERN_ERR "VFS:Filesystem freeze failed\n"); sb->s_writers.frozen = SB_UNFROZEN; - sb_freeze_unlock(sb); + sb_freeze_unlock(sb, SB_FREEZE_FS); wake_up(&sb->s_writers.wait_unfrozen); deactivate_locked_super(sb); return ret; @@ -1805,7 +1810,7 @@ static int thaw_super_locked(struct super_block *sb) } sb->s_writers.frozen = SB_UNFROZEN; - sb_freeze_unlock(sb); + sb_freeze_unlock(sb, SB_FREEZE_FS); out: wake_up(&sb->s_writers.wait_unfrozen); deactivate_locked_super(sb); diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c index ade05887070dd64a04403c27ade76d8b0198a19d..8b7315c22f0d1208e3b897d61c912561642c8b00 100644 --- a/fs/tracefs/inode.c +++ b/fs/tracefs/inode.c @@ -262,7 +262,6 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts) if (!gid_valid(gid)) return -EINVAL; opts->gid = gid; - set_gid(tracefs_mount->mnt_root, gid); break; case Opt_mode: if (match_octal(&args[0], &option)) @@ -289,7 +288,9 @@ static int tracefs_apply_options(struct super_block *sb) inode->i_mode |= opts->mode; inode->i_uid = opts->uid; - inode->i_gid = opts->gid; + + /* Set all the group ids to the mount option */ + set_gid(sb->s_root, opts->gid); return 0; } diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index cfd46753a685656b75eef1cf74b1e2e5da46277c..6a8f9efc2e2f08812a7518d04233e845ec9ac748 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1853,7 +1853,6 @@ out: kthread_stop(c->bgt); c->bgt = NULL; } - free_wbufs(c); kfree(c->write_reserve_buf); c->write_reserve_buf = NULL; vfree(c->ileb_buf); diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c index 
84ed23edebfd379fa5906860758e3fe44a09da18..87a77bf70ee19fcad4f4077b83d62c5741fb8f75 100644 --- a/fs/udf/ialloc.c +++ b/fs/udf/ialloc.c @@ -77,6 +77,7 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode) GFP_KERNEL); } if (!iinfo->i_data) { + make_bad_inode(inode); iput(inode); return ERR_PTR(-ENOMEM); } @@ -86,6 +87,7 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode) dinfo->i_location.partitionReferenceNum, start, &err); if (err) { + make_bad_inode(inode); iput(inode); return ERR_PTR(err); } diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 0dd2f93ac0480e55cab212dce3506e8c89427765..d32b836f6ca74b0bf678fd0e66914a79aecbcb6a 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -257,10 +257,6 @@ int udf_expand_file_adinicb(struct inode *inode) char *kaddr; struct udf_inode_info *iinfo = UDF_I(inode); int err; - struct writeback_control udf_wbc = { - .sync_mode = WB_SYNC_NONE, - .nr_to_write = 1, - }; WARN_ON_ONCE(!inode_is_locked(inode)); if (!iinfo->i_lenAlloc) { @@ -304,8 +300,10 @@ int udf_expand_file_adinicb(struct inode *inode) iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG; /* from now on we have normal address_space methods */ inode->i_data.a_ops = &udf_aops; + set_page_dirty(page); + unlock_page(page); up_write(&iinfo->i_data_sem); - err = inode->i_data.a_ops->writepage(page, &udf_wbc); + err = filemap_fdatawrite(inode->i_mapping); if (err) { /* Restore everything back so that we don't lose data... */ lock_page(page); @@ -316,6 +314,7 @@ int udf_expand_file_adinicb(struct inode *inode) unlock_page(page); iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB; inode->i_data.a_ops = &udf_adinicb_aops; + iinfo->i_lenAlloc = inode->i_size; up_write(&iinfo->i_data_sem); } put_page(page); diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 6ad3b89a8a2e06c4d2403fd816f3ae7872c458f7..0f5366792d22e1a12c9564ee912dc3c3947c9006 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -605,9 +605,10 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int state); int acpi_disable_wakeup_device_power(struct acpi_device *dev); #ifdef CONFIG_X86 -bool acpi_device_always_present(struct acpi_device *adev); +bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *status); #else -static inline bool acpi_device_always_present(struct acpi_device *adev) +static inline bool acpi_device_override_status(struct acpi_device *adev, + unsigned long long *status) { return false; } diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index 647cb11d0a0a3026321dddcc9a261a6b94c645ef..7334037624c5c3e493c21d7ce96dd1c893a38e04 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -536,8 +536,14 @@ typedef u64 acpi_integer; * Can be used with access_width of struct acpi_generic_address and access_size of * struct acpi_resource_generic_register. 
*/ -#define ACPI_ACCESS_BIT_WIDTH(size) (1 << ((size) + 2)) -#define ACPI_ACCESS_BYTE_WIDTH(size) (1 << ((size) - 1)) +#define ACPI_ACCESS_BIT_SHIFT 2 +#define ACPI_ACCESS_BYTE_SHIFT -1 +#define ACPI_ACCESS_BIT_MAX (31 - ACPI_ACCESS_BIT_SHIFT) +#define ACPI_ACCESS_BYTE_MAX (31 - ACPI_ACCESS_BYTE_SHIFT) +#define ACPI_ACCESS_BIT_DEFAULT (8 - ACPI_ACCESS_BIT_SHIFT) +#define ACPI_ACCESS_BYTE_DEFAULT (8 - ACPI_ACCESS_BYTE_SHIFT) +#define ACPI_ACCESS_BIT_WIDTH(size) (1 << ((size) + ACPI_ACCESS_BIT_SHIFT)) +#define ACPI_ACCESS_BYTE_WIDTH(size) (1 << ((size) + ACPI_ACCESS_BYTE_SHIFT)) /******************************************************************************* * diff --git a/include/linux/blk-pm.h b/include/linux/blk-pm.h index b80c65aba2493aeba29c94e641bb7bd2e96ec6a4..2580e05a8ab67203efde59504ffb655480b531cb 100644 --- a/include/linux/blk-pm.h +++ b/include/linux/blk-pm.h @@ -14,7 +14,7 @@ extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev); extern int blk_pre_runtime_suspend(struct request_queue *q); extern void blk_post_runtime_suspend(struct request_queue *q, int err); extern void blk_pre_runtime_resume(struct request_queue *q); -extern void blk_post_runtime_resume(struct request_queue *q, int err); +extern void blk_post_runtime_resume(struct request_queue *q); extern void blk_set_runtime_active(struct request_queue *q); #else static inline void blk_pm_runtime_init(struct request_queue *q, diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 6e330ff2f28df0a2e729a631524ac92ffa127c71..391bc1480dfb1da5142725f623c8813bab937ca6 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -367,6 +367,13 @@ static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log) log->level == BPF_LOG_KERNEL); } +static inline bool +bpf_verifier_log_attr_valid(const struct bpf_verifier_log *log) +{ + return log->len_total >= 128 && log->len_total <= UINT_MAX >> 2 && + log->level && log->ubuf && !(log->level & ~BPF_LOG_MASK); +} + #define BPF_MAX_SUBPROGS 256 struct bpf_subprog_info { diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 83a3ebff74560cff37fad91bb6536146a1541481..8f87c1a6f32310e15fd44df56ff4cd595444ccdb 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -42,6 +42,8 @@ struct module; * @shift: Cycle to nanosecond divisor (power of two) * @max_idle_ns: Maximum idle time permitted by the clocksource (nsecs) * @maxadj: Maximum adjustment value to mult (~11%) + * @uncertainty_margin: Maximum uncertainty in nanoseconds per half second. + * Zero says to use default WATCHDOG_THRESHOLD. 
* @archdata: Optional arch-specific data * @max_cycles: Maximum safe cycle value which won't overflow on * multiplication @@ -93,6 +95,7 @@ struct clocksource { u32 shift; u64 max_idle_ns; u32 maxadj; + u32 uncertainty_margin; #ifdef CONFIG_ARCH_CLOCKSOURCE_DATA struct arch_clocksource_data archdata; #endif diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h index 5b44b0195a28a07a0990748aedd591b2fb86e352..e869ce3ae6600fd95caac84c662da9472b9edc9f 100644 --- a/include/linux/fs_context.h +++ b/include/linux/fs_context.h @@ -140,6 +140,8 @@ extern int generic_parse_monolithic(struct fs_context *fc, void *data); extern int vfs_get_tree(struct fs_context *fc); extern void put_fs_context(struct fs_context *fc); extern void fc_drop_locked(struct fs_context *fc); +int reconfigure_single(struct super_block *s, + int flags, void *data); /* * sget() wrappers to be called from the ->get_tree() op. diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index f8acddcf54fb4ce2e7b03dda41f52dd936b591e8..79add91eaa04ebf3b4355cabae415709ab3bbb6f 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -203,6 +203,42 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode, fsnotify_name(dir, FS_CREATE, inode, &new_dentry->d_name, 0); } +/* + * fsnotify_delete - @dentry was unlinked and unhashed + * + * Caller must make sure that dentry->d_name is stable. + * + * Note: unlike fsnotify_unlink(), we also have to pass the unlinked inode + * as this may be called after d_delete() and the dentry may be negative. + */ +static inline void fsnotify_delete(struct inode *dir, struct inode *inode, + struct dentry *dentry) +{ + __u32 mask = FS_DELETE; + + if (S_ISDIR(inode->i_mode)) + mask |= FS_ISDIR; + + fsnotify_name(dir, mask, inode, &dentry->d_name, 0); +} + +/** + * d_delete_notify - delete a dentry and call fsnotify_delete() + * @dentry: The dentry to delete + * + * This helper is used to guarantee that the unlinked inode cannot be found + * by lookup of this name after the fsnotify_delete() event has been delivered. 
+ */ +static inline void d_delete_notify(struct inode *dir, struct dentry *dentry) +{ + struct inode *inode = d_inode(dentry); + + ihold(inode); + d_delete(dentry); + fsnotify_delete(dir, inode, dentry); + iput(inode); +} + /* * fsnotify_unlink - 'name' was unlinked * @@ -210,10 +246,10 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode, */ static inline void fsnotify_unlink(struct inode *dir, struct dentry *dentry) { - /* Expected to be called before d_delete() */ - WARN_ON_ONCE(d_is_negative(dentry)); + if (WARN_ON_ONCE(d_is_negative(dentry))) + return; - fsnotify_dirent(dir, dentry, FS_DELETE); + fsnotify_delete(dir, d_inode(dentry), dentry); } /* @@ -233,10 +269,10 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry) */ static inline void fsnotify_rmdir(struct inode *dir, struct dentry *dentry) { - /* Expected to be called before d_delete() */ - WARN_ON_ONCE(d_is_negative(dentry)); + if (WARN_ON_ONCE(d_is_negative(dentry))) + return; - fsnotify_dirent(dir, dentry, FS_DELETE | FS_ISDIR); + fsnotify_delete(dir, d_inode(dentry), dentry); } /* diff --git a/include/linux/hid.h b/include/linux/hid.h index fc56d53cc68bffc85b9e7dc014d5b2d700d8e1e1..2ba33d708942c163bb5b3d5c01c8840129f393b5 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -345,6 +345,8 @@ struct hid_item { /* BIT(9) reserved for backward compatibility, was NO_INIT_INPUT_REPORTS */ #define HID_QUIRK_ALWAYS_POLL BIT(10) #define HID_QUIRK_INPUT_PER_APP BIT(11) +#define HID_QUIRK_X_INVERT BIT(12) +#define HID_QUIRK_Y_INVERT BIT(13) #define HID_QUIRK_SKIP_OUTPUT_REPORTS BIT(16) #define HID_QUIRK_SKIP_OUTPUT_REPORT_ID BIT(17) #define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP BIT(18) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 4b975111b53617c16967c6ad73897655d22f2184..1f467fb620fe1c06604e8ec10da75f51d37d7cf3 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -197,7 +197,7 @@ struct obj_cgroup { struct mem_cgroup *memcg; atomic_t nr_charged_bytes; union { - struct list_head list; + struct list_head list; /* protected by objcg_lock */ struct rcu_head rcu; }; }; @@ -300,7 +300,8 @@ struct mem_cgroup { int kmemcg_id; enum memcg_kmem_state kmem_state; struct obj_cgroup __rcu *objcg; - struct list_head objcg_list; /* list of inherited objcgs */ + /* list of inherited objcgs, protected by objcg_lock */ + struct list_head objcg_list; #endif MEMCG_PADDING(_pad2_); diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 63b550403317a44c9120bce08950ccc153f76bac..c142a152d6a41cfaf7ddfc0dd3162e8e96c459ff 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -938,6 +938,15 @@ static inline int is_highmem_idx(enum zone_type idx) #endif } +#ifdef CONFIG_ZONE_DMA +bool has_managed_dma(void); +#else +static inline bool has_managed_dma(void) +{ + return false; +} +#endif + /** * is_highmem - helper function to quickly check if a struct zone is a * highmem zone or not. 
This is an attempt to keep references diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3476d20b75d497743f3c68182ce3e2e064d3ffc6..861f2480c45715f0c7e8347f033e02f8ba4ba836 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2061,7 +2061,7 @@ struct net_device { struct netdev_queue *_tx ____cacheline_aligned_in_smp; unsigned int num_tx_queues; unsigned int real_num_tx_queues; - struct Qdisc *qdisc; + struct Qdisc __rcu *qdisc; unsigned int tx_queue_len; spinlock_t tx_global_lock; @@ -2543,6 +2543,7 @@ struct packet_type { struct net_device *); bool (*id_match)(struct packet_type *ptype, struct sock *sk); + struct net *af_packet_net; void *af_packet_priv; struct list_head list; }; diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index aff5cd382fef55d8cb5743d020e676e030176e53..1e0a3497bdb4644db96a7b0a2cdb935732800447 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -501,8 +501,8 @@ extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr, struct nfs4_label *label); extern int nfs_may_open(struct inode *inode, const struct cred *cred, int openflags); extern void nfs_access_zap_cache(struct inode *inode); -extern int nfs_access_get_cached(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res, - bool may_block); +extern int nfs_access_get_cached(struct inode *inode, const struct cred *cred, + u32 *mask, bool may_block); /* * linux/fs/nfs/symlink.c diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index ce14fb2772b5b6ba9c5d944e8c8a95cfc9d13708..67a50c78232fe2e351d8700761f36e4d3b4ea927 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -678,18 +678,6 @@ struct perf_event { u64 total_time_running; u64 tstamp; - /* - * timestamp shadows the actual context timing but it can - * be safely used in NMI interrupt context. It reflects the - * context time as it was when the event was last scheduled in, - * or when ctx_sched_in failed to schedule the event because we - * run out of PMC. - * - * ctx_time already accounts for ctx->timestamp. Therefore to - * compute ctx_time for a sample, simply add perf_clock(). - */ - u64 shadow_ctx_time; - struct perf_event_attr attr; u16 header_size; u16 id_header_size; @@ -834,6 +822,7 @@ struct perf_event_context { */ u64 time; u64 timestamp; + u64 timeoffset; /* * These fields let us detect when two contexts have both @@ -916,6 +905,8 @@ struct bpf_perf_event_data_kern { struct perf_cgroup_info { u64 time; u64 timestamp; + u64 timeoffset; + int active; }; struct perf_cgroup { @@ -1235,7 +1226,18 @@ extern void perf_event_bpf_event(struct bpf_prog *prog, enum perf_bpf_event_type type, u16 flags); -extern struct perf_guest_info_callbacks *perf_guest_cbs; +extern struct perf_guest_info_callbacks __rcu *perf_guest_cbs; +static inline struct perf_guest_info_callbacks *perf_get_guest_cbs(void) +{ + /* + * Callbacks are RCU-protected and must be READ_ONCE to avoid reloading + * the callbacks between a !NULL check and dereferences, to ensure + * pending stores/changes to the callback pointers are visible before a + * non-NULL perf_guest_cbs is visible to readers, and to prevent a + * module from unloading callbacks while readers are active. 
+ */ + return rcu_dereference(perf_guest_cbs); +} extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 7c869ea8dffc8caed4a259071f981fc88ba87763..9def1ac19546b659395e2e5554d160b612476931 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -44,6 +44,7 @@ static inline unsigned long pte_index(unsigned long address) { return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); } +#define pte_index pte_index #ifndef pmd_index static inline unsigned long pmd_index(unsigned long address) diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 161acd4ede448eed9c19a02a6e3ce958b94a670d..30091ab5de287c25b6b3d6653fa2e68c39a91896 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -58,6 +58,7 @@ extern void pm_runtime_get_suppliers(struct device *dev); extern void pm_runtime_put_suppliers(struct device *dev); extern void pm_runtime_new_link(struct device *dev); extern void pm_runtime_drop_link(struct device_link *link); +extern void pm_runtime_release_supplier(struct device_link *link, bool check_idle); /** * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter. @@ -279,6 +280,8 @@ static inline void pm_runtime_get_suppliers(struct device *dev) {} static inline void pm_runtime_put_suppliers(struct device *dev) {} static inline void pm_runtime_new_link(struct device *dev) {} static inline void pm_runtime_drop_link(struct device_link *link) {} +static inline void pm_runtime_release_supplier(struct device_link *link, + bool check_idle) {} #endif /* !CONFIG_PM */ diff --git a/include/linux/psi.h b/include/linux/psi.h index 7361023f3fdd50b52083797aab2e5b33461154bc..db4ecfaab879265168229a2f732e08adfcf7a3fd 100644 --- a/include/linux/psi.h +++ b/include/linux/psi.h @@ -33,7 +33,7 @@ void cgroup_move_task(struct task_struct *p, struct css_set *to); struct psi_trigger *psi_trigger_create(struct psi_group *group, char *buf, size_t nbytes, enum psi_res res); -void psi_trigger_replace(void **trigger_ptr, struct psi_trigger *t); +void psi_trigger_destroy(struct psi_trigger *t); __poll_t psi_trigger_poll(void **trigger_ptr, struct file *file, poll_table *wait); diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h index b95f3211566a22c88c53154cb8f7085d026ba98a..17d74f62c1818ad93f6568cc6ac6a074aba35f3c 100644 --- a/include/linux/psi_types.h +++ b/include/linux/psi_types.h @@ -128,9 +128,6 @@ struct psi_trigger { * events to one per window */ u64 last_event_time; - - /* Refcounting to prevent premature destruction */ - struct kref refcount; }; struct psi_group { diff --git a/include/linux/sched.h b/include/linux/sched.h index b85b26d9ccefeda08bb8be7d5d2f54f89d66741b..f996d1f343bb7cd1ec2a8dd15df0e2b006f0de20 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1544,7 +1544,6 @@ extern struct pid *cad_pid; #define PF_MEMALLOC 0x00000800 /* Allocating memory */ #define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */ #define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */ -#define PF_USED_ASYNC 0x00004000 /* Used async_schedule*(), used by module init */ #define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */ #define PF_FROZEN 0x00010000 /* Frozen for system suspend */ #define PF_KSWAPD 0x00020000 /* I am kswapd */ diff --git 
a/include/linux/suspend.h b/include/linux/suspend.h index 8af13ba60c7e4500153ba1612f80a96f4ddae05d..4bcd65679cee0c46d48040330572d40d26715903 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -430,15 +430,7 @@ struct platform_hibernation_ops { #ifdef CONFIG_HIBERNATION /* kernel/power/snapshot.c */ -extern void __register_nosave_region(unsigned long b, unsigned long e, int km); -static inline void __init register_nosave_region(unsigned long b, unsigned long e) -{ - __register_nosave_region(b, e, 0); -} -static inline void __init register_nosave_region_late(unsigned long b, unsigned long e) -{ - __register_nosave_region(b, e, 1); -} +extern void register_nosave_region(unsigned long b, unsigned long e); extern int swsusp_page_is_forbidden(struct page *); extern void swsusp_set_page_free(struct page *); extern void swsusp_unset_page_free(struct page *); @@ -457,7 +449,6 @@ int pfn_is_nosave(unsigned long pfn); int hibernate_quiet_exec(int (*func)(void *data), void *data); #else /* CONFIG_HIBERNATION */ static inline void register_nosave_region(unsigned long b, unsigned long e) {} -static inline void register_nosave_region_late(unsigned long b, unsigned long e) {} static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } static inline void swsusp_set_page_free(struct page *p) {} static inline void swsusp_unset_page_free(struct page *p) {} @@ -505,14 +496,14 @@ extern void ksys_sync_helper(void); /* drivers/base/power/wakeup.c */ extern bool events_check_enabled; -extern unsigned int pm_wakeup_irq; extern suspend_state_t pm_suspend_target_state; extern bool pm_wakeup_pending(void); extern void pm_system_wakeup(void); extern void pm_system_cancel_wakeup(void); -extern void pm_wakeup_clear(bool reset); +extern void pm_wakeup_clear(unsigned int irq_number); extern void pm_system_irq_wakeup(unsigned int irq_number); +extern unsigned int pm_wakeup_irq(void); extern bool pm_get_wakeup_count(unsigned int *count, bool block); extern bool pm_save_wakeup_count(unsigned int count); extern void pm_wakep_autosleep_enabled(bool set); diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index 0c6c1de6f3b7782c833090aed78a28b4efdf7c3f..18a9949bba187e7979a4d409e0485ae3d149f7ca 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -582,4 +582,18 @@ struct tee_client_driver { #define to_tee_client_driver(d) \ container_of(d, struct tee_client_driver, driver) +/** + * teedev_open() - Open a struct tee_device + * @teedev: Device to open + * + * @return a pointer to struct tee_context on success or an ERR_PTR on failure. 
+ */ +struct tee_context *teedev_open(struct tee_device *teedev); + +/** + * teedev_close_context() - closes a struct tee_context + * @ctx: The struct tee_context to close + */ +void teedev_close_context(struct tee_context *ctx); + #endif /*__TEE_DRV_H*/ diff --git a/include/linux/usb/role.h b/include/linux/usb/role.h index 0164fed31b06cc8f5f24c8dab3651961c89528f9..b9ccaeb8a4aef14e08e092a4da405c4f22764c67 100644 --- a/include/linux/usb/role.h +++ b/include/linux/usb/role.h @@ -90,6 +90,12 @@ fwnode_usb_role_switch_get(struct fwnode_handle *node) static inline void usb_role_switch_put(struct usb_role_switch *sw) { } +static inline struct usb_role_switch * +usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode) +{ + return NULL; +} + static inline struct usb_role_switch * usb_role_switch_register(struct device *parent, const struct usb_role_switch_desc *desc) diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 78ea3e332688f9985336c25d943f24345fdbc083..e7ce719838b5eca5d1824170e33fd6d176d9b983 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -6,6 +6,8 @@ #define RTR_SOLICITATION_INTERVAL (4*HZ) #define RTR_SOLICITATION_MAX_INTERVAL (3600*HZ) /* 1 hour */ +#define MIN_VALID_LIFETIME (2*3600) /* 2 hours */ + #define TEMP_VALID_LIFETIME (7*86400) #define TEMP_PREFERRED_LIFETIME (86400) #define REGEN_MAX_RETRY (3) diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h index c8696a230b7d922479b6e2d8368f65050d4e2cc8..1a28f299a4c6138ac3e932aa64a90d12b35a37ff 100644 --- a/include/net/bond_3ad.h +++ b/include/net/bond_3ad.h @@ -262,7 +262,7 @@ struct ad_system { struct ad_bond_info { struct ad_system system; /* 802.3ad system structure */ struct bond_3ad_stats stats; - u32 agg_select_timer; /* Timer to select aggregator after all adapter's hand shakes */ + atomic_t agg_select_timer; /* Timer to select aggregator after all adapter's hand shakes */ u16 aggregator_identifier; }; diff --git a/include/net/checksum.h b/include/net/checksum.h index 0d05b9e8690b8bdf7c526f9625484d2bb54c8106..8b7d0c31598f5158fdae9a513c3cf8f01ffa9d99 100644 --- a/include/net/checksum.h +++ b/include/net/checksum.h @@ -22,7 +22,7 @@ #include #ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER -static inline +static __always_inline __wsum csum_and_copy_from_user (const void __user *src, void *dst, int len) { @@ -33,7 +33,7 @@ __wsum csum_and_copy_from_user (const void __user *src, void *dst, #endif #ifndef HAVE_CSUM_COPY_USER -static __inline__ __wsum csum_and_copy_to_user +static __always_inline __wsum csum_and_copy_to_user (const void *src, void __user *dst, int len) { __wsum sum = csum_partial(src, len, ~0U); @@ -45,7 +45,7 @@ static __inline__ __wsum csum_and_copy_to_user #endif #ifndef _HAVE_ARCH_CSUM_AND_COPY -static inline __wsum +static __always_inline __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len) { memcpy(dst, src, len); @@ -54,7 +54,7 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len) #endif #ifndef HAVE_ARCH_CSUM_ADD -static inline __wsum csum_add(__wsum csum, __wsum addend) +static __always_inline __wsum csum_add(__wsum csum, __wsum addend) { u32 res = (__force u32)csum; res += (__force u32)addend; @@ -62,12 +62,12 @@ static inline __wsum csum_add(__wsum csum, __wsum addend) } #endif -static inline __wsum csum_sub(__wsum csum, __wsum addend) +static __always_inline __wsum csum_sub(__wsum csum, __wsum addend) { return csum_add(csum, ~addend); } -static inline __sum16 csum16_add(__sum16 csum, __be16 addend) +static __always_inline __sum16 
csum16_add(__sum16 csum, __be16 addend) { u16 res = (__force u16)csum; @@ -75,12 +75,12 @@ static inline __sum16 csum16_add(__sum16 csum, __be16 addend) return (__force __sum16)(res + (res < (__force u16)addend)); } -static inline __sum16 csum16_sub(__sum16 csum, __be16 addend) +static __always_inline __sum16 csum16_sub(__sum16 csum, __be16 addend) { return csum16_add(csum, ~addend); } -static inline __wsum +static __always_inline __wsum csum_block_add(__wsum csum, __wsum csum2, int offset) { u32 sum = (__force u32)csum2; @@ -92,36 +92,37 @@ csum_block_add(__wsum csum, __wsum csum2, int offset) return csum_add(csum, (__force __wsum)sum); } -static inline __wsum +static __always_inline __wsum csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len) { return csum_block_add(csum, csum2, offset); } -static inline __wsum +static __always_inline __wsum csum_block_sub(__wsum csum, __wsum csum2, int offset) { return csum_block_add(csum, ~csum2, offset); } -static inline __wsum csum_unfold(__sum16 n) +static __always_inline __wsum csum_unfold(__sum16 n) { return (__force __wsum)n; } -static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum) +static __always_inline +__wsum csum_partial_ext(const void *buff, int len, __wsum sum) { return csum_partial(buff, len, sum); } #define CSUM_MANGLED_0 ((__force __sum16)0xffff) -static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff) +static __always_inline void csum_replace_by_diff(__sum16 *sum, __wsum diff) { *sum = csum_fold(csum_add(diff, ~csum_unfold(*sum))); } -static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to) +static __always_inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to) { __wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from); @@ -134,11 +135,16 @@ static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to) * m : old value of a 16bit field * m' : new value of a 16bit field */ -static inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new) +static __always_inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new) { *sum = ~csum16_add(csum16_sub(~(*sum), old), new); } +static inline void csum_replace(__wsum *csum, __wsum old, __wsum new) +{ + *csum = csum_add(csum_sub(*csum, old), new); +} + struct sk_buff; void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb, __be32 from, __be32 to, bool pseudohdr); @@ -148,16 +154,16 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb, void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb, __wsum diff, bool pseudohdr); -static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb, - __be16 from, __be16 to, - bool pseudohdr) +static __always_inline +void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb, + __be16 from, __be16 to, bool pseudohdr) { inet_proto_csum_replace4(sum, skb, (__force __be32)from, (__force __be32)to, pseudohdr); } -static inline __wsum remcsum_adjust(void *ptr, __wsum csum, - int start, int offset) +static __always_inline __wsum remcsum_adjust(void *ptr, __wsum csum, + int start, int offset) { __sum16 *psum = (__sum16 *)(ptr + offset); __wsum delta; @@ -173,7 +179,7 @@ static inline __wsum remcsum_adjust(void *ptr, __wsum csum, return delta; } -static inline void remcsum_unadjust(__sum16 *psum, __wsum delta) +static __always_inline void remcsum_unadjust(__sum16 *psum, __wsum delta) { *psum = csum_fold(csum_sub(delta, (__force __wsum)*psum)); } diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h index 
14efa0ded75dd93d6f63735a95feaf72de6fdef7..adab27ba1ecbf054f5284e4342b1551b104d9a2c 100644 --- a/include/net/dst_metadata.h +++ b/include/net/dst_metadata.h @@ -123,8 +123,20 @@ static inline struct metadata_dst *tun_dst_unclone(struct sk_buff *skb) memcpy(&new_md->u.tun_info, &md_dst->u.tun_info, sizeof(struct ip_tunnel_info) + md_size); +#ifdef CONFIG_DST_CACHE + /* Unclone the dst cache if there is one */ + if (new_md->u.tun_info.dst_cache.cache) { + int ret; + + ret = dst_cache_init(&new_md->u.tun_info.dst_cache, GFP_ATOMIC); + if (ret) { + metadata_dst_free(new_md); + return ERR_PTR(ret); + } + } +#endif + skb_dst_drop(skb); - dst_hold(&new_md->dst); skb_dst_set(skb, &new_md->dst); return new_md; } diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index bac79e817776c6e65ed37f5b5c6af61a5df59ec0..4cbd413e71a3faac2e694b4713f62b42cf9a75ed 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -116,8 +116,15 @@ int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net); static inline void fqdir_pre_exit(struct fqdir *fqdir) { - fqdir->high_thresh = 0; /* prevent creation of new frags */ - fqdir->dead = true; + /* Prevent creation of new frags. + * Pairs with READ_ONCE() in inet_frag_find(). + */ + WRITE_ONCE(fqdir->high_thresh, 0); + + /* Pairs with READ_ONCE() in inet_frag_kill(), ip_expire() + * and ip6frag_expire_frag_queue(). + */ + WRITE_ONCE(fqdir->dead, true); } void fqdir_exit(struct fqdir *fqdir); diff --git a/include/net/ip.h b/include/net/ip.h index 5538e54d4620c04aaab1781391b44d59a2ea894d..de2dc22a78f938470ca5cf4d973adaa538a39137 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -506,19 +506,18 @@ static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb, { struct iphdr *iph = ip_hdr(skb); + /* We had many attacks based on IPID, use the private + * generator as much as we can. + */ + if (sk && inet_sk(sk)->inet_daddr) { + iph->id = htons(inet_sk(sk)->inet_id); + inet_sk(sk)->inet_id += segs; + return; + } if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) { - /* This is only to work around buggy Windows95/2000 - * VJ compression implementations. If the ID field - * does not change, they drop every other packet in - * a TCP stream using header compression. 
- */ - if (sk && inet_sk(sk)->inet_daddr) { - iph->id = htons(inet_sk(sk)->inet_id); - inet_sk(sk)->inet_id += segs; - } else { - iph->id = 0; - } + iph->id = 0; } else { + /* Unfortunately we need the big hammer to get a suitable IPID */ __ip_select_ident(net, iph, segs); } } diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 88bc66b8d02b0a9297c0075ad2b335a7432f9150..95d93ecf07371b8845084104d5777964e8f3e839 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -280,7 +280,7 @@ static inline bool fib6_get_cookie_safe(const struct fib6_info *f6i, fn = rcu_dereference(f6i->fib6_node); if (fn) { - *cookie = fn->fn_sernum; + *cookie = READ_ONCE(fn->fn_sernum); /* pairs with smp_wmb() in fib6_update_sernum_upto_root() */ smp_rmb(); status = true; diff --git a/include/net/ipv6.h b/include/net/ipv6.h index bd1f396cc9c729d734fbf9132335266dfddec5f9..60601896d4747ff5800786b7421873135d47a857 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -390,17 +390,20 @@ static inline void txopt_put(struct ipv6_txoptions *opt) kfree_rcu(opt, rcu); } +#if IS_ENABLED(CONFIG_IPV6) struct ip6_flowlabel *__fl6_sock_lookup(struct sock *sk, __be32 label); extern struct static_key_false_deferred ipv6_flowlabel_exclusive; static inline struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label) { - if (static_branch_unlikely(&ipv6_flowlabel_exclusive.key)) + if (static_branch_unlikely(&ipv6_flowlabel_exclusive.key) && + READ_ONCE(sock_net(sk)->ipv6.flowlabel_has_excl)) return __fl6_sock_lookup(sk, label) ? : ERR_PTR(-ENOENT); return NULL; } +#endif struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space, struct ip6_flowlabel *fl, diff --git a/include/net/ipv6_frag.h b/include/net/ipv6_frag.h index 851029ecff13cb617d41b4043f039ae59e832b82..0a4779175a5238d8989a777ef8417223b6157679 100644 --- a/include/net/ipv6_frag.h +++ b/include/net/ipv6_frag.h @@ -67,7 +67,8 @@ ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq) struct sk_buff *head; rcu_read_lock(); - if (fq->q.fqdir->dead) + /* Paired with the WRITE_ONCE() in fqdir_pre_exit(). 
*/ + if (READ_ONCE(fq->q.fqdir->dead)) goto out_rcu_unlock; spin_lock(&fq->q.lock); diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index ed4a9d098164fa4da6af874bf472ce0e0c70063c..76bfb6cd5815de40304750af985a50d753656e48 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -825,7 +825,7 @@ struct nft_expr_ops { int (*offload)(struct nft_offload_ctx *ctx, struct nft_flow_rule *flow, const struct nft_expr *expr); - u32 offload_flags; + bool (*offload_action)(const struct nft_expr *expr); const struct nft_expr_type *type; void *data; }; diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h index 434a6158852f366c5f5e9bb6899bfe9823e9a8ec..7a453a35a41dda9147b6f10fe9ff22bdc86f1588 100644 --- a/include/net/netfilter/nf_tables_offload.h +++ b/include/net/netfilter/nf_tables_offload.h @@ -67,8 +67,6 @@ struct nft_flow_rule { struct flow_rule *rule; }; -#define NFT_OFFLOAD_F_ACTION (1 << 0) - void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow, enum flow_dissector_key_id addr_type); diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index 5ec054473d81a5d3f300c7cc841a881212cc40ea..1c0fbe3abf24714844b3f79fab06106325893061 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h @@ -80,9 +80,10 @@ struct netns_ipv6 { spinlock_t fib6_gc_lock; unsigned int ip6_rt_gc_expire; unsigned long ip6_rt_last_gc; + unsigned char flowlabel_has_excl; #ifdef CONFIG_IPV6_MULTIPLE_TABLES - unsigned int fib6_rules_require_fldissect; bool fib6_has_custom_rules; + unsigned int fib6_rules_require_fldissect; #ifdef CONFIG_IPV6_SUBTREES unsigned int fib6_routes_require_src; #endif diff --git a/include/net/route.h b/include/net/route.h index ff021cab657e503e7db2044bc7c3309053d6c173..a07c277cd33e82b96d9075a8447438316c5ef181 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -369,7 +369,7 @@ static inline struct neighbour *ip_neigh_gw4(struct net_device *dev, { struct neighbour *neigh; - neigh = __ipv4_neigh_lookup_noref(dev, daddr); + neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)daddr); if (unlikely(!neigh)) neigh = __neigh_create(&arp_tbl, &daddr, dev, false); diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 9226a84dcc14d1a82308cabae6237e61ce870d29..1042c449e7db554e32389213bb5ca1dcdf353a0c 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -1261,6 +1261,7 @@ struct psched_ratecfg { u64 rate_bytes_ps; /* bytes per second */ u32 mult; u16 overhead; + u16 mpu; u8 linklayer; u8 shift; }; @@ -1270,6 +1271,9 @@ static inline u64 psched_l2t_ns(const struct psched_ratecfg *r, { len += r->overhead; + if (len < r->mpu) + len = r->mpu; + if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift; @@ -1292,6 +1296,7 @@ static inline void psched_ratecfg_getrate(struct tc_ratespec *res, res->rate = min_t(u64, r->rate_bytes_ps, ~0U); res->overhead = r->overhead; + res->mpu = r->mpu; res->linklayer = (r->linklayer & TC_LINKLAYER_MASK); } diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 6232a5f048bde66a35c056d08ea6421de3cdeb05..337d29875e518a2bc5c3a3383d754a2134014ac9 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -193,6 +193,11 @@ struct xfrm_state { struct xfrm_algo_aead *aead; const char *geniv; + /* mapping change rate limiting */ + __be16 new_mapping_sport; + u32 new_mapping; /* seconds */ + u32 mapping_maxage; /* seconds for input SA */ + /* Data for 
encapsulator */ struct xfrm_encap_tmpl *encap; struct sock __rcu *encap_sk; diff --git a/include/trace/events/cgroup.h b/include/trace/events/cgroup.h index 7f42a3de59e6b46ff313738371439bf532a88a79..dd7d7c9efecdf33d7c28c89d769e7d00e70cae2d 100644 --- a/include/trace/events/cgroup.h +++ b/include/trace/events/cgroup.h @@ -59,8 +59,8 @@ DECLARE_EVENT_CLASS(cgroup, TP_STRUCT__entry( __field( int, root ) - __field( int, id ) __field( int, level ) + __field( u64, id ) __string( path, path ) ), @@ -71,7 +71,7 @@ DECLARE_EVENT_CLASS(cgroup, __assign_str(path, path); ), - TP_printk("root=%d id=%d level=%d path=%s", + TP_printk("root=%d id=%llu level=%d path=%s", __entry->root, __entry->id, __entry->level, __get_str(path)) ); @@ -126,8 +126,8 @@ DECLARE_EVENT_CLASS(cgroup_migrate, TP_STRUCT__entry( __field( int, dst_root ) - __field( int, dst_id ) __field( int, dst_level ) + __field( u64, dst_id ) __field( int, pid ) __string( dst_path, path ) __string( comm, task->comm ) @@ -142,7 +142,7 @@ DECLARE_EVENT_CLASS(cgroup_migrate, __assign_str(comm, task->comm); ), - TP_printk("dst_root=%d dst_id=%d dst_level=%d dst_path=%s pid=%d comm=%s", + TP_printk("dst_root=%d dst_id=%llu dst_level=%d dst_path=%s pid=%d comm=%s", __entry->dst_root, __entry->dst_id, __entry->dst_level, __get_str(dst_path), __entry->pid, __get_str(comm)) ); @@ -171,8 +171,8 @@ DECLARE_EVENT_CLASS(cgroup_event, TP_STRUCT__entry( __field( int, root ) - __field( int, id ) __field( int, level ) + __field( u64, id ) __string( path, path ) __field( int, val ) ), @@ -185,7 +185,7 @@ DECLARE_EVENT_CLASS(cgroup_event, __entry->val = val; ), - TP_printk("root=%d id=%d level=%d path=%s val=%d", + TP_printk("root=%d id=%llu level=%d path=%s val=%d", __entry->root, __entry->id, __entry->level, __get_str(path), __entry->val) ); diff --git a/include/uapi/linux/can/isotp.h b/include/uapi/linux/can/isotp.h index 7793b26aa154d68f5fb2b074007afdfa0ccd3a44..c55935b64ccc8ef21f5c528e7536ac44474e826d 100644 --- a/include/uapi/linux/can/isotp.h +++ b/include/uapi/linux/can/isotp.h @@ -135,7 +135,7 @@ struct can_isotp_ll_options { #define CAN_ISOTP_FORCE_RXSTMIN 0x100 /* ignore CFs depending on rx stmin */ #define CAN_ISOTP_RX_EXT_ADDR 0x200 /* different rx extended addressing */ #define CAN_ISOTP_WAIT_TX_DONE 0x400 /* wait for tx completion */ - +#define CAN_ISOTP_SF_BROADCAST 0x800 /* 1-to-N functional addressing */ /* default values */ diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h index 4b3395082d15c95acc2afaeb7f15b32baf65ef53..26071021e986f6e3f6f71cf12fba8bd11a2390bd 100644 --- a/include/uapi/linux/netfilter/nf_conntrack_common.h +++ b/include/uapi/linux/netfilter/nf_conntrack_common.h @@ -106,7 +106,7 @@ enum ip_conntrack_status { IPS_NAT_CLASH = IPS_UNTRACKED, #endif - /* Conntrack got a helper explicitly attached via CT target. */ + /* Conntrack got a helper explicitly attached (ruleset, ctnetlink). 
*/ IPS_HELPER_BIT = 13, IPS_HELPER = (1 << IPS_HELPER_BIT), diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h index ffc6a5391bb7bb8f7543cb36fb16487596fd3255..2290c98b47cf8f501809d49b8c920d37633e0039 100644 --- a/include/uapi/linux/xfrm.h +++ b/include/uapi/linux/xfrm.h @@ -308,6 +308,7 @@ enum xfrm_attr_type_t { XFRMA_SET_MARK, /* __u32 */ XFRMA_SET_MARK_MASK, /* __u32 */ XFRMA_IF_ID, /* __u32 */ + XFRMA_MTIMER_THRESH, /* __u32 in seconds for input SA */ __XFRMA_MAX #define XFRMA_OUTPUT_MARK XFRMA_SET_MARK /* Compatibility */ diff --git a/kernel/async.c b/kernel/async.c index 33258e6e20f83c319649c266142180926cba689c..1746cd65e271bcd7e41f3eb571d314c8ea4d8fa7 100644 --- a/kernel/async.c +++ b/kernel/async.c @@ -205,9 +205,6 @@ async_cookie_t async_schedule_node_domain(async_func_t func, void *data, atomic_inc(&entry_count); spin_unlock_irqrestore(&async_lock, flags); - /* mark that this task has queued an async job, used by module init */ - current->flags |= PF_USED_ASYNC; - /* schedule for execution */ queue_work_node(node, system_unbound_wq, &entry->work); diff --git a/kernel/audit.c b/kernel/audit.c index d784000921da328c5ecd40379e198fd88a9fd832..aeec86ed470883f28acde2f3b73011252f4d4d66 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -541,20 +541,22 @@ static void kauditd_printk_skb(struct sk_buff *skb) /** * kauditd_rehold_skb - Handle a audit record send failure in the hold queue * @skb: audit record + * @error: error code (unused) * * Description: * This should only be used by the kauditd_thread when it fails to flush the * hold queue. */ -static void kauditd_rehold_skb(struct sk_buff *skb) +static void kauditd_rehold_skb(struct sk_buff *skb, __always_unused int error) { - /* put the record back in the queue at the same place */ - skb_queue_head(&audit_hold_queue, skb); + /* put the record back in the queue */ + skb_queue_tail(&audit_hold_queue, skb); } /** * kauditd_hold_skb - Queue an audit record, waiting for auditd * @skb: audit record + * @error: error code * * Description: * Queue the audit record, waiting for an instance of auditd. When this @@ -564,19 +566,31 @@ static void kauditd_rehold_skb(struct sk_buff *skb) * and queue it, if we have room. If we want to hold on to the record, but we * don't have room, record a record lost message. */ -static void kauditd_hold_skb(struct sk_buff *skb) +static void kauditd_hold_skb(struct sk_buff *skb, int error) { /* at this point it is uncertain if we will ever send this to auditd so * try to send the message via printk before we go any further */ kauditd_printk_skb(skb); /* can we just silently drop the message? 
*/ - if (!audit_default) { - kfree_skb(skb); - return; + if (!audit_default) + goto drop; + + /* the hold queue is only for when the daemon goes away completely, + * not -EAGAIN failures; if we are in a -EAGAIN state requeue the + * record on the retry queue unless it's full, in which case drop it + */ + if (error == -EAGAIN) { + if (!audit_backlog_limit || + skb_queue_len(&audit_retry_queue) < audit_backlog_limit) { + skb_queue_tail(&audit_retry_queue, skb); + return; + } + audit_log_lost("kauditd retry queue overflow"); + goto drop; } - /* if we have room, queue the message */ + /* if we have room in the hold queue, queue the message */ if (!audit_backlog_limit || skb_queue_len(&audit_hold_queue) < audit_backlog_limit) { skb_queue_tail(&audit_hold_queue, skb); @@ -585,24 +599,32 @@ static void kauditd_hold_skb(struct sk_buff *skb) /* we have no other options - drop the message */ audit_log_lost("kauditd hold queue overflow"); +drop: kfree_skb(skb); } /** * kauditd_retry_skb - Queue an audit record, attempt to send again to auditd * @skb: audit record + * @error: error code (unused) * * Description: * Not as serious as kauditd_hold_skb() as we still have a connected auditd, * but for some reason we are having problems sending it audit records so * queue the given record and attempt to resend. */ -static void kauditd_retry_skb(struct sk_buff *skb) +static void kauditd_retry_skb(struct sk_buff *skb, __always_unused int error) { - /* NOTE: because records should only live in the retry queue for a - * short period of time, before either being sent or moved to the hold - * queue, we don't currently enforce a limit on this queue */ - skb_queue_tail(&audit_retry_queue, skb); + if (!audit_backlog_limit || + skb_queue_len(&audit_retry_queue) < audit_backlog_limit) { + skb_queue_tail(&audit_retry_queue, skb); + return; + } + + /* we have to drop the record, send it via printk as a last effort */ + kauditd_printk_skb(skb); + audit_log_lost("kauditd retry queue overflow"); + kfree_skb(skb); } /** @@ -640,7 +662,7 @@ static void auditd_reset(const struct auditd_connection *ac) /* flush the retry queue to the hold queue, but don't touch the main * queue since we need to process that normally for multicast */ while ((skb = skb_dequeue(&audit_retry_queue))) - kauditd_hold_skb(skb); + kauditd_hold_skb(skb, -ECONNREFUSED); } /** @@ -714,16 +736,18 @@ static int kauditd_send_queue(struct sock *sk, u32 portid, struct sk_buff_head *queue, unsigned int retry_limit, void (*skb_hook)(struct sk_buff *skb), - void (*err_hook)(struct sk_buff *skb)) + void (*err_hook)(struct sk_buff *skb, int error)) { int rc = 0; - struct sk_buff *skb; + struct sk_buff *skb = NULL; + struct sk_buff *skb_tail; unsigned int failed = 0; /* NOTE: kauditd_thread takes care of all our locking, we just use * the netlink info passed to us (e.g. sk and portid) */ - while ((skb = skb_dequeue(queue))) { + skb_tail = skb_peek_tail(queue); + while ((skb != skb_tail) && (skb = skb_dequeue(queue))) { /* call the skb_hook for each skb we touch */ if (skb_hook) (*skb_hook)(skb); @@ -731,7 +755,7 @@ static int kauditd_send_queue(struct sock *sk, u32 portid, /* can we send to anyone via unicast? 
*/ if (!sk) { if (err_hook) - (*err_hook)(skb); + (*err_hook)(skb, -ECONNREFUSED); continue; } @@ -745,7 +769,7 @@ retry: rc == -ECONNREFUSED || rc == -EPERM) { sk = NULL; if (err_hook) - (*err_hook)(skb); + (*err_hook)(skb, rc); if (rc == -EAGAIN) rc = 0; /* continue to drain the queue */ @@ -1540,6 +1564,20 @@ static void audit_receive(struct sk_buff *skb) nlh = nlmsg_next(nlh, &len); } audit_ctl_unlock(); + + /* can't block with the ctrl lock, so penalize the sender now */ + if (audit_backlog_limit && + (skb_queue_len(&audit_queue) > audit_backlog_limit)) { + DECLARE_WAITQUEUE(wait, current); + + /* wake kauditd to try and flush the queue */ + wake_up_interruptible(&kauditd_wait); + + add_wait_queue_exclusive(&audit_backlog_wait, &wait); + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(audit_backlog_wait_time); + remove_wait_queue(&audit_backlog_wait, &wait); + } } /* Log information about who is connecting to the audit multicast socket */ @@ -1824,7 +1862,9 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, * task_tgid_vnr() since auditd_pid is set in audit_receive_msg() * using a PID anchored in the caller's namespace * 2. generator holding the audit_cmd_mutex - we don't want to block - * while holding the mutex */ + * while holding the mutex, although we do penalize the sender + * later in audit_receive() when it is safe to block + */ if (!(auditd_test_task(current) || audit_ctl_owner_current())) { long stime = audit_backlog_wait_time; diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index aaf2fbaa0cc76e4a8615ac961a16da6042c76ec3..dc497eaf226635bc31b44a7901b2cc5654ca2b5d 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -4135,8 +4135,7 @@ static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size, log->len_total = log_size; /* log attributes have to be sane */ - if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 || - !log->level || !log->ubuf) { + if (!bpf_verifier_log_attr_valid(log)) { err = -EINVAL; goto errout; } diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c index f9913bc65ef8d7a34feea4261a1cab351ce66a26..1e4bf23528a3de1d955e7f73284508e90d31332e 100644 --- a/kernel/bpf/ringbuf.c +++ b/kernel/bpf/ringbuf.c @@ -108,7 +108,7 @@ static struct bpf_ringbuf *bpf_ringbuf_area_alloc(size_t data_sz, int numa_node) } rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages, - VM_ALLOC | VM_USERMAP, PAGE_KERNEL); + VM_MAP | VM_USERMAP, PAGE_KERNEL); if (rb) { kmemleak_not_leak(pages); rb->pages = pages; diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index 4477873ac3a0b3542f3ead39dfe72aa93fe207b7..56cd7e6589ff344b7d8a8883be626cca0d2f6fcd 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -664,13 +664,14 @@ BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf, u32, size, u64, flags) { struct pt_regs *regs; - long res; + long res = -EINVAL; if (!try_get_task_stack(task)) return -EFAULT; regs = task_pt_regs(task); - res = __bpf_get_stack(regs, task, NULL, buf, size, flags); + if (regs) + res = __bpf_get_stack(regs, task, NULL, buf, size, flags); put_task_stack(task); return res; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 209e6567cdab051df2d3640cac5b83ce577866eb..419dbc3d060ee1fea6835a59291bf36b9c570d7e 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -1308,6 +1308,7 @@ int generic_map_delete_batch(struct bpf_map *map, maybe_wait_bpf_programs(map); if (err) break; + cond_resched(); } if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp))) err = 
-EFAULT; @@ -1365,6 +1366,7 @@ int generic_map_update_batch(struct bpf_map *map, if (err) break; + cond_resched(); } if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp))) @@ -1462,6 +1464,7 @@ int generic_map_lookup_batch(struct bpf_map *map, swap(prev_key, key); retry = MAP_LOOKUP_RETRIES; cp++; + cond_resched(); } if (err == -EFAULT) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index b43c9de34a2c2d06a9d6fc05cbd868f9b40ca784..015bf2ba4a0b6eb511170b2c4d9fdb98d5e15d9e 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -7725,15 +7725,15 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state, { if (reg_type_may_be_null(reg->type) && reg->id == id && !WARN_ON_ONCE(!reg->id)) { - /* Old offset (both fixed and variable parts) should - * have been known-zero, because we don't allow pointer - * arithmetic on pointers that might be NULL. - */ if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || !tnum_equals_const(reg->var_off, 0) || reg->off)) { - __mark_reg_known_zero(reg); - reg->off = 0; + /* Old offset (both fixed and variable parts) should + * have been known-zero, because we don't allow pointer + * arithmetic on pointers that might be NULL. If we + * see this happening, don't convert the register. + */ + return; } if (is_null) { reg->type = SCALAR_VALUE; @@ -12349,11 +12349,11 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, log->ubuf = (char __user *) (unsigned long) attr->log_buf; log->len_total = attr->log_size; - ret = -EINVAL; /* log attributes have to be sane */ - if (log->len_total < 128 || log->len_total > UINT_MAX >> 2 || - !log->level || !log->ubuf || log->level & ~BPF_LOG_MASK) + if (!bpf_verifier_log_attr_valid(log)) { + ret = -EINVAL; goto err_unlock; + } } if (IS_ERR(btf_vmlinux)) { diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c index 7f71b54c06c5f2880c0a3a8672b5967989580c85..69fba563c810e2759be1a51a4ec1fe0251c59890 100644 --- a/kernel/cgroup/cgroup-v1.c +++ b/kernel/cgroup/cgroup-v1.c @@ -545,6 +545,14 @@ static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of, BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX); + /* + * Release agent gets called with all capabilities, + * require capabilities to set release agent. + */ + if ((of->file->f_cred->user_ns != &init_user_ns) || + !capable(CAP_SYS_ADMIN)) + return -EPERM; + cgrp = cgroup_kn_lock_live(of->kn, false); if (!cgrp) return -ENODEV; @@ -958,6 +966,12 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param) /* Specifying two release agents is forbidden */ if (ctx->release_agent) return invalfc(fc, "release_agent respecified"); + /* + * Release agent gets called with all capabilities, + * require capabilities to set release agent. + */ + if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN)) + return invalfc(fc, "Setting release_agent not allowed"); ctx->release_agent = param->string; param->string = NULL; break; diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 68e14f34261568ae03885c93c407375755cd9f8c..a43eda611d73782710bcbca9bf88d54817b42304 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -3601,6 +3601,12 @@ static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf, cgroup_get(cgrp); cgroup_kn_unlock(of->kn); + /* Allow only one trigger per file descriptor */ + if (of->priv) { + cgroup_put(cgrp); + return -EBUSY; + } + psi = cgroup_ino(cgrp) == 1 ? 
&psi_system : &cgrp->psi; new = psi_trigger_create(psi, buf, nbytes, res); if (IS_ERR(new)) { @@ -3608,8 +3614,7 @@ static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf, return PTR_ERR(new); } - psi_trigger_replace(&of->priv, new); - + smp_store_release(&of->priv, new); cgroup_put(cgrp); return nbytes; @@ -3644,7 +3649,7 @@ static __poll_t cgroup_pressure_poll(struct kernfs_open_file *of, static void cgroup_pressure_release(struct kernfs_open_file *of) { - psi_trigger_replace(&of->priv, NULL); + psi_trigger_destroy(of->priv); } #endif /* CONFIG_PSI */ diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index 1999fcec45c71fcb929f81de1cbd3d6e93ebf78d..0aa224c31f10a827d753592f730b5c1ce3839115 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -1481,10 +1481,15 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, struct cpuset *sibling; struct cgroup_subsys_state *pos_css; + percpu_rwsem_assert_held(&cpuset_rwsem); + /* * Check all its siblings and call update_cpumasks_hier() * if their use_parent_ecpus flag is set in order for them * to use the right effective_cpus value. + * + * The update_cpumasks_hier() function may sleep. So we have to + * release the RCU read lock before calling it. */ rcu_read_lock(); cpuset_for_each_child(sibling, pos_css, parent) { @@ -1492,8 +1497,13 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, continue; if (!sibling->use_parent_ecpus) continue; + if (!css_tryget_online(&sibling->css)) + continue; + rcu_read_unlock(); update_cpumasks_hier(sibling, tmp); + rcu_read_lock(); + css_put(&sibling->css); } rcu_read_unlock(); } @@ -1566,8 +1576,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, * Make sure that subparts_cpus is a subset of cpus_allowed. 
*/ if (cs->nr_subparts_cpus) { - cpumask_andnot(cs->subparts_cpus, cs->subparts_cpus, - cs->cpus_allowed); + cpumask_and(cs->subparts_cpus, cs->subparts_cpus, cs->cpus_allowed); cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus); } spin_unlock_irq(&callback_lock); @@ -2203,6 +2212,7 @@ static void cpuset_attach(struct cgroup_taskset *tset) cgroup_taskset_first(tset, &css); cs = css_cs(css); + cpus_read_lock(); percpu_down_write(&cpuset_rwsem); /* prepare for attach */ @@ -2258,6 +2268,7 @@ static void cpuset_attach(struct cgroup_taskset *tset) wake_up(&cpuset_attach_wq); percpu_up_write(&cpuset_rwsem); + cpus_read_unlock(); } /* The various types of files and directories in a cpuset file system */ diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c index d4637f72239b404f4fbef822e74cb4c441062034..b9082b572e0f822bb752a6924efdf1e11496b079 100644 --- a/kernel/dma/pool.c +++ b/kernel/dma/pool.c @@ -206,7 +206,7 @@ static int __init dma_atomic_pool_init(void) GFP_KERNEL); if (!atomic_pool_kernel) ret = -ENOMEM; - if (IS_ENABLED(CONFIG_ZONE_DMA)) { + if (has_managed_dma()) { atomic_pool_dma = __dma_atomic_pool_init(atomic_pool_size, GFP_KERNEL | GFP_DMA); if (!atomic_pool_dma) @@ -229,7 +229,7 @@ static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp) if (prev == NULL) { if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32)) return atomic_pool_dma32; - if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA)) + if (atomic_pool_dma && (gfp & GFP_DMA)) return atomic_pool_dma; return atomic_pool_kernel; } diff --git a/kernel/events/core.c b/kernel/events/core.c index 639b99a318db1fe02767f2efc883f4847226babb..c8b3f94f0dbb3f0965814e4b441ec70918ad22d2 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -266,7 +266,7 @@ static void event_function_call(struct perf_event *event, event_f func, void *da if (!event->parent) { /* * If this is a !child event, we must hold ctx::mutex to - * stabilize the the event->ctx relation. See + * stabilize the event->ctx relation. See * perf_event_ctx_lock(). 
*/ lockdep_assert_held(&ctx->mutex); @@ -673,6 +673,23 @@ perf_event_set_state(struct perf_event *event, enum perf_event_state state) WRITE_ONCE(event->state, state); } +/* + * UP store-release, load-acquire + */ + +#define __store_release(ptr, val) \ +do { \ + barrier(); \ + WRITE_ONCE(*(ptr), (val)); \ +} while (0) + +#define __load_acquire(ptr) \ +({ \ + __unqual_scalar_typeof(*(ptr)) ___p = READ_ONCE(*(ptr)); \ + barrier(); \ + ___p; \ +}) + #ifdef CONFIG_CGROUP_PERF static inline bool @@ -718,34 +735,51 @@ static inline u64 perf_cgroup_event_time(struct perf_event *event) return t->time; } -static inline void __update_cgrp_time(struct perf_cgroup *cgrp) +static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now) { - struct perf_cgroup_info *info; - u64 now; - - now = perf_clock(); + struct perf_cgroup_info *t; - info = this_cpu_ptr(cgrp->info); + t = per_cpu_ptr(event->cgrp->info, event->cpu); + if (!__load_acquire(&t->active)) + return t->time; + now += READ_ONCE(t->timeoffset); + return now; +} - info->time += now - info->timestamp; +static inline void __update_cgrp_time(struct perf_cgroup_info *info, u64 now, bool adv) +{ + if (adv) + info->time += now - info->timestamp; info->timestamp = now; + /* + * see update_context_time() + */ + WRITE_ONCE(info->timeoffset, info->time - info->timestamp); } -static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) +static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx, bool final) { struct perf_cgroup *cgrp = cpuctx->cgrp; struct cgroup_subsys_state *css; + struct perf_cgroup_info *info; if (cgrp) { + u64 now = perf_clock(); + for (css = &cgrp->css; css; css = css->parent) { cgrp = container_of(css, struct perf_cgroup, css); - __update_cgrp_time(cgrp); + info = this_cpu_ptr(cgrp->info); + + __update_cgrp_time(info, now, true); + if (final) + __store_release(&info->active, 0); } } } static inline void update_cgrp_time_from_event(struct perf_event *event) { + struct perf_cgroup_info *info; struct perf_cgroup *cgrp; /* @@ -759,8 +793,10 @@ static inline void update_cgrp_time_from_event(struct perf_event *event) /* * Do not update time when cgroup is not active */ - if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) - __update_cgrp_time(event->cgrp); + if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) { + info = this_cpu_ptr(event->cgrp->info); + __update_cgrp_time(info, perf_clock(), true); + } } static inline void @@ -784,7 +820,8 @@ perf_cgroup_set_timestamp(struct task_struct *task, for (css = &cgrp->css; css; css = css->parent) { cgrp = container_of(css, struct perf_cgroup, css); info = this_cpu_ptr(cgrp->info); - info->timestamp = ctx->timestamp; + __update_cgrp_time(info, ctx->timestamp, false); + __store_release(&info->active, 1); } } @@ -801,7 +838,7 @@ static DEFINE_PER_CPU(struct list_head, cgrp_cpuctx_list); */ static void perf_cgroup_switch(struct task_struct *task, int mode) { - struct perf_cpu_context *cpuctx; + struct perf_cpu_context *cpuctx, *tmp; struct list_head *list; unsigned long flags; @@ -812,7 +849,7 @@ static void perf_cgroup_switch(struct task_struct *task, int mode) local_irq_save(flags); list = this_cpu_ptr(&cgrp_cpuctx_list); - list_for_each_entry(cpuctx, list, cgrp_cpuctx_entry) { + list_for_each_entry_safe(cpuctx, tmp, list, cgrp_cpuctx_entry) { WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0); perf_ctx_lock(cpuctx, cpuctx->task_ctx); @@ -980,14 +1017,6 @@ out: return ret; } -static inline void 
-perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) -{ - struct perf_cgroup_info *t; - t = per_cpu_ptr(event->cgrp->info, event->cpu); - event->shadow_ctx_time = now - t->timestamp; -} - static inline void perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx) { @@ -1065,7 +1094,8 @@ static inline void update_cgrp_time_from_event(struct perf_event *event) { } -static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) +static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx, + bool final) { } @@ -1097,12 +1127,12 @@ perf_cgroup_switch(struct task_struct *task, struct task_struct *next) { } -static inline void -perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) +static inline u64 perf_cgroup_event_time(struct perf_event *event) { + return 0; } -static inline u64 perf_cgroup_event_time(struct perf_event *event) +static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now) { return 0; } @@ -1300,7 +1330,7 @@ static void put_ctx(struct perf_event_context *ctx) * life-time rules separate them. That is an exiting task cannot fork, and a * spawning task cannot (yet) exit. * - * But remember that that these are parent<->child context relations, and + * But remember that these are parent<->child context relations, and * migration does not affect children, therefore these two orderings should not * interact. * @@ -1439,7 +1469,7 @@ static u64 primary_event_id(struct perf_event *event) /* * Get the perf_event_context for a task and lock it. * - * This has to cope with with the fact that until it is locked, + * This has to cope with the fact that until it is locked, * the context could get moved to another task. */ static struct perf_event_context * @@ -1524,22 +1554,59 @@ static void perf_unpin_context(struct perf_event_context *ctx) /* * Update the record of the current time in a context. */ -static void update_context_time(struct perf_event_context *ctx) +static void __update_context_time(struct perf_event_context *ctx, bool adv) { u64 now = perf_clock(); - ctx->time += now - ctx->timestamp; + if (adv) + ctx->time += now - ctx->timestamp; ctx->timestamp = now; + + /* + * The above: time' = time + (now - timestamp), can be re-arranged + * into: time` = now + (time - timestamp), which gives a single value + * offset to compute future time without locks on. + * + * See perf_event_time_now(), which can be used from NMI context where + * it's (obviously) not possible to acquire ctx->lock in order to read + * both the above values in a consistent manner. + */ + WRITE_ONCE(ctx->timeoffset, ctx->time - ctx->timestamp); +} + +static void update_context_time(struct perf_event_context *ctx) +{ + __update_context_time(ctx, true); } static u64 perf_event_time(struct perf_event *event) { struct perf_event_context *ctx = event->ctx; + if (unlikely(!ctx)) + return 0; + if (is_cgroup_event(event)) return perf_cgroup_event_time(event); - return ctx ? 
ctx->time : 0; + return ctx->time; +} + +static u64 perf_event_time_now(struct perf_event *event, u64 now) +{ + struct perf_event_context *ctx = event->ctx; + + if (unlikely(!ctx)) + return 0; + + if (is_cgroup_event(event)) + return perf_cgroup_event_time_now(event, now); + + if (!(__load_acquire(&ctx->is_active) & EVENT_TIME)) + return ctx->time; + + now += READ_ONCE(ctx->timeoffset); + return now; } static enum event_type_t get_event_type(struct perf_event *event) @@ -2333,7 +2400,7 @@ __perf_remove_from_context(struct perf_event *event, if (ctx->is_active & EVENT_TIME) { update_context_time(ctx); - update_cgrp_time_from_cpuctx(cpuctx); + update_cgrp_time_from_cpuctx(cpuctx, false); } event_sched_out(event, cpuctx, ctx); @@ -2342,6 +2409,9 @@ __perf_remove_from_context(struct perf_event *event, list_del_event(event, ctx); if (!ctx->nr_events && ctx->is_active) { + if (ctx == &cpuctx->ctx) + update_cgrp_time_from_cpuctx(cpuctx, true); + ctx->is_active = 0; ctx->rotate_necessary = 0; if (ctx->task) { @@ -2467,40 +2537,6 @@ void perf_event_disable_inatomic(struct perf_event *event) irq_work_queue(&event->pending); } -static void perf_set_shadow_time(struct perf_event *event, - struct perf_event_context *ctx) -{ - /* - * use the correct time source for the time snapshot - * - * We could get by without this by leveraging the - * fact that to get to this function, the caller - * has most likely already called update_context_time() - * and update_cgrp_time_xx() and thus both timestamp - * are identical (or very close). Given that tstamp is, - * already adjusted for cgroup, we could say that: - * tstamp - ctx->timestamp - * is equivalent to - * tstamp - cgrp->timestamp. - * - * Then, in perf_output_read(), the calculation would - * work with no changes because: - * - event is guaranteed scheduled in - * - no scheduled out in between - * - thus the timestamp would be the same - * - * But this is a bit hairy. - * - * So instead, we have an explicit cgroup call to remain - * within the time time source all along. We believe it - * is cleaner and simpler to understand. - */ - if (is_cgroup_event(event)) - perf_cgroup_set_shadow_time(event, event->tstamp); - else - event->shadow_ctx_time = event->tstamp - ctx->timestamp; -} - #define MAX_INTERRUPTS (~0ULL) static void perf_log_throttle(struct perf_event *event, int enable); @@ -2541,8 +2577,6 @@ event_sched_in(struct perf_event *event, perf_pmu_disable(event->pmu); - perf_set_shadow_time(event, ctx); - perf_log_itrace_start(event); if (event->pmu->add(event, PERF_EF_START)) { @@ -3216,16 +3250,6 @@ static void ctx_sched_out(struct perf_event_context *ctx, return; } - ctx->is_active &= ~event_type; - if (!(ctx->is_active & EVENT_ALL)) - ctx->is_active = 0; - - if (ctx->task) { - WARN_ON_ONCE(cpuctx->task_ctx != ctx); - if (!ctx->is_active) - cpuctx->task_ctx = NULL; - } - /* * Always update time if it was set; not only when it changes. 
* Otherwise we can 'forget' to update time for any but the last @@ -3239,7 +3263,22 @@ static void ctx_sched_out(struct perf_event_context *ctx, if (is_active & EVENT_TIME) { /* update (and stop) ctx time */ update_context_time(ctx); - update_cgrp_time_from_cpuctx(cpuctx); + update_cgrp_time_from_cpuctx(cpuctx, ctx == &cpuctx->ctx); + /* + * CPU-release for the below ->is_active store, + * see __load_acquire() in perf_event_time_now() + */ + barrier(); + } + + ctx->is_active &= ~event_type; + if (!(ctx->is_active & EVENT_ALL)) + ctx->is_active = 0; + + if (ctx->task) { + WARN_ON_ONCE(cpuctx->task_ctx != ctx); + if (!ctx->is_active) + cpuctx->task_ctx = NULL; } is_active ^= ctx->is_active; /* changed bits */ @@ -3676,13 +3715,19 @@ static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx, return 0; } +/* + * Because the userpage is strictly per-event (there is no concept of context, + * so there cannot be a context indirection), every userpage must be updated + * when context time starts :-( + * + * IOW, we must not miss EVENT_TIME edges. + */ static inline bool event_update_userpage(struct perf_event *event) { if (likely(!atomic_read(&event->mmap_count))) return false; perf_event_update_time(event); - perf_set_shadow_time(event, event->ctx); perf_event_update_userpage(event); return true; @@ -3766,13 +3811,23 @@ ctx_sched_in(struct perf_event_context *ctx, struct task_struct *task) { int is_active = ctx->is_active; - u64 now; lockdep_assert_held(&ctx->lock); if (likely(!ctx->nr_events)) return; + if (is_active ^ EVENT_TIME) { + /* start ctx time */ + __update_context_time(ctx, false); + perf_cgroup_set_timestamp(task, ctx); + /* + * CPU-release for the below ->is_active store, + * see __load_acquire() in perf_event_time_now() + */ + barrier(); + } + ctx->is_active |= (event_type | EVENT_TIME); if (ctx->task) { if (!is_active) @@ -3783,13 +3838,6 @@ ctx_sched_in(struct perf_event_context *ctx, is_active ^= ctx->is_active; /* changed bits */ - if (is_active & EVENT_TIME) { - /* start ctx time */ - now = perf_clock(); - ctx->timestamp = now; - perf_cgroup_set_timestamp(task, ctx); - } - /* * First go through the list and put on any pinned groups * in order to give them the best chance of going on. 
@@ -4325,6 +4373,18 @@ static inline u64 perf_event_count(struct perf_event *event) return local64_read(&event->count) + atomic64_read(&event->child_count); } +static void calc_timer_values(struct perf_event *event, + u64 *now, + u64 *enabled, + u64 *running) +{ + u64 ctx_time; + + *now = perf_clock(); + ctx_time = perf_event_time_now(event, *now); + __perf_update_times(event, ctx_time, enabled, running); +} + /* * NMI-safe method to read a local event, that is an event that * is: @@ -4384,10 +4444,9 @@ int perf_event_read_local(struct perf_event *event, u64 *value, *value = local64_read(&event->count); if (enabled || running) { - u64 now = event->shadow_ctx_time + perf_clock(); - u64 __enabled, __running; + u64 __enabled, __running, __now;; - __perf_update_times(event, now, &__enabled, &__running); + calc_timer_values(event, &__now, &__enabled, &__running); if (enabled) *enabled = __enabled; if (running) @@ -5694,18 +5753,6 @@ static int perf_event_index(struct perf_event *event) return event->pmu->event_idx(event); } -static void calc_timer_values(struct perf_event *event, - u64 *now, - u64 *enabled, - u64 *running) -{ - u64 ctx_time; - - *now = perf_clock(); - ctx_time = event->shadow_ctx_time + *now; - __perf_update_times(event, ctx_time, enabled, running); -} - static void perf_event_init_userpage(struct perf_event *event) { struct perf_event_mmap_page *userpg; @@ -5830,6 +5877,8 @@ static void ring_buffer_attach(struct perf_event *event, struct perf_buffer *old_rb = NULL; unsigned long flags; + WARN_ON_ONCE(event->parent); + if (event->rb) { /* * Should be impossible, we set this when removing @@ -5887,6 +5936,9 @@ static void ring_buffer_wakeup(struct perf_event *event) { struct perf_buffer *rb; + if (event->parent) + event = event->parent; + rcu_read_lock(); rb = rcu_dereference(event->rb); if (rb) { @@ -5900,6 +5952,9 @@ struct perf_buffer *ring_buffer_get(struct perf_event *event) { struct perf_buffer *rb; + if (event->parent) + event = event->parent; + rcu_read_lock(); rb = rcu_dereference(event->rb); if (rb) { @@ -6245,7 +6300,6 @@ accounting: ring_buffer_attach(event, rb); perf_event_update_time(event); - perf_set_shadow_time(event, event->ctx); perf_event_init_userpage(event); perf_event_update_userpage(event); } else { @@ -6395,18 +6449,25 @@ static void perf_pending_event(struct irq_work *entry) * Later on, we might change it to a list if there is * another virtualization implementation supporting the callbacks. */ -struct perf_guest_info_callbacks *perf_guest_cbs; +struct perf_guest_info_callbacks __rcu *perf_guest_cbs; int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) { - perf_guest_cbs = cbs; + if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs))) + return -EBUSY; + + rcu_assign_pointer(perf_guest_cbs, cbs); return 0; } EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks); int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) { - perf_guest_cbs = NULL; + if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs) != cbs)) + return -EINVAL; + + rcu_assign_pointer(perf_guest_cbs, NULL); + synchronize_rcu(); return 0; } EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks); @@ -6565,7 +6626,7 @@ static unsigned long perf_prepare_sample_aux(struct perf_event *event, if (WARN_ON_ONCE(READ_ONCE(sampler->oncpu) != smp_processor_id())) goto out; - rb = ring_buffer_get(sampler->parent ? 
sampler->parent : sampler); + rb = ring_buffer_get(sampler); if (!rb) goto out; @@ -6631,7 +6692,7 @@ static void perf_aux_sample_output(struct perf_event *event, if (WARN_ON_ONCE(!sampler || !data->aux_size)) return; - rb = ring_buffer_get(sampler->parent ? sampler->parent : sampler); + rb = ring_buffer_get(sampler); if (!rb) return; diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 00b0358739ab3b6b66b780e20f3374bbef4d4d34..e1bbb3b92921d8eb084d2844cd2ae85ebaf5e14f 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1735,7 +1735,7 @@ void uprobe_free_utask(struct task_struct *t) } /* - * Allocate a uprobe_task object for the task if if necessary. + * Allocate a uprobe_task object for the task if necessary. * Called when the thread hits a breakpoint. * * Returns: diff --git a/kernel/fork.c b/kernel/fork.c index e465903abed9e2d3cc8f06945f5737bf1b960219..a78c0b02edd55658f5e67c7c43a3559614823740 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2258,10 +2258,6 @@ static __latent_entropy struct task_struct *copy_process( goto bad_fork_cancel_cgroup; } - /* past the last point of failure */ - if (pidfile) - fd_install(pidfd, pidfile); - init_task_pid_links(p); if (likely(p->pid)) { ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); @@ -2310,6 +2306,9 @@ static __latent_entropy struct task_struct *copy_process( syscall_tracepoint_update(p); write_unlock_irq(&tasklist_lock); + if (pidfile) + fd_install(pidfd, pidfile); + proc_fork_connector(p); sched_post_fork(p, args); cgroup_post_fork(p, args); diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 1f6a2f1226fa948c3bbfdae2b38355795b2dbeaa..af4b35450556ff9a3165307edfa54c69b6e78b32 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -3387,7 +3387,7 @@ struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i) u16 chain_hlock = chain_hlocks[chain->base + i]; unsigned int class_idx = chain_hlock_class_idx(chain_hlock); - return lock_classes + class_idx - 1; + return lock_classes + class_idx; } /* @@ -3455,7 +3455,7 @@ static void print_chain_keys_chain(struct lock_chain *chain) hlock_id = chain_hlocks[chain->base + i]; chain_key = print_chain_key_iteration(hlock_id, chain_key); - print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id) - 1); + print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id)); printk("\n"); } } diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 2f8cd616d3b29aa93498db95e426acd4abbb153b..f00dd928fc7113c6a31aaa05d6214c08f3fee201 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -1438,7 +1438,7 @@ rt_mutex_fasttrylock(struct rt_mutex *lock, } /* - * Performs the wakeup of the the top-waiter and re-enables preemption. + * Performs the wakeup of the top-waiter and re-enables preemption. */ void rt_mutex_postunlock(struct wake_q_head *wake_q) { @@ -1832,7 +1832,7 @@ struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock) * been started. * @waiter: the pre-initialized rt_mutex_waiter * - * Wait for the the lock acquisition started on our behalf by + * Wait for the lock acquisition started on our behalf by * rt_mutex_start_proxy_lock(). Upon failure, the caller must call * rt_mutex_cleanup_proxy_lock(). 
* diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c index a163542d178ee11198080f6cad124c9e2c25d115..cc5cc889b5b7fb8e9d6e57863b177e38c462d059 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c @@ -1177,7 +1177,7 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state) /* * If there were already threads queued before us and: - * 1) there are no no active locks, wake the front + * 1) there are no active locks, wake the front * queued process(es) as the handoff bit might be set. * 2) there are no active writers and some readers, the lock * must be read owned; so we try to wake any read lock diff --git a/kernel/locking/semaphore.c b/kernel/locking/semaphore.c index d9dd94defc0a928c4697b49c2071090c6105966e..9aa855a96c4ae2e1149baafa56f1d0d6e1c8717e 100644 --- a/kernel/locking/semaphore.c +++ b/kernel/locking/semaphore.c @@ -119,7 +119,7 @@ EXPORT_SYMBOL(down_killable); * @sem: the semaphore to be acquired * * Try to acquire the semaphore atomically. Returns 0 if the semaphore has - * been acquired successfully or 1 if it it cannot be acquired. + * been acquired successfully or 1 if it cannot be acquired. * * NOTE: This return value is inverted from both spin_trylock and * mutex_trylock! Be careful about this when converting code. diff --git a/kernel/module.c b/kernel/module.c index 185b2655bc206b35b07cc2bf5866adc9af462a1f..5f4403198f04bd4ca736b0b85662dfd48f2e338c 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -3714,12 +3714,6 @@ static noinline int do_init_module(struct module *mod) } freeinit->module_init = mod->init_layout.base; - /* - * We want to find out whether @mod uses async during init. Clear - * PF_USED_ASYNC. async_schedule*() will set it. - */ - current->flags &= ~PF_USED_ASYNC; - do_mod_ctors(mod); /* Start the module */ if (mod->init != NULL) @@ -3745,22 +3739,13 @@ static noinline int do_init_module(struct module *mod) /* * We need to finish all async code before the module init sequence - * is done. This has potential to deadlock. For example, a newly - * detected block device can trigger request_module() of the - * default iosched from async probing task. Once userland helper - * reaches here, async_synchronize_full() will wait on the async - * task waiting on request_module() and deadlock. - * - * This deadlock is avoided by perfomring async_synchronize_full() - * iff module init queued any async jobs. This isn't a full - * solution as it will deadlock the same if module loading from - * async jobs nests more than once; however, due to the various - * constraints, this hack seems to be the best option for now. - * Please refer to the following thread for details. + * is done. This has potential to deadlock if synchronous module + * loading is requested from async (which is not allowed!). * - * http://thread.gmane.org/gmane.linux.kernel/1420814 + * See commit 0fdff3ec6d87 ("async, kmod: warn on synchronous + * request_module() from async workers") for more details. */ - if (!mod->async_probe_requested && (current->flags & PF_USED_ASYNC)) + if (!mod->async_probe_requested) async_synchronize_full(); ftrace_free_mem(mod, mod->init_layout.base, mod->init_layout.base + diff --git a/kernel/power/main.c b/kernel/power/main.c index 0aefd6f57e0acd0bf0fd726fcc6cd6c700ec7312..d6140ed15d0b17098e8e28ec981e33759a26790a 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -504,7 +504,10 @@ static ssize_t pm_wakeup_irq_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - return pm_wakeup_irq ? 
sprintf(buf, "%u\n", pm_wakeup_irq) : -ENODATA; + if (!pm_wakeup_irq()) + return -ENODATA; + + return sprintf(buf, "%u\n", pm_wakeup_irq()); } power_attr_ro(pm_wakeup_irq); diff --git a/kernel/power/process.c b/kernel/power/process.c index 45b054b7b5ec81f814f6bf12c9462318424e6e01..b9faa363c46af612125629eaa5fc41f5c0579609 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -134,7 +134,7 @@ int freeze_processes(void) if (!pm_freezing) atomic_inc(&system_freezing_cnt); - pm_wakeup_clear(true); + pm_wakeup_clear(0); pr_info("Freezing user space processes ... "); pm_freezing = true; error = try_to_freeze_tasks(true); diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 46b1804c1ddf7c22e81a21bd44b299899ddc069a..1da013f50059a32dc4744a68dafde9d6927c9000 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -944,8 +944,7 @@ static void memory_bm_recycle(struct memory_bitmap *bm) * Register a range of page frames the contents of which should not be saved * during hibernation (to be used in the early initialization code). */ -void __init __register_nosave_region(unsigned long start_pfn, - unsigned long end_pfn, int use_kmalloc) +void __init register_nosave_region(unsigned long start_pfn, unsigned long end_pfn) { struct nosave_region *region; @@ -961,18 +960,12 @@ void __init __register_nosave_region(unsigned long start_pfn, goto Report; } } - if (use_kmalloc) { - /* During init, this shouldn't fail */ - region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL); - BUG_ON(!region); - } else { - /* This allocation cannot fail */ - region = memblock_alloc(sizeof(struct nosave_region), - SMP_CACHE_BYTES); - if (!region) - panic("%s: Failed to allocate %zu bytes\n", __func__, - sizeof(struct nosave_region)); - } + /* This allocation cannot fail */ + region = memblock_alloc(sizeof(struct nosave_region), + SMP_CACHE_BYTES); + if (!region) + panic("%s: Failed to allocate %zu bytes\n", __func__, + sizeof(struct nosave_region)); region->start_pfn = start_pfn; region->end_pfn = end_pfn; list_add_tail(®ion->list, &nosave_regions); diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 32391acc806bf6c9d19bad31e958755eb3b0ab99..4aa4d5d3947f140e3793e575bbd3519bfa678735 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -138,8 +138,6 @@ static void s2idle_loop(void) break; } - pm_wakeup_clear(false); - s2idle_enter(); } diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c index 105df4dfc783971acdd1d17ee2971cb892c97d02..52571dcad768b988eaadbd3ce98a4ac42dd2f7dd 100644 --- a/kernel/power/wakelock.c +++ b/kernel/power/wakelock.c @@ -39,23 +39,20 @@ ssize_t pm_show_wakelocks(char *buf, bool show_active) { struct rb_node *node; struct wakelock *wl; - char *str = buf; - char *end = buf + PAGE_SIZE; + int len = 0; mutex_lock(&wakelocks_lock); for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) { wl = rb_entry(node, struct wakelock, node); if (wl->ws->active == show_active) - str += scnprintf(str, end - str, "%s ", wl->name); + len += sysfs_emit_at(buf, len, "%s ", wl->name); } - if (str > buf) - str--; - str += scnprintf(str, end - str, "\n"); + len += sysfs_emit_at(buf, len, "\n"); mutex_unlock(&wakelocks_lock); - return (str - buf); + return len; } #if CONFIG_PM_WAKELOCKS_LIMIT > 0 diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index b74e7ace4376be44e07f9575e5857be0044a424a..844c35803739e992c832398cd14c09c37ade7459 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1581,10 +1581,11 @@ static void 
__maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp, struct rcu_data *rdp) { rcu_lockdep_assert_cblist_protected(rdp); - if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || - !raw_spin_trylock_rcu_node(rnp)) + if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || !raw_spin_trylock_rcu_node(rnp)) return; - WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp)); + // The grace period cannot end while we hold the rcu_node lock. + if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) + WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp)); raw_spin_unlock_rcu_node(rnp); } diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index 0ffe185c1f46ade03145f3f5059b0ad637acf110..0dc16345e668c6ead06136c5a13047bfc4509ae0 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -387,6 +387,7 @@ retry_ipi: continue; } if (get_cpu() == cpu) { + mask_ofl_test |= mask; put_cpu(); continue; } diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 6ed153f226b392557cea032b40c14cd4d285f351..244f32e98360fdfda8199a6a3f8247d33a020cdb 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -628,7 +628,7 @@ static void rcu_read_unlock_special(struct task_struct *t) set_tsk_need_resched(current); set_preempt_need_resched(); if (IS_ENABLED(CONFIG_IRQ_WORK) && irqs_were_disabled && - !rdp->defer_qs_iw_pending && exp) { + !rdp->defer_qs_iw_pending && exp && cpu_online(rdp->cpu)) { // Get scheduler to re-evaluate and call hooks. // If !IRQ_WORK, FQS scan will eventually IPI. init_irq_work(&rdp->defer_qs_iw, diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index 5a55d230045240246e34f5a9ec099e62e1fde0fc..ca0eef7d3852b56c55641b30de5c87a3c80523d2 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -147,10 +147,10 @@ void account_guest_time(struct task_struct *p, u64 cputime) /* Add guest time to cpustat. */ if (task_nice(p) > 0) { - cpustat[CPUTIME_NICE] += cputime; + task_group_account_field(p, CPUTIME_NICE, cputime); cpustat[CPUTIME_GUEST_NICE] += cputime; } else { - cpustat[CPUTIME_USER] += cputime; + task_group_account_field(p, CPUTIME_USER, cputime); cpustat[CPUTIME_GUEST] += cputime; } } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index c004e3b89c32467526ad91574571c230e78cc7a9..acd9833b8ec227c9e6329ef8e55ae2f323f4e430 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3379,7 +3379,6 @@ void set_task_rq_fair(struct sched_entity *se, se->avg.last_update_time = n_last_update_time; } - /* * When on migration a sched_entity joins/leaves the PELT hierarchy, we need to * propagate its contribution. The key to this propagation is the invariant @@ -3447,7 +3446,6 @@ void set_task_rq_fair(struct sched_entity *se, * XXX: only do this for the part of runnable > running ? * */ - static inline void update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) { @@ -3676,7 +3674,19 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) r = removed_util; sub_positive(&sa->util_avg, r); - sa->util_sum = sa->util_avg * divider; + sub_positive(&sa->util_sum, r * divider); + /* + * Because of rounding, se->util_sum might ends up being +1 more than + * cfs->util_sum. Although this is not a problem by itself, detaching + * a lot of tasks with the rounding problem between 2 updates of + * util_avg (~1ms) can make cfs->util_sum becoming null whereas + * cfs_util_avg is not. + * Check that util_sum is still above its lower bound for the new + * util_avg. 
Given that period_contrib might have moved since the last + * sync, we are only sure that util_sum must be above or equal to + * util_avg * minimum possible divider + */ + sa->util_sum = max_t(u32, sa->util_sum, sa->util_avg * PELT_MIN_DIVIDER); r = removed_runnable; sub_positive(&sa->runnable_avg, r); @@ -5149,7 +5159,7 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) /* * When a group wakes up we want to make sure that its quota is not already * expired/exceeded, otherwise it may be allowed to steal additional ticks of - * runtime as update_curr() throttling can not not trigger until it's on-rq. + * runtime as update_curr() throttling can not trigger until it's on-rq. */ static void check_enqueue_throttle(struct cfs_rq *cfs_rq) { @@ -6284,8 +6294,10 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) * pattern is IO completions. */ if (is_per_cpu_kthread(current) && + in_task() && prev == smp_processor_id() && - this_rq()->nr_running <= 1) { + this_rq()->nr_running <= 1 && + asym_fits_capacity(task_util, prev)) { return prev; } diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c index 16f57e71f9c441ada23d071fabcc8c0e32a54ccc..cc7cd512e4e33833d781b5b6686fd6085fbf879f 100644 --- a/kernel/sched/membarrier.c +++ b/kernel/sched/membarrier.c @@ -19,11 +19,11 @@ #endif #ifdef CONFIG_RSEQ -#define MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ_BITMASK \ +#define MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK \ (MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ \ - | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ_BITMASK) + | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ) #else -#define MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ_BITMASK 0 +#define MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK 0 #endif #define MEMBARRIER_CMD_BITMASK \ @@ -31,7 +31,8 @@ | MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED \ | MEMBARRIER_CMD_PRIVATE_EXPEDITED \ | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED \ - | MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK) + | MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK \ + | MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK) static void ipi_mb(void *info) { @@ -315,7 +316,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm) /* * For each cpu runqueue, if the task's mm match @mm, ensure that all - * @mm's membarrier state set bits are also set in in the runqueue's + * @mm's membarrier state set bits are also set in the runqueue's * membarrier state. This ensures that a runqueue scheduling * between threads which are users of @mm has its membarrier state * updated. 
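The kernel/sched/fair.c hunk above pairs with the PELT_MIN_DIVIDER definition in the kernel/sched/pelt.h hunk that follows: after a detached task's contribution is subtracted with sub_positive(), util_sum is clamped with max_t() so it never drops below util_avg * PELT_MIN_DIVIDER, which is the smallest value get_pelt_divider() can return (the divider is PELT_MIN_DIVIDER + period_contrib). What follows is a minimal stand-alone C sketch of that clamp, not kernel code; the LOAD_AVG_MAX value of 47742 and the toy_sched_avg type are assumptions made purely for illustration.

#include <stdio.h>

/* Assumed constant for illustration; 47742 is the usual mainline LOAD_AVG_MAX. */
#define LOAD_AVG_MAX		47742
#define PELT_MIN_DIVIDER	(LOAD_AVG_MAX - 1024)

struct toy_sched_avg {			/* simplified stand-in for struct sched_avg */
	unsigned long period_contrib;
	unsigned long util_sum;
	unsigned long util_avg;
};

/* Subtract without going below zero, like the kernel's sub_positive(). */
static unsigned long sub_pos(unsigned long val, unsigned long sub)
{
	return val > sub ? val - sub : 0;
}

/*
 * Mirrors the fix in update_cfs_rq_load_avg(): remove a detached task's
 * utilization, then keep util_sum at or above its lower bound so that
 * independent rounding of util_sum and util_avg cannot leave util_sum at
 * zero while util_avg is still positive.
 */
static void remove_util(struct toy_sched_avg *sa, unsigned long removed_util)
{
	unsigned long divider = PELT_MIN_DIVIDER + sa->period_contrib;

	sa->util_avg = sub_pos(sa->util_avg, removed_util);
	sa->util_sum = sub_pos(sa->util_sum, removed_util * divider);
	if (sa->util_sum < sa->util_avg * PELT_MIN_DIVIDER)	/* the max_t() clamp */
		sa->util_sum = sa->util_avg * PELT_MIN_DIVIDER;
}

int main(void)
{
	struct toy_sched_avg sa = {
		.period_contrib	= 1000,
		.util_avg	= 100,
		.util_sum	= 100UL * PELT_MIN_DIVIDER + 5,
	};

	remove_util(&sa, 90);	/* detach a task worth 90 of util_avg */
	printf("util_avg=%lu util_sum=%lu floor=%lu\n",
	       sa.util_avg, sa.util_sum, sa.util_avg * PELT_MIN_DIVIDER);
	return 0;
}

With these numbers the subtraction alone would leave util_sum at 377185, while the floor for the new util_avg of 10 is 467180, so the clamp raises util_sum back to the floor; this is exactly the rounding scenario the comment in the hunk describes.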
diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h index 0b9aeebb9c325f0b572125b0b91b70f065422771..45bf08e22207cb5d5ee1445dfee9b0e8d1421d20 100644 --- a/kernel/sched/pelt.h +++ b/kernel/sched/pelt.h @@ -37,9 +37,11 @@ update_irq_load_avg(struct rq *rq, u64 running) } #endif +#define PELT_MIN_DIVIDER (LOAD_AVG_MAX - 1024) + static inline u32 get_pelt_divider(struct sched_avg *avg) { - return LOAD_AVG_MAX - 1024 + avg->period_contrib; + return PELT_MIN_DIVIDER + avg->period_contrib; } static inline void cfs_se_util_change(struct sched_avg *avg) diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c index d50a31ecedeec33b785b8b09565041927934962a..b7f38f3ad42a25fcfddae7a7e900df0d622c0c7c 100644 --- a/kernel/sched/psi.c +++ b/kernel/sched/psi.c @@ -1116,7 +1116,6 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group, t->event = 0; t->last_event_time = 0; init_waitqueue_head(&t->event_wait); - kref_init(&t->refcount); mutex_lock(&group->trigger_lock); @@ -1145,15 +1144,19 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group, return t; } -static void psi_trigger_destroy(struct kref *ref) +void psi_trigger_destroy(struct psi_trigger *t) { - struct psi_trigger *t = container_of(ref, struct psi_trigger, refcount); - struct psi_group *group = t->group; + struct psi_group *group; struct task_struct *task_to_destroy = NULL; - if (static_branch_likely(&psi_disabled)) + /* + * We do not check psi_disabled since it might have been disabled after + * the trigger got created. + */ + if (!t) return; + group = t->group; /* * Wakeup waiters to stop polling. Can happen if cgroup is deleted * from under a polling process. @@ -1189,9 +1192,9 @@ static void psi_trigger_destroy(struct kref *ref) mutex_unlock(&group->trigger_lock); /* - * Wait for both *trigger_ptr from psi_trigger_replace and - * poll_task RCUs to complete their read-side critical sections - * before destroying the trigger and optionally the poll_task + * Wait for psi_schedule_poll_work RCU to complete its read-side + * critical section before destroying the trigger and optionally the + * poll_task. 
*/ synchronize_rcu(); /* @@ -1208,18 +1211,6 @@ static void psi_trigger_destroy(struct kref *ref) kfree(t); } -void psi_trigger_replace(void **trigger_ptr, struct psi_trigger *new) -{ - struct psi_trigger *old = *trigger_ptr; - - if (static_branch_likely(&psi_disabled)) - return; - - rcu_assign_pointer(*trigger_ptr, new); - if (old) - kref_put(&old->refcount, psi_trigger_destroy); -} - __poll_t psi_trigger_poll(void **trigger_ptr, struct file *file, poll_table *wait) { @@ -1229,24 +1220,15 @@ __poll_t psi_trigger_poll(void **trigger_ptr, if (static_branch_likely(&psi_disabled)) return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI; - rcu_read_lock(); - - t = rcu_dereference(*(void __rcu __force **)trigger_ptr); - if (!t) { - rcu_read_unlock(); + t = smp_load_acquire(trigger_ptr); + if (!t) return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI; - } - kref_get(&t->refcount); - - rcu_read_unlock(); poll_wait(file, &t->event_wait, wait); if (cmpxchg(&t->event, 1, 0) == 1) ret |= EPOLLPRI; - kref_put(&t->refcount, psi_trigger_destroy); - return ret; } @@ -1270,14 +1252,24 @@ static ssize_t psi_write(struct file *file, const char __user *user_buf, buf[buf_size - 1] = '\0'; - new = psi_trigger_create(&psi_system, buf, nbytes, res); - if (IS_ERR(new)) - return PTR_ERR(new); - seq = file->private_data; + /* Take seq->lock to protect seq->private from concurrent writes */ mutex_lock(&seq->lock); - psi_trigger_replace(&seq->private, new); + + /* Allow only one trigger per file descriptor */ + if (seq->private) { + mutex_unlock(&seq->lock); + return -EBUSY; + } + + new = psi_trigger_create(&psi_system, buf, nbytes, res); + if (IS_ERR(new)) { + mutex_unlock(&seq->lock); + return PTR_ERR(new); + } + + smp_store_release(&seq->private, new); mutex_unlock(&seq->lock); return nbytes; @@ -1312,7 +1304,7 @@ static int psi_fop_release(struct inode *inode, struct file *file) { struct seq_file *seq = file->private_data; - psi_trigger_replace(&seq->private, NULL); + psi_trigger_destroy(seq->private); return single_release(inode, file); } diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index b5cf418e2e3feca37defee4214fae140af66a833..41b14d924203958c65039064d6805cf49ec567f2 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -52,11 +52,8 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) rt_b->rt_period_timer.function = sched_rt_period_timer; } -static void start_rt_bandwidth(struct rt_bandwidth *rt_b) +static inline void do_start_rt_bandwidth(struct rt_bandwidth *rt_b) { - if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF) - return; - raw_spin_lock(&rt_b->rt_runtime_lock); if (!rt_b->rt_period_active) { rt_b->rt_period_active = 1; @@ -75,6 +72,14 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b) raw_spin_unlock(&rt_b->rt_runtime_lock); } +static void start_rt_bandwidth(struct rt_bandwidth *rt_b) +{ + if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF) + return; + + do_start_rt_bandwidth(rt_b); +} + void init_rt_rq(struct rt_rq *rt_rq) { struct rt_prio_array *array; @@ -1022,13 +1027,17 @@ static void update_curr_rt(struct rq *rq) for_each_sched_rt_entity(rt_se) { struct rt_rq *rt_rq = rt_rq_of_se(rt_se); + int exceeded; if (sched_rt_runtime(rt_rq) != RUNTIME_INF) { raw_spin_lock(&rt_rq->rt_runtime_lock); rt_rq->rt_time += delta_exec; - if (sched_rt_runtime_exceeded(rt_rq)) + exceeded = sched_rt_runtime_exceeded(rt_rq); + if (exceeded) resched_curr(rq); raw_spin_unlock(&rt_rq->rt_runtime_lock); + if (exceeded) + 
do_start_rt_bandwidth(sched_rt_bandwidth(rt_rq)); } } } @@ -2727,8 +2736,12 @@ static int sched_rt_global_validate(void) static void sched_rt_do_global(void) { + unsigned long flags; + + raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); def_rt_bandwidth.rt_runtime = global_rt_runtime(); def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period()); + raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); } int sched_rt_handler(struct ctl_table *table, int write, void *buffer, diff --git a/kernel/stackleak.c b/kernel/stackleak.c index ce161a8e8d97585c8b1152fec1ea088343cf15d8..dd07239ddff9f4ba562b7e46d9fcc4af0679fd7e 100644 --- a/kernel/stackleak.c +++ b/kernel/stackleak.c @@ -48,7 +48,7 @@ int stack_erasing_sysctl(struct ctl_table *table, int write, #define skip_erasing() false #endif /* CONFIG_STACKLEAK_RUNTIME_DISABLE */ -asmlinkage void notrace stackleak_erase(void) +asmlinkage void noinstr stackleak_erase(void) { /* It would be nice not to have 'kstack_ptr' and 'boundary' on stack */ unsigned long kstack_ptr = current->lowest_stack; @@ -102,9 +102,8 @@ asmlinkage void notrace stackleak_erase(void) /* Reset the 'lowest_stack' value for the next syscall */ current->lowest_stack = current_top_of_stack() - THREAD_SIZE/64; } -NOKPROBE_SYMBOL(stackleak_erase); -void __used __no_caller_saved_registers notrace stackleak_track_stack(void) +void __used __no_caller_saved_registers noinstr stackleak_track_stack(void) { unsigned long sp = current_stack_pointer; diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 74492f08660c4da4696458a7e9d21e50961bc653..e34ceb91f4c5a60bc7474d0b80281668e40c627e 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -93,6 +93,20 @@ static char override_name[CS_NAME_LEN]; static int finished_booting; static u64 suspend_start; +/* + * Threshold: 0.0312s, when doubled: 0.0625s. + * Also a default for cs->uncertainty_margin when registering clocks. + */ +#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 5) + +/* + * Maximum permissible delay between two readouts of the watchdog + * clocksource surrounding a read of the clocksource being validated. + * This delay could be due to SMIs, NMIs, or to VCPU preemptions. Used as + * a lower bound for cs->uncertainty_margin values when registering clocks. + */ +#define WATCHDOG_MAX_SKEW (100 * NSEC_PER_USEC) + #ifdef CONFIG_CLOCKSOURCE_WATCHDOG static void clocksource_watchdog_work(struct work_struct *work); static void clocksource_select(void); @@ -119,17 +133,9 @@ static int clocksource_watchdog_kthread(void *data); static void __clocksource_change_rating(struct clocksource *cs, int rating); /* - * Interval: 0.5sec Threshold: 0.0625s + * Interval: 0.5sec. */ #define WATCHDOG_INTERVAL (HZ >> 1) -#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4) - -/* - * Maximum permissible delay between two readouts of the watchdog - * clocksource surrounding a read of the clocksource being validated. - * This delay could be due to SMIs, NMIs, or to VCPU preemptions. 
- */ -#define WATCHDOG_MAX_SKEW (100 * NSEC_PER_USEC) static void clocksource_watchdog_work(struct work_struct *work) { @@ -194,17 +200,24 @@ void clocksource_mark_unstable(struct clocksource *cs) static ulong max_cswd_read_retries = 3; module_param(max_cswd_read_retries, ulong, 0644); -static bool cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow) +enum wd_read_status { + WD_READ_SUCCESS, + WD_READ_UNSTABLE, + WD_READ_SKIP +}; + +static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow) { unsigned int nretries; - u64 wd_end, wd_delta; - int64_t wd_delay; + u64 wd_end, wd_end2, wd_delta; + int64_t wd_delay, wd_seq_delay; for (nretries = 0; nretries <= max_cswd_read_retries; nretries++) { local_irq_disable(); *wdnow = watchdog->read(watchdog); *csnow = cs->read(cs); wd_end = watchdog->read(watchdog); + wd_end2 = watchdog->read(watchdog); local_irq_enable(); wd_delta = clocksource_delta(wd_end, *wdnow, watchdog->mask); @@ -215,13 +228,34 @@ static bool cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow) pr_warn("timekeeping watchdog on CPU%d: %s retried %d times before success\n", smp_processor_id(), watchdog->name, nretries); } - return true; + return WD_READ_SUCCESS; } + + /* + * Now compute the delay between consecutive watchdog reads to see if + * there is too much external interference that causes + * significant delay in reading both clocksource and watchdog. + * + * If consecutive WD read-back delay > WATCHDOG_MAX_SKEW/2, + * report system busy, reinit the watchdog and skip the current + * watchdog test. + */ + wd_delta = clocksource_delta(wd_end2, wd_end, watchdog->mask); + wd_seq_delay = clocksource_cyc2ns(wd_delta, watchdog->mult, watchdog->shift); + if (wd_seq_delay > WATCHDOG_MAX_SKEW/2) + goto skip_test; } pr_warn("timekeeping watchdog on CPU%d: %s read-back delay of %lldns, attempt %d, marking unstable\n", smp_processor_id(), watchdog->name, wd_delay, nretries); - return false; + return WD_READ_UNSTABLE; + +skip_test: + pr_info("timekeeping watchdog on CPU%d: %s wd-wd read-back delay of %lldns\n", + smp_processor_id(), watchdog->name, wd_seq_delay); + pr_info("wd-%s-wd read-back delay of %lldns, clock-skew test skipped!\n", + cs->name, wd_delay); + return WD_READ_SKIP; } static u64 csnow_mid; @@ -284,6 +318,8 @@ static void clocksource_watchdog(struct timer_list *unused) int next_cpu, reset_pending; int64_t wd_nsec, cs_nsec; struct clocksource *cs; + enum wd_read_status read_ret; + u32 md; spin_lock(&watchdog_lock); if (!watchdog_running) @@ -300,9 +336,12 @@ static void clocksource_watchdog(struct timer_list *unused) continue; } - if (!cs_watchdog_read(cs, &csnow, &wdnow)) { - /* Clock readout unreliable, so give it up. */ - __clocksource_unstable(cs); + read_ret = cs_watchdog_read(cs, &csnow, &wdnow); + + if (read_ret != WD_READ_SUCCESS) { + if (read_ret == WD_READ_UNSTABLE) + /* Clock readout unreliable, so give it up. */ + __clocksource_unstable(cs); continue; } @@ -330,7 +369,8 @@ static void clocksource_watchdog(struct timer_list *unused) continue; /* Check the deviation from the watchdog clocksource.
*/ - if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) { + md = cs->uncertainty_margin + watchdog->uncertainty_margin; + if (abs(cs_nsec - wd_nsec) > md) { pr_warn("timekeeping watchdog on CPU%d: Marking clocksource '%s' as unstable because the skew is too large:\n", smp_processor_id(), cs->name); pr_warn(" '%s' wd_now: %llx wd_last: %llx mask: %llx\n", @@ -985,6 +1025,26 @@ void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq clocks_calc_mult_shift(&cs->mult, &cs->shift, freq, NSEC_PER_SEC / scale, sec * scale); } + + /* + * If the uncertainty margin is not specified, calculate it. + * If both scale and freq are non-zero, calculate the clock + * period, but bound below at 2*WATCHDOG_MAX_SKEW. However, + * if either of scale or freq is zero, be very conservative and + * take the tens-of-milliseconds WATCHDOG_THRESHOLD value for the + * uncertainty margin. Allow stupidly small uncertainty margins + * to be specified by the caller for testing purposes, but warn + * to discourage production use of this capability. + */ + if (scale && freq && !cs->uncertainty_margin) { + cs->uncertainty_margin = NSEC_PER_SEC / (scale * freq); + if (cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW) + cs->uncertainty_margin = 2 * WATCHDOG_MAX_SKEW; + } else if (!cs->uncertainty_margin) { + cs->uncertainty_margin = WATCHDOG_THRESHOLD; + } + WARN_ON_ONCE(cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW); + /* * Ensure clocksources that have large 'mult' values don't overflow * when adjusted. diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c index eddcf497044459daa04d9370ccd1b7379db31997..65409abcca8e111368bc86983e5b0a72a191ae7d 100644 --- a/kernel/time/jiffies.c +++ b/kernel/time/jiffies.c @@ -49,13 +49,14 @@ static u64 jiffies_read(struct clocksource *cs) * for "tick-less" systems. 
*/ static struct clocksource clocksource_jiffies = { - .name = "jiffies", - .rating = 1, /* lowest valid rating*/ - .read = jiffies_read, - .mask = CLOCKSOURCE_MASK(32), - .mult = TICK_NSEC << JIFFIES_SHIFT, /* details above */ - .shift = JIFFIES_SHIFT, - .max_cycles = 10, + .name = "jiffies", + .rating = 1, /* lowest valid rating*/ + .uncertainty_margin = 32 * NSEC_PER_MSEC, + .read = jiffies_read, + .mask = CLOCKSOURCE_MASK(32), + .mult = TICK_NSEC << JIFFIES_SHIFT, /* details above */ + .shift = JIFFIES_SHIFT, + .max_cycles = 10, }; __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(jiffies_lock); diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index ba644760f50767ac4f9848414239432913e2c9b6..a9e074769881f7bd92432e4cfa358a40c1d12bbf 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1517,9 +1517,6 @@ static const struct bpf_func_proto bpf_perf_prog_read_value_proto = { BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx, void *, buf, u32, size, u64, flags) { -#ifndef CONFIG_X86 - return -ENOENT; -#else static const u32 br_entry_size = sizeof(struct perf_branch_entry); struct perf_branch_stack *br_stack = ctx->data->br_stack; u32 to_copy; @@ -1528,7 +1525,7 @@ BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx, return -EINVAL; if (unlikely(!br_stack)) - return -EINVAL; + return -ENOENT; if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE) return br_stack->nr * br_entry_size; @@ -1540,7 +1537,6 @@ BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx, memcpy(buf, br_stack->entries, to_copy); return to_copy; -#endif } static const struct bpf_func_proto bpf_read_branch_records_proto = { diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index cd2d094b9f820c8a7f3d1915173bda4041ea6544..f9fad789321b0d39cd352765f71ffc279399f12f 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -250,6 +250,10 @@ __setup("trace_clock=", set_trace_boot_clock); static int __init set_tracepoint_printk(char *str) { + /* Ignore the "tp_printk_stop_on_boot" param */ + if (*str == '_') + return 0; + if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0)) tracepoint_printk = 1; return 1; @@ -7257,7 +7261,8 @@ static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr) err = kzalloc(sizeof(*err), GFP_KERNEL); if (!err) err = ERR_PTR(-ENOMEM); - tr->n_err_log_entries++; + else + tr->n_err_log_entries++; return err; } diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 003e5f37861e3621b56ed552858855cabf262450..41a9bd52e1fdcd52bb31cb9f9941683d893d2f06 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -2154,6 +2154,8 @@ static struct hist_field *parse_unary(struct hist_trigger_data *hist_data, (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS); expr->fn = hist_field_unary_minus; expr->operands[0] = operand1; + expr->size = operand1->size; + expr->is_signed = operand1->is_signed; expr->operator = FIELD_OP_UNARY_MINUS; expr->name = expr_str(expr, 0); expr->type = kstrdup(operand1->type, GFP_KERNEL); @@ -2293,6 +2295,7 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data, /* The operand sizes should be the same, so just pick one */ expr->size = operand1->size; + expr->is_signed = operand1->is_signed; expr->operator = field_op; expr->name = expr_str(expr, 0); @@ -3506,6 +3509,7 @@ static int trace_action_create(struct hist_trigger_data *hist_data, var_ref_idx = find_var_ref_idx(hist_data, var_ref); if 
(WARN_ON(var_ref_idx < 0)) { + kfree(p); ret = var_ref_idx; goto err; } diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index f725802160c0bc05fab47c29f72234219b6612cc..d0309de2f84fea5bfce5cbe4a8321be92c547057 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c @@ -940,6 +940,16 @@ static void traceon_trigger(struct event_trigger_data *data, void *rec, struct ring_buffer_event *event) { + struct trace_event_file *file = data->private_data; + + if (file) { + if (tracer_tracing_is_on(file->tr)) + return; + + tracer_tracing_on(file->tr); + return; + } + if (tracing_is_on()) return; @@ -950,8 +960,15 @@ static void traceon_count_trigger(struct event_trigger_data *data, void *rec, struct ring_buffer_event *event) { - if (tracing_is_on()) - return; + struct trace_event_file *file = data->private_data; + + if (file) { + if (tracer_tracing_is_on(file->tr)) + return; + } else { + if (tracing_is_on()) + return; + } if (!data->count) return; @@ -959,13 +976,26 @@ traceon_count_trigger(struct event_trigger_data *data, void *rec, if (data->count != -1) (data->count)--; - tracing_on(); + if (file) + tracer_tracing_on(file->tr); + else + tracing_on(); } static void traceoff_trigger(struct event_trigger_data *data, void *rec, struct ring_buffer_event *event) { + struct trace_event_file *file = data->private_data; + + if (file) { + if (!tracer_tracing_is_on(file->tr)) + return; + + tracer_tracing_off(file->tr); + return; + } + if (!tracing_is_on()) return; @@ -976,8 +1006,15 @@ static void traceoff_count_trigger(struct event_trigger_data *data, void *rec, struct ring_buffer_event *event) { - if (!tracing_is_on()) - return; + struct trace_event_file *file = data->private_data; + + if (file) { + if (!tracer_tracing_is_on(file->tr)) + return; + } else { + if (!tracing_is_on()) + return; + } if (!data->count) return; @@ -985,7 +1022,10 @@ traceoff_count_trigger(struct event_trigger_data *data, void *rec, if (data->count != -1) (data->count)--; - tracing_off(); + if (file) + tracer_tracing_off(file->tr); + else + tracing_off(); } static int diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 552dbc9d522603a07fea2763af0b798a02406a8c..d8a9fc79412667144b4e133a347a93dc4c8c144a 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1183,15 +1183,18 @@ static int probes_profile_seq_show(struct seq_file *m, void *v) { struct dyn_event *ev = v; struct trace_kprobe *tk; + unsigned long nmissed; if (!is_trace_kprobe(ev)) return 0; tk = to_trace_kprobe(ev); + nmissed = trace_kprobe_is_return(tk) ? 
+ tk->rp.kp.nmissed + tk->rp.nmissed : tk->rp.kp.nmissed; seq_printf(m, " %-44s %15lu %15lu\n", trace_probe_name(&tk->tp), trace_kprobe_nhit(tk), - tk->rp.kp.nmissed); + nmissed); return 0; } diff --git a/kernel/tsacct.c b/kernel/tsacct.c index 257ffb993ea2358bd81412f305f5e28f6982c060..fd2f7a052fdd94d6871e13420a73d27287e22c2a 100644 --- a/kernel/tsacct.c +++ b/kernel/tsacct.c @@ -38,11 +38,10 @@ void bacct_add_tsk(struct user_namespace *user_ns, stats->ac_btime = clamp_t(time64_t, btime, 0, U32_MAX); stats->ac_btime64 = btime; - if (thread_group_leader(tsk)) { + if (tsk->flags & PF_EXITING) stats->ac_exitcode = tsk->exit_code; - if (tsk->flags & PF_FORKNOEXEC) - stats->ac_flag |= AFORK; - } + if (thread_group_leader(tsk) && (tsk->flags & PF_FORKNOEXEC)) + stats->ac_flag |= AFORK; if (tsk->flags & PF_SUPERPRIV) stats->ac_flag |= ASU; if (tsk->flags & PF_DUMPCORE) diff --git a/lib/iov_iter.c b/lib/iov_iter.c index b364231b5fc8cc629f3cdf496e2b3a72ec5efbd3..1b0a349fbcd926b4e49e48728c545395e0ae6bf5 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -407,6 +407,7 @@ static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t by return 0; buf->ops = &page_cache_pipe_buf_ops; + buf->flags = 0; get_page(page); buf->page = page; buf->offset = offset; @@ -543,6 +544,7 @@ static size_t push_pipe(struct iov_iter *i, size_t size, break; buf->ops = &default_pipe_buf_ops; + buf->flags = 0; buf->page = page; buf->offset = 0; buf->len = min_t(ssize_t, left, PAGE_SIZE); diff --git a/lib/mpi/mpi-mod.c b/lib/mpi/mpi-mod.c index 47bc59edd4ff939f961389bda33ede24917f53ec..54fcc01564d9dc6f1a1ae5b58ad820e503f64fbe 100644 --- a/lib/mpi/mpi-mod.c +++ b/lib/mpi/mpi-mod.c @@ -40,6 +40,8 @@ mpi_barrett_t mpi_barrett_init(MPI m, int copy) mpi_normalize(m); ctx = kcalloc(1, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return NULL; if (copy) { ctx->m = mpi_copy(m); diff --git a/lib/test_hmm.c b/lib/test_hmm.c index 80a78877bd939714ab56d9a155b1e490e915548f..a85613068d6019215e2ee4c262a8873589e168b8 100644 --- a/lib/test_hmm.c +++ b/lib/test_hmm.c @@ -965,9 +965,33 @@ static long dmirror_fops_unlocked_ioctl(struct file *filp, return 0; } +static int dmirror_fops_mmap(struct file *file, struct vm_area_struct *vma) +{ + unsigned long addr; + + for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) { + struct page *page; + int ret; + + page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!page) + return -ENOMEM; + + ret = vm_insert_page(vma, addr, page); + if (ret) { + __free_page(page); + return ret; + } + put_page(page); + } + + return 0; +} + static const struct file_operations dmirror_fops = { .open = dmirror_fops_open, .release = dmirror_fops_release, + .mmap = dmirror_fops_mmap, .unlocked_ioctl = dmirror_fops_unlocked_ioctl, .llseek = default_llseek, .owner = THIS_MODULE, diff --git a/lib/test_meminit.c b/lib/test_meminit.c index e4f706a404b3a1f45b3443b4f8de0fbdd15107f8..3ca717f1139774db6b64df470dee4938bba2d7b4 100644 --- a/lib/test_meminit.c +++ b/lib/test_meminit.c @@ -337,6 +337,7 @@ static int __init do_kmem_cache_size_bulk(int size, int *total_failures) if (num) kmem_cache_free_bulk(c, num, objects); } + kmem_cache_destroy(c); *total_failures += fail; return 1; } diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c index 12ebc97e8b4354c72edf35f96239e3b53919dc76..d6fbf28ebf72c7fcd30c720d1f45ee2968bc847d 100644 --- a/mm/debug_vm_pgtable.c +++ b/mm/debug_vm_pgtable.c @@ -128,6 +128,8 @@ static void __init pte_advanced_tests(struct mm_struct *mm, ptep_test_and_clear_young(vma, vaddr, ptep); 
pte = ptep_get(ptep); WARN_ON(pte_young(pte)); + + ptep_get_and_clear_full(mm, vaddr, ptep, 1); } static void __init pte_savedwrite_tests(unsigned long pfn, pgprot_t prot) diff --git a/mm/hmm.c b/mm/hmm.c index fb617054f96316e79ad22007379aab8582f7d72b..cbe9d0c66650452ba3e4a57b6af6c7f8f0940f56 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -296,7 +296,8 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr, * Since each architecture defines a struct page for the zero page, just * fall through and treat it like a normal page. */ - if (pte_special(pte) && !pte_devmap(pte) && + if (!vm_normal_page(walk->vma, addr, pte) && + !pte_devmap(pte) && !is_zero_pfn(pte_pfn(pte))) { if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) { pte_unmap(ptep); @@ -514,7 +515,7 @@ static int hmm_vma_walk_test(unsigned long start, unsigned long end, struct hmm_range *range = hmm_vma_walk->range; struct vm_area_struct *vma = walk->vma; - if (!(vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP)) && + if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)) && vma->vm_flags & VM_READ) return 0; diff --git a/mm/kmemleak.c b/mm/kmemleak.c index c0014d3b91c10f2689f99d83c9ba2bd04cbbfea7..56fcfcb8e61730d341f26cb9a89ef0ab710a7d9a 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -1401,7 +1401,8 @@ static void kmemleak_scan(void) { unsigned long flags; struct kmemleak_object *object; - int i; + struct zone *zone; + int __maybe_unused i; int new_leaks = 0; jiffies_last_scan = jiffies; @@ -1441,9 +1442,9 @@ static void kmemleak_scan(void) * Struct page scanning for each node. */ get_online_mems(); - for_each_online_node(i) { - unsigned long start_pfn = node_start_pfn(i); - unsigned long end_pfn = node_end_pfn(i); + for_each_populated_zone(zone) { + unsigned long start_pfn = zone->zone_start_pfn; + unsigned long end_pfn = zone_end_pfn(zone); unsigned long pfn; for (pfn = start_pfn; pfn < end_pfn; pfn++) { @@ -1452,8 +1453,8 @@ static void kmemleak_scan(void) if (!page) continue; - /* only scan pages belonging to this node */ - if (page_to_nid(page) != i) + /* only scan pages belonging to this zone */ + if (page_zone(page) != zone) continue; /* only scan if page is in use */ if (page_count(page) == 0) diff --git a/mm/memblock.c b/mm/memblock.c index faa4de579b3db191cf443e6ddd89283e456f7e9c..f72d539570339743eabc3f437aa73a7c92dcbcf5 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -366,14 +366,20 @@ void __init memblock_discard(void) addr = __pa(memblock.reserved.regions); size = PAGE_ALIGN(sizeof(struct memblock_region) * memblock.reserved.max); - __memblock_free_late(addr, size); + if (memblock_reserved_in_slab) + kfree(memblock.reserved.regions); + else + __memblock_free_late(addr, size); } if (memblock.memory.regions != memblock_memory_init_regions) { addr = __pa(memblock.memory.regions); size = PAGE_ALIGN(sizeof(struct memblock_region) * memblock.memory.max); - __memblock_free_late(addr, size); + if (memblock_memory_in_slab) + kfree(memblock.memory.regions); + else + __memblock_free_late(addr, size); } memblock_memory = NULL; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 4bb2a4c593f73200fb5cdd68b51705ed62b38ec6..dbe07fef268282cc7df52d3b503adfc9fb9d9717 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -250,7 +250,7 @@ struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr) } #ifdef CONFIG_MEMCG_KMEM -extern spinlock_t css_set_lock; +static DEFINE_SPINLOCK(objcg_lock); static void obj_cgroup_release(struct percpu_ref *ref) { @@ -284,13 +284,13 @@ static void obj_cgroup_release(struct percpu_ref 
*ref) WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1)); nr_pages = nr_bytes >> PAGE_SHIFT; - spin_lock_irqsave(&css_set_lock, flags); + spin_lock_irqsave(&objcg_lock, flags); memcg = obj_cgroup_memcg(objcg); if (nr_pages) __memcg_kmem_uncharge(memcg, nr_pages); list_del(&objcg->list); mem_cgroup_put(memcg); - spin_unlock_irqrestore(&css_set_lock, flags); + spin_unlock_irqrestore(&objcg_lock, flags); percpu_ref_exit(ref); kfree_rcu(objcg, rcu); @@ -322,7 +322,7 @@ static void memcg_reparent_objcgs(struct mem_cgroup *memcg, objcg = rcu_replace_pointer(memcg->objcg, NULL, true); - spin_lock_irq(&css_set_lock); + spin_lock_irq(&objcg_lock); /* Move active objcg to the parent's list */ xchg(&objcg->memcg, parent); @@ -337,7 +337,7 @@ static void memcg_reparent_objcgs(struct mem_cgroup *memcg, } list_splice(&memcg->objcg_list, &parent->objcg_list); - spin_unlock_irq(&css_set_lock); + spin_unlock_irq(&objcg_lock); percpu_ref_kill(&objcg->refcnt); } diff --git a/mm/mprotect.c b/mm/mprotect.c index 56c02beb60414129b898416a244f8404cc546e9a..7ea0aee0c08d95b8533fa8cfe73db61f91f17c84 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -94,7 +94,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, /* Also skip shared copy-on-write pages */ if (is_cow_mapping(vma->vm_flags) && - page_mapcount(page) != 1) + page_count(page) != 1) continue; /* diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 2996887221208bb23bac11468a1d42e580f41525..8446d59c63044737bc442a267ac6d52b3dd4998f 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3964,7 +3964,9 @@ void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) va_list args; static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1); - if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs)) + if ((gfp_mask & __GFP_NOWARN) || + !__ratelimit(&nopage_rs) || + ((gfp_mask & __GFP_DMA) && !has_managed_dma())) return; va_start(args, fmt); @@ -8901,3 +8903,18 @@ bool take_page_off_buddy(struct page *page) return ret; } #endif + +#ifdef CONFIG_ZONE_DMA +bool has_managed_dma(void) +{ + struct pglist_data *pgdat; + + for_each_online_pgdat(pgdat) { + struct zone *zone = &pgdat->node_zones[ZONE_DMA]; + + if (managed_zone(zone)) + return true; + } + return false; +} +#endif /* CONFIG_ZONE_DMA */ diff --git a/mm/shmem.c b/mm/shmem.c index ae8adca3b56d1d5073b92983c87ffaa8d3899baa..d3d8c5e7a296bfb74d65127052ddc020e6575c9f 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -527,7 +527,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, struct shmem_inode_info *info; struct page *page; unsigned long batch = sc ? 
sc->nr_to_scan : 128; - int removed = 0, split = 0; + int split = 0; if (list_empty(&sbinfo->shrinklist)) return SHRINK_STOP; @@ -542,7 +542,6 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, /* inode is about to be evicted */ if (!inode) { list_del_init(&info->shrinklist); - removed++; goto next; } @@ -550,12 +549,12 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, if (round_up(inode->i_size, PAGE_SIZE) == round_up(inode->i_size, HPAGE_PMD_SIZE)) { list_move(&info->shrinklist, &to_remove); - removed++; goto next; } list_move(&info->shrinklist, &list); next: + sbinfo->shrinklist_len--; if (!--batch) break; } @@ -575,7 +574,7 @@ next: inode = &info->vfs_inode; if (nr_to_split && split >= nr_to_split) - goto leave; + goto move_back; page = find_get_page(inode->i_mapping, (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT); @@ -589,38 +588,44 @@ next: } /* - * Leave the inode on the list if we failed to lock - * the page at this time. + * Move the inode on the list back to shrinklist if we failed + * to lock the page at this time. * * Waiting for the lock may lead to deadlock in the * reclaim path. */ if (!trylock_page(page)) { put_page(page); - goto leave; + goto move_back; } ret = split_huge_page(page); unlock_page(page); put_page(page); - /* If split failed leave the inode on the list */ + /* If split failed move the inode on the list back to shrinklist */ if (ret) - goto leave; + goto move_back; split++; drop: list_del_init(&info->shrinklist); - removed++; -leave: + goto put; +move_back: + /* + * Make sure the inode is either on the global list or deleted + * from any local list before iput() since it could be deleted + * in another thread once we put the inode (then the local list + * is corrupted). + */ + spin_lock(&sbinfo->shrinklist_lock); + list_move(&info->shrinklist, &sbinfo->shrinklist); + sbinfo->shrinklist_len++; + spin_unlock(&sbinfo->shrinklist_lock); +put: iput(inode); } - spin_lock(&sbinfo->shrinklist_lock); - list_splice_tail(&list, &sbinfo->shrinklist); - sbinfo->shrinklist_len -= removed; - spin_unlock(&sbinfo->shrinklist_lock); - return split; } diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 22278807b3f36dd4519ed39d916b460c50cfc498..23bd26057a828cd376bf4e3c0db411ee506fe26f 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -77,6 +77,7 @@ static void ax25_kill_by_device(struct net_device *dev) { ax25_dev *ax25_dev; ax25_cb *s; + struct sock *sk; if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) return; @@ -85,13 +86,15 @@ static void ax25_kill_by_device(struct net_device *dev) again: ax25_for_each(s, &ax25_list) { if (s->ax25_dev == ax25_dev) { + sk = s->sk; + sock_hold(sk); spin_unlock_bh(&ax25_list_lock); - lock_sock(s->sk); + lock_sock(sk); s->ax25_dev = NULL; - release_sock(s->sk); + release_sock(sk); ax25_disconnect(s, ENETUNREACH); spin_lock_bh(&ax25_list_lock); - + sock_put(sk); /* The entry could have been deleted from the * list meanwhile and thus the next pointer is * no longer valid. 
Play it safe and restart @@ -536,7 +539,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname, ax25_cb *ax25; struct net_device *dev; char devname[IFNAMSIZ]; - unsigned long opt; + unsigned int opt; int res = 0; if (level != SOL_AX25) @@ -568,7 +571,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname, break; case AX25_T1: - if (opt < 1 || opt > ULONG_MAX / HZ) { + if (opt < 1 || opt > UINT_MAX / HZ) { res = -EINVAL; break; } @@ -577,7 +580,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname, break; case AX25_T2: - if (opt < 1 || opt > ULONG_MAX / HZ) { + if (opt < 1 || opt > UINT_MAX / HZ) { res = -EINVAL; break; } @@ -593,7 +596,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname, break; case AX25_T3: - if (opt < 1 || opt > ULONG_MAX / HZ) { + if (opt < 1 || opt > UINT_MAX / HZ) { res = -EINVAL; break; } @@ -601,7 +604,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname, break; case AX25_IDLE: - if (opt > ULONG_MAX / (60 * HZ)) { + if (opt > UINT_MAX / (60 * HZ)) { res = -EINVAL; break; } diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c index c7a55647b520e87d560bdcb29a644eef30f2855c..121459704b0693e7597e90c7389034f66a2d13ea 100644 --- a/net/batman-adv/netlink.c +++ b/net/batman-adv/netlink.c @@ -1361,21 +1361,21 @@ static const struct genl_small_ops batadv_netlink_ops[] = { { .cmd = BATADV_CMD_TP_METER, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .doit = batadv_netlink_tp_meter_start, .internal_flags = BATADV_FLAG_NEED_MESH, }, { .cmd = BATADV_CMD_TP_METER_CANCEL, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .doit = batadv_netlink_tp_meter_cancel, .internal_flags = BATADV_FLAG_NEED_MESH, }, { .cmd = BATADV_CMD_GET_ROUTING_ALGOS, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .dumpit = batadv_algo_dump, }, { @@ -1390,68 +1390,68 @@ static const struct genl_small_ops batadv_netlink_ops[] = { { .cmd = BATADV_CMD_GET_TRANSTABLE_LOCAL, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .dumpit = batadv_tt_local_dump, }, { .cmd = BATADV_CMD_GET_TRANSTABLE_GLOBAL, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .dumpit = batadv_tt_global_dump, }, { .cmd = BATADV_CMD_GET_ORIGINATORS, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .dumpit = batadv_orig_dump, }, { .cmd = BATADV_CMD_GET_NEIGHBORS, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .dumpit = batadv_hardif_neigh_dump, }, { .cmd = BATADV_CMD_GET_GATEWAYS, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .dumpit = batadv_gw_dump, }, { .cmd = BATADV_CMD_GET_BLA_CLAIM, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .dumpit = batadv_bla_claim_dump, }, { .cmd = BATADV_CMD_GET_BLA_BACKBONE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .dumpit = 
batadv_bla_backbone_dump, }, { .cmd = BATADV_CMD_GET_DAT_CACHE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .dumpit = batadv_dat_cache_dump, }, { .cmd = BATADV_CMD_GET_MCAST_FLAGS, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .dumpit = batadv_mcast_flags_dump, }, { .cmd = BATADV_CMD_SET_MESH, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .doit = batadv_netlink_set_mesh, .internal_flags = BATADV_FLAG_NEED_MESH, }, { .cmd = BATADV_CMD_SET_HARDIF, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .doit = batadv_netlink_set_hardif, .internal_flags = BATADV_FLAG_NEED_MESH | BATADV_FLAG_NEED_HARDIF, @@ -1467,7 +1467,7 @@ static const struct genl_small_ops batadv_netlink_ops[] = { { .cmd = BATADV_CMD_SET_VLAN, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .doit = batadv_netlink_set_vlan, .internal_flags = BATADV_FLAG_NEED_MESH | BATADV_FLAG_NEED_VLAN, diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c index 0a2d78e811cf5bbea7d735bc345a1ddc9a46cbf8..83eb84e8e688f3bcab98c99758ee99ae7f85dc46 100644 --- a/net/bluetooth/cmtp/core.c +++ b/net/bluetooth/cmtp/core.c @@ -501,9 +501,7 @@ static int __init cmtp_init(void) { BT_INFO("CMTP (CAPI Emulation) ver %s", VERSION); - cmtp_init_sockets(); - - return 0; + return cmtp_init_sockets(); } static void __exit cmtp_exit(void) diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 2ad66f64879f1863e3f441b288deb82e673c5310..2e7998bad133bc45ca5e88c2b5f9d079c5a9ac4e 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -3810,6 +3810,7 @@ int hci_register_dev(struct hci_dev *hdev) return id; err_wqueue: + debugfs_remove_recursive(hdev->debugfs); destroy_workqueue(hdev->workqueue); destroy_workqueue(hdev->req_workqueue); err: diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 9f52145bb7b7639e5c221778188126f8143ef870..72b4127360c7f24f8036471c7ff3d3538fbb02c5 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -5661,7 +5661,13 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) struct hci_ev_le_advertising_info *ev = ptr; s8 rssi; - if (ev->length <= HCI_MAX_AD_LENGTH) { + if (ptr > (void *)skb_tail_pointer(skb) - sizeof(*ev)) { + bt_dev_err(hdev, "Malicious advertising data."); + break; + } + + if (ev->length <= HCI_MAX_AD_LENGTH && + ev->data + ev->length <= skb_tail_pointer(skb)) { rssi = ev->data[ev->length]; process_adv_report(hdev, ev->evt_type, &ev->bdaddr, ev->bdaddr_type, NULL, 0, rssi, diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 1a94ed2f8a4f8ebe72a706f01f9472b67b3e92e6..d965b7c66bd628b760e28d51ad441deb4a6a8f0b 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -2118,7 +2118,7 @@ int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance) /* Set duration per instance since controller is responsible for * scheduling it. 
*/ - if (adv_instance && adv_instance->duration) { + if (adv_instance && adv_instance->timeout) { u16 duration = adv_instance->timeout * MSEC_PER_SEC; /* Time = N * 10 ms */ diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index 160c016a5dfb9eb7842cabc7e8fb2d5c0914d497..d2c67852059921bfd188eee1a4a79736f9415323 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -161,7 +161,11 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) break; } - if (chan->psm && bdaddr_type_is_le(chan->src_type)) + /* Use L2CAP_MODE_LE_FLOWCTL (CoC) in case of LE address and + * L2CAP_MODE_EXT_FLOWCTL (ECRED) has not been set. + */ + if (chan->psm && bdaddr_type_is_le(chan->src_type) && + chan->mode != L2CAP_MODE_EXT_FLOWCTL) chan->mode = L2CAP_MODE_LE_FLOWCTL; chan->state = BT_BOUND; @@ -172,6 +176,21 @@ done: return err; } +static void l2cap_sock_init_pid(struct sock *sk) +{ + struct l2cap_chan *chan = l2cap_pi(sk)->chan; + + /* Only L2CAP_MODE_EXT_FLOWCTL ever need to access the PID in order to + * group the channels being requested. + */ + if (chan->mode != L2CAP_MODE_EXT_FLOWCTL) + return; + + spin_lock(&sk->sk_peer_lock); + sk->sk_peer_pid = get_pid(task_tgid(current)); + spin_unlock(&sk->sk_peer_lock); +} + static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) { @@ -240,9 +259,15 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, return -EINVAL; } - if (chan->psm && bdaddr_type_is_le(chan->src_type) && !chan->mode) + /* Use L2CAP_MODE_LE_FLOWCTL (CoC) in case of LE address and + * L2CAP_MODE_EXT_FLOWCTL (ECRED) has not been set. + */ + if (chan->psm && bdaddr_type_is_le(chan->src_type) && + chan->mode != L2CAP_MODE_EXT_FLOWCTL) chan->mode = L2CAP_MODE_LE_FLOWCTL; + l2cap_sock_init_pid(sk); + err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid), &la.l2_bdaddr, la.l2_bdaddr_type); if (err) @@ -298,6 +323,8 @@ static int l2cap_sock_listen(struct socket *sock, int backlog) goto done; } + l2cap_sock_init_pid(sk); + sk->sk_max_ack_backlog = backlog; sk->sk_ack_backlog = 0; @@ -876,6 +903,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, struct l2cap_conn *conn; int len, err = 0; u32 opt; + u16 mtu; + u8 mode; BT_DBG("sk %p", sk); @@ -1058,16 +1087,16 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, break; } - if (copy_from_sockptr(&opt, optval, sizeof(u16))) { + if (copy_from_sockptr(&mtu, optval, sizeof(u16))) { err = -EFAULT; break; } if (chan->mode == L2CAP_MODE_EXT_FLOWCTL && sk->sk_state == BT_CONNECTED) - err = l2cap_chan_reconfigure(chan, opt); + err = l2cap_chan_reconfigure(chan, mtu); else - chan->imtu = opt; + chan->imtu = mtu; break; @@ -1089,14 +1118,14 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, break; } - if (copy_from_sockptr(&opt, optval, sizeof(u8))) { + if (copy_from_sockptr(&mode, optval, sizeof(u8))) { err = -EFAULT; break; } - BT_DBG("opt %u", opt); + BT_DBG("mode %u", mode); - err = l2cap_set_mode(chan, opt); + err = l2cap_set_mode(chan, mode); if (err) break; diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index 8edfb98ae1d583665f5396b5110a3875e0518561..68c0d0f928908e44ade334aa9931279190be8a13 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@ -743,6 +743,9 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff if (nf_bridge->frag_max_size && 
nf_bridge->frag_max_size < mtu) mtu = nf_bridge->frag_max_size; + nf_bridge_update_protocol(skb); + nf_bridge_push_encap_header(skb); + if (skb_is_gso(skb) || skb->len + mtu_reserved <= mtu) { nf_bridge_info_free(skb); return br_dev_queue_push_xmit(net, sk, skb); @@ -760,8 +763,6 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff IPCB(skb)->frag_max_size = nf_bridge->frag_max_size; - nf_bridge_update_protocol(skb); - data = this_cpu_ptr(&brnf_frag_data_storage); if (skb_vlan_tag_present(skb)) { @@ -789,8 +790,6 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size; - nf_bridge_update_protocol(skb); - data = this_cpu_ptr(&brnf_frag_data_storage); data->encap_size = nf_bridge_encap_header_len(skb); data->size = ETH_HLEN + data->encap_size; diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 08c77418c687b75e1ca303922314b010397296fc..852f4b54e8811a275ca3df6d261c5a9c481b6db9 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -543,10 +543,10 @@ static bool __allowed_ingress(const struct net_bridge *br, if (!br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) { if (*state == BR_STATE_FORWARDING) { *state = br_vlan_get_pvid_state(vg); - return br_vlan_state_allowed(*state, true); - } else { - return true; + if (!br_vlan_state_allowed(*state, true)) + goto drop; } + return true; } } v = br_vlan_find(vg, *vid); @@ -1873,7 +1873,8 @@ static int br_vlan_rtm_dump(struct sk_buff *skb, struct netlink_callback *cb) goto out_err; } err = br_vlan_dump_dev(dev, skb, cb, dump_flags); - if (err && err != -EMSGSIZE) + /* if the dump completed without an error we return 0 here */ + if (err != -EMSGSIZE) goto out_err; } else { for_each_netdev_rcu(net, dev) { diff --git a/net/can/isotp.c b/net/can/isotp.c index 53ce5b6448a5d70802ca4e07e48f439eed35d4ec..d0581dc6a65fd5565969f084e95fe126e17f947b 100644 --- a/net/can/isotp.c +++ b/net/can/isotp.c @@ -56,6 +56,7 @@ #include #include #include +#include #include #include #include @@ -145,6 +146,7 @@ struct isotp_sock { struct tpcon rx, tx; struct list_head notifier; wait_queue_head_t wait; + spinlock_t rx_lock; /* protect single thread state machine */ }; static LIST_HEAD(isotp_notifier_list); @@ -615,11 +617,17 @@ static void isotp_rcv(struct sk_buff *skb, void *data) n_pci_type = cf->data[ae] & 0xF0; + /* Make sure the state changes and data structures stay consistent at + * CAN frame reception time. This locking is not needed in real world + * use cases but the inconsistency can be triggered with syzkaller. + */ + spin_lock(&so->rx_lock); + if (so->opt.flags & CAN_ISOTP_HALF_DUPLEX) { /* check rx/tx path half duplex expectations */ if ((so->tx.state != ISOTP_IDLE && n_pci_type != N_PCI_FC) || (so->rx.state != ISOTP_IDLE && n_pci_type == N_PCI_FC)) - return; + goto out_unlock; } switch (n_pci_type) { @@ -668,6 +676,9 @@ static void isotp_rcv(struct sk_buff *skb, void *data) isotp_rcv_cf(sk, cf, ae, skb); break; } + +out_unlock: + spin_unlock(&so->rx_lock); } static void isotp_fill_dataframe(struct canfd_frame *cf, struct isotp_sock *so, @@ -874,24 +885,34 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) if (!size || size > MAX_MSG_LENGTH) { err = -EINVAL; - goto err_out; + goto err_out_drop; + } + + /* take care of a potential SF_DL ESC offset for TX_DL > 8 */ + off = (so->tx.ll_dl > CAN_MAX_DLEN) ? 1 : 0; + + /* does the given data fit into a single frame for SF_BROADCAST? 
*/ + if ((so->opt.flags & CAN_ISOTP_SF_BROADCAST) && + (size > so->tx.ll_dl - SF_PCI_SZ4 - ae - off)) { + err = -EINVAL; + goto err_out_drop; } err = memcpy_from_msg(so->tx.buf, msg, size); if (err < 0) - goto err_out; + goto err_out_drop; dev = dev_get_by_index(sock_net(sk), so->ifindex); if (!dev) { err = -ENXIO; - goto err_out; + goto err_out_drop; } skb = sock_alloc_send_skb(sk, so->ll.mtu + sizeof(struct can_skb_priv), msg->msg_flags & MSG_DONTWAIT, &err); if (!skb) { dev_put(dev); - goto err_out; + goto err_out_drop; } can_skb_reserve(skb); @@ -904,9 +925,6 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) cf = (struct canfd_frame *)skb->data; skb_put_zero(skb, so->ll.mtu); - /* take care of a potential SF_DL ESC offset for TX_DL > 8 */ - off = (so->tx.ll_dl > CAN_MAX_DLEN) ? 1 : 0; - /* check for single frame transmission depending on TX_DL */ if (size <= so->tx.ll_dl - SF_PCI_SZ4 - ae - off) { /* The message size generally fits into a SingleFrame - good. @@ -956,7 +974,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) if (err) { pr_notice_once("can-isotp: %s: can_send_ret %d\n", __func__, err); - goto err_out; + goto err_out_drop; } if (wait_tx_done) { @@ -969,6 +987,9 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) return size; +err_out_drop: + /* drop this PDU and unlock a potential wait queue */ + old_state = ISOTP_IDLE; err_out: so->tx.state = old_state; if (so->tx.state == ISOTP_IDLE) @@ -1043,7 +1064,7 @@ static int isotp_release(struct socket *sock) lock_sock(sk); /* remove current filters & unregister */ - if (so->bound) { + if (so->bound && (!(so->opt.flags & CAN_ISOTP_SF_BROADCAST))) { if (so->ifindex) { struct net_device *dev; @@ -1083,15 +1104,12 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len) struct net_device *dev; int err = 0; int notify_enetdown = 0; + int do_rx_reg = 1; if (len < ISOTP_MIN_NAMELEN) return -EINVAL; - if (addr->can_addr.tp.rx_id == addr->can_addr.tp.tx_id) - return -EADDRNOTAVAIL; - - if ((addr->can_addr.tp.rx_id | addr->can_addr.tp.tx_id) & - (CAN_ERR_FLAG | CAN_RTR_FLAG)) + if (addr->can_addr.tp.tx_id & (CAN_ERR_FLAG | CAN_RTR_FLAG)) return -EADDRNOTAVAIL; if (!addr->can_ifindex) @@ -1099,6 +1117,23 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len) lock_sock(sk); + /* do not register frame reception for functional addressing */ + if (so->opt.flags & CAN_ISOTP_SF_BROADCAST) + do_rx_reg = 0; + + /* do not validate rx address for functional addressing */ + if (do_rx_reg) { + if (addr->can_addr.tp.rx_id == addr->can_addr.tp.tx_id) { + err = -EADDRNOTAVAIL; + goto out; + } + + if (addr->can_addr.tp.rx_id & (CAN_ERR_FLAG | CAN_RTR_FLAG)) { + err = -EADDRNOTAVAIL; + goto out; + } + } + if (so->bound && addr->can_ifindex == so->ifindex && addr->can_addr.tp.rx_id == so->rxid && addr->can_addr.tp.tx_id == so->txid) @@ -1124,13 +1159,14 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len) ifindex = dev->ifindex; - can_rx_register(net, dev, addr->can_addr.tp.rx_id, - SINGLE_MASK(addr->can_addr.tp.rx_id), isotp_rcv, sk, - "isotp", sk); + if (do_rx_reg) + can_rx_register(net, dev, addr->can_addr.tp.rx_id, + SINGLE_MASK(addr->can_addr.tp.rx_id), + isotp_rcv, sk, "isotp", sk); dev_put(dev); - if (so->bound) { + if (so->bound && do_rx_reg) { /* unregister old filter */ if (so->ifindex) { dev = dev_get_by_index(net, so->ifindex); @@ -1179,16 +1215,13 @@ static int 
isotp_getname(struct socket *sock, struct sockaddr *uaddr, int peer) return ISOTP_MIN_NAMELEN; } -static int isotp_setsockopt(struct socket *sock, int level, int optname, +static int isotp_setsockopt_locked(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct isotp_sock *so = isotp_sk(sk); int ret = 0; - if (level != SOL_CAN_ISOTP) - return -EINVAL; - if (so->bound) return -EISCONN; @@ -1263,6 +1296,22 @@ static int isotp_setsockopt(struct socket *sock, int level, int optname, return ret; } +static int isotp_setsockopt(struct socket *sock, int level, int optname, + sockptr_t optval, unsigned int optlen) + +{ + struct sock *sk = sock->sk; + int ret; + + if (level != SOL_CAN_ISOTP) + return -EINVAL; + + lock_sock(sk); + ret = isotp_setsockopt_locked(sock, level, optname, optval, optlen); + release_sock(sk); + return ret; +} + static int isotp_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { @@ -1330,7 +1379,7 @@ static void isotp_notify(struct isotp_sock *so, unsigned long msg, case NETDEV_UNREGISTER: lock_sock(sk); /* remove current filters & unregister */ - if (so->bound) + if (so->bound && (!(so->opt.flags & CAN_ISOTP_SF_BROADCAST))) can_rx_unregister(dev_net(dev), dev, so->rxid, SINGLE_MASK(so->rxid), isotp_rcv, sk); @@ -1407,6 +1456,7 @@ static int isotp_init(struct sock *sk) so->txtimer.function = isotp_tx_timer_handler; init_waitqueue_head(&so->wait); + spin_lock_init(&so->rx_lock); spin_lock(&isotp_notifier_lock); list_add_tail(&so->notifier, &isotp_notifier_list); diff --git a/net/core/dev.c b/net/core/dev.c index 60cf3cd0c282f8c538675af7865cb0d86da10eda..0bab2aca07fd37e7c16609b3cc6139fe184d042c 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -9339,6 +9339,12 @@ static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog, goto out_unlock; } old_prog = link->prog; + if (old_prog->type != new_prog->type || + old_prog->expected_attach_type != new_prog->expected_attach_type) { + err = -EINVAL; + goto out_unlock; + } + if (old_prog == new_prog) { /* no-op, don't disturb drivers */ bpf_prog_put(new_prog); diff --git a/net/core/devlink.c b/net/core/devlink.c index 442b67c044a9f9589744bae185f76ce1f1d89753..646d90f63dafc72c4a5bd6bcfda276677dec98c7 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -7852,8 +7852,6 @@ static const struct genl_small_ops devlink_nl_ops[] = { GENL_DONT_VALIDATE_DUMP_STRICT, .dumpit = devlink_nl_cmd_health_reporter_dump_get_dumpit, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT | - DEVLINK_NL_FLAG_NO_LOCK, }, { .cmd = DEVLINK_CMD_HEALTH_REPORTER_DUMP_CLEAR, diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index db65ce62b625ad375cba872ae2f2b31454baca81..ed9dd17f9348c284070fc0cdec5583fcc5533f8d 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -280,13 +280,17 @@ static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi, rcu_read_lock(); list_for_each_entry_rcu(new_stat, &hw_stats_list, list) { + struct net_device *dev; + /* * only add a note to our monitor buffer if: * 1) this is the dev we received on * 2) its after the last_rx delta * 3) our rx_dropped count has gone up */ - if ((new_stat->dev == napi->dev) && + /* Paired with WRITE_ONCE() in dropmon_net_event() */ + dev = READ_ONCE(new_stat->dev); + if ((dev == napi->dev) && (time_after(jiffies, new_stat->last_rx + dm_hw_check_delta)) && (napi->dev->stats.rx_dropped != 
new_stat->last_drop_val)) { trace_drop_common(NULL, NULL); @@ -1574,7 +1578,10 @@ static int dropmon_net_event(struct notifier_block *ev_block, mutex_lock(&net_dm_mutex); list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) { if (new_stat->dev == dev) { - new_stat->dev = NULL; + + /* Paired with READ_ONCE() in trace_napi_poll_hit() */ + WRITE_ONCE(new_stat->dev, NULL); + if (trace_state == TRACE_OFF) { list_del_rcu(&new_stat->list); kfree_rcu(new_stat, rcu); diff --git a/net/core/filter.c b/net/core/filter.c index abd58dce49bbc47c9f63dfcee242e72b5ce2f5e6..659a328024713624402f7ac05ff18502ff0a50ca 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2730,6 +2730,9 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start, if (unlikely(flags)) return -EINVAL; + if (unlikely(len == 0)) + return 0; + /* First find the starting scatterlist element */ i = msg->sg.start; do { @@ -4711,12 +4714,14 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname, switch (optname) { case SO_RCVBUF: val = min_t(u32, val, sysctl_rmem_max); + val = min_t(int, val, INT_MAX / 2); sk->sk_userlocks |= SOCK_RCVBUF_LOCK; WRITE_ONCE(sk->sk_rcvbuf, max_t(int, val * 2, SOCK_MIN_RCVBUF)); break; case SO_SNDBUF: val = min_t(u32, val, sysctl_wmem_max); + val = min_t(int, val, INT_MAX / 2); sk->sk_userlocks |= SOCK_SNDBUF_LOCK; WRITE_ONCE(sk->sk_sndbuf, max_t(int, val * 2, SOCK_MIN_SNDBUF)); @@ -7919,9 +7924,9 @@ void bpf_warn_invalid_xdp_action(u32 act) { const u32 act_max = XDP_REDIRECT; - WARN_ONCE(1, "%s XDP return value %u, expect packet loss!\n", - act > act_max ? "Illegal" : "Driver unsupported", - act); + pr_warn_once("%s XDP return value %u, expect packet loss!\n", + act > act_max ? "Illegal" : "Driver unsupported", + act); } EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action); diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c index c714e6a9dad4c91150a7dbd380f66ca82a3b81cd..eadb696360b486c0165c0b4906b5ea76c3430d40 100644 --- a/net/core/net-procfs.c +++ b/net/core/net-procfs.c @@ -193,12 +193,23 @@ static const struct seq_operations softnet_seq_ops = { .show = softnet_seq_show, }; -static void *ptype_get_idx(loff_t pos) +static void *ptype_get_idx(struct seq_file *seq, loff_t pos) { + struct list_head *ptype_list = NULL; struct packet_type *pt = NULL; + struct net_device *dev; loff_t i = 0; int t; + for_each_netdev_rcu(seq_file_net(seq), dev) { + ptype_list = &dev->ptype_all; + list_for_each_entry_rcu(pt, ptype_list, list) { + if (i == pos) + return pt; + ++i; + } + } + list_for_each_entry_rcu(pt, &ptype_all, list) { if (i == pos) return pt; @@ -219,22 +230,40 @@ static void *ptype_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { rcu_read_lock(); - return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN; + return *pos ? 
ptype_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; } static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos) { + struct net_device *dev; struct packet_type *pt; struct list_head *nxt; int hash; ++*pos; if (v == SEQ_START_TOKEN) - return ptype_get_idx(0); + return ptype_get_idx(seq, 0); pt = v; nxt = pt->list.next; + if (pt->dev) { + if (nxt != &pt->dev->ptype_all) + goto found; + + dev = pt->dev; + for_each_netdev_continue_rcu(seq_file_net(seq), dev) { + if (!list_empty(&dev->ptype_all)) { + nxt = dev->ptype_all.next; + goto found; + } + } + + nxt = ptype_all.next; + goto ptype_all; + } + if (pt->type == htons(ETH_P_ALL)) { +ptype_all: if (nxt != &ptype_all) goto found; hash = 0; @@ -263,7 +292,8 @@ static int ptype_seq_show(struct seq_file *seq, void *v) if (v == SEQ_START_TOKEN) seq_puts(seq, "Type Device Function\n"); - else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) { + else if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) && + (!pt->dev || net_eq(dev_net(pt->dev), seq_file_net(seq)))) { if (pt->type == htons(ETH_P_ALL)) seq_puts(seq, "ALL "); else diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index af591236010557823492f8ae043970344516b0ee..99303897b7bb7683a24c7128831f273d7fb682f1 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -1804,6 +1804,9 @@ static void remove_queue_kobjects(struct net_device *dev) net_rx_queue_update_kobjects(dev, real_rx, 0); netdev_queue_update_kobjects(dev, real_tx, 0); + + dev->real_num_rx_queues = 0; + dev->real_num_tx_queues = 0; #ifdef CONFIG_SYSFS kset_unregister(dev->queues_kset); #endif diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index ac852db83de9fb973a7962c6b4fb7b6cb3d6652b..cbff7d94b993ec941197c99af3118350e7c95445 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -183,8 +183,10 @@ static void ops_exit_list(const struct pernet_operations *ops, { struct net *net; if (ops->exit) { - list_for_each_entry(net, net_exit_list, exit_list) + list_for_each_entry(net, net_exit_list, exit_list) { ops->exit(net); + cond_resched(); + } } if (ops->exit_batch) ops->exit_batch(net_exit_list); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 27ffa83ffeb3ca8318efcd05abd7b53184f2f341..9ff6d4160daba60a77b30248c1d1fb539271c220 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1705,6 +1705,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, { struct ifinfomsg *ifm; struct nlmsghdr *nlh; + struct Qdisc *qdisc; ASSERT_RTNL(); nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags); @@ -1722,6 +1723,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid)) goto nla_put_failure; + qdisc = rtnl_dereference(dev->qdisc); if (nla_put_string(skb, IFLA_IFNAME, dev->name) || nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) || nla_put_u8(skb, IFLA_OPERSTATE, @@ -1740,8 +1742,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, #endif put_master_ifindex(skb, dev) || nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) || - (dev->qdisc && - nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) || + (qdisc && + nla_put_string(skb, IFLA_QDISC, qdisc->ops->id)) || nla_put_ifalias(skb, dev) || nla_put_u32(skb, IFLA_CARRIER_CHANGES, atomic_read(&dev->carrier_up_count) + @@ -3238,8 +3240,8 @@ static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1]; unsigned char name_assign_type = NET_NAME_USER; struct 
nlattr *linkinfo[IFLA_INFO_MAX + 1]; - const struct rtnl_link_ops *m_ops = NULL; - struct net_device *master_dev = NULL; + const struct rtnl_link_ops *m_ops; + struct net_device *master_dev; struct net *net = sock_net(skb->sk); const struct rtnl_link_ops *ops; struct nlattr *tb[IFLA_MAX + 1]; @@ -3277,6 +3279,8 @@ replay: else dev = NULL; + master_dev = NULL; + m_ops = NULL; if (dev) { master_dev = netdev_master_upper_dev_get(dev); if (master_dev) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 0215ae898e8367c82a064ce93f854a35ed1a9629..fccc42c8ca0c7b5ad86fbb1528a977eb2096d977 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2139,7 +2139,7 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta) /* Free pulled out fragments. */ while ((list = skb_shinfo(skb)->frag_list) != insp) { skb_shinfo(skb)->frag_list = list->next; - kfree_skb(list); + consume_skb(list); } /* And insert new clone at head. */ if (clone) { @@ -6044,7 +6044,7 @@ static int pskb_carve_frag_list(struct sk_buff *skb, /* Free pulled out fragments. */ while ((list = shinfo->frag_list) != insp) { shinfo->frag_list = list->next; - kfree_skb(list); + consume_skb(list); } /* And insert new clone at head. */ if (clone) { diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c index b34e4f827e75668f0406993cc84daa9f14966237..a493965f157f289c4afd3ac5d8f8c58ef088a66f 100644 --- a/net/ieee802154/nl802154.c +++ b/net/ieee802154/nl802154.c @@ -1441,7 +1441,7 @@ static int nl802154_send_key(struct sk_buff *msg, u32 cmd, u32 portid, hdr = nl802154hdr_put(msg, portid, seq, flags, cmd); if (!hdr) - return -1; + return -ENOBUFS; if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex)) goto nla_put_failure; @@ -1634,7 +1634,7 @@ static int nl802154_send_device(struct sk_buff *msg, u32 cmd, u32 portid, hdr = nl802154hdr_put(msg, portid, seq, flags, cmd); if (!hdr) - return -1; + return -ENOBUFS; if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex)) goto nla_put_failure; @@ -1812,7 +1812,7 @@ static int nl802154_send_devkey(struct sk_buff *msg, u32 cmd, u32 portid, hdr = nl802154hdr_put(msg, portid, seq, flags, cmd); if (!hdr) - return -1; + return -ENOBUFS; if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex)) goto nla_put_failure; @@ -1988,7 +1988,7 @@ static int nl802154_send_seclevel(struct sk_buff *msg, u32 cmd, u32 portid, hdr = nl802154hdr_put(msg, portid, seq, flags, cmd); if (!hdr) - return -1; + return -ENOBUFS; if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex)) goto nla_put_failure; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index e2f85a16fad9b02a4d6857fb3819489ab89a9801..742218594741a918924bbd223e5d6c7c6366baa5 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1374,8 +1374,11 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb, } ops = rcu_dereference(inet_offloads[proto]); - if (likely(ops && ops->callbacks.gso_segment)) + if (likely(ops && ops->callbacks.gso_segment)) { segs = ops->callbacks.gso_segment(skb, features); + if (!segs) + skb->network_header = skb_mac_header(skb) + nhoff - skb->head; + } if (IS_ERR_OR_NULL(segs)) goto out; diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index ab6a8f35d369ddd948992b83d1ebf9d88ba2b4cb..838a876c168caf5a39af46ce90afea11691fd035 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -251,7 +252,6 @@ void free_fib_info(struct fib_info *fi) pr_warn("Freeing alive fib_info %p\n", fi); return; } - fib_info_cnt--; 
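
The fib_info_cnt hunks that begin here follow the same pattern as the drop_monitor dev pointer earlier in this series: the writer updates the field under its lock with WRITE_ONCE() and the lockless reader uses READ_ONCE(), so the access cannot be torn or cached by the compiler. A minimal sketch of that pairing, with made-up names (demo_lock, demo_cnt) that are not taken from the patch:

#include <linux/spinlock.h>
#include <linux/compiler.h>

static DEFINE_SPINLOCK(demo_lock);
static unsigned int demo_cnt;

static void demo_release_entry(void)
{
	spin_lock_bh(&demo_lock);
	/* Paired with READ_ONCE() in demo_over_limit(). */
	WRITE_ONCE(demo_cnt, demo_cnt - 1);
	spin_unlock_bh(&demo_lock);
}

static bool demo_over_limit(unsigned int limit)
{
	/* Lockless read; paired with the WRITE_ONCE() above. */
	return READ_ONCE(demo_cnt) >= limit;
}
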
call_rcu(&fi->rcu, free_fib_info_rcu); } @@ -262,6 +262,10 @@ void fib_release_info(struct fib_info *fi) spin_lock_bh(&fib_info_lock); if (fi && --fi->fib_treeref == 0) { hlist_del(&fi->fib_hash); + + /* Paired with READ_ONCE() in fib_create_info(). */ + WRITE_ONCE(fib_info_cnt, fib_info_cnt - 1); + if (fi->fib_prefsrc) hlist_del(&fi->fib_lhash); if (fi->nh) { @@ -318,11 +322,15 @@ static inline int nh_comp(struct fib_info *fi, struct fib_info *ofi) static inline unsigned int fib_devindex_hashfn(unsigned int val) { - unsigned int mask = DEVINDEX_HASHSIZE - 1; + return hash_32(val, DEVINDEX_HASHBITS); +} + +static struct hlist_head * +fib_info_devhash_bucket(const struct net_device *dev) +{ + u32 val = net_hash_mix(dev_net(dev)) ^ dev->ifindex; - return (val ^ - (val >> DEVINDEX_HASHBITS) ^ - (val >> (DEVINDEX_HASHBITS * 2))) & mask; + return &fib_info_devhash[fib_devindex_hashfn(val)]; } static unsigned int fib_info_hashfn_1(int init_val, u8 protocol, u8 scope, @@ -432,12 +440,11 @@ int ip_fib_check_default(__be32 gw, struct net_device *dev) { struct hlist_head *head; struct fib_nh *nh; - unsigned int hash; spin_lock(&fib_info_lock); - hash = fib_devindex_hashfn(dev->ifindex); - head = &fib_info_devhash[hash]; + head = fib_info_devhash_bucket(dev); + hlist_for_each_entry(nh, head, nh_hash) { if (nh->fib_nh_dev == dev && nh->fib_nh_gw4 == gw && @@ -1431,7 +1438,9 @@ struct fib_info *fib_create_info(struct fib_config *cfg, #endif err = -ENOBUFS; - if (fib_info_cnt >= fib_info_hash_size) { + + /* Paired with WRITE_ONCE() in fib_release_info() */ + if (READ_ONCE(fib_info_cnt) >= fib_info_hash_size) { unsigned int new_size = fib_info_hash_size << 1; struct hlist_head *new_info_hash; struct hlist_head *new_laddrhash; @@ -1463,7 +1472,6 @@ struct fib_info *fib_create_info(struct fib_config *cfg, return ERR_PTR(err); } - fib_info_cnt++; fi->fib_net = net; fi->fib_protocol = cfg->fc_protocol; fi->fib_scope = cfg->fc_scope; @@ -1590,6 +1598,7 @@ link_it: fi->fib_treeref++; refcount_set(&fi->fib_clntref, 1); spin_lock_bh(&fib_info_lock); + fib_info_cnt++; hlist_add_head(&fi->fib_hash, &fib_info_hash[fib_info_hashfn(fi)]); if (fi->fib_prefsrc) { @@ -1603,12 +1612,10 @@ link_it: } else { change_nexthops(fi) { struct hlist_head *head; - unsigned int hash; if (!nexthop_nh->fib_nh_dev) continue; - hash = fib_devindex_hashfn(nexthop_nh->fib_nh_dev->ifindex); - head = &fib_info_devhash[hash]; + head = fib_info_devhash_bucket(nexthop_nh->fib_nh_dev); hlist_add_head(&nexthop_nh->nh_hash, head); } endfor_nexthops(fi) } @@ -1958,8 +1965,7 @@ void fib_nhc_update_mtu(struct fib_nh_common *nhc, u32 new, u32 orig) void fib_sync_mtu(struct net_device *dev, u32 orig_mtu) { - unsigned int hash = fib_devindex_hashfn(dev->ifindex); - struct hlist_head *head = &fib_info_devhash[hash]; + struct hlist_head *head = fib_info_devhash_bucket(dev); struct fib_nh *nh; hlist_for_each_entry(nh, head, nh_hash) { @@ -1978,12 +1984,11 @@ void fib_sync_mtu(struct net_device *dev, u32 orig_mtu) */ int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force) { - int ret = 0; - int scope = RT_SCOPE_NOWHERE; + struct hlist_head *head = fib_info_devhash_bucket(dev); struct fib_info *prev_fi = NULL; - unsigned int hash = fib_devindex_hashfn(dev->ifindex); - struct hlist_head *head = &fib_info_devhash[hash]; + int scope = RT_SCOPE_NOWHERE; struct fib_nh *nh; + int ret = 0; if (force) scope = -1; @@ -2128,7 +2133,6 @@ out: int fib_sync_up(struct net_device *dev, unsigned char nh_flags) { struct fib_info *prev_fi; - unsigned 
int hash; struct hlist_head *head; struct fib_nh *nh; int ret; @@ -2144,8 +2148,7 @@ int fib_sync_up(struct net_device *dev, unsigned char nh_flags) } prev_fi = NULL; - hash = fib_devindex_hashfn(dev->ifindex); - head = &fib_info_devhash[hash]; + head = fib_info_devhash_bucket(dev); ret = 0; hlist_for_each_entry(nh, head, nh_hash) { diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index 10d31733297d7358b55b45d64bc447039365df1d..e0e8a65d561ec6553970af7ac8948db515f774dc 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c @@ -204,9 +204,9 @@ void inet_frag_kill(struct inet_frag_queue *fq) /* The RCU read lock provides a memory barrier * guaranteeing that if fqdir->dead is false then * the hash table destruction will not start until - * after we unlock. Paired with inet_frags_exit_net(). + * after we unlock. Paired with fqdir_pre_exit(). */ - if (!fqdir->dead) { + if (!READ_ONCE(fqdir->dead)) { rhashtable_remove_fast(&fqdir->rhashtable, &fq->node, fqdir->f->rhash_params); refcount_dec(&fq->refcnt); @@ -321,9 +321,11 @@ static struct inet_frag_queue *inet_frag_create(struct fqdir *fqdir, /* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */ struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key) { + /* This pairs with WRITE_ONCE() in fqdir_pre_exit(). */ + long high_thresh = READ_ONCE(fqdir->high_thresh); struct inet_frag_queue *fq = NULL, *prev; - if (!fqdir->high_thresh || frag_mem_limit(fqdir) > fqdir->high_thresh) + if (!high_thresh || frag_mem_limit(fqdir) > high_thresh) return NULL; rcu_read_lock(); diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index cfeb8890f94ee95b2246ba98beb338a33fc45d1d..fad803d2d711ef0d97f7150f0f710a35ac822946 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -144,7 +144,8 @@ static void ip_expire(struct timer_list *t) rcu_read_lock(); - if (qp->q.fqdir->dead) + /* Paired with WRITE_ONCE() in fqdir_pre_exit(). */ + if (READ_ONCE(qp->q.fqdir->dead)) goto out_rcu_unlock; spin_lock(&qp->q.lock); diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index a9cc05043fa47618e13480fe062aadc10692c646..e4504dd510c6d741ae450990184737cc18a58a52 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -599,8 +599,9 @@ static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) key = &info->key; ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst, key->u.ipv4.src, - tunnel_id_to_key32(key->tun_id), key->tos, 0, - skb->mark, skb_get_hash(skb)); + tunnel_id_to_key32(key->tun_id), + key->tos & ~INET_ECN_MASK, 0, skb->mark, + skb_get_hash(skb)); rt = ip_route_output_key(dev_net(dev), &fl4); if (IS_ERR(rt)) return PTR_ERR(rt); diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 10d4cde31c6bf517938f404c7597188ca9cb4c6b..5e48b3d3a00db61f457956607397307da0fe3900 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -162,12 +162,19 @@ int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk, iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr); iph->saddr = saddr; iph->protocol = sk->sk_protocol; - if (ip_dont_fragment(sk, &rt->dst)) { + /* Do not bother generating IPID for small packets (eg SYNACK) */ + if (skb->len <= IPV4_MIN_MTU || ip_dont_fragment(sk, &rt->dst)) { iph->frag_off = htons(IP_DF); iph->id = 0; } else { iph->frag_off = 0; - __ip_select_ident(net, iph, 1); + /* TCP packets here are SYNACK with fat IPv4/TCP options. + * Avoid using the hashed IP ident generator. 
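
The fib_info_devhash_bucket() helper introduced above replaces the old ifindex-only hash. Hashing the bare ifindex means the same-numbered device of every namespace (loopback is ifindex 1 everywhere) always lands in one bucket; folding in net_hash_mix() spreads per-namespace devices across the table. A condensed sketch with hypothetical names (DEMO_HASHBITS, demo_devhash), not the kernel's own table:

#include <linux/hash.h>
#include <linux/netdevice.h>
#include <net/netns/hash.h>

#define DEMO_HASHBITS 7

static struct hlist_head demo_devhash[1 << DEMO_HASHBITS];

static struct hlist_head *demo_devhash_bucket(const struct net_device *dev)
{
	/* Mix the namespace identity into the key, then hash_32() it. */
	u32 val = net_hash_mix(dev_net(dev)) ^ dev->ifindex;

	return &demo_devhash[hash_32(val, DEMO_HASHBITS)];
}
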
+ */ + if (sk->sk_protocol == IPPROTO_TCP) + iph->id = (__force __be16)prandom_u32(); + else + __ip_select_ident(net, iph, 1); } if (opt && opt->opt.optlen) { @@ -614,18 +621,6 @@ void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph, } EXPORT_SYMBOL(ip_fraglist_init); -static void ip_fraglist_ipcb_prepare(struct sk_buff *skb, - struct ip_fraglist_iter *iter) -{ - struct sk_buff *to = iter->frag; - - /* Copy the flags to each fragment. */ - IPCB(to)->flags = IPCB(skb)->flags; - - if (iter->offset == 0) - ip_options_fragment(to); -} - void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter) { unsigned int hlen = iter->hlen; @@ -671,7 +666,7 @@ void ip_frag_init(struct sk_buff *skb, unsigned int hlen, EXPORT_SYMBOL(ip_frag_init); static void ip_frag_ipcb(struct sk_buff *from, struct sk_buff *to, - bool first_frag, struct ip_frag_state *state) + bool first_frag) { /* Copy the flags to each fragment. */ IPCB(to)->flags = IPCB(from)->flags; @@ -850,8 +845,20 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, /* Prepare header of the next frame, * before previous one went down. */ if (iter.frag) { - ip_fraglist_ipcb_prepare(skb, &iter); + bool first_frag = (iter.offset == 0); + + IPCB(iter.frag)->flags = IPCB(skb)->flags; ip_fraglist_prepare(skb, &iter); + if (first_frag && IPCB(skb)->opt.optlen) { + /* ipcb->opt is not populated for frags + * coming from __ip_make_skb(), + * ip_options_fragment() needs optlen + */ + IPCB(iter.frag)->opt.optlen = + IPCB(skb)->opt.optlen; + ip_options_fragment(iter.frag); + ip_send_check(iter.iph); + } } skb->tstamp = tstamp; @@ -905,7 +912,7 @@ slow_path: err = PTR_ERR(skb2); goto fail; } - ip_frag_ipcb(skb, skb2, first_frag, &state); + ip_frag_ipcb(skb, skb2, first_frag); /* * Put this fragment into the sending queue. 
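
The restored fast-path logic in the ip_output.c hunk above handles IP options only once: while the first fragment is being sent, the next fragment inherits the control-block flags and, when the original skb carries options, gets optlen propagated so ip_options_fragment() can strip the non-copyable options and the rewritten header checksum can be refreshed. A condensed restatement follows; it is illustrative only, not a drop-in replacement (in the real code ip_fraglist_prepare() builds the next header between these two steps):

#include <linux/skbuff.h>
#include <net/ip.h>

static void demo_prepare_next_frag(struct sk_buff *skb, struct sk_buff *next,
				   struct iphdr *next_iph, bool first_frag)
{
	/* Every fragment inherits the control-block flags of the original. */
	IPCB(next)->flags = IPCB(skb)->flags;

	if (first_frag && IPCB(skb)->opt.optlen) {
		/* frags built by __ip_make_skb() have no opt populated */
		IPCB(next)->opt.optlen = IPCB(skb)->opt.optlen;
		ip_options_fragment(next);	/* NOP out non-copyable options */
		ip_send_check(next_iph);	/* header bytes changed: re-checksum */
	}
}
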
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 939792a3881461275b9c7fae5b3a5e0881a59584..be1976536f1c00e0b8e525207c79004628b6f8fc 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -261,7 +261,9 @@ static int __net_init ipmr_rules_init(struct net *net) return 0; err2: + rtnl_lock(); ipmr_free_table(mrt); + rtnl_unlock(); err1: fib_rules_unregister(ops); return err; diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index a8b980ad11d4e1ca225dea411bb8a4af7c995ca3..1088564d4dbcb2490f25008a7872e9c7a378222e 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -505,8 +505,11 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par) if (IS_ERR(config)) return PTR_ERR(config); } - } else if (memcmp(&config->clustermac, &cipinfo->clustermac, ETH_ALEN)) + } else if (memcmp(&config->clustermac, &cipinfo->clustermac, ETH_ALEN)) { + clusterip_config_entry_put(config); + clusterip_config_put(config); return -EINVAL; + } ret = nf_ct_netns_get(par->net, par->family); if (ret < 0) { diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 8ce8b7300b9d3eaa47a56096a43498518176598a..e60ca03543a536b2b9a32522d87b93f94ea5b02b 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -172,16 +172,22 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident) struct sock *sk = NULL; struct inet_sock *isk; struct hlist_nulls_node *hnode; - int dif = skb->dev->ifindex; + int dif, sdif; if (skb->protocol == htons(ETH_P_IP)) { + dif = inet_iif(skb); + sdif = inet_sdif(skb); pr_debug("try to find: num = %d, daddr = %pI4, dif = %d\n", (int)ident, &ip_hdr(skb)->daddr, dif); #if IS_ENABLED(CONFIG_IPV6) } else if (skb->protocol == htons(ETH_P_IPV6)) { + dif = inet6_iif(skb); + sdif = inet6_sdif(skb); pr_debug("try to find: num = %d, daddr = %pI6c, dif = %d\n", (int)ident, &ipv6_hdr(skb)->daddr, dif); #endif + } else { + return NULL; } read_lock_bh(&ping_table.lock); @@ -220,7 +226,8 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident) continue; } - if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) + if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif && + sk->sk_bound_dev_if != sdif) continue; sock_hold(sk); diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 7d26e0f8bdaeb5f334e77deb5966a976db3d3ff4..5d95f80314f95521eb645f55d7cda33fe894f848 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -721,6 +721,7 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) int ret = -EINVAL; int chk_addr_ret; + lock_sock(sk); if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in)) goto out; @@ -740,7 +741,9 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) inet->inet_saddr = 0; /* Use device */ sk_dst_reset(sk); ret = 0; -out: return ret; +out: + release_sock(sk); + return ret; } /* diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 991e3434957b846d4730cc52cb4800c83f170bd5..12dd08af12b5e74dcecd569fbd9a30c3390d8704 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1620,6 +1620,8 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb, (mss != tcp_skb_seglen(skb))) goto out; + if (!tcp_skb_can_collapse(prev, skb)) + goto out; len = skb->len; pcount = tcp_skb_pcount(skb); if (tcp_skb_shift(prev, skb, pcount, len)) diff --git a/net/ipv4/udp_tunnel_nic.c b/net/ipv4/udp_tunnel_nic.c index b91003538d87a03855df9bd35b751017d37fe031..bc3a043a5d5c7635b7acabe76bb19f7ad59d8db9 100644 --- 
a/net/ipv4/udp_tunnel_nic.c +++ b/net/ipv4/udp_tunnel_nic.c @@ -846,7 +846,7 @@ udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn) list_for_each_entry(node, &info->shared->devices, list) if (node->dev == dev) break; - if (node->dev != dev) + if (list_entry_is_head(node, &info->shared->devices, list)) return; list_del(&node->list); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 29526937077b3471e640c510d5793b38e452e7f9..4dde49e628fab3c025e2f04b4ee31f67eff6ddb2 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -2577,7 +2577,7 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev, __u32 valid_lft, u32 prefered_lft) { struct inet6_ifaddr *ifp = ipv6_get_ifaddr(net, addr, dev, 1); - int create = 0; + int create = 0, update_lft = 0; if (!ifp && valid_lft) { int max_addresses = in6_dev->cnf.max_addresses; @@ -2621,19 +2621,32 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev, unsigned long now; u32 stored_lft; - /* Update lifetime (RFC4862 5.5.3 e) - * We deviate from RFC4862 by honoring all Valid Lifetimes to - * improve the reaction of SLAAC to renumbering events - * (draft-gont-6man-slaac-renum-06, Section 4.2) - */ + /* update lifetime (RFC2462 5.5.3 e) */ spin_lock_bh(&ifp->lock); now = jiffies; if (ifp->valid_lft > (now - ifp->tstamp) / HZ) stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ; else stored_lft = 0; - if (!create && stored_lft) { + const u32 minimum_lft = min_t(u32, + stored_lft, MIN_VALID_LIFETIME); + valid_lft = max(valid_lft, minimum_lft); + + /* RFC4862 Section 5.5.3e: + * "Note that the preferred lifetime of the + * corresponding address is always reset to + * the Preferred Lifetime in the received + * Prefix Information option, regardless of + * whether the valid lifetime is also reset or + * ignored." + * + * So we should always update prefered_lft here. 
+ */ + update_lft = 1; + } + + if (update_lft) { ifp->valid_lft = valid_lft; ifp->prefered_lft = prefered_lft; ifp->tstamp = now; diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index e43f1fbac28b67ed3fe186adbde26d2653167864..c783b912313217bf3a8fc5838142c03f03c22017 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -110,7 +110,7 @@ void fib6_update_sernum(struct net *net, struct fib6_info *f6i) fn = rcu_dereference_protected(f6i->fib6_node, lockdep_is_held(&f6i->fib6_table->tb6_lock)); if (fn) - fn->fn_sernum = fib6_new_sernum(net); + WRITE_ONCE(fn->fn_sernum, fib6_new_sernum(net)); } /* @@ -587,12 +587,13 @@ static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb, spin_unlock_bh(&table->tb6_lock); if (res > 0) { cb->args[4] = 1; - cb->args[5] = w->root->fn_sernum; + cb->args[5] = READ_ONCE(w->root->fn_sernum); } } else { - if (cb->args[5] != w->root->fn_sernum) { + int sernum = READ_ONCE(w->root->fn_sernum); + if (cb->args[5] != sernum) { /* Begin at the root if the tree changed */ - cb->args[5] = w->root->fn_sernum; + cb->args[5] = sernum; w->state = FWS_INIT; w->node = w->root; w->skip = w->count; @@ -1342,7 +1343,7 @@ static void __fib6_update_sernum_upto_root(struct fib6_info *rt, /* paired with smp_rmb() in rt6_get_cookie_safe() */ smp_wmb(); while (fn) { - fn->fn_sernum = sernum; + WRITE_ONCE(fn->fn_sernum, sernum); fn = rcu_dereference_protected(fn->parent, lockdep_is_held(&rt->fib6_table->tb6_lock)); } @@ -2171,8 +2172,8 @@ static int fib6_clean_node(struct fib6_walker *w) }; if (c->sernum != FIB6_NO_SERNUM_CHANGE && - w->node->fn_sernum != c->sernum) - w->node->fn_sernum = c->sernum; + READ_ONCE(w->node->fn_sernum) != c->sernum) + WRITE_ONCE(w->node->fn_sernum, c->sernum); if (!c->func) { WARN_ON_ONCE(c->sernum == FIB6_NO_SERNUM_CHANGE); @@ -2536,7 +2537,7 @@ static void ipv6_route_seq_setup_walk(struct ipv6_route_iter *iter, iter->w.state = FWS_INIT; iter->w.node = iter->w.root; iter->w.args = iter; - iter->sernum = iter->w.root->fn_sernum; + iter->sernum = READ_ONCE(iter->w.root->fn_sernum); INIT_LIST_HEAD(&iter->w.lh); fib6_walker_link(net, &iter->w); } @@ -2564,8 +2565,10 @@ static struct fib6_table *ipv6_route_seq_next_table(struct fib6_table *tbl, static void ipv6_route_check_sernum(struct ipv6_route_iter *iter) { - if (iter->sernum != iter->w.root->fn_sernum) { - iter->sernum = iter->w.root->fn_sernum; + int sernum = READ_ONCE(iter->w.root->fn_sernum); + + if (iter->sernum != sernum) { + iter->sernum = sernum; iter->w.state = FWS_INIT; iter->w.node = iter->w.root; WARN_ON(iter->w.skip); diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index aa673a6a7e4328d03e89527ed3daaccd69c39084..ceb85c67ce3952b7142eeec29bff46a7eaf5217b 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -450,8 +450,10 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, err = -EINVAL; goto done; } - if (fl_shared_exclusive(fl) || fl->opt) + if (fl_shared_exclusive(fl) || fl->opt) { + WRITE_ONCE(sock_net(sk)->ipv6.flowlabel_has_excl, 1); static_branch_deferred_inc(&ipv6_flowlabel_exclusive); + } return fl; done: diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 09fa49bbf617db53eb04fd9d739276fc3a14c53d..9a0263f2523237058ec316d4cf007a9917460681 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -755,6 +755,7 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb, fl6->daddr = key->u.ipv6.dst; fl6->flowlabel = key->label; fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL); + fl6->fl6_gre_key = 
tunnel_id_to_key32(key->tun_id); dsfield = key->tos; flags = key->tun_flags & @@ -990,6 +991,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, fl6.daddr = key->u.ipv6.dst; fl6.flowlabel = key->label; fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL); + fl6.fl6_gre_key = tunnel_id_to_key32(key->tun_id); dsfield = key->tos; if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT)) @@ -1098,6 +1100,7 @@ static void ip6gre_tnl_link_config_common(struct ip6_tnl *t) fl6->flowi6_oif = p->link; fl6->flowlabel = 0; fl6->flowi6_proto = IPPROTO_GRE; + fl6->fl6_gre_key = t->parms.o_key; if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS)) fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo; @@ -1543,7 +1546,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev) static struct inet6_protocol ip6gre_protocol __read_mostly = { .handler = gre_rcv, .err_handler = ip6gre_err, - .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, + .flags = INET6_PROTO_FINAL, }; static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head) diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index a80f90bf3ae7dc1aec904fd93b3d8e8c87a926e4..15c8eef1ef443854e207bfaa238cb7aa321fedb9 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -113,6 +113,8 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, if (likely(ops && ops->callbacks.gso_segment)) { skb_reset_transport_header(skb); segs = ops->callbacks.gso_segment(skb, features); + if (!segs) + skb->network_header = skb_mac_header(skb) + nhoff - skb->head; } if (IS_ERR_OR_NULL(segs)) diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 08441f06afd482066fa1381f8f0d1fb436a1d73d..3a2741569b84764797021a975345db6a687e1fc7 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1066,14 +1066,14 @@ int ip6_tnl_xmit_ctl(struct ip6_tnl *t, if (unlikely(!ipv6_chk_addr_and_flags(net, laddr, ldev, false, 0, IFA_F_TENTATIVE))) - pr_warn("%s xmit: Local address not yet configured!\n", - p->name); + pr_warn_ratelimited("%s xmit: Local address not yet configured!\n", + p->name); else if (!(p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) && !ipv6_addr_is_multicast(raddr) && unlikely(ipv6_chk_addr_and_flags(net, raddr, ldev, true, 0, IFA_F_TENTATIVE))) - pr_warn("%s xmit: Routing loop! Remote address found on this node!\n", - p->name); + pr_warn_ratelimited("%s xmit: Routing loop! 
Remote address found on this node!\n", + p->name); else ret = 1; rcu_read_unlock(); diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 06b0d2c329b94b25f2945b220347c5d9defdc72e..41cb348a7c3c4904ba4111d1c0e48044c7963ee2 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -248,7 +248,9 @@ static int __net_init ip6mr_rules_init(struct net *net) return 0; err2: + rtnl_lock(); ip6mr_free_table(mrt); + rtnl_unlock(); err1: fib_rules_unregister(ops); return err; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 654bf4ca61260ba6650ac714e0a1d729571556d6..352e645c546ebfd036398730c677670c324500bb 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2674,7 +2674,7 @@ static void ip6_link_failure(struct sk_buff *skb) if (from) { fn = rcu_dereference(from->fib6_node); if (fn && (rt->rt6i_flags & RTF_DEFAULT)) - fn->fn_sernum = -1; + WRITE_ONCE(fn->fn_sernum, -1); } } rcu_read_unlock(); diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 6a24431b9009502c54b51e5a239435fcdae37883..d27c444a19ed1d2335a249e1966af4d13f30a8ca 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -4800,7 +4800,7 @@ void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, goto drop; break; case RX_ENC_VHT: - if (WARN_ONCE(status->rate_idx > 9 || + if (WARN_ONCE(status->rate_idx > 11 || !status->nss || status->nss > 8, "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n", diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index f4cf26b606f92ba9fc01ce8b72660707c326188c..8369af0c50eab3a1773de0d385d2d72b1aa72302 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1832,15 +1832,17 @@ repeat: pr_debug("nf_conntrack_in: Can't track with proto module\n"); nf_conntrack_put(&ct->ct_general); skb->_nfct = 0; - NF_CT_STAT_INC_ATOMIC(state->net, invalid); - if (ret == -NF_DROP) - NF_CT_STAT_INC_ATOMIC(state->net, drop); /* Special case: TCP tracker reports an attempt to reopen a * closed/aborted connection. We have to go back and create a * fresh conntrack. */ if (ret == -NF_REPEAT) goto repeat; + + NF_CT_STAT_INC_ATOMIC(state->net, invalid); + if (ret == -NF_DROP) + NF_CT_STAT_INC_ATOMIC(state->net, drop); + ret = -ret; goto out; } diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index c6bcc28ae33876601f735237e8c515de490ebb0c..eeeaa34b3e7b5d5cecaf24554fa8e51641a89abe 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -2283,7 +2283,8 @@ ctnetlink_create_conntrack(struct net *net, if (helper->from_nlattr) helper->from_nlattr(helpinfo, ct); - /* not in hash table yet so not strictly necessary */ + /* disable helper auto-assignment for this entry */ + ct->status |= IPS_HELPER; RCU_INIT_POINTER(help->helper, helper); } } else { diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 810cca24b399019862f185a65d064aa56aa8a21c..7626f3e1c70a774968676d1111ee6376284afc17 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -489,6 +489,15 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct, pr_debug("Setting vtag %x for dir %d\n", ih->init_tag, !dir); ct->proto.sctp.vtag[!dir] = ih->init_tag; + + /* don't renew timeout on init retransmit so + * port reuse by client or NAT middlebox cannot + * keep entry alive indefinitely (incl. nat info). 
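
The SCTP conntrack condition completed just below restates what its comment says: an INIT seen on an entry that is already confirmed and still in the CLOSED state is processed, but must not refresh the conntrack timeout, otherwise retransmitted INITs from a client or NAT middlebox could pin the stale entry and its NAT binding forever. A hedged, self-contained restatement of that predicate (the helper name is invented):

#include <net/netfilter/nf_conntrack.h>
#include <linux/netfilter/nf_conntrack_sctp.h>

static bool demo_skip_timeout_refresh(const struct nf_conn *ct,
				      u8 old_state, u8 new_state)
{
	/* Retransmitted INIT on a confirmed, still-CLOSED entry. */
	return new_state == SCTP_CONNTRACK_CLOSED &&
	       old_state == SCTP_CONNTRACK_CLOSED &&
	       nf_ct_is_confirmed(ct);
}
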
+ */ + if (new_state == SCTP_CONNTRACK_CLOSED && + old_state == SCTP_CONNTRACK_CLOSED && + nf_ct_is_confirmed(ct)) + ignore = true; } ct->proto.sctp.state = new_state; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index b781ba97c474e3216e85bf95c4505358297ec1d3..fdd1da9ecea9e1e4757d52619ab55c27848df8a6 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -5924,12 +5924,15 @@ static int nf_tables_updobj(const struct nft_ctx *ctx, { struct nft_object *newobj; struct nft_trans *trans; - int err; + int err = -ENOMEM; + + if (!try_module_get(type->owner)) + return -ENOENT; trans = nft_trans_alloc(ctx, NFT_MSG_NEWOBJ, sizeof(struct nft_trans_obj)); if (!trans) - return -ENOMEM; + goto err_trans; newobj = nft_obj_init(ctx, type, attr); if (IS_ERR(newobj)) { @@ -5946,6 +5949,8 @@ static int nf_tables_updobj(const struct nft_ctx *ctx, err_free_trans: kfree(trans); +err_trans: + module_put(type->owner); return err; } @@ -7555,7 +7560,7 @@ static void nft_obj_commit_update(struct nft_trans *trans) if (obj->ops->update) obj->ops->update(obj, newobj); - kfree(newobj); + nft_obj_destroy(&trans->ctx, newobj); } static void nft_commit_release(struct nft_trans *trans) @@ -8202,7 +8207,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) break; case NFT_MSG_NEWOBJ: if (nft_trans_obj_update(trans)) { - kfree(nft_trans_obj_newobj(trans)); + nft_obj_destroy(&trans->ctx, nft_trans_obj_newobj(trans)); nft_trans_destroy(trans); } else { trans->ctx.table->use--; diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c index e5fcbb0e4b8e5041bfbf0cc08efbabdcb4c81b63..839fd09f1bb4a7e43eb5539b952032be6eaccc84 100644 --- a/net/netfilter/nf_tables_offload.c +++ b/net/netfilter/nf_tables_offload.c @@ -94,7 +94,8 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net, expr = nft_expr_first(rule); while (nft_expr_more(rule, expr)) { - if (expr->ops->offload_flags & NFT_OFFLOAD_F_ACTION) + if (expr->ops->offload_action && + expr->ops->offload_action(expr)) num_actions++; expr = nft_expr_next(expr); diff --git a/net/netfilter/nft_dup_netdev.c b/net/netfilter/nft_dup_netdev.c index 40788b3f1071a7e82f1dddff270d808e8edf40cb..70c457476b87407cb50fadeff7f3e45835c68daa 100644 --- a/net/netfilter/nft_dup_netdev.c +++ b/net/netfilter/nft_dup_netdev.c @@ -67,6 +67,11 @@ static int nft_dup_netdev_offload(struct nft_offload_ctx *ctx, return nft_fwd_dup_netdev_offload(ctx, flow, FLOW_ACTION_MIRRED, oif); } +static bool nft_dup_netdev_offload_action(const struct nft_expr *expr) +{ + return true; +} + static struct nft_expr_type nft_dup_netdev_type; static const struct nft_expr_ops nft_dup_netdev_ops = { .type = &nft_dup_netdev_type, @@ -75,6 +80,7 @@ static const struct nft_expr_ops nft_dup_netdev_ops = { .init = nft_dup_netdev_init, .dump = nft_dup_netdev_dump, .offload = nft_dup_netdev_offload, + .offload_action = nft_dup_netdev_offload_action, }; static struct nft_expr_type nft_dup_netdev_type __read_mostly = { diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c index b77985986b24ea6fd2f530bcc3e4befde45e3b99..3b0dcd170551be91d4e45a412d13de6de585d19e 100644 --- a/net/netfilter/nft_fwd_netdev.c +++ b/net/netfilter/nft_fwd_netdev.c @@ -77,6 +77,11 @@ static int nft_fwd_netdev_offload(struct nft_offload_ctx *ctx, return nft_fwd_dup_netdev_offload(ctx, flow, FLOW_ACTION_REDIRECT, oif); } +static bool nft_fwd_netdev_offload_action(const struct nft_expr *expr) +{ + return true; +} + struct 
nft_fwd_neigh { enum nft_registers sreg_dev:8; enum nft_registers sreg_addr:8; @@ -219,6 +224,7 @@ static const struct nft_expr_ops nft_fwd_netdev_ops = { .dump = nft_fwd_netdev_dump, .validate = nft_fwd_validate, .offload = nft_fwd_netdev_offload, + .offload_action = nft_fwd_netdev_offload_action, }; static const struct nft_expr_ops * diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c index c63eb3b171784bcf82ea32ed676a657bd9177472..5c9d88560a474de749c392542755520ad2f002f2 100644 --- a/net/netfilter/nft_immediate.c +++ b/net/netfilter/nft_immediate.c @@ -213,6 +213,16 @@ static int nft_immediate_offload(struct nft_offload_ctx *ctx, return 0; } +static bool nft_immediate_offload_action(const struct nft_expr *expr) +{ + const struct nft_immediate_expr *priv = nft_expr_priv(expr); + + if (priv->dreg == NFT_REG_VERDICT) + return true; + + return false; +} + static const struct nft_expr_ops nft_imm_ops = { .type = &nft_imm_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_immediate_expr)), @@ -224,7 +234,7 @@ static const struct nft_expr_ops nft_imm_ops = { .dump = nft_immediate_dump, .validate = nft_immediate_validate, .offload = nft_immediate_offload, - .offload_flags = NFT_OFFLOAD_F_ACTION, + .offload_action = nft_immediate_offload_action, }; struct nft_expr_type nft_imm_type __read_mostly = { diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c index 1ebee25de67721934531095848d358c9895f0620..6a8495bd08bb2585af8cf0ff7defbe3cfd6cceae 100644 --- a/net/netfilter/nft_payload.c +++ b/net/netfilter/nft_payload.c @@ -502,6 +502,9 @@ static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt, struct sk_buff *skb, unsigned int *l4csum_offset) { + if (pkt->xt.fragoff) + return -1; + switch (pkt->tprot) { case IPPROTO_TCP: *l4csum_offset = offsetof(struct tcphdr, check); diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c index 2d73f265b12c9bbbe2b2b12b117333729ae073a2..f67c4436c5d31f38fd59310a744437db7ff981f3 100644 --- a/net/netfilter/nft_set_pipapo.c +++ b/net/netfilter/nft_set_pipapo.c @@ -1290,6 +1290,11 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old) if (!new->scratch_aligned) goto out_scratch; #endif + for_each_possible_cpu(i) + *per_cpu_ptr(new->scratch, i) = NULL; + + if (pipapo_realloc_scratch(new, old->bsize_max)) + goto out_scratch_realloc; rcu_head_init(&new->rcu); @@ -1334,6 +1339,9 @@ out_lt: kvfree(dst->lt); dst--; } +out_scratch_realloc: + for_each_possible_cpu(i) + kfree(*per_cpu_ptr(new->scratch, i)); #ifdef NFT_PIPAPO_ALIGN free_percpu(new->scratch_aligned); #endif diff --git a/net/netfilter/nft_synproxy.c b/net/netfilter/nft_synproxy.c index 4fda8b3f176265b445e075fa51ddd0eb14c5ebe6..59c4dfaf2ea1fc97cade542fae9901fa89742217 100644 --- a/net/netfilter/nft_synproxy.c +++ b/net/netfilter/nft_synproxy.c @@ -191,8 +191,10 @@ static int nft_synproxy_do_init(const struct nft_ctx *ctx, if (err) goto nf_ct_failure; err = nf_synproxy_ipv6_init(snet, ctx->net); - if (err) + if (err) { + nf_synproxy_ipv4_fini(snet, ctx->net); goto nf_ct_failure; + } break; } diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index eef0e3f2f25b0972debee403b424891094ef4dea..e5c8a295e64066971869b83ece1daee60a7f2ddc 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c @@ -298,7 +298,7 @@ static int nr_setsockopt(struct socket *sock, int level, int optname, { struct sock *sk = sock->sk; struct nr_sock *nr = nr_sk(sk); - unsigned long opt; + unsigned int opt; if (level != SOL_NETROM) return 
-ENOPROTOOPT; @@ -306,18 +306,18 @@ static int nr_setsockopt(struct socket *sock, int level, int optname, if (optlen < sizeof(unsigned int)) return -EINVAL; - if (copy_from_sockptr(&opt, optval, sizeof(unsigned long))) + if (copy_from_sockptr(&opt, optval, sizeof(opt))) return -EFAULT; switch (optname) { case NETROM_T1: - if (opt < 1 || opt > ULONG_MAX / HZ) + if (opt < 1 || opt > UINT_MAX / HZ) return -EINVAL; nr->t1 = opt * HZ; return 0; case NETROM_T2: - if (opt < 1 || opt > ULONG_MAX / HZ) + if (opt < 1 || opt > UINT_MAX / HZ) return -EINVAL; nr->t2 = opt * HZ; return 0; @@ -329,13 +329,13 @@ static int nr_setsockopt(struct socket *sock, int level, int optname, return 0; case NETROM_T4: - if (opt < 1 || opt > ULONG_MAX / HZ) + if (opt < 1 || opt > UINT_MAX / HZ) return -EINVAL; nr->t4 = opt * HZ; return 0; case NETROM_IDLE: - if (opt > ULONG_MAX / (60 * HZ)) + if (opt > UINT_MAX / (60 * HZ)) return -EINVAL; nr->idle = opt * 60 * HZ; return 0; diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c index 6cfd30fc07985e69fb68e84b71f3c9fa64aa62b2..0b93a17b9f11f53395c1a2e6f1ebddb3720bb180 100644 --- a/net/nfc/llcp_sock.c +++ b/net/nfc/llcp_sock.c @@ -789,6 +789,11 @@ static int llcp_sock_sendmsg(struct socket *sock, struct msghdr *msg, lock_sock(sk); + if (!llcp_sock->local) { + release_sock(sk); + return -ENODEV; + } + if (sk->sk_type == SOCK_DGRAM) { DECLARE_SOCKADDR(struct sockaddr_nfc_llcp *, addr, msg->msg_name); diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index fc487f9812fc554003b8d6c8c3b36a7ea3d760df..525c1540f10e6484344135d1707ea7100c4fc91d 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -422,12 +422,43 @@ static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto, memcpy(addr, new_addr, sizeof(__be32[4])); } -static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask) +static void set_ipv6_dsfield(struct sk_buff *skb, struct ipv6hdr *nh, u8 ipv6_tclass, u8 mask) { + u8 old_ipv6_tclass = ipv6_get_dsfield(nh); + + ipv6_tclass = OVS_MASKED(old_ipv6_tclass, ipv6_tclass, mask); + + if (skb->ip_summed == CHECKSUM_COMPLETE) + csum_replace(&skb->csum, (__force __wsum)(old_ipv6_tclass << 12), + (__force __wsum)(ipv6_tclass << 12)); + + ipv6_change_dsfield(nh, ~mask, ipv6_tclass); +} + +static void set_ipv6_fl(struct sk_buff *skb, struct ipv6hdr *nh, u32 fl, u32 mask) +{ + u32 ofl; + + ofl = nh->flow_lbl[0] << 16 | nh->flow_lbl[1] << 8 | nh->flow_lbl[2]; + fl = OVS_MASKED(ofl, fl, mask); + /* Bits 21-24 are always unmasked, so this retains their values. 
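
Stepping back to the NET/ROM setsockopt change a few hunks above: the option is an unsigned int, so the kernel now copies exactly sizeof(opt) from userspace and bounds the value against UINT_MAX / HZ before multiplying, which both avoids over-reading the user buffer and keeps the HZ multiplication from overflowing. A small sketch of the same shape, for a hypothetical timer option (demo_set_timer_opt):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/sockptr.h>

static int demo_set_timer_opt(sockptr_t optval, unsigned int optlen,
			      unsigned long *timer_jiffies)
{
	unsigned int opt;

	if (optlen < sizeof(opt))
		return -EINVAL;
	/* copy exactly the documented width, never sizeof(unsigned long) */
	if (copy_from_sockptr(&opt, optval, sizeof(opt)))
		return -EFAULT;
	/* range-check with the matching limit so opt * HZ cannot overflow */
	if (opt < 1 || opt > UINT_MAX / HZ)
		return -EINVAL;

	*timer_jiffies = (unsigned long)opt * HZ;
	return 0;
}
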
*/ - OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16)); - OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8)); - OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask); + nh->flow_lbl[0] = (u8)(fl >> 16); + nh->flow_lbl[1] = (u8)(fl >> 8); + nh->flow_lbl[2] = (u8)fl; + + if (skb->ip_summed == CHECKSUM_COMPLETE) + csum_replace(&skb->csum, (__force __wsum)htonl(ofl), (__force __wsum)htonl(fl)); +} + +static void set_ipv6_ttl(struct sk_buff *skb, struct ipv6hdr *nh, u8 new_ttl, u8 mask) +{ + new_ttl = OVS_MASKED(nh->hop_limit, new_ttl, mask); + + if (skb->ip_summed == CHECKSUM_COMPLETE) + csum_replace(&skb->csum, (__force __wsum)(nh->hop_limit << 8), + (__force __wsum)(new_ttl << 8)); + nh->hop_limit = new_ttl; } static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl, @@ -545,18 +576,17 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key, } } if (mask->ipv6_tclass) { - ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass); + set_ipv6_dsfield(skb, nh, key->ipv6_tclass, mask->ipv6_tclass); flow_key->ip.tos = ipv6_get_dsfield(nh); } if (mask->ipv6_label) { - set_ipv6_fl(nh, ntohl(key->ipv6_label), + set_ipv6_fl(skb, nh, ntohl(key->ipv6_label), ntohl(mask->ipv6_label)); flow_key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL); } if (mask->ipv6_hlimit) { - OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit, - mask->ipv6_hlimit); + set_ipv6_ttl(skb, nh, key->ipv6_hlimit, mask->ipv6_hlimit); flow_key->ip.ttl = nh->hop_limit; } return 0; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index f78097aa403a8bea058145a29829392b7015661e..a31334b92be7e975330420f34298731efd8e9872 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1735,6 +1735,7 @@ static int fanout_add(struct sock *sk, struct fanout_args *args) match->prot_hook.dev = po->prot_hook.dev; match->prot_hook.func = packet_rcv_fanout; match->prot_hook.af_packet_priv = match; + match->prot_hook.af_packet_net = read_pnet(&match->net); match->prot_hook.id_match = match_fanout_group; match->max_num_members = args->max_num_members; list_add(&match->list, &fanout_list); @@ -1749,7 +1750,10 @@ static int fanout_add(struct sock *sk, struct fanout_args *args) err = -ENOSPC; if (refcount_read(&match->sk_ref) < match->max_num_members) { __dev_remove_pack(&po->prot_hook); - po->fanout = match; + + /* Paired with packet_setsockopt(PACKET_FANOUT_DATA) */ + WRITE_ONCE(po->fanout, match); + po->rollover = rollover; rollover = NULL; refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1); @@ -3323,6 +3327,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol, po->prot_hook.func = packet_rcv_spkt; po->prot_hook.af_packet_priv = sk; + po->prot_hook.af_packet_net = sock_net(sk); if (proto) { po->prot_hook.type = proto; @@ -3904,7 +3909,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, } case PACKET_FANOUT_DATA: { - if (!po->fanout) + /* Paired with the WRITE_ONCE() in fanout_add() */ + if (!READ_ONCE(po->fanout)) return -EINVAL; return fanout_set_data(po, optval, optlen); diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c index 6be2672a65eabebd5b8fc28d82ed9b3c58542005..df864e6922679140d5b7c8c3aadd65a92906e9e3 100644 --- a/net/rxrpc/call_event.c +++ b/net/rxrpc/call_event.c @@ -157,7 +157,7 @@ static void rxrpc_congestion_timeout(struct rxrpc_call *call) static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) { struct sk_buff *skb; - unsigned long resend_at, rto_j; + 
unsigned long resend_at; rxrpc_seq_t cursor, seq, top; ktime_t now, max_age, oldest, ack_ts; int ix; @@ -165,10 +165,8 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) _enter("{%d,%d}", call->tx_hard_ack, call->tx_top); - rto_j = call->peer->rto_j; - now = ktime_get_real(); - max_age = ktime_sub(now, jiffies_to_usecs(rto_j)); + max_age = ktime_sub(now, jiffies_to_usecs(call->peer->rto_j)); spin_lock_bh(&call->lock); @@ -213,7 +211,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) } resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest))); - resend_at += jiffies + rto_j; + resend_at += jiffies + rxrpc_get_rto_backoff(call->peer, retrans); WRITE_ONCE(call->resend_at, resend_at); if (unacked) diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c index 10f2bf2e9068abc15ef4ece90e7d6652194b4f77..a45c83f22236e2648c2fa4f97ebebb088361a071 100644 --- a/net/rxrpc/output.c +++ b/net/rxrpc/output.c @@ -468,7 +468,7 @@ done: if (call->peer->rtt_count > 1) { unsigned long nowj = jiffies, ack_lost_at; - ack_lost_at = rxrpc_get_rto_backoff(call->peer, retrans); + ack_lost_at = rxrpc_get_rto_backoff(call->peer, false); ack_lost_at += nowj; WRITE_ONCE(call->ack_lost_at, ack_lost_at); rxrpc_reduce_call_timer(call, ack_lost_at, nowj, diff --git a/net/sched/act_api.c b/net/sched/act_api.c index f613299ca7f0a668cc2c7579a4f0515e16af1d78..7b29aa1a3ce9a20ba165fb1030628d83c064a3bb 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -680,15 +680,24 @@ int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions, restart_act_graph: for (i = 0; i < nr_actions; i++) { const struct tc_action *a = actions[i]; + int repeat_ttl; if (jmp_prgcnt > 0) { jmp_prgcnt -= 1; continue; } + + repeat_ttl = 32; repeat: ret = a->ops->act(skb, a, res); - if (ret == TC_ACT_REPEAT) - goto repeat; /* we need a ttl - JHS */ + + if (unlikely(ret == TC_ACT_REPEAT)) { + if (--repeat_ttl != 0) + goto repeat; + /* suspicious opcode, stop pipeline */ + net_warn_ratelimited("TC_ACT_REPEAT abuse ?\n"); + return TC_ACT_OK; + } if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) { jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK; diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c index 812c3c70a53a04029ccf11b78d0c59d877fb2180..825b3e9b55f7e23900e55af586b45e826e1f5fbb 100644 --- a/net/sched/act_ct.c +++ b/net/sched/act_ct.c @@ -514,11 +514,6 @@ static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p, struct nf_conn *ct; u8 dir; - /* Previously seen or loopback */ - ct = nf_ct_get(skb, &ctinfo); - if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED) - return false; - switch (family) { case NFPROTO_IPV4: if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph)) diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index cb1331b3574512932890bef65df1e1deacf5f88d..9a789a057a741ba9ffe58dbad5104284406347de 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -1045,7 +1045,7 @@ static int __tcf_qdisc_find(struct net *net, struct Qdisc **q, /* Find qdisc */ if (!*parent) { - *q = dev->qdisc; + *q = rcu_dereference(dev->qdisc); *parent = (*q)->handle; } else { *q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent)); @@ -1954,9 +1954,9 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n, bool prio_allocate; u32 parent; u32 chain_index; - struct Qdisc *q = NULL; + struct Qdisc *q; struct tcf_chain_info chain_info; - struct tcf_chain *chain = NULL; + struct tcf_chain *chain; struct tcf_block *block; struct tcf_proto *tp; unsigned long cl; @@ -1984,6 +1984,8 @@ replay: tp = 
NULL; cl = 0; block = NULL; + q = NULL; + chain = NULL; if (prio == 0) { /* If no priority is provided by the user, @@ -2589,7 +2591,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) parent = tcm->tcm_parent; if (!parent) - q = dev->qdisc; + q = rtnl_dereference(dev->qdisc); else q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); if (!q) @@ -2804,8 +2806,8 @@ static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n, struct tcmsg *t; u32 parent; u32 chain_index; - struct Qdisc *q = NULL; - struct tcf_chain *chain = NULL; + struct Qdisc *q; + struct tcf_chain *chain; struct tcf_block *block; unsigned long cl; int err; @@ -2815,6 +2817,7 @@ static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n, return -EPERM; replay: + q = NULL; err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack); if (err < 0) @@ -2974,7 +2977,7 @@ static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb) parent = tcm->tcm_parent; if (!parent) { - q = dev->qdisc; + q = rtnl_dereference(dev->qdisc); parent = q->handle; } else { q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 7b24582a8a1643044a98e51badec7e14b529ba42..6e18aa41778285e7fa97e606f46cfc67e0f03236 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -301,7 +301,7 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle) if (!handle) return NULL; - q = qdisc_match_from_root(dev->qdisc, handle); + q = qdisc_match_from_root(rtnl_dereference(dev->qdisc), handle); if (q) goto out; @@ -320,7 +320,7 @@ struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle) if (!handle) return NULL; - q = qdisc_match_from_root(dev->qdisc, handle); + q = qdisc_match_from_root(rcu_dereference(dev->qdisc), handle); if (q) goto out; @@ -1082,10 +1082,10 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, skip: if (!ingress) { notify_and_destroy(net, skb, n, classid, - dev->qdisc, new); + rtnl_dereference(dev->qdisc), new); if (new && !new->ops->attach) qdisc_refcount_inc(new); - dev->qdisc = new ? : &noop_qdisc; + rcu_assign_pointer(dev->qdisc, new ? 
: &noop_qdisc); if (new && new->ops->attach) new->ops->attach(new); @@ -1204,7 +1204,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev, err = -ENOENT; if (!ops) { - NL_SET_ERR_MSG(extack, "Specified qdisc not found"); + NL_SET_ERR_MSG(extack, "Specified qdisc kind is unknown"); goto err_out; } @@ -1460,7 +1460,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, q = dev_ingress_queue(dev)->qdisc_sleeping; } } else { - q = dev->qdisc; + q = rtnl_dereference(dev->qdisc); } if (!q) { NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device"); @@ -1549,7 +1549,7 @@ replay: q = dev_ingress_queue(dev)->qdisc_sleeping; } } else { - q = dev->qdisc; + q = rtnl_dereference(dev->qdisc); } /* It may be default qdisc, ignore it */ @@ -1771,7 +1771,8 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) s_q_idx = 0; q_idx = 0; - if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx, + if (tc_dump_qdisc_root(rtnl_dereference(dev->qdisc), + skb, cb, &q_idx, s_q_idx, true, tca[TCA_DUMP_INVISIBLE]) < 0) goto done; @@ -2047,7 +2048,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, } else if (qid1) { qid = qid1; } else if (qid == 0) - qid = dev->qdisc->handle; + qid = rtnl_dereference(dev->qdisc)->handle; /* Now qid is genuine qdisc handle consistent * both with parent and child. @@ -2058,7 +2059,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, portid = TC_H_MAKE(qid, portid); } else { if (qid == 0) - qid = dev->qdisc->handle; + qid = rtnl_dereference(dev->qdisc)->handle; } /* OK. Locate qdisc */ @@ -2219,7 +2220,8 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) s_t = cb->args[0]; t = 0; - if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t, true) < 0) + if (tc_dump_tclass_root(rtnl_dereference(dev->qdisc), + skb, tcm, cb, &t, s_t, true) < 0) goto done; dev_queue = dev_ingress_queue(dev); diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 6a9c1a39874a0f239797a65941d5591d509c675d..5d5391adb667cb508aaf3d7eec0a5efb990c52e9 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -1088,30 +1088,33 @@ static void attach_default_qdiscs(struct net_device *dev) if (!netif_is_multiqueue(dev) || dev->priv_flags & IFF_NO_QUEUE) { netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); - dev->qdisc = txq->qdisc_sleeping; - qdisc_refcount_inc(dev->qdisc); + qdisc = txq->qdisc_sleeping; + rcu_assign_pointer(dev->qdisc, qdisc); + qdisc_refcount_inc(qdisc); } else { qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL); if (qdisc) { - dev->qdisc = qdisc; + rcu_assign_pointer(dev->qdisc, qdisc); qdisc->ops->attach(qdisc); } } + qdisc = rtnl_dereference(dev->qdisc); /* Detect default qdisc setup/init failed and fallback to "noqueue" */ - if (dev->qdisc == &noop_qdisc) { + if (qdisc == &noop_qdisc) { netdev_warn(dev, "default qdisc (%s) fail, fallback to %s\n", default_qdisc_ops->id, noqueue_qdisc_ops.id); dev->priv_flags |= IFF_NO_QUEUE; netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); - dev->qdisc = txq->qdisc_sleeping; - qdisc_refcount_inc(dev->qdisc); + qdisc = txq->qdisc_sleeping; + rcu_assign_pointer(dev->qdisc, qdisc); + qdisc_refcount_inc(qdisc); dev->priv_flags ^= IFF_NO_QUEUE; } #ifdef CONFIG_NET_SCHED - if (dev->qdisc != &noop_qdisc) - qdisc_hash_add(dev->qdisc, false); + if (qdisc != &noop_qdisc) + qdisc_hash_add(qdisc, false); #endif } @@ -1141,7 +1144,7 @@ void dev_activate(struct net_device *dev) * and 
noqueue_qdisc for virtual interfaces */ - if (dev->qdisc == &noop_qdisc) + if (rtnl_dereference(dev->qdisc) == &noop_qdisc) attach_default_qdiscs(dev); if (!netif_carrier_ok(dev)) @@ -1306,7 +1309,7 @@ static int qdisc_change_tx_queue_len(struct net_device *dev, void dev_qdisc_change_real_num_tx(struct net_device *dev, unsigned int new_real_tx) { - struct Qdisc *qdisc = dev->qdisc; + struct Qdisc *qdisc = rtnl_dereference(dev->qdisc); if (qdisc->ops->change_real_num_tx) qdisc->ops->change_real_num_tx(qdisc, new_real_tx); @@ -1346,7 +1349,7 @@ static void dev_init_scheduler_queue(struct net_device *dev, void dev_init_scheduler(struct net_device *dev) { - dev->qdisc = &noop_qdisc; + rcu_assign_pointer(dev->qdisc, &noop_qdisc); netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc); if (dev_ingress_queue(dev)) dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc); @@ -1374,8 +1377,8 @@ void dev_shutdown(struct net_device *dev) netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc); if (dev_ingress_queue(dev)) shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc); - qdisc_put(dev->qdisc); - dev->qdisc = &noop_qdisc; + qdisc_put(rtnl_dereference(dev->qdisc)); + rcu_assign_pointer(dev->qdisc, &noop_qdisc); WARN_ON(timer_pending(&dev->watchdog_timer)); } @@ -1386,6 +1389,7 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r, { memset(r, 0, sizeof(*r)); r->overhead = conf->overhead; + r->mpu = conf->mpu; r->rate_bytes_ps = max_t(u64, conf->rate, rate64); r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK); r->mult = 1; diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 2a22dc85951eee320457b3cfb84f3f247799c754..4eb9ef9c28003e718ef5572da6af23137fb2a50d 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -1002,16 +1002,11 @@ void smc_smcd_terminate_all(struct smcd_dev *smcd) /* Called when an SMCR device is removed or the smc module is unloaded. * If smcibdev is given, all SMCR link groups using this device are terminated. * If smcibdev is NULL, all SMCR link groups are terminated. - * - * We must wait here for QPs been destroyed before we destroy the CQs, - * or we won't received any CQEs and cdc_pend_tx_wr cannot reach 0 thus - * smc_sock cannot be released. 
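
The sch_api/sch_generic hunks above convert every dev->qdisc access to the RCU helpers, presumably because the field is now annotated __rcu in struct net_device (that hunk is outside this excerpt): writers publish with rcu_assign_pointer() under RTNL, RTNL-protected readers use rtnl_dereference(), and lockless readers use rcu_dereference() inside an RCU read-side section. A minimal sketch of that accessor pattern with invented types (demo_dev, demo_qdisc):

#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>

struct demo_qdisc;

struct demo_dev {
	struct demo_qdisc __rcu *qdisc;
};

static void demo_attach_qdisc(struct demo_dev *d, struct demo_qdisc *q)
{
	ASSERT_RTNL();
	rcu_assign_pointer(d->qdisc, q);	/* publish after q is initialised */
}

static struct demo_qdisc *demo_qdisc_rtnl(struct demo_dev *d)
{
	return rtnl_dereference(d->qdisc);	/* caller holds RTNL */
}

static struct demo_qdisc *demo_qdisc_rcu(struct demo_dev *d)
{
	return rcu_dereference(d->qdisc);	/* caller holds rcu_read_lock() */
}
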
*/ void smc_smcr_terminate_all(struct smc_ib_device *smcibdev) { struct smc_link_group *lgr, *lg; LIST_HEAD(lgr_free_list); - LIST_HEAD(lgr_linkdown_list); int i; spin_lock_bh(&smc_lgr_list.lock); @@ -1023,7 +1018,7 @@ void smc_smcr_terminate_all(struct smc_ib_device *smcibdev) list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) { for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { if (lgr->lnk[i].smcibdev == smcibdev) - list_move_tail(&lgr->list, &lgr_linkdown_list); + smcr_link_down_cond_sched(&lgr->lnk[i]); } } } @@ -1035,16 +1030,6 @@ void smc_smcr_terminate_all(struct smc_ib_device *smcibdev) __smc_lgr_terminate(lgr, false); } - list_for_each_entry_safe(lgr, lg, &lgr_linkdown_list, list) { - for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { - if (lgr->lnk[i].smcibdev == smcibdev) { - mutex_lock(&lgr->llc_conf_mutex); - smcr_link_down_cond(&lgr->lnk[i]); - mutex_unlock(&lgr->llc_conf_mutex); - } - } - } - if (smcibdev) { if (atomic_read(&smcibdev->lnk_cnt)) wait_event(smcibdev->lnks_deleted, diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c index f3c18b991d35c42442b332fbe280e976a8fd6517..9007c7e3bae4e89470ea7fd72061a5f777910f60 100644 --- a/net/smc/smc_pnet.c +++ b/net/smc/smc_pnet.c @@ -112,7 +112,7 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name) pnettable = &sn->pnettable; /* remove table entry */ - write_lock(&pnettable->lock); + mutex_lock(&pnettable->lock); list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) { if (!pnet_name || @@ -130,7 +130,7 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name) rc = 0; } } - write_unlock(&pnettable->lock); + mutex_unlock(&pnettable->lock); /* if this is not the initial namespace, stop here */ if (net != &init_net) @@ -191,7 +191,7 @@ static int smc_pnet_add_by_ndev(struct net_device *ndev) sn = net_generic(net, smc_net_id); pnettable = &sn->pnettable; - write_lock(&pnettable->lock); + mutex_lock(&pnettable->lock); list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) { if (pnetelem->type == SMC_PNET_ETH && !pnetelem->ndev && !strncmp(pnetelem->eth_name, ndev->name, IFNAMSIZ)) { @@ -205,7 +205,7 @@ static int smc_pnet_add_by_ndev(struct net_device *ndev) break; } } - write_unlock(&pnettable->lock); + mutex_unlock(&pnettable->lock); return rc; } @@ -223,7 +223,7 @@ static int smc_pnet_remove_by_ndev(struct net_device *ndev) sn = net_generic(net, smc_net_id); pnettable = &sn->pnettable; - write_lock(&pnettable->lock); + mutex_lock(&pnettable->lock); list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) { if (pnetelem->type == SMC_PNET_ETH && pnetelem->ndev == ndev) { dev_put(pnetelem->ndev); @@ -236,7 +236,7 @@ static int smc_pnet_remove_by_ndev(struct net_device *ndev) break; } } - write_unlock(&pnettable->lock); + mutex_unlock(&pnettable->lock); return rc; } @@ -371,7 +371,7 @@ static int smc_pnet_add_eth(struct smc_pnettable *pnettable, struct net *net, rc = -EEXIST; new_netdev = true; - write_lock(&pnettable->lock); + mutex_lock(&pnettable->lock); list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) { if (tmp_pe->type == SMC_PNET_ETH && !strncmp(tmp_pe->eth_name, eth_name, IFNAMSIZ)) { @@ -381,9 +381,9 @@ static int smc_pnet_add_eth(struct smc_pnettable *pnettable, struct net *net, } if (new_netdev) { list_add_tail(&new_pe->list, &pnettable->pnetlist); - write_unlock(&pnettable->lock); + mutex_unlock(&pnettable->lock); } else { - write_unlock(&pnettable->lock); + mutex_unlock(&pnettable->lock); kfree(new_pe); goto out_put; } @@ -445,7 +445,7 
@@ static int smc_pnet_add_ib(struct smc_pnettable *pnettable, char *ib_name, new_pe->ib_port = ib_port; new_ibdev = true; - write_lock(&pnettable->lock); + mutex_lock(&pnettable->lock); list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) { if (tmp_pe->type == SMC_PNET_IB && !strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX)) { @@ -455,9 +455,9 @@ static int smc_pnet_add_ib(struct smc_pnettable *pnettable, char *ib_name, } if (new_ibdev) { list_add_tail(&new_pe->list, &pnettable->pnetlist); - write_unlock(&pnettable->lock); + mutex_unlock(&pnettable->lock); } else { - write_unlock(&pnettable->lock); + mutex_unlock(&pnettable->lock); kfree(new_pe); } return (new_ibdev) ? 0 : -EEXIST; @@ -602,7 +602,7 @@ static int _smc_pnet_dump(struct net *net, struct sk_buff *skb, u32 portid, pnettable = &sn->pnettable; /* dump pnettable entries */ - read_lock(&pnettable->lock); + mutex_lock(&pnettable->lock); list_for_each_entry(pnetelem, &pnettable->pnetlist, list) { if (pnetid && !smc_pnet_match(pnetelem->pnet_name, pnetid)) continue; @@ -617,7 +617,7 @@ static int _smc_pnet_dump(struct net *net, struct sk_buff *skb, u32 portid, break; } } - read_unlock(&pnettable->lock); + mutex_unlock(&pnettable->lock); return idx; } @@ -859,7 +859,7 @@ int smc_pnet_net_init(struct net *net) struct smc_pnetids_ndev *pnetids_ndev = &sn->pnetids_ndev; INIT_LIST_HEAD(&pnettable->pnetlist); - rwlock_init(&pnettable->lock); + mutex_init(&pnettable->lock); INIT_LIST_HEAD(&pnetids_ndev->list); rwlock_init(&pnetids_ndev->lock); @@ -939,7 +939,7 @@ static int smc_pnet_find_ndev_pnetid_by_table(struct net_device *ndev, sn = net_generic(net, smc_net_id); pnettable = &sn->pnettable; - read_lock(&pnettable->lock); + mutex_lock(&pnettable->lock); list_for_each_entry(pnetelem, &pnettable->pnetlist, list) { if (pnetelem->type == SMC_PNET_ETH && ndev == pnetelem->ndev) { /* get pnetid of netdev device */ @@ -948,7 +948,7 @@ static int smc_pnet_find_ndev_pnetid_by_table(struct net_device *ndev, break; } } - read_unlock(&pnettable->lock); + mutex_unlock(&pnettable->lock); return rc; } @@ -1129,7 +1129,7 @@ int smc_pnetid_by_table_ib(struct smc_ib_device *smcibdev, u8 ib_port) sn = net_generic(&init_net, smc_net_id); pnettable = &sn->pnettable; - read_lock(&pnettable->lock); + mutex_lock(&pnettable->lock); list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) { if (tmp_pe->type == SMC_PNET_IB && !strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX) && @@ -1139,7 +1139,7 @@ int smc_pnetid_by_table_ib(struct smc_ib_device *smcibdev, u8 ib_port) break; } } - read_unlock(&pnettable->lock); + mutex_unlock(&pnettable->lock); return rc; } @@ -1158,7 +1158,7 @@ int smc_pnetid_by_table_smcd(struct smcd_dev *smcddev) sn = net_generic(&init_net, smc_net_id); pnettable = &sn->pnettable; - read_lock(&pnettable->lock); + mutex_lock(&pnettable->lock); list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) { if (tmp_pe->type == SMC_PNET_IB && !strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX)) { @@ -1167,7 +1167,7 @@ int smc_pnetid_by_table_smcd(struct smcd_dev *smcddev) break; } } - read_unlock(&pnettable->lock); + mutex_unlock(&pnettable->lock); return rc; } diff --git a/net/smc/smc_pnet.h b/net/smc/smc_pnet.h index 14039272f7e4263a3d86f29d0c4f048731af387f..80a88eea494918663f0263b9577c0ca49dcf1542 100644 --- a/net/smc/smc_pnet.h +++ b/net/smc/smc_pnet.h @@ -29,7 +29,7 @@ struct smc_link_group; * @pnetlist: List of PNETIDs */ struct smc_pnettable { - rwlock_t lock; + struct mutex lock; struct list_head pnetlist; }; diff --git 
a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index eadc0ede928c320a7afae33e1821d7e1ce63f246..5f854ffbab925b20b49516a35740168c4a3701e0 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -599,9 +599,9 @@ static int __rpc_rmdir(struct inode *dir, struct dentry *dentry) dget(dentry); ret = simple_rmdir(dir, dentry); + d_drop(dentry); if (!ret) fsnotify_rmdir(dir, dentry); - d_delete(dentry); dput(dentry); return ret; } @@ -612,9 +612,9 @@ static int __rpc_unlink(struct inode *dir, struct dentry *dentry) dget(dentry); ret = simple_unlink(dir, dentry); + d_drop(dentry); if (!ret) fsnotify_unlink(dir, dentry); - d_delete(dentry); dput(dentry); return ret; } diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 25554260a5931541288c2abb72184fa44e847462..dcc1992b14d76a9ecdfc5affa12ccb3a8258eec9 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -449,6 +449,7 @@ static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt) IB_POLL_WORKQUEUE); if (IS_ERR(ep->re_attr.send_cq)) { rc = PTR_ERR(ep->re_attr.send_cq); + ep->re_attr.send_cq = NULL; goto out_destroy; } @@ -457,6 +458,7 @@ static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt) IB_POLL_WORKQUEUE); if (IS_ERR(ep->re_attr.recv_cq)) { rc = PTR_ERR(ep->re_attr.recv_cq); + ep->re_attr.recv_cq = NULL; goto out_destroy; } ep->re_receive_count = 0; @@ -495,6 +497,7 @@ static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt) ep->re_pd = ib_alloc_pd(device, 0); if (IS_ERR(ep->re_pd)) { rc = PTR_ERR(ep->re_pd); + ep->re_pd = NULL; goto out_destroy; } diff --git a/net/tipc/link.c b/net/tipc/link.c index 29591955d08a5367d8274dd3b1f81f2455fe400b..fb835a3822f49d5d509b5b73f552d61328884197 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -2159,7 +2159,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, struct tipc_msg *hdr = buf_msg(skb); struct tipc_gap_ack_blks *ga = NULL; bool reply = msg_probe(hdr), retransmitted = false; - u16 dlen = msg_data_sz(hdr), glen = 0; + u32 dlen = msg_data_sz(hdr), glen = 0; u16 peers_snd_nxt = msg_next_sent(hdr); u16 peers_tol = msg_link_tolerance(hdr); u16 peers_prio = msg_linkprio(hdr); @@ -2173,6 +2173,10 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, void *data; trace_tipc_proto_rcv(skb, false, l->name); + + if (dlen > U16_MAX) + goto exit; + if (tipc_link_is_blocked(l) || !xmitq) goto exit; @@ -2268,7 +2272,8 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, /* Receive Gap ACK blocks from peer if any */ glen = tipc_get_gap_ack_blks(&ga, l, hdr, true); - + if(glen > dlen) + break; tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr, &l->mon_state, l->bearer_id); diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c index 6dce2abf436eeddd4983e1fe09417338af87a97f..a37190da5a504878d2fbda8f6b11bbe0e84760a8 100644 --- a/net/tipc/monitor.c +++ b/net/tipc/monitor.c @@ -465,6 +465,8 @@ void tipc_mon_rcv(struct net *net, void *data, u16 dlen, u32 addr, state->probing = false; /* Sanity check received domain record */ + if (new_member_cnt > MAX_MON_DOMAIN) + return; if (dlen < dom_rec_len(arrv_dom, 0)) return; if (dlen != dom_rec_len(arrv_dom, new_member_cnt)) diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index fe4edce459ad4a2d657bc854f5668e5defcd9b8d..a757fe28bcb5f064858c82cee9171cf4bd78e7ae 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c @@ -315,7 +315,7 @@ static bool tipc_update_nametbl(struct net *net, struct distr_item *i, 
pr_warn_ratelimited("Failed to remove binding %u,%u from %x\n", type, lower, node); } else { - pr_warn("Unrecognized name table message received\n"); + pr_warn_ratelimited("Unknown name table message received\n"); } return false; } diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index f6a6acef42235e170dfd1806e657022ce75bb093..54c5328f492d2e7bc3870a624cc0236e159168eb 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -931,7 +931,7 @@ static int __tipc_nl_add_nametable_publ(struct tipc_nl_msg *msg, list_for_each_entry(p, &sr->all_publ, all_publ) if (p->key == *last_key) break; - if (p->key != *last_key) + if (list_entry_is_head(p, &sr->all_publ, all_publ)) return -EPIPE; } else { p = list_first_entry(&sr->all_publ, diff --git a/net/tipc/socket.c b/net/tipc/socket.c index ce957ee5383c4c8907645a3c5012935fd6c9640d..8d2c98531af456389f0087660c4a83a2a0e0b4f7 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -3743,7 +3743,7 @@ static int __tipc_nl_list_sk_publ(struct sk_buff *skb, if (p->key == *last_publ) break; } - if (p->key != *last_publ) { + if (list_entry_is_head(p, &tsk->publications, binding_sock)) { /* We never set seq or call nl_dump_check_consistent() * this means that setting prev_seq here will cause the * consistence check to fail in the netlink callback diff --git a/net/unix/garbage.c b/net/unix/garbage.c index 12e2ddaf887f204a091f157905f270046fc384a6..d45d5366115a769b21bfc1db5a67f7d53c3fa9b8 100644 --- a/net/unix/garbage.c +++ b/net/unix/garbage.c @@ -192,8 +192,11 @@ void wait_for_unix_gc(void) { /* If number of inflight sockets is insane, * force a garbage collect right now. + * Paired with the WRITE_ONCE() in unix_inflight(), + * unix_notinflight() and gc_in_progress(). */ - if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress) + if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC && + !READ_ONCE(gc_in_progress)) unix_gc(); wait_event(unix_gc_wait, gc_in_progress == false); } @@ -213,7 +216,9 @@ void unix_gc(void) if (gc_in_progress) goto out; - gc_in_progress = true; + /* Paired with READ_ONCE() in wait_for_unix_gc(). */ + WRITE_ONCE(gc_in_progress, true); + /* First, select candidates for garbage collection. Only * in-flight sockets are considered, and from those only ones * which don't have any external reference. @@ -299,7 +304,10 @@ void unix_gc(void) /* All candidates should have been detached by now. */ BUG_ON(!list_empty(&gc_candidates)); - gc_in_progress = false; + + /* Paired with READ_ONCE() in wait_for_unix_gc(). 
*/ + WRITE_ONCE(gc_in_progress, false); + wake_up(&unix_gc_wait); out: diff --git a/net/unix/scm.c b/net/unix/scm.c index 052ae709ce2899e74ebb005d8886e42ccbf8b849..aa27a02478dc1a7e4022f77e6ea7ac55f40b95c7 100644 --- a/net/unix/scm.c +++ b/net/unix/scm.c @@ -60,7 +60,8 @@ void unix_inflight(struct user_struct *user, struct file *fp) } else { BUG_ON(list_empty(&u->link)); } - unix_tot_inflight++; + /* Paired with READ_ONCE() in wait_for_unix_gc() */ + WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1); } user->unix_inflight++; spin_unlock(&unix_gc_lock); @@ -80,7 +81,8 @@ void unix_notinflight(struct user_struct *user, struct file *fp) if (atomic_long_dec_and_test(&u->inflight)) list_del_init(&u->link); - unix_tot_inflight--; + /* Paired with READ_ONCE() in wait_for_unix_gc() */ + WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1); } user->unix_inflight--; spin_unlock(&unix_gc_lock); diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 7fe36dbcbe1875bc64e0a4326e37884e8be81d20..005aa701f4d522023c713149e360914a8e44e375 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -1357,6 +1357,7 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr, sk->sk_state = sk->sk_state == TCP_ESTABLISHED ? TCP_CLOSING : TCP_CLOSE; sock->state = SS_UNCONNECTED; vsock_transport_cancel_pkt(vsk); + vsock_remove_connected(vsk); goto out_wait; } else if (timeout == 0) { err = -ETIMEDOUT; diff --git a/net/xfrm/xfrm_compat.c b/net/xfrm/xfrm_compat.c index 2bf26939016316a70eb8dcc35adf7f582cb2e84e..a0f62fa02e06e0aa97901aaf226dc84895f6a8ec 100644 --- a/net/xfrm/xfrm_compat.c +++ b/net/xfrm/xfrm_compat.c @@ -127,6 +127,7 @@ static const struct nla_policy compat_policy[XFRMA_MAX+1] = { [XFRMA_SET_MARK] = { .type = NLA_U32 }, [XFRMA_SET_MARK_MASK] = { .type = NLA_U32 }, [XFRMA_IF_ID] = { .type = NLA_U32 }, + [XFRMA_MTIMER_THRESH] = { .type = NLA_U32 }, }; static struct nlmsghdr *xfrm_nlmsg_put_compat(struct sk_buff *skb, @@ -274,9 +275,10 @@ static int xfrm_xlate64_attr(struct sk_buff *dst, const struct nlattr *src) case XFRMA_SET_MARK: case XFRMA_SET_MARK_MASK: case XFRMA_IF_ID: + case XFRMA_MTIMER_THRESH: return xfrm_nla_cpy(dst, src, nla_len(src)); default: - BUILD_BUG_ON(XFRMA_MAX != XFRMA_IF_ID); + BUILD_BUG_ON(XFRMA_MAX != XFRMA_MTIMER_THRESH); pr_warn_once("unsupported nla_type %d\n", src->nla_type); return -EOPNOTSUPP; } @@ -431,7 +433,7 @@ static int xfrm_xlate32_attr(void *dst, const struct nlattr *nla, int err; if (type > XFRMA_MAX) { - BUILD_BUG_ON(XFRMA_MAX != XFRMA_IF_ID); + BUILD_BUG_ON(XFRMA_MAX != XFRMA_MTIMER_THRESH); NL_SET_ERR_MSG(extack, "Bad attribute"); return -EOPNOTSUPP; } diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c index e9ce23343f5cac44703f5394fc0f8c2baee82e49..e1fae61a5bb90d8dd16f286529f716db18810c74 100644 --- a/net/xfrm/xfrm_interface.c +++ b/net/xfrm/xfrm_interface.c @@ -643,11 +643,16 @@ static int xfrmi_newlink(struct net *src_net, struct net_device *dev, struct netlink_ext_ack *extack) { struct net *net = dev_net(dev); - struct xfrm_if_parms p; + struct xfrm_if_parms p = {}; struct xfrm_if *xi; int err; xfrmi_netlink_parms(data, &p); + if (!p.if_id) { + NL_SET_ERR_MSG(extack, "if_id must be non zero"); + return -EINVAL; + } + xi = xfrmi_locate(net, &p); if (xi) return -EEXIST; @@ -672,7 +677,12 @@ static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[], { struct xfrm_if *xi = netdev_priv(dev); struct net *net = xi->net; - struct xfrm_if_parms p; + struct xfrm_if_parms p = {}; + + if 
(!p.if_id) { + NL_SET_ERR_MSG(extack, "if_id must be non zero"); + return -EINVAL; + } xfrmi_netlink_parms(data, &p); xi = xfrmi_locate(net, &p); diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 3a9831c05ec71c7733be2b1742a82115a1a34cdf..c4a195cb36817ccbadd630dd2f798bd9bcc500fd 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -31,8 +31,10 @@ #include <linux/if_tunnel.h> #include <net/dst.h> #include <net/flow.h> +#include <net/inet_ecn.h> #include <net/xfrm.h> #include <net/ip.h> +#include <net/gre.h> #if IS_ENABLED(CONFIG_IPV6_MIP6) #include <net/mip6.h> #endif @@ -3293,7 +3295,7 @@ decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse) fl4->flowi4_proto = iph->protocol; fl4->daddr = reverse ? iph->saddr : iph->daddr; fl4->saddr = reverse ? iph->daddr : iph->saddr; - fl4->flowi4_tos = iph->tos; + fl4->flowi4_tos = iph->tos & ~INET_ECN_MASK; if (!ip_is_fragment(iph)) { switch (iph->protocol) { @@ -3455,6 +3457,26 @@ decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse) } fl6->flowi6_proto = nexthdr; return; + case IPPROTO_GRE: + if (!onlyproto && + (nh + offset + 12 < skb->data || + pskb_may_pull(skb, nh + offset + 12 - skb->data))) { + struct gre_base_hdr *gre_hdr; + __be32 *gre_key; + + nh = skb_network_header(skb); + gre_hdr = (struct gre_base_hdr *)(nh + offset); + gre_key = (__be32 *)(gre_hdr + 1); + + if (gre_hdr->flags & GRE_KEY) { + if (gre_hdr->flags & GRE_CSUM) + gre_key++; + fl6->fl6_gre_key = *gre_key; + } + } + fl6->flowi6_proto = nexthdr; + return; + #if IS_ENABLED(CONFIG_IPV6_MIP6) case IPPROTO_MH: offset += ipv6_optlen(exthdr); diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index c158e70e8ae1020eb63dd09b632dd38a4037138a..65e2805fa113ab0d46a95cd541851838d204e97a 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -1557,6 +1557,9 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, x->km.seq = orig->km.seq; x->replay = orig->replay; x->preplay = orig->preplay; + x->mapping_maxage = orig->mapping_maxage; + x->new_mapping = 0; + x->new_mapping_sport = 0; return x; @@ -2208,7 +2211,7 @@ int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol) } EXPORT_SYMBOL(km_query); -int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport) +static int __km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport) { int err = -EINVAL; struct xfrm_mgr *km; @@ -2223,6 +2226,24 @@ int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport) rcu_read_unlock(); return err; } + +int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport) +{ + int ret = 0; + + if (x->mapping_maxage) { + if ((jiffies / HZ - x->new_mapping) > x->mapping_maxage || + x->new_mapping_sport != sport) { + x->new_mapping_sport = sport; + x->new_mapping = jiffies / HZ; + ret = __km_new_mapping(x, ipaddr, sport); + } + } else { + ret = __km_new_mapping(x, ipaddr, sport); + } + + return ret; +} EXPORT_SYMBOL(km_new_mapping); void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid) diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 6f97665b632ede2263c9ddcd1c07351e523be079..d0fdfbf4c5f72e0db3ff6ac1802af3a80c226297 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -282,6 +282,10 @@ static int verify_newsa_info(struct xfrm_usersa_info *p, err = 0; + if (attrs[XFRMA_MTIMER_THRESH]) + if (!attrs[XFRMA_ENCAP]) + err = -EINVAL; + out: return err; } @@ -521,6 +525,7 @@ static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs, struct nlattr *lt = attrs[XFRMA_LTIME_VAL]; struct 
nlattr *et = attrs[XFRMA_ETIMER_THRESH]; struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH]; + struct nlattr *mt = attrs[XFRMA_MTIMER_THRESH]; if (re) { struct xfrm_replay_state_esn *replay_esn; @@ -552,6 +557,9 @@ static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs, if (rt) x->replay_maxdiff = nla_get_u32(rt); + + if (mt) + x->mapping_maxage = nla_get_u32(mt); } static void xfrm_smark_init(struct nlattr **attrs, struct xfrm_mark *m) @@ -621,8 +629,13 @@ static struct xfrm_state *xfrm_state_construct(struct net *net, xfrm_smark_init(attrs, &x->props.smark); - if (attrs[XFRMA_IF_ID]) + if (attrs[XFRMA_IF_ID]) { x->if_id = nla_get_u32(attrs[XFRMA_IF_ID]); + if (!x->if_id) { + err = -EINVAL; + goto error; + } + } err = __xfrm_init_state(x, false, attrs[XFRMA_OFFLOAD_DEV]); if (err) @@ -964,8 +977,13 @@ static int copy_to_user_state_extra(struct xfrm_state *x, if (ret) goto out; } - if (x->security) + if (x->security) { ret = copy_sec_ctx(x->security, skb); + if (ret) + goto out; + } + if (x->mapping_maxage) + ret = nla_put_u32(skb, XFRMA_MTIMER_THRESH, x->mapping_maxage); out: return ret; } @@ -1353,8 +1371,13 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh, mark = xfrm_mark_get(attrs, &m); - if (attrs[XFRMA_IF_ID]) + if (attrs[XFRMA_IF_ID]) { if_id = nla_get_u32(attrs[XFRMA_IF_ID]); + if (!if_id) { + err = -EINVAL; + goto out_noput; + } + } if (p->info.seq) { x = xfrm_find_acq_byseq(net, mark, p->info.seq); @@ -1667,8 +1690,13 @@ static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_us xfrm_mark_get(attrs, &xp->mark); - if (attrs[XFRMA_IF_ID]) + if (attrs[XFRMA_IF_ID]) { xp->if_id = nla_get_u32(attrs[XFRMA_IF_ID]); + if (!xp->if_id) { + err = -EINVAL; + goto error; + } + } return xp; error: @@ -2898,7 +2926,7 @@ static inline unsigned int xfrm_sa_len(struct xfrm_state *x) if (x->props.extra_flags) l += nla_total_size(sizeof(x->props.extra_flags)); if (x->xso.dev) - l += nla_total_size(sizeof(x->xso)); + l += nla_total_size(sizeof(struct xfrm_user_offload)); if (x->props.smark.v | x->props.smark.m) { l += nla_total_size(sizeof(x->props.smark.v)); l += nla_total_size(sizeof(x->props.smark.m)); @@ -2909,6 +2937,9 @@ static inline unsigned int xfrm_sa_len(struct xfrm_state *x) /* Must count x->lastused as it may become non-zero behind our back. */ l += nla_total_size_64bit(sizeof(u64)); + if (x->mapping_maxage) + l += nla_total_size(sizeof(x->mapping_maxage)); + return l; } diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn index 6baee1200615d53ec7d783108a456825b385d5e0..23d3967786b9ff952f4b6fed4cdaf9c46d5cbf1c 100644 --- a/scripts/Makefile.extrawarn +++ b/scripts/Makefile.extrawarn @@ -51,6 +51,7 @@ KBUILD_CFLAGS += -Wno-sign-compare KBUILD_CFLAGS += -Wno-format-zero-length KBUILD_CFLAGS += $(call cc-disable-warning, pointer-to-enum-cast) KBUILD_CFLAGS += -Wno-tautological-constant-out-of-range-compare +KBUILD_CFLAGS += $(call cc-disable-warning, unaligned-access) endif endif diff --git a/scripts/dtc/dtx_diff b/scripts/dtc/dtx_diff index d3422ee15e300bc76af3b15adc682aacf2d4e1ee..f2bbde4bba86bc70b27bb5a992a262c1997db811 100755 --- a/scripts/dtc/dtx_diff +++ b/scripts/dtc/dtx_diff @@ -59,12 +59,8 @@ Otherwise DTx is treated as a dts source file (aka .dts). or '/include/' to be processed. If DTx_1 and DTx_2 are in different architectures, then this script - may not work since \${ARCH} is part of the include path. 
Two possible - workarounds: - - `basename $0` \\ - <(ARCH=arch_of_dtx_1 `basename $0` DTx_1) \\ - <(ARCH=arch_of_dtx_2 `basename $0` DTx_2) + may not work since \${ARCH} is part of the include path. The following + workaround can be used: `basename $0` ARCH=arch_of_dtx_1 DTx_1 >tmp_dtx_1.dts `basename $0` ARCH=arch_of_dtx_2 DTx_2 >tmp_dtx_2.dts diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c index a39d93e3c6ae8d34f617b835d8ebcf5900cbad89..867b06c6d27976b6458dc37e80234ce6bf0dbb2c 100644 --- a/scripts/kconfig/confdata.c +++ b/scripts/kconfig/confdata.c @@ -968,14 +968,19 @@ static int conf_write_dep(const char *name) static int conf_touch_deps(void) { - const char *name; + const char *name, *tmp; struct symbol *sym; int res, i; - strcpy(depfile_path, "include/config/"); - depfile_prefix_len = strlen(depfile_path); - name = conf_get_autoconfig_name(); + tmp = strrchr(name, '/'); + depfile_prefix_len = tmp ? tmp - name + 1 : 0; + if (depfile_prefix_len + 1 > sizeof(depfile_path)) + return -1; + + strncpy(depfile_path, name, depfile_prefix_len); + depfile_path[depfile_prefix_len] = 0; + conf_read_simple(name, S_DEF_AUTO); sym_calc_value(modules_sym); diff --git a/scripts/kconfig/preprocess.c b/scripts/kconfig/preprocess.c index 0590f86df6e40cfd073100904f3ec5cfbe650f5f..748da578b418c4acb53f454e17e2370d2050ceb6 100644 --- a/scripts/kconfig/preprocess.c +++ b/scripts/kconfig/preprocess.c @@ -141,7 +141,7 @@ static char *do_lineno(int argc, char *argv[]) static char *do_shell(int argc, char *argv[]) { FILE *p; - char buf[256]; + char buf[4096]; char *cmd; size_t nread; int i; diff --git a/scripts/module.lds.S b/scripts/module.lds.S index 69b9b71a6a4731ba43f74996039c5272ce2254c8..c5f12195817bb2b7e5011483abe12cd914dab494 100644 --- a/scripts/module.lds.S +++ b/scripts/module.lds.S @@ -23,6 +23,32 @@ SECTIONS { .init_array 0 : ALIGN(8) { *(SORT(.init_array.*)) *(.init_array) } __jump_table 0 : ALIGN(8) { KEEP(*(__jump_table)) } + + __patchable_function_entries : { *(__patchable_function_entries) } + +#ifdef CONFIG_LTO_CLANG + /* + * With CONFIG_LTO_CLANG, LLD always enables -fdata-sections and + * -ffunction-sections, which increases the size of the final module. + * Merge the split sections in the final binary. 
+ */ + .bss : { + *(.bss .bss.[0-9a-zA-Z_]*) + *(.bss..L*) + } + + .data : { + *(.data .data.[0-9a-zA-Z_]*) + *(.data..L*) + } + + .rodata : { + *(.rodata .rodata.[0-9a-zA-Z_]*) + *(.rodata..L*) + } + + .text : { *(.text .text.[0-9a-zA-Z_]*) } +#endif } /* bring in arch-specific sections */ diff --git a/scripts/sphinx-pre-install b/scripts/sphinx-pre-install index 828a8615a9181ccdb4a9883df20ea58c9762984d..8fcea769d44f50dcf3a85054d33871cc2898c911 100755 --- a/scripts/sphinx-pre-install +++ b/scripts/sphinx-pre-install @@ -76,6 +76,7 @@ my %texlive = ( 'ucs.sty' => 'texlive-ucs', 'upquote.sty' => 'texlive-upquote', 'wrapfig.sty' => 'texlive-wrapfig', + 'ctexhook.sty' => 'texlive-ctex', ); # @@ -370,6 +371,9 @@ sub give_debian_hints() ); if ($pdf) { + check_missing_file(["/usr/share/texlive/texmf-dist/tex/latex/ctex/ctexhook.sty"], + "texlive-lang-chinese", 2); + check_missing_file(["/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf"], "fonts-dejavu", 2); diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c index ea8ff8a07b36b4b80cbb664126614643e628d1e7..98d5a800fe5b01dfc3b538ab21ba1c516eb9fa5a 100644 --- a/security/integrity/ima/ima_fs.c +++ b/security/integrity/ima/ima_fs.c @@ -496,12 +496,12 @@ int __init ima_fs_init(void) return 0; out: + securityfs_remove(ima_policy); securityfs_remove(violations); securityfs_remove(runtime_measurements_count); securityfs_remove(ascii_runtime_measurements); securityfs_remove(binary_runtime_measurements); securityfs_remove(ima_symlink); securityfs_remove(ima_dir); - securityfs_remove(ima_policy); return -1; } diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c index 9b5adeaa47fc819c4c9fc6396871db2848770505..e737c216efc4971133f09731ac6a3fba968c1b0e 100644 --- a/security/integrity/ima/ima_policy.c +++ b/security/integrity/ima/ima_policy.c @@ -1636,6 +1636,14 @@ int ima_policy_show(struct seq_file *m, void *v) rcu_read_lock(); + /* Do not print rules with inactive LSM labels */ + for (i = 0; i < MAX_LSM_RULES; i++) { + if (entry->lsm[i].args_p && !entry->lsm[i].rule) { + rcu_read_unlock(); + return 0; + } + } + if (entry->action & MEASURE) seq_puts(m, pt(Opt_measure)); if (entry->action & DONT_MEASURE) diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c index f83255a39e6538574617592587b5067a456b26c8..f64c01d53e96aabf0b00d55266a3be2ce4e12725 100644 --- a/security/integrity/ima/ima_template.c +++ b/security/integrity/ima/ima_template.c @@ -27,6 +27,7 @@ static struct ima_template_desc builtin_templates[] = { static LIST_HEAD(defined_templates); static DEFINE_SPINLOCK(template_list); +static int template_setup_done; static const struct ima_template_field supported_fields[] = { {.field_id = "d", .field_init = ima_eventdigest_init, @@ -80,10 +81,11 @@ static int __init ima_template_setup(char *str) struct ima_template_desc *template_desc; int template_len = strlen(str); - if (ima_template) + if (template_setup_done) return 1; - ima_init_template_list(); + if (!ima_template) + ima_init_template_list(); /* * Verify that a template with the supplied name exists. 
@@ -107,6 +109,7 @@ static int __init ima_template_setup(char *str) } ima_template = template_desc; + template_setup_done = 1; return 1; } __setup("ima_template=", ima_template_setup); @@ -115,7 +118,7 @@ static int __init ima_template_fmt_setup(char *str) { int num_templates = ARRAY_SIZE(builtin_templates); - if (ima_template) + if (template_setup_done) return 1; if (template_desc_init_fields(str, NULL, NULL) < 0) { @@ -126,6 +129,7 @@ static int __init ima_template_fmt_setup(char *str) builtin_templates[num_templates - 1].fmt = str; ima_template = builtin_templates + num_templates - 1; + template_setup_done = 1; return 1; } diff --git a/security/integrity/integrity_audit.c b/security/integrity/integrity_audit.c index 29220056207f4e51e83afda6965816d7032d5f6e..0ec5e4c22cb2a1066c2b897776ead6d3db72635c 100644 --- a/security/integrity/integrity_audit.c +++ b/security/integrity/integrity_audit.c @@ -45,6 +45,8 @@ void integrity_audit_message(int audit_msgno, struct inode *inode, return; ab = audit_log_start(audit_context(), GFP_KERNEL, audit_msgno); + if (!ab) + return; audit_log_format(ab, "pid=%d uid=%u auid=%u ses=%u", task_pid_nr(current), from_kuid(&init_user_ns, current_uid()), diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index ff2191ae53528d80a0890aa9f03989663559feaa..86159b32921ccadc2c49b88452ba9047cd8da8a0 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -947,18 +947,22 @@ out: static int selinux_add_opt(int token, const char *s, void **mnt_opts) { struct selinux_mnt_opts *opts = *mnt_opts; + bool is_alloc_opts = false; if (token == Opt_seclabel) /* eaten and completely ignored */ return 0; + if (!s) + return -ENOMEM; + if (!opts) { opts = kzalloc(sizeof(struct selinux_mnt_opts), GFP_KERNEL); if (!opts) return -ENOMEM; *mnt_opts = opts; + is_alloc_opts = true; } - if (!s) - return -ENOMEM; + switch (token) { case Opt_context: if (opts->context || opts->defcontext) @@ -983,6 +987,10 @@ static int selinux_add_opt(int token, const char *s, void **mnt_opts) } return 0; Einval: + if (is_alloc_opts) { + kfree(opts); + *mnt_opts = NULL; + } pr_warn(SEL_MOUNT_FAIL_MSG); return -EINVAL; } diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c index 1ef74c085f2b0f288ca689097e38ce03be6090c7..865611127357ee83b8016b65f26dd36791a18727 100644 --- a/security/selinux/ss/conditional.c +++ b/security/selinux/ss/conditional.c @@ -152,6 +152,8 @@ static void cond_list_destroy(struct policydb *p) for (i = 0; i < p->cond_list_len; i++) cond_node_destroy(&p->cond_list[i]); kfree(p->cond_list); + p->cond_list = NULL; + p->cond_list_len = 0; } void cond_policydb_destroy(struct policydb *p) @@ -440,7 +442,6 @@ int cond_read_list(struct policydb *p, void *fp) return 0; err: cond_list_destroy(p); - p->cond_list = NULL; return rc; } diff --git a/sound/core/jack.c b/sound/core/jack.c index d6502dff247a817063dd1945d976c1b47e14a6f5..dc2e06ae2414960beec0a04a1c0042e13407440f 100644 --- a/sound/core/jack.c +++ b/sound/core/jack.c @@ -54,10 +54,13 @@ static int snd_jack_dev_free(struct snd_device *device) struct snd_card *card = device->card; struct snd_jack_kctl *jack_kctl, *tmp_jack_kctl; + down_write(&card->controls_rwsem); list_for_each_entry_safe(jack_kctl, tmp_jack_kctl, &jack->kctl_list, list) { list_del_init(&jack_kctl->list); snd_ctl_remove(card, jack_kctl->kctl); } + up_write(&card->controls_rwsem); + if (jack->private_free) jack->private_free(jack); diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c index 
77727a69c3c4ea1f0bf97ef068f5bde2d746a59b..d79febeebf0c591725703112330e020426ab7669 100644 --- a/sound/core/oss/pcm_oss.c +++ b/sound/core/oss/pcm_oss.c @@ -2056,7 +2056,7 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr int err, cmd; #ifdef OSS_DEBUG - pcm_dbg(substream->pcm, "pcm_oss: trigger = 0x%x\n", trigger); + pr_debug("pcm_oss: trigger = 0x%x\n", trigger); #endif psubstream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK]; diff --git a/sound/core/pcm.c b/sound/core/pcm.c index 41cbdac5b1cfaa6e93fc0d992727c04dc070f1c4..a8ae5928decdad11d29699114ea8dea3757a37c2 100644 --- a/sound/core/pcm.c +++ b/sound/core/pcm.c @@ -810,7 +810,11 @@ EXPORT_SYMBOL(snd_pcm_new_internal); static void free_chmap(struct snd_pcm_str *pstr) { if (pstr->chmap_kctl) { - snd_ctl_remove(pstr->pcm->card, pstr->chmap_kctl); + struct snd_card *card = pstr->pcm->card; + + down_write(&card->controls_rwsem); + snd_ctl_remove(card, pstr->chmap_kctl); + up_write(&card->controls_rwsem); pstr->chmap_kctl = NULL; } } diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c index 71a6ea62c3be7c8a0a1885da0b32b2b527ef8644..4ff0b927230c2f5ee4efe89046b61e68d0880eb9 100644 --- a/sound/core/seq/seq_queue.c +++ b/sound/core/seq/seq_queue.c @@ -234,12 +234,15 @@ struct snd_seq_queue *snd_seq_queue_find_name(char *name) /* -------------------------------------------------------- */ +#define MAX_CELL_PROCESSES_IN_QUEUE 1000 + void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop) { unsigned long flags; struct snd_seq_event_cell *cell; snd_seq_tick_time_t cur_tick; snd_seq_real_time_t cur_time; + int processed = 0; if (q == NULL) return; @@ -262,6 +265,8 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop) if (!cell) break; snd_seq_dispatch_event(cell, atomic, hop); + if (++processed >= MAX_CELL_PROCESSES_IN_QUEUE) + goto out; /* the rest processed at the next batch */ } /* Process time queue... 
*/ @@ -271,14 +276,19 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop) if (!cell) break; snd_seq_dispatch_event(cell, atomic, hop); + if (++processed >= MAX_CELL_PROCESSES_IN_QUEUE) + goto out; /* the rest processed at the next batch */ } + out: /* free lock */ spin_lock_irqsave(&q->check_lock, flags); if (q->check_again) { q->check_again = 0; - spin_unlock_irqrestore(&q->check_lock, flags); - goto __again; + if (processed < MAX_CELL_PROCESSES_IN_QUEUE) { + spin_unlock_irqrestore(&q->check_lock, flags); + goto __again; + } } q->check_blocked = 0; spin_unlock_irqrestore(&q->check_lock, flags); diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 6dece719be669c5f1034d684d08e04d41ddbbd5b..39281106477ebf2df18b9ecd273c272076bc3783 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -1727,8 +1727,11 @@ void snd_hda_ctls_clear(struct hda_codec *codec) { int i; struct hda_nid_item *items = codec->mixers.list; + + down_write(&codec->card->controls_rwsem); for (i = 0; i < codec->mixers.used; i++) snd_ctl_remove(codec->card, items[i].kctl); + up_write(&codec->card->controls_rwsem); snd_array_free(&codec->mixers); snd_array_free(&codec->nids); } diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index 323df011b94a3de5188ea3b1f1d4a3975b3657d9..8ee3be7bbd24e7676231eed11ede07ee4bdebd23 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c @@ -91,6 +91,12 @@ static void snd_hda_gen_spec_free(struct hda_gen_spec *spec) free_kctls(spec); snd_array_free(&spec->paths); snd_array_free(&spec->loopback_list); +#ifdef CONFIG_SND_HDA_GENERIC_LEDS + if (spec->led_cdevs[LED_AUDIO_MUTE]) + led_classdev_unregister(spec->led_cdevs[LED_AUDIO_MUTE]); + if (spec->led_cdevs[LED_AUDIO_MICMUTE]) + led_classdev_unregister(spec->led_cdevs[LED_AUDIO_MICMUTE]); +#endif } /* @@ -3911,7 +3917,10 @@ static int create_mute_led_cdev(struct hda_codec *codec, enum led_brightness), bool micmute) { + struct hda_gen_spec *spec = codec->spec; struct led_classdev *cdev; + int idx = micmute ? LED_AUDIO_MICMUTE : LED_AUDIO_MUTE; + int err; cdev = devm_kzalloc(&codec->core.dev, sizeof(*cdev), GFP_KERNEL); if (!cdev) @@ -3921,10 +3930,14 @@ static int create_mute_led_cdev(struct hda_codec *codec, cdev->max_brightness = 1; cdev->default_trigger = micmute ? "audio-micmute" : "audio-mute"; cdev->brightness_set_blocking = callback; - cdev->brightness = ledtrig_audio_get(micmute ? 
LED_AUDIO_MICMUTE : LED_AUDIO_MUTE); + cdev->brightness = ledtrig_audio_get(idx); cdev->flags = LED_CORE_SUSPENDRESUME; - return devm_led_classdev_register(&codec->core.dev, cdev); + err = led_classdev_register(&codec->core.dev, cdev); + if (err < 0) + return err; + spec->led_cdevs[idx] = cdev; + return 0; } static void vmaster_update_mute_led(void *private_data, int enabled) diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h index 0886bc81f40be64901b50d6db1a21145891dc810..578faa9adcdcdba43563efddd5fc9e40856c6cb7 100644 --- a/sound/pci/hda/hda_generic.h +++ b/sound/pci/hda/hda_generic.h @@ -305,6 +305,9 @@ struct hda_gen_spec { struct hda_jack_callback *cb); void (*mic_autoswitch_hook)(struct hda_codec *codec, struct hda_jack_callback *cb); + + /* leds */ + struct led_classdev *led_cdevs[NUM_AUDIO_LEDS]; }; /* values for add_stereo_mix_input flag */ diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 3cc936f2cbf8d02344b402f4251204c2b4882000..600ea241ead79630bdfcd91da02be4259e401140 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -1652,6 +1652,7 @@ static const struct snd_pci_quirk probe_mask_list[] = { /* forced codec slots */ SND_PCI_QUIRK(0x1043, 0x1262, "ASUS W5Fm", 0x103), SND_PCI_QUIRK(0x1046, 0x1262, "ASUS W5F", 0x103), + SND_PCI_QUIRK(0x1558, 0x0351, "Schenker Dock 15", 0x105), /* WinFast VP200 H (Teradici) user reported broken communication */ SND_PCI_QUIRK(0x3a21, 0x040d, "WinFast VP200 H", 0x101), {} @@ -1837,8 +1838,6 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci, assign_position_fix(chip, check_position_fix(chip, position_fix[dev])); - check_probe_mask(chip, dev); - if (single_cmd < 0) /* allow fallback to single_cmd at errors */ chip->fallback_to_single_cmd = 1; else /* explicitly set to single_cmd or not */ @@ -1866,6 +1865,8 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci, chip->bus.core.needs_damn_long_delay = 1; } + check_probe_mask(chip, dev); + err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops); if (err < 0) { dev_err(card->dev, "Error creating device [card]!\n"); diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 14ce48f1a8e47e406bd9d4474e76d459d5dfff07..ed0cfcb05ef0d4ee0243a1ed4d47ea17a494313f 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -97,6 +97,7 @@ struct alc_spec { unsigned int gpio_mic_led_mask; struct alc_coef_led mute_led_coef; struct alc_coef_led mic_led_coef; + struct mutex coef_mutex; hda_nid_t headset_mic_pin; hda_nid_t headphone_mic_pin; @@ -133,8 +134,24 @@ struct alc_spec { * COEF access helper functions */ -static int alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid, - unsigned int coef_idx) +static void coef_mutex_lock(struct hda_codec *codec) +{ + struct alc_spec *spec = codec->spec; + + snd_hda_power_up_pm(codec); + mutex_lock(&spec->coef_mutex); +} + +static void coef_mutex_unlock(struct hda_codec *codec) +{ + struct alc_spec *spec = codec->spec; + + mutex_unlock(&spec->coef_mutex); + snd_hda_power_down_pm(codec); +} + +static int __alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid, + unsigned int coef_idx) { unsigned int val; @@ -143,28 +160,56 @@ static int alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid, return val; } +static int alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid, + unsigned int coef_idx) +{ + unsigned int val; + + coef_mutex_lock(codec); + val = __alc_read_coefex_idx(codec, nid, coef_idx); + 
coef_mutex_unlock(codec); + return val; +} + #define alc_read_coef_idx(codec, coef_idx) \ alc_read_coefex_idx(codec, 0x20, coef_idx) -static void alc_write_coefex_idx(struct hda_codec *codec, hda_nid_t nid, - unsigned int coef_idx, unsigned int coef_val) +static void __alc_write_coefex_idx(struct hda_codec *codec, hda_nid_t nid, + unsigned int coef_idx, unsigned int coef_val) { snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_COEF_INDEX, coef_idx); snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_PROC_COEF, coef_val); } +static void alc_write_coefex_idx(struct hda_codec *codec, hda_nid_t nid, + unsigned int coef_idx, unsigned int coef_val) +{ + coef_mutex_lock(codec); + __alc_write_coefex_idx(codec, nid, coef_idx, coef_val); + coef_mutex_unlock(codec); +} + #define alc_write_coef_idx(codec, coef_idx, coef_val) \ alc_write_coefex_idx(codec, 0x20, coef_idx, coef_val) +static void __alc_update_coefex_idx(struct hda_codec *codec, hda_nid_t nid, + unsigned int coef_idx, unsigned int mask, + unsigned int bits_set) +{ + unsigned int val = __alc_read_coefex_idx(codec, nid, coef_idx); + + if (val != -1) + __alc_write_coefex_idx(codec, nid, coef_idx, + (val & ~mask) | bits_set); +} + static void alc_update_coefex_idx(struct hda_codec *codec, hda_nid_t nid, unsigned int coef_idx, unsigned int mask, unsigned int bits_set) { - unsigned int val = alc_read_coefex_idx(codec, nid, coef_idx); - - if (val != -1) - alc_write_coefex_idx(codec, nid, coef_idx, - (val & ~mask) | bits_set); + coef_mutex_lock(codec); + __alc_update_coefex_idx(codec, nid, coef_idx, mask, bits_set); + coef_mutex_unlock(codec); } #define alc_update_coef_idx(codec, coef_idx, mask, bits_set) \ @@ -197,13 +242,15 @@ struct coef_fw { static void alc_process_coef_fw(struct hda_codec *codec, const struct coef_fw *fw) { + coef_mutex_lock(codec); for (; fw->nid; fw++) { if (fw->mask == (unsigned short)-1) - alc_write_coefex_idx(codec, fw->nid, fw->idx, fw->val); + __alc_write_coefex_idx(codec, fw->nid, fw->idx, fw->val); else - alc_update_coefex_idx(codec, fw->nid, fw->idx, - fw->mask, fw->val); + __alc_update_coefex_idx(codec, fw->nid, fw->idx, + fw->mask, fw->val); } + coef_mutex_unlock(codec); } /* @@ -1160,6 +1207,7 @@ static int alc_alloc_spec(struct hda_codec *codec, hda_nid_t mixer_nid) codec->spdif_status_reset = 1; codec->forced_resume = 1; codec->patch_ops = alc_patch_ops; + mutex_init(&spec->coef_mutex); err = alc_codec_rename_from_preset(codec); if (err < 0) { @@ -1936,6 +1984,7 @@ enum { ALC887_FIXUP_ASUS_BASS, ALC887_FIXUP_BASS_CHMAP, ALC1220_FIXUP_GB_DUAL_CODECS, + ALC1220_FIXUP_GB_X570, ALC1220_FIXUP_CLEVO_P950, ALC1220_FIXUP_CLEVO_PB51ED, ALC1220_FIXUP_CLEVO_PB51ED_PINS, @@ -2125,6 +2174,30 @@ static void alc1220_fixup_gb_dual_codecs(struct hda_codec *codec, } } +static void alc1220_fixup_gb_x570(struct hda_codec *codec, + const struct hda_fixup *fix, + int action) +{ + static const hda_nid_t conn1[] = { 0x0c }; + static const struct coef_fw gb_x570_coefs[] = { + WRITE_COEF(0x07, 0x03c0), + WRITE_COEF(0x1a, 0x01c1), + WRITE_COEF(0x1b, 0x0202), + WRITE_COEF(0x43, 0x3005), + {} + }; + + switch (action) { + case HDA_FIXUP_ACT_PRE_PROBE: + snd_hda_override_conn_list(codec, 0x14, ARRAY_SIZE(conn1), conn1); + snd_hda_override_conn_list(codec, 0x1b, ARRAY_SIZE(conn1), conn1); + break; + case HDA_FIXUP_ACT_INIT: + alc_process_coef_fw(codec, gb_x570_coefs); + break; + } +} + static void alc1220_fixup_clevo_p950(struct hda_codec *codec, const struct hda_fixup *fix, int action) @@ -2427,6 +2500,10 @@ static const struct hda_fixup 
alc882_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc1220_fixup_gb_dual_codecs, }, + [ALC1220_FIXUP_GB_X570] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc1220_fixup_gb_x570, + }, [ALC1220_FIXUP_CLEVO_P950] = { .type = HDA_FIXUP_FUNC, .v.func = alc1220_fixup_clevo_p950, @@ -2529,8 +2606,9 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x13fe, 0x1009, "Advantech MIT-W101", ALC886_FIXUP_EAPD), SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE), SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), - SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950), - SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_GB_X570), + SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_GB_X570), + SND_PCI_QUIRK(0x1458, 0xa0d5, "Gigabyte X570S Aorus Master", ALC1220_FIXUP_GB_X570), SND_PCI_QUIRK(0x1462, 0x11f7, "MSI-GE63", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x1229, "MSI-GP73", ALC1220_FIXUP_CLEVO_P950), @@ -2605,6 +2683,7 @@ static const struct hda_model_fixup alc882_fixup_models[] = { {.id = ALC882_FIXUP_NO_PRIMARY_HP, .name = "no-primary-hp"}, {.id = ALC887_FIXUP_ASUS_BASS, .name = "asus-bass"}, {.id = ALC1220_FIXUP_GB_DUAL_CODECS, .name = "dual-codecs"}, + {.id = ALC1220_FIXUP_GB_X570, .name = "gb-x570"}, {.id = ALC1220_FIXUP_CLEVO_P950, .name = "clevo-p950"}, {} }; @@ -6729,6 +6808,8 @@ enum { ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE, ALC233_FIXUP_NO_AUDIO_JACK, ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME, + ALC285_FIXUP_LEGION_Y9000X_SPEAKERS, + ALC285_FIXUP_LEGION_Y9000X_AUTOMUTE, }; static const struct hda_fixup alc269_fixups[] = { @@ -8319,6 +8400,18 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF, }, + [ALC285_FIXUP_LEGION_Y9000X_SPEAKERS] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc285_fixup_ideapad_s740_coef, + .chained = true, + .chain_id = ALC285_FIXUP_LEGION_Y9000X_AUTOMUTE, + }, + [ALC285_FIXUP_LEGION_Y9000X_AUTOMUTE] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc287_fixup_legion_15imhg05_speakers, + .chained = true, + .chain_id = ALC269_FIXUP_THINKPAD_ACPI, + }, [ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS] = { .type = HDA_FIXUP_VERBS, //.v.verbs = legion_15imhg05_coefs, @@ -8708,6 +8801,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS), SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401), + SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2), SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC), @@ -8857,13 +8951,17 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x17aa, 0x31af, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340), + SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS), 
SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME), + SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS), + SND_PCI_QUIRK(0x17aa, 0x3824, "Legion Y9000X 2020", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS), SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF), + SND_PCI_QUIRK(0x17aa, 0x3834, "Lenovo IdeaPad Slim 9i 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), + SND_PCI_QUIRK(0x17aa, 0x383d, "Legion Y9000X 2019", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS), SND_PCI_QUIRK(0x17aa, 0x3843, "Yoga 9i", ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP), - SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS), + SND_PCI_QUIRK(0x17aa, 0x384a, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), SND_PCI_QUIRK(0x17aa, 0x3852, "Lenovo Yoga 7 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), SND_PCI_QUIRK(0x17aa, 0x3853, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), - SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS), SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI), diff --git a/sound/soc/codecs/cpcap.c b/sound/soc/codecs/cpcap.c index c0425e3707d9c2d91c56370f7e62d9ed74493ef1..a3597137fee3e40eda5d52d9bca9a31d9725a814 100644 --- a/sound/soc/codecs/cpcap.c +++ b/sound/soc/codecs/cpcap.c @@ -1544,6 +1544,8 @@ static int cpcap_codec_probe(struct platform_device *pdev) { struct device_node *codec_node = of_get_child_by_name(pdev->dev.parent->of_node, "audio-codec"); + if (!codec_node) + return -ENODEV; pdev->dev.of_node = codec_node; diff --git a/sound/soc/codecs/max9759.c b/sound/soc/codecs/max9759.c index 00e9d4fd1651fe570a49dc89a85b7ad37bd25126..0c261335c8a16cbb680738e41484b1a2a91545a9 100644 --- a/sound/soc/codecs/max9759.c +++ b/sound/soc/codecs/max9759.c @@ -64,7 +64,8 @@ static int speaker_gain_control_put(struct snd_kcontrol *kcontrol, struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol); struct max9759 *priv = snd_soc_component_get_drvdata(c); - if (ucontrol->value.integer.value[0] > 3) + if (ucontrol->value.integer.value[0] < 0 || + ucontrol->value.integer.value[0] > 3) return -EINVAL; priv->gain = ucontrol->value.integer.value[0]; diff --git a/sound/soc/codecs/rt5663.c b/sound/soc/codecs/rt5663.c index 619fb9a031e39a8a03e5134d7b8006e88292618a..db8a41aaa3859c15a5735852126c4383575fee92 100644 --- a/sound/soc/codecs/rt5663.c +++ b/sound/soc/codecs/rt5663.c @@ -3461,6 +3461,7 @@ static void rt5663_calibrate(struct rt5663_priv *rt5663) static int rt5663_parse_dp(struct rt5663_priv *rt5663, struct device *dev) { int table_size; + int ret; device_property_read_u32(dev, "realtek,dc_offset_l_manual", &rt5663->pdata.dc_offset_l_manual); @@ -3477,9 +3478,11 @@ static int rt5663_parse_dp(struct rt5663_priv *rt5663, struct device *dev) table_size = sizeof(struct impedance_mapping_table) * rt5663->pdata.impedance_sensing_num; rt5663->imp_table = devm_kzalloc(dev, table_size, GFP_KERNEL); - device_property_read_u32_array(dev, + ret = device_property_read_u32_array(dev, "realtek,impedance_sensing_table", (u32 *)rt5663->imp_table, table_size); + if (ret) + return ret; } return 0; @@ -3504,8 +3507,11 @@ static int rt5663_i2c_probe(struct i2c_client *i2c, if (pdata) rt5663->pdata = *pdata; - else - rt5663_parse_dp(rt5663, &i2c->dev); + else { + ret = rt5663_parse_dp(rt5663, 
&i2c->dev); + if (ret) + return ret; + } for (i = 0; i < ARRAY_SIZE(rt5663->supplies); i++) rt5663->supplies[i].supply = rt5663_supply_names[i]; diff --git a/sound/soc/codecs/tas2770.c b/sound/soc/codecs/tas2770.c index 61c3238bc265678c099e988a5bf216c66465d39b..315fd9d971c8c3661b0acfb5ca2c86e05a1ee678 100644 --- a/sound/soc/codecs/tas2770.c +++ b/sound/soc/codecs/tas2770.c @@ -38,10 +38,12 @@ static void tas2770_reset(struct tas2770_priv *tas2770) gpiod_set_value_cansleep(tas2770->reset_gpio, 0); msleep(20); gpiod_set_value_cansleep(tas2770->reset_gpio, 1); + usleep_range(1000, 2000); } snd_soc_component_write(tas2770->component, TAS2770_SW_RST, TAS2770_RST); + usleep_range(1000, 2000); } static int tas2770_set_bias_level(struct snd_soc_component *component, @@ -110,6 +112,7 @@ static int tas2770_codec_resume(struct snd_soc_component *component) if (tas2770->sdz_gpio) { gpiod_set_value_cansleep(tas2770->sdz_gpio, 1); + usleep_range(1000, 2000); } else { ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL, TAS2770_PWR_CTRL_MASK, @@ -510,8 +513,10 @@ static int tas2770_codec_probe(struct snd_soc_component *component) tas2770->component = component; - if (tas2770->sdz_gpio) + if (tas2770->sdz_gpio) { gpiod_set_value_cansleep(tas2770->sdz_gpio, 1); + usleep_range(1000, 2000); + } tas2770_reset(tas2770); diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c index 02c81d2e34ad00f5437a46d5f264679552507a24..5e3c71f025f4531faf9df246b538b8414029423d 100644 --- a/sound/soc/fsl/fsl_asrc.c +++ b/sound/soc/fsl/fsl_asrc.c @@ -19,6 +19,7 @@ #include "fsl_asrc.h" #define IDEAL_RATIO_DECIMAL_DEPTH 26 +#define DIVIDER_NUM 64 #define pair_err(fmt, ...) \ dev_err(&asrc->pdev->dev, "Pair %c: " fmt, 'A' + index, ##__VA_ARGS__) @@ -101,6 +102,55 @@ static unsigned char clk_map_imx8qxp[2][ASRC_CLK_MAP_LEN] = { }, }; +/* + * According to RM, the divider range is 1 ~ 8, + * prescaler is power of 2 from 1 ~ 128. 
+ */ +static int asrc_clk_divider[DIVIDER_NUM] = { + 1, 2, 4, 8, 16, 32, 64, 128, /* divider = 1 */ + 2, 4, 8, 16, 32, 64, 128, 256, /* divider = 2 */ + 3, 6, 12, 24, 48, 96, 192, 384, /* divider = 3 */ + 4, 8, 16, 32, 64, 128, 256, 512, /* divider = 4 */ + 5, 10, 20, 40, 80, 160, 320, 640, /* divider = 5 */ + 6, 12, 24, 48, 96, 192, 384, 768, /* divider = 6 */ + 7, 14, 28, 56, 112, 224, 448, 896, /* divider = 7 */ + 8, 16, 32, 64, 128, 256, 512, 1024, /* divider = 8 */ +}; + +/* + * Check if the divider is available for internal ratio mode + */ +static bool fsl_asrc_divider_avail(int clk_rate, int rate, int *div) +{ + u32 rem, i; + u64 n; + + if (div) + *div = 0; + + if (clk_rate == 0 || rate == 0) + return false; + + n = clk_rate; + rem = do_div(n, rate); + + if (div) + *div = n; + + if (rem != 0) + return false; + + for (i = 0; i < DIVIDER_NUM; i++) { + if (n == asrc_clk_divider[i]) + break; + } + + if (i == DIVIDER_NUM) + return false; + + return true; +} + /** * fsl_asrc_sel_proc - Select the pre-processing and post-processing options * @inrate: input sample rate @@ -330,12 +380,12 @@ static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair, bool use_ideal_rate) enum asrc_word_width input_word_width; enum asrc_word_width output_word_width; u32 inrate, outrate, indiv, outdiv; - u32 clk_index[2], div[2], rem[2]; + u32 clk_index[2], div[2]; u64 clk_rate; int in, out, channels; int pre_proc, post_proc; struct clk *clk; - bool ideal; + bool ideal, div_avail; if (!config) { pair_err("invalid pair config\n"); @@ -415,8 +465,7 @@ static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair, bool use_ideal_rate) clk = asrc_priv->asrck_clk[clk_index[ideal ? OUT : IN]]; clk_rate = clk_get_rate(clk); - rem[IN] = do_div(clk_rate, inrate); - div[IN] = (u32)clk_rate; + div_avail = fsl_asrc_divider_avail(clk_rate, inrate, &div[IN]); /* * The divider range is [1, 1024], defined by the hardware. For non- @@ -425,7 +474,7 @@ static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair, bool use_ideal_rate) * only result in different converting speeds. So remainder does not * matter, as long as we keep the divider within its valid range. */ - if (div[IN] == 0 || (!ideal && (div[IN] > 1024 || rem[IN] != 0))) { + if (div[IN] == 0 || (!ideal && !div_avail)) { pair_err("failed to support input sample rate %dHz by asrck_%x\n", inrate, clk_index[ideal ? 
OUT : IN]); return -EINVAL; @@ -436,13 +485,12 @@ static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair, bool use_ideal_rate) clk = asrc_priv->asrck_clk[clk_index[OUT]]; clk_rate = clk_get_rate(clk); if (ideal && use_ideal_rate) - rem[OUT] = do_div(clk_rate, IDEAL_RATIO_RATE); + div_avail = fsl_asrc_divider_avail(clk_rate, IDEAL_RATIO_RATE, &div[OUT]); else - rem[OUT] = do_div(clk_rate, outrate); - div[OUT] = clk_rate; + div_avail = fsl_asrc_divider_avail(clk_rate, outrate, &div[OUT]); /* Output divider has the same limitation as the input one */ - if (div[OUT] == 0 || (!ideal && (div[OUT] > 1024 || rem[OUT] != 0))) { + if (div[OUT] == 0 || (!ideal && !div_avail)) { pair_err("failed to support output sample rate %dHz by asrck_%x\n", outrate, clk_index[OUT]); return -EINVAL; @@ -621,8 +669,7 @@ static void fsl_asrc_select_clk(struct fsl_asrc_priv *asrc_priv, clk_index = asrc_priv->clk_map[j][i]; clk_rate = clk_get_rate(asrc_priv->asrck_clk[clk_index]); /* Only match a perfect clock source with no remainder */ - if (clk_rate != 0 && (clk_rate / rate[j]) <= 1024 && - (clk_rate % rate[j]) == 0) + if (fsl_asrc_divider_avail(clk_rate, rate[j], NULL)) break; } diff --git a/sound/soc/fsl/fsl_mqs.c b/sound/soc/fsl/fsl_mqs.c index 69aeb0e71844d9f614e156f4e8a0cbcdb41d728f..0d4efbed41dab6cac2e413f332fa4d1dae55b7bf 100644 --- a/sound/soc/fsl/fsl_mqs.c +++ b/sound/soc/fsl/fsl_mqs.c @@ -337,4 +337,4 @@ module_platform_driver(fsl_mqs_driver); MODULE_AUTHOR("Shengjiu Wang "); MODULE_DESCRIPTION("MQS codec driver"); MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("platform: fsl-mqs"); +MODULE_ALIAS("platform:fsl-mqs"); diff --git a/sound/soc/fsl/pcm030-audio-fabric.c b/sound/soc/fsl/pcm030-audio-fabric.c index af3c3b90c0acab59c88d0050838edcc23a848c0b..83b4a22bf15ac0b8883f3144c55f34d5959f6012 100644 --- a/sound/soc/fsl/pcm030-audio-fabric.c +++ b/sound/soc/fsl/pcm030-audio-fabric.c @@ -93,16 +93,21 @@ static int pcm030_fabric_probe(struct platform_device *op) dev_err(&op->dev, "platform_device_alloc() failed\n"); ret = platform_device_add(pdata->codec_device); - if (ret) + if (ret) { dev_err(&op->dev, "platform_device_add() failed: %d\n", ret); + platform_device_put(pdata->codec_device); + } ret = snd_soc_register_card(card); - if (ret) + if (ret) { dev_err(&op->dev, "snd_soc_register_card() failed: %d\n", ret); + platform_device_del(pdata->codec_device); + platform_device_put(pdata->codec_device); + } platform_set_drvdata(op, pdata); - return ret; + } static int pcm030_fabric_remove(struct platform_device *op) diff --git a/sound/soc/intel/catpt/dsp.c b/sound/soc/intel/catpt/dsp.c index 9e807b94173219c687758db9b5510bd9d423beab..38a92bbc1ed5682c72db8825ee34101560348fa3 100644 --- a/sound/soc/intel/catpt/dsp.c +++ b/sound/soc/intel/catpt/dsp.c @@ -65,6 +65,7 @@ static int catpt_dma_memcpy(struct catpt_dev *cdev, struct dma_chan *chan, { struct dma_async_tx_descriptor *desc; enum dma_status status; + int ret; desc = dmaengine_prep_dma_memcpy(chan, dst_addr, src_addr, size, DMA_CTRL_ACK); @@ -77,13 +78,22 @@ static int catpt_dma_memcpy(struct catpt_dev *cdev, struct dma_chan *chan, catpt_updatel_shim(cdev, HMDC, CATPT_HMDC_HDDA(CATPT_DMA_DEVID, chan->chan_id), CATPT_HMDC_HDDA(CATPT_DMA_DEVID, chan->chan_id)); - dmaengine_submit(desc); + + ret = dma_submit_error(dmaengine_submit(desc)); + if (ret) { + dev_err(cdev->dev, "submit tx failed: %d\n", ret); + goto clear_hdda; + } + status = dma_wait_for_async_tx(desc); + ret = (status == DMA_COMPLETE) ? 
0 : -EPROTO; + +clear_hdda: /* regardless of status, disable access to HOST memory in demand mode */ catpt_updatel_shim(cdev, HMDC, CATPT_HMDC_HDDA(CATPT_DMA_DEVID, chan->chan_id), 0); - return (status == DMA_COMPLETE) ? 0 : -EPROTO; + return ret; } int catpt_dma_memcpy_todsp(struct catpt_dev *cdev, struct dma_chan *chan, diff --git a/sound/soc/mediatek/mt8173/mt8173-max98090.c b/sound/soc/mediatek/mt8173/mt8173-max98090.c index fc94314bfc02ffd40dd9863fe29dc80419f5e04b..3bdd4931316cd0e624e327e2372d609eb738af68 100644 --- a/sound/soc/mediatek/mt8173/mt8173-max98090.c +++ b/sound/soc/mediatek/mt8173/mt8173-max98090.c @@ -180,6 +180,9 @@ static int mt8173_max98090_dev_probe(struct platform_device *pdev) if (ret) dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n", __func__, ret); + + of_node_put(codec_node); + of_node_put(platform_node); return ret; } diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c index 0f28dc2217c091e174f8fbe132d51dac25c03ba7..390da5bf727ebefedec2c9d92c9b5fa1f963e733 100644 --- a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c +++ b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c @@ -218,6 +218,8 @@ static int mt8173_rt5650_rt5514_dev_probe(struct platform_device *pdev) if (ret) dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n", __func__, ret); + + of_node_put(platform_node); return ret; } diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c index 077c6ee0678067c3ac2a2a6836a4b40625c9f5e4..c8e4e85e105752ac8db257a3d768181c10178103 100644 --- a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c +++ b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c @@ -285,6 +285,8 @@ static int mt8173_rt5650_rt5676_dev_probe(struct platform_device *pdev) if (ret) dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n", __func__, ret); + + of_node_put(platform_node); return ret; } diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650.c b/sound/soc/mediatek/mt8173/mt8173-rt5650.c index c28ebf891cb05f77b50e88869dc75424edaad51e..e168d31f44459cf75cf242c81bf847a698d2b4fc 100644 --- a/sound/soc/mediatek/mt8173/mt8173-rt5650.c +++ b/sound/soc/mediatek/mt8173/mt8173-rt5650.c @@ -323,6 +323,8 @@ static int mt8173_rt5650_dev_probe(struct platform_device *pdev) if (ret) dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n", __func__, ret); + + of_node_put(platform_node); return ret; } diff --git a/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c b/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c index 20d31b69a5c00bf74354ea60aff898377fb3942d..9cc0f26b08fbcb296d0c657bb006ef241bf6afc5 100644 --- a/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c +++ b/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c @@ -787,7 +787,11 @@ static int mt8183_da7219_max98357_dev_probe(struct platform_device *pdev) return ret; } - return devm_snd_soc_register_card(&pdev->dev, card); + ret = devm_snd_soc_register_card(&pdev->dev, card); + + of_node_put(platform_node); + of_node_put(hdmi_codec); + return ret; } #ifdef CONFIG_OF diff --git a/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c b/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c index 79ba2f2d845223b4603f3ca3862f8ba41ed1dc88..14ce8b93597f3521fe7071aadf3f78a2aaef97ad 100644 --- a/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c +++ b/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c @@ -720,7 +720,12 @@ mt8183_mt6358_ts3a227_max98357_dev_probe(struct platform_device *pdev) __func__, ret); 
} - return devm_snd_soc_register_card(&pdev->dev, card); + ret = devm_snd_soc_register_card(&pdev->dev, card); + + of_node_put(platform_node); + of_node_put(ec_codec); + of_node_put(hdmi_codec); + return ret; } #ifdef CONFIG_OF diff --git a/sound/soc/samsung/idma.c b/sound/soc/samsung/idma.c index 66bcc2f97544bfecf0f905c363a85c37fe3a34a1..c3f1b054e2389c5e233258541d614d759c20a795 100644 --- a/sound/soc/samsung/idma.c +++ b/sound/soc/samsung/idma.c @@ -360,6 +360,8 @@ static int preallocate_idma_buffer(struct snd_pcm *pcm, int stream) buf->addr = idma.lp_tx_addr; buf->bytes = idma_hardware.buffer_bytes_max; buf->area = (unsigned char * __force)ioremap(buf->addr, buf->bytes); + if (!buf->area) + return -ENOMEM; return 0; } diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c index 10f48827bb0e0fee2a6a7d7565d97114f4f06963..caa8d45ebb209231e724ff35adc783a261b7be1e 100644 --- a/sound/soc/soc-ops.c +++ b/sound/soc/soc-ops.c @@ -308,7 +308,7 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol, unsigned int sign_bit = mc->sign_bit; unsigned int mask = (1 << fls(max)) - 1; unsigned int invert = mc->invert; - int err; + int err, ret; bool type_2r = false; unsigned int val2 = 0; unsigned int val, val_mask; @@ -316,13 +316,27 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol, if (sign_bit) mask = BIT(sign_bit + 1) - 1; - val = ((ucontrol->value.integer.value[0] + min) & mask); + val = ucontrol->value.integer.value[0]; + if (mc->platform_max && val > mc->platform_max) + return -EINVAL; + if (val > max - min) + return -EINVAL; + if (val < 0) + return -EINVAL; + val = (val + min) & mask; if (invert) val = max - val; val_mask = mask << shift; val = val << shift; if (snd_soc_volsw_is_stereo(mc)) { - val2 = ((ucontrol->value.integer.value[1] + min) & mask); + val2 = ucontrol->value.integer.value[1]; + if (mc->platform_max && val2 > mc->platform_max) + return -EINVAL; + if (val2 > max - min) + return -EINVAL; + if (val2 < 0) + return -EINVAL; + val2 = (val2 + min) & mask; if (invert) val2 = max - val2; if (reg == reg2) { @@ -336,12 +350,18 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol, err = snd_soc_component_update_bits(component, reg, val_mask, val); if (err < 0) return err; + ret = err; - if (type_2r) + if (type_2r) { err = snd_soc_component_update_bits(component, reg2, val_mask, - val2); + val2); + /* Don't discard any error code or drop change flag */ + if (ret == 0 || err < 0) { + ret = err; + } + } - return err; + return ret; } EXPORT_SYMBOL_GPL(snd_soc_put_volsw); @@ -409,8 +429,15 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol, int err = 0; unsigned int val, val_mask, val2 = 0; + val = ucontrol->value.integer.value[0]; + if (mc->platform_max && val > mc->platform_max) + return -EINVAL; + if (val > max - min) + return -EINVAL; + if (val < 0) + return -EINVAL; val_mask = mask << shift; - val = (ucontrol->value.integer.value[0] + min) & mask; + val = (val + min) & mask; val = val << shift; err = snd_soc_component_update_bits(component, reg, val_mask, val); @@ -483,7 +510,7 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol, unsigned int mask = (1 << fls(max)) - 1; unsigned int invert = mc->invert; unsigned int val, val_mask; - int ret; + int err, ret; if (invert) val = (max - ucontrol->value.integer.value[0]) & mask; @@ -492,9 +519,10 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol, val_mask = mask << shift; val = val << shift; - ret = snd_soc_component_update_bits(component, reg, val_mask, val); - if (ret < 0) - return ret; + err = 
snd_soc_component_update_bits(component, reg, val_mask, val); + if (err < 0) + return err; + ret = err; if (snd_soc_volsw_is_stereo(mc)) { if (invert) @@ -504,8 +532,12 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol, val_mask = mask << shift; val = val << shift; - ret = snd_soc_component_update_bits(component, rreg, val_mask, + err = snd_soc_component_update_bits(component, rreg, val_mask, val); + /* Don't discard any error code or drop change flag */ + if (ret == 0 || err < 0) { + ret = err; + } } return ret; @@ -859,6 +891,8 @@ int snd_soc_put_xr_sx(struct snd_kcontrol *kcontrol, unsigned int i, regval, regmask; int err; + if (val < mc->min || val > mc->max) + return -EINVAL; if (invert) val = max - val; val &= mask; diff --git a/sound/soc/uniphier/Kconfig b/sound/soc/uniphier/Kconfig index aa3592ee1358b06466dd9a05017142681b9669cc..ddfa6424c656bde0df43e24f80d7739c6fb6c94e 100644 --- a/sound/soc/uniphier/Kconfig +++ b/sound/soc/uniphier/Kconfig @@ -23,7 +23,6 @@ config SND_SOC_UNIPHIER_LD11 tristate "UniPhier LD11/LD20 Device Driver" depends on SND_SOC_UNIPHIER select SND_SOC_UNIPHIER_AIO - select SND_SOC_UNIPHIER_AIO_DMA help This adds ASoC driver for Socionext UniPhier LD11/LD20 input and output that can be used with other codecs. @@ -34,7 +33,6 @@ config SND_SOC_UNIPHIER_PXS2 tristate "UniPhier PXs2 Device Driver" depends on SND_SOC_UNIPHIER select SND_SOC_UNIPHIER_AIO - select SND_SOC_UNIPHIER_AIO_DMA help This adds ASoC driver for Socionext UniPhier PXs2 input and output that can be used with other codecs. diff --git a/sound/soc/xilinx/xlnx_formatter_pcm.c b/sound/soc/xilinx/xlnx_formatter_pcm.c index 91afea9d5de6787906ac5e817ba3406488fa3f58..ce19a6058b27964dfcd45336770b2e7cdd417cec 100644 --- a/sound/soc/xilinx/xlnx_formatter_pcm.c +++ b/sound/soc/xilinx/xlnx_formatter_pcm.c @@ -37,6 +37,7 @@ #define XLNX_AUD_XFER_COUNT 0x28 #define XLNX_AUD_CH_STS_START 0x2C #define XLNX_BYTES_PER_CH 0x44 +#define XLNX_AUD_ALIGN_BYTES 64 #define AUD_STS_IOC_IRQ_MASK BIT(31) #define AUD_STS_CH_STS_MASK BIT(29) @@ -368,12 +369,32 @@ static int xlnx_formatter_pcm_open(struct snd_soc_component *component, snd_soc_set_runtime_hwparams(substream, &xlnx_pcm_hardware); runtime->private_data = stream_data; - /* Resize the period size divisible by 64 */ + /* Resize the period bytes as divisible by 64 */ err = snd_pcm_hw_constraint_step(runtime, 0, - SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 64); + SNDRV_PCM_HW_PARAM_PERIOD_BYTES, + XLNX_AUD_ALIGN_BYTES); if (err) { dev_err(component->dev, - "unable to set constraint on period bytes\n"); + "Unable to set constraint on period bytes\n"); + return err; + } + + /* Resize the buffer bytes as divisible by 64 */ + err = snd_pcm_hw_constraint_step(runtime, 0, + SNDRV_PCM_HW_PARAM_BUFFER_BYTES, + XLNX_AUD_ALIGN_BYTES); + if (err) { + dev_err(component->dev, + "Unable to set constraint on buffer bytes\n"); + return err; + } + + /* Set periods as integer multiple */ + err = snd_pcm_hw_constraint_integer(runtime, + SNDRV_PCM_HW_PARAM_PERIODS); + if (err < 0) { + dev_err(component->dev, + "Unable to set constraint on periods to be integer\n"); return err; } diff --git a/sound/usb/format.c b/sound/usb/format.c index 4693384db069509a994c1b16909198ffd9109b74..e8a63ea2189d1728c20b7439ddbdbc9506b06108 100644 --- a/sound/usb/format.c +++ b/sound/usb/format.c @@ -365,7 +365,7 @@ static int parse_uac2_sample_rate_range(struct snd_usb_audio *chip, for (rate = min; rate <= max; rate += res) { /* Filter out invalid rates on Presonus Studio 1810c */ - if (chip->usb_id == 
USB_ID(0x0194f, 0x010c) && + if (chip->usb_id == USB_ID(0x194f, 0x010c) && !s1810c_valid_sample_rate(fp, rate)) goto skip_rate; diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index 8297117f4766e79b0d84431dad3aef2582efe35f..86fdd669f3fd76694234ddfd2c9b36dde2acc744 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c @@ -3033,7 +3033,7 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer) err = snd_rme_controls_create(mixer); break; - case USB_ID(0x0194f, 0x010c): /* Presonus Studio 1810c */ + case USB_ID(0x194f, 0x010c): /* Presonus Studio 1810c */ err = snd_sc1810_init_mixer(mixer); break; case USB_ID(0x2a39, 0x3fb0): /* RME Babyface Pro FS */ diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index af8814f2830ae7436a9871219838bb792b5f2559..f45a570be4ab1bdcee785ca913456f12ee6edd56 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -93,7 +93,7 @@ * combination. */ { - USB_DEVICE(0x041e, 0x4095), + USB_AUDIO_DEVICE(0x041e, 0x4095), .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { .ifnum = QUIRK_ANY_INTERFACE, .type = QUIRK_COMPOSITE, diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 2337707ddafb99e8d6ca12bfefd7af8287afdb5c..e5cb6daf2747aa1b148825ecbc6a62ae95542cdc 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -1315,7 +1315,7 @@ int snd_usb_apply_interface_quirk(struct snd_usb_audio *chip, if (chip->usb_id == USB_ID(0x0763, 0x2012)) return fasttrackpro_skip_setting_quirk(chip, iface, altno); /* presonus studio 1810c: skip altsets incompatible with device_setup */ - if (chip->usb_id == USB_ID(0x0194f, 0x010c)) + if (chip->usb_id == USB_ID(0x194f, 0x010c)) return s1810c_skip_setting_quirk(chip, iface, altno); diff --git a/tools/bpf/bpftool/Documentation/Makefile b/tools/bpf/bpftool/Documentation/Makefile index f33cb02de95cf1da1e9a783208d65314ad523ac7..3601b1d1974caac3373459398baf1283f34f5d0a 100644 --- a/tools/bpf/bpftool/Documentation/Makefile +++ b/tools/bpf/bpftool/Documentation/Makefile @@ -1,6 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only include ../../../scripts/Makefile.include -include ../../../scripts/utilities.mak INSTALL ?= install RM ?= rm -f diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile index f60e6ad3a1dffaffe040655315ad735df6adeea2..1896ef69b4492b666a8d79adc9a04d506b78f2bb 100644 --- a/tools/bpf/bpftool/Makefile +++ b/tools/bpf/bpftool/Makefile @@ -1,6 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only include ../../scripts/Makefile.include -include ../../scripts/utilities.mak ifeq ($(srctree),) srctree := $(patsubst %/,%,$(dir $(CURDIR))) diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c index c58a135dc355e68e82de8c00363acf3c95f3757f..1854d6b978604013b2decac6b983d7468dfe345c 100644 --- a/tools/bpf/bpftool/main.c +++ b/tools/bpf/bpftool/main.c @@ -396,6 +396,8 @@ int main(int argc, char **argv) }; int opt, ret; + setlinebuf(stdout); + last_do_help = do_help; pretty_output = false; json_output = false; diff --git a/tools/bpf/resolve_btfids/Makefile b/tools/bpf/resolve_btfids/Makefile index bb9fa8de7e625a559f8edfa58272f082ec32c58a..af9f9d3534c96540feed44bafae1dcde25fb44cb 100644 --- a/tools/bpf/resolve_btfids/Makefile +++ b/tools/bpf/resolve_btfids/Makefile @@ -9,7 +9,11 @@ ifeq ($(V),1) msg = else Q = @ - msg = @printf ' %-8s %s%s\n' "$(1)" "$(notdir $(2))" "$(if $(3), $(3))"; + ifeq ($(silent),1) + msg = + else + msg = @printf ' %-8s %s%s\n' "$(1)" "$(notdir $(2))" "$(if $(3), $(3))"; + endif 
MAKEFLAGS=--no-print-directory endif diff --git a/tools/include/nolibc/nolibc.h b/tools/include/nolibc/nolibc.h index 2551e9b71167b3526bcf692bb29cc00658f502ea..b8cecb66d28b7f236a1afc3436f8ce315c27500b 100644 --- a/tools/include/nolibc/nolibc.h +++ b/tools/include/nolibc/nolibc.h @@ -422,16 +422,22 @@ struct stat { }) /* startup code */ +/* + * x86-64 System V ABI mandates: + * 1) %rsp must be 16-byte aligned right before the function call. + * 2) The deepest stack frame should be zero (the %rbp). + * + */ asm(".section .text\n" ".global _start\n" "_start:\n" "pop %rdi\n" // argc (first arg, %rdi) "mov %rsp, %rsi\n" // argv[] (second arg, %rsi) "lea 8(%rsi,%rdi,8),%rdx\n" // then a NULL then envp (third arg, %rdx) - "and $-16, %rsp\n" // x86 ABI : esp must be 16-byte aligned when - "sub $8, %rsp\n" // entering the callee + "xor %ebp, %ebp\n" // zero the stack frame + "and $-16, %rsp\n" // x86 ABI : esp must be 16-byte aligned before call "call main\n" // main() returns the status code, we'll exit with it. - "movzb %al, %rdi\n" // retrieve exit code from 8 lower bits + "mov %eax, %edi\n" // retrieve exit code (32 bit) "mov $60, %rax\n" // NR_exit == 60 "syscall\n" // really exit "hlt\n" // ensure it does not return @@ -600,20 +606,28 @@ struct sys_stat_struct { }) /* startup code */ +/* + * i386 System V ABI mandates: + * 1) last pushed argument must be 16-byte aligned. + * 2) The deepest stack frame should be set to zero + * + */ asm(".section .text\n" ".global _start\n" "_start:\n" "pop %eax\n" // argc (first arg, %eax) "mov %esp, %ebx\n" // argv[] (second arg, %ebx) "lea 4(%ebx,%eax,4),%ecx\n" // then a NULL then envp (third arg, %ecx) - "and $-16, %esp\n" // x86 ABI : esp must be 16-byte aligned when + "xor %ebp, %ebp\n" // zero the stack frame + "and $-16, %esp\n" // x86 ABI : esp must be 16-byte aligned before + "sub $4, %esp\n" // the call instruction (args are aligned) "push %ecx\n" // push all registers on the stack so that we "push %ebx\n" // support both regparm and plain stack modes "push %eax\n" "call main\n" // main() returns the status code in %eax - "movzbl %al, %ebx\n" // retrieve exit code from lower 8 bits - "movl $1, %eax\n" // NR_exit == 1 - "int $0x80\n" // exit now + "mov %eax, %ebx\n" // retrieve exit code (32-bit int) + "movl $1, %eax\n" // NR_exit == 1 + "int $0x80\n" // exit now "hlt\n" // ensure it does not ""); @@ -797,7 +811,6 @@ asm(".section .text\n" "and %r3, %r1, $-8\n" // AAPCS : sp must be 8-byte aligned in the "mov %sp, %r3\n" // callee, an bl doesn't push (lr=pc) "bl main\n" // main() returns the status code, we'll exit with it. - "and %r0, %r0, $0xff\n" // limit exit code to 8 bits "movs r7, $1\n" // NR_exit == 1 "svc $0x00\n" ""); @@ -994,7 +1007,6 @@ asm(".section .text\n" "add x2, x2, x1\n" // + argv "and sp, x1, -16\n" // sp must be 16-byte aligned in the callee "bl main\n" // main() returns the status code, we'll exit with it. - "and x0, x0, 0xff\n" // limit exit code to 8 bits "mov x8, 93\n" // NR_exit == 93 "svc #0\n" ""); @@ -1199,7 +1211,7 @@ asm(".section .text\n" "addiu $sp,$sp,-16\n" // the callee expects to save a0..a3 there! "jal main\n" // main() returns the status code, we'll exit with it. 
"nop\n" // delayed slot - "and $a0, $v0, 0xff\n" // limit exit code to 8 bits + "move $a0, $v0\n" // retrieve 32-bit exit code from v0 "li $v0, 4001\n" // NR_exit == 4001 "syscall\n" ".end __start\n" @@ -1397,7 +1409,6 @@ asm(".section .text\n" "add a2,a2,a1\n" // + argv "andi sp,a1,-16\n" // sp must be 16-byte aligned "call main\n" // main() returns the status code, we'll exit with it. - "andi a0, a0, 0xff\n" // limit exit code to 8 bits "li a7, 93\n" // NR_exit == 93 "ecall\n" ""); diff --git a/tools/lib/subcmd/subcmd-util.h b/tools/lib/subcmd/subcmd-util.h index 794a375dad3601e26f870edfa75b3458f1341aa8..b2aec04fce8f674e61eddf6e4820b738f4d4c7e3 100644 --- a/tools/lib/subcmd/subcmd-util.h +++ b/tools/lib/subcmd/subcmd-util.h @@ -50,15 +50,8 @@ static NORETURN inline void die(const char *err, ...) static inline void *xrealloc(void *ptr, size_t size) { void *ret = realloc(ptr, size); - if (!ret && !size) - ret = realloc(ptr, 1); - if (!ret) { - ret = realloc(ptr, size); - if (!ret && !size) - ret = realloc(ptr, 1); - if (!ret) - die("Out of memory, realloc failed"); - } + if (!ret) + die("Out of memory, realloc failed"); return ret; } diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c index 0374adcb223c75c418b7b9bb51e0c7215e012160..ac99c0764bee83a87a3f25f8367694bdb0bd3be0 100644 --- a/tools/perf/util/bpf-loader.c +++ b/tools/perf/util/bpf-loader.c @@ -1215,9 +1215,10 @@ bpf__obj_config_map(struct bpf_object *obj, pr_debug("ERROR: Invalid map config option '%s'\n", map_opt); err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT; out: - free(map_name); if (!err) *key_scan_pos += strlen(map_opt); + + free(map_name); return err; } diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c index bcb494dc816a0b47aa1884ba82d7a85d2e176b19..48754083791d8d35594a942809b189a727997689 100644 --- a/tools/perf/util/data.c +++ b/tools/perf/util/data.c @@ -44,10 +44,6 @@ int perf_data__create_dir(struct perf_data *data, int nr) if (!files) return -ENOMEM; - data->dir.version = PERF_DIR_VERSION; - data->dir.files = files; - data->dir.nr = nr; - for (i = 0; i < nr; i++) { struct perf_data_file *file = &files[i]; @@ -62,6 +58,9 @@ int perf_data__create_dir(struct perf_data *data, int nr) file->fd = ret; } + data->dir.version = PERF_DIR_VERSION; + data->dir.files = files; + data->dir.nr = nr; return 0; out_err: diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c index 5cda5565777a0bfca8f319df7f5a7a79fb0dc4cf..0af163abaa62b87f3d5a4fb841f64e8f84be394d 100644 --- a/tools/perf/util/debug.c +++ b/tools/perf/util/debug.c @@ -145,7 +145,7 @@ static int trace_event_printer(enum binary_printer_ops op, break; case BINARY_PRINT_CHAR_DATA: printed += color_fprintf(fp, color, "%c", - isprint(ch) ? ch : '.'); + isprint(ch) && isascii(ch) ? 
ch : '.'); break; case BINARY_PRINT_CHAR_PAD: printed += color_fprintf(fp, color, " "); diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 1cad6051d8b0892c3f04213d93b0f871bd5bea20..1a1cbd16d76d475b3ef06c2455faab6a13a09fb3 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1014,6 +1014,17 @@ struct evsel_config_term *__evsel__get_config_term(struct evsel *evsel, enum evs return found_term; } +static void evsel__set_default_freq_period(struct record_opts *opts, + struct perf_event_attr *attr) +{ + if (opts->freq) { + attr->freq = 1; + attr->sample_freq = opts->freq; + } else { + attr->sample_period = opts->default_interval; + } +} + /* * The enable_on_exec/disabled value strategy: * @@ -1080,14 +1091,12 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts, * We default some events to have a default interval. But keep * it a weak assumption overridable by the user. */ - if (!attr->sample_period) { - if (opts->freq) { - attr->freq = 1; - attr->sample_freq = opts->freq; - } else { - attr->sample_period = opts->default_interval; - } - } + if ((evsel->is_libpfm_event && !attr->sample_period) || + (!evsel->is_libpfm_event && (!attr->sample_period || + opts->user_freq != UINT_MAX || + opts->user_interval != ULLONG_MAX))) + evsel__set_default_freq_period(opts, attr); + /* * If attr->freq was set (here or earlier), ask for period * to be sampled. diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 07db6cfad65b988e3c77642bba4d05c8cff3b268..d103084fcd56cbf5db6df63b305a716a12aea3dc 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -3035,6 +3035,9 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev, for (j = 0; j < num_matched_functions; j++) { sym = syms[j]; + if (sym->type != STT_FUNC) + continue; + /* There can be duplicated symbols in the map */ for (i = 0; i < j; i++) if (sym->start == syms[i]->start) { diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c index a963b5b8eb72409152596d6b33f0d961aeb1aa40..96fe9c1af33644b71393dffb16d1c5c86a0a5342 100644 --- a/tools/perf/util/stat-display.c +++ b/tools/perf/util/stat-display.c @@ -555,15 +555,16 @@ static void collect_all_aliases(struct perf_stat_config *config, struct evsel *c alias = list_prepare_entry(counter, &(evlist->core.entries), core.node); list_for_each_entry_continue (alias, &evlist->core.entries, core.node) { - if (strcmp(evsel__name(alias), evsel__name(counter)) || - alias->scale != counter->scale || - alias->cgrp != counter->cgrp || - strcmp(alias->unit, counter->unit) || - evsel__is_clock(alias) != evsel__is_clock(counter) || - !strcmp(alias->pmu_name, counter->pmu_name)) - break; - alias->merged_stat = true; - cb(config, alias, data, false); + /* Merge events with the same name, etc. but on different PMUs. 
*/ + if (!strcmp(evsel__name(alias), evsel__name(counter)) && + alias->scale == counter->scale && + alias->cgrp == counter->cgrp && + !strcmp(alias->unit, counter->unit) && + evsel__is_clock(alias) == evsel__is_clock(counter) && + strcmp(alias->pmu_name, counter->pmu_name)) { + alias->merged_stat = true; + cb(config, alias, data, false); + } } } diff --git a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c index fafeddaad6a998e33ea99d95be5440fd6ed32558..23915be6172d63bda9e969c6b9b6f079f50f8d25 100644 --- a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c +++ b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c @@ -105,4 +105,6 @@ void test_skb_ctx(void) "ctx_out_mark", "skb->mark == %u, expected %d\n", skb.mark, 10); + + bpf_object__close(obj); } diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h index 1858435de7aaf91a3f2ef12e1c8a74e9ee934c4e..5cb90ca29218643a08fe5edd832f0675dfaee38e 100644 --- a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h +++ b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h @@ -235,7 +235,7 @@ SEC("sk_msg1") int bpf_prog4(struct sk_msg_md *msg) { int *bytes, zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5; - int *start, *end, *start_push, *end_push, *start_pop, *pop; + int *start, *end, *start_push, *end_push, *start_pop, *pop, err = 0; bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero); if (bytes) @@ -249,8 +249,11 @@ int bpf_prog4(struct sk_msg_md *msg) bpf_msg_pull_data(msg, *start, *end, 0); start_push = bpf_map_lookup_elem(&sock_bytes, &two); end_push = bpf_map_lookup_elem(&sock_bytes, &three); - if (start_push && end_push) - bpf_msg_push_data(msg, *start_push, *end_push, 0); + if (start_push && end_push) { + err = bpf_msg_push_data(msg, *start_push, *end_push, 0); + if (err) + return SK_DROP; + } start_pop = bpf_map_lookup_elem(&sock_bytes, &four); pop = bpf_map_lookup_elem(&sock_bytes, &five); if (start_pop && pop) @@ -263,6 +266,7 @@ int bpf_prog6(struct sk_msg_md *msg) { int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, key = 0; int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop, *f; + int err = 0; __u64 flags = 0; bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero); @@ -279,8 +283,11 @@ int bpf_prog6(struct sk_msg_md *msg) start_push = bpf_map_lookup_elem(&sock_bytes, &two); end_push = bpf_map_lookup_elem(&sock_bytes, &three); - if (start_push && end_push) - bpf_msg_push_data(msg, *start_push, *end_push, 0); + if (start_push && end_push) { + err = bpf_msg_push_data(msg, *start_push, *end_push, 0); + if (err) + return SK_DROP; + } start_pop = bpf_map_lookup_elem(&sock_bytes, &four); pop = bpf_map_lookup_elem(&sock_bytes, &five); @@ -338,7 +345,7 @@ SEC("sk_msg5") int bpf_prog10(struct sk_msg_md *msg) { int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop; - int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5; + int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, err = 0; bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero); if (bytes) @@ -352,8 +359,11 @@ int bpf_prog10(struct sk_msg_md *msg) bpf_msg_pull_data(msg, *start, *end, 0); start_push = bpf_map_lookup_elem(&sock_bytes, &two); end_push = bpf_map_lookup_elem(&sock_bytes, &three); - if (start_push && end_push) - bpf_msg_push_data(msg, *start_push, *end_push, 0); + if (start_push && end_push) { + err = bpf_msg_push_data(msg, *start_push, *end_push, 0); + if (err) + return SK_PASS; + } start_pop 
= bpf_map_lookup_elem(&sock_bytes, &four); pop = bpf_map_lookup_elem(&sock_bytes, &five); if (start_pop && pop) diff --git a/tools/testing/selftests/clone3/clone3.c b/tools/testing/selftests/clone3/clone3.c index 42be3b9258301e87823edfe7284142fca5d49158..cd4582129c7d69c1e45b6a3acbd7a5f2c6c07e67 100644 --- a/tools/testing/selftests/clone3/clone3.c +++ b/tools/testing/selftests/clone3/clone3.c @@ -52,6 +52,12 @@ static int call_clone3(uint64_t flags, size_t size, enum test_mode test_mode) size = sizeof(struct __clone_args); switch (test_mode) { + case CLONE3_ARGS_NO_TEST: + /* + * Uses default 'flags' and 'SIGCHLD' + * assignment. + */ + break; case CLONE3_ARGS_ALL_0: args.flags = 0; args.exit_signal = 0; @@ -120,8 +126,6 @@ static void test_clone3(uint64_t flags, size_t size, int expected, int main(int argc, char *argv[]) { - pid_t pid; - uid_t uid = getuid(); ksft_print_header(); diff --git a/tools/testing/selftests/exec/Makefile b/tools/testing/selftests/exec/Makefile index dd61118df66edbcde89f92a2626d2e9186122fda..2d7fca446c7f7b418f1be5580ce42f1c205978f2 100644 --- a/tools/testing/selftests/exec/Makefile +++ b/tools/testing/selftests/exec/Makefile @@ -3,9 +3,9 @@ CFLAGS = -Wall CFLAGS += -Wno-nonnull CFLAGS += -D_GNU_SOURCE -TEST_PROGS := binfmt_script non-regular -TEST_GEN_PROGS := execveat load_address_4096 load_address_2097152 load_address_16777216 -TEST_GEN_FILES := execveat.symlink execveat.denatured script subdir pipe +TEST_PROGS := binfmt_script +TEST_GEN_PROGS := execveat load_address_4096 load_address_2097152 load_address_16777216 non-regular +TEST_GEN_FILES := execveat.symlink execveat.denatured script subdir # Makefile is a run-time dependency, since it's accessed by the execveat test TEST_FILES := Makefile diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/profile.tc b/tools/testing/selftests/ftrace/test.d/kprobe/profile.tc index 98166fa3eb91ccbbef854d9ad8c5c5a605b594f9..34fb89b0c61fa35bcc55c81ee2cbdd8f274af492 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/profile.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/profile.tc @@ -1,6 +1,6 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 -# description: Kprobe dynamic event - adding and removing +# description: Kprobe profile # requires: kprobe_events ! 
grep -q 'myevent' kprobe_profile diff --git a/tools/testing/selftests/futex/Makefile b/tools/testing/selftests/futex/Makefile index 12631f0076a10274dff566228b05748e9a217be0..11e157d7533b8fbaf5ac691b3df7057e0ff558c1 100644 --- a/tools/testing/selftests/futex/Makefile +++ b/tools/testing/selftests/futex/Makefile @@ -11,7 +11,7 @@ all: @for DIR in $(SUBDIRS); do \ BUILD_TARGET=$(OUTPUT)/$$DIR; \ mkdir $$BUILD_TARGET -p; \ - make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\ + $(MAKE) OUTPUT=$$BUILD_TARGET -C $$DIR $@;\ if [ -e $$DIR/$(TEST_PROGS) ]; then \ rsync -a $$DIR/$(TEST_PROGS) $$BUILD_TARGET/; \ fi \ @@ -32,6 +32,6 @@ override define CLEAN @for DIR in $(SUBDIRS); do \ BUILD_TARGET=$(OUTPUT)/$$DIR; \ mkdir $$BUILD_TARGET -p; \ - make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\ + $(MAKE) OUTPUT=$$BUILD_TARGET -C $$DIR $@;\ done endef diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h index edce85420d1934e3892f5ffe1d5c6d06c0df593f..3e7b2e521cde435333b0c1e5fafbc77c0f2f7dc1 100644 --- a/tools/testing/selftests/kselftest_harness.h +++ b/tools/testing/selftests/kselftest_harness.h @@ -871,7 +871,8 @@ static void __timeout_handler(int sig, siginfo_t *info, void *ucontext) } t->timed_out = true; - kill(t->pid, SIGKILL); + // signal process group + kill(-(t->pid), SIGKILL); } void __wait_for_test(struct __test_metadata *t) @@ -965,7 +966,7 @@ void __run_test(struct __fixture_metadata *f, t->passed = 1; t->skip = 0; t->trigger = 0; - t->step = 0; + t->step = 1; t->no_print = 0; memset(t->results->reason, 0, sizeof(t->results->reason)); @@ -981,6 +982,7 @@ void __run_test(struct __fixture_metadata *f, ksft_print_msg("ERROR SPAWNING TEST CHILD\n"); t->passed = 0; } else if (t->pid == 0) { + setpgrp(); t->fn(t, variant); if (t->skip) _exit(255); diff --git a/tools/testing/selftests/mincore/mincore_selftest.c b/tools/testing/selftests/mincore/mincore_selftest.c index 5a1e85ff5d32a4a54fe5578e655692c15750f454..2cf6f2f277ab8a22951e57f906a4880f5f236480 100644 --- a/tools/testing/selftests/mincore/mincore_selftest.c +++ b/tools/testing/selftests/mincore/mincore_selftest.c @@ -208,15 +208,21 @@ TEST(check_file_mmap) errno = 0; fd = open(".", O_TMPFILE | O_RDWR, 0600); - ASSERT_NE(-1, fd) { - TH_LOG("Can't create temporary file: %s", - strerror(errno)); + if (fd < 0) { + ASSERT_EQ(errno, EOPNOTSUPP) { + TH_LOG("Can't create temporary file: %s", + strerror(errno)); + } + SKIP(goto out_free, "O_TMPFILE not supported by filesystem."); } errno = 0; retval = fallocate(fd, 0, 0, FILE_SIZE); - ASSERT_EQ(0, retval) { - TH_LOG("Error allocating space for the temporary file: %s", - strerror(errno)); + if (retval) { + ASSERT_EQ(errno, EOPNOTSUPP) { + TH_LOG("Error allocating space for the temporary file: %s", + strerror(errno)); + } + SKIP(goto out_close, "fallocate not supported by filesystem."); } /* @@ -272,7 +278,9 @@ TEST(check_file_mmap) } munmap(addr, FILE_SIZE); +out_close: close(fd); +out_free: free(vec); } diff --git a/tools/testing/selftests/netfilter/nft_concat_range.sh b/tools/testing/selftests/netfilter/nft_concat_range.sh index 5a4938d6dcf25a3f8137be799091f4f0d16ad7fd..b5eef5ffb58e5f8981085e483409c259929546fc 100755 --- a/tools/testing/selftests/netfilter/nft_concat_range.sh +++ b/tools/testing/selftests/netfilter/nft_concat_range.sh @@ -27,7 +27,7 @@ TYPES="net_port port_net net6_port port_proto net6_port_mac net6_port_mac_proto net_port_mac_proto_net" # Reported bugs, also described by TYPE_ variables below -BUGS="flush_remove_add" +BUGS="flush_remove_add reload" # 
List of possible paths to pktgen script from kernel tree for performance tests PKTGEN_SCRIPT_PATHS=" @@ -337,6 +337,23 @@ TYPE_flush_remove_add=" display Add two elements, flush, re-add " +TYPE_reload=" +display net,mac with reload +type_spec ipv4_addr . ether_addr +chain_spec ip daddr . ether saddr +dst addr4 +src mac +start 1 +count 1 +src_delta 2000 +tools sendip nc bash +proto udp + +race_repeat 0 + +perf_duration 0 +" + # Set template for all tests, types and rules are filled in depending on test set_template=' flush ruleset @@ -1455,6 +1472,59 @@ test_bug_flush_remove_add() { nft flush ruleset } +# - add ranged element, check that packets match it +# - reload the set, check packets still match +test_bug_reload() { + setup veth send_"${proto}" set || return ${KSELFTEST_SKIP} + rstart=${start} + + range_size=1 + for i in $(seq "${start}" $((start + count))); do + end=$((start + range_size)) + + # Avoid negative or zero-sized port ranges + if [ $((end / 65534)) -gt $((start / 65534)) ]; then + start=${end} + end=$((end + 1)) + fi + srcstart=$((start + src_delta)) + srcend=$((end + src_delta)) + + add "$(format)" || return 1 + range_size=$((range_size + 1)) + start=$((end + range_size)) + done + + # check kernel does allocate pcpu scratch map + # for reload with no element add/delete + ( echo flush set inet filter test ; + nft list set inet filter test ) | nft -f - + + start=${rstart} + range_size=1 + + for i in $(seq "${start}" $((start + count))); do + end=$((start + range_size)) + + # Avoid negative or zero-sized port ranges + if [ $((end / 65534)) -gt $((start / 65534)) ]; then + start=${end} + end=$((end + 1)) + fi + srcstart=$((start + src_delta)) + srcend=$((end + src_delta)) + + for j in $(seq ${start} $((range_size / 2 + 1)) ${end}); do + send_match "${j}" $((j + src_delta)) || return 1 + done + + range_size=$((range_size + 1)) + start=$((end + range_size)) + done + + nft flush ruleset +} + test_reported_issues() { eval test_bug_"${subtest}" } @@ -1513,4 +1583,4 @@ for name in ${TESTS}; do done done -[ ${passed} -eq 0 ] && exit ${KSELFTEST_SKIP} +[ ${passed} -eq 0 ] && exit ${KSELFTEST_SKIP} || exit 0 diff --git a/tools/testing/selftests/openat2/Makefile b/tools/testing/selftests/openat2/Makefile index 4b93b1417b8626043f220a0ecae479bd82191165..843ba56d8e49ecf35777214fff8690b4f6c688e7 100644 --- a/tools/testing/selftests/openat2/Makefile +++ b/tools/testing/selftests/openat2/Makefile @@ -5,4 +5,4 @@ TEST_GEN_PROGS := openat2_test resolve_test rename_attack_test include ../lib.mk -$(TEST_GEN_PROGS): helpers.c +$(TEST_GEN_PROGS): helpers.c helpers.h diff --git a/tools/testing/selftests/openat2/helpers.h b/tools/testing/selftests/openat2/helpers.h index a6ea27344db2db4aac7b2e7a676dedb4283a84ab..7056340b9339e9f4159999c2797d15f5ddf6d3fd 100644 --- a/tools/testing/selftests/openat2/helpers.h +++ b/tools/testing/selftests/openat2/helpers.h @@ -9,6 +9,7 @@ #define _GNU_SOURCE #include +#include #include #include #include "../kselftest.h" @@ -62,11 +63,12 @@ bool needs_openat2(const struct open_how *how); (similar to chroot(2)). */ #endif /* RESOLVE_IN_ROOT */ -#define E_func(func, ...) \ - do { \ - if (func(__VA_ARGS__) < 0) \ - ksft_exit_fail_msg("%s:%d %s failed\n", \ - __FILE__, __LINE__, #func);\ +#define E_func(func, ...) \ + do { \ + errno = 0; \ + if (func(__VA_ARGS__) < 0) \ + ksft_exit_fail_msg("%s:%d %s failed - errno:%d\n", \ + __FILE__, __LINE__, #func, errno); \ } while (0) #define E_asprintf(...) 
E_func(asprintf, __VA_ARGS__) diff --git a/tools/testing/selftests/openat2/openat2_test.c b/tools/testing/selftests/openat2/openat2_test.c index b386367c606b1d9a3fecc4ca359006468e051412..453152b58e7f0098cc28de2bc479fd7631e76496 100644 --- a/tools/testing/selftests/openat2/openat2_test.c +++ b/tools/testing/selftests/openat2/openat2_test.c @@ -244,6 +244,16 @@ void test_openat2_flags(void) unlink(path); fd = sys_openat2(AT_FDCWD, path, &test->how); + if (fd < 0 && fd == -EOPNOTSUPP) { + /* + * Skip the testcase if it failed because not supported + * by FS. (e.g. a valid O_TMPFILE combination on NFS) + */ + ksft_test_result_skip("openat2 with %s fails with %d (%s)\n", + test->name, fd, strerror(-fd)); + goto next; + } + if (test->err >= 0) failed = (fd < 0); else @@ -288,7 +298,7 @@ skip: else resultfn("openat2 with %s fails with %d (%s)\n", test->name, test->err, strerror(-test->err)); - +next: free(fdpath); fflush(stdout); } diff --git a/tools/testing/selftests/pidfd/pidfd.h b/tools/testing/selftests/pidfd/pidfd.h index 01f8d3c0cf2cb844ecc233e5cc1f6b7fa1a42c0f..6922d6417e1cf06094c10627e0bd491d292d6fd2 100644 --- a/tools/testing/selftests/pidfd/pidfd.h +++ b/tools/testing/selftests/pidfd/pidfd.h @@ -68,7 +68,7 @@ #define PIDFD_SKIP 3 #define PIDFD_XFAIL 4 -int wait_for_pid(pid_t pid) +static inline int wait_for_pid(pid_t pid) { int status, ret; @@ -78,13 +78,20 @@ again: if (errno == EINTR) goto again; + ksft_print_msg("waitpid returned -1, errno=%d\n", errno); return -1; } - if (!WIFEXITED(status)) + if (!WIFEXITED(status)) { + ksft_print_msg( + "waitpid !WIFEXITED, WIFSIGNALED=%d, WTERMSIG=%d\n", + WIFSIGNALED(status), WTERMSIG(status)); return -1; + } - return WEXITSTATUS(status); + ret = WEXITSTATUS(status); + ksft_print_msg("waitpid WEXITSTATUS=%d\n", ret); + return ret; } static inline int sys_pidfd_open(pid_t pid, unsigned int flags) diff --git a/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c b/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c index 22558524f71c34da86e9e0dacc5a73c523254e72..3fd8e903118f532d43a2ac251ec71531f19ca161 100644 --- a/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c +++ b/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c @@ -12,6 +12,7 @@ #include #include #include +#include #include "pidfd.h" #include "../kselftest.h" @@ -80,7 +81,10 @@ static inline int error_check(struct error *err, const char *test_name) return err->code; } +#define CHILD_STACK_SIZE 8192 + struct child { + char *stack; pid_t pid; int fd; }; @@ -89,17 +93,22 @@ static struct child clone_newns(int (*fn)(void *), void *args, struct error *err) { static int flags = CLONE_PIDFD | CLONE_NEWPID | CLONE_NEWNS | SIGCHLD; - size_t stack_size = 1024; - char *stack[1024] = { 0 }; struct child ret; if (!(flags & CLONE_NEWUSER) && geteuid() != 0) flags |= CLONE_NEWUSER; + ret.stack = mmap(NULL, CHILD_STACK_SIZE, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0); + if (ret.stack == MAP_FAILED) { + error_set(err, -1, "mmap of stack failed (errno %d)", errno); + return ret; + } + #ifdef __ia64__ - ret.pid = __clone2(fn, stack, stack_size, flags, args, &ret.fd); + ret.pid = __clone2(fn, ret.stack, CHILD_STACK_SIZE, flags, args, &ret.fd); #else - ret.pid = clone(fn, stack + stack_size, flags, args, &ret.fd); + ret.pid = clone(fn, ret.stack + CHILD_STACK_SIZE, flags, args, &ret.fd); #endif if (ret.pid < 0) { @@ -129,6 +138,11 @@ static inline int child_join(struct child *child, struct error *err) else if (r > 0) error_set(err, r, "child %d reported: %d", child->pid, r); + if 
(munmap(child->stack, CHILD_STACK_SIZE)) { + error_set(err, -1, "munmap of child stack failed (errno %d)", errno); + r = -1; + } + return r; } diff --git a/tools/testing/selftests/pidfd/pidfd_test.c b/tools/testing/selftests/pidfd/pidfd_test.c index 529eb700ac26a39833a4e2c4c25b4caeb57af272..9a2d64901d591f877a7f9db3989df3cb01dce893 100644 --- a/tools/testing/selftests/pidfd/pidfd_test.c +++ b/tools/testing/selftests/pidfd/pidfd_test.c @@ -441,7 +441,6 @@ static void test_pidfd_poll_exec(int use_waitpid) { int pid, pidfd = 0; int status, ret; - pthread_t t1; time_t prog_start = time(NULL); const char *test_name = "pidfd_poll check for premature notification on child thread exec"; @@ -500,13 +499,14 @@ static int child_poll_leader_exit_test(void *args) */ *child_exit_secs = time(NULL); syscall(SYS_exit, 0); + /* Never reached, but appeases compiler thinking we should return. */ + exit(0); } static void test_pidfd_poll_leader_exit(int use_waitpid) { int pid, pidfd = 0; - int status, ret; - time_t prog_start = time(NULL); + int status, ret = 0; const char *test_name = "pidfd_poll check for premature notification on non-empty" "group leader exit"; diff --git a/tools/testing/selftests/pidfd/pidfd_wait.c b/tools/testing/selftests/pidfd/pidfd_wait.c index be2943f072f6088ba77df89ba2b7c942b9534465..17999e082aa715525f013f194b278e1bdf6786ad 100644 --- a/tools/testing/selftests/pidfd/pidfd_wait.c +++ b/tools/testing/selftests/pidfd/pidfd_wait.c @@ -39,7 +39,7 @@ static int sys_waitid(int which, pid_t pid, siginfo_t *info, int options, TEST(wait_simple) { - int pidfd = -1, status = 0; + int pidfd = -1; pid_t parent_tid = -1; struct clone_args args = { .parent_tid = ptr_to_u64(&parent_tid), @@ -47,7 +47,6 @@ TEST(wait_simple) .flags = CLONE_PIDFD | CLONE_PARENT_SETTID, .exit_signal = SIGCHLD, }; - int ret; pid_t pid; siginfo_t info = { .si_signo = 0, @@ -88,7 +87,7 @@ TEST(wait_simple) TEST(wait_states) { - int pidfd = -1, status = 0; + int pidfd = -1; pid_t parent_tid = -1; struct clone_args args = { .parent_tid = ptr_to_u64(&parent_tid), diff --git a/tools/testing/selftests/powerpc/security/spectre_v2.c b/tools/testing/selftests/powerpc/security/spectre_v2.c index adc2b7294e5fddb54150bd797b04efc5825b3c3d..83647b8277e7dfa2efa7a45e9bb327dd30a02208 100644 --- a/tools/testing/selftests/powerpc/security/spectre_v2.c +++ b/tools/testing/selftests/powerpc/security/spectre_v2.c @@ -193,7 +193,7 @@ int spectre_v2_test(void) * We are not vulnerable and reporting otherwise, so * missing such a mismatch is safe. */ - if (state == VULNERABLE) + if (miss_percent > 95) return 4; return 1; diff --git a/tools/testing/selftests/rtc/settings b/tools/testing/selftests/rtc/settings index ba4d85f74cd6b9a671652d2610ac08bbd386a577..a953c96aa16e1e814867b33d06e894ffb664bb1b 100644 --- a/tools/testing/selftests/rtc/settings +++ b/tools/testing/selftests/rtc/settings @@ -1 +1 @@ -timeout=90 +timeout=180 diff --git a/tools/testing/selftests/vm/hmm-tests.c b/tools/testing/selftests/vm/hmm-tests.c index c9404ef9698e2b2d96c96178dc64529cc4014470..426dccc08f90675192fd5e96f3de089e7f4ba6ce 100644 --- a/tools/testing/selftests/vm/hmm-tests.c +++ b/tools/testing/selftests/vm/hmm-tests.c @@ -1242,6 +1242,48 @@ TEST_F(hmm, anon_teardown) } } +/* + * Test memory snapshot without faulting in pages accessed by the device. 
+ */ +TEST_F(hmm, mixedmap) +{ + struct hmm_buffer *buffer; + unsigned long npages; + unsigned long size; + unsigned char *m; + int ret; + + npages = 1; + size = npages << self->page_shift; + + buffer = malloc(sizeof(*buffer)); + ASSERT_NE(buffer, NULL); + + buffer->fd = -1; + buffer->size = size; + buffer->mirror = malloc(npages); + ASSERT_NE(buffer->mirror, NULL); + + + /* Reserve a range of addresses. */ + buffer->ptr = mmap(NULL, size, + PROT_READ | PROT_WRITE, + MAP_PRIVATE, + self->fd, 0); + ASSERT_NE(buffer->ptr, MAP_FAILED); + + /* Simulate a device snapshotting CPU pagetables. */ + ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages); + ASSERT_EQ(ret, 0); + ASSERT_EQ(buffer->cpages, npages); + + /* Check what the device saw. */ + m = buffer->mirror; + ASSERT_EQ(m[0], HMM_DMIRROR_PROT_READ); + + hmm_buffer_free(buffer); +} + /* * Test memory snapshot without faulting in pages accessed by the device. */ diff --git a/tools/testing/selftests/zram/zram.sh b/tools/testing/selftests/zram/zram.sh index 232e958ec454756501f2caa8eaf2133067fe10ac..b0b91d9b0dc2143cf689c9d42be8410057790215 100755 --- a/tools/testing/selftests/zram/zram.sh +++ b/tools/testing/selftests/zram/zram.sh @@ -2,9 +2,6 @@ # SPDX-License-Identifier: GPL-2.0 TCID="zram.sh" -# Kselftest framework requirement - SKIP code is 4. -ksft_skip=4 - . ./zram_lib.sh run_zram () { @@ -18,14 +15,4 @@ echo "" check_prereqs -# check zram module exists -MODULE_PATH=/lib/modules/`uname -r`/kernel/drivers/block/zram/zram.ko -if [ -f $MODULE_PATH ]; then - run_zram -elif [ -b /dev/zram0 ]; then - run_zram -else - echo "$TCID : No zram.ko module or /dev/zram0 device file not found" - echo "$TCID : CONFIG_ZRAM is not set" - exit $ksft_skip -fi +run_zram diff --git a/tools/testing/selftests/zram/zram01.sh b/tools/testing/selftests/zram/zram01.sh index 114863d9fb8768c906732d7ce780e3f6ea73e44c..8f4affe34f3e4c1d379d96515506e048954be073 100755 --- a/tools/testing/selftests/zram/zram01.sh +++ b/tools/testing/selftests/zram/zram01.sh @@ -33,9 +33,7 @@ zram_algs="lzo" zram_fill_fs() { - local mem_free0=$(free -m | awk 'NR==2 {print $4}') - - for i in $(seq 0 $(($dev_num - 1))); do + for i in $(seq $dev_start $dev_end); do echo "fill zram$i..." 
local b=0 while [ true ]; do @@ -45,29 +43,17 @@ zram_fill_fs() b=$(($b + 1)) done echo "zram$i can be filled with '$b' KB" - done - local mem_free1=$(free -m | awk 'NR==2 {print $4}') - local used_mem=$(($mem_free0 - $mem_free1)) + local mem_used_total=`awk '{print $3}' "/sys/block/zram$i/mm_stat"` + local v=$((100 * 1024 * $b / $mem_used_total)) + if [ "$v" -lt 100 ]; then + echo "FAIL compression ratio: 0.$v:1" + ERR_CODE=-1 + return + fi - local total_size=0 - for sm in $zram_sizes; do - local s=$(echo $sm | sed 's/M//') - total_size=$(($total_size + $s)) + echo "zram compression ratio: $(echo "scale=2; $v / 100 " | bc):1: OK" done - - echo "zram used ${used_mem}M, zram disk sizes ${total_size}M" - - local v=$((100 * $total_size / $used_mem)) - - if [ "$v" -lt 100 ]; then - echo "FAIL compression ratio: 0.$v:1" - ERR_CODE=-1 - zram_cleanup - return - fi - - echo "zram compression ratio: $(echo "scale=2; $v / 100 " | bc):1: OK" } check_prereqs @@ -81,7 +67,6 @@ zram_mount zram_fill_fs zram_cleanup -zram_unload if [ $ERR_CODE -ne 0 ]; then echo "$TCID : [FAIL]" diff --git a/tools/testing/selftests/zram/zram02.sh b/tools/testing/selftests/zram/zram02.sh index e83b404807c09a7cf2a3cdefb9ec4eb746319a36..2418b0c4ed136b3b603ade62cd2838ff61af38b8 100755 --- a/tools/testing/selftests/zram/zram02.sh +++ b/tools/testing/selftests/zram/zram02.sh @@ -36,7 +36,6 @@ zram_set_memlimit zram_makeswap zram_swapoff zram_cleanup -zram_unload if [ $ERR_CODE -ne 0 ]; then echo "$TCID : [FAIL]" diff --git a/tools/testing/selftests/zram/zram_lib.sh b/tools/testing/selftests/zram/zram_lib.sh index 6f872f266fd1151b6771f55147d265468a3ad74d..21ec1966de76ca4047fe50eea82015bc03986c98 100755 --- a/tools/testing/selftests/zram/zram_lib.sh +++ b/tools/testing/selftests/zram/zram_lib.sh @@ -5,12 +5,17 @@ # Author: Alexey Kodanev # Modified: Naresh Kamboju -MODULE=0 dev_makeswap=-1 dev_mounted=-1 - +dev_start=0 +dev_end=-1 +module_load=-1 +sys_control=-1 # Kselftest framework requirement - SKIP code is 4. ksft_skip=4 +kernel_version=`uname -r | cut -d'.' -f1,2` +kernel_major=${kernel_version%.*} +kernel_minor=${kernel_version#*.} trap INT @@ -25,68 +30,104 @@ check_prereqs() fi } +kernel_gte() +{ + major=${1%.*} + minor=${1#*.} + + if [ $kernel_major -gt $major ]; then + return 0 + elif [[ $kernel_major -eq $major && $kernel_minor -ge $minor ]]; then + return 0 + fi + + return 1 +} + zram_cleanup() { echo "zram cleanup" local i= - for i in $(seq 0 $dev_makeswap); do + for i in $(seq $dev_start $dev_makeswap); do swapoff /dev/zram$i done - for i in $(seq 0 $dev_mounted); do + for i in $(seq $dev_start $dev_mounted); do umount /dev/zram$i done - for i in $(seq 0 $(($dev_num - 1))); do + for i in $(seq $dev_start $dev_end); do echo 1 > /sys/block/zram${i}/reset rm -rf zram$i done -} + if [ $sys_control -eq 1 ]; then + for i in $(seq $dev_start $dev_end); do + echo $i > /sys/class/zram-control/hot_remove + done + fi -zram_unload() -{ - if [ $MODULE -ne 0 ] ; then - echo "zram rmmod zram" + if [ $module_load -eq 1 ]; then rmmod zram > /dev/null 2>&1 fi } zram_load() { - # check zram module exists - MODULE_PATH=/lib/modules/`uname -r`/kernel/drivers/block/zram/zram.ko - if [ -f $MODULE_PATH ]; then - MODULE=1 - echo "create '$dev_num' zram device(s)" - modprobe zram num_devices=$dev_num - if [ $? 
-ne 0 ]; then - echo "failed to insert zram module" - exit 1 - fi - - dev_num_created=$(ls /dev/zram* | wc -w) + echo "create '$dev_num' zram device(s)" + + # zram module loaded, new kernel + if [ -d "/sys/class/zram-control" ]; then + echo "zram modules already loaded, kernel supports" \ + "zram-control interface" + dev_start=$(ls /dev/zram* | wc -w) + dev_end=$(($dev_start + $dev_num - 1)) + sys_control=1 + + for i in $(seq $dev_start $dev_end); do + cat /sys/class/zram-control/hot_add > /dev/null + done + + echo "all zram devices (/dev/zram$dev_start~$dev_end" \ + "successfully created" + return 0 + fi - if [ "$dev_num_created" -ne "$dev_num" ]; then - echo "unexpected num of devices: $dev_num_created" - ERR_CODE=-1 + # detect old kernel or built-in + modprobe zram num_devices=$dev_num + if [ ! -d "/sys/class/zram-control" ]; then + if grep -q '^zram' /proc/modules; then + rmmod zram > /dev/null 2>&1 + if [ $? -ne 0 ]; then + echo "zram module is being used on old kernel" \ + "without zram-control interface" + exit $ksft_skip + fi else - echo "zram load module successful" + echo "test needs CONFIG_ZRAM=m on old kernel without" \ + "zram-control interface" + exit $ksft_skip fi - elif [ -b /dev/zram0 ]; then - echo "/dev/zram0 device file found: OK" - else - echo "ERROR: No zram.ko module or no /dev/zram0 device found" - echo "$TCID : CONFIG_ZRAM is not set" - exit 1 + modprobe zram num_devices=$dev_num fi + + module_load=1 + dev_end=$(($dev_num - 1)) + echo "all zram devices (/dev/zram0~$dev_end) successfully created" } zram_max_streams() { echo "set max_comp_streams to zram device(s)" - local i=0 + kernel_gte 4.7 + if [ $? -eq 0 ]; then + echo "The device attribute max_comp_streams was"\ + "deprecated in 4.7" + return 0 + fi + + local i=$dev_start for max_s in $zram_max_streams; do local sys_path="/sys/block/zram${i}/max_comp_streams" echo $max_s > $sys_path || \ @@ -98,7 +139,7 @@ zram_max_streams() echo "FAIL can't set max_streams '$max_s', get $max_stream" i=$(($i + 1)) - echo "$sys_path = '$max_streams' ($i/$dev_num)" + echo "$sys_path = '$max_streams'" done echo "zram max streams: OK" @@ -108,15 +149,16 @@ zram_compress_alg() { echo "test that we can set compression algorithm" - local algs=$(cat /sys/block/zram0/comp_algorithm) + local i=$dev_start + local algs=$(cat /sys/block/zram${i}/comp_algorithm) echo "supported algs: $algs" - local i=0 + for alg in $zram_algs; do local sys_path="/sys/block/zram${i}/comp_algorithm" echo "$alg" > $sys_path || \ echo "FAIL can't set '$alg' to $sys_path" i=$(($i + 1)) - echo "$sys_path = '$alg' ($i/$dev_num)" + echo "$sys_path = '$alg'" done echo "zram set compression algorithm: OK" @@ -125,14 +167,14 @@ zram_compress_alg() zram_set_disksizes() { echo "set disk size to zram device(s)" - local i=0 + local i=$dev_start for ds in $zram_sizes; do local sys_path="/sys/block/zram${i}/disksize" echo "$ds" > $sys_path || \ echo "FAIL can't set '$ds' to $sys_path" i=$(($i + 1)) - echo "$sys_path = '$ds' ($i/$dev_num)" + echo "$sys_path = '$ds'" done echo "zram set disksizes: OK" @@ -142,14 +184,14 @@ zram_set_memlimit() { echo "set memory limit to zram device(s)" - local i=0 + local i=$dev_start for ds in $zram_mem_limits; do local sys_path="/sys/block/zram${i}/mem_limit" echo "$ds" > $sys_path || \ echo "FAIL can't set '$ds' to $sys_path" i=$(($i + 1)) - echo "$sys_path = '$ds' ($i/$dev_num)" + echo "$sys_path = '$ds'" done echo "zram set memory limit: OK" @@ -158,8 +200,8 @@ zram_set_memlimit() zram_makeswap() { echo "make swap with zram device(s)" - 
local i=0 - for i in $(seq 0 $(($dev_num - 1))); do + local i=$dev_start + for i in $(seq $dev_start $dev_end); do mkswap /dev/zram$i > err.log 2>&1 if [ $? -ne 0 ]; then cat err.log @@ -182,7 +224,7 @@ zram_makeswap() zram_swapoff() { local i= - for i in $(seq 0 $dev_makeswap); do + for i in $(seq $dev_start $dev_end); do swapoff /dev/zram$i > err.log 2>&1 if [ $? -ne 0 ]; then cat err.log @@ -196,7 +238,7 @@ zram_swapoff() zram_makefs() { - local i=0 + local i=$dev_start for fs in $zram_filesystems; do # if requested fs not supported default it to ext2 which mkfs.$fs > /dev/null 2>&1 || fs=ext2 @@ -215,7 +257,7 @@ zram_makefs() zram_mount() { local i=0 - for i in $(seq 0 $(($dev_num - 1))); do + for i in $(seq $dev_start $dev_end); do echo "mount /dev/zram$i" mkdir zram$i mount /dev/zram$i zram$i > /dev/null || \ diff --git a/usr/include/Makefile b/usr/include/Makefile index f6b3c85d900ede51844ca37a4ccd7d3817d23c0e..703a255cddc6358dc14f80f300abee8b9387ada4 100644 --- a/usr/include/Makefile +++ b/usr/include/Makefile @@ -34,7 +34,6 @@ no-header-test += linux/hdlc/ioctl.h no-header-test += linux/ivtv.h no-header-test += linux/kexec.h no-header-test += linux/matroxfb.h -no-header-test += linux/nfc.h no-header-test += linux/omap3isp.h no-header-test += linux/omapfb.h no-header-test += linux/patchkey.h diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c index c2323c27a28b52dd14c6405dcbc8d54b1356efe0..518cd8dc390e2d73a21aeac626865cb5e1a9fccf 100644 --- a/virt/kvm/eventfd.c +++ b/virt/kvm/eventfd.c @@ -451,8 +451,8 @@ bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin) idx = srcu_read_lock(&kvm->irq_srcu); gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin); if (gsi != -1) - hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list, - link) + hlist_for_each_entry_srcu(kian, &kvm->irq_ack_notifier_list, + link, srcu_read_lock_held(&kvm->irq_srcu)) if (kian->gsi == gsi) { srcu_read_unlock(&kvm->irq_srcu, idx); return true; @@ -468,8 +468,8 @@ void kvm_notify_acked_gsi(struct kvm *kvm, int gsi) { struct kvm_irq_ack_notifier *kian; - hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list, - link) + hlist_for_each_entry_srcu(kian, &kvm->irq_ack_notifier_list, + link, srcu_read_lock_held(&kvm->irq_srcu)) if (kian->gsi == gsi) kian->irq_acked(kian); } diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 4a7d377b3a500b69aee71b535a4ad56c9ae45d41..d22de439250768a5b1c7cd6780424ae69a22316a 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1691,7 +1691,6 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn { return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn); } -EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_memslot); bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) {