diff --git a/.mailmap b/.mailmap index 71127b2608d20913bd29b3c87835d115391fb3d7..c94da2a63d0fa7fd7e7f3c34dc7c1dc608b5ffe7 100644 --- a/.mailmap +++ b/.mailmap @@ -70,6 +70,8 @@ Baolin Wang Baolin Wang Bart Van Assche Bart Van Assche +Ben Dooks +Ben Dooks Ben Gardner Ben M Cahill Ben Widawsky @@ -233,6 +235,7 @@ Jisheng Zhang Johan Hovold Johan Hovold John Crispin +John Keeping John Paul Adrian Glaubitz John Stultz @@ -364,6 +367,11 @@ Nicolas Pitre Nicolas Saenz Julienne Nicolas Saenz Julienne Niklas Söderlund +Nikolay Aleksandrov +Nikolay Aleksandrov +Nikolay Aleksandrov +Nikolay Aleksandrov +Nikolay Aleksandrov Oleksandr Natalenko Oleksij Rempel Oleksij Rempel diff --git a/CREDITS b/CREDITS index 2d9da9a7defa666cbfcd2aab7fcca821f2027066..de7e4dbbc5991194ce9bcaeb94a368e79d79832a 100644 --- a/CREDITS +++ b/CREDITS @@ -1706,6 +1706,10 @@ S: Panoramastrasse 18 S: D-69126 Heidelberg S: Germany +N: Neil Horman +M: nhorman@tuxdriver.com +D: SCTP protocol maintainer. + N: Simon Horman M: horms@verge.net.au D: Renesas ARM/ARM64 SoC maintainer diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index f67c0829350b8e0ae9120cf873fedc118bab72e7..e592a93644739f0b1812fa5754c2f93e8b2bf08d 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -1213,23 +1213,25 @@ PAGE_SIZE multiple when read back. A read-write single value file which exists on non-root cgroups. The default is "max". - Memory usage throttle limit. This is the main mechanism to - control memory usage of a cgroup. If a cgroup's usage goes + Memory usage throttle limit. If a cgroup's usage goes over the high boundary, the processes of the cgroup are throttled and put under heavy reclaim pressure. Going over the high limit never invokes the OOM killer and - under extreme conditions the limit may be breached. + under extreme conditions the limit may be breached. The high + limit should be used in scenarios where an external process + monitors the limited cgroup to alleviate heavy reclaim + pressure. memory.max A read-write single value file which exists on non-root cgroups. The default is "max". - Memory usage hard limit. This is the final protection - mechanism. If a cgroup's memory usage reaches this limit and - can't be reduced, the OOM killer is invoked in the cgroup. - Under certain circumstances, the usage may go over the limit - temporarily. + Memory usage hard limit. This is the main mechanism to limit + memory usage of a cgroup. If a cgroup's memory usage reaches + this limit and can't be reduced, the OOM killer is invoked in + the cgroup. Under certain circumstances, the usage may go + over the limit temporarily. In default configuration regular 0-order allocations always succeed unless OOM killer chooses current task as a victim. @@ -1238,10 +1240,6 @@ PAGE_SIZE multiple when read back. Caller could retry them differently, return into userspace as -ENOMEM or silently ignore in cases like disk readahead. - This is the ultimate protection mechanism. As long as the - high limit is used and monitored properly, this limit's - utility is limited to providing the final safety net. - memory.reclaim A write-only nested-keyed file which exists for all cgroups. 
diff --git a/Documentation/admin-guide/cifs/changes.rst b/Documentation/admin-guide/cifs/changes.rst index 3147bbae9c43f21a41c5e110404821b5c2027e62..8c42c4de510b1046eeca71db2dd2e15bbde11897 100644 --- a/Documentation/admin-guide/cifs/changes.rst +++ b/Documentation/admin-guide/cifs/changes.rst @@ -5,5 +5,5 @@ Changes See https://wiki.samba.org/index.php/LinuxCIFSKernel for summary information about fixes/improvements to CIFS/SMB2/SMB3 support (changes to cifs.ko module) by kernel version (and cifs internal module version). -This may be easier to read than parsing the output of "git log fs/cifs" -by release. +This may be easier to read than parsing the output of +"git log fs/smb/client" by release. diff --git a/Documentation/admin-guide/cifs/usage.rst b/Documentation/admin-guide/cifs/usage.rst index 2e151cd8c2e4de65c13d5c1f51b346911bc45d98..5f936b4b601881d3e031b71a1cc258c5ddb9585e 100644 --- a/Documentation/admin-guide/cifs/usage.rst +++ b/Documentation/admin-guide/cifs/usage.rst @@ -45,7 +45,7 @@ Installation instructions If you have built the CIFS vfs as module (successfully) simply type ``make modules_install`` (or if you prefer, manually copy the file to -the modules directory e.g. /lib/modules/2.4.10-4GB/kernel/fs/cifs/cifs.ko). +the modules directory e.g. /lib/modules/6.3.0-060300-generic/kernel/fs/smb/client/cifs.ko). If you have built the CIFS vfs into the kernel itself, follow the instructions for your distribution on how to install a new kernel (usually you @@ -66,15 +66,15 @@ If cifs is built as a module, then the size and number of network buffers and maximum number of simultaneous requests to one server can be configured. Changing these from their defaults is not recommended. By executing modinfo:: - modinfo kernel/fs/cifs/cifs.ko + modinfo -on kernel/fs/cifs/cifs.ko the list of configuration changes that can be made +on kernel/fs/smb/client/cifs.ko the list of configuration changes that can be made at module initialization time (by running insmod cifs.ko) can be seen. Recommendations =============== -To improve security the SMB2.1 dialect or later (usually will get SMB3) is now +To improve security the SMB2.1 dialect or later (usually will get SMB3.1.1) is now the new default. To use old dialects (e.g. to mount Windows XP) use "vers=1.0" on mount (or vers=2.0 for Windows Vista). Note that the CIFS (vers=1.0) is much older and less secure than the default dialect SMB3 which includes diff --git a/Documentation/admin-guide/quickly-build-trimmed-linux.rst b/Documentation/admin-guide/quickly-build-trimmed-linux.rst index ff4f4cc8522bdbfa018269c1704a95827bd81c26..f08149bc53f84dd427e514d7eb898169030da8a7 100644 --- a/Documentation/admin-guide/quickly-build-trimmed-linux.rst +++ b/Documentation/admin-guide/quickly-build-trimmed-linux.rst @@ -215,12 +215,14 @@ again. reduce the compile time enormously, especially if you are running an universal kernel from a commodity Linux distribution. - There is a catch: the make target 'localmodconfig' will disable kernel - features you have not directly or indirectly through some program utilized - since you booted the system. You can reduce or nearly eliminate that risk by - using tricks outlined in the reference section; for quick testing purposes - that risk is often negligible, but it is an aspect you want to keep in mind - in case your kernel behaves oddly. 
+ There is a catch: 'localmodconfig' is likely to disable kernel features you + did not use since you booted your Linux -- like drivers for currently + disconnected peripherals or a virtualization software not haven't used yet. + You can reduce or nearly eliminate that risk with tricks the reference + section outlines; but when building a kernel just for quick testing purposes + it is often negligible if such features are missing. But you should keep that + aspect in mind when using a kernel built with this make target, as it might + be the reason why something you only use occasionally stopped working. [:ref:`details`] @@ -271,6 +273,9 @@ again. does nothing at all; in that case you have to manually install your kernel, as outlined in the reference section. + If you are running a immutable Linux distribution, check its documentation + and the web to find out how to install your own kernel there. + [:ref:`details`] .. _another_sbs: @@ -291,29 +296,29 @@ again. version you care about, as git otherwise might retrieve the entire commit history:: - git fetch --shallow-exclude=v6.1 origin - - If you modified the sources (for example by applying a patch), you now need - to discard those modifications; that's because git otherwise will not be able - to switch to the sources of another version due to potential conflicting - changes:: - - git reset --hard + git fetch --shallow-exclude=v6.0 origin - Now checkout the version you are interested in, as explained above:: + Now switch to the version you are interested in -- but be aware the command + used here will discard any modifications you performed, as they would + conflict with the sources you want to checkout:: - git checkout --detach origin/master + git checkout --force --detach origin/master At this point you might want to patch the sources again or set/modify a build - tag, as explained earlier; afterwards adjust the build configuration to the - new codebase and build your next kernel:: + tag, as explained earlier. Afterwards adjust the build configuration to the + new codebase using olddefconfig, which will now adjust the configuration file + you prepared earlier using localmodconfig (~/linux/.config) for your next + kernel:: # reminder: if you want to apply patches, do it at this point # reminder: you might want to update your build tag at this point make olddefconfig + + Now build your kernel:: + make -j $(nproc --all) - Install the kernel as outlined above:: + Afterwards install the kernel as outlined above:: command -v installkernel && sudo make modules_install install @@ -584,11 +589,11 @@ versions and individual commits at hand at any time:: curl -L \ https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/clone.bundle \ -o linux-stable.git.bundle - git clone clone.bundle ~/linux/ + git clone linux-stable.git.bundle ~/linux/ rm linux-stable.git.bundle cd ~/linux/ - git remote set-url origin - https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git + git remote set-url origin \ + https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git git fetch origin git checkout --detach origin/master diff --git a/Documentation/cdrom/index.rst b/Documentation/cdrom/index.rst index e87a8785bc1a7172e7ed0ae897da3ba994b87587..e9b022d7093957d0e698cbc2ba0a303e3f6e4b4c 100644 --- a/Documentation/cdrom/index.rst +++ b/Documentation/cdrom/index.rst @@ -1,8 +1,8 @@ .. SPDX-License-Identifier: GPL-2.0 -===== -cdrom -===== +====== +CD-ROM +====== .. 
toctree:: :maxdepth: 1 diff --git a/Documentation/devicetree/bindings/ata/ahci-common.yaml b/Documentation/devicetree/bindings/ata/ahci-common.yaml index 7fdf40954a4c6798211d8b9178248f3d7f1a02b7..38770c4c85fd5defe674fca48d81e638eb2f1f97 100644 --- a/Documentation/devicetree/bindings/ata/ahci-common.yaml +++ b/Documentation/devicetree/bindings/ata/ahci-common.yaml @@ -8,7 +8,7 @@ title: Common Properties for Serial ATA AHCI controllers maintainers: - Hans de Goede - - Damien Le Moal + - Damien Le Moal description: This document defines device tree properties for a common AHCI SATA diff --git a/Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml b/Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml index 9b31f864e071ee7c53f63eb87db631eb20c1e075..71364c6081ff50d9573102aa48c9bbaa4ea75f1e 100644 --- a/Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml +++ b/Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml @@ -32,7 +32,7 @@ properties: maxItems: 1 iommus: - maxItems: 1 + maxItems: 4 power-domains: maxItems: 1 diff --git a/Documentation/devicetree/bindings/cache/qcom,llcc.yaml b/Documentation/devicetree/bindings/cache/qcom,llcc.yaml index d8b91944180ae1d03edd7fb99091bf7ccfe81b3c..44892aa589fd00215fc284077b95b3d0ed5a7e42 100644 --- a/Documentation/devicetree/bindings/cache/qcom,llcc.yaml +++ b/Documentation/devicetree/bindings/cache/qcom,llcc.yaml @@ -129,6 +129,7 @@ allOf: - qcom,sm8250-llcc - qcom,sm8350-llcc - qcom,sm8450-llcc + - qcom,sm8550-llcc then: properties: reg: diff --git a/Documentation/devicetree/bindings/clock/canaan,k210-clk.yaml b/Documentation/devicetree/bindings/clock/canaan,k210-clk.yaml index 998e5cce652f32085e5e75797d5d7df1874a92e2..380cb6d8002529cdc20d01811e7c6720ab4408f3 100644 --- a/Documentation/devicetree/bindings/clock/canaan,k210-clk.yaml +++ b/Documentation/devicetree/bindings/clock/canaan,k210-clk.yaml @@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml# title: Canaan Kendryte K210 Clock maintainers: - - Damien Le Moal + - Damien Le Moal description: | Canaan Kendryte K210 SoC clocks driver bindings. The clock diff --git a/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml b/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml index e6c1ebfe8a324773cab5a53a4a98fd294cdd9c99..130e16d025bcf8242249e9e0ee83941001c4247c 100644 --- a/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml +++ b/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml @@ -82,6 +82,18 @@ properties: Indicates if the DSI controller is driving a panel which needs 2 DSI links. + qcom,master-dsi: + type: boolean + description: | + Indicates if the DSI controller is the master DSI controller when + qcom,dual-dsi-mode enabled. + + qcom,sync-dual-dsi: + type: boolean + description: | + Indicates if the DSI controller needs to sync the other DSI controller + with MIPI DCS commands when qcom,dual-dsi-mode enabled. 
+ assigned-clocks: minItems: 2 maxItems: 4 diff --git a/Documentation/devicetree/bindings/firmware/qcom,scm.yaml b/Documentation/devicetree/bindings/firmware/qcom,scm.yaml index 367d04ad19236242d4c05bc88a43931964588cc6..83381f3a1341f286c5eb5d9f8688cdb75b89f6cc 100644 --- a/Documentation/devicetree/bindings/firmware/qcom,scm.yaml +++ b/Documentation/devicetree/bindings/firmware/qcom,scm.yaml @@ -71,6 +71,8 @@ properties: minItems: 1 maxItems: 3 + dma-coherent: true + interconnects: maxItems: 1 diff --git a/Documentation/devicetree/bindings/fpga/lattice,sysconfig.yaml b/Documentation/devicetree/bindings/fpga/lattice,sysconfig.yaml index 4fb05eb84e2af4488d06fc46345e24fb3f958af0..164331eb627502dae2545ab636f1e7ede6e45a66 100644 --- a/Documentation/devicetree/bindings/fpga/lattice,sysconfig.yaml +++ b/Documentation/devicetree/bindings/fpga/lattice,sysconfig.yaml @@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml# title: Lattice Slave SPI sysCONFIG FPGA manager maintainers: - - Ivan Bornyakov + - Vladimir Georgiev description: | Lattice sysCONFIG port, which is used for FPGA configuration, among others, diff --git a/Documentation/devicetree/bindings/fpga/microchip,mpf-spi-fpga-mgr.yaml b/Documentation/devicetree/bindings/fpga/microchip,mpf-spi-fpga-mgr.yaml index 527532f039cecad9b39248b74fa9839804da6345..a157eecfb5fc333ced32389a02f79281b3d4161d 100644 --- a/Documentation/devicetree/bindings/fpga/microchip,mpf-spi-fpga-mgr.yaml +++ b/Documentation/devicetree/bindings/fpga/microchip,mpf-spi-fpga-mgr.yaml @@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml# title: Microchip Polarfire FPGA manager. maintainers: - - Ivan Bornyakov + - Vladimir Georgiev description: Device Tree Bindings for Microchip Polarfire FPGA Manager using slave SPI to diff --git a/Documentation/devicetree/bindings/i2c/opencores,i2c-ocores.yaml b/Documentation/devicetree/bindings/i2c/opencores,i2c-ocores.yaml index 85d9efb743eefcbc067e2b5d1008680049285c1c..d9ef8672901125b33a64a7f8143878823781fbda 100644 --- a/Documentation/devicetree/bindings/i2c/opencores,i2c-ocores.yaml +++ b/Documentation/devicetree/bindings/i2c/opencores,i2c-ocores.yaml @@ -60,6 +60,7 @@ properties: default: 0 regstep: + $ref: /schemas/types.yaml#/definitions/uint32 description: | deprecated, use reg-shift above deprecated: true diff --git a/Documentation/devicetree/bindings/i3c/silvaco,i3c-master.yaml b/Documentation/devicetree/bindings/i3c/silvaco,i3c-master.yaml index 62f3ca66274feb7ea39b74f847ab773c5e1e0711..32c821f97779b7ea95ae2e16ab68698eff8ac8d2 100644 --- a/Documentation/devicetree/bindings/i3c/silvaco,i3c-master.yaml +++ b/Documentation/devicetree/bindings/i3c/silvaco,i3c-master.yaml @@ -44,7 +44,7 @@ required: - clock-names - clocks -additionalProperties: true +unevaluatedProperties: false examples: - | diff --git a/Documentation/devicetree/bindings/iio/adc/nxp,imx8qxp-adc.yaml b/Documentation/devicetree/bindings/iio/adc/nxp,imx8qxp-adc.yaml index 63369ba388e4738cd4934884a37fbc444749458b..0a192ca192c5b3221843afacfe1b60c06425423e 100644 --- a/Documentation/devicetree/bindings/iio/adc/nxp,imx8qxp-adc.yaml +++ b/Documentation/devicetree/bindings/iio/adc/nxp,imx8qxp-adc.yaml @@ -39,6 +39,12 @@ properties: power-domains: maxItems: 1 + vref-supply: + description: | + External ADC reference voltage supply on VREFH pad. If VERID[MVI] is + set, there are additional, internal reference voltages selectable. + VREFH1 is always from VREFH pad. 
+ "#io-channel-cells": const: 1 @@ -72,6 +78,7 @@ examples: assigned-clocks = <&clk IMX_SC_R_ADC_0>; assigned-clock-rates = <24000000>; power-domains = <&pd IMX_SC_R_ADC_0>; + vref-supply = <®_1v8>; #io-channel-cells = <1>; }; }; diff --git a/Documentation/devicetree/bindings/iio/adc/renesas,rcar-gyroadc.yaml b/Documentation/devicetree/bindings/iio/adc/renesas,rcar-gyroadc.yaml index 1c7aee5ed3e0bfb208f5438503f7cccc5065d2a7..36dff3250ea76fe51b6ca32ed850617b1c58b2f0 100644 --- a/Documentation/devicetree/bindings/iio/adc/renesas,rcar-gyroadc.yaml +++ b/Documentation/devicetree/bindings/iio/adc/renesas,rcar-gyroadc.yaml @@ -90,7 +90,7 @@ patternProperties: of the MAX chips to the GyroADC, while MISO line of each Maxim ADC connects to a shared input pin of the GyroADC. enum: - - adi,7476 + - adi,ad7476 - fujitsu,mb88101a - maxim,max1162 - maxim,max11100 diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml index 92117261e1e1d0e985e47fe6fca5814a2e4293db..39e64c7f6360cacfe615f0b65a2ec8e85482c17d 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml +++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml @@ -166,6 +166,12 @@ properties: resets: maxItems: 1 + mediatek,broken-save-restore-fw: + type: boolean + description: + Asserts that the firmware on this device has issues saving and restoring + GICR registers when the GIC redistributors are powered off. + dependencies: mbi-ranges: [ msi-controller ] msi-controller: [ mbi-ranges ] diff --git a/Documentation/devicetree/bindings/mfd/canaan,k210-sysctl.yaml b/Documentation/devicetree/bindings/mfd/canaan,k210-sysctl.yaml index 8459d36422050a14c227e34ba10e0f02fe54cd8c..3b3beab9db3f31a597fdff2b2e3135a1d7947de4 100644 --- a/Documentation/devicetree/bindings/mfd/canaan,k210-sysctl.yaml +++ b/Documentation/devicetree/bindings/mfd/canaan,k210-sysctl.yaml @@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml# title: Canaan Kendryte K210 System Controller maintainers: - - Damien Le Moal + - Damien Le Moal description: Canaan Inc. Kendryte K210 SoC system controller which provides a diff --git a/Documentation/devicetree/bindings/net/can/st,stm32-bxcan.yaml b/Documentation/devicetree/bindings/net/can/st,stm32-bxcan.yaml index 769fa5c27b76ee427a46daeef2055a516f29268f..de1d4298893be8b7f3c2090f338484d1bb9d6a5d 100644 --- a/Documentation/devicetree/bindings/net/can/st,stm32-bxcan.yaml +++ b/Documentation/devicetree/bindings/net/can/st,stm32-bxcan.yaml @@ -21,11 +21,22 @@ properties: st,can-primary: description: - Primary and secondary mode of the bxCAN peripheral is only relevant - if the chip has two CAN peripherals. In that case they share some - of the required logic. + Primary mode of the bxCAN peripheral is only relevant if the chip has + two CAN peripherals in dual CAN configuration. In that case they share + some of the required logic. + Not to be used if the peripheral is in single CAN configuration. To avoid misunderstandings, it should be noted that ST documentation - uses the terms master/slave instead of primary/secondary. + uses the terms master instead of primary. + type: boolean + + st,can-secondary: + description: + Secondary mode of the bxCAN peripheral is only relevant if the chip + has two CAN peripherals in dual CAN configuration. In that case they + share some of the required logic. + Not to be used if the peripheral is in single CAN configuration. 
+ To avoid misunderstandings, it should be noted that ST documentation + uses the terms slave instead of secondary. type: boolean reg: diff --git a/Documentation/devicetree/bindings/net/realtek-bluetooth.yaml b/Documentation/devicetree/bindings/net/realtek-bluetooth.yaml index 8cc2b9924680aa6614eccba2523665ae1dc84a4d..043e118c605c834cf652ab50a3a6a0e6f954fec8 100644 --- a/Documentation/devicetree/bindings/net/realtek-bluetooth.yaml +++ b/Documentation/devicetree/bindings/net/realtek-bluetooth.yaml @@ -11,7 +11,7 @@ maintainers: - Alistair Francis description: - RTL8723CS/RTL8723CS/RTL8821CS/RTL8822CS is a WiFi + BT chip. WiFi part + RTL8723BS/RTL8723CS/RTL8821CS/RTL8822CS is a WiFi + BT chip. WiFi part is connected over SDIO, while BT is connected over serial. It speaks H5 protocol with few extra commands to upload firmware and change module speed. @@ -27,7 +27,7 @@ properties: - items: - enum: - realtek,rtl8821cs-bt - - const: realtek,rtl8822cs-bt + - const: realtek,rtl8723bs-bt device-wake-gpios: maxItems: 1 diff --git a/Documentation/devicetree/bindings/pinctrl/canaan,k210-fpioa.yaml b/Documentation/devicetree/bindings/pinctrl/canaan,k210-fpioa.yaml index 7f4f36a58e56cba672fdd4b12ca3e496c6e4fc2e..739a08f00467787419ff7575d2e4846e0db30b9b 100644 --- a/Documentation/devicetree/bindings/pinctrl/canaan,k210-fpioa.yaml +++ b/Documentation/devicetree/bindings/pinctrl/canaan,k210-fpioa.yaml @@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml# title: Canaan Kendryte K210 FPIOA maintainers: - - Damien Le Moal + - Damien Le Moal description: The Canaan Kendryte K210 SoC Fully Programmable IO Array (FPIOA) diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.yaml index c91d3e3a094b8f319785e2c0aefcf8d1761c31db..80f960671857042ca4018b6a495822dbb3056c6e 100644 --- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.yaml +++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.yaml @@ -144,8 +144,9 @@ $defs: enum: [0, 1, 2, 3, 4, 5, 6, 7] qcom,paired: - - description: - Indicates that the pin should be operating in paired mode. + type: boolean + description: + Indicates that the pin should be operating in paired mode. 
required: - pins diff --git a/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml b/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml index afad3135ed67cc8f57adc269d19d27d5082d1ed3..f9c211a9a938e3e254b16070f37f1cb8053a7358 100644 --- a/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml +++ b/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml @@ -29,6 +29,7 @@ properties: - qcom,qcm2290-rpmpd - qcom,qcs404-rpmpd - qcom,qdu1000-rpmhpd + - qcom,sa8155p-rpmhpd - qcom,sa8540p-rpmhpd - qcom,sa8775p-rpmhpd - qcom,sdm660-rpmpd diff --git a/Documentation/devicetree/bindings/reset/canaan,k210-rst.yaml b/Documentation/devicetree/bindings/reset/canaan,k210-rst.yaml index ee8a2dcf5dfae9dea839339d8a75e8a68ac8188a..0c0135964b91d17c396e2ede223e066b843698ba 100644 --- a/Documentation/devicetree/bindings/reset/canaan,k210-rst.yaml +++ b/Documentation/devicetree/bindings/reset/canaan,k210-rst.yaml @@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml# title: Canaan Kendryte K210 Reset Controller maintainers: - - Damien Le Moal + - Damien Le Moal description: | Canaan Kendryte K210 reset controller driver which supports the SoC diff --git a/Documentation/devicetree/bindings/riscv/canaan.yaml b/Documentation/devicetree/bindings/riscv/canaan.yaml index f8f3f286bd55924d8e1ce5b8f5f38bf4a461a56a..41fd11f70a49b26dd91b3dfd0e9cb13a069c120d 100644 --- a/Documentation/devicetree/bindings/riscv/canaan.yaml +++ b/Documentation/devicetree/bindings/riscv/canaan.yaml @@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml# title: Canaan SoC-based boards maintainers: - - Damien Le Moal + - Damien Le Moal description: Canaan Kendryte K210 SoC-based boards diff --git a/Documentation/devicetree/bindings/serial/8250_omap.yaml b/Documentation/devicetree/bindings/serial/8250_omap.yaml index eb3488d8f9ee61854288c4b43a221fe03e5d7f6e..6a7be42da523c60e8b5d9c5ae8e3c6f2b1bb22db 100644 --- a/Documentation/devicetree/bindings/serial/8250_omap.yaml +++ b/Documentation/devicetree/bindings/serial/8250_omap.yaml @@ -70,6 +70,7 @@ properties: dsr-gpios: true rng-gpios: true dcd-gpios: true + rs485-rts-active-high: true rts-gpio: true power-domains: true clock-frequency: true diff --git a/Documentation/devicetree/bindings/sound/tas2562.yaml b/Documentation/devicetree/bindings/sound/tas2562.yaml index a5bb561bfcfbafdcd8f0d0c17be3d3ec1ba9c7bd..31a3024ea7898eba853ef98b300b97c1335a1a24 100644 --- a/Documentation/devicetree/bindings/sound/tas2562.yaml +++ b/Documentation/devicetree/bindings/sound/tas2562.yaml @@ -55,7 +55,9 @@ properties: description: TDM TX current sense time slot. '#sound-dai-cells': - const: 1 + # The codec has a single DAI, the #sound-dai-cells=<1>; case is left in for backward + # compatibility but is deprecated. 
+ enum: [0, 1] required: - compatible @@ -72,7 +74,7 @@ examples: codec: codec@4c { compatible = "ti,tas2562"; reg = <0x4c>; - #sound-dai-cells = <1>; + #sound-dai-cells = <0>; interrupt-parent = <&gpio1>; interrupts = <14>; shutdown-gpios = <&gpio1 15 0>; diff --git a/Documentation/devicetree/bindings/sound/tas2770.yaml b/Documentation/devicetree/bindings/sound/tas2770.yaml index 26088adb9dc24073828412171f37b634d1fc5e44..8908bf1122e965fe8eaab5c2244ac8d5650407eb 100644 --- a/Documentation/devicetree/bindings/sound/tas2770.yaml +++ b/Documentation/devicetree/bindings/sound/tas2770.yaml @@ -57,7 +57,9 @@ properties: - 1 # Falling edge '#sound-dai-cells': - const: 1 + # The codec has a single DAI, the #sound-dai-cells=<1>; case is left in for backward + # compatibility but is deprecated. + enum: [0, 1] required: - compatible @@ -74,7 +76,7 @@ examples: codec: codec@41 { compatible = "ti,tas2770"; reg = <0x41>; - #sound-dai-cells = <1>; + #sound-dai-cells = <0>; interrupt-parent = <&gpio1>; interrupts = <14>; reset-gpio = <&gpio1 15 0>; diff --git a/Documentation/devicetree/bindings/sound/tas27xx.yaml b/Documentation/devicetree/bindings/sound/tas27xx.yaml index 8cba01316855d4ac1bf9b87ed3a975d2c13ac87a..a876545ec87d8c6bc766ee7513fb231d52fc848d 100644 --- a/Documentation/devicetree/bindings/sound/tas27xx.yaml +++ b/Documentation/devicetree/bindings/sound/tas27xx.yaml @@ -50,7 +50,9 @@ properties: description: TDM TX voltage sense time slot. '#sound-dai-cells': - const: 1 + # The codec has a single DAI, the #sound-dai-cells=<1>; case is left in for backward + # compatibility but is deprecated. + enum: [0, 1] required: - compatible @@ -67,7 +69,7 @@ examples: codec: codec@38 { compatible = "ti,tas2764"; reg = <0x38>; - #sound-dai-cells = <1>; + #sound-dai-cells = <0>; interrupt-parent = <&gpio1>; interrupts = <14>; reset-gpios = <&gpio1 15 0>; diff --git a/Documentation/devicetree/bindings/sound/tlv320aic32x4.txt b/Documentation/devicetree/bindings/sound/tlv320aic32x4.txt index f59125bc79d1bfb96cc91cee6f08d1b6785ba891..0b4e21bde5bc1c27fd39835517beb92004c743d0 100644 --- a/Documentation/devicetree/bindings/sound/tlv320aic32x4.txt +++ b/Documentation/devicetree/bindings/sound/tlv320aic32x4.txt @@ -8,7 +8,7 @@ Required properties: "ti,tlv320aic32x6" TLV320AIC3206, TLV320AIC3256 "ti,tas2505" TAS2505, TAS2521 - reg: I2C slave address - - supply-*: Required supply regulators are: + - *-supply: Required supply regulators are: "iov" - digital IO power supply "ldoin" - LDO power supply "dv" - Digital core power supply diff --git a/Documentation/devicetree/bindings/usb/cdns,usb3.yaml b/Documentation/devicetree/bindings/usb/cdns,usb3.yaml index cae46c4982adf69e3369fa450924df14e03fa804..69a93a0722f07f5c45ec6d5f817eed79231463ab 100644 --- a/Documentation/devicetree/bindings/usb/cdns,usb3.yaml +++ b/Documentation/devicetree/bindings/usb/cdns,usb3.yaml @@ -64,7 +64,7 @@ properties: description: size of memory intended as internal memory for endpoints buffers expressed in KB - $ref: /schemas/types.yaml#/definitions/uint32 + $ref: /schemas/types.yaml#/definitions/uint16 cdns,phyrst-a-enable: description: Enable resetting of PHY if Rx fail is detected diff --git a/Documentation/devicetree/bindings/usb/snps,dwc3.yaml b/Documentation/devicetree/bindings/usb/snps,dwc3.yaml index 50edc4da780e9caf68f7821bcff9c06ffa80bba2..4f7625955cccccff6964c98cfed9a1dc1f225326 100644 --- a/Documentation/devicetree/bindings/usb/snps,dwc3.yaml +++ b/Documentation/devicetree/bindings/usb/snps,dwc3.yaml @@ -287,7 +287,7 @@ properties: 
description: High-Speed PHY interface selection between UTMI+ and ULPI when the DWC_USB3_HSPHY_INTERFACE has value 3. - $ref: /schemas/types.yaml#/definitions/uint8 + $ref: /schemas/types.yaml#/definitions/string enum: [utmi, ulpi] snps,quirk-frame-length-adjustment: diff --git a/Documentation/devicetree/usage-model.rst b/Documentation/devicetree/usage-model.rst index b6a287955ee56ac14b9902c524e42cf496ad7a89..0717426856b229be1c52efc79b18dbc617d4c92f 100644 --- a/Documentation/devicetree/usage-model.rst +++ b/Documentation/devicetree/usage-model.rst @@ -415,6 +415,6 @@ When using the DT, this creates problems for of_platform_populate() because it must decide whether to register each node as either a platform_device or an amba_device. This unfortunately complicates the device creation model a little bit, but the solution turns out not to -be too invasive. If a node is compatible with "arm,amba-primecell", then +be too invasive. If a node is compatible with "arm,primecell", then of_platform_populate() will register it as an amba_device instead of a platform_device. diff --git a/Documentation/filesystems/index.rst b/Documentation/filesystems/index.rst index fbb2b5ada95b38838d8ae6ab97e476c28d3be341..eb252fc972aa95e8d731981a8ea3340b0da0160a 100644 --- a/Documentation/filesystems/index.rst +++ b/Documentation/filesystems/index.rst @@ -72,7 +72,6 @@ Documentation for filesystem implementations. befs bfs btrfs - cifs/index ceph coda configfs @@ -111,6 +110,7 @@ Documentation for filesystem implementations. ramfs-rootfs-initramfs relay romfs + smb/index spufs/index squashfs sysfs diff --git a/Documentation/filesystems/ramfs-rootfs-initramfs.rst b/Documentation/filesystems/ramfs-rootfs-initramfs.rst index 164960631925d05a4684fc5920e63cf204f57663..447f767c646276551570baa9de4dd19ccda95c61 100644 --- a/Documentation/filesystems/ramfs-rootfs-initramfs.rst +++ b/Documentation/filesystems/ramfs-rootfs-initramfs.rst @@ -6,8 +6,7 @@ Ramfs, rootfs and initramfs October 17, 2005 -Rob Landley -============================= +:Author: Rob Landley What is ramfs? -------------- diff --git a/Documentation/filesystems/sharedsubtree.rst b/Documentation/filesystems/sharedsubtree.rst index d83395354250d9ca02dc19a38cdf9762733ded04..1cf56489ed484dcf77581e6be1faa490e600a3ab 100644 --- a/Documentation/filesystems/sharedsubtree.rst +++ b/Documentation/filesystems/sharedsubtree.rst @@ -147,6 +147,7 @@ replicas continue to be exactly same. 3) Setting mount states +----------------------- The mount command (util-linux package) can be used to set mount states:: @@ -612,6 +613,7 @@ replicas continue to be exactly same. 6) Quiz +------- A. What is the result of the following command sequence? @@ -673,6 +675,7 @@ replicas continue to be exactly same. /mnt/1/test be? 7) FAQ +------ Q1. Why is bind mount needed? How is it different from symbolic links? symbolic links can get stale if the destination mount gets @@ -841,6 +844,7 @@ replicas continue to be exactly same. tmp usr tmp usr tmp usr 8) Implementation +----------------- 8A) Datastructure diff --git a/Documentation/filesystems/cifs/cifsroot.rst b/Documentation/filesystems/smb/cifsroot.rst similarity index 97% rename from Documentation/filesystems/cifs/cifsroot.rst rename to Documentation/filesystems/smb/cifsroot.rst index 4930bb443134aecdc4445b8058d8272bb11aa696..bf2d9db3acb924bb8e86c3ddc0bfdffbc098ed6e 100644 --- a/Documentation/filesystems/cifs/cifsroot.rst +++ b/Documentation/filesystems/smb/cifsroot.rst @@ -59,7 +59,7 @@ the root file system via SMB protocol. 
Enables the kernel to mount the root file system via SMB that are located in the and specified in this option. -The default mount options are set in fs/cifs/cifsroot.c. +The default mount options are set in fs/smb/client/cifsroot.c. server-ip IPv4 address of the server. diff --git a/Documentation/filesystems/cifs/index.rst b/Documentation/filesystems/smb/index.rst similarity index 100% rename from Documentation/filesystems/cifs/index.rst rename to Documentation/filesystems/smb/index.rst diff --git a/Documentation/filesystems/cifs/ksmbd.rst b/Documentation/filesystems/smb/ksmbd.rst similarity index 100% rename from Documentation/filesystems/cifs/ksmbd.rst rename to Documentation/filesystems/smb/ksmbd.rst diff --git a/Documentation/fpga/index.rst b/Documentation/fpga/index.rst index f80f95667ca217a0c87f68ad0a06cfcb1c2db87d..43c968871d995ad4e2ece499a15837a352e8e0ee 100644 --- a/Documentation/fpga/index.rst +++ b/Documentation/fpga/index.rst @@ -1,7 +1,7 @@ .. SPDX-License-Identifier: GPL-2.0 ==== -fpga +FPGA ==== .. toctree:: diff --git a/Documentation/locking/index.rst b/Documentation/locking/index.rst index 7003bd5aeff4cf711f9fde4b9fdde5c120a79559..6a9ea96c8bcb7013ac97715550d6845bfb188593 100644 --- a/Documentation/locking/index.rst +++ b/Documentation/locking/index.rst @@ -1,7 +1,7 @@ .. SPDX-License-Identifier: GPL-2.0 ======= -locking +Locking ======= .. toctree:: diff --git a/Documentation/mm/page_table_check.rst b/Documentation/mm/page_table_check.rst index cfd8f4117cf3e9a1ef4800653b19d3711fe069d7..c12838ce6b8de2468c81b2d5d18b2fa74317e271 100644 --- a/Documentation/mm/page_table_check.rst +++ b/Documentation/mm/page_table_check.rst @@ -52,3 +52,22 @@ Build kernel with: Optionally, build kernel with PAGE_TABLE_CHECK_ENFORCED in order to have page table support without extra kernel parameter. + +Implementation notes +==================== + +We specifically decided not to use VMA information in order to avoid relying on +MM states (except for limited "struct page" info). The page table check is a +separate from Linux-MM state machine that verifies that the user accessible +pages are not falsely shared. + +PAGE_TABLE_CHECK depends on EXCLUSIVE_SYSTEM_RAM. The reason is that without +EXCLUSIVE_SYSTEM_RAM, users are allowed to map arbitrary physical memory +regions into the userspace via /dev/mem. At the same time, pages may change +their properties (e.g., from anonymous pages to named pages) while they are +still being mapped in the userspace, leading to "corruption" detected by the +page table check. + +Even with EXCLUSIVE_SYSTEM_RAM, I/O pages may be still allowed to be mapped via +/dev/mem. However, these pages are always considered as named pages, so they +won't break the logic used in the page table check. 
diff --git a/Documentation/netlink/specs/ethtool.yaml b/Documentation/netlink/specs/ethtool.yaml index 129f413ea3496fbd0bde88ce9bf786931d2a23ba..4846345bade4604d268a5ce5186378f4312d3dac 100644 --- a/Documentation/netlink/specs/ethtool.yaml +++ b/Documentation/netlink/specs/ethtool.yaml @@ -60,22 +60,6 @@ attribute-sets: type: nest nested-attributes: bitset-bits - - - name: u64-array - attributes: - - - name: u64 - type: nest - multi-attr: true - nested-attributes: u64 - - - name: s32-array - attributes: - - - name: s32 - type: nest - multi-attr: true - nested-attributes: s32 - name: string attributes: @@ -239,7 +223,7 @@ attribute-sets: name: tx-min-frag-size type: u32 - - name: tx-min-frag-size + name: rx-min-frag-size type: u32 - name: verify-enabled @@ -310,7 +294,7 @@ attribute-sets: name: master-slave-state type: u8 - - name: master-slave-lanes + name: lanes type: u32 - name: rate-matching @@ -338,7 +322,7 @@ attribute-sets: name: ext-substate type: u8 - - name: down-cnt + name: ext-down-cnt type: u32 - name: debug @@ -593,7 +577,7 @@ attribute-sets: name: phc-index type: u32 - - name: cable-test-nft-nest-result + name: cable-test-ntf-nest-result attributes: - name: pair @@ -602,7 +586,7 @@ attribute-sets: name: code type: u8 - - name: cable-test-nft-nest-fault-length + name: cable-test-ntf-nest-fault-length attributes: - name: pair @@ -611,16 +595,16 @@ attribute-sets: name: cm type: u32 - - name: cable-test-nft-nest + name: cable-test-ntf-nest attributes: - name: result type: nest - nested-attributes: cable-test-nft-nest-result + nested-attributes: cable-test-ntf-nest-result - name: fault-length type: nest - nested-attributes: cable-test-nft-nest-fault-length + nested-attributes: cable-test-ntf-nest-fault-length - name: cable-test attributes: @@ -634,7 +618,7 @@ attribute-sets: - name: nest type: nest - nested-attributes: cable-test-nft-nest + nested-attributes: cable-test-ntf-nest - name: cable-test-tdr-cfg attributes: @@ -705,16 +689,16 @@ attribute-sets: type: u8 - name: corrected - type: nest - nested-attributes: u64-array + type: binary + sub-type: u64 - name: uncorr - type: nest - nested-attributes: u64-array + type: binary + sub-type: u64 - name: corr-bits - type: nest - nested-attributes: u64-array + type: binary + sub-type: u64 - name: fec attributes: @@ -792,7 +776,7 @@ attribute-sets: name: hist-bkt-hi type: u32 - - name: hist-bkt-val + name: hist-val type: u64 - name: stats @@ -827,8 +811,8 @@ attribute-sets: type: u32 - name: index - type: nest - nested-attributes: s32-array + type: binary + sub-type: s32 - name: module attributes: @@ -981,7 +965,7 @@ operations: - duplex - master-slave-cfg - master-slave-state - - master-slave-lanes + - lanes - rate-matching dump: *linkmodes-get-op - @@ -1015,7 +999,7 @@ operations: - sqi-max - ext-state - ext-substate - - down-cnt + - ext-down-cnt dump: *linkstate-get-op - name: debug-get @@ -1367,7 +1351,7 @@ operations: reply: attributes: - header - - cable-test-nft-nest + - cable-test-ntf-nest - name: cable-test-tdr-act doc: Cable test TDR. @@ -1555,7 +1539,7 @@ operations: - hkey dump: *rss-get-op - - name: plca-get + name: plca-get-cfg doc: Get PLCA params. attribute-set: plca @@ -1577,7 +1561,7 @@ operations: - burst-tmr dump: *plca-get-op - - name: plca-set + name: plca-set-cfg doc: Set PLCA params. attribute-set: plca @@ -1601,7 +1585,7 @@ operations: - name: plca-ntf doc: Notification for change in PLCA params. 
- notify: plca-get + notify: plca-get-cfg - name: mm-get doc: Get MAC Merge configuration and state diff --git a/Documentation/netlink/specs/handshake.yaml b/Documentation/netlink/specs/handshake.yaml index 614f1a585511487a85339de90e2b675a800e5439..6d89e30f5fd52b1bfe7c1f36f9b5a859ccccfc2b 100644 --- a/Documentation/netlink/specs/handshake.yaml +++ b/Documentation/netlink/specs/handshake.yaml @@ -68,6 +68,9 @@ attribute-sets: type: nest nested-attributes: x509 multi-attr: true + - + name: peername + type: string - name: done attributes: @@ -105,6 +108,7 @@ operations: - auth-mode - peer-identity - certificate + - peername - name: done doc: Handler reports handshake completion diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst index 3a7a714cc08f0a74dbc6de9dfd90f83a566b539e..3354ca3608ee671adc06939d78302420099b3110 100644 --- a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst +++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst @@ -40,6 +40,7 @@ flow_steering_mode: Device flow steering mode --------------------------------------------- The flow steering mode parameter controls the flow steering mode of the driver. Two modes are supported: + 1. 'dmfs' - Device managed flow steering. 2. 'smfs' - Software/Driver managed flow steering. @@ -99,6 +100,7 @@ between representors and stacked devices. By default metadata is enabled on the supported devices in E-switch. Metadata is applicable only for E-switch in switchdev mode and users may disable it when NONE of the below use cases will be in use: + 1. HCA is in Dual/multi-port RoCE mode. 2. VF/SF representor bonding (Usually used for Live migration) 3. Stacked devices @@ -180,7 +182,8 @@ User commands examples: $ devlink health diagnose pci/0000:82:00.0 reporter tx -NOTE: This command has valid output only when interface is up, otherwise the command has empty output. +.. note:: + This command has valid output only when interface is up, otherwise the command has empty output. - Show number of tx errors indicated, number of recover flows ended successfully, is autorecover enabled and graceful period from last recover:: @@ -232,8 +235,9 @@ User commands examples: $ devlink health dump show pci/0000:82:00.0 reporter fw -NOTE: This command can run only on the PF which has fw tracer ownership, -running it on other PF or any VF will return "Operation not permitted". +.. note:: + This command can run only on the PF which has fw tracer ownership, + running it on other PF or any VF will return "Operation not permitted". fw fatal reporter ----------------- @@ -256,7 +260,8 @@ User commands examples: $ devlink health dump show pci/0000:82:00.1 reporter fw_fatal -NOTE: This command can run only on PF. +.. note:: + This command can run only on PF. vnic reporter ------------- @@ -265,28 +270,37 @@ It is responsible for querying the vnic diagnostic counters from fw and displayi them in realtime. Description of the vnic counters: -total_q_under_processor_handle: number of queues in an error state due to -an async error or errored command. -send_queue_priority_update_flow: number of QP/SQ priority/SL update -events. -cq_overrun: number of times CQ entered an error state due to an -overflow. -async_eq_overrun: number of times an EQ mapped to async events was -overrun. -comp_eq_overrun: number of times an EQ mapped to completion events was -overrun. 
-quota_exceeded_command: number of commands issued and failed due to quota -exceeded. -invalid_command: number of commands issued and failed dues to any reason -other than quota exceeded. -nic_receive_steering_discard: number of packets that completed RX flow -steering but were discarded due to a mismatch in flow table. + +- total_q_under_processor_handle + number of queues in an error state due to + an async error or errored command. +- send_queue_priority_update_flow + number of QP/SQ priority/SL update events. +- cq_overrun + number of times CQ entered an error state due to an overflow. +- async_eq_overrun + number of times an EQ mapped to async events was overrun. + comp_eq_overrun number of times an EQ mapped to completion events was + overrun. +- quota_exceeded_command + number of commands issued and failed due to quota exceeded. +- invalid_command + number of commands issued and failed dues to any reason other than quota + exceeded. +- nic_receive_steering_discard + number of packets that completed RX flow + steering but were discarded due to a mismatch in flow table. User commands examples: -- Diagnose PF/VF vnic counters + +- Diagnose PF/VF vnic counters:: + $ devlink health diagnose pci/0000:82:00.1 reporter vnic + - Diagnose representor vnic counters (performed by supplying devlink port of the - representor, which can be obtained via devlink port command) + representor, which can be obtained via devlink port command):: + $ devlink health diagnose pci/0000:82:00.1/65537 reporter vnic -NOTE: This command can run over all interfaces such as PF/VF and representor ports. +.. note:: + This command can run over all interfaces such as PF/VF and representor ports. diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst index 6ec06a33688a68c40dd882efedcdbe4480a1c5ec..80b8f73a0244f4682967e74bb2edec010bf0c27a 100644 --- a/Documentation/networking/ip-sysctl.rst +++ b/Documentation/networking/ip-sysctl.rst @@ -1352,8 +1352,8 @@ ping_group_range - 2 INTEGERS Restrict ICMP_PROTO datagram sockets to users in the group range. The default is "1 0", meaning, that nobody (not even root) may create ping sockets. Setting it to "100 100" would grant permissions - to the single group. "0 4294967295" would enable it for the world, "100 - 4294967295" would enable it for the users, but not daemons. + to the single group. "0 4294967294" would enable it for the world, "100 + 4294967294" would enable it for the users, but not daemons. tcp_early_demux - BOOLEAN Enable early demux for established TCP sockets. diff --git a/Documentation/networking/tls-handshake.rst b/Documentation/networking/tls-handshake.rst index a2817a88e905fa167244cebaf376684f3800faf2..6f5ea1646a4704653a41caee551ed85fdda12f0f 100644 --- a/Documentation/networking/tls-handshake.rst +++ b/Documentation/networking/tls-handshake.rst @@ -53,6 +53,7 @@ fills in a structure that contains the parameters of the request: struct socket *ta_sock; tls_done_func_t ta_done; void *ta_data; + const char *ta_peername; unsigned int ta_timeout_ms; key_serial_t ta_keyring; key_serial_t ta_my_cert; @@ -71,6 +72,10 @@ instantiated a struct file in sock->file. has completed. Further explanation of this function is in the "Handshake Completion" sesction below. +The consumer can provide a NUL-terminated hostname in the @ta_peername +field that is sent as part of ClientHello. If no peername is provided, +the DNS hostname associated with the server's IP address is used instead. 
+ The consumer can fill in the @ta_timeout_ms field to force the servicing handshake agent to exit after a number of milliseconds. This enables the socket to be fully closed once both the kernel and the handshake agent diff --git a/Documentation/pcmcia/index.rst b/Documentation/pcmcia/index.rst index 7ae1f62fca14f8c5d06bd7d461d54853114063e3..8067236c51ab4208af1789eb907c479891685346 100644 --- a/Documentation/pcmcia/index.rst +++ b/Documentation/pcmcia/index.rst @@ -1,7 +1,7 @@ .. SPDX-License-Identifier: GPL-2.0 ====== -pcmcia +PCMCIA ====== .. toctree:: diff --git a/Documentation/process/maintainer-netdev.rst b/Documentation/process/maintainer-netdev.rst index f73ac9e175a8dd9e94ceeebbf5257018397bcbc7..83614cec9328b62b2333947a5254b1f57519545f 100644 --- a/Documentation/process/maintainer-netdev.rst +++ b/Documentation/process/maintainer-netdev.rst @@ -127,13 +127,32 @@ the value of ``Message-ID`` to the URL above. Updating patch status ~~~~~~~~~~~~~~~~~~~~~ -It may be tempting to help the maintainers and update the state of your -own patches when you post a new version or spot a bug. Please **do not** -do that. -Interfering with the patch status on patchwork will only cause confusion. Leave -it to the maintainer to figure out what is the most recent and current -version that should be applied. If there is any doubt, the maintainer -will reply and ask what should be done. +Contributors and reviewers do not have the permissions to update patch +state directly in patchwork. Patchwork doesn't expose much information +about the history of the state of patches, therefore having multiple +people update the state leads to confusion. + +Instead of delegating patchwork permissions netdev uses a simple mail +bot which looks for special commands/lines within the emails sent to +the mailing list. For example to mark a series as Changes Requested +one needs to send the following line anywhere in the email thread:: + + pw-bot: changes-requested + +As a result the bot will set the entire series to Changes Requested. +This may be useful when author discovers a bug in their own series +and wants to prevent it from getting applied. + +The use of the bot is entirely optional, if in doubt ignore its existence +completely. Maintainers will classify and update the state of the patches +themselves. No email should ever be sent to the list with the main purpose +of communicating with the bot, the bot commands should be seen as metadata. + +The use of the bot is restricted to authors of the patches (the ``From:`` +header on patch submission and command must match!), maintainers themselves +and a handful of senior reviewers. Bot records its activity here: + + https://patchwork.hopto.org/pw-bot.html Review timelines ~~~~~~~~~~~~~~~~ diff --git a/Documentation/riscv/patch-acceptance.rst b/Documentation/riscv/patch-acceptance.rst index 07d5a5623e2a3e87e5244b94b789c8174a983efe..634aa222b410c100945b9e954b333ce5b4871e57 100644 --- a/Documentation/riscv/patch-acceptance.rst +++ b/Documentation/riscv/patch-acceptance.rst @@ -16,6 +16,24 @@ tested code over experimental code. We wish to extend these same principles to the RISC-V-related code that will be accepted for inclusion in the kernel. +Patchwork +--------- + +RISC-V has a patchwork instance, where the status of patches can be checked: + + https://patchwork.kernel.org/project/linux-riscv/list/ + +If your patch does not appear in the default view, the RISC-V maintainers have +likely either requested changes, or expect it to be applied to another tree. 
+ +Automation runs against this patchwork instance, building/testing patches as +they arrive. The automation applies patches against the current HEAD of the +RISC-V `for-next` and `fixes` branches, depending on whether the patch has been +detected as a fix. Failing those, it will use the RISC-V `master` branch. +The exact commit to which a series has been applied will be noted on patchwork. +Patches for which any of the checks fail are unlikely to be applied and in most +cases will need to be resubmitted. + Submit Checklist Addendum ------------------------- We'll only accept patches for new modules or extensions if the diff --git a/Documentation/s390/vfio-ap.rst b/Documentation/s390/vfio-ap.rst index d46e98c7c1ec6cabe1316c7122cea4a33933927f..bb3f4c4e288563dc1ca850b8f7f86b7b7347a56e 100644 --- a/Documentation/s390/vfio-ap.rst +++ b/Documentation/s390/vfio-ap.rst @@ -551,7 +551,6 @@ These are the steps: * IOMMU_SUPPORT * S390 * ZCRYPT - * S390_AP_IOMMU * VFIO * KVM diff --git a/Documentation/staging/crc32.rst b/Documentation/staging/crc32.rst index 8a6860f33b4e7622003687a018816bc06469d5f9..7542220967cb4c3366665ee7a7730b62ae44bcdb 100644 --- a/Documentation/staging/crc32.rst +++ b/Documentation/staging/crc32.rst @@ -1,5 +1,5 @@ ================================= -brief tutorial on CRC computation +Brief tutorial on CRC computation ================================= A CRC is a long-division remainder. You add the CRC to the message, diff --git a/Documentation/timers/index.rst b/Documentation/timers/index.rst index df510ad0c989bef451443dd503cd70947c378d35..983f91f8f023e073ee0513708126cdede8c745d6 100644 --- a/Documentation/timers/index.rst +++ b/Documentation/timers/index.rst @@ -1,7 +1,7 @@ .. SPDX-License-Identifier: GPL-2.0 ====== -timers +Timers ====== .. toctree:: diff --git a/Documentation/trace/histogram.rst b/Documentation/trace/histogram.rst index 479c9eac63352207d51859a65cc229f36022d361..3c9b263de9c24f6b3367bc1a8ae7cd80f06e993c 100644 --- a/Documentation/trace/histogram.rst +++ b/Documentation/trace/histogram.rst @@ -35,7 +35,7 @@ Documentation written by Tom Zanussi in place of an explicit value field - this is simply a count of event hits. If 'values' isn't specified, an implicit 'hitcount' value will be automatically created and used as the only value. - Keys can be any field, or the special string 'stacktrace', which + Keys can be any field, or the special string 'common_stacktrace', which will use the event's kernel stacktrace as the key. The keywords 'keys' or 'key' can be used to specify keys, and the keywords 'values', 'vals', or 'val' can be used to specify values. Compound @@ -54,7 +54,7 @@ Documentation written by Tom Zanussi 'compatible' if the fields named in the trigger share the same number and type of fields and those fields also have the same names. Note that any two events always share the compatible 'hitcount' and - 'stacktrace' fields and can therefore be combined using those + 'common_stacktrace' fields and can therefore be combined using those fields, however pointless that may be. 'hist' triggers add a 'hist' file to each event's subdirectory. @@ -547,9 +547,9 @@ Extended error information the hist trigger display symbolic call_sites, we can have the hist trigger additionally display the complete set of kernel stack traces that led to each call_site. 
To do that, we simply use the special - value 'stacktrace' for the key parameter:: + value 'common_stacktrace' for the key parameter:: - # echo 'hist:keys=stacktrace:values=bytes_req,bytes_alloc:sort=bytes_alloc' > \ + # echo 'hist:keys=common_stacktrace:values=bytes_req,bytes_alloc:sort=bytes_alloc' > \ /sys/kernel/tracing/events/kmem/kmalloc/trigger The above trigger will use the kernel stack trace in effect when an @@ -561,9 +561,9 @@ Extended error information every callpath to a kmalloc for a kernel compile):: # cat /sys/kernel/tracing/events/kmem/kmalloc/hist - # trigger info: hist:keys=stacktrace:vals=bytes_req,bytes_alloc:sort=bytes_alloc:size=2048 [active] + # trigger info: hist:keys=common_stacktrace:vals=bytes_req,bytes_alloc:sort=bytes_alloc:size=2048 [active] - { stacktrace: + { common_stacktrace: __kmalloc_track_caller+0x10b/0x1a0 kmemdup+0x20/0x50 hidraw_report_event+0x8a/0x120 [hid] @@ -581,7 +581,7 @@ Extended error information cpu_startup_entry+0x315/0x3e0 rest_init+0x7c/0x80 } hitcount: 3 bytes_req: 21 bytes_alloc: 24 - { stacktrace: + { common_stacktrace: __kmalloc_track_caller+0x10b/0x1a0 kmemdup+0x20/0x50 hidraw_report_event+0x8a/0x120 [hid] @@ -596,7 +596,7 @@ Extended error information do_IRQ+0x5a/0xf0 ret_from_intr+0x0/0x30 } hitcount: 3 bytes_req: 21 bytes_alloc: 24 - { stacktrace: + { common_stacktrace: kmem_cache_alloc_trace+0xeb/0x150 aa_alloc_task_context+0x27/0x40 apparmor_cred_prepare+0x1f/0x50 @@ -608,7 +608,7 @@ Extended error information . . . - { stacktrace: + { common_stacktrace: __kmalloc+0x11b/0x1b0 i915_gem_execbuffer2+0x6c/0x2c0 [i915] drm_ioctl+0x349/0x670 [drm] @@ -616,7 +616,7 @@ Extended error information SyS_ioctl+0x81/0xa0 system_call_fastpath+0x12/0x6a } hitcount: 17726 bytes_req: 13944120 bytes_alloc: 19593808 - { stacktrace: + { common_stacktrace: __kmalloc+0x11b/0x1b0 load_elf_phdrs+0x76/0xa0 load_elf_binary+0x102/0x1650 @@ -625,7 +625,7 @@ Extended error information SyS_execve+0x3a/0x50 return_from_execve+0x0/0x23 } hitcount: 33348 bytes_req: 17152128 bytes_alloc: 20226048 - { stacktrace: + { common_stacktrace: kmem_cache_alloc_trace+0xeb/0x150 apparmor_file_alloc_security+0x27/0x40 security_file_alloc+0x16/0x20 @@ -636,7 +636,7 @@ Extended error information SyS_open+0x1e/0x20 system_call_fastpath+0x12/0x6a } hitcount: 4766422 bytes_req: 9532844 bytes_alloc: 38131376 - { stacktrace: + { common_stacktrace: __kmalloc+0x11b/0x1b0 seq_buf_alloc+0x1b/0x50 seq_read+0x2cc/0x370 @@ -1026,7 +1026,7 @@ Extended error information First we set up an initially paused stacktrace trigger on the netif_receive_skb event:: - # echo 'hist:key=stacktrace:vals=len:pause' > \ + # echo 'hist:key=common_stacktrace:vals=len:pause' > \ /sys/kernel/tracing/events/net/netif_receive_skb/trigger Next, we set up an 'enable_hist' trigger on the sched_process_exec @@ -1060,9 +1060,9 @@ Extended error information $ wget https://www.kernel.org/pub/linux/kernel/v3.x/patch-3.19.xz # cat /sys/kernel/tracing/events/net/netif_receive_skb/hist - # trigger info: hist:keys=stacktrace:vals=len:sort=hitcount:size=2048 [paused] + # trigger info: hist:keys=common_stacktrace:vals=len:sort=hitcount:size=2048 [paused] - { stacktrace: + { common_stacktrace: __netif_receive_skb_core+0x46d/0x990 __netif_receive_skb+0x18/0x60 netif_receive_skb_internal+0x23/0x90 @@ -1079,7 +1079,7 @@ Extended error information kthread+0xd2/0xf0 ret_from_fork+0x42/0x70 } hitcount: 85 len: 28884 - { stacktrace: + { common_stacktrace: __netif_receive_skb_core+0x46d/0x990 __netif_receive_skb+0x18/0x60 
netif_receive_skb_internal+0x23/0x90 @@ -1097,7 +1097,7 @@ Extended error information irq_thread+0x11f/0x150 kthread+0xd2/0xf0 } hitcount: 98 len: 664329 - { stacktrace: + { common_stacktrace: __netif_receive_skb_core+0x46d/0x990 __netif_receive_skb+0x18/0x60 process_backlog+0xa8/0x150 @@ -1115,7 +1115,7 @@ Extended error information inet_sendmsg+0x64/0xa0 sock_sendmsg+0x3d/0x50 } hitcount: 115 len: 13030 - { stacktrace: + { common_stacktrace: __netif_receive_skb_core+0x46d/0x990 __netif_receive_skb+0x18/0x60 netif_receive_skb_internal+0x23/0x90 @@ -1142,14 +1142,14 @@ Extended error information into the histogram. In order to avoid having to set everything up again, we can just clear the histogram first:: - # echo 'hist:key=stacktrace:vals=len:clear' >> \ + # echo 'hist:key=common_stacktrace:vals=len:clear' >> \ /sys/kernel/tracing/events/net/netif_receive_skb/trigger Just to verify that it is in fact cleared, here's what we now see in the hist file:: # cat /sys/kernel/tracing/events/net/netif_receive_skb/hist - # trigger info: hist:keys=stacktrace:vals=len:sort=hitcount:size=2048 [paused] + # trigger info: hist:keys=common_stacktrace:vals=len:sort=hitcount:size=2048 [paused] Totals: Hits: 0 @@ -1485,12 +1485,12 @@ Extended error information And here's an example that shows how to combine histogram data from any two events even if they don't share any 'compatible' fields - other than 'hitcount' and 'stacktrace'. These commands create a + other than 'hitcount' and 'common_stacktrace'. These commands create a couple of triggers named 'bar' using those fields:: - # echo 'hist:name=bar:key=stacktrace:val=hitcount' > \ + # echo 'hist:name=bar:key=common_stacktrace:val=hitcount' > \ /sys/kernel/tracing/events/sched/sched_process_fork/trigger - # echo 'hist:name=bar:key=stacktrace:val=hitcount' > \ + # echo 'hist:name=bar:key=common_stacktrace:val=hitcount' > \ /sys/kernel/tracing/events/net/netif_rx/trigger And displaying the output of either shows some interesting if @@ -1501,16 +1501,16 @@ Extended error information # event histogram # - # trigger info: hist:name=bar:keys=stacktrace:vals=hitcount:sort=hitcount:size=2048 [active] + # trigger info: hist:name=bar:keys=common_stacktrace:vals=hitcount:sort=hitcount:size=2048 [active] # - { stacktrace: + { common_stacktrace: kernel_clone+0x18e/0x330 kernel_thread+0x29/0x30 kthreadd+0x154/0x1b0 ret_from_fork+0x3f/0x70 } hitcount: 1 - { stacktrace: + { common_stacktrace: netif_rx_internal+0xb2/0xd0 netif_rx_ni+0x20/0x70 dev_loopback_xmit+0xaa/0xd0 @@ -1528,7 +1528,7 @@ Extended error information call_cpuidle+0x3b/0x60 cpu_startup_entry+0x22d/0x310 } hitcount: 1 - { stacktrace: + { common_stacktrace: netif_rx_internal+0xb2/0xd0 netif_rx_ni+0x20/0x70 dev_loopback_xmit+0xaa/0xd0 @@ -1543,7 +1543,7 @@ Extended error information SyS_sendto+0xe/0x10 entry_SYSCALL_64_fastpath+0x12/0x6a } hitcount: 2 - { stacktrace: + { common_stacktrace: netif_rx_internal+0xb2/0xd0 netif_rx+0x1c/0x60 loopback_xmit+0x6c/0xb0 @@ -1561,7 +1561,7 @@ Extended error information sock_sendmsg+0x38/0x50 ___sys_sendmsg+0x14e/0x270 } hitcount: 76 - { stacktrace: + { common_stacktrace: netif_rx_internal+0xb2/0xd0 netif_rx+0x1c/0x60 loopback_xmit+0x6c/0xb0 @@ -1579,7 +1579,7 @@ Extended error information sock_sendmsg+0x38/0x50 ___sys_sendmsg+0x269/0x270 } hitcount: 77 - { stacktrace: + { common_stacktrace: netif_rx_internal+0xb2/0xd0 netif_rx+0x1c/0x60 loopback_xmit+0x6c/0xb0 @@ -1597,7 +1597,7 @@ Extended error information sock_sendmsg+0x38/0x50 SYSC_sendto+0xef/0x170 } hitcount: 88 - 
{ stacktrace: + { common_stacktrace: kernel_clone+0x18e/0x330 SyS_clone+0x19/0x20 entry_SYSCALL_64_fastpath+0x12/0x6a @@ -1949,7 +1949,7 @@ uninterruptible state:: # cd /sys/kernel/tracing # echo 's:block_lat pid_t pid; u64 delta; unsigned long[] stack;' > dynamic_events - # echo 'hist:keys=next_pid:ts=common_timestamp.usecs,st=stacktrace if prev_state == 2' >> events/sched/sched_switch/trigger + # echo 'hist:keys=next_pid:ts=common_timestamp.usecs,st=common_stacktrace if prev_state == 2' >> events/sched/sched_switch/trigger # echo 'hist:keys=prev_pid:delta=common_timestamp.usecs-$ts,s=$st:onmax($delta).trace(block_lat,prev_pid,$delta,$s)' >> events/sched/sched_switch/trigger # echo 1 > events/synthetic/block_lat/enable # cat trace diff --git a/Documentation/trace/user_events.rst b/Documentation/trace/user_events.rst index f79987e16cf43822a9f37d3ecb7fe507062f8460..e7b07313550a3f60e3fe26cad1f2028cd4dc6eb1 100644 --- a/Documentation/trace/user_events.rst +++ b/Documentation/trace/user_events.rst @@ -14,10 +14,6 @@ Programs can view status of the events via /sys/kernel/tracing/user_events_status and can both register and write data out via /sys/kernel/tracing/user_events_data. -Programs can also use /sys/kernel/tracing/dynamic_events to register and -delete user based events via the u: prefix. The format of the command to -dynamic_events is the same as the ioctl with the u: prefix applied. - Typically programs will register a set of events that they wish to expose to tools that can read trace_events (such as ftrace and perf). The registration process tells the kernel which address and bit to reflect if any tool has @@ -144,6 +140,9 @@ its name. Delete will only succeed if there are no references left to the event (in both user and kernel space). User programs should use a separate file to request deletes than the one used for registration due to this. +**NOTE:** By default events will auto-delete when there are no references left +to the event. Flags in the future may change this logic. 
+ Unregistering ------------- If after registering an event it is no longer wanted to be updated then it can diff --git a/Documentation/translations/zh_CN/devicetree/usage-model.rst b/Documentation/translations/zh_CN/devicetree/usage-model.rst index c6aee82c7e6e72535d12abcc117d601f8d7557ae..19ba4ae0cd811e055891452e349f55e77c3e9a6b 100644 --- a/Documentation/translations/zh_CN/devicetree/usage-model.rst +++ b/Documentation/translations/zh_CN/devicetree/usage-model.rst @@ -325,6 +325,6 @@ Primecell设备。然而,棘手的一点是,AMBA总线上的所有设备并 当使用DT时,这给of_platform_populate()带来了问题,因为它必须决定是否将 每个节点注册为platform_device或amba_device。不幸的是,这使设备创建模型 -变得有点复杂,但解决方案原来并不是太具有侵略性。如果一个节点与“arm,amba-primecell” +变得有点复杂,但解决方案原来并不是太具有侵略性。如果一个节点与“arm,primecell” 兼容,那么of_platform_populate()将把它注册为amba_device而不是 platform_device。 diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst index 176e8fc3f31b9393421c9c75073d927911f1b4c2..4f7b23faebb98c74022604ff24084eb1e8257524 100644 --- a/Documentation/userspace-api/ioctl/ioctl-number.rst +++ b/Documentation/userspace-api/ioctl/ioctl-number.rst @@ -363,7 +363,7 @@ Code Seq# Include File Comments 0xCC 00-0F drivers/misc/ibmvmc.h pseries VMC driver 0xCD 01 linux/reiserfs_fs.h 0xCE 01-02 uapi/linux/cxl_mem.h Compute Express Link Memory Devices -0xCF 02 fs/cifs/ioctl.c +0xCF 02 fs/smb/client/cifs_ioctl.h 0xDB 00-0F drivers/char/mwave/mwavepub.h 0xDD 00-3F ZFCP device driver see drivers/s390/scsi/ diff --git a/MAINTAINERS b/MAINTAINERS index e0ad886d316324d8bdd25befb706cd2fda82d9f9..35e19594640d0a780fe581fc4fed1d8eef1b131a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -956,7 +956,8 @@ F: Documentation/networking/device_drivers/ethernet/amazon/ena.rst F: drivers/net/ethernet/amazon/ AMAZON RDMA EFA DRIVER -M: Gal Pressman +M: Michael Margolin +R: Gal Pressman R: Yossi Leybovich L: linux-rdma@vger.kernel.org S: Supported @@ -1600,7 +1601,7 @@ F: drivers/media/i2c/ar0521.c ARASAN NAND CONTROLLER DRIVER M: Miquel Raynal -M: Naga Sureshkumar Relli +R: Michal Simek L: linux-mtd@lists.infradead.org S: Maintained F: Documentation/devicetree/bindings/mtd/arasan,nand-controller.yaml @@ -1677,10 +1678,7 @@ F: drivers/power/reset/arm-versatile-reboot.c F: drivers/soc/versatile/ ARM KOMEDA DRM-KMS DRIVER -M: James (Qian) Wang M: Liviu Dudau -M: Mihail Atanassov -L: Mali DP Maintainers S: Supported T: git git://anongit.freedesktop.org/drm/drm-misc F: Documentation/devicetree/bindings/display/arm,komeda.yaml @@ -1701,8 +1699,6 @@ F: include/uapi/drm/panfrost_drm.h ARM MALI-DP DRM DRIVER M: Liviu Dudau -M: Brian Starkey -L: Mali DP Maintainers S: Supported T: git git://anongit.freedesktop.org/drm/drm-misc F: Documentation/devicetree/bindings/display/arm,malidp.yaml @@ -1768,7 +1764,7 @@ F: include/linux/amba/mmci.h ARM PRIMECELL PL35X NAND CONTROLLER DRIVER M: Miquel Raynal -M: Naga Sureshkumar Relli +R: Michal Simek L: linux-mtd@lists.infradead.org S: Maintained F: Documentation/devicetree/bindings/mtd/arm,pl353-nand-r2p1.yaml @@ -1776,7 +1772,7 @@ F: drivers/mtd/nand/raw/pl35x-nand-controller.c ARM PRIMECELL PL35X SMC DRIVER M: Miquel Raynal -M: Naga Sureshkumar Relli +R: Michal Simek L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/memory-controllers/arm,pl35x-smc.yaml @@ -2434,6 +2430,15 @@ X: drivers/net/wireless/atmel/ N: at91 N: atmel +ARM/MICROCHIP (ARM64) SoC support +M: Conor Dooley +M: Nicolas Ferre +M: Claudiu Beznea +L: linux-arm-kernel@lists.infradead.org (moderated 
for non-subscribers) +S: Supported +T: git https://git.kernel.org/pub/scm/linux/kernel/git/at91/linux.git +F: arch/arm64/boot/dts/microchip/ + ARM/Microchip Sparx5 SoC support M: Lars Povlsen M: Steen Hegelund @@ -2441,8 +2446,7 @@ M: Daniel Machon M: UNGLinuxDriver@microchip.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Supported -T: git git://github.com/microchip-ung/linux-upstream.git -F: arch/arm64/boot/dts/microchip/ +F: arch/arm64/boot/dts/microchip/sparx* F: drivers/net/ethernet/microchip/vcap/ F: drivers/pinctrl/pinctrl-microchip-sgpio.c N: sparx5 @@ -3541,7 +3545,7 @@ F: Documentation/filesystems/befs.rst F: fs/befs/ BFQ I/O SCHEDULER -M: Paolo Valente +M: Paolo Valente M: Jens Axboe L: linux-block@vger.kernel.org S: Maintained @@ -4914,7 +4918,6 @@ F: drivers/media/cec/i2c/ch7322.c CIRRUS LOGIC AUDIO CODEC DRIVERS M: James Schulman M: David Rhodes -M: Lucas Tanure M: Richard Fitzgerald L: alsa-devel@alsa-project.org (moderated for non-subscribers) L: patches@opensource.cirrus.com @@ -5136,7 +5139,7 @@ X: drivers/clk/clkdev.c COMMON INTERNET FILE SYSTEM CLIENT (CIFS and SMB3) M: Steve French -R: Paulo Alcantara (DFS, global name space) +R: Paulo Alcantara (DFS, global name space) R: Ronnie Sahlberg (directory leases, sparse files) R: Shyam Prasad N (multichannel) R: Tom Talpey (RDMA, smbdirect) @@ -5146,8 +5149,8 @@ S: Supported W: https://wiki.samba.org/index.php/LinuxCIFS T: git git://git.samba.org/sfrench/cifs-2.6.git F: Documentation/admin-guide/cifs/ -F: fs/cifs/ -F: fs/smbfs_common/ +F: fs/smb/client/ +F: fs/smb/common/ F: include/uapi/linux/cifs COMPACTPCI HOTPLUG CORE @@ -5725,6 +5728,14 @@ F: include/linux/tfrc.h F: include/uapi/linux/dccp.h F: net/dccp/ +DEBUGOBJECTS: +M: Thomas Gleixner +L: linux-kernel@vger.kernel.org +S: Maintained +T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git core/debugobjects +F: lib/debugobjects.c +F: include/linux/debugobjects.h + DECSTATION PLATFORM SUPPORT M: "Maciej W. 
Rozycki" L: linux-mips@vger.kernel.org @@ -6012,7 +6023,7 @@ W: http://www.dialog-semiconductor.com/products F: Documentation/devicetree/bindings/input/da90??-onkey.txt F: Documentation/devicetree/bindings/input/dlg,da72??.txt F: Documentation/devicetree/bindings/mfd/da90*.txt -F: Documentation/devicetree/bindings/mfd/da90*.yaml +F: Documentation/devicetree/bindings/mfd/dlg,da90*.yaml F: Documentation/devicetree/bindings/regulator/da92*.txt F: Documentation/devicetree/bindings/regulator/dlg,da9*.yaml F: Documentation/devicetree/bindings/regulator/slg51000.txt @@ -6211,6 +6222,7 @@ X: Documentation/devicetree/ X: Documentation/driver-api/media/ X: Documentation/firmware-guide/acpi/ X: Documentation/i2c/ +X: Documentation/netlink/ X: Documentation/power/ X: Documentation/spi/ X: Documentation/userspace-api/media/ @@ -8158,6 +8170,7 @@ F: include/linux/spi/spi-fsl-dspi.h FREESCALE ENETC ETHERNET DRIVERS M: Claudiu Manoil +M: Vladimir Oltean L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/freescale/enetc/ @@ -8786,6 +8799,7 @@ F: include/linux/gpio/regmap.h GPIO SUBSYSTEM M: Linus Walleij M: Bartosz Golaszewski +R: Andy Shevchenko L: linux-gpio@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git @@ -9339,7 +9353,7 @@ F: include/linux/hisi_acc_qm.h HISILICON ROCE DRIVER M: Haoyue Xu -M: Wenpeng Liang +M: Junxian Huang L: linux-rdma@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt @@ -9682,8 +9696,9 @@ F: include/uapi/linux/i2c-*.h F: include/uapi/linux/i2c.h I2C SUBSYSTEM HOST DRIVERS +M: Andi Shyti L: linux-i2c@vger.kernel.org -S: Odd Fixes +S: Maintained W: https://i2c.wiki.kernel.org/ Q: https://patchwork.ozlabs.org/project/linux-i2c/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/wsa/linux.git @@ -9957,8 +9972,9 @@ M: Miquel Raynal L: linux-wpan@vger.kernel.org S: Maintained W: https://linux-wpan.org/ -T: git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan.git -T: git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan-next.git +Q: https://patchwork.kernel.org/project/linux-wpan/list/ +T: git git://git.kernel.org/pub/scm/linux/kernel/git/wpan/wpan.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/wpan/wpan-next.git F: Documentation/networking/ieee802154.rst F: drivers/net/ieee802154/ F: include/linux/ieee802154.h @@ -10110,7 +10126,7 @@ S: Maintained F: Documentation/process/kernel-docs.rst INDUSTRY PACK SUBSYSTEM (IPACK) -M: Samuel Iglesias Gonsalvez +M: Vaibhav Gupta M: Jens Taprogge M: Greg Kroah-Hartman L: industrypack-devel@lists.sourceforge.net @@ -11305,9 +11321,9 @@ R: Tom Talpey L: linux-cifs@vger.kernel.org S: Maintained T: git git://git.samba.org/ksmbd.git -F: Documentation/filesystems/cifs/ksmbd.rst -F: fs/ksmbd/ -F: fs/smbfs_common/ +F: Documentation/filesystems/smb/ksmbd.rst +F: fs/smb/common/ +F: fs/smb/server/ KERNEL UNIT TESTING FRAMEWORK (KUnit) M: Brendan Higgins @@ -13254,10 +13270,11 @@ F: drivers/memory/mtk-smi.c F: include/soc/mediatek/smi.h MEDIATEK SWITCH DRIVER -M: Sean Wang +M: Arınç ÜNAL +M: Daniel Golle M: Landen Chao M: DENG Qingfang -M: Daniel Golle +M: Sean Wang L: netdev@vger.kernel.org S: Maintained F: drivers/net/dsa/mt7530-mdio.c @@ -13832,7 +13849,7 @@ F: drivers/tty/serial/8250/8250_pci1xxxx.c MICROCHIP POLARFIRE FPGA DRIVERS M: Conor Dooley -R: Ivan Bornyakov +R: Vladimir Georgiev L: linux-fpga@vger.kernel.org S: Supported F: Documentation/devicetree/bindings/fpga/microchip,mpf-spi-fpga-mgr.yaml @@ 
-14566,6 +14583,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git F: Documentation/devicetree/bindings/net/ F: drivers/connector/ F: drivers/net/ +X: drivers/net/wireless/ F: include/dt-bindings/net/ F: include/linux/etherdevice.h F: include/linux/fcdevice.h @@ -14615,6 +14633,7 @@ B: mailto:netdev@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git F: Documentation/core-api/netlink.rst +F: Documentation/netlink/ F: Documentation/networking/ F: Documentation/process/maintainer-netdev.rst F: Documentation/userspace-api/netlink/ @@ -14629,6 +14648,7 @@ F: include/uapi/linux/netdevice.h F: lib/net_utils.c F: lib/random32.c F: net/ +X: net/bluetooth/ F: tools/net/ F: tools/testing/selftests/net/ @@ -14928,6 +14948,7 @@ F: drivers/ntb/hw/intel/ NTFS FILESYSTEM M: Anton Altaparmakov +R: Namjae Jeon L: linux-ntfs-dev@lists.sourceforge.net S: Supported W: http://www.tuxera.com/ @@ -16365,7 +16386,7 @@ F: Documentation/devicetree/bindings/pci/intel,keembay-pcie* F: drivers/pci/controller/dwc/pcie-keembay.c PCIE DRIVER FOR INTEL LGM GW SOC -M: Rahul Tanwar +M: Chuanhua Lei L: linux-pci@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/pci/intel-gw-pcie.yaml @@ -17808,7 +17829,7 @@ F: tools/testing/selftests/rtc/ Real-time Linux Analysis (RTLA) tools M: Daniel Bristot de Oliveira M: Steven Rostedt -L: linux-trace-devel@vger.kernel.org +L: linux-trace-kernel@vger.kernel.org S: Maintained F: Documentation/tools/rtla/ F: tools/tracing/rtla/ @@ -18029,6 +18050,14 @@ S: Maintained F: Documentation/devicetree/bindings/usb/renesas,rzn1-usbf.yaml F: drivers/usb/gadget/udc/renesas_usbf.c +RENESAS RZ/V2M I2C DRIVER +M: Fabrizio Castro +L: linux-i2c@vger.kernel.org +L: linux-renesas-soc@vger.kernel.org +S: Supported +F: Documentation/devicetree/bindings/i2c/renesas,rzv2m.yaml +F: drivers/i2c/busses/i2c-rzv2m.c + RENESAS USB PHY DRIVER M: Yoshihiro Shimoda L: linux-renesas-soc@vger.kernel.org @@ -18370,7 +18399,7 @@ F: drivers/infiniband/ulp/rtrs/ RUNTIME VERIFICATION (RV) M: Daniel Bristot de Oliveira M: Steven Rostedt -L: linux-trace-devel@vger.kernel.org +L: linux-trace-kernel@vger.kernel.org S: Maintained F: Documentation/trace/rv/ F: include/linux/rv.h @@ -18575,10 +18604,9 @@ F: Documentation/admin-guide/LSM/SafeSetID.rst F: security/safesetid/ SAMSUNG AUDIO (ASoC) DRIVERS -M: Krzysztof Kozlowski M: Sylwester Nawrocki L: alsa-devel@alsa-project.org (moderated for non-subscribers) -S: Supported +S: Maintained B: mailto:linux-samsung-soc@vger.kernel.org F: Documentation/devicetree/bindings/sound/samsung* F: sound/soc/samsung/ @@ -18706,7 +18734,6 @@ F: include/dt-bindings/clock/samsung,*.h F: include/linux/clk/samsung.h SAMSUNG SPI DRIVERS -M: Krzysztof Kozlowski M: Andi Shyti L: linux-spi@vger.kernel.org L: linux-samsung-soc@vger.kernel.org @@ -18842,12 +18869,11 @@ F: drivers/target/ F: include/target/ SCTP PROTOCOL -M: Neil Horman M: Marcelo Ricardo Leitner M: Xin Long L: linux-sctp@vger.kernel.org S: Maintained -W: http://lksctp.sourceforge.net +W: https://github.com/sctp/lksctp-tools/wiki F: Documentation/networking/sctp.rst F: include/linux/sctp.h F: include/net/sctp/ @@ -19116,6 +19142,9 @@ SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS M: Karsten Graul M: Wenjia Zhang M: Jan Karcher +R: D. 
Wythe +R: Tony Lu +R: Wen Gu L: linux-s390@vger.kernel.org S: Supported F: net/smc/ diff --git a/Makefile b/Makefile index f836936fb4d8b919f0b1692bff41b4df61dfd0ec..e51e4d9174ab380e2061dee296c229ed2f761f95 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 6 PATCHLEVEL = 4 SUBLEVEL = 0 -EXTRAVERSION = -rc2 +EXTRAVERSION = NAME = Hurr durr I'ma ninja sloth # *DOCUMENTATION* diff --git a/arch/arm/boot/dts/am57xx-cl-som-am57x.dts b/arch/arm/boot/dts/am57xx-cl-som-am57x.dts index 2fc9a5d5e0c0dea8668c230860c327b41134df60..625b9b311b49d2791f949ed06285838a6c36fca9 100644 --- a/arch/arm/boot/dts/am57xx-cl-som-am57x.dts +++ b/arch/arm/boot/dts/am57xx-cl-som-am57x.dts @@ -527,7 +527,7 @@ interrupt-parent = <&gpio1>; interrupts = <31 0>; - pendown-gpio = <&gpio1 31 0>; + pendown-gpio = <&gpio1 31 GPIO_ACTIVE_LOW>; ti,x-min = /bits/ 16 <0x0>; diff --git a/arch/arm/boot/dts/at91-sama7g5ek.dts b/arch/arm/boot/dts/at91-sama7g5ek.dts index aa5cc0e98bbab2b5e6512eebbaf0292b2e3467cd..217e9b96c61e5dea644c6b2f46d5807f58312b38 100644 --- a/arch/arm/boot/dts/at91-sama7g5ek.dts +++ b/arch/arm/boot/dts/at91-sama7g5ek.dts @@ -792,7 +792,7 @@ }; &shdwc { - atmel,shdwc-debouncer = <976>; + debounce-delay-us = <976>; status = "okay"; input@0 { diff --git a/arch/arm/boot/dts/at91sam9261ek.dts b/arch/arm/boot/dts/at91sam9261ek.dts index 88869ca874d1ae3cab71bb2ff98d8a739822877f..045cb253f23a6d07a39724f23be412927975e570 100644 --- a/arch/arm/boot/dts/at91sam9261ek.dts +++ b/arch/arm/boot/dts/at91sam9261ek.dts @@ -156,7 +156,7 @@ compatible = "ti,ads7843"; interrupts-extended = <&pioC 2 IRQ_TYPE_EDGE_BOTH>; spi-max-frequency = <3000000>; - pendown-gpio = <&pioC 2 GPIO_ACTIVE_HIGH>; + pendown-gpio = <&pioC 2 GPIO_ACTIVE_LOW>; ti,x-min = /bits/ 16 <150>; ti,x-max = /bits/ 16 <3830>; diff --git a/arch/arm/boot/dts/imx6qdl-mba6.dtsi b/arch/arm/boot/dts/imx6qdl-mba6.dtsi index 78555a6188510f63da0bf1a2659423cbc56205de..7b7e6c2ad190c838b38c8e9a74ceea01992fdc1b 100644 --- a/arch/arm/boot/dts/imx6qdl-mba6.dtsi +++ b/arch/arm/boot/dts/imx6qdl-mba6.dtsi @@ -209,6 +209,7 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_pcie>; reset-gpio = <&gpio6 7 GPIO_ACTIVE_LOW>; + vpcie-supply = <®_pcie>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6ull-dhcor-som.dtsi b/arch/arm/boot/dts/imx6ull-dhcor-som.dtsi index 5882c7565f6495b31c0d2c5f88431658e3ecf758..32a6022625d979041eb038c70213e696eb81b3ed 100644 --- a/arch/arm/boot/dts/imx6ull-dhcor-som.dtsi +++ b/arch/arm/boot/dts/imx6ull-dhcor-som.dtsi @@ -8,6 +8,7 @@ #include #include #include +#include #include "imx6ull.dtsi" / { @@ -84,16 +85,20 @@ regulators { vdd_soc_in_1v4: buck1 { + regulator-allowed-modes = ; /* PFM */ regulator-always-on; regulator-boot-on; + regulator-initial-mode = ; regulator-max-microvolt = <1400000>; regulator-min-microvolt = <1400000>; regulator-name = "vdd_soc_in_1v4"; }; vcc_3v3: buck2 { + regulator-allowed-modes = ; /* PWM */ regulator-always-on; regulator-boot-on; + regulator-initial-mode = ; regulator-max-microvolt = <3300000>; regulator-min-microvolt = <3300000>; regulator-name = "vcc_3v3"; @@ -106,8 +111,10 @@ * the voltage is set to 1.5V. 
*/ vcc_ddr_1v35: buck3 { + regulator-allowed-modes = ; /* PWM */ regulator-always-on; regulator-boot-on; + regulator-initial-mode = ; regulator-max-microvolt = <1500000>; regulator-min-microvolt = <1500000>; regulator-name = "vcc_ddr_1v35"; diff --git a/arch/arm/boot/dts/imx7d-pico-hobbit.dts b/arch/arm/boot/dts/imx7d-pico-hobbit.dts index d917dc4f2f22759bc546c18248bbff7fcc3d726f..6ad39dca70096601ff1b7b8065f040e7c1d711a9 100644 --- a/arch/arm/boot/dts/imx7d-pico-hobbit.dts +++ b/arch/arm/boot/dts/imx7d-pico-hobbit.dts @@ -64,7 +64,7 @@ interrupt-parent = <&gpio2>; interrupts = <7 0>; spi-max-frequency = <1000000>; - pendown-gpio = <&gpio2 7 0>; + pendown-gpio = <&gpio2 7 GPIO_ACTIVE_LOW>; vcc-supply = <®_3p3v>; ti,x-min = /bits/ 16 <0>; ti,x-max = /bits/ 16 <4095>; diff --git a/arch/arm/boot/dts/imx7d-sdb.dts b/arch/arm/boot/dts/imx7d-sdb.dts index f483bc0afe5ea75f8235f32ce8c06a60ba4e0eec..234e5fc647b22829891b0a38c2490ded483cabde 100644 --- a/arch/arm/boot/dts/imx7d-sdb.dts +++ b/arch/arm/boot/dts/imx7d-sdb.dts @@ -205,7 +205,7 @@ pinctrl-0 = <&pinctrl_tsc2046_pendown>; interrupt-parent = <&gpio2>; interrupts = <29 0>; - pendown-gpio = <&gpio2 29 GPIO_ACTIVE_HIGH>; + pendown-gpio = <&gpio2 29 GPIO_ACTIVE_LOW>; touchscreen-max-pressure = <255>; wakeup-source; }; diff --git a/arch/arm/boot/dts/omap3-cm-t3x.dtsi b/arch/arm/boot/dts/omap3-cm-t3x.dtsi index e61b8a2bfb7deafd2d35da4a7b77c46ddba5db0b..51baedf1603bd43248ad4ed15b36dfdb5b6d6274 100644 --- a/arch/arm/boot/dts/omap3-cm-t3x.dtsi +++ b/arch/arm/boot/dts/omap3-cm-t3x.dtsi @@ -227,7 +227,7 @@ interrupt-parent = <&gpio2>; interrupts = <25 0>; /* gpio_57 */ - pendown-gpio = <&gpio2 25 GPIO_ACTIVE_HIGH>; + pendown-gpio = <&gpio2 25 GPIO_ACTIVE_LOW>; ti,x-min = /bits/ 16 <0x0>; ti,x-max = /bits/ 16 <0x0fff>; diff --git a/arch/arm/boot/dts/omap3-devkit8000-lcd-common.dtsi b/arch/arm/boot/dts/omap3-devkit8000-lcd-common.dtsi index 3decc2d78a6ca0776d16fd8bb74dbdefcd54466a..a7f99ae0c1fe9aca257d39458668640ea05ff222 100644 --- a/arch/arm/boot/dts/omap3-devkit8000-lcd-common.dtsi +++ b/arch/arm/boot/dts/omap3-devkit8000-lcd-common.dtsi @@ -54,7 +54,7 @@ interrupt-parent = <&gpio1>; interrupts = <27 0>; /* gpio_27 */ - pendown-gpio = <&gpio1 27 GPIO_ACTIVE_HIGH>; + pendown-gpio = <&gpio1 27 GPIO_ACTIVE_LOW>; ti,x-min = /bits/ 16 <0x0>; ti,x-max = /bits/ 16 <0x0fff>; diff --git a/arch/arm/boot/dts/omap3-lilly-a83x.dtsi b/arch/arm/boot/dts/omap3-lilly-a83x.dtsi index c595afe4181d7839139b620689575a8006a04093..d310b5c7bac362e8e2ae24e512d3667841fc9752 100644 --- a/arch/arm/boot/dts/omap3-lilly-a83x.dtsi +++ b/arch/arm/boot/dts/omap3-lilly-a83x.dtsi @@ -311,7 +311,7 @@ interrupt-parent = <&gpio1>; interrupts = <8 0>; /* boot6 / gpio_8 */ spi-max-frequency = <1000000>; - pendown-gpio = <&gpio1 8 GPIO_ACTIVE_HIGH>; + pendown-gpio = <&gpio1 8 GPIO_ACTIVE_LOW>; vcc-supply = <®_vcc3>; pinctrl-names = "default"; pinctrl-0 = <&tsc2048_pins>; diff --git a/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi b/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi index 1d6e88f99eb317fcdfb117b3a8ac050ee8fb6d82..c3570acc35fadaabafeb7ea1433b6322535b322f 100644 --- a/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi +++ b/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi @@ -149,7 +149,7 @@ interrupt-parent = <&gpio4>; interrupts = <18 0>; /* gpio_114 */ - pendown-gpio = <&gpio4 18 GPIO_ACTIVE_HIGH>; + pendown-gpio = <&gpio4 18 GPIO_ACTIVE_LOW>; ti,x-min = /bits/ 16 <0x0>; ti,x-max = /bits/ 16 <0x0fff>; diff --git a/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi 
b/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi index 7e30f9d45790e7c804453837ac34aa6b47f31ce7..d95a0e130058cbe7c6ef3c1c3771970f77d48bea 100644 --- a/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi +++ b/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi @@ -160,7 +160,7 @@ interrupt-parent = <&gpio4>; interrupts = <18 0>; /* gpio_114 */ - pendown-gpio = <&gpio4 18 GPIO_ACTIVE_HIGH>; + pendown-gpio = <&gpio4 18 GPIO_ACTIVE_LOW>; ti,x-min = /bits/ 16 <0x0>; ti,x-max = /bits/ 16 <0x0fff>; diff --git a/arch/arm/boot/dts/omap3-pandora-common.dtsi b/arch/arm/boot/dts/omap3-pandora-common.dtsi index 559853764487f28f171708aba3e00dbb436eda38..4c3b6bab179cc1be81e22f5735c1a245efb32385 100644 --- a/arch/arm/boot/dts/omap3-pandora-common.dtsi +++ b/arch/arm/boot/dts/omap3-pandora-common.dtsi @@ -651,7 +651,7 @@ pinctrl-0 = <&penirq_pins>; interrupt-parent = <&gpio3>; interrupts = <30 IRQ_TYPE_NONE>; /* GPIO_94 */ - pendown-gpio = <&gpio3 30 GPIO_ACTIVE_HIGH>; + pendown-gpio = <&gpio3 30 GPIO_ACTIVE_LOW>; vcc-supply = <&vaux4>; ti,x-min = /bits/ 16 <0>; diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts index 2d87b9fc230ee20c1936f41b1c026691e2b56b1a..af288d63a26a464b456e5f353a0464d61715022e 100644 --- a/arch/arm/boot/dts/omap5-cm-t54.dts +++ b/arch/arm/boot/dts/omap5-cm-t54.dts @@ -354,7 +354,7 @@ interrupt-parent = <&gpio1>; interrupts = <15 0>; /* gpio1_wk15 */ - pendown-gpio = <&gpio1 15 GPIO_ACTIVE_HIGH>; + pendown-gpio = <&gpio1 15 GPIO_ACTIVE_LOW>; ti,x-min = /bits/ 16 <0x0>; diff --git a/arch/arm/boot/dts/qcom-apq8026-asus-sparrow.dts b/arch/arm/boot/dts/qcom-apq8026-asus-sparrow.dts index 7a80e1c9f126b8ca63f01c5b7dbf7e5305bb3a57..aa0e0e8d2a973ea75f3f86e938f0c909c4ea73dd 100644 --- a/arch/arm/boot/dts/qcom-apq8026-asus-sparrow.dts +++ b/arch/arm/boot/dts/qcom-apq8026-asus-sparrow.dts @@ -268,7 +268,6 @@ function = "gpio"; drive-strength = <8>; bias-disable; - input-enable; }; wlan_hostwake_default_state: wlan-hostwake-default-state { @@ -276,7 +275,6 @@ function = "gpio"; drive-strength = <2>; bias-disable; - input-enable; }; wlan_regulator_default_state: wlan-regulator-default-state { diff --git a/arch/arm/boot/dts/qcom-apq8026-huawei-sturgeon.dts b/arch/arm/boot/dts/qcom-apq8026-huawei-sturgeon.dts index d64096028ab13b5216308624e781b2cb6749fcd0..5593a3a60d6c4dc732db544bee5ea8a5c6a08a25 100644 --- a/arch/arm/boot/dts/qcom-apq8026-huawei-sturgeon.dts +++ b/arch/arm/boot/dts/qcom-apq8026-huawei-sturgeon.dts @@ -352,7 +352,6 @@ function = "gpio"; drive-strength = <2>; bias-disable; - input-enable; }; wlan_regulator_default_state: wlan-regulator-default-state { diff --git a/arch/arm/boot/dts/qcom-apq8026-lg-lenok.dts b/arch/arm/boot/dts/qcom-apq8026-lg-lenok.dts index b82381229adf6ebdb1533af643e781a6717e6791..b887e5361ec3a22b280554486a2a96216906f1fc 100644 --- a/arch/arm/boot/dts/qcom-apq8026-lg-lenok.dts +++ b/arch/arm/boot/dts/qcom-apq8026-lg-lenok.dts @@ -307,7 +307,6 @@ function = "gpio"; drive-strength = <2>; bias-disable; - input-enable; }; touch_pins: touch-state { @@ -317,7 +316,6 @@ drive-strength = <8>; bias-pull-down; - input-enable; }; reset-pins { @@ -335,7 +333,6 @@ function = "gpio"; drive-strength = <2>; bias-disable; - input-enable; }; wlan_regulator_default_state: wlan-regulator-default-state { diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi index 672b246afbba27cd0474c60509b68dfdd0f64f6b..d2289205ff812844b99766781b5bb00f1752eb1b 100644 --- a/arch/arm/boot/dts/qcom-apq8064.dtsi +++ 
b/arch/arm/boot/dts/qcom-apq8064.dtsi @@ -83,6 +83,7 @@ L2: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; }; idle-states { diff --git a/arch/arm/boot/dts/qcom-apq8084.dtsi b/arch/arm/boot/dts/qcom-apq8084.dtsi index b653ea40c441c6c119e9d3d68d76120d5cfbe39d..83839e1ec4d16cc1534eb176c694e71111ba3213 100644 --- a/arch/arm/boot/dts/qcom-apq8084.dtsi +++ b/arch/arm/boot/dts/qcom-apq8084.dtsi @@ -74,6 +74,7 @@ L2: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; qcom,saw = <&saw_l2>; }; diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi index dfcfb3339c232e063502fac0feae3695b3559ada..f0ef86fadc9d9a70bc1a99b63a6b861a7ae90fbd 100644 --- a/arch/arm/boot/dts/qcom-ipq4019.dtsi +++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi @@ -102,6 +102,7 @@ L2: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; qcom,saw = <&saw_l2>; }; }; diff --git a/arch/arm/boot/dts/qcom-ipq8064.dtsi b/arch/arm/boot/dts/qcom-ipq8064.dtsi index af6764770fd15e9a12728878d393b9ae38d24419..7581845737a8b507fcfab4cfb5547bc89c382484 100644 --- a/arch/arm/boot/dts/qcom-ipq8064.dtsi +++ b/arch/arm/boot/dts/qcom-ipq8064.dtsi @@ -45,6 +45,7 @@ L2: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; }; }; diff --git a/arch/arm/boot/dts/qcom-mdm9615-wp8548-mangoh-green.dts b/arch/arm/boot/dts/qcom-mdm9615-wp8548-mangoh-green.dts index a8304769b509a8945bcdf90a34b38d465bf21cc4..b269fdca1460c8401bd595d8b58bc8e01b1c9607 100644 --- a/arch/arm/boot/dts/qcom-mdm9615-wp8548-mangoh-green.dts +++ b/arch/arm/boot/dts/qcom-mdm9615-wp8548-mangoh-green.dts @@ -49,7 +49,6 @@ gpioext1-pins { pins = "gpio2"; function = "gpio"; - input-enable; bias-disable; }; }; diff --git a/arch/arm/boot/dts/qcom-msm8660.dtsi b/arch/arm/boot/dts/qcom-msm8660.dtsi index f601b40ebcf4e9e75caa868c919592ca8af83f5f..78023ed2fdf71f5168692d43a8b9d80573666545 100644 --- a/arch/arm/boot/dts/qcom-msm8660.dtsi +++ b/arch/arm/boot/dts/qcom-msm8660.dtsi @@ -36,6 +36,7 @@ L2: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; }; }; diff --git a/arch/arm/boot/dts/qcom-msm8960.dtsi b/arch/arm/boot/dts/qcom-msm8960.dtsi index 2a668cd535ccebb9f18c23ad8544190ba9b3f3e5..616fef2ea682570c6c49634525a309bdc55a585d 100644 --- a/arch/arm/boot/dts/qcom-msm8960.dtsi +++ b/arch/arm/boot/dts/qcom-msm8960.dtsi @@ -42,6 +42,7 @@ L2: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; }; }; diff --git a/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts b/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts index ab35f2d644c0e83b284d87ea8230ba9d9dcc0f1d..861695cecf84e5f57b2ecbaff4895e124b7ad672 100644 --- a/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts +++ b/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts @@ -592,7 +592,6 @@ pins = "gpio73"; function = "gpio"; bias-disable; - input-enable; }; touch_pin: touch-state { @@ -602,7 +601,6 @@ drive-strength = <2>; bias-disable; - input-enable; }; reset-pins { diff --git a/arch/arm/boot/dts/qcom-msm8974-sony-xperia-rhine.dtsi b/arch/arm/boot/dts/qcom-msm8974-sony-xperia-rhine.dtsi index d3bec03b126c8b68cb404fa68ab3b60e9579ef99..68a2f9094e536f758b25ed0e97d97fc87257b71e 100644 --- a/arch/arm/boot/dts/qcom-msm8974-sony-xperia-rhine.dtsi +++ b/arch/arm/boot/dts/qcom-msm8974-sony-xperia-rhine.dtsi @@ -433,7 +433,6 @@ function = "gpio"; drive-strength = <2>; bias-disable; - input-enable; }; sdc1_on: sdc1-on-state { diff --git a/arch/arm/boot/dts/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom-msm8974.dtsi 
index 8208012684d4a28edf627bdac4f9e17837b226ba..7ed0d925a4e9903a362d62010adba97aef0b7141 100644 --- a/arch/arm/boot/dts/qcom-msm8974.dtsi +++ b/arch/arm/boot/dts/qcom-msm8974.dtsi @@ -80,6 +80,7 @@ L2: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; qcom,saw = <&saw_l2>; }; diff --git a/arch/arm/boot/dts/qcom-msm8974pro-oneplus-bacon.dts b/arch/arm/boot/dts/qcom-msm8974pro-oneplus-bacon.dts index 8d2a054d8feed72afa744122f73b3ff94275a861..8230d0e1d95d1de1bff1ad1af990e1850b6b4246 100644 --- a/arch/arm/boot/dts/qcom-msm8974pro-oneplus-bacon.dts +++ b/arch/arm/boot/dts/qcom-msm8974pro-oneplus-bacon.dts @@ -461,7 +461,6 @@ function = "gpio"; drive-strength = <2>; bias-disable; - input-enable; }; reset-pins { diff --git a/arch/arm/boot/dts/qcom-msm8974pro-samsung-klte.dts b/arch/arm/boot/dts/qcom-msm8974pro-samsung-klte.dts index b9698ffb66ca82c14211beb5f0a501f28a3060a0..eb505d6d7f31f656ec575e7bd89071c5306e89ae 100644 --- a/arch/arm/boot/dts/qcom-msm8974pro-samsung-klte.dts +++ b/arch/arm/boot/dts/qcom-msm8974pro-samsung-klte.dts @@ -704,7 +704,6 @@ pins = "gpio75"; function = "gpio"; drive-strength = <16>; - input-enable; }; devwake-pins { @@ -760,14 +759,12 @@ i2c_touchkey_pins: i2c-touchkey-state { pins = "gpio95", "gpio96"; function = "gpio"; - input-enable; bias-pull-up; }; i2c_led_gpioex_pins: i2c-led-gpioex-state { pins = "gpio120", "gpio121"; function = "gpio"; - input-enable; bias-pull-down; }; @@ -781,7 +778,6 @@ wifi_pin: wifi-state { pins = "gpio92"; function = "gpio"; - input-enable; bias-pull-down; }; diff --git a/arch/arm/boot/dts/qcom-msm8974pro-sony-xperia-shinano-castor.dts b/arch/arm/boot/dts/qcom-msm8974pro-sony-xperia-shinano-castor.dts index 04bc58d87abf9c4948a5d025882ce6ec1e511705..0f650ed3100523854453c66411b8c26606a41531 100644 --- a/arch/arm/boot/dts/qcom-msm8974pro-sony-xperia-shinano-castor.dts +++ b/arch/arm/boot/dts/qcom-msm8974pro-sony-xperia-shinano-castor.dts @@ -631,7 +631,6 @@ function = "gpio"; drive-strength = <2>; bias-disable; - input-enable; }; bt_host_wake_pin: bt-host-wake-state { diff --git a/arch/arm/boot/dts/stm32f429.dtsi b/arch/arm/boot/dts/stm32f429.dtsi index c9e05e3540d6408791be69435b23bb3b131d554b..00bf53f99c294c6c183366798bbd3bca7569456b 100644 --- a/arch/arm/boot/dts/stm32f429.dtsi +++ b/arch/arm/boot/dts/stm32f429.dtsi @@ -387,6 +387,7 @@ interrupt-names = "tx", "rx0", "rx1", "sce"; resets = <&rcc STM32F4_APB1_RESET(CAN2)>; clocks = <&rcc 0 STM32F4_APB1_CLOCK(CAN2)>; + st,can-secondary; st,gcan = <&gcan>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/stm32f7-pinctrl.dtsi b/arch/arm/boot/dts/stm32f7-pinctrl.dtsi index c8e6c52fb248e0bb405472ceefa11828fa7e07a9..9f65403295ca059fc917e1c575a32b77cab96a56 100644 --- a/arch/arm/boot/dts/stm32f7-pinctrl.dtsi +++ b/arch/arm/boot/dts/stm32f7-pinctrl.dtsi @@ -283,6 +283,88 @@ slew-rate = <2>; }; }; + + can1_pins_a: can1-0 { + pins1 { + pinmux = ; /* CAN1_TX */ + }; + pins2 { + pinmux = ; /* CAN1_RX */ + bias-pull-up; + }; + }; + + can1_pins_b: can1-1 { + pins1 { + pinmux = ; /* CAN1_TX */ + }; + pins2 { + pinmux = ; /* CAN1_RX */ + bias-pull-up; + }; + }; + + can1_pins_c: can1-2 { + pins1 { + pinmux = ; /* CAN1_TX */ + }; + pins2 { + pinmux = ; /* CAN1_RX */ + bias-pull-up; + + }; + }; + + can1_pins_d: can1-3 { + pins1 { + pinmux = ; /* CAN1_TX */ + }; + pins2 { + pinmux = ; /* CAN1_RX */ + bias-pull-up; + + }; + }; + + can2_pins_a: can2-0 { + pins1 { + pinmux = ; /* CAN2_TX */ + }; + pins2 { + pinmux = ; /* CAN2_RX */ + bias-pull-up; + }; + }; + + can2_pins_b: can2-1 { + 
pins1 { + pinmux = ; /* CAN2_TX */ + }; + pins2 { + pinmux = ; /* CAN2_RX */ + bias-pull-up; + }; + }; + + can3_pins_a: can3-0 { + pins1 { + pinmux = ; /* CAN3_TX */ + }; + pins2 { + pinmux = ; /* CAN3_RX */ + bias-pull-up; + }; + }; + + can3_pins_b: can3-1 { + pins1 { + pinmux = ; /* CAN3_TX */ + }; + pins2 { + pinmux = ; /* CAN3_RX */ + bias-pull-up; + }; + }; }; }; }; diff --git a/arch/arm/boot/dts/vexpress-v2p-ca5s.dts b/arch/arm/boot/dts/vexpress-v2p-ca5s.dts index 3b88209bacea2f8664dcc4653f6bbca2397de4cb..ff1f9a1bcfcfc3bdffe1234ae103d876dfb891e9 100644 --- a/arch/arm/boot/dts/vexpress-v2p-ca5s.dts +++ b/arch/arm/boot/dts/vexpress-v2p-ca5s.dts @@ -132,6 +132,7 @@ reg = <0x2c0f0000 0x1000>; interrupts = <0 84 4>; cache-level = <2>; + cache-unified; }; pmu { diff --git a/arch/arm/include/asm/arm_pmuv3.h b/arch/arm/include/asm/arm_pmuv3.h index 78d3d4b82c6c2598d63d4c691ba0e5ae59b67e5c..f3cd04ff022df106f17ce676ede60d647cb3cf20 100644 --- a/arch/arm/include/asm/arm_pmuv3.h +++ b/arch/arm/include/asm/arm_pmuv3.h @@ -92,7 +92,7 @@ #define RETURN_READ_PMEVCNTRN(n) \ return read_sysreg(PMEVCNTR##n) -static unsigned long read_pmevcntrn(int n) +static inline unsigned long read_pmevcntrn(int n) { PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN); return 0; @@ -100,14 +100,14 @@ static unsigned long read_pmevcntrn(int n) #define WRITE_PMEVCNTRN(n) \ write_sysreg(val, PMEVCNTR##n) -static void write_pmevcntrn(int n, unsigned long val) +static inline void write_pmevcntrn(int n, unsigned long val) { PMEVN_SWITCH(n, WRITE_PMEVCNTRN); } #define WRITE_PMEVTYPERN(n) \ write_sysreg(val, PMEVTYPER##n) -static void write_pmevtypern(int n, unsigned long val) +static inline void write_pmevtypern(int n, unsigned long val) { PMEVN_SWITCH(n, WRITE_PMEVTYPERN); } @@ -222,6 +222,11 @@ static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr) return false; } +static inline bool kvm_set_pmuserenr(u64 val) +{ + return false; +} + /* PMU Version in DFR Register */ #define ARMV8_PMU_DFR_VER_NI 0 #define ARMV8_PMU_DFR_VER_V3P4 0x5 diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index 60dc56d8acfb9aa08a289077e35e959621e2a1f3..437dd0352fd443a887f0a5db99b4d0919475e075 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c @@ -334,16 +334,14 @@ static bool at91_pm_eth_quirk_is_valid(struct at91_pm_quirk_eth *eth) pdev = of_find_device_by_node(eth->np); if (!pdev) return false; + /* put_device(eth->dev) is called at the end of suspend. */ eth->dev = &pdev->dev; } /* No quirks if device isn't a wakeup source. */ - if (!device_may_wakeup(eth->dev)) { - put_device(eth->dev); + if (!device_may_wakeup(eth->dev)) return false; - } - /* put_device(eth->dev) is called at the end of suspend. */ return true; } @@ -439,14 +437,14 @@ clk_unconfigure: pr_err("AT91: PM: failed to enable %s clocks\n", j == AT91_PM_G_ETH ? "geth" : "eth"); } - } else { - /* - * Release the reference to eth->dev taken in - * at91_pm_eth_quirk_is_valid(). - */ - put_device(eth->dev); - eth->dev = NULL; } + + /* + * Release the reference to eth->dev taken in + * at91_pm_eth_quirk_is_valid(). 
+ */ + put_device(eth->dev); + eth->dev = NULL; } return ret; diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index b1201d25a8a4ee75e6eeaddb46f626655883e6bf..343e1e1cae10a3d51098b2ad7b9f1d457fcca2a5 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1516,7 +1516,7 @@ config XEN # 16K | 27 | 14 | 13 | 11 | # 64K | 29 | 16 | 13 | 13 | config ARCH_FORCE_MAX_ORDER - int "Order of maximal physically contiguous allocations" if EXPERT && (ARM64_4K_PAGES || ARM64_16K_PAGES) + int default "13" if ARM64_64K_PAGES default "11" if ARM64_16K_PAGES default "10" diff --git a/arch/arm64/boot/dts/arm/foundation-v8.dtsi b/arch/arm64/boot/dts/arm/foundation-v8.dtsi index 029578072d8fb495ce64cc86ee59e0dc3c36ab08..7b41537731a6aec17fee92655a52ab132aaefd36 100644 --- a/arch/arm64/boot/dts/arm/foundation-v8.dtsi +++ b/arch/arm64/boot/dts/arm/foundation-v8.dtsi @@ -59,6 +59,7 @@ L2_0: l2-cache0 { compatible = "cache"; cache-level = <2>; + cache-unified; }; }; diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts index ef68f5aae7ddf7b7ac1d886ca30de613d08aebd7..afdf954206f1d1a895b6c0106b0b7afd7d9053b8 100644 --- a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts +++ b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts @@ -72,6 +72,7 @@ L2_0: l2-cache0 { compatible = "cache"; cache-level = <2>; + cache-unified; }; }; diff --git a/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts b/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts index 796cd7d02eb55ac954a7d828000c706651250130..7bdeb965f0a964f3dd77d9c6d39717b4b9b00201 100644 --- a/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts +++ b/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts @@ -58,6 +58,7 @@ L2_0: l2-cache0 { compatible = "cache"; cache-level = <2>; + cache-unified; }; }; diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi index 2209c1ac6e9b011726b31b4269deac85b6445a92..e62a43591361b360a64f11998bda741708bbc1f0 100644 --- a/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi @@ -171,6 +171,7 @@ conn_subsys: bus@5b000000 { interrupt-names = "host", "peripheral", "otg", "wakeup"; phys = <&usb3_phy>; phy-names = "cdns3,usb3-phy"; + cdns,on-chip-buff-size = /bits/ 16 <18>; status = "disabled"; }; }; diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi index 2dce8f2ee3ea1f9b9133d1258e2b1a486d6e3c83..adb98a72bdfd911166c0bfdc62a4675240db356e 100644 --- a/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi @@ -90,6 +90,8 @@ dma_subsys: bus@5a000000 { clocks = <&uart0_lpcg IMX_LPCG_CLK_4>, <&uart0_lpcg IMX_LPCG_CLK_0>; clock-names = "ipg", "baud"; + assigned-clocks = <&clk IMX_SC_R_UART_0 IMX_SC_PM_CLK_PER>; + assigned-clock-rates = <80000000>; power-domains = <&pd IMX_SC_R_UART_0>; status = "disabled"; }; @@ -100,6 +102,8 @@ dma_subsys: bus@5a000000 { clocks = <&uart1_lpcg IMX_LPCG_CLK_4>, <&uart1_lpcg IMX_LPCG_CLK_0>; clock-names = "ipg", "baud"; + assigned-clocks = <&clk IMX_SC_R_UART_1 IMX_SC_PM_CLK_PER>; + assigned-clock-rates = <80000000>; power-domains = <&pd IMX_SC_R_UART_1>; status = "disabled"; }; @@ -110,6 +114,8 @@ dma_subsys: bus@5a000000 { clocks = <&uart2_lpcg IMX_LPCG_CLK_4>, <&uart2_lpcg IMX_LPCG_CLK_0>; clock-names = "ipg", "baud"; + assigned-clocks = <&clk IMX_SC_R_UART_2 IMX_SC_PM_CLK_PER>; + assigned-clock-rates = <80000000>; power-domains = <&pd IMX_SC_R_UART_2>; status = 
"disabled"; }; @@ -120,6 +126,8 @@ dma_subsys: bus@5a000000 { clocks = <&uart3_lpcg IMX_LPCG_CLK_4>, <&uart3_lpcg IMX_LPCG_CLK_0>; clock-names = "ipg", "baud"; + assigned-clocks = <&clk IMX_SC_R_UART_3 IMX_SC_PM_CLK_PER>; + assigned-clock-rates = <80000000>; power-domains = <&pd IMX_SC_R_UART_3>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mn-beacon-baseboard.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-beacon-baseboard.dtsi index 9e82069c941fa6ac78f6daeb4168e1e6e001813b..5a1f7c30afe57a09f16c658fc50ef228c3b86bd6 100644 --- a/arch/arm64/boot/dts/freescale/imx8mn-beacon-baseboard.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mn-beacon-baseboard.dtsi @@ -81,7 +81,7 @@ &ecspi2 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_espi2>; - cs-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>; + cs-gpios = <&gpio5 13 GPIO_ACTIVE_LOW>; status = "okay"; eeprom@0 { @@ -202,7 +202,7 @@ MX8MN_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0x82 MX8MN_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0x82 MX8MN_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0x82 - MX8MN_IOMUXC_ECSPI1_SS0_GPIO5_IO9 0x41 + MX8MN_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0x41 >; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi index 67072e6c77d5fb7834b41a938881216d87939496..cbd9d124c80d09adcdc630a8cf4710e89201aa50 100644 --- a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi @@ -98,11 +98,17 @@ #address-cells = <1>; #size-cells = <0>; - ethphy: ethernet-phy@4 { + ethphy: ethernet-phy@4 { /* AR8033 or ADIN1300 */ compatible = "ethernet-phy-ieee802.3-c22"; reg = <4>; reset-gpios = <&gpio1 9 GPIO_ACTIVE_LOW>; reset-assert-us = <10000>; + /* + * Deassert delay: + * ADIN1300 requires 5ms. + * AR8033 requires 1ms. + */ + reset-deassert-us = <20000>; }; }; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi index bd84db55005313ac4e6c96b2929cf3b684e95c67..8be8f090e8b8e9e7c89a998463b23b067e40ff30 100644 --- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi @@ -1069,13 +1069,6 @@ <&clk IMX8MN_CLK_DISP_APB_ROOT>, <&clk IMX8MN_CLK_DISP_AXI_ROOT>; clock-names = "pix", "axi", "disp_axi"; - assigned-clocks = <&clk IMX8MN_CLK_DISP_PIXEL_ROOT>, - <&clk IMX8MN_CLK_DISP_AXI>, - <&clk IMX8MN_CLK_DISP_APB>; - assigned-clock-parents = <&clk IMX8MN_CLK_DISP_PIXEL>, - <&clk IMX8MN_SYS_PLL2_1000M>, - <&clk IMX8MN_SYS_PLL1_800M>; - assigned-clock-rates = <594000000>, <500000000>, <200000000>; interrupts = ; power-domains = <&disp_blk_ctrl IMX8MN_DISPBLK_PD_LCDIF>; status = "disabled"; @@ -1093,12 +1086,6 @@ clocks = <&clk IMX8MN_CLK_DSI_CORE>, <&clk IMX8MN_CLK_DSI_PHY_REF>; clock-names = "bus_clk", "sclk_mipi"; - assigned-clocks = <&clk IMX8MN_CLK_DSI_CORE>, - <&clk IMX8MN_CLK_DSI_PHY_REF>; - assigned-clock-parents = <&clk IMX8MN_SYS_PLL1_266M>, - <&clk IMX8MN_CLK_24M>; - assigned-clock-rates = <266000000>, <24000000>; - samsung,pll-clock-frequency = <24000000>; interrupts = ; power-domains = <&disp_blk_ctrl IMX8MN_DISPBLK_PD_MIPI_DSI>; status = "disabled"; @@ -1142,6 +1129,21 @@ "lcdif-axi", "lcdif-apb", "lcdif-pix", "dsi-pclk", "dsi-ref", "csi-aclk", "csi-pclk"; + assigned-clocks = <&clk IMX8MN_CLK_DSI_CORE>, + <&clk IMX8MN_CLK_DSI_PHY_REF>, + <&clk IMX8MN_CLK_DISP_PIXEL>, + <&clk IMX8MN_CLK_DISP_AXI>, + <&clk IMX8MN_CLK_DISP_APB>; + assigned-clock-parents = <&clk IMX8MN_SYS_PLL1_266M>, + <&clk IMX8MN_CLK_24M>, + <&clk IMX8MN_VIDEO_PLL1_OUT>, + <&clk IMX8MN_SYS_PLL2_1000M>, + <&clk 
IMX8MN_SYS_PLL1_800M>; + assigned-clock-rates = <266000000>, + <24000000>, + <594000000>, + <500000000>, + <200000000>; #power-domain-cells = <1>; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi index f81391993354f484aa60380a4b23545b3cfb724e..428c60462e3d6be7c09f62ea51f48be069af802f 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi @@ -1211,13 +1211,6 @@ <&clk IMX8MP_CLK_MEDIA_APB_ROOT>, <&clk IMX8MP_CLK_MEDIA_AXI_ROOT>; clock-names = "pix", "axi", "disp_axi"; - assigned-clocks = <&clk IMX8MP_CLK_MEDIA_DISP1_PIX_ROOT>, - <&clk IMX8MP_CLK_MEDIA_AXI>, - <&clk IMX8MP_CLK_MEDIA_APB>; - assigned-clock-parents = <&clk IMX8MP_CLK_MEDIA_DISP1_PIX>, - <&clk IMX8MP_SYS_PLL2_1000M>, - <&clk IMX8MP_SYS_PLL1_800M>; - assigned-clock-rates = <594000000>, <500000000>, <200000000>; interrupts = ; power-domains = <&media_blk_ctrl IMX8MP_MEDIABLK_PD_LCDIF_1>; status = "disabled"; @@ -1237,11 +1230,6 @@ <&clk IMX8MP_CLK_MEDIA_APB_ROOT>, <&clk IMX8MP_CLK_MEDIA_AXI_ROOT>; clock-names = "pix", "axi", "disp_axi"; - assigned-clocks = <&clk IMX8MP_CLK_MEDIA_DISP2_PIX>, - <&clk IMX8MP_VIDEO_PLL1>; - assigned-clock-parents = <&clk IMX8MP_VIDEO_PLL1_OUT>, - <&clk IMX8MP_VIDEO_PLL1_REF_SEL>; - assigned-clock-rates = <0>, <1039500000>; power-domains = <&media_blk_ctrl IMX8MP_MEDIABLK_PD_LCDIF_2>; status = "disabled"; @@ -1296,11 +1284,16 @@ "disp1", "disp2", "isp", "phy"; assigned-clocks = <&clk IMX8MP_CLK_MEDIA_AXI>, - <&clk IMX8MP_CLK_MEDIA_APB>; + <&clk IMX8MP_CLK_MEDIA_APB>, + <&clk IMX8MP_CLK_MEDIA_DISP1_PIX>, + <&clk IMX8MP_CLK_MEDIA_DISP2_PIX>, + <&clk IMX8MP_VIDEO_PLL1>; assigned-clock-parents = <&clk IMX8MP_SYS_PLL2_1000M>, - <&clk IMX8MP_SYS_PLL1_800M>; - assigned-clock-rates = <500000000>, <200000000>; - + <&clk IMX8MP_SYS_PLL1_800M>, + <&clk IMX8MP_VIDEO_PLL1_OUT>, + <&clk IMX8MP_VIDEO_PLL1_OUT>; + assigned-clock-rates = <500000000>, <200000000>, + <0>, <0>, <1039500000>; #power-domain-cells = <1>; lvds_bridge: bridge@5c { diff --git a/arch/arm64/boot/dts/freescale/imx8qm-mek.dts b/arch/arm64/boot/dts/freescale/imx8qm-mek.dts index ce9d3f0b98fc0383667fe8ffd60b239eaab348a2..607cd6b4e9721f04b849347d6464efc8ba240753 100644 --- a/arch/arm64/boot/dts/freescale/imx8qm-mek.dts +++ b/arch/arm64/boot/dts/freescale/imx8qm-mek.dts @@ -82,8 +82,8 @@ pinctrl-0 = <&pinctrl_usdhc2>; bus-width = <4>; vmmc-supply = <®_usdhc2_vmmc>; - cd-gpios = <&lsio_gpio4 22 GPIO_ACTIVE_LOW>; - wp-gpios = <&lsio_gpio4 21 GPIO_ACTIVE_HIGH>; + cd-gpios = <&lsio_gpio5 22 GPIO_ACTIVE_LOW>; + wp-gpios = <&lsio_gpio5 21 GPIO_ACTIVE_HIGH>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/freescale/imx8x-colibri-eval-v3.dtsi b/arch/arm64/boot/dts/freescale/imx8x-colibri-eval-v3.dtsi index 7264d784ae723cfa977b0953b4fed03c7d679b35..9af769ab8cebbdab47accd034fdc331d36e694f9 100644 --- a/arch/arm64/boot/dts/freescale/imx8x-colibri-eval-v3.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8x-colibri-eval-v3.dtsi @@ -33,6 +33,12 @@ }; }; +&iomuxc { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_ext_io0>, <&pinctrl_hog0>, <&pinctrl_hog1>, + <&pinctrl_lpspi2_cs2>; +}; + /* Colibri SPI */ &lpspi2 { status = "okay"; diff --git a/arch/arm64/boot/dts/freescale/imx8x-colibri-iris.dtsi b/arch/arm64/boot/dts/freescale/imx8x-colibri-iris.dtsi index 5f30c88855e70769ebded3a2716eafa37f232cf0..f8953067bc3bb86096c83365f3f3a58327aa365d 100644 --- a/arch/arm64/boot/dts/freescale/imx8x-colibri-iris.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8x-colibri-iris.dtsi 
@@ -48,8 +48,7 @@ , /* SODIMM 101 */ , /* SODIMM 97 */ , /* SODIMM 85 */ - , /* SODIMM 79 */ - ; /* SODIMM 45 */ + ; /* SODIMM 79 */ }; pinctrl_uart1_forceoff: uart1forceoffgrp { diff --git a/arch/arm64/boot/dts/freescale/imx8x-colibri.dtsi b/arch/arm64/boot/dts/freescale/imx8x-colibri.dtsi index 7cad79102e1afec6b06bd91fae24408832e180fd..49d105eb4769197af85317886bbb4acd13ef3248 100644 --- a/arch/arm64/boot/dts/freescale/imx8x-colibri.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8x-colibri.dtsi @@ -363,10 +363,6 @@ /* TODO VPU Encoder/Decoder */ &iomuxc { - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_ext_io0>, <&pinctrl_hog0>, <&pinctrl_hog1>, - <&pinctrl_hog2>, <&pinctrl_lpspi2_cs2>; - /* On-module touch pen-down interrupt */ pinctrl_ad7879_int: ad7879intgrp { fsl,pins = ; @@ -499,8 +495,7 @@ }; pinctrl_hog1: hog1grp { - fsl,pins = , /* SODIMM 75 */ - ; /* SODIMM 93 */ + fsl,pins = ; /* SODIMM 93 */ }; pinctrl_hog2: hog2grp { @@ -774,3 +769,10 @@ fsl,pins = ; }; }; + +/* Delete peripherals which are not present on SOC, but are defined in imx8-ss-*.dtsi */ + +/delete-node/ &adc1; +/delete-node/ &adc1_lpcg; +/delete-node/ &dsp; +/delete-node/ &dsp_lpcg; diff --git a/arch/arm64/boot/dts/qcom/ipq5332.dtsi b/arch/arm64/boot/dts/qcom/ipq5332.dtsi index 12e0e179e139cc9d506f0a938dc0808ba6d36aae..af4d97143bcf53e45f8d67b836d15cbf8e8f8d10 100644 --- a/arch/arm64/boot/dts/qcom/ipq5332.dtsi +++ b/arch/arm64/boot/dts/qcom/ipq5332.dtsi @@ -73,6 +73,7 @@ L2_0: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; }; }; diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi index 9ff4e9d45065bfb8a72dc04507c45e5ab796c36a..f531797f26195f027205594083690eb7afb8c104 100644 --- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi +++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi @@ -83,7 +83,8 @@ L2_0: l2-cache { compatible = "cache"; - cache-level = <0x2>; + cache-level = <2>; + cache-unified; }; }; diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi index 84e715aa4310332c7e0ed7e2570a674f0bfe6fd6..5b2c1986c8f4f3c8696edd240edd63d69bd00169 100644 --- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi +++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi @@ -66,7 +66,8 @@ L2_0: l2-cache { compatible = "cache"; - cache-level = <0x2>; + cache-level = <2>; + cache-unified; }; }; diff --git a/arch/arm64/boot/dts/qcom/ipq9574.dtsi b/arch/arm64/boot/dts/qcom/ipq9574.dtsi index 3bb7435f5e7f08a05faad0c9b270f020798e8def..0ed19fbf7d87d1c7abdb8db7258b39d25660020b 100644 --- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi +++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi @@ -72,6 +72,7 @@ L2_0: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; }; }; diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi index 7e0fa37a3adf8d6c30d97d1915a2c8782352a8b0..834e0b66b7f2e68a477a9443894e80e9a1002dad 100644 --- a/arch/arm64/boot/dts/qcom/msm8916.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi @@ -180,6 +180,7 @@ L2_0: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; }; idle-states { diff --git a/arch/arm64/boot/dts/qcom/msm8953.dtsi b/arch/arm64/boot/dts/qcom/msm8953.dtsi index 602cb188a6354266d9108316aeccf697271f9ac4..d44cfa0471e9a6f0e37c2244d0ff349aba44ddb3 100644 --- a/arch/arm64/boot/dts/qcom/msm8953.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8953.dtsi @@ -153,11 +153,13 @@ L2_0: l2-cache-0 { compatible = "cache"; cache-level = <2>; + cache-unified; }; L2_1: l2-cache-1 { compatible = "cache"; cache-level = <2>; + 
cache-unified; }; }; diff --git a/arch/arm64/boot/dts/qcom/msm8976.dtsi b/arch/arm64/boot/dts/qcom/msm8976.dtsi index 1f0bd24a074a681de0beb52756b5f28614dfab41..f47fb8ea71e20049d5ca78996c14e7a31626065a 100644 --- a/arch/arm64/boot/dts/qcom/msm8976.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8976.dtsi @@ -193,11 +193,13 @@ l2_0: l2-cache0 { compatible = "cache"; cache-level = <2>; + cache-unified; }; l2_1: l2-cache1 { compatible = "cache"; cache-level = <2>; + cache-unified; }; }; diff --git a/arch/arm64/boot/dts/qcom/msm8994.dtsi b/arch/arm64/boot/dts/qcom/msm8994.dtsi index 2831966be960de32a651d01615685049a76f1014..bdc3f2ba1755e48b3e1b81cbd9ee0e790cfe2314 100644 --- a/arch/arm64/boot/dts/qcom/msm8994.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8994.dtsi @@ -52,6 +52,7 @@ L2_0: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; }; }; @@ -88,6 +89,7 @@ L2_1: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; }; }; diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi index 2b35cb3f52921662ca823d3be5dd52017f1884f1..30257c07e1279f71a2536aafb34573a245c32ed3 100644 --- a/arch/arm64/boot/dts/qcom/msm8996.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi @@ -53,8 +53,9 @@ #cooling-cells = <2>; next-level-cache = <&L2_0>; L2_0: l2-cache { - compatible = "cache"; - cache-level = <2>; + compatible = "cache"; + cache-level = <2>; + cache-unified; }; }; @@ -83,8 +84,9 @@ #cooling-cells = <2>; next-level-cache = <&L2_1>; L2_1: l2-cache { - compatible = "cache"; - cache-level = <2>; + compatible = "cache"; + cache-level = <2>; + cache-unified; }; }; diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi index b150437a83558c440e2745397d873fe01b3fc88c..3ec941fed14fedcd6c8c36a829e01d562394d6d6 100644 --- a/arch/arm64/boot/dts/qcom/msm8998.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi @@ -146,6 +146,7 @@ L2_0: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; }; }; @@ -190,6 +191,7 @@ L2_1: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; }; }; diff --git a/arch/arm64/boot/dts/qcom/qcm2290.dtsi b/arch/arm64/boot/dts/qcom/qcm2290.dtsi index ae5abc76bcc785ab63c8b803338ad5f06916b10b..b29bc4e4b837c6f083aa48c4e13dbbd2df8d4947 100644 --- a/arch/arm64/boot/dts/qcom/qcm2290.dtsi +++ b/arch/arm64/boot/dts/qcom/qcm2290.dtsi @@ -51,6 +51,7 @@ L2_0: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; }; }; diff --git a/arch/arm64/boot/dts/qcom/qcs404.dtsi b/arch/arm64/boot/dts/qcom/qcs404.dtsi index eefed585738cd7e5b250cbd892c6f132f2930002..972f753847e13316a9437a65f781b1f33f8be84f 100644 --- a/arch/arm64/boot/dts/qcom/qcs404.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs404.dtsi @@ -95,6 +95,7 @@ L2_0: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; }; idle-states { diff --git a/arch/arm64/boot/dts/qcom/qdu1000.dtsi b/arch/arm64/boot/dts/qcom/qdu1000.dtsi index 734438113bbae71a57472f1ed20023717bd6df6d..fb553f0bb17aae36b16b0c08dd5241409cffcab3 100644 --- a/arch/arm64/boot/dts/qcom/qdu1000.dtsi +++ b/arch/arm64/boot/dts/qcom/qdu1000.dtsi @@ -35,9 +35,13 @@ next-level-cache = <&L2_0>; L2_0: l2-cache { compatible = "cache"; + cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; L3_0: l3-cache { compatible = "cache"; + cache-level = <3>; + cache-unified; }; }; }; @@ -54,6 +58,8 @@ next-level-cache = <&L2_100>; L2_100: l2-cache { compatible = "cache"; + cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -70,6 +76,8 @@ 
next-level-cache = <&L2_200>; L2_200: l2-cache { compatible = "cache"; + cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -86,6 +94,8 @@ next-level-cache = <&L2_300>; L2_300: l2-cache { compatible = "cache"; + cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; diff --git a/arch/arm64/boot/dts/qcom/sa8155p-adp.dts b/arch/arm64/boot/dts/qcom/sa8155p-adp.dts index 339fea522509fae5987378c949e15871a1b88f41..15e1ae1c1a9774302bf44490e99cbb515cb61208 100644 --- a/arch/arm64/boot/dts/qcom/sa8155p-adp.dts +++ b/arch/arm64/boot/dts/qcom/sa8155p-adp.dts @@ -7,7 +7,7 @@ #include #include -#include "sm8150.dtsi" +#include "sa8155p.dtsi" #include "pmm8155au_1.dtsi" #include "pmm8155au_2.dtsi" diff --git a/arch/arm64/boot/dts/qcom/sa8155p.dtsi b/arch/arm64/boot/dts/qcom/sa8155p.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..ffb7ab695213a6009294420b150e476f604d614a --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sa8155p.dtsi @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * Copyright (c) 2023, Linaro Limited + * + * SA8155P is an automotive variant of SM8150, with some minor changes. + * Most notably, the RPMhPD setup differs: MMCX and LCX/LMX rails are gone, + * though the cmd-db doesn't reflect that and access attemps result in a bite. + */ + +#include "sm8150.dtsi" + +&dispcc { + power-domains = <&rpmhpd SA8155P_CX>; +}; + +&mdss_dsi0 { + power-domains = <&rpmhpd SA8155P_CX>; +}; + +&mdss_dsi1 { + power-domains = <&rpmhpd SA8155P_CX>; +}; + +&mdss_mdp { + power-domains = <&rpmhpd SA8155P_CX>; +}; + +&remoteproc_slpi { + power-domains = <&rpmhpd SA8155P_CX>, + <&rpmhpd SA8155P_MX>; +}; + +&rpmhpd { + /* + * The bindings were crafted such that SA8155P PDs match their + * SM8150 counterparts to make it more maintainable and only + * necessitate adjusting entries that actually differ + */ + compatible = "qcom,sa8155p-rpmhpd"; +}; diff --git a/arch/arm64/boot/dts/qcom/sa8775p.dtsi b/arch/arm64/boot/dts/qcom/sa8775p.dtsi index 2343df7e0ea4f36961ae305ddbaedcdc5157a287..c3310caf9f68c6451692efc1fba96f9bfc4b08e7 100644 --- a/arch/arm64/boot/dts/qcom/sa8775p.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8775p.dtsi @@ -42,9 +42,13 @@ next-level-cache = <&L2_0>; L2_0: l2-cache { compatible = "cache"; + cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; L3_0: l3-cache { compatible = "cache"; + cache-level = <3>; + cache-unified; }; }; }; @@ -58,6 +62,8 @@ next-level-cache = <&L2_1>; L2_1: l2-cache { compatible = "cache"; + cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -71,6 +77,8 @@ next-level-cache = <&L2_2>; L2_2: l2-cache { compatible = "cache"; + cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -84,6 +92,8 @@ next-level-cache = <&L2_3>; L2_3: l2-cache { compatible = "cache"; + cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -97,9 +107,13 @@ next-level-cache = <&L2_4>; L2_4: l2-cache { compatible = "cache"; + cache-level = <2>; + cache-unified; next-level-cache = <&L3_1>; L3_1: l3-cache { compatible = "cache"; + cache-level = <3>; + cache-unified; }; }; @@ -114,6 +128,8 @@ next-level-cache = <&L2_5>; L2_5: l2-cache { compatible = "cache"; + cache-level = <2>; + cache-unified; next-level-cache = <&L3_1>; }; }; @@ -127,6 +143,8 @@ next-level-cache = <&L2_6>; L2_6: l2-cache { compatible = "cache"; + cache-level = <2>; + cache-unified; next-level-cache = <&L3_1>; }; }; @@ -140,6 +158,8 @@ next-level-cache = <&L2_7>; L2_7: l2-cache { compatible = "cache"; + 
cache-level = <2>; + cache-unified; next-level-cache = <&L3_1>; }; }; diff --git a/arch/arm64/boot/dts/qcom/sc7180-idp.dts b/arch/arm64/boot/dts/qcom/sc7180-idp.dts index 9f052270e0908c8e48b8c724edded66fb663f323..299ef5dc225ab327b1cd9ef58a870309821c7763 100644 --- a/arch/arm64/boot/dts/qcom/sc7180-idp.dts +++ b/arch/arm64/boot/dts/qcom/sc7180-idp.dts @@ -393,6 +393,11 @@ qcom,spare-regs = <&tcsr_regs_2 0xb3e4>; }; +&scm { + /* TF-A firmware maps memory cached so mark dma-coherent to match. */ + dma-coherent; +}; + &sdhc_1 { status = "okay"; diff --git a/arch/arm64/boot/dts/qcom/sc7180-lite.dtsi b/arch/arm64/boot/dts/qcom/sc7180-lite.dtsi index d8ed1d7b4ec76224d87cadbb104c06ebb7bd7869..4b306a59d9bec16c42e0bc80d6f5191f49eab7fa 100644 --- a/arch/arm64/boot/dts/qcom/sc7180-lite.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7180-lite.dtsi @@ -16,3 +16,11 @@ &cpu6_opp12 { opp-peak-kBps = <8532000 23347200>; }; + +&cpu6_opp13 { + opp-peak-kBps = <8532000 23347200>; +}; + +&cpu6_opp14 { + opp-peak-kBps = <8532000 23347200>; +}; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi index ca6920de7ea879bbcdf302b51c76909aab7e092b..1472e7f108311c1c9017cb365fce60ace667e21d 100644 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi @@ -892,6 +892,11 @@ hp_i2c: &i2c9 { qcom,spare-regs = <&tcsr_regs_2 0xb3e4>; }; +&scm { + /* TF-A firmware maps memory cached so mark dma-coherent to match. */ + dma-coherent; +}; + &sdhc_1 { status = "okay"; diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi index ea1ffade1aa194ba5896680ac13083b7fd3cf24e..a65be760d1a7c0b29255c6e3e76cb4fc8d0f3bfd 100644 --- a/arch/arm64/boot/dts/qcom/sc7180.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi @@ -92,10 +92,12 @@ L2_0: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; L3_0: l3-cache { compatible = "cache"; cache-level = <3>; + cache-unified; }; }; }; @@ -120,6 +122,7 @@ L2_100: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -144,6 +147,7 @@ L2_200: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -168,6 +172,7 @@ L2_300: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -192,6 +197,7 @@ L2_400: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -216,6 +222,7 @@ L2_500: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -240,6 +247,7 @@ L2_600: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -264,6 +272,7 @@ L2_700: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -360,7 +369,7 @@ }; firmware { - scm { + scm: scm { compatible = "qcom,scm-sc7180", "qcom,scm"; }; }; diff --git a/arch/arm64/boot/dts/qcom/sc7280-chrome-common.dtsi b/arch/arm64/boot/dts/qcom/sc7280-chrome-common.dtsi index f562e4d2b6552c4922ffdafeb669285063e60b21..2e1cd219fc182204d4f1c8845701ca757750a0d0 100644 --- a/arch/arm64/boot/dts/qcom/sc7280-chrome-common.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7280-chrome-common.dtsi @@ -79,6 +79,11 @@ firmware-name = "ath11k/WCN6750/hw1.0/wpss.mdt"; }; +&scm { + /* TF-A firmware maps memory cached so mark dma-coherent to match. 
*/ + dma-coherent; +}; + &wifi { status = "okay"; diff --git a/arch/arm64/boot/dts/qcom/sc7280-idp.dtsi b/arch/arm64/boot/dts/qcom/sc7280-idp.dtsi index c6dc200c00cea42c891e130beed7fe74522e220f..21027042cf1332481ae5934f957297420749c012 100644 --- a/arch/arm64/boot/dts/qcom/sc7280-idp.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7280-idp.dtsi @@ -480,7 +480,6 @@ wcd_rx: codec@0,4 { compatible = "sdw20217010d00"; reg = <0 4>; - #sound-dai-cells = <1>; qcom,rx-port-mapping = <1 2 3 4 5>; }; }; @@ -491,7 +490,6 @@ wcd_tx: codec@0,3 { compatible = "sdw20217010d00"; reg = <0 3>; - #sound-dai-cells = <1>; qcom,tx-port-mapping = <1 2 3 4>; }; }; diff --git a/arch/arm64/boot/dts/qcom/sc7280-qcard.dtsi b/arch/arm64/boot/dts/qcom/sc7280-qcard.dtsi index 88b3586e389fad78a68342d11a4853352486b55a..9137db066d9e2581197b25edb4e7cad483b37513 100644 --- a/arch/arm64/boot/dts/qcom/sc7280-qcard.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7280-qcard.dtsi @@ -414,7 +414,6 @@ wcd_rx: codec@0,4 { compatible = "sdw20217010d00"; reg = <0 4>; - #sound-dai-cells = <1>; qcom,rx-port-mapping = <1 2 3 4 5>; }; }; @@ -423,7 +422,6 @@ wcd_tx: codec@0,3 { compatible = "sdw20217010d00"; reg = <0 3>; - #sound-dai-cells = <1>; qcom,tx-port-mapping = <1 2 3 4>; }; }; diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi index 31728f4614223b1b9ce440f709d16609e624a42f..36f0bb9b3cbb46bbeb173dc6f9f968ad317d32c8 100644 --- a/arch/arm64/boot/dts/qcom/sc7280.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi @@ -182,10 +182,12 @@ L2_0: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; L3_0: l3-cache { compatible = "cache"; cache-level = <3>; + cache-unified; }; }; }; @@ -208,6 +210,7 @@ L2_100: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -230,6 +233,7 @@ L2_200: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -252,6 +256,7 @@ L2_300: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -274,6 +279,7 @@ L2_400: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -296,6 +302,7 @@ L2_500: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -318,6 +325,7 @@ L2_600: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -340,6 +348,7 @@ L2_700: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -647,7 +656,7 @@ }; firmware { - scm { + scm: scm { compatible = "qcom,scm-sc7280", "qcom,scm"; }; }; diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi index 8fa9fbfe5d0009b44b40e02f16e980dd848a8a2b..cc4aef21e6172e6b7399da43b09cd1896daa62e6 100644 --- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi +++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi @@ -58,10 +58,12 @@ L2_0: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; L3_0: l3-cache { - compatible = "cache"; - cache-level = <3>; + compatible = "cache"; + cache-level = <3>; + cache-unified; }; }; }; @@ -83,6 +85,7 @@ L2_100: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -104,6 +107,7 @@ L2_200: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -125,6 +129,7 @@ 
L2_300: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -146,6 +151,7 @@ L2_400: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -167,6 +173,7 @@ L2_500: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -188,6 +195,7 @@ L2_600: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -209,6 +217,7 @@ L2_700: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -2726,6 +2735,7 @@ pins = "gpio7"; function = "dmic1_data"; drive-strength = <8>; + input-enable; }; }; @@ -2743,6 +2753,7 @@ function = "dmic1_data"; drive-strength = <2>; bias-pull-down; + input-enable; }; }; @@ -2758,6 +2769,7 @@ pins = "gpio9"; function = "dmic2_data"; drive-strength = <8>; + input-enable; }; }; @@ -2775,6 +2787,7 @@ function = "dmic2_data"; drive-strength = <2>; bias-pull-down; + input-enable; }; }; @@ -3982,6 +3995,7 @@ qcom,tcs-config = , , , ; label = "apps_rsc"; + power-domains = <&CLUSTER_PD>; apps_bcm_voter: bcm-voter { compatible = "qcom,bcm-voter"; diff --git a/arch/arm64/boot/dts/qcom/sdm630.dtsi b/arch/arm64/boot/dts/qcom/sdm630.dtsi index 37e72b1c56dce7430aeb63f481afb22a15e94f4b..eaead2f7beb4ea93a3e2c498c4455fc13fd0558c 100644 --- a/arch/arm64/boot/dts/qcom/sdm630.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm630.dtsi @@ -63,6 +63,7 @@ L2_1: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; }; }; @@ -127,6 +128,7 @@ L2_0: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; }; }; diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi index c5f839dd1c6e3ab3fe9e44ba90da467db99f8e8a..b61e13db89bd58fcfb012d0c0d8e5f1e8ff7fb8a 100644 --- a/arch/arm64/boot/dts/qcom/sdm670.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi @@ -41,8 +41,12 @@ L2_0: l2-cache { compatible = "cache"; next-level-cache = <&L3_0>; + cache-level = <2>; + cache-unified; L3_0: l3-cache { - compatible = "cache"; + compatible = "cache"; + cache-level = <3>; + cache-unified; }; }; }; @@ -57,6 +61,8 @@ next-level-cache = <&L2_100>; L2_100: l2-cache { compatible = "cache"; + cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -71,6 +77,8 @@ next-level-cache = <&L2_200>; L2_200: l2-cache { compatible = "cache"; + cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -85,6 +93,8 @@ next-level-cache = <&L2_300>; L2_300: l2-cache { compatible = "cache"; + cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -99,6 +109,8 @@ next-level-cache = <&L2_400>; L2_400: l2-cache { compatible = "cache"; + cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -113,6 +125,8 @@ next-level-cache = <&L2_500>; L2_500: l2-cache { compatible = "cache"; + cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -127,6 +141,8 @@ next-level-cache = <&L2_600>; L2_600: l2-cache { compatible = "cache"; + cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -141,6 +157,8 @@ next-level-cache = <&L2_700>; L2_700: l2-cache { compatible = "cache"; + cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi index 90424442bb4acab8149488c0f679032389361572..cdeb05e95674e9d490b37b26e008f98599b8476e 100644 --- 
a/arch/arm64/boot/dts/qcom/sdm845.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi @@ -108,10 +108,12 @@ L2_0: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; L3_0: l3-cache { - compatible = "cache"; - cache-level = <3>; + compatible = "cache"; + cache-level = <3>; + cache-unified; }; }; }; @@ -135,6 +137,7 @@ L2_100: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -158,6 +161,7 @@ L2_200: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -181,6 +185,7 @@ L2_300: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -204,6 +209,7 @@ L2_400: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -227,6 +233,7 @@ L2_500: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -250,6 +257,7 @@ L2_600: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -273,6 +281,7 @@ L2_700: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm6115.dtsi b/arch/arm64/boot/dts/qcom/sm6115.dtsi index 631ca327e064bc6c0a9269a2b3eaaf5170d43f34..43f31c1b9d5a74952bd0b324553506c601b76e3e 100644 --- a/arch/arm64/boot/dts/qcom/sm6115.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6115.dtsi @@ -50,6 +50,7 @@ L2_0: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; }; }; @@ -102,6 +103,7 @@ L2_1: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm6125.dtsi b/arch/arm64/boot/dts/qcom/sm6125.dtsi index 9484752fb85066e373a86afea0c11ce3844ebe63..2aa093d16858a68e5c4aa1f1a663f850d2182930 100644 --- a/arch/arm64/boot/dts/qcom/sm6125.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6125.dtsi @@ -47,6 +47,7 @@ L2_0: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; }; }; @@ -87,6 +88,7 @@ L2_1: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm6350.dtsi b/arch/arm64/boot/dts/qcom/sm6350.dtsi index 18c4616848cefcb58ef8ab38fa0e2ee68e2294ef..ad34301f6cddf9fd62f1297e28d377b6bc398f1c 100644 --- a/arch/arm64/boot/dts/qcom/sm6350.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6350.dtsi @@ -60,10 +60,12 @@ L2_0: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; L3_0: l3-cache { compatible = "cache"; cache-level = <3>; + cache-unified; }; }; }; @@ -86,6 +88,7 @@ L2_100: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -108,6 +111,7 @@ L2_200: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -130,6 +134,7 @@ L2_300: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -152,6 +157,7 @@ L2_400: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -174,6 +180,7 @@ L2_500: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -196,6 +203,7 @@ L2_600: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -218,6 +226,7 @@ L2_700: l2-cache { compatible = "cache"; cache-level = <2>; + 
cache-unified; next-level-cache = <&L3_0>; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm6375-sony-xperia-murray-pdx225.dts b/arch/arm64/boot/dts/qcom/sm6375-sony-xperia-murray-pdx225.dts index 8220e6f441171166abff3656e3a31642ce8f145f..b2f1bb1d58e974080fc1d23f799a15aef0bdf8a6 100644 --- a/arch/arm64/boot/dts/qcom/sm6375-sony-xperia-murray-pdx225.dts +++ b/arch/arm64/boot/dts/qcom/sm6375-sony-xperia-murray-pdx225.dts @@ -178,12 +178,12 @@ }; &remoteproc_adsp { - firmware-name = "qcom/Sony/murray/adsp.mbn"; + firmware-name = "qcom/sm6375/Sony/murray/adsp.mbn"; status = "okay"; }; &remoteproc_cdsp { - firmware-name = "qcom/Sony/murray/cdsp.mbn"; + firmware-name = "qcom/sm6375/Sony/murray/cdsp.mbn"; status = "okay"; }; diff --git a/arch/arm64/boot/dts/qcom/sm6375.dtsi b/arch/arm64/boot/dts/qcom/sm6375.dtsi index ae9b6bc446cbc1ae97a6d32919f17d8d03d381ee..f8d9c34d3b2f4f55e11cf3ba4beaa4ad0e0ac5b7 100644 --- a/arch/arm64/boot/dts/qcom/sm6375.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6375.dtsi @@ -48,10 +48,14 @@ power-domain-names = "psci"; #cooling-cells = <2>; L2_0: l2-cache { - compatible = "cache"; - next-level-cache = <&L3_0>; + compatible = "cache"; + cache-level = <2>; + cache-unified; + next-level-cache = <&L3_0>; L3_0: l3-cache { - compatible = "cache"; + compatible = "cache"; + cache-level = <3>; + cache-unified; }; }; }; @@ -68,8 +72,10 @@ power-domain-names = "psci"; #cooling-cells = <2>; L2_100: l2-cache { - compatible = "cache"; - next-level-cache = <&L3_0>; + compatible = "cache"; + cache-level = <2>; + cache-unified; + next-level-cache = <&L3_0>; }; }; @@ -85,8 +91,10 @@ power-domain-names = "psci"; #cooling-cells = <2>; L2_200: l2-cache { - compatible = "cache"; - next-level-cache = <&L3_0>; + compatible = "cache"; + cache-level = <2>; + cache-unified; + next-level-cache = <&L3_0>; }; }; @@ -102,8 +110,10 @@ power-domain-names = "psci"; #cooling-cells = <2>; L2_300: l2-cache { - compatible = "cache"; - next-level-cache = <&L3_0>; + compatible = "cache"; + cache-level = <2>; + cache-unified; + next-level-cache = <&L3_0>; }; }; @@ -119,8 +129,10 @@ power-domain-names = "psci"; #cooling-cells = <2>; L2_400: l2-cache { - compatible = "cache"; - next-level-cache = <&L3_0>; + compatible = "cache"; + cache-level = <2>; + cache-unified; + next-level-cache = <&L3_0>; }; }; @@ -136,8 +148,10 @@ power-domain-names = "psci"; #cooling-cells = <2>; L2_500: l2-cache { - compatible = "cache"; - next-level-cache = <&L3_0>; + compatible = "cache"; + cache-level = <2>; + cache-unified; + next-level-cache = <&L3_0>; }; }; @@ -153,8 +167,10 @@ power-domain-names = "psci"; #cooling-cells = <2>; L2_600: l2-cache { - compatible = "cache"; - next-level-cache = <&L3_0>; + compatible = "cache"; + cache-level = <2>; + cache-unified; + next-level-cache = <&L3_0>; }; }; @@ -170,8 +186,10 @@ power-domain-names = "psci"; #cooling-cells = <2>; L2_700: l2-cache { - compatible = "cache"; - next-level-cache = <&L3_0>; + compatible = "cache"; + cache-level = <2>; + cache-unified; + next-level-cache = <&L3_0>; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi index 2273fa57198842ef4c700ea464bd24ed9fbde9bb..27dcda0d4288fcedcfff7b633761fc6d9ef567b8 100644 --- a/arch/arm64/boot/dts/qcom/sm8150.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi @@ -63,10 +63,12 @@ L2_0: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; L3_0: l3-cache { - compatible = "cache"; - cache-level = <3>; + compatible = "cache"; + cache-level = <3>; + 
cache-unified; }; }; }; @@ -90,6 +92,7 @@ L2_100: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -113,6 +116,7 @@ L2_200: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -136,6 +140,7 @@ L2_300: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -159,6 +164,7 @@ L2_400: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -182,6 +188,7 @@ L2_500: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -205,6 +212,7 @@ L2_600: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -228,6 +236,7 @@ L2_700: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm8250-xiaomi-elish-boe.dts b/arch/arm64/boot/dts/qcom/sm8250-xiaomi-elish-boe.dts index 8b2ae39950ffa75a60cde3e9c313fad34f980605..de6101ddebe7d552fed84a8e0526458158c46850 100644 --- a/arch/arm64/boot/dts/qcom/sm8250-xiaomi-elish-boe.dts +++ b/arch/arm64/boot/dts/qcom/sm8250-xiaomi-elish-boe.dts @@ -13,6 +13,6 @@ }; &display_panel { - compatible = "xiaomi,elish-boe-nt36523"; + compatible = "xiaomi,elish-boe-nt36523", "novatek,nt36523"; status = "okay"; }; diff --git a/arch/arm64/boot/dts/qcom/sm8250-xiaomi-elish-csot.dts b/arch/arm64/boot/dts/qcom/sm8250-xiaomi-elish-csot.dts index a4d5341495cf8e4746d57eda194c20ea9d9c96e8..4cffe9c703df872e56dea51f0629d5f8f2ae04e4 100644 --- a/arch/arm64/boot/dts/qcom/sm8250-xiaomi-elish-csot.dts +++ b/arch/arm64/boot/dts/qcom/sm8250-xiaomi-elish-csot.dts @@ -13,6 +13,6 @@ }; &display_panel { - compatible = "xiaomi,elish-csot-nt36523"; + compatible = "xiaomi,elish-csot-nt36523", "novatek,nt36523"; status = "okay"; }; diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi index ebcb481571c28267f0938b6b97394140fbf5a36f..3efdc03ed0f1107860f6ef8aa95bcf6a7e31b4fc 100644 --- a/arch/arm64/boot/dts/qcom/sm8350.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi @@ -58,12 +58,14 @@ power-domain-names = "psci"; #cooling-cells = <2>; L2_0: l2-cache { - compatible = "cache"; - cache-level = <2>; - next-level-cache = <&L3_0>; + compatible = "cache"; + cache-level = <2>; + cache-unified; + next-level-cache = <&L3_0>; L3_0: l3-cache { - compatible = "cache"; - cache-level = <3>; + compatible = "cache"; + cache-level = <3>; + cache-unified; }; }; }; @@ -80,9 +82,10 @@ power-domain-names = "psci"; #cooling-cells = <2>; L2_100: l2-cache { - compatible = "cache"; - cache-level = <2>; - next-level-cache = <&L3_0>; + compatible = "cache"; + cache-level = <2>; + cache-unified; + next-level-cache = <&L3_0>; }; }; @@ -98,9 +101,10 @@ power-domain-names = "psci"; #cooling-cells = <2>; L2_200: l2-cache { - compatible = "cache"; - cache-level = <2>; - next-level-cache = <&L3_0>; + compatible = "cache"; + cache-level = <2>; + cache-unified; + next-level-cache = <&L3_0>; }; }; @@ -116,9 +120,10 @@ power-domain-names = "psci"; #cooling-cells = <2>; L2_300: l2-cache { - compatible = "cache"; - cache-level = <2>; - next-level-cache = <&L3_0>; + compatible = "cache"; + cache-level = <2>; + cache-unified; + next-level-cache = <&L3_0>; }; }; @@ -134,9 +139,10 @@ power-domain-names = "psci"; #cooling-cells = <2>; L2_400: l2-cache { - compatible = "cache"; - cache-level = <2>; - next-level-cache = <&L3_0>; 
+ compatible = "cache"; + cache-level = <2>; + cache-unified; + next-level-cache = <&L3_0>; }; }; @@ -152,9 +158,10 @@ power-domain-names = "psci"; #cooling-cells = <2>; L2_500: l2-cache { - compatible = "cache"; - cache-level = <2>; - next-level-cache = <&L3_0>; + compatible = "cache"; + cache-level = <2>; + cache-unified; + next-level-cache = <&L3_0>; }; }; @@ -170,9 +177,10 @@ power-domain-names = "psci"; #cooling-cells = <2>; L2_600: l2-cache { - compatible = "cache"; - cache-level = <2>; - next-level-cache = <&L3_0>; + compatible = "cache"; + cache-level = <2>; + cache-unified; + next-level-cache = <&L3_0>; }; }; @@ -188,9 +196,10 @@ power-domain-names = "psci"; #cooling-cells = <2>; L2_700: l2-cache { - compatible = "cache"; - cache-level = <2>; - next-level-cache = <&L3_0>; + compatible = "cache"; + cache-level = <2>; + cache-unified; + next-level-cache = <&L3_0>; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi index 595533aeafc406185ab470f3d740cdf6ccb263b2..d59ea8ee711143ad7aae301c3317d6b2bfb6f14a 100644 --- a/arch/arm64/boot/dts/qcom/sm8450.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi @@ -57,12 +57,14 @@ #cooling-cells = <2>; clocks = <&cpufreq_hw 0>; L2_0: l2-cache { - compatible = "cache"; - cache-level = <2>; - next-level-cache = <&L3_0>; + compatible = "cache"; + cache-level = <2>; + cache-unified; + next-level-cache = <&L3_0>; L3_0: l3-cache { - compatible = "cache"; - cache-level = <3>; + compatible = "cache"; + cache-level = <3>; + cache-unified; }; }; }; @@ -79,9 +81,10 @@ #cooling-cells = <2>; clocks = <&cpufreq_hw 0>; L2_100: l2-cache { - compatible = "cache"; - cache-level = <2>; - next-level-cache = <&L3_0>; + compatible = "cache"; + cache-level = <2>; + cache-unified; + next-level-cache = <&L3_0>; }; }; @@ -97,9 +100,10 @@ #cooling-cells = <2>; clocks = <&cpufreq_hw 0>; L2_200: l2-cache { - compatible = "cache"; - cache-level = <2>; - next-level-cache = <&L3_0>; + compatible = "cache"; + cache-level = <2>; + cache-unified; + next-level-cache = <&L3_0>; }; }; @@ -115,9 +119,10 @@ #cooling-cells = <2>; clocks = <&cpufreq_hw 0>; L2_300: l2-cache { - compatible = "cache"; - cache-level = <2>; - next-level-cache = <&L3_0>; + compatible = "cache"; + cache-level = <2>; + cache-unified; + next-level-cache = <&L3_0>; }; }; @@ -133,9 +138,10 @@ #cooling-cells = <2>; clocks = <&cpufreq_hw 1>; L2_400: l2-cache { - compatible = "cache"; - cache-level = <2>; - next-level-cache = <&L3_0>; + compatible = "cache"; + cache-level = <2>; + cache-unified; + next-level-cache = <&L3_0>; }; }; @@ -151,9 +157,10 @@ #cooling-cells = <2>; clocks = <&cpufreq_hw 1>; L2_500: l2-cache { - compatible = "cache"; - cache-level = <2>; - next-level-cache = <&L3_0>; + compatible = "cache"; + cache-level = <2>; + cache-unified; + next-level-cache = <&L3_0>; }; }; @@ -169,9 +176,10 @@ #cooling-cells = <2>; clocks = <&cpufreq_hw 1>; L2_600: l2-cache { - compatible = "cache"; - cache-level = <2>; - next-level-cache = <&L3_0>; + compatible = "cache"; + cache-level = <2>; + cache-unified; + next-level-cache = <&L3_0>; }; }; @@ -187,9 +195,10 @@ #cooling-cells = <2>; clocks = <&cpufreq_hw 2>; L2_700: l2-cache { - compatible = "cache"; - cache-level = <2>; - next-level-cache = <&L3_0>; + compatible = "cache"; + cache-level = <2>; + cache-unified; + next-level-cache = <&L3_0>; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm8550.dtsi b/arch/arm64/boot/dts/qcom/sm8550.dtsi index 6e9bad8f6f33ebe74cfab95e457b18bd9af45fc2..558cbc4307080407e63a63533ed634b31c12bec6 100644 
--- a/arch/arm64/boot/dts/qcom/sm8550.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8550.dtsi @@ -80,10 +80,12 @@ L2_0: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; L3_0: l3-cache { compatible = "cache"; cache-level = <3>; + cache-unified; }; }; }; @@ -104,6 +106,7 @@ L2_100: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -124,6 +127,7 @@ L2_200: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -144,6 +148,7 @@ L2_300: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -164,6 +169,7 @@ L2_400: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -184,6 +190,7 @@ L2_500: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -204,6 +211,7 @@ L2_600: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -224,6 +232,7 @@ L2_700: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; next-level-cache = <&L3_0>; }; }; @@ -2022,7 +2031,7 @@ qcom,din-ports = <4>; qcom,dout-ports = <9>; - qcom,ports-sinterval = <0x07 0x1f 0x3f 0x07 0x1f 0x3f 0x18f 0xff 0xff 0x0f 0x0f 0xff 0x31f>; + qcom,ports-sinterval = /bits/ 16 <0x07 0x1f 0x3f 0x07 0x1f 0x3f 0x18f 0xff 0xff 0x0f 0x0f 0xff 0x31f>; qcom,ports-offset1 = /bits/ 8 <0x01 0x03 0x05 0x02 0x04 0x15 0x00 0xff 0xff 0x06 0x0d 0xff 0x00>; qcom,ports-offset2 = /bits/ 8 <0xff 0x07 0x1f 0xff 0x07 0x1f 0xff 0xff 0xff 0xff 0xff 0xff 0xff>; qcom,ports-hstart = /bits/ 8 <0xff 0xff 0xff 0xff 0xff 0xff 0x08 0xff 0xff 0xff 0xff 0xff 0x0f>; @@ -2068,7 +2077,7 @@ qcom,din-ports = <0>; qcom,dout-ports = <10>; - qcom,ports-sinterval = <0x03 0x3f 0x1f 0x07 0x00 0x18f 0xff 0xff 0xff 0xff>; + qcom,ports-sinterval = /bits/ 16 <0x03 0x3f 0x1f 0x07 0x00 0x18f 0xff 0xff 0xff 0xff>; qcom,ports-offset1 = /bits/ 8 <0x00 0x00 0x0b 0x01 0x00 0x00 0xff 0xff 0xff 0xff>; qcom,ports-offset2 = /bits/ 8 <0x00 0x00 0x0b 0x00 0x00 0x00 0xff 0xff 0xff 0xff>; qcom,ports-hstart = /bits/ 8 <0xff 0x03 0xff 0xff 0xff 0x08 0xff 0xff 0xff 0xff>; @@ -2133,7 +2142,7 @@ qcom,din-ports = <4>; qcom,dout-ports = <9>; - qcom,ports-sinterval = <0x07 0x1f 0x3f 0x07 0x1f 0x3f 0x18f 0xff 0xff 0x0f 0x0f 0xff 0x31f>; + qcom,ports-sinterval = /bits/ 16 <0x07 0x1f 0x3f 0x07 0x1f 0x3f 0x18f 0xff 0xff 0x0f 0x0f 0xff 0x31f>; qcom,ports-offset1 = /bits/ 8 <0x01 0x03 0x05 0x02 0x04 0x15 0x00 0xff 0xff 0x06 0x0d 0xff 0x00>; qcom,ports-offset2 = /bits/ 8 <0xff 0x07 0x1f 0xff 0x07 0x1f 0xff 0xff 0xff 0xff 0xff 0xff 0xff>; qcom,ports-hstart = /bits/ 8 <0xff 0xff 0xff 0xff 0xff 0xff 0x08 0xff 0xff 0xff 0xff 0xff 0x0f>; @@ -3762,9 +3771,16 @@ system-cache-controller@25000000 { compatible = "qcom,sm8550-llcc"; - reg = <0 0x25000000 0 0x800000>, + reg = <0 0x25000000 0 0x200000>, + <0 0x25200000 0 0x200000>, + <0 0x25400000 0 0x200000>, + <0 0x25600000 0 0x200000>, <0 0x25800000 0 0x200000>; - reg-names = "llcc_base", "llcc_broadcast_base"; + reg-names = "llcc0_base", + "llcc1_base", + "llcc2_base", + "llcc3_base", + "llcc_broadcast_base"; interrupts = ; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3308.dtsi b/arch/arm64/boot/dts/rockchip/rk3308.dtsi index dd228a256a32a80230955f7ccc356b6755bc8140..2ae4bb7d5e62a3bb83edf2b3929cc88f880c5902 100644 --- a/arch/arm64/boot/dts/rockchip/rk3308.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3308.dtsi @@ -97,6 
+97,7 @@ l2: l2-cache { compatible = "cache"; cache-level = <2>; + cache-unified; }; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts index f69a38f42d2d54e4e10092c63b16a5775d71f601..0a27fa5271f57684a930d4a0cd9ded6dd74d1eb2 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts @@ -37,7 +37,8 @@ vin-supply = <&vcc_io>; }; - vcc_host_5v: vcc-host-5v-regulator { + /* Common enable line for all of the rails mentioned in the labels */ + vcc_host_5v: vcc_host1_5v: vcc_otg_5v: vcc-host-5v-regulator { compatible = "regulator-fixed"; gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_LOW>; pinctrl-names = "default"; @@ -48,17 +49,6 @@ vin-supply = <&vcc_sys>; }; - vcc_host1_5v: vcc_otg_5v: vcc-host1-5v-regulator { - compatible = "regulator-fixed"; - gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_LOW>; - pinctrl-names = "default"; - pinctrl-0 = <&usb20_host_drv>; - regulator-name = "vcc_host1_5v"; - regulator-always-on; - regulator-boot-on; - vin-supply = <&vcc_sys>; - }; - vcc_sys: vcc-sys { compatible = "regulator-fixed"; regulator-name = "vcc_sys"; diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi index 6d7a7bf72ac7eeb8992b823a22688a9fdde369c1..e729e7a22b23a6a2e93374665d6f1ce2d03e9596 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi @@ -103,6 +103,7 @@ l2: l2-cache0 { compatible = "cache"; cache-level = <2>; + cache-unified; }; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3566-soquartz-cm4.dts b/arch/arm64/boot/dts/rockchip/rk3566-soquartz-cm4.dts index 263ce40770ddeda22b02c2f2e2f807947c8c0ee4..cddf6cd2fecb1bf34458a98adc93b59560f98fde 100644 --- a/arch/arm64/boot/dts/rockchip/rk3566-soquartz-cm4.dts +++ b/arch/arm64/boot/dts/rockchip/rk3566-soquartz-cm4.dts @@ -28,6 +28,16 @@ regulator-max-microvolt = <5000000>; vin-supply = <&vcc12v_dcin>; }; + + vcc_sd_pwr: vcc-sd-pwr-regulator { + compatible = "regulator-fixed"; + regulator-name = "vcc_sd_pwr"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&vcc3v3_sys>; + }; }; /* phy for pcie */ @@ -130,13 +140,7 @@ }; &sdmmc0 { - vmmc-supply = <&sdmmc_pwr>; - status = "okay"; -}; - -&sdmmc_pwr { - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; + vmmc-supply = <&vcc_sd_pwr>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi b/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi index 102e448bc026a78a3783dd9530c4f12a0aad0e09..31aa2b8efe39314087190027c495cb2b4a26c163 100644 --- a/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi @@ -104,16 +104,6 @@ regulator-max-microvolt = <3300000>; vin-supply = <&vcc5v0_sys>; }; - - sdmmc_pwr: sdmmc-pwr-regulator { - compatible = "regulator-fixed"; - enable-active-high; - gpio = <&gpio0 RK_PA5 GPIO_ACTIVE_HIGH>; - pinctrl-names = "default"; - pinctrl-0 = <&sdmmc_pwr_h>; - regulator-name = "sdmmc_pwr"; - status = "disabled"; - }; }; &cpu0 { @@ -155,6 +145,19 @@ status = "disabled"; }; +&gpio0 { + nextrst-hog { + gpio-hog; + /* + * GPIO_ACTIVE_LOW + output-low here means that the pin is set + * to high, because output-low decides the value pre-inversion. 
+ */ + gpios = ; + line-name = "nEXTRST"; + output-low; + }; +}; + &gpu { mali-supply = <&vdd_gpu>; status = "okay"; @@ -538,12 +541,6 @@ rockchip,pins = <2 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>; }; }; - - sdmmc-pwr { - sdmmc_pwr_h: sdmmc-pwr-h { - rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>; - }; - }; }; &pmu_io_domains { diff --git a/arch/arm64/boot/dts/rockchip/rk3568-nanopi-r5c.dts b/arch/arm64/boot/dts/rockchip/rk3568-nanopi-r5c.dts index f70ca9f0470ad59dc9ff41a9f9f6a27b423cf7d0..c718b8dbb9c6bc0b5055aafa6286a2925efe4d1c 100644 --- a/arch/arm64/boot/dts/rockchip/rk3568-nanopi-r5c.dts +++ b/arch/arm64/boot/dts/rockchip/rk3568-nanopi-r5c.dts @@ -106,7 +106,7 @@ rockchip-key { reset_button_pin: reset-button-pin { - rockchip,pins = <4 RK_PA0 RK_FUNC_GPIO &pcfg_pull_up>; + rockchip,pins = <0 RK_PB7 RK_FUNC_GPIO &pcfg_pull_up>; }; }; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3568-nanopi-r5s.dts b/arch/arm64/boot/dts/rockchip/rk3568-nanopi-r5s.dts index 2a1118f15c2913f32d96046918f2735625e546fc..b6ad8328c7ebc4102552d6374ea548b5bcd8c9d5 100644 --- a/arch/arm64/boot/dts/rockchip/rk3568-nanopi-r5s.dts +++ b/arch/arm64/boot/dts/rockchip/rk3568-nanopi-r5s.dts @@ -134,4 +134,3 @@ }; }; }; - diff --git a/arch/arm64/boot/dts/rockchip/rk3568.dtsi b/arch/arm64/boot/dts/rockchip/rk3568.dtsi index ba67b58f05b790c55d63427aaa87215441b6f082..f1be76a54ceb0cb0730f444c69c3da73e07194c9 100644 --- a/arch/arm64/boot/dts/rockchip/rk3568.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3568.dtsi @@ -94,9 +94,10 @@ power-domains = <&power RK3568_PD_PIPE>; reg = <0x3 0xc0400000 0x0 0x00400000>, <0x0 0xfe270000 0x0 0x00010000>, - <0x3 0x7f000000 0x0 0x01000000>; - ranges = <0x01000000 0x0 0x3ef00000 0x3 0x7ef00000 0x0 0x00100000>, - <0x02000000 0x0 0x00000000 0x3 0x40000000 0x0 0x3ef00000>; + <0x0 0xf2000000 0x0 0x00100000>; + ranges = <0x01000000 0x0 0xf2100000 0x0 0xf2100000 0x0 0x00100000>, + <0x02000000 0x0 0xf2200000 0x0 0xf2200000 0x0 0x01e00000>, + <0x03000000 0x0 0x40000000 0x3 0x40000000 0x0 0x40000000>; reg-names = "dbi", "apb", "config"; resets = <&cru SRST_PCIE30X1_POWERUP>; reset-names = "pipe"; @@ -146,9 +147,10 @@ power-domains = <&power RK3568_PD_PIPE>; reg = <0x3 0xc0800000 0x0 0x00400000>, <0x0 0xfe280000 0x0 0x00010000>, - <0x3 0xbf000000 0x0 0x01000000>; - ranges = <0x01000000 0x0 0x3ef00000 0x3 0xbef00000 0x0 0x00100000>, - <0x02000000 0x0 0x00000000 0x3 0x80000000 0x0 0x3ef00000>; + <0x0 0xf0000000 0x0 0x00100000>; + ranges = <0x01000000 0x0 0xf0100000 0x0 0xf0100000 0x0 0x00100000>, + <0x02000000 0x0 0xf0200000 0x0 0xf0200000 0x0 0x01e00000>, + <0x03000000 0x0 0x40000000 0x3 0x80000000 0x0 0x40000000>; reg-names = "dbi", "apb", "config"; resets = <&cru SRST_PCIE30X2_POWERUP>; reset-names = "pipe"; diff --git a/arch/arm64/boot/dts/rockchip/rk356x.dtsi b/arch/arm64/boot/dts/rockchip/rk356x.dtsi index f62e0fd881a958bb36d989b9491eefc026d12819..61680c7ac4899c3f77ede57c4d4f3b31747fc1e4 100644 --- a/arch/arm64/boot/dts/rockchip/rk356x.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk356x.dtsi @@ -952,7 +952,7 @@ compatible = "rockchip,rk3568-pcie"; reg = <0x3 0xc0000000 0x0 0x00400000>, <0x0 0xfe260000 0x0 0x00010000>, - <0x3 0x3f000000 0x0 0x01000000>; + <0x0 0xf4000000 0x0 0x00100000>; reg-names = "dbi", "apb", "config"; interrupts = , , @@ -982,8 +982,9 @@ phys = <&combphy2 PHY_TYPE_PCIE>; phy-names = "pcie-phy"; power-domains = <&power RK3568_PD_PIPE>; - ranges = <0x01000000 0x0 0x3ef00000 0x3 0x3ef00000 0x0 0x00100000 - 0x02000000 0x0 0x00000000 0x3 0x00000000 0x0 0x3ef00000>; + ranges = 
<0x01000000 0x0 0xf4100000 0x0 0xf4100000 0x0 0x00100000>, + <0x02000000 0x0 0xf4200000 0x0 0xf4200000 0x0 0x01e00000>, + <0x03000000 0x0 0x40000000 0x3 0x00000000 0x0 0x40000000>; resets = <&cru SRST_PCIE20_POWERUP>; reset-names = "pipe"; #address-cells = <3>; diff --git a/arch/arm64/boot/dts/rockchip/rk3588s.dtsi b/arch/arm64/boot/dts/rockchip/rk3588s.dtsi index 657c019d27fa9a54012ccbfac0a458656ba1a92d..a3124bd2e092cd5fcb9f5b6ecd1ad089b77afb6b 100644 --- a/arch/arm64/boot/dts/rockchip/rk3588s.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3588s.dtsi @@ -229,6 +229,7 @@ cache-line-size = <64>; cache-sets = <512>; cache-level = <2>; + cache-unified; next-level-cache = <&l3_cache>; }; @@ -238,6 +239,7 @@ cache-line-size = <64>; cache-sets = <512>; cache-level = <2>; + cache-unified; next-level-cache = <&l3_cache>; }; @@ -247,6 +249,7 @@ cache-line-size = <64>; cache-sets = <512>; cache-level = <2>; + cache-unified; next-level-cache = <&l3_cache>; }; @@ -256,6 +259,7 @@ cache-line-size = <64>; cache-sets = <512>; cache-level = <2>; + cache-unified; next-level-cache = <&l3_cache>; }; @@ -265,6 +269,7 @@ cache-line-size = <64>; cache-sets = <1024>; cache-level = <2>; + cache-unified; next-level-cache = <&l3_cache>; }; @@ -274,6 +279,7 @@ cache-line-size = <64>; cache-sets = <1024>; cache-level = <2>; + cache-unified; next-level-cache = <&l3_cache>; }; @@ -283,6 +289,7 @@ cache-line-size = <64>; cache-sets = <1024>; cache-level = <2>; + cache-unified; next-level-cache = <&l3_cache>; }; @@ -292,6 +299,7 @@ cache-line-size = <64>; cache-sets = <1024>; cache-level = <2>; + cache-unified; next-level-cache = <&l3_cache>; }; @@ -301,6 +309,7 @@ cache-line-size = <64>; cache-sets = <4096>; cache-level = <3>; + cache-unified; }; }; diff --git a/arch/arm64/hyperv/mshyperv.c b/arch/arm64/hyperv/mshyperv.c index a406454578f07e2a59c5a34639503ea0692f8fa6..f1b8a04ee9f26364bdebe33510cd135781d3f63b 100644 --- a/arch/arm64/hyperv/mshyperv.c +++ b/arch/arm64/hyperv/mshyperv.c @@ -67,7 +67,7 @@ static int __init hyperv_init(void) if (ret) return ret; - ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "arm64/hyperv_init:online", + ret = cpuhp_setup_state(CPUHP_AP_HYPERV_ONLINE, "arm64/hyperv_init:online", hv_common_cpu_init, hv_common_cpu_die); if (ret < 0) { hv_common_free(); diff --git a/arch/arm64/include/asm/arm_pmuv3.h b/arch/arm64/include/asm/arm_pmuv3.h index d6b51deb7bf0ff2f28810cada684097be2666962..18dc2fb3d7b7b2d0633bbe2f4ef18ec2e8ce4f67 100644 --- a/arch/arm64/include/asm/arm_pmuv3.h +++ b/arch/arm64/include/asm/arm_pmuv3.h @@ -13,7 +13,7 @@ #define RETURN_READ_PMEVCNTRN(n) \ return read_sysreg(pmevcntr##n##_el0) -static unsigned long read_pmevcntrn(int n) +static inline unsigned long read_pmevcntrn(int n) { PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN); return 0; @@ -21,14 +21,14 @@ static unsigned long read_pmevcntrn(int n) #define WRITE_PMEVCNTRN(n) \ write_sysreg(val, pmevcntr##n##_el0) -static void write_pmevcntrn(int n, unsigned long val) +static inline void write_pmevcntrn(int n, unsigned long val) { PMEVN_SWITCH(n, WRITE_PMEVCNTRN); } #define WRITE_PMEVTYPERN(n) \ write_sysreg(val, pmevtyper##n##_el0) -static void write_pmevtypern(int n, unsigned long val) +static inline void write_pmevtypern(int n, unsigned long val) { PMEVN_SWITCH(n, WRITE_PMEVTYPERN); } diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 683ca3af408485aa6cdf4aa44b5988d91cd6a0f3..5f6f84837a490375c8175626e3fec40057bb56d9 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ 
-126,6 +126,10 @@ #define APPLE_CPU_PART_M1_FIRESTORM_MAX 0x029 #define APPLE_CPU_PART_M2_BLIZZARD 0x032 #define APPLE_CPU_PART_M2_AVALANCHE 0x033 +#define APPLE_CPU_PART_M2_BLIZZARD_PRO 0x034 +#define APPLE_CPU_PART_M2_AVALANCHE_PRO 0x035 +#define APPLE_CPU_PART_M2_BLIZZARD_MAX 0x038 +#define APPLE_CPU_PART_M2_AVALANCHE_MAX 0x039 #define AMPERE_CPU_PART_AMPERE1 0xAC3 @@ -181,6 +185,10 @@ #define MIDR_APPLE_M1_FIRESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_MAX) #define MIDR_APPLE_M2_BLIZZARD MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD) #define MIDR_APPLE_M2_AVALANCHE MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE) +#define MIDR_APPLE_M2_BLIZZARD_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD_PRO) +#define MIDR_APPLE_M2_AVALANCHE_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE_PRO) +#define MIDR_APPLE_M2_BLIZZARD_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD_MAX) +#define MIDR_APPLE_M2_AVALANCHE_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE_MAX) #define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1) /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 7e7e19ef6993ede45aea71c6f19b624731290fbe..9787503ff43fdfb01703302f7c0e63cb02db901d 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -699,6 +699,8 @@ struct kvm_vcpu_arch { #define SYSREGS_ON_CPU __vcpu_single_flag(sflags, BIT(4)) /* Software step state is Active-pending */ #define DBG_SS_ACTIVE_PENDING __vcpu_single_flag(sflags, BIT(5)) +/* PMUSERENR for the guest EL0 is on physical CPU */ +#define PMUSERENR_ON_CPU __vcpu_single_flag(sflags, BIT(6)) /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */ @@ -1065,9 +1067,14 @@ void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu); #ifdef CONFIG_KVM void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr); void kvm_clr_pmu_events(u32 clr); +bool kvm_set_pmuserenr(u64 val); #else static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {} static inline void kvm_clr_pmu_events(u32 clr) {} +static inline bool kvm_set_pmuserenr(u64 val) +{ + return false; +} #endif void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu); diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h index 4cd6762bda805d16fe798a1db258155ced0d1a28..93bd0975b15f5545d5a408fd177ce35e780204ad 100644 --- a/arch/arm64/include/asm/kvm_pgtable.h +++ b/arch/arm64/include/asm/kvm_pgtable.h @@ -209,6 +209,7 @@ struct kvm_pgtable_visit_ctx { kvm_pte_t old; void *arg; struct kvm_pgtable_mm_ops *mm_ops; + u64 start; u64 addr; u64 end; u32 level; @@ -631,9 +632,9 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size); * * The walker will walk the page-table entries corresponding to the input * address range specified, visiting entries according to the walker flags. - * Invalid entries are treated as leaf entries. Leaf entries are reloaded - * after invoking the walker callback, allowing the walker to descend into - * a newly installed table. + * Invalid entries are treated as leaf entries. The visited page table entry is + * reloaded after invoking the walker callback, allowing the walker to descend + * into a newly installed table. 
* * Returning a negative error code from the walker callback function will * terminate the walk immediately with the same error code. diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index e72d9aaab6b1b63461b6b061536486528c041514..eefd712f2430353f5800df0b06333dd0460f22c0 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -115,8 +115,14 @@ #define SB_BARRIER_INSN __SYS_BARRIER_INSN(0, 7, 31) #define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2) +#define SYS_DC_IGSW sys_insn(1, 0, 7, 6, 4) +#define SYS_DC_IGDSW sys_insn(1, 0, 7, 6, 6) #define SYS_DC_CSW sys_insn(1, 0, 7, 10, 2) +#define SYS_DC_CGSW sys_insn(1, 0, 7, 10, 4) +#define SYS_DC_CGDSW sys_insn(1, 0, 7, 10, 6) #define SYS_DC_CISW sys_insn(1, 0, 7, 14, 2) +#define SYS_DC_CIGSW sys_insn(1, 0, 7, 14, 4) +#define SYS_DC_CIGDSW sys_insn(1, 0, 7, 14, 6) /* * Automatically generated definitions for system registers, the diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c index f5bcb0dc6267385e3401ed2735429b45af56e8e7..7e89968bd28244ced303586a35bb2e81dc1ffad6 100644 --- a/arch/arm64/kernel/mte.c +++ b/arch/arm64/kernel/mte.c @@ -66,13 +66,10 @@ void mte_sync_tags(pte_t old_pte, pte_t pte) return; /* if PG_mte_tagged is set, tags have already been initialised */ - for (i = 0; i < nr_pages; i++, page++) { - if (!page_mte_tagged(page)) { + for (i = 0; i < nr_pages; i++, page++) + if (!page_mte_tagged(page)) mte_sync_page_tags(page, old_pte, check_swap, pte_is_tagged); - set_page_mte_tagged(page); - } - } /* ensure the tags are visible before the PTE is set */ smp_wmb(); diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index 0119dc91abb5dbc7567344aff13760454fbb24c3..d9e1355730ef5811163d173590970d79207a8c5b 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -288,7 +288,7 @@ static int aarch32_alloc_kuser_vdso_page(void) memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start, kuser_sz); - aarch32_vectors_page = virt_to_page(vdso_page); + aarch32_vectors_page = virt_to_page((void *)vdso_page); return 0; } diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index 1279949599b5fc84948f4061d6571537883c54e9..4c9dcd8fc93950d2927725ef23e21275267dd0c0 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -81,26 +81,34 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) fpsimd_kvm_prepare(); + /* + * We will check TIF_FOREIGN_FPSTATE just before entering the + * guest in kvm_arch_vcpu_ctxflush_fp() and override this to + * FP_STATE_FREE if the flag set. + */ vcpu->arch.fp_state = FP_STATE_HOST_OWNED; vcpu_clear_flag(vcpu, HOST_SVE_ENABLED); if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN) vcpu_set_flag(vcpu, HOST_SVE_ENABLED); - /* - * We don't currently support SME guests but if we leave - * things in streaming mode then when the guest starts running - * FPSIMD or SVE code it may generate SME traps so as a - * special case if we are in streaming mode we force the host - * state to be saved now and exit streaming mode so that we - * don't have to handle any SME traps for valid guest - * operations. Do this for ZA as well for now for simplicity. - */ if (system_supports_sme()) { vcpu_clear_flag(vcpu, HOST_SME_ENABLED); if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN) vcpu_set_flag(vcpu, HOST_SME_ENABLED); + /* + * If PSTATE.SM is enabled then save any pending FP + * state and disable PSTATE.SM. 
If we leave PSTATE.SM + * enabled and the guest does not enable SME via + * CPACR_EL1.SMEN then operations that should be valid + * may generate SME traps from EL1 to EL1 which we + * can't intercept and which would confuse the guest. + * + * Do the same for PSTATE.ZA in the case where there + * is state in the registers which has not already + * been saved, this is very unlikely to happen. + */ if (read_sysreg_s(SYS_SVCR) & (SVCR_SM_MASK | SVCR_ZA_MASK)) { vcpu->arch.fp_state = FP_STATE_FREE; fpsimd_save_and_flush_cpu_state(); diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index c41166f1a1dd7dd6c228dd7e0477424dd19be9ba..4fe217efa2185a73bea804b1842deff3684918a7 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -82,8 +82,14 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu) * EL1 instead of being trapped to EL2. */ if (kvm_arm_support_pmu_v3()) { + struct kvm_cpu_context *hctxt; + write_sysreg(0, pmselr_el0); + + hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt; + ctxt_sys_reg(hctxt, PMUSERENR_EL0) = read_sysreg(pmuserenr_el0); write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0); + vcpu_set_flag(vcpu, PMUSERENR_ON_CPU); } vcpu->arch.mdcr_el2_host = read_sysreg(mdcr_el2); @@ -106,8 +112,13 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu) write_sysreg(vcpu->arch.mdcr_el2_host, mdcr_el2); write_sysreg(0, hstr_el2); - if (kvm_arm_support_pmu_v3()) - write_sysreg(0, pmuserenr_el0); + if (kvm_arm_support_pmu_v3()) { + struct kvm_cpu_context *hctxt; + + hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt; + write_sysreg(ctxt_sys_reg(hctxt, PMUSERENR_EL0), pmuserenr_el0); + vcpu_clear_flag(vcpu, PMUSERENR_ON_CPU); + } if (cpus_have_final_cap(ARM64_SME)) { sysreg_clear_set_s(SYS_HFGRTR_EL2, 0, @@ -177,9 +188,17 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code) sve_guest = vcpu_has_sve(vcpu); esr_ec = kvm_vcpu_trap_get_class(vcpu); - /* Don't handle SVE traps for non-SVE vcpus here: */ - if (!sve_guest && esr_ec != ESR_ELx_EC_FP_ASIMD) + /* Only handle traps the vCPU can support here: */ + switch (esr_ec) { + case ESR_ELx_EC_FP_ASIMD: + break; + case ESR_ELx_EC_SVE: + if (!sve_guest) + return false; + break; + default: return false; + } /* Valid trap. 
Switch the context: */ @@ -404,17 +423,21 @@ static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code) return false; } -static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code) +static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code) { if (!__populate_fault_info(vcpu)) return true; return false; } +static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code) + __alias(kvm_hyp_handle_memory_fault); +static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code) + __alias(kvm_hyp_handle_memory_fault); static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code) { - if (!__populate_fault_info(vcpu)) + if (kvm_hyp_handle_memory_fault(vcpu, exit_code)) return true; if (static_branch_unlikely(&vgic_v2_cpuif_trap)) { diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c index 2e9ec4a2a4a323d238e29f8ac199da8ea5301768..a8813b2129966d094e54154dec71c92781c032bb 100644 --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c @@ -575,7 +575,7 @@ struct pkvm_mem_donation { struct check_walk_data { enum pkvm_page_state desired; - enum pkvm_page_state (*get_page_state)(kvm_pte_t pte); + enum pkvm_page_state (*get_page_state)(kvm_pte_t pte, u64 addr); }; static int __check_page_state_visitor(const struct kvm_pgtable_visit_ctx *ctx, @@ -583,10 +583,7 @@ static int __check_page_state_visitor(const struct kvm_pgtable_visit_ctx *ctx, { struct check_walk_data *d = ctx->arg; - if (kvm_pte_valid(ctx->old) && !addr_is_allowed_memory(kvm_pte_to_phys(ctx->old))) - return -EINVAL; - - return d->get_page_state(ctx->old) == d->desired ? 0 : -EPERM; + return d->get_page_state(ctx->old, ctx->addr) == d->desired ? 0 : -EPERM; } static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size, @@ -601,8 +598,11 @@ static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size, return kvm_pgtable_walk(pgt, addr, size, &walker); } -static enum pkvm_page_state host_get_page_state(kvm_pte_t pte) +static enum pkvm_page_state host_get_page_state(kvm_pte_t pte, u64 addr) { + if (!addr_is_allowed_memory(addr)) + return PKVM_NOPAGE; + if (!kvm_pte_valid(pte) && pte) return PKVM_NOPAGE; @@ -709,7 +709,7 @@ static int host_complete_donation(u64 addr, const struct pkvm_mem_transition *tx return host_stage2_set_owner_locked(addr, size, host_id); } -static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte) +static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte, u64 addr) { if (!kvm_pte_valid(pte)) return PKVM_NOPAGE; diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index 71fa16a0dc775890adbcebe3a2ade963734ef445..77791495c9951d60e18fa8f80fd5c22385d65840 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -186,6 +186,7 @@ static const exit_handler_fn hyp_exit_handlers[] = { [ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd, [ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low, [ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low, + [ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low, [ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth, }; @@ -196,6 +197,7 @@ static const exit_handler_fn pvm_exit_handlers[] = { [ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd, [ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low, [ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low, + [ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low, [ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth, }; diff --git 
a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index 3d61bd3e591d27e9858b028220a9eb81a7498f6d..95dae02ccc2e60c2a830061a23b2e9d8c16e6246 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -58,8 +58,9 @@ struct kvm_pgtable_walk_data { struct kvm_pgtable_walker *walker; + const u64 start; u64 addr; - u64 end; + const u64 end; }; static bool kvm_phys_is_valid(u64 phys) @@ -201,20 +202,33 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data, .old = READ_ONCE(*ptep), .arg = data->walker->arg, .mm_ops = mm_ops, + .start = data->start, .addr = data->addr, .end = data->end, .level = level, .flags = flags, }; int ret = 0; + bool reload = false; kvm_pteref_t childp; bool table = kvm_pte_table(ctx.old, level); - if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE)) + if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE)) { ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_PRE); + reload = true; + } if (!table && (ctx.flags & KVM_PGTABLE_WALK_LEAF)) { ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_LEAF); + reload = true; + } + + /* + * Reload the page table after invoking the walker callback for leaf + * entries or after pre-order traversal, to allow the walker to descend + * into a newly installed or replaced table. + */ + if (reload) { ctx.old = READ_ONCE(*ptep); table = kvm_pte_table(ctx.old, level); } @@ -293,6 +307,7 @@ int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size, struct kvm_pgtable_walker *walker) { struct kvm_pgtable_walk_data walk_data = { + .start = ALIGN_DOWN(addr, PAGE_SIZE), .addr = ALIGN_DOWN(addr, PAGE_SIZE), .end = PAGE_ALIGN(walk_data.addr + size), .walker = walker, @@ -349,7 +364,7 @@ int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr, } struct hyp_map_data { - u64 phys; + const u64 phys; kvm_pte_t attr; }; @@ -407,13 +422,12 @@ enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte) static bool hyp_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx, struct hyp_map_data *data) { + u64 phys = data->phys + (ctx->addr - ctx->start); kvm_pte_t new; - u64 granule = kvm_granule_size(ctx->level), phys = data->phys; if (!kvm_block_mapping_supported(ctx, phys)) return false; - data->phys += granule; new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level); if (ctx->old == new) return true; @@ -576,7 +590,7 @@ void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt) } struct stage2_map_data { - u64 phys; + const u64 phys; kvm_pte_t attr; u8 owner_id; @@ -794,20 +808,43 @@ static bool stage2_pte_executable(kvm_pte_t pte) return !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN); } +static u64 stage2_map_walker_phys_addr(const struct kvm_pgtable_visit_ctx *ctx, + const struct stage2_map_data *data) +{ + u64 phys = data->phys; + + /* + * Stage-2 walks to update ownership data are communicated to the map + * walker using an invalid PA. Avoid offsetting an already invalid PA, + * which could overflow and make the address valid again. + */ + if (!kvm_phys_is_valid(phys)) + return phys; + + /* + * Otherwise, work out the correct PA based on how far the walk has + * gotten. 
+ */ + return phys + (ctx->addr - ctx->start); +} + static bool stage2_leaf_mapping_allowed(const struct kvm_pgtable_visit_ctx *ctx, struct stage2_map_data *data) { + u64 phys = stage2_map_walker_phys_addr(ctx, data); + if (data->force_pte && (ctx->level < (KVM_PGTABLE_MAX_LEVELS - 1))) return false; - return kvm_block_mapping_supported(ctx, data->phys); + return kvm_block_mapping_supported(ctx, phys); } static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx, struct stage2_map_data *data) { kvm_pte_t new; - u64 granule = kvm_granule_size(ctx->level), phys = data->phys; + u64 phys = stage2_map_walker_phys_addr(ctx, data); + u64 granule = kvm_granule_size(ctx->level); struct kvm_pgtable *pgt = data->mmu->pgt; struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops; @@ -841,8 +878,6 @@ static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx, stage2_make_pte(ctx, new); - if (kvm_phys_is_valid(phys)) - data->phys += granule; return 0; } @@ -1297,4 +1332,7 @@ void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pg }; WARN_ON(__kvm_pgtable_walk(&data, mm_ops, ptep, level + 1)); + + WARN_ON(mm_ops->page_count(pgtable) != 1); + mm_ops->put_page(pgtable); } diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index 3d868e84c7a0a33246f86b4ccadaed8b5abec548..b37e7c96efea188cf8ed0ed1fb86744948d0065c 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -92,14 +92,28 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu) } NOKPROBE_SYMBOL(__deactivate_traps); +/* + * Disable IRQs in {activate,deactivate}_traps_vhe_{load,put}() to + * prevent a race condition between context switching of PMUSERENR_EL0 + * in __{activate,deactivate}_traps_common() and IPIs that attempts to + * update PMUSERENR_EL0. See also kvm_set_pmuserenr(). + */ void activate_traps_vhe_load(struct kvm_vcpu *vcpu) { + unsigned long flags; + + local_irq_save(flags); __activate_traps_common(vcpu); + local_irq_restore(flags); } void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu) { + unsigned long flags; + + local_irq_save(flags); __deactivate_traps_common(vcpu); + local_irq_restore(flags); } static const exit_handler_fn hyp_exit_handlers[] = { @@ -110,6 +124,7 @@ static const exit_handler_fn hyp_exit_handlers[] = { [ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd, [ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low, [ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low, + [ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low, [ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth, }; diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c index 64c3aec0d937c36b84d49b7ca403933429b993c8..0bd93a5f21ce382506803d018ba7df7072ac0aa1 100644 --- a/arch/arm64/kvm/inject_fault.c +++ b/arch/arm64/kvm/inject_fault.c @@ -204,7 +204,7 @@ void kvm_inject_size_fault(struct kvm_vcpu *vcpu) * Size Fault at level 0, as if exceeding PARange. * * Non-LPAE guests will only get the external abort, as there - * is no way to to describe the ASF. + * is no way to describe the ASF. 
*/ if (vcpu_el1_is_32bit(vcpu) && !(vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE)) diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 45727d50d18dd90c842285a2d621628c77c9b0e6..5606509724787383ef90fb87e30ec4008303e6f9 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -694,45 +694,41 @@ out_unlock: static struct arm_pmu *kvm_pmu_probe_armpmu(void) { - struct perf_event_attr attr = { }; - struct perf_event *event; - struct arm_pmu *pmu = NULL; + struct arm_pmu *tmp, *pmu = NULL; + struct arm_pmu_entry *entry; + int cpu; + + mutex_lock(&arm_pmus_lock); /* - * Create a dummy event that only counts user cycles. As we'll never - * leave this function with the event being live, it will never - * count anything. But it allows us to probe some of the PMU - * details. Yes, this is terrible. + * It is safe to use a stale cpu to iterate the list of PMUs so long as + * the same value is used for the entirety of the loop. Given this, and + * the fact that no percpu data is used for the lookup there is no need + * to disable preemption. + * + * It is still necessary to get a valid cpu, though, to probe for the + * default PMU instance as userspace is not required to specify a PMU + * type. In order to uphold the preexisting behavior KVM selects the + * PMU instance for the core where the first call to the + * KVM_ARM_VCPU_PMU_V3_CTRL attribute group occurs. A dependent use case + * would be a user with disdain of all things big.LITTLE that affines + * the VMM to a particular cluster of cores. + * + * In any case, userspace should just do the sane thing and use the UAPI + * to select a PMU type directly. But, be wary of the baggage being + * carried here. */ - attr.type = PERF_TYPE_RAW; - attr.size = sizeof(attr); - attr.pinned = 1; - attr.disabled = 0; - attr.exclude_user = 0; - attr.exclude_kernel = 1; - attr.exclude_hv = 1; - attr.exclude_host = 1; - attr.config = ARMV8_PMUV3_PERFCTR_CPU_CYCLES; - attr.sample_period = GENMASK(63, 0); - - event = perf_event_create_kernel_counter(&attr, -1, current, - kvm_pmu_perf_overflow, &attr); - - if (IS_ERR(event)) { - pr_err_once("kvm: pmu event creation failed %ld\n", - PTR_ERR(event)); - return NULL; - } + cpu = raw_smp_processor_id(); + list_for_each_entry(entry, &arm_pmus, entry) { + tmp = entry->arm_pmu; - if (event->pmu) { - pmu = to_arm_pmu(event->pmu); - if (pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_NI || - pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF) - pmu = NULL; + if (cpumask_test_cpu(cpu, &tmp->supported_cpus)) { + pmu = tmp; + break; + } } - perf_event_disable(event); - perf_event_release_kernel(event); + mutex_unlock(&arm_pmus_lock); return pmu; } @@ -912,7 +908,17 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) return -EBUSY; if (!kvm->arch.arm_pmu) { - /* No PMU set, get the default one */ + /* + * No PMU set, get the default one. + * + * The observant among you will notice that the supported_cpus + * mask does not get updated for the default PMU even though it + * is quite possible the selected instance supports only a + * subset of cores in the system. This is intentional, and + * upholds the preexisting behavior on heterogeneous systems + * where vCPUs can be scheduled on any core but the guest + * counters could stop working. 
+ */ kvm->arch.arm_pmu = kvm_pmu_probe_armpmu(); if (!kvm->arch.arm_pmu) return -ENODEV; diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c index 7887133d15f0b67f8b04c2c01b8c17ed2d9cb5d8..121f1a14c829c2804127427d4331d91d84d1ab13 100644 --- a/arch/arm64/kvm/pmu.c +++ b/arch/arm64/kvm/pmu.c @@ -209,3 +209,30 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) kvm_vcpu_pmu_enable_el0(events_host); kvm_vcpu_pmu_disable_el0(events_guest); } + +/* + * With VHE, keep track of the PMUSERENR_EL0 value for the host EL0 on the pCPU + * where PMUSERENR_EL0 for the guest is loaded, since PMUSERENR_EL0 is switched + * to the value for the guest on vcpu_load(). The value for the host EL0 + * will be restored on vcpu_put(), before returning to userspace. + * This isn't necessary for nVHE, as the register is context switched for + * every guest enter/exit. + * + * Return true if KVM takes care of the register. Otherwise return false. + */ +bool kvm_set_pmuserenr(u64 val) +{ + struct kvm_cpu_context *hctxt; + struct kvm_vcpu *vcpu; + + if (!kvm_arm_support_pmu_v3() || !has_vhe()) + return false; + + vcpu = kvm_get_running_vcpu(); + if (!vcpu || !vcpu_get_flag(vcpu, PMUSERENR_ON_CPU)) + return false; + + hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt; + ctxt_sys_reg(hctxt, PMUSERENR_EL0) = val; + return true; +} diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 71b12094d6137008f65bf6e6297747e815bc4bd3..753aa741814977728778648fc7e3c7e05644ab51 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -211,6 +211,19 @@ static bool access_dcsw(struct kvm_vcpu *vcpu, return true; } +static bool access_dcgsw(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + if (!kvm_has_mte(vcpu->kvm)) { + kvm_inject_undefined(vcpu); + return false; + } + + /* Treat MTE S/W ops as we treat the classic ones: with contempt */ + return access_dcsw(vcpu, p, r); +} + static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift) { switch (r->aarch32_map) { @@ -1756,8 +1769,14 @@ static bool access_spsr(struct kvm_vcpu *vcpu, */ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_DC_ISW), access_dcsw }, + { SYS_DESC(SYS_DC_IGSW), access_dcgsw }, + { SYS_DESC(SYS_DC_IGDSW), access_dcgsw }, { SYS_DESC(SYS_DC_CSW), access_dcsw }, + { SYS_DESC(SYS_DC_CGSW), access_dcgsw }, + { SYS_DESC(SYS_DC_CGDSW), access_dcgsw }, { SYS_DESC(SYS_DC_CISW), access_dcsw }, + { SYS_DESC(SYS_DC_CIGSW), access_dcgsw }, + { SYS_DESC(SYS_DC_CIGDSW), access_dcgsw }, DBG_BCR_BVR_WCR_WVR_EL1(0), DBG_BCR_BVR_WCR_WVR_EL1(1), diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c index 9d42c7cb2b588235abf9b314cf92bc52f61429b0..c8c3cb812783218e93065d157291d09c7a4a0812 100644 --- a/arch/arm64/kvm/vgic/vgic-init.c +++ b/arch/arm64/kvm/vgic/vgic-init.c @@ -235,9 +235,9 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) * KVM io device for the redistributor that belongs to this VCPU. */ if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) { - mutex_lock(&vcpu->kvm->arch.config_lock); + mutex_lock(&vcpu->kvm->slots_lock); ret = vgic_register_redist_iodev(vcpu); - mutex_unlock(&vcpu->kvm->arch.config_lock); + mutex_unlock(&vcpu->kvm->slots_lock); } return ret; } @@ -406,7 +406,7 @@ void kvm_vgic_destroy(struct kvm *kvm) /** * vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest - * is a GICv2. A GICv3 must be explicitly initialized by the guest using the + * is a GICv2. 
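Editor's note: the comment being reworded here points at the explicit, userspace-driven GICv3 initialisation path. A minimal userspace sketch of that path follows, assuming vgic_fd was obtained from KVM_CREATE_DEVICE with KVM_DEV_TYPE_ARM_VGIC_V3 and the base addresses were already set; it is illustrative only and not part of this patch.

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int vgic_v3_explicit_init(int vgic_fd)
{
	struct kvm_device_attr attr = {
		.group	= KVM_DEV_ARM_VGIC_GRP_CTRL,
		.attr	= KVM_DEV_ARM_VGIC_CTRL_INIT,
	};

	/* Issue after all vCPUs exist and the dist/redist bases are programmed. */
	return ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
}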
A GICv3 must be explicitly initialized by userspace using the * KVM_DEV_ARM_VGIC_GRP_CTRL KVM_DEVICE group. * @kvm: kvm struct pointer */ @@ -446,11 +446,14 @@ int vgic_lazy_init(struct kvm *kvm) int kvm_vgic_map_resources(struct kvm *kvm) { struct vgic_dist *dist = &kvm->arch.vgic; + enum vgic_type type; + gpa_t dist_base; int ret = 0; if (likely(vgic_ready(kvm))) return 0; + mutex_lock(&kvm->slots_lock); mutex_lock(&kvm->arch.config_lock); if (vgic_ready(kvm)) goto out; @@ -458,18 +461,33 @@ int kvm_vgic_map_resources(struct kvm *kvm) if (!irqchip_in_kernel(kvm)) goto out; - if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) + if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) { ret = vgic_v2_map_resources(kvm); - else + type = VGIC_V2; + } else { ret = vgic_v3_map_resources(kvm); + type = VGIC_V3; + } - if (ret) + if (ret) { __kvm_vgic_destroy(kvm); - else - dist->ready = true; + goto out; + } + dist->ready = true; + dist_base = dist->vgic_dist_base; + mutex_unlock(&kvm->arch.config_lock); + + ret = vgic_register_dist_iodev(kvm, dist_base, type); + if (ret) { + kvm_err("Unable to register VGIC dist MMIO regions\n"); + kvm_vgic_destroy(kvm); + } + mutex_unlock(&kvm->slots_lock); + return ret; out: mutex_unlock(&kvm->arch.config_lock); + mutex_unlock(&kvm->slots_lock); return ret; } diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c index 750e51e3779a31a0eda2f8223be233fb10355664..5fe2365a629f25013fd9039ab8279fe76a31349c 100644 --- a/arch/arm64/kvm/vgic/vgic-its.c +++ b/arch/arm64/kvm/vgic/vgic-its.c @@ -1936,6 +1936,7 @@ void vgic_lpi_translation_cache_destroy(struct kvm *kvm) static int vgic_its_create(struct kvm_device *dev, u32 type) { + int ret; struct vgic_its *its; if (type != KVM_DEV_TYPE_ARM_VGIC_ITS) @@ -1945,9 +1946,12 @@ static int vgic_its_create(struct kvm_device *dev, u32 type) if (!its) return -ENOMEM; + mutex_lock(&dev->kvm->arch.config_lock); + if (vgic_initialized(dev->kvm)) { - int ret = vgic_v4_init(dev->kvm); + ret = vgic_v4_init(dev->kvm); if (ret < 0) { + mutex_unlock(&dev->kvm->arch.config_lock); kfree(its); return ret; } @@ -1960,12 +1964,10 @@ static int vgic_its_create(struct kvm_device *dev, u32 type) /* Yep, even more trickery for lock ordering... */ #ifdef CONFIG_LOCKDEP - mutex_lock(&dev->kvm->arch.config_lock); mutex_lock(&its->cmd_lock); mutex_lock(&its->its_lock); mutex_unlock(&its->its_lock); mutex_unlock(&its->cmd_lock); - mutex_unlock(&dev->kvm->arch.config_lock); #endif its->vgic_its_base = VGIC_ADDR_UNDEF; @@ -1986,7 +1988,11 @@ static int vgic_its_create(struct kvm_device *dev, u32 type) dev->private = its; - return vgic_its_set_abi(its, NR_ITS_ABIS - 1); + ret = vgic_its_set_abi(its, NR_ITS_ABIS - 1); + + mutex_unlock(&dev->kvm->arch.config_lock); + + return ret; } static void vgic_its_destroy(struct kvm_device *kvm_dev) diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c index 35cfa268fd5de7a0e3d1e1fa7eaa864715f77103..212b73a715c1c245540e12636085bb0d4ac1d741 100644 --- a/arch/arm64/kvm/vgic/vgic-kvm-device.c +++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c @@ -102,7 +102,11 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri if (get_user(addr, uaddr)) return -EFAULT; - mutex_lock(&kvm->arch.config_lock); + /* + * Since we can't hold config_lock while registering the redistributor + * iodevs, take the slots_lock immediately. 
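Editor's note: the comment above, together with kvm_vgic_map_resources() earlier in this series, fixes the nesting order between the two mutexes: kvm->slots_lock is the outer lock, kvm->arch.config_lock the inner one, so iodev registration (which needs slots_lock) never happens under config_lock. A compressed, illustrative sketch of that ordering, not literal kernel code:

static int example_configure_and_register(struct kvm *kvm)
{
	gpa_t base;

	mutex_lock(&kvm->slots_lock);		/* outer: MMIO device registration */
	mutex_lock(&kvm->arch.config_lock);	/* inner: vGIC configuration state */
	base = kvm->arch.vgic.vgic_dist_base;	/* snapshot state under config_lock */
	mutex_unlock(&kvm->arch.config_lock);

	/* kvm_io_bus_register_dev() and friends only need slots_lock here */
	mutex_unlock(&kvm->slots_lock);
	return 0;
}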
+ */ + mutex_lock(&kvm->slots_lock); switch (attr->attr) { case KVM_VGIC_V2_ADDR_TYPE_DIST: r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2); @@ -182,6 +186,7 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri if (r) goto out; + mutex_lock(&kvm->arch.config_lock); if (write) { r = vgic_check_iorange(kvm, *addr_ptr, addr, alignment, size); if (!r) @@ -189,9 +194,10 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri } else { addr = *addr_ptr; } + mutex_unlock(&kvm->arch.config_lock); out: - mutex_unlock(&kvm->arch.config_lock); + mutex_unlock(&kvm->slots_lock); if (!r && !write) r = put_user(addr, uaddr); diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c index 472b18ac92a242bd922cc1a3f58a56326de2c40b..188d2187eede935e43b31fcf4985205a1872d92f 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c +++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c @@ -769,10 +769,13 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu) struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev; struct vgic_redist_region *rdreg; gpa_t rd_base; - int ret; + int ret = 0; + + lockdep_assert_held(&kvm->slots_lock); + mutex_lock(&kvm->arch.config_lock); if (!IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) - return 0; + goto out_unlock; /* * We may be creating VCPUs before having set the base address for the @@ -782,10 +785,12 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu) */ rdreg = vgic_v3_rdist_free_slot(&vgic->rd_regions); if (!rdreg) - return 0; + goto out_unlock; - if (!vgic_v3_check_base(kvm)) - return -EINVAL; + if (!vgic_v3_check_base(kvm)) { + ret = -EINVAL; + goto out_unlock; + } vgic_cpu->rdreg = rdreg; vgic_cpu->rdreg_index = rdreg->free_index; @@ -799,16 +804,20 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu) rd_dev->nr_regions = ARRAY_SIZE(vgic_v3_rd_registers); rd_dev->redist_vcpu = vcpu; - mutex_lock(&kvm->slots_lock); + mutex_unlock(&kvm->arch.config_lock); + ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, rd_base, 2 * SZ_64K, &rd_dev->dev); - mutex_unlock(&kvm->slots_lock); - if (ret) return ret; + /* Protected by slots_lock */ rdreg->free_index++; return 0; + +out_unlock: + mutex_unlock(&kvm->arch.config_lock); + return ret; } static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu) @@ -834,12 +843,10 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm) /* The current c failed, so iterate over the previous ones. 
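Editor's note: vgic_register_redist_iodev() above, and vgic_v4_configure_vsgis() further down, replace "must be called with X held" comments with runtime assertions. A minimal sketch of the pattern, with a hypothetical helper name:

/*
 * Encode the locking contract as a lockdep assertion instead of (or in
 * addition to) a comment, so violations are reported at runtime when
 * lockdep is enabled.
 */
static void example_locked_helper(struct kvm *kvm)
{
	lockdep_assert_held(&kvm->arch.config_lock);

	/* ... work that relies on the caller holding config_lock ... */
}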
*/ int i; - mutex_lock(&kvm->slots_lock); for (i = 0; i < c; i++) { vcpu = kvm_get_vcpu(kvm, i); vgic_unregister_redist_iodev(vcpu); } - mutex_unlock(&kvm->slots_lock); } return ret; @@ -938,7 +945,9 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count) { int ret; + mutex_lock(&kvm->arch.config_lock); ret = vgic_v3_alloc_redist_region(kvm, index, addr, count); + mutex_unlock(&kvm->arch.config_lock); if (ret) return ret; @@ -950,8 +959,10 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count) if (ret) { struct vgic_redist_region *rdreg; + mutex_lock(&kvm->arch.config_lock); rdreg = vgic_v3_rdist_region_from_index(kvm, index); vgic_v3_free_redist_region(rdreg); + mutex_unlock(&kvm->arch.config_lock); return ret; } diff --git a/arch/arm64/kvm/vgic/vgic-mmio.c b/arch/arm64/kvm/vgic/vgic-mmio.c index 1939c94e0b248801800ea00c62f081f2b0c9f62d..ff558c05e990c728abd5361054cb04ef44083818 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio.c +++ b/arch/arm64/kvm/vgic/vgic-mmio.c @@ -1096,7 +1096,6 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address, enum vgic_type type) { struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev; - int ret = 0; unsigned int len; switch (type) { @@ -1114,10 +1113,6 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address, io_device->iodev_type = IODEV_DIST; io_device->redist_vcpu = NULL; - mutex_lock(&kvm->slots_lock); - ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address, - len, &io_device->dev); - mutex_unlock(&kvm->slots_lock); - - return ret; + return kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address, + len, &io_device->dev); } diff --git a/arch/arm64/kvm/vgic/vgic-v2.c b/arch/arm64/kvm/vgic/vgic-v2.c index 645648349c99b122359535c36f221d78432e25c0..7e9cdb78f7ce885f9917d380d99fa18547af47e6 100644 --- a/arch/arm64/kvm/vgic/vgic-v2.c +++ b/arch/arm64/kvm/vgic/vgic-v2.c @@ -312,12 +312,6 @@ int vgic_v2_map_resources(struct kvm *kvm) return ret; } - ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2); - if (ret) { - kvm_err("Unable to register VGIC MMIO regions\n"); - return ret; - } - if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) { ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base, kvm_vgic_global_state.vcpu_base, diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c index 469d816f356f3fe94b021f5e2fd14ea91310f114..c3b8e132d5992616ca1f308456ababce125c6c01 100644 --- a/arch/arm64/kvm/vgic/vgic-v3.c +++ b/arch/arm64/kvm/vgic/vgic-v3.c @@ -539,7 +539,6 @@ int vgic_v3_map_resources(struct kvm *kvm) { struct vgic_dist *dist = &kvm->arch.vgic; struct kvm_vcpu *vcpu; - int ret = 0; unsigned long c; kvm_for_each_vcpu(c, vcpu, kvm) { @@ -569,12 +568,6 @@ int vgic_v3_map_resources(struct kvm *kvm) return -EBUSY; } - ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3); - if (ret) { - kvm_err("Unable to register VGICv3 dist MMIO regions\n"); - return ret; - } - if (kvm_vgic_global_state.has_gicv4_1) vgic_v4_configure_vsgis(kvm); @@ -616,6 +609,10 @@ static const struct midr_range broken_seis[] = { MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_MAX), MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD), MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE), + MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_PRO), + MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_PRO), + MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_MAX), + MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_MAX), {}, }; diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c index 
3bb0034780605772859f329620765d742e3fdf59..c1c28fe680ba317e9b8712de9a04b264991cdd0a 100644 --- a/arch/arm64/kvm/vgic/vgic-v4.c +++ b/arch/arm64/kvm/vgic/vgic-v4.c @@ -184,13 +184,14 @@ static void vgic_v4_disable_vsgis(struct kvm_vcpu *vcpu) } } -/* Must be called with the kvm lock held */ void vgic_v4_configure_vsgis(struct kvm *kvm) { struct vgic_dist *dist = &kvm->arch.vgic; struct kvm_vcpu *vcpu; unsigned long i; + lockdep_assert_held(&kvm->arch.config_lock); + kvm_arm_halt_guest(kvm); kvm_for_each_vcpu(i, vcpu, kvm) { diff --git a/arch/arm64/kvm/vmid.c b/arch/arm64/kvm/vmid.c index 08978d0672e7e947b0afbca0eb7d5ae0d9be440a..7fe8ba1a2851c5b71acbf17075987b96436f1a4a 100644 --- a/arch/arm64/kvm/vmid.c +++ b/arch/arm64/kvm/vmid.c @@ -47,7 +47,7 @@ static void flush_context(void) int cpu; u64 vmid; - bitmap_clear(vmid_map, 0, NUM_USER_VMIDS); + bitmap_zero(vmid_map, NUM_USER_VMIDS); for_each_possible_cpu(cpu) { vmid = atomic64_xchg_relaxed(&per_cpu(active_vmids, cpu), 0); @@ -182,8 +182,7 @@ int __init kvm_arm_vmid_alloc_init(void) */ WARN_ON(NUM_USER_VMIDS - 1 <= num_possible_cpus()); atomic64_set(&vmid_generation, VMID_FIRST_VERSION); - vmid_map = kcalloc(BITS_TO_LONGS(NUM_USER_VMIDS), - sizeof(*vmid_map), GFP_KERNEL); + vmid_map = bitmap_zalloc(NUM_USER_VMIDS, GFP_KERNEL); if (!vmid_map) return -ENOMEM; @@ -192,5 +191,5 @@ int __init kvm_arm_vmid_alloc_init(void) void __init kvm_arm_vmid_alloc_free(void) { - kfree(vmid_map); + bitmap_free(vmid_map); } diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c index 4aadcfb017545dbaef8fd7fb321ab2bdd9d565bd..a7bb20055ce0948a6f586e29fd1a05bbbc1cc75f 100644 --- a/arch/arm64/mm/copypage.c +++ b/arch/arm64/mm/copypage.c @@ -21,9 +21,10 @@ void copy_highpage(struct page *to, struct page *from) copy_page(kto, kfrom); + if (kasan_hw_tags_enabled()) + page_kasan_tag_reset(to); + if (system_supports_mte() && page_mte_tagged(from)) { - if (kasan_hw_tags_enabled()) - page_kasan_tag_reset(to); /* It's a new page, shouldn't have been tagged yet */ WARN_ON_ONCE(!try_page_mte_tagging(to)); mte_copy_page_tags(kto, kfrom); diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 9e0db5c387e3ae903ca5b6526b5ef6978eee22bf..6045a5117ac15bca1fe0cceba12509e13003df87 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -480,8 +480,8 @@ static void do_bad_area(unsigned long far, unsigned long esr, } } -#define VM_FAULT_BADMAP 0x010000 -#define VM_FAULT_BADACCESS 0x020000 +#define VM_FAULT_BADMAP ((__force vm_fault_t)0x010000) +#define VM_FAULT_BADACCESS ((__force vm_fault_t)0x020000) static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int mm_flags, unsigned long vm_flags, @@ -600,8 +600,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr, vma_end_read(vma); goto lock_mmap; } - fault = handle_mm_fault(vma, addr & PAGE_MASK, - mm_flags | FAULT_FLAG_VMA_LOCK, regs); + fault = handle_mm_fault(vma, addr, mm_flags | FAULT_FLAG_VMA_LOCK, regs); vma_end_read(vma); if (!(fault & VM_FAULT_RETRY)) { diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h index b3323ab5b78d37d5acdea5ca9b3d3479eb2fdcc8..35e8a52fea11a71004e1a79e092f4ec6c0e2d10e 100644 --- a/arch/loongarch/include/asm/loongarch.h +++ b/arch/loongarch/include/asm/loongarch.h @@ -1496,7 +1496,7 @@ __BUILD_CSR_OP(tlbidx) #define write_fcsr(dest, val) \ do { \ __asm__ __volatile__( \ - " movgr2fcsr %0, "__stringify(dest)" \n" \ + " movgr2fcsr "__stringify(dest)", %0 \n" \ : : "r" (val)); \ } while 
(0) diff --git a/arch/loongarch/include/asm/pgtable-bits.h b/arch/loongarch/include/asm/pgtable-bits.h index 8b98d22a145b1d67974cffb52b116f8a626bf7ed..de46a6b1e9f11c87d68ef526e30c4cf816b17a38 100644 --- a/arch/loongarch/include/asm/pgtable-bits.h +++ b/arch/loongarch/include/asm/pgtable-bits.h @@ -22,12 +22,14 @@ #define _PAGE_PFN_SHIFT 12 #define _PAGE_SWP_EXCLUSIVE_SHIFT 23 #define _PAGE_PFN_END_SHIFT 48 +#define _PAGE_PRESENT_INVALID_SHIFT 60 #define _PAGE_NO_READ_SHIFT 61 #define _PAGE_NO_EXEC_SHIFT 62 #define _PAGE_RPLV_SHIFT 63 /* Used by software */ #define _PAGE_PRESENT (_ULCAST_(1) << _PAGE_PRESENT_SHIFT) +#define _PAGE_PRESENT_INVALID (_ULCAST_(1) << _PAGE_PRESENT_INVALID_SHIFT) #define _PAGE_WRITE (_ULCAST_(1) << _PAGE_WRITE_SHIFT) #define _PAGE_ACCESSED (_ULCAST_(1) << _PAGE_ACCESSED_SHIFT) #define _PAGE_MODIFIED (_ULCAST_(1) << _PAGE_MODIFIED_SHIFT) diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h index d28fb9dbec5966c8dfc7ad07e5ae5908f61ed1af..9a9f9ff9b7098000002c3ed029e54c62a7ba05b1 100644 --- a/arch/loongarch/include/asm/pgtable.h +++ b/arch/loongarch/include/asm/pgtable.h @@ -213,7 +213,7 @@ static inline int pmd_bad(pmd_t pmd) static inline int pmd_present(pmd_t pmd) { if (unlikely(pmd_val(pmd) & _PAGE_HUGE)) - return !!(pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE)); + return !!(pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PRESENT_INVALID)); return pmd_val(pmd) != (unsigned long)invalid_pte_table; } @@ -558,6 +558,7 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) static inline pmd_t pmd_mkinvalid(pmd_t pmd) { + pmd_val(pmd) |= _PAGE_PRESENT_INVALID; pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY | _PAGE_PROTNONE); return pmd; diff --git a/arch/loongarch/kernel/hw_breakpoint.c b/arch/loongarch/kernel/hw_breakpoint.c index 2406c95b34cc4f0023f1db8755e60ff7ea0cc18a..021b59c248fac64413ad70a5387ee7a3f698b92d 100644 --- a/arch/loongarch/kernel/hw_breakpoint.c +++ b/arch/loongarch/kernel/hw_breakpoint.c @@ -396,6 +396,8 @@ int hw_breakpoint_arch_parse(struct perf_event *bp, if (hw->ctrl.type != LOONGARCH_BREAKPOINT_EXECUTE) alignment_mask = 0x7; + else + alignment_mask = 0x3; offset = hw->address & alignment_mask; hw->address &= ~alignment_mask; diff --git a/arch/loongarch/kernel/perf_event.c b/arch/loongarch/kernel/perf_event.c index ff28f99b47d7ebce47bdaf5b3a97b8b2cb49481b..0491bf453cd49601c4f8b7b35565ea4a2b83c689 100644 --- a/arch/loongarch/kernel/perf_event.c +++ b/arch/loongarch/kernel/perf_event.c @@ -271,7 +271,7 @@ static void loongarch_pmu_enable_event(struct hw_perf_event *evt, int idx) WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters); /* Make sure interrupt enabled. */ - cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) | + cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base) | (evt->config_base & M_PERFCTL_CONFIG_MASK) | CSR_PERFCTRL_IE; cpu = (event->cpu >= 0) ? 
event->cpu : smp_processor_id(); @@ -594,7 +594,7 @@ static struct pmu pmu = { static unsigned int loongarch_pmu_perf_event_encode(const struct loongarch_perf_event *pev) { - return (pev->event_id & 0xff); + return M_PERFCTL_EVENT(pev->event_id); } static const struct loongarch_perf_event *loongarch_pmu_map_general_event(int idx) @@ -849,7 +849,7 @@ static void resume_local_counters(void) static const struct loongarch_perf_event *loongarch_pmu_map_raw_event(u64 config) { - raw_event.event_id = config & 0xff; + raw_event.event_id = M_PERFCTL_EVENT(config); return &raw_event; } diff --git a/arch/loongarch/kernel/unaligned.c b/arch/loongarch/kernel/unaligned.c index bdff825d29ef4dd252430cbd0f5e923fb102cb5f..85fae3d2d71aebc5691686cfe67214561fd8d6fe 100644 --- a/arch/loongarch/kernel/unaligned.c +++ b/arch/loongarch/kernel/unaligned.c @@ -485,7 +485,7 @@ static int __init debugfs_unaligned(void) struct dentry *d; d = debugfs_create_dir("loongarch", NULL); - if (!d) + if (IS_ERR_OR_NULL(d)) return -ENOMEM; debugfs_create_u32("unaligned_instructions_user", diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c index b9f6908a31bc39f94c0418a298aed87470755170..ba468b5f3f0b65bd57d74af263d19d9ba917f635 100644 --- a/arch/m68k/kernel/signal.c +++ b/arch/m68k/kernel/signal.c @@ -858,11 +858,17 @@ static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs * } static inline void __user * -get_sigframe(struct ksignal *ksig, size_t frame_size) +get_sigframe(struct ksignal *ksig, struct pt_regs *tregs, size_t frame_size) { unsigned long usp = sigsp(rdusp(), ksig); + unsigned long gap = 0; - return (void __user *)((usp - frame_size) & -8UL); + if (CPU_IS_020_OR_030 && tregs->format == 0xb) { + /* USP is unreliable so use worst-case value */ + gap = 256; + } + + return (void __user *)((usp - gap - frame_size) & -8UL); } static int setup_frame(struct ksignal *ksig, sigset_t *set, @@ -880,7 +886,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set, return -EFAULT; } - frame = get_sigframe(ksig, sizeof(*frame) + fsize); + frame = get_sigframe(ksig, tregs, sizeof(*frame) + fsize); if (fsize) err |= copy_to_user (frame + 1, regs + 1, fsize); @@ -952,7 +958,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, return -EFAULT; } - frame = get_sigframe(ksig, sizeof(*frame)); + frame = get_sigframe(ksig, tregs, sizeof(*frame)); if (fsize) err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize); diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index c2f5498d207fffd0f07bfff905ff75e05c504802..675a8660cb85a033792d9622040ea48a8b800cb0 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -79,6 +79,7 @@ config MIPS select HAVE_LD_DEAD_CODE_DATA_ELIMINATION select HAVE_MOD_ARCH_SPECIFIC select HAVE_NMI + select HAVE_PATA_PLATFORM select HAVE_PERF_EVENTS select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP diff --git a/arch/mips/alchemy/common/dbdma.c b/arch/mips/alchemy/common/dbdma.c index 5ab0430004092900cb8a7de8013031ff18d862fe..6a3c890f7bbfedc43459edc3c69b6f8cae3c79dc 100644 --- a/arch/mips/alchemy/common/dbdma.c +++ b/arch/mips/alchemy/common/dbdma.c @@ -30,6 +30,7 @@ * */ +#include /* for dma_default_coherent */ #include #include #include @@ -623,17 +624,18 @@ u32 au1xxx_dbdma_put_source(u32 chanid, dma_addr_t buf, int nbytes, u32 flags) dp->dscr_cmd0 &= ~DSCR_CMD0_IE; /* - * There is an errata on the Au1200/Au1550 parts that could result - * in "stale" data being DMA'ed. It has to do with the snoop logic on - * the cache eviction buffer. 
DMA_NONCOHERENT is on by default for - * these parts. If it is fixed in the future, these dma_cache_inv will - * just be nothing more than empty macros. See io.h. + * There is an erratum on certain Au1200/Au1550 revisions that could + * result in "stale" data being DMA'ed. It has to do with the snoop + * logic on the cache eviction buffer. dma_default_coherent is set + * to false on these parts. */ - dma_cache_wback_inv((unsigned long)buf, nbytes); + if (!dma_default_coherent) + dma_cache_wback_inv(KSEG0ADDR(buf), nbytes); dp->dscr_cmd0 |= DSCR_CMD0_V; /* Let it rip */ wmb(); /* drain writebuffer */ dma_cache_wback_inv((unsigned long)dp, sizeof(*dp)); ctp->chan_ptr->ddma_dbell = 0; + wmb(); /* force doorbell write out to dma engine */ /* Get next descriptor pointer. */ ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr)); @@ -685,17 +687,18 @@ u32 au1xxx_dbdma_put_dest(u32 chanid, dma_addr_t buf, int nbytes, u32 flags) dp->dscr_source1, dp->dscr_dest0, dp->dscr_dest1); #endif /* - * There is an errata on the Au1200/Au1550 parts that could result in - * "stale" data being DMA'ed. It has to do with the snoop logic on the - * cache eviction buffer. DMA_NONCOHERENT is on by default for these - * parts. If it is fixed in the future, these dma_cache_inv will just - * be nothing more than empty macros. See io.h. + * There is an erratum on certain Au1200/Au1550 revisions that could + * result in "stale" data being DMA'ed. It has to do with the snoop + * logic on the cache eviction buffer. dma_default_coherent is set + * to false on these parts. */ - dma_cache_inv((unsigned long)buf, nbytes); + if (!dma_default_coherent) + dma_cache_inv(KSEG0ADDR(buf), nbytes); dp->dscr_cmd0 |= DSCR_CMD0_V; /* Let it rip */ wmb(); /* drain writebuffer */ dma_cache_wback_inv((unsigned long)dp, sizeof(*dp)); ctp->chan_ptr->ddma_dbell = 0; + wmb(); /* force doorbell write out to dma engine */ /* Get next descriptor pointer. */ ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr)); diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 6d15a398d389e2bd269d47794edd1b37a370b956..e79adcb128e67df5a13dfc30740e555b4697554c 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -1502,6 +1502,10 @@ static inline void cpu_probe_alchemy(struct cpuinfo_mips *c, unsigned int cpu) break; } break; + case PRID_IMP_NETLOGIC_AU13XX: + c->cputype = CPU_ALCHEMY; + __cpu_name[cpu] = "Au1300"; + break; } } @@ -1863,6 +1867,7 @@ void cpu_probe(void) cpu_probe_mips(c, cpu); break; case PRID_COMP_ALCHEMY: + case PRID_COMP_NETLOGIC: cpu_probe_alchemy(c, cpu); break; case PRID_COMP_SIBYTE: diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index febdc5564638e9b9bbb959791ecb75d6068b676c..c0e65135481b74bfb102f09dd56607c634f45b55 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -158,10 +158,6 @@ static unsigned long __init init_initrd(void) pr_err("initrd start must be page aligned\n"); goto disable; } - if (initrd_start < PAGE_OFFSET) { - pr_err("initrd start < PAGE_OFFSET\n"); - goto disable; - } /* * Sanitize initrd addresses. 
For example firmware @@ -174,6 +170,11 @@ static unsigned long __init init_initrd(void) initrd_end = (unsigned long)__va(end); initrd_start = (unsigned long)__va(__pa(initrd_start)); + if (initrd_start < PAGE_OFFSET) { + pr_err("initrd start < PAGE_OFFSET\n"); + goto disable; + } + ROOT_DEV = Root_RAM0; return PFN_UP(end); disable: diff --git a/arch/nios2/boot/dts/10m50_devboard.dts b/arch/nios2/boot/dts/10m50_devboard.dts index 56339bef3247d544fd34a101377b08d13d906959..0e7e5b0dd685c15abfae91f0e833a6035eed2c75 100644 --- a/arch/nios2/boot/dts/10m50_devboard.dts +++ b/arch/nios2/boot/dts/10m50_devboard.dts @@ -97,7 +97,7 @@ rx-fifo-depth = <8192>; tx-fifo-depth = <8192>; address-bits = <48>; - max-frame-size = <1518>; + max-frame-size = <1500>; local-mac-address = [00 00 00 00 00 00]; altr,has-supplementary-unicast; altr,enable-sup-addr = <1>; diff --git a/arch/nios2/boot/dts/3c120_devboard.dts b/arch/nios2/boot/dts/3c120_devboard.dts index d10fb81686c7efb41dc1459700d04289bb1519f3..3ee316906379795493e85917ef6c2a94c5220144 100644 --- a/arch/nios2/boot/dts/3c120_devboard.dts +++ b/arch/nios2/boot/dts/3c120_devboard.dts @@ -106,7 +106,7 @@ interrupt-names = "rx_irq", "tx_irq"; rx-fifo-depth = <8192>; tx-fifo-depth = <8192>; - max-frame-size = <1518>; + max-frame-size = <1500>; local-mac-address = [ 00 00 00 00 00 00 ]; phy-mode = "rgmii-id"; phy-handle = <&phy0>; diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 466a25525364d2d45e7ed5bf286856242fdc90d2..967bde65dd0ec895d78b2314a49aa8cb043e28c0 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -130,6 +130,10 @@ config PM config STACKTRACE_SUPPORT def_bool y +config LOCKDEP_SUPPORT + bool + default y + config ISA_DMA_API bool diff --git a/arch/parisc/Kconfig.debug b/arch/parisc/Kconfig.debug index f66554cd5c4518cf919abf321ce9137e8b2be59d..3a059cb5e112fb09fe701fe362c07c84cec247e2 100644 --- a/arch/parisc/Kconfig.debug +++ b/arch/parisc/Kconfig.debug @@ -1 +1,12 @@ # SPDX-License-Identifier: GPL-2.0 +# +config LIGHTWEIGHT_SPINLOCK_CHECK + bool "Enable lightweight spinlock checks" + depends on SMP && !DEBUG_SPINLOCK + default y + help + Add checks with low performance impact to the spinlock functions + to catch memory overwrites at runtime. For more advanced + spinlock debugging you should choose the DEBUG_SPINLOCK option + which will detect unitialized spinlocks too. + If unsure say Y here. diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h index 0f0d4a496fef0ed5c4f54aae6fc1f75dbc2cb8a7..75677b526b2bb79f7bb531a1281e24296d3c3c7c 100644 --- a/arch/parisc/include/asm/assembly.h +++ b/arch/parisc/include/asm/assembly.h @@ -90,10 +90,6 @@ #include #include - sp = 30 - gp = 27 - ipsw = 22 - /* * We provide two versions of each macro to convert from physical * to virtual and vice versa. 
The "_r1" versions take one argument diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h index 0bdee672413206aa9db8c23c4bd3fd3be2182660..c8b6928cee1ee442f4b9c9b0c996fe1801620f75 100644 --- a/arch/parisc/include/asm/cacheflush.h +++ b/arch/parisc/include/asm/cacheflush.h @@ -48,6 +48,10 @@ void flush_dcache_page(struct page *page); #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages) #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages) +#define flush_dcache_mmap_lock_irqsave(mapping, flags) \ + xa_lock_irqsave(&mapping->i_pages, flags) +#define flush_dcache_mmap_unlock_irqrestore(mapping, flags) \ + xa_unlock_irqrestore(&mapping->i_pages, flags) #define flush_icache_page(vma,page) do { \ flush_kernel_dcache_page_addr(page_address(page)); \ diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h index a6e5d66a76562aaa8e89bab4b55e4aa39d13ad43..edfcb9858bcb7ca0bf2fee2cfeef304836beb4a5 100644 --- a/arch/parisc/include/asm/spinlock.h +++ b/arch/parisc/include/asm/spinlock.h @@ -7,10 +7,26 @@ #include #include +#define SPINLOCK_BREAK_INSN 0x0000c006 /* break 6,6 */ + +static inline void arch_spin_val_check(int lock_val) +{ + if (IS_ENABLED(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK)) + asm volatile( "andcm,= %0,%1,%%r0\n" + ".word %2\n" + : : "r" (lock_val), "r" (__ARCH_SPIN_LOCK_UNLOCKED_VAL), + "i" (SPINLOCK_BREAK_INSN)); +} + static inline int arch_spin_is_locked(arch_spinlock_t *x) { - volatile unsigned int *a = __ldcw_align(x); - return READ_ONCE(*a) == 0; + volatile unsigned int *a; + int lock_val; + + a = __ldcw_align(x); + lock_val = READ_ONCE(*a); + arch_spin_val_check(lock_val); + return (lock_val == 0); } static inline void arch_spin_lock(arch_spinlock_t *x) @@ -18,9 +34,18 @@ static inline void arch_spin_lock(arch_spinlock_t *x) volatile unsigned int *a; a = __ldcw_align(x); - while (__ldcw(a) == 0) + do { + int lock_val_old; + + lock_val_old = __ldcw(a); + arch_spin_val_check(lock_val_old); + if (lock_val_old) + return; /* got lock */ + + /* wait until we should try to get lock again */ while (*a == 0) continue; + } while (1); } static inline void arch_spin_unlock(arch_spinlock_t *x) @@ -29,15 +54,19 @@ static inline void arch_spin_unlock(arch_spinlock_t *x) a = __ldcw_align(x); /* Release with ordered store. 
*/ - __asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory"); + __asm__ __volatile__("stw,ma %0,0(%1)" + : : "r"(__ARCH_SPIN_LOCK_UNLOCKED_VAL), "r"(a) : "memory"); } static inline int arch_spin_trylock(arch_spinlock_t *x) { volatile unsigned int *a; + int lock_val; a = __ldcw_align(x); - return __ldcw(a) != 0; + lock_val = __ldcw(a); + arch_spin_val_check(lock_val); + return lock_val != 0; } /* diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h index ca39ee350c3f422eee3ff28fa39cd5090600464e..d65934079ebdb032967f86976da3fc8963a84f80 100644 --- a/arch/parisc/include/asm/spinlock_types.h +++ b/arch/parisc/include/asm/spinlock_types.h @@ -2,13 +2,17 @@ #ifndef __ASM_SPINLOCK_TYPES_H #define __ASM_SPINLOCK_TYPES_H +#define __ARCH_SPIN_LOCK_UNLOCKED_VAL 0x1a46 + typedef struct { #ifdef CONFIG_PA20 volatile unsigned int slock; -# define __ARCH_SPIN_LOCK_UNLOCKED { 1 } +# define __ARCH_SPIN_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED_VAL } #else volatile unsigned int lock[4]; -# define __ARCH_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } } +# define __ARCH_SPIN_LOCK_UNLOCKED \ + { { __ARCH_SPIN_LOCK_UNLOCKED_VAL, __ARCH_SPIN_LOCK_UNLOCKED_VAL, \ + __ARCH_SPIN_LOCK_UNLOCKED_VAL, __ARCH_SPIN_LOCK_UNLOCKED_VAL } } #endif } arch_spinlock_t; diff --git a/arch/parisc/kernel/alternative.c b/arch/parisc/kernel/alternative.c index 66f5672c70bd469e6e1a261c4a7c080be1d808f5..25c4d6c3375db357f73c12e48826c941b92fc621 100644 --- a/arch/parisc/kernel/alternative.c +++ b/arch/parisc/kernel/alternative.c @@ -25,7 +25,7 @@ void __init_or_module apply_alternatives(struct alt_instr *start, { struct alt_instr *entry; int index = 0, applied = 0; - int num_cpus = num_online_cpus(); + int num_cpus = num_present_cpus(); u16 cond_check; cond_check = ALT_COND_ALWAYS | diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 1d3b8bc8a6233641c4eeea6d8b775887b0de9a4e..ca4a302d4365fe9e7472d3c748b69369233610af 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -399,6 +399,7 @@ void flush_dcache_page(struct page *page) unsigned long offset; unsigned long addr, old_addr = 0; unsigned long count = 0; + unsigned long flags; pgoff_t pgoff; if (mapping && !mapping_mapped(mapping)) { @@ -420,7 +421,7 @@ void flush_dcache_page(struct page *page) * to flush one address here for them all to become coherent * on machines that support equivalent aliasing */ - flush_dcache_mmap_lock(mapping); + flush_dcache_mmap_lock_irqsave(mapping, flags); vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT; addr = mpnt->vm_start + offset; @@ -460,7 +461,7 @@ void flush_dcache_page(struct page *page) } WARN_ON(++count == 4096); } - flush_dcache_mmap_unlock(mapping); + flush_dcache_mmap_unlock_irqrestore(mapping, flags); } EXPORT_SYMBOL(flush_dcache_page); diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c index ba87f791323be008d1e6abfcfbe846a55575cbf5..71ed5391f29d6f00ec85168b02cce2fe4eccb097 100644 --- a/arch/parisc/kernel/pci-dma.c +++ b/arch/parisc/kernel/pci-dma.c @@ -446,11 +446,27 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr, void arch_sync_dma_for_device(phys_addr_t paddr, size_t size, enum dma_data_direction dir) { + /* + * fdc: The data cache line is written back to memory, if and only if + * it is dirty, and then invalidated from the data cache. 
+ */ flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size); } void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, enum dma_data_direction dir) { - flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size); + unsigned long addr = (unsigned long) phys_to_virt(paddr); + + switch (dir) { + case DMA_TO_DEVICE: + case DMA_BIDIRECTIONAL: + flush_kernel_dcache_range(addr, size); + return; + case DMA_FROM_DEVICE: + purge_kernel_dcache_range_asm(addr, addr + size); + return; + default: + BUG(); + } } diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index 97c6f875bd0e12f188215afa07815356711593f5..24411ab79c307d4ab78f33303a7b1d8afb7d1c1d 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -122,13 +122,18 @@ void machine_power_off(void) /* It seems we have no way to power the system off via * software. The user has to press the button himself. */ - printk(KERN_EMERG "System shut down completed.\n" - "Please power this system off now."); + printk("Power off or press RETURN to reboot.\n"); /* prevent soft lockup/stalled CPU messages for endless loop. */ rcu_sysrq_start(); lockup_detector_soft_poweroff(); - for (;;); + while (1) { + /* reboot if user presses RETURN key */ + if (pdc_iodc_getc() == 13) { + printk("Rebooting...\n"); + machine_restart(NULL); + } + } } void (*pm_power_off)(void); diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index f9696fbf646c473cffbe26f316b1bcd289054431..304eebd1c83e7d802fd9b1d726d479fbaeefa175 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -47,6 +47,10 @@ #include #include +#if defined(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK) +#include +#endif + #include "../math-emu/math-emu.h" /* for handle_fpe() */ static void parisc_show_stack(struct task_struct *task, @@ -291,24 +295,30 @@ static void handle_break(struct pt_regs *regs) } #ifdef CONFIG_KPROBES - if (unlikely(iir == PARISC_KPROBES_BREAK_INSN)) { + if (unlikely(iir == PARISC_KPROBES_BREAK_INSN && !user_mode(regs))) { parisc_kprobe_break_handler(regs); return; } - if (unlikely(iir == PARISC_KPROBES_BREAK_INSN2)) { + if (unlikely(iir == PARISC_KPROBES_BREAK_INSN2 && !user_mode(regs))) { parisc_kprobe_ss_handler(regs); return; } #endif #ifdef CONFIG_KGDB - if (unlikely(iir == PARISC_KGDB_COMPILED_BREAK_INSN || - iir == PARISC_KGDB_BREAK_INSN)) { + if (unlikely((iir == PARISC_KGDB_COMPILED_BREAK_INSN || + iir == PARISC_KGDB_BREAK_INSN)) && !user_mode(regs)) { kgdb_handle_exception(9, SIGTRAP, 0, regs); return; } #endif +#ifdef CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK + if ((iir == SPINLOCK_BREAK_INSN) && !user_mode(regs)) { + die_if_kernel("Spinlock was trashed", regs, 1); + } +#endif + if (unlikely(iir != GDB_BREAK_INSN)) parisc_printk_ratelimited(0, regs, KERN_DEBUG "break %d,%d: pid=%d command='%s'\n", diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 539d1f03ff42a5a60527836791f3b17cead05e05..bff5820b7cda14714f6b76da3bac2de92c8f740f 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -906,11 +906,17 @@ config DATA_SHIFT config ARCH_FORCE_MAX_ORDER int "Order of maximal physically contiguous allocations" + range 7 8 if PPC64 && PPC_64K_PAGES default "8" if PPC64 && PPC_64K_PAGES + range 12 12 if PPC64 && !PPC_64K_PAGES default "12" if PPC64 && !PPC_64K_PAGES + range 8 10 if PPC32 && PPC_16K_PAGES default "8" if PPC32 && PPC_16K_PAGES + range 6 10 if PPC32 && PPC_64K_PAGES default "6" if PPC32 && PPC_64K_PAGES + range 4 10 if PPC32 && PPC_256K_PAGES default "4" if PPC32 && 
PPC_256K_PAGES + range 10 10 default "10" help The kernel page allocator limits the size of maximal physically diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index 85cde5bf04b7ae7c3cfdb2394cbf67d4c7c96e0e..771b79423bbc2b4ea4ee4d5a5fa3c07b0a14d53c 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -34,8 +34,6 @@ endif BOOTCFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ -fno-strict-aliasing -O2 -msoft-float -mno-altivec -mno-vsx \ - $(call cc-option,-mno-prefixed) $(call cc-option,-mno-pcrel) \ - $(call cc-option,-mno-mma) \ $(call cc-option,-mno-spe) $(call cc-option,-mspe=no) \ -pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \ $(LINUXINCLUDE) @@ -71,6 +69,10 @@ BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -nostdinc BOOTARFLAGS := -crD +BOOTCFLAGS += $(call cc-option,-mno-prefixed) \ + $(call cc-option,-mno-pcrel) \ + $(call cc-option,-mno-mma) + ifdef CONFIG_CC_IS_CLANG BOOTCFLAGS += $(CLANG_FLAGS) BOOTAFLAGS += $(CLANG_FLAGS) diff --git a/arch/powerpc/crypto/Kconfig b/arch/powerpc/crypto/Kconfig index 7113f9355165a663732964cdf1890541936a36a8..ad1872518992e0a074a250ff46997eb3d02740f4 100644 --- a/arch/powerpc/crypto/Kconfig +++ b/arch/powerpc/crypto/Kconfig @@ -96,7 +96,7 @@ config CRYPTO_AES_PPC_SPE config CRYPTO_AES_GCM_P10 tristate "Stitched AES/GCM acceleration support on P10 or later CPU (PPC)" - depends on PPC64 && CPU_LITTLE_ENDIAN + depends on PPC64 && CPU_LITTLE_ENDIAN && VSX select CRYPTO_LIB_AES select CRYPTO_ALGAPI select CRYPTO_AEAD diff --git a/arch/powerpc/crypto/Makefile b/arch/powerpc/crypto/Makefile index 05c7486f42c587b1bf1c59b3ad756c4a6f474609..7b4f516abec1d35b221b29ac6fec238f0eb55d99 100644 --- a/arch/powerpc/crypto/Makefile +++ b/arch/powerpc/crypto/Makefile @@ -22,15 +22,15 @@ sha1-ppc-spe-y := sha1-spe-asm.o sha1-spe-glue.o sha256-ppc-spe-y := sha256-spe-asm.o sha256-spe-glue.o crc32c-vpmsum-y := crc32c-vpmsum_asm.o crc32c-vpmsum_glue.o crct10dif-vpmsum-y := crct10dif-vpmsum_asm.o crct10dif-vpmsum_glue.o -aes-gcm-p10-crypto-y := aes-gcm-p10-glue.o aes-gcm-p10.o ghashp8-ppc.o aesp8-ppc.o +aes-gcm-p10-crypto-y := aes-gcm-p10-glue.o aes-gcm-p10.o ghashp10-ppc.o aesp10-ppc.o quiet_cmd_perl = PERL $@ cmd_perl = $(PERL) $< $(if $(CONFIG_CPU_LITTLE_ENDIAN), linux-ppc64le, linux-ppc64) > $@ -targets += aesp8-ppc.S ghashp8-ppc.S +targets += aesp10-ppc.S ghashp10-ppc.S -$(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE +$(obj)/aesp10-ppc.S $(obj)/ghashp10-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE $(call if_changed,perl) -OBJECT_FILES_NON_STANDARD_aesp8-ppc.o := y -OBJECT_FILES_NON_STANDARD_ghashp8-ppc.o := y +OBJECT_FILES_NON_STANDARD_aesp10-ppc.o := y +OBJECT_FILES_NON_STANDARD_ghashp10-ppc.o := y diff --git a/arch/powerpc/crypto/aes-gcm-p10-glue.c b/arch/powerpc/crypto/aes-gcm-p10-glue.c index bd3475f5348d99ec0620c968fd95bcffed2388cc..4b6e899895e7be8214d021559e9d0d7fbc8d65d2 100644 --- a/arch/powerpc/crypto/aes-gcm-p10-glue.c +++ b/arch/powerpc/crypto/aes-gcm-p10-glue.c @@ -30,15 +30,15 @@ MODULE_AUTHOR("Danny Tsen aadLen = alen; i = alen & ~0xf; if (i) { - gcm_ghash_p8(nXi, hash->Htable+32, aad, i); + gcm_ghash_p10(nXi, hash->Htable+32, aad, i); aad += i; alen -= i; } @@ -102,7 +102,7 @@ static void set_aad(struct gcm_ctx *gctx, struct Hash_ctx *hash, nXi[i] ^= aad[i]; memset(gctx->aad_hash, 0, 16); - gcm_ghash_p8(gctx->aad_hash, hash->Htable+32, nXi, 16); + gcm_ghash_p10(gctx->aad_hash, hash->Htable+32, nXi, 16); } else { memcpy(gctx->aad_hash, nXi, 16); } @@ -115,7 +115,7 @@ static void 
gcmp10_init(struct gcm_ctx *gctx, u8 *iv, unsigned char *rdkey, { __be32 counter = cpu_to_be32(1); - aes_p8_encrypt(hash->H, hash->H, rdkey); + aes_p10_encrypt(hash->H, hash->H, rdkey); set_subkey(hash->H); gcm_init_htable(hash->Htable+32, hash->H); @@ -126,7 +126,7 @@ static void gcmp10_init(struct gcm_ctx *gctx, u8 *iv, unsigned char *rdkey, /* * Encrypt counter vector as iv tag and increment counter. */ - aes_p8_encrypt(iv, gctx->ivtag, rdkey); + aes_p10_encrypt(iv, gctx->ivtag, rdkey); counter = cpu_to_be32(2); *((__be32 *)(iv+12)) = counter; @@ -160,7 +160,7 @@ static void finish_tag(struct gcm_ctx *gctx, struct Hash_ctx *hash, int len) /* * hash (AAD len and len) */ - gcm_ghash_p8(hash->Htable, hash->Htable+32, aclen, 16); + gcm_ghash_p10(hash->Htable, hash->Htable+32, aclen, 16); for (i = 0; i < 16; i++) hash->Htable[i] ^= gctx->ivtag[i]; @@ -192,7 +192,7 @@ static int p10_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key, int ret; vsx_begin(); - ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); + ret = aes_p10_set_encrypt_key(key, keylen * 8, &ctx->enc_key); vsx_end(); return ret ? -EINVAL : 0; diff --git a/arch/powerpc/crypto/aesp8-ppc.pl b/arch/powerpc/crypto/aesp10-ppc.pl similarity index 99% rename from arch/powerpc/crypto/aesp8-ppc.pl rename to arch/powerpc/crypto/aesp10-ppc.pl index 1f22aec27d79970c5d1d7a4694cd6cb5416bc3d6..2c06ce2a2c7c02b159ac0108f8b380c3781f3403 100644 --- a/arch/powerpc/crypto/aesp8-ppc.pl +++ b/arch/powerpc/crypto/aesp10-ppc.pl @@ -110,7 +110,7 @@ die "can't locate ppc-xlate.pl"; open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!"; $FRAME=8*$SIZE_T; -$prefix="aes_p8"; +$prefix="aes_p10"; $sp="r1"; $vrsave="r12"; diff --git a/arch/powerpc/crypto/ghashp8-ppc.pl b/arch/powerpc/crypto/ghashp10-ppc.pl similarity index 97% rename from arch/powerpc/crypto/ghashp8-ppc.pl rename to arch/powerpc/crypto/ghashp10-ppc.pl index b56603b4a893c756217255a51fe90fdadae77d24..27a6b0bec645f9ae4e457fcbdbe0bccda07dc71e 100644 --- a/arch/powerpc/crypto/ghashp8-ppc.pl +++ b/arch/powerpc/crypto/ghashp10-ppc.pl @@ -64,7 +64,7 @@ $code=<<___; .text -.globl .gcm_init_p8 +.globl .gcm_init_p10 lis r0,0xfff0 li r8,0x10 mfspr $vrsave,256 @@ -110,7 +110,7 @@ $code=<<___; .long 0 .byte 0,12,0x14,0,0,0,2,0 .long 0 -.size .gcm_init_p8,.-.gcm_init_p8 +.size .gcm_init_p10,.-.gcm_init_p10 .globl .gcm_init_htable lis r0,0xfff0 @@ -237,7 +237,7 @@ $code=<<___; .long 0 .size .gcm_init_htable,.-.gcm_init_htable -.globl .gcm_gmult_p8 +.globl .gcm_gmult_p10 lis r0,0xfff8 li r8,0x10 mfspr $vrsave,256 @@ -283,9 +283,9 @@ $code=<<___; .long 0 .byte 0,12,0x14,0,0,0,2,0 .long 0 -.size .gcm_gmult_p8,.-.gcm_gmult_p8 +.size .gcm_gmult_p10,.-.gcm_gmult_p10 -.globl .gcm_ghash_p8 +.globl .gcm_ghash_p10 lis r0,0xfff8 li r8,0x10 mfspr $vrsave,256 @@ -350,7 +350,7 @@ Loop: .long 0 .byte 0,12,0x14,0,0,0,4,0 .long 0 -.size .gcm_ghash_p8,.-.gcm_ghash_p8 +.size .gcm_ghash_p10,.-.gcm_ghash_p10 .asciz "GHASH for PowerISA 2.07, CRYPTOGAMS by " .align 2 diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index 678b5bdc79b1f5df91372ce5c95fbde92a676918..34e14dfd8e042a679df1dd50cf6d6586aae0f099 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -205,7 +205,6 @@ extern void iommu_register_group(struct iommu_table_group *table_group, int pci_domain_number, unsigned long pe_num); extern int iommu_add_device(struct iommu_table_group *table_group, struct device *dev); -extern void iommu_del_device(struct device *dev); extern 
long iommu_tce_xchg(struct mm_struct *mm, struct iommu_table *tbl, unsigned long entry, unsigned long *hpa, enum dma_data_direction *direction); @@ -229,10 +228,6 @@ static inline int iommu_add_device(struct iommu_table_group *table_group, { return 0; } - -static inline void iommu_del_device(struct device *dev) -{ -} #endif /* !CONFIG_IOMMU_API */ u64 dma_iommu_get_required_mask(struct device *dev); diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c index 038ce8d9061d166b8c85663adc2fa4dab08c23f1..8920862ffd7916ef3afcde6714a1676ae6a1f647 100644 --- a/arch/powerpc/kernel/dma-iommu.c +++ b/arch/powerpc/kernel/dma-iommu.c @@ -144,7 +144,7 @@ static bool dma_iommu_bypass_supported(struct device *dev, u64 mask) /* We support DMA to/from any memory page via the iommu */ int dma_iommu_dma_supported(struct device *dev, u64 mask) { - struct iommu_table *tbl = get_iommu_table_base(dev); + struct iommu_table *tbl; if (dev_is_pci(dev) && dma_iommu_bypass_supported(dev, mask)) { /* @@ -162,6 +162,8 @@ int dma_iommu_dma_supported(struct device *dev, u64 mask) return 1; } + tbl = get_iommu_table_base(dev); + if (!tbl) { dev_err(dev, "Warning: IOMMU dma not supported: mask 0x%08llx, table unavailable\n", mask); return 0; diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index 0089dd49b4cbf71a964aee0dd7ae5165853fbb70..67f0b01e6ff575d54d95532f04e70e735b3640ee 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -518,7 +518,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, /* Convert entry to a dma_addr_t */ entry += tbl->it_offset; dma_addr = entry << tbl->it_page_shift; - dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl)); + dma_addr |= (vaddr & ~IOMMU_PAGE_MASK(tbl)); DBG(" - %lu pages, entry: %lx, dma_addr: %lx\n", npages, entry, dma_addr); @@ -905,6 +905,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, unsigned int order; unsigned int nio_pages, io_order; struct page *page; + int tcesize = (1 << tbl->it_page_shift); size = PAGE_ALIGN(size); order = get_order(size); @@ -931,7 +932,8 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, memset(ret, 0, size); /* Set up tces to cover the allocated range */ - nio_pages = size >> tbl->it_page_shift; + nio_pages = IOMMU_PAGE_ALIGN(size, tbl) >> tbl->it_page_shift; + io_order = get_iommu_order(size, tbl); mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL, mask >> tbl->it_page_shift, io_order, 0); @@ -939,7 +941,8 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, free_pages((unsigned long)ret, order); return NULL; } - *dma_handle = mapping; + + *dma_handle = mapping | ((u64)ret & (tcesize - 1)); return ret; } @@ -950,7 +953,7 @@ void iommu_free_coherent(struct iommu_table *tbl, size_t size, unsigned int nio_pages; size = PAGE_ALIGN(size); - nio_pages = size >> tbl->it_page_shift; + nio_pages = IOMMU_PAGE_ALIGN(size, tbl) >> tbl->it_page_shift; iommu_free(tbl, dma_handle, nio_pages); size = PAGE_ALIGN(size); free_pages((unsigned long)vaddr, get_order(size)); @@ -1168,23 +1171,6 @@ int iommu_add_device(struct iommu_table_group *table_group, struct device *dev) } EXPORT_SYMBOL_GPL(iommu_add_device); -void iommu_del_device(struct device *dev) -{ - /* - * Some devices might not have IOMMU table and group - * and we needn't detach them from the associated - * IOMMU groups - */ - if (!device_iommu_mapped(dev)) { - pr_debug("iommu_tce: skipping device %s with no tbl\n", - dev_name(dev)); - 
return; - } - - iommu_group_remove_device(dev); -} -EXPORT_SYMBOL_GPL(iommu_del_device); - /* * A simple iommu_table_group_ops which only allows reusing the existing * iommu_table. This handles VFIO for POWER7 or the nested KVM. diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c index 85bdd7d3652f84222f2cb6cdcc0be1d4a9664099..48e0eaf1ad61559a374fe83b33c65aee6ef35e7f 100644 --- a/arch/powerpc/kernel/isa-bridge.c +++ b/arch/powerpc/kernel/isa-bridge.c @@ -93,11 +93,12 @@ static int process_ISA_OF_ranges(struct device_node *isa_node, } inval_range: - if (!phb_io_base_phys) { + if (phb_io_base_phys) { pr_err("no ISA IO ranges or unexpected isa range, mapping 64k\n"); remap_isa_base(phb_io_base_phys, 0x10000); + return 0; } - return 0; + return -EINVAL; } diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index 26245aaf12b8bddb8f6082ce5b2b3c093f2e00cc..2297aa764ecdbcedbaf7958df234f5d5c4408470 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -1040,8 +1040,8 @@ void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep, pte_t entry, unsigned long address, int psize) { struct mm_struct *mm = vma->vm_mm; - unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED | - _PAGE_RW | _PAGE_EXEC); + unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_SOFT_DIRTY | + _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC); unsigned long change = pte_val(entry) ^ pte_val(*ptep); /* diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c index ce804b7bf84e42c131634e7639c93096f4acfe9d..0bd4866d98241d68546ec7669fc265b93866badb 100644 --- a/arch/powerpc/mm/book3s64/radix_tlb.c +++ b/arch/powerpc/mm/book3s64/radix_tlb.c @@ -795,12 +795,20 @@ void exit_lazy_flush_tlb(struct mm_struct *mm, bool always_flush) goto out; if (current->active_mm == mm) { + unsigned long flags; + WARN_ON_ONCE(current->mm != NULL); - /* Is a kernel thread and is using mm as the lazy tlb */ + /* + * It is a kernel thread and is using mm as the lazy tlb, so + * switch it to init_mm. This is not always called from IPI + * (e.g., flush_type_needed), so must disable irqs. 
+ */ + local_irq_save(flags); mmgrab_lazy_tlb(&init_mm); current->active_mm = &init_mm; switch_mm_irqs_off(mm, &init_mm, current); mmdrop_lazy_tlb(mm); + local_irq_restore(flags); } /* diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index e93aefcfb83f2663008833d9a845ae3a83b22556..37043dfc1adddf67b84f881fe751216db87162ec 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -101,6 +101,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) bpf_hdr = jit_data->header; proglen = jit_data->proglen; extra_pass = true; + /* During extra pass, ensure index is reset before repopulating extable entries */ + cgctx.exentry_idx = 0; goto skip_init_ctx; } diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig index 0d9b7609c7d5629404594eb9bec518795aae0763..3e2e252016f7a6e66de2e85a5c6bb548a36c1688 100644 --- a/arch/powerpc/platforms/Kconfig +++ b/arch/powerpc/platforms/Kconfig @@ -265,6 +265,7 @@ config CPM2 config FSL_ULI1575 bool "ULI1575 PCIe south bridge support" depends on FSL_SOC_BOOKE || PPC_86xx + depends on PCI select FSL_PCI select GENERIC_ISA_DMA help diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index 233a50e65fceddf8d641d71c40f989df7a219144..7725492097b627ff70b6026f0c9267bf94feb9be 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -865,28 +865,3 @@ void __init pnv_pci_init(void) /* Configure IOMMU DMA hooks */ set_pci_dma_ops(&dma_iommu_ops); } - -static int pnv_tce_iommu_bus_notifier(struct notifier_block *nb, - unsigned long action, void *data) -{ - struct device *dev = data; - - switch (action) { - case BUS_NOTIFY_DEL_DEVICE: - iommu_del_device(dev); - return 0; - default: - return 0; - } -} - -static struct notifier_block pnv_tce_iommu_bus_nb = { - .notifier_call = pnv_tce_iommu_bus_notifier, -}; - -static int __init pnv_tce_iommu_bus_notifier_init(void) -{ - bus_register_notifier(&pci_bus_type, &pnv_tce_iommu_bus_nb); - return 0; -} -machine_subsys_initcall_sync(powernv, pnv_tce_iommu_bus_notifier_init); diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 7464fa6e41455385b06885e5720bfcb1a5e9dc05..d59e8a98a200826c75a1602129312b8014f5f26d 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -91,19 +91,24 @@ static struct iommu_table_group *iommu_pseries_alloc_group(int node) static void iommu_pseries_free_group(struct iommu_table_group *table_group, const char *node_name) { - struct iommu_table *tbl; - if (!table_group) return; - tbl = table_group->tables[0]; #ifdef CONFIG_IOMMU_API if (table_group->group) { iommu_group_put(table_group->group); BUG_ON(table_group->group); } #endif - iommu_tce_table_put(tbl); + + /* Default DMA window table is at index 0, while DDW at 1. SR-IOV + * adapters only have table on index 1. 
+ */ + if (table_group->tables[0]) + iommu_tce_table_put(table_group->tables[0]); + + if (table_group->tables[1]) + iommu_tce_table_put(table_group->tables[1]); kfree(table_group); } @@ -312,13 +317,22 @@ static void tce_free_pSeriesLP(unsigned long liobn, long tcenum, long tceshift, static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages) { u64 rc; + long rpages = npages; + unsigned long limit; if (!firmware_has_feature(FW_FEATURE_STUFF_TCE)) return tce_free_pSeriesLP(tbl->it_index, tcenum, tbl->it_page_shift, npages); - rc = plpar_tce_stuff((u64)tbl->it_index, - (u64)tcenum << tbl->it_page_shift, 0, npages); + do { + limit = min_t(unsigned long, rpages, 512); + + rc = plpar_tce_stuff((u64)tbl->it_index, + (u64)tcenum << tbl->it_page_shift, 0, limit); + + rpages -= limit; + tcenum += limit; + } while (rpages > 0 && !rc); if (rc && printk_ratelimit()) { printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n"); @@ -1695,31 +1709,6 @@ static int __init disable_multitce(char *str) __setup("multitce=", disable_multitce); -static int tce_iommu_bus_notifier(struct notifier_block *nb, - unsigned long action, void *data) -{ - struct device *dev = data; - - switch (action) { - case BUS_NOTIFY_DEL_DEVICE: - iommu_del_device(dev); - return 0; - default: - return 0; - } -} - -static struct notifier_block tce_iommu_bus_nb = { - .notifier_call = tce_iommu_bus_notifier, -}; - -static int __init tce_iommu_bus_notifier_init(void) -{ - bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb); - return 0; -} -machine_subsys_initcall_sync(pseries, tce_iommu_bus_notifier_init); - #ifdef CONFIG_SPAPR_TCE_IOMMU struct iommu_group *pSeries_pci_device_group(struct pci_controller *hose, struct pci_dev *pdev) diff --git a/arch/powerpc/purgatory/Makefile b/arch/powerpc/purgatory/Makefile index 6f5e2727963c4c18b28129d128e60371bb7f1aaf..78473d69cd2b6752e5cac6d02e7f0cc8affb829a 100644 --- a/arch/powerpc/purgatory/Makefile +++ b/arch/powerpc/purgatory/Makefile @@ -5,6 +5,11 @@ KCSAN_SANITIZE := n targets += trampoline_$(BITS).o purgatory.ro +# When profile-guided optimization is enabled, llvm emits two different +# overlapping text sections, which is not supported by kexec. Remove profile +# optimization flags. 
+KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%,$(KBUILD_CFLAGS)) + LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined $(obj)/purgatory.ro: $(obj)/trampoline_$(BITS).o FORCE diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 728d3c257e4a33323bdca9716dd2658548fc693a..70c4c59a1a8f47f103ba180935974b6e71f6db31 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -88,7 +88,7 @@ static unsigned long ndump = 64; static unsigned long nidump = 16; static unsigned long ncsum = 4096; static int termch; -static char tmpstr[128]; +static char tmpstr[KSYM_NAME_LEN]; static int tracing_enabled; static long bus_error_jmp[JMP_BUF_LEN]; diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 348c0fa1fc8c7a8479c42c70bdede50a8a3c5126..5966ad97c30c3d8f157dad32f4da9a5ab07790a7 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -26,6 +26,7 @@ config RISCV select ARCH_HAS_GIGANTIC_PAGE select ARCH_HAS_KCOV select ARCH_HAS_MMIOWB + select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE select ARCH_HAS_PMEM_API select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_SET_DIRECT_MAP if MMU @@ -799,8 +800,11 @@ menu "Power management options" source "kernel/power/Kconfig" +# Hibernation is only possible on systems where the SBI implementation has +# marked its reserved memory as not accessible from, or does not run +# from the same memory as, Linux config ARCH_HIBERNATION_POSSIBLE - def_bool y + def_bool NONPORTABLE config ARCH_HIBERNATION_HEADER def_bool HIBERNATION diff --git a/arch/riscv/errata/Makefile b/arch/riscv/errata/Makefile index a1055965fbee6a6da0b173f9e894e79f8930dfd1..7b2637c8c332653f5c5551a88c0947f732233b23 100644 --- a/arch/riscv/errata/Makefile +++ b/arch/riscv/errata/Makefile @@ -1,2 +1,6 @@ +ifdef CONFIG_RELOCATABLE +KBUILD_CFLAGS += -fno-pie +endif + obj-$(CONFIG_ERRATA_SIFIVE) += sifive/ obj-$(CONFIG_ERRATA_THEAD) += thead/ diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h index fe6f2300664162c1b0e233e1323aeb96c2d33746..ce1ebda1a49a79b41c240b033c94b2c47d90c465 100644 --- a/arch/riscv/include/asm/hugetlb.h +++ b/arch/riscv/include/asm/hugetlb.h @@ -36,6 +36,9 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t pte, int dirty); +#define __HAVE_ARCH_HUGE_PTEP_GET +pte_t huge_ptep_get(pte_t *ptep); + pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags); #define arch_make_huge_pte arch_make_huge_pte diff --git a/arch/riscv/include/asm/kfence.h b/arch/riscv/include/asm/kfence.h index d887a54042aa417ae7152ea41ccb46addf8aea9d..0bbffd528096d97e02e070be23ae05271a283538 100644 --- a/arch/riscv/include/asm/kfence.h +++ b/arch/riscv/include/asm/kfence.h @@ -8,41 +8,8 @@ #include #include -static inline int split_pmd_page(unsigned long addr) -{ - int i; - unsigned long pfn = PFN_DOWN(__pa((addr & PMD_MASK))); - pmd_t *pmd = pmd_off_k(addr); - pte_t *pte = pte_alloc_one_kernel(&init_mm); - - if (!pte) - return -ENOMEM; - - for (i = 0; i < PTRS_PER_PTE; i++) - set_pte(pte + i, pfn_pte(pfn + i, PAGE_KERNEL)); - set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(pte)), PAGE_TABLE)); - - flush_tlb_kernel_range(addr, addr + PMD_SIZE); - return 0; -} - static inline bool arch_kfence_init_pool(void) { - int ret; - unsigned long addr; - pmd_t *pmd; - - for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr); - addr += PAGE_SIZE) { - pmd = pmd_off_k(addr); - - if (pmd_leaf(*pmd)) { - ret = split_pmd_page(addr); - if (ret) - return false; - } - } - return 
true; } diff --git a/arch/riscv/include/asm/perf_event.h b/arch/riscv/include/asm/perf_event.h index d42c901f9a9770985b53da11098d69cf7934cc62..665bbc9b2f840a5272eb493d799f21dab73dfd25 100644 --- a/arch/riscv/include/asm/perf_event.h +++ b/arch/riscv/include/asm/perf_event.h @@ -10,4 +10,11 @@ #include #define perf_arch_bpf_user_pt_regs(regs) (struct user_regs_struct *)regs + +#define perf_arch_fetch_caller_regs(regs, __ip) { \ + (regs)->epc = (__ip); \ + (regs)->s0 = (unsigned long) __builtin_frame_address(0); \ + (regs)->sp = current_stack_pointer; \ + (regs)->status = SR_PP; \ +} #endif /* _ASM_RISCV_PERF_EVENT_H */ diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 2258b27173b02658209c5f260c051fe961d99fbd..75970ee2bda223be83a567adab6fa327e8e58cd1 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -165,8 +165,7 @@ extern struct pt_alloc_ops pt_ops __initdata; _PAGE_EXEC | _PAGE_WRITE) #define PAGE_COPY PAGE_READ -#define PAGE_COPY_EXEC PAGE_EXEC -#define PAGE_COPY_READ_EXEC PAGE_READ_EXEC +#define PAGE_COPY_EXEC PAGE_READ_EXEC #define PAGE_SHARED PAGE_WRITE #define PAGE_SHARED_EXEC PAGE_WRITE_EXEC diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index fbdccc21418a5b9bf0d53de537089b879a04a429..153864e4f3996dc3405a28fc024e0d76f0232028 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -23,6 +23,10 @@ ifdef CONFIG_FTRACE CFLAGS_REMOVE_alternative.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_cpufeature.o = $(CC_FLAGS_FTRACE) endif +ifdef CONFIG_RELOCATABLE +CFLAGS_alternative.o += -fno-pie +CFLAGS_cpufeature.o += -fno-pie +endif ifdef CONFIG_KASAN KASAN_SANITIZE_alternative.o := n KASAN_SANITIZE_cpufeature.o := n diff --git a/arch/riscv/kernel/probes/Makefile b/arch/riscv/kernel/probes/Makefile index c40139e9ca4781e414946bd9e58648f15c89d25a..8265ff497977cffb5559f25a2bd6adcce2eb1a60 100644 --- a/arch/riscv/kernel/probes/Makefile +++ b/arch/riscv/kernel/probes/Makefile @@ -4,3 +4,5 @@ obj-$(CONFIG_RETHOOK) += rethook.o rethook_trampoline.o obj-$(CONFIG_KPROBES_ON_FTRACE) += ftrace.o obj-$(CONFIG_UPROBES) += uprobes.o decode-insn.o simulate-insn.o CFLAGS_REMOVE_simulate-insn.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_rethook.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_rethook_trampoline.o = $(CC_FLAGS_FTRACE) diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c index a163a3e0f0d46b92565e85318c105b9cf0891c29..e0ef56dc57b90a31a9ef95072f4360019ac84ba1 100644 --- a/arch/riscv/mm/hugetlbpage.c +++ b/arch/riscv/mm/hugetlbpage.c @@ -3,6 +3,30 @@ #include #ifdef CONFIG_RISCV_ISA_SVNAPOT +pte_t huge_ptep_get(pte_t *ptep) +{ + unsigned long pte_num; + int i; + pte_t orig_pte = ptep_get(ptep); + + if (!pte_present(orig_pte) || !pte_napot(orig_pte)) + return orig_pte; + + pte_num = napot_pte_num(napot_cont_order(orig_pte)); + + for (i = 0; i < pte_num; i++, ptep++) { + pte_t pte = ptep_get(ptep); + + if (pte_dirty(pte)) + orig_pte = pte_mkdirty(orig_pte); + + if (pte_young(pte)) + orig_pte = pte_mkyoung(orig_pte); + } + + return orig_pte; +} + pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, @@ -218,6 +242,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm, { pte_t pte = ptep_get(ptep); unsigned long order; + pte_t orig_pte; int i, pte_num; if (!pte_napot(pte)) { @@ -228,9 +253,12 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm, order = napot_cont_order(pte); pte_num = napot_pte_num(order); ptep = huge_pte_offset(mm, addr, 
napot_cont_size(order)); + orig_pte = get_clear_contig_flush(mm, addr, ptep, pte_num); + + orig_pte = pte_wrprotect(orig_pte); for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++) - ptep_set_wrprotect(mm, addr, ptep); + set_pte_at(mm, addr, ptep, orig_pte); } pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 747e5b1ef02d35ebbc2fdaed3f6af793f0d2a5b5..4fa420faa780899b4802fc12eba567d03cd458e5 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -23,6 +23,7 @@ #ifdef CONFIG_RELOCATABLE #include #endif +#include #include #include @@ -293,7 +294,7 @@ static const pgprot_t protection_map[16] = { [VM_EXEC] = PAGE_EXEC, [VM_EXEC | VM_READ] = PAGE_READ_EXEC, [VM_EXEC | VM_WRITE] = PAGE_COPY_EXEC, - [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_READ_EXEC, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_EXEC, [VM_SHARED] = PAGE_NONE, [VM_SHARED | VM_READ] = PAGE_READ, [VM_SHARED | VM_WRITE] = PAGE_SHARED, @@ -659,18 +660,19 @@ void __init create_pgd_mapping(pgd_t *pgdp, create_pgd_next_mapping(nextp, va, pa, sz, prot); } -static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size) +static uintptr_t __init best_map_size(phys_addr_t pa, uintptr_t va, + phys_addr_t size) { - if (!(base & (PGDIR_SIZE - 1)) && size >= PGDIR_SIZE) + if (!(pa & (PGDIR_SIZE - 1)) && !(va & (PGDIR_SIZE - 1)) && size >= PGDIR_SIZE) return PGDIR_SIZE; - if (!(base & (P4D_SIZE - 1)) && size >= P4D_SIZE) + if (!(pa & (P4D_SIZE - 1)) && !(va & (P4D_SIZE - 1)) && size >= P4D_SIZE) return P4D_SIZE; - if (!(base & (PUD_SIZE - 1)) && size >= PUD_SIZE) + if (!(pa & (PUD_SIZE - 1)) && !(va & (PUD_SIZE - 1)) && size >= PUD_SIZE) return PUD_SIZE; - if (!(base & (PMD_SIZE - 1)) && size >= PMD_SIZE) + if (!(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE) return PMD_SIZE; return PAGE_SIZE; @@ -922,9 +924,9 @@ static void __init create_kernel_page_table(pgd_t *pgdir, bool early) static void __init create_fdt_early_page_table(uintptr_t fix_fdt_va, uintptr_t dtb_pa) { +#ifndef CONFIG_BUILTIN_DTB uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1); -#ifndef CONFIG_BUILTIN_DTB /* Make sure the fdt fixmap address is always aligned on PMD size */ BUILD_BUG_ON(FIX_FDT % (PMD_SIZE / PAGE_SIZE)); @@ -1167,14 +1169,16 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) } static void __init create_linear_mapping_range(phys_addr_t start, - phys_addr_t end) + phys_addr_t end, + uintptr_t fixed_map_size) { phys_addr_t pa; uintptr_t va, map_size; for (pa = start; pa < end; pa += map_size) { va = (uintptr_t)__va(pa); - map_size = best_map_size(pa, end - pa); + map_size = fixed_map_size ? fixed_map_size : + best_map_size(pa, va, end - pa); create_pgd_mapping(swapper_pg_dir, va, pa, map_size, pgprot_from_va(va)); @@ -1184,6 +1188,7 @@ static void __init create_linear_mapping_range(phys_addr_t start, static void __init create_linear_mapping_page_table(void) { phys_addr_t start, end; + phys_addr_t kfence_pool __maybe_unused; u64 i; #ifdef CONFIG_STRICT_KERNEL_RWX @@ -1197,6 +1202,19 @@ static void __init create_linear_mapping_page_table(void) memblock_mark_nomap(krodata_start, krodata_size); #endif +#ifdef CONFIG_KFENCE + /* + * kfence pool must be backed by PAGE_SIZE mappings, so allocate it + * before we setup the linear mapping so that we avoid using hugepages + * for this region. 
+ */ + kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE); + BUG_ON(!kfence_pool); + + memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE); + __kfence_pool = __va(kfence_pool); +#endif + /* Map all memory banks in the linear mapping */ for_each_mem_range(i, &start, &end) { if (start >= end) @@ -1207,17 +1225,25 @@ static void __init create_linear_mapping_page_table(void) if (end >= __pa(PAGE_OFFSET) + memory_limit) end = __pa(PAGE_OFFSET) + memory_limit; - create_linear_mapping_range(start, end); + create_linear_mapping_range(start, end, 0); } #ifdef CONFIG_STRICT_KERNEL_RWX - create_linear_mapping_range(ktext_start, ktext_start + ktext_size); + create_linear_mapping_range(ktext_start, ktext_start + ktext_size, 0); create_linear_mapping_range(krodata_start, - krodata_start + krodata_size); + krodata_start + krodata_size, 0); memblock_clear_nomap(ktext_start, ktext_size); memblock_clear_nomap(krodata_start, krodata_size); #endif + +#ifdef CONFIG_KFENCE + create_linear_mapping_range(kfence_pool, + kfence_pool + KFENCE_POOL_SIZE, + PAGE_SIZE); + + memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE); +#endif } static void __init setup_vm_final(void) diff --git a/arch/riscv/purgatory/Makefile b/arch/riscv/purgatory/Makefile index 5730797a6b402c39c96cd0360486b3548631b389..bd2e27f82532681ff124a01b28c25dd346347afe 100644 --- a/arch/riscv/purgatory/Makefile +++ b/arch/riscv/purgatory/Makefile @@ -35,6 +35,11 @@ CFLAGS_sha256.o := -D__DISABLE_EXPORTS CFLAGS_string.o := -D__DISABLE_EXPORTS CFLAGS_ctype.o := -D__DISABLE_EXPORTS +# When profile-guided optimization is enabled, llvm emits two different +# overlapping text sections, which is not supported by kexec. Remove profile +# optimization flags. +KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%,$(KBUILD_CFLAGS)) + # When linking purgatory.ro with -r unresolved symbols are not checked, # also link a purgatory.chk binary without -r to check for unresolved symbols. 
PURGATORY_LDFLAGS := -e purgatory_start -z nodefaultlib diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index db20c1589a98f3905d57f452ff19fc53380a45c6..6dab9c1be508b9138ef7dde50db6db2ef9ff5a3a 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -469,19 +469,11 @@ config SCHED_SMT config SCHED_MC def_bool n -config SCHED_BOOK - def_bool n - -config SCHED_DRAWER - def_bool n - config SCHED_TOPOLOGY def_bool y prompt "Topology scheduler support" select SCHED_SMT select SCHED_MC - select SCHED_BOOK - select SCHED_DRAWER help Topology scheduler support improves the CPU scheduler's decision making when dealing with machines that have multi-threading, @@ -716,7 +708,6 @@ config EADM_SCH config VFIO_CCW def_tristate n prompt "Support for VFIO-CCW subchannels" - depends on S390_CCW_IOMMU depends on VFIO select VFIO_MDEV help @@ -728,7 +719,7 @@ config VFIO_CCW config VFIO_AP def_tristate n prompt "VFIO support for AP devices" - depends on S390_AP_IOMMU && KVM + depends on KVM depends on VFIO depends on ZCRYPT select VFIO_MDEV diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index 4ccf66d29fc24b9cad9425fc0fcb9a01bfe4b3c0..be3bf03bf361850d9a0abd5cdb6644878ebec6a4 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -591,8 +591,6 @@ CONFIG_VIRTIO_BALLOON=m CONFIG_VIRTIO_INPUT=y CONFIG_VHOST_NET=m CONFIG_VHOST_VSOCK=m -CONFIG_S390_CCW_IOMMU=y -CONFIG_S390_AP_IOMMU=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y @@ -703,6 +701,7 @@ CONFIG_IMA_DEFAULT_HASH_SHA256=y CONFIG_IMA_WRITE_POLICY=y CONFIG_IMA_APPRAISE=y CONFIG_LSM="yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor" +CONFIG_INIT_STACK_NONE=y CONFIG_CRYPTO_USER=m # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set CONFIG_CRYPTO_PCRYPT=m diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index 693297a2e89733d888c30e78e9d95f981c6db015..769c7eed8b6ac67f01ac781b6eac7721c21ce3b8 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -580,8 +580,6 @@ CONFIG_VIRTIO_BALLOON=m CONFIG_VIRTIO_INPUT=y CONFIG_VHOST_NET=m CONFIG_VHOST_VSOCK=m -CONFIG_S390_CCW_IOMMU=y -CONFIG_S390_AP_IOMMU=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y @@ -686,6 +684,7 @@ CONFIG_IMA_DEFAULT_HASH_SHA256=y CONFIG_IMA_WRITE_POLICY=y CONFIG_IMA_APPRAISE=y CONFIG_LSM="yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor" +CONFIG_INIT_STACK_NONE=y CONFIG_CRYPTO_FIPS=y CONFIG_CRYPTO_USER=m # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig index 33a232bb68af95b460eb098136fbdd23e3045751..6f68b39817effcfc5f5a9612cd52ec465630be53 100644 --- a/arch/s390/configs/zfcpdump_defconfig +++ b/arch/s390/configs/zfcpdump_defconfig @@ -67,6 +67,7 @@ CONFIG_ZFCP=y # CONFIG_MISC_FILESYSTEMS is not set # CONFIG_NETWORK_FILESYSTEMS is not set CONFIG_LSM="yama,loadpin,safesetid,integrity" +CONFIG_INIT_STACK_NONE=y # CONFIG_ZLIB_DFLTCC is not set CONFIG_XZ_DEC_MICROLZMA=y CONFIG_PRINTK_TIME=y diff --git a/arch/s390/crypto/chacha-glue.c b/arch/s390/crypto/chacha-glue.c index 7752bd314558e50ce6c0142c2472cb60297799f7..5fae187f947a08325ccb19ff792118fdcc661277 100644 --- a/arch/s390/crypto/chacha-glue.c +++ b/arch/s390/crypto/chacha-glue.c @@ -82,7 +82,7 @@ void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, * it cannot handle a block of data or less, but otherwise * it can handle data of arbitrary size */ - if (bytes <= 
CHACHA_BLOCK_SIZE || nrounds != 20) + if (bytes <= CHACHA_BLOCK_SIZE || nrounds != 20 || !MACHINE_HAS_VX) chacha_crypt_generic(state, dst, src, bytes, nrounds); else chacha20_crypt_s390(state, dst, src, bytes, diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h index a386070f1d5653a97bf2aa075b21c23e83eac436..3cb9d813f022b9b8e565bca04f9ec1e805452e2f 100644 --- a/arch/s390/include/asm/compat.h +++ b/arch/s390/include/asm/compat.h @@ -112,7 +112,7 @@ struct compat_statfs64 { u32 f_namelen; u32 f_frsize; u32 f_flags; - u32 f_spare[4]; + u32 f_spare[5]; }; /* diff --git a/arch/s390/include/uapi/asm/statfs.h b/arch/s390/include/uapi/asm/statfs.h index 72604f7792c33642aed1907bb0a69effd8b06991..f85b50723dd355f04a66e3ef5ac07bf89de0c6ff 100644 --- a/arch/s390/include/uapi/asm/statfs.h +++ b/arch/s390/include/uapi/asm/statfs.h @@ -30,7 +30,7 @@ struct statfs { unsigned int f_namelen; unsigned int f_frsize; unsigned int f_flags; - unsigned int f_spare[4]; + unsigned int f_spare[5]; }; struct statfs64 { @@ -45,7 +45,7 @@ struct statfs64 { unsigned int f_namelen; unsigned int f_frsize; unsigned int f_flags; - unsigned int f_spare[4]; + unsigned int f_spare[5]; }; #endif diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 8983837b3565914b19451546d7b5a1ff61511c95..6b2a051e1f8a2a7fec822ab5b287a9b25fd33a84 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -10,6 +10,7 @@ CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) # Do not trace early setup code CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_rethook.o = $(CC_FLAGS_FTRACE) endif diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 43de939b7af199757646a3ea82347897f6dd9980..f44f70de966115c64aa6286afb9bfe06f2543dc6 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -1935,14 +1935,13 @@ static struct shutdown_action __refdata dump_action = { static void dump_reipl_run(struct shutdown_trigger *trigger) { - unsigned long ipib = (unsigned long) reipl_block_actual; struct lowcore *abs_lc; unsigned int csum; csum = (__force unsigned int) csum_partial(reipl_block_actual, reipl_block_actual->hdr.len, 0); abs_lc = get_abs_lowcore(); - abs_lc->ipib = ipib; + abs_lc->ipib = __pa(reipl_block_actual); abs_lc->ipib_checksum = csum; put_abs_lowcore(abs_lc); dump_run(trigger); diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 9fd19530c9a5623d5901abb1dd9e2032238203ba..68adf1de8888b25fbfaea0a45537e8cd4fb21c2f 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -95,7 +95,7 @@ out: static void cpu_thread_map(cpumask_t *dst, unsigned int cpu) { static cpumask_t mask; - int i; + unsigned int max_cpu; cpumask_clear(&mask); if (!cpumask_test_cpu(cpu, &cpu_setup_mask)) @@ -104,9 +104,10 @@ static void cpu_thread_map(cpumask_t *dst, unsigned int cpu) if (topology_mode != TOPOLOGY_MODE_HW) goto out; cpu -= cpu % (smp_cpu_mtid + 1); - for (i = 0; i <= smp_cpu_mtid; i++) { - if (cpumask_test_cpu(cpu + i, &cpu_setup_mask)) - cpumask_set_cpu(cpu + i, &mask); + max_cpu = min(cpu + smp_cpu_mtid, nr_cpu_ids - 1); + for (; cpu <= max_cpu; cpu++) { + if (cpumask_test_cpu(cpu, &cpu_setup_mask)) + cpumask_set_cpu(cpu, &mask); } out: cpumask_copy(dst, &mask); @@ -123,25 +124,26 @@ static void add_cpus_to_mask(struct topology_core *tl_core, unsigned int core; for_each_set_bit(core, &tl_core->mask, TOPOLOGY_CORE_BITS) { - unsigned int rcore; - int lcpu, i; + unsigned int max_cpu, rcore; + int cpu; rcore = TOPOLOGY_CORE_BITS - 1 - core + 
tl_core->origin; - lcpu = smp_find_processor_id(rcore << smp_cpu_mt_shift); - if (lcpu < 0) + cpu = smp_find_processor_id(rcore << smp_cpu_mt_shift); + if (cpu < 0) continue; - for (i = 0; i <= smp_cpu_mtid; i++) { - topo = &cpu_topology[lcpu + i]; + max_cpu = min(cpu + smp_cpu_mtid, nr_cpu_ids - 1); + for (; cpu <= max_cpu; cpu++) { + topo = &cpu_topology[cpu]; topo->drawer_id = drawer->id; topo->book_id = book->id; topo->socket_id = socket->id; topo->core_id = rcore; - topo->thread_id = lcpu + i; + topo->thread_id = cpu; topo->dedicated = tl_core->d; - cpumask_set_cpu(lcpu + i, &drawer->mask); - cpumask_set_cpu(lcpu + i, &book->mask); - cpumask_set_cpu(lcpu + i, &socket->mask); - smp_cpu_set_polarization(lcpu + i, tl_core->pp); + cpumask_set_cpu(cpu, &drawer->mask); + cpumask_set_cpu(cpu, &book->mask); + cpumask_set_cpu(cpu, &socket->mask); + smp_cpu_set_polarization(cpu, tl_core->pp); } } } diff --git a/arch/s390/purgatory/Makefile b/arch/s390/purgatory/Makefile index 32573b4f9bd20b13eba7bb51220e344dcf238178..cc8cf5abea158c63449fb130bd3f369a10884e5d 100644 --- a/arch/s390/purgatory/Makefile +++ b/arch/s390/purgatory/Makefile @@ -26,6 +26,7 @@ KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding KBUILD_CFLAGS += -Os -m64 -msoft-float -fno-common KBUILD_CFLAGS += -fno-stack-protector +KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING KBUILD_CFLAGS += $(CLANG_FLAGS) KBUILD_CFLAGS += $(call cc-option,-fno-PIE) KBUILD_AFLAGS := $(filter-out -DCC_USING_EXPOLINE,$(KBUILD_AFLAGS)) diff --git a/arch/um/drivers/Makefile b/arch/um/drivers/Makefile index dee6f66353b334c6b55de0097409f2ce11cb7a26..a461a950f0518d55fc5ad95175520c6b45e8efe4 100644 --- a/arch/um/drivers/Makefile +++ b/arch/um/drivers/Makefile @@ -16,7 +16,8 @@ mconsole-objs := mconsole_kern.o mconsole_user.o hostaudio-objs := hostaudio_kern.o ubd-objs := ubd_kern.o ubd_user.o port-objs := port_kern.o port_user.o -harddog-objs := harddog_kern.o harddog_user.o +harddog-objs := harddog_kern.o +harddog-builtin-$(CONFIG_UML_WATCHDOG) := harddog_user.o harddog_user_exp.o rtc-objs := rtc_kern.o rtc_user.o LDFLAGS_pcap.o = $(shell $(CC) $(KBUILD_CFLAGS) -print-file-name=libpcap.a) @@ -60,6 +61,7 @@ obj-$(CONFIG_PTY_CHAN) += pty.o obj-$(CONFIG_TTY_CHAN) += tty.o obj-$(CONFIG_XTERM_CHAN) += xterm.o xterm_kern.o obj-$(CONFIG_UML_WATCHDOG) += harddog.o +obj-y += $(harddog-builtin-y) $(harddog-builtin-m) obj-$(CONFIG_BLK_DEV_COW_COMMON) += cow_user.o obj-$(CONFIG_UML_RANDOM) += random.o obj-$(CONFIG_VIRTIO_UML) += virtio_uml.o diff --git a/arch/um/drivers/harddog.h b/arch/um/drivers/harddog.h new file mode 100644 index 0000000000000000000000000000000000000000..6d9ea60e7133e8598ae732888ad5bf77ea944458 --- /dev/null +++ b/arch/um/drivers/harddog.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef UM_WATCHDOG_H +#define UM_WATCHDOG_H + +int start_watchdog(int *in_fd_ret, int *out_fd_ret, char *sock); +void stop_watchdog(int in_fd, int out_fd); +int ping_watchdog(int fd); + +#endif /* UM_WATCHDOG_H */ diff --git a/arch/um/drivers/harddog_kern.c b/arch/um/drivers/harddog_kern.c index e6d4f43deba82a77e20eaf15ea81639f0ddb37d1..60d1c6cab8a95197e2c14cc789ca5a8f9fa47496 100644 --- a/arch/um/drivers/harddog_kern.c +++ b/arch/um/drivers/harddog_kern.c @@ -47,6 +47,7 @@ #include #include #include "mconsole.h" +#include "harddog.h" MODULE_LICENSE("GPL"); @@ -60,8 +61,6 @@ static int harddog_out_fd = -1; * Allow only one person to hold it open */ -extern int start_watchdog(int 
*in_fd_ret, int *out_fd_ret, char *sock); - static int harddog_open(struct inode *inode, struct file *file) { int err = -EBUSY; @@ -92,8 +91,6 @@ err: return err; } -extern void stop_watchdog(int in_fd, int out_fd); - static int harddog_release(struct inode *inode, struct file *file) { /* @@ -112,8 +109,6 @@ static int harddog_release(struct inode *inode, struct file *file) return 0; } -extern int ping_watchdog(int fd); - static ssize_t harddog_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) { diff --git a/arch/um/drivers/harddog_user.c b/arch/um/drivers/harddog_user.c index 070468d22e3943f4d047aa8917776ee7f87b43db..9ed89304975ed351b264d5c068b70fd0094f44b2 100644 --- a/arch/um/drivers/harddog_user.c +++ b/arch/um/drivers/harddog_user.c @@ -7,6 +7,7 @@ #include #include #include +#include "harddog.h" struct dog_data { int stdin_fd; diff --git a/arch/um/drivers/harddog_user_exp.c b/arch/um/drivers/harddog_user_exp.c new file mode 100644 index 0000000000000000000000000000000000000000..c74d4b815d143d27e96f13672fe073faa5608cd0 --- /dev/null +++ b/arch/um/drivers/harddog_user_exp.c @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include "harddog.h" + +#if IS_MODULE(CONFIG_UML_WATCHDOG) +EXPORT_SYMBOL(start_watchdog); +EXPORT_SYMBOL(stop_watchdog); +EXPORT_SYMBOL(ping_watchdog); +#endif diff --git a/arch/x86/Makefile b/arch/x86/Makefile index b39975977c037c03acc0cdb1702bd7abbc47e535..fdc2e3abd6152f53530f687a3f9041c020065609 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -305,6 +305,18 @@ ifeq ($(RETPOLINE_CFLAGS),) endif endif +ifdef CONFIG_UNWINDER_ORC +orc_hash_h := arch/$(SRCARCH)/include/generated/asm/orc_hash.h +orc_hash_sh := $(srctree)/scripts/orc_hash.sh +targets += $(orc_hash_h) +quiet_cmd_orc_hash = GEN $@ + cmd_orc_hash = mkdir -p $(dir $@); \ + $(CONFIG_SHELL) $(orc_hash_sh) < $< > $@ +$(orc_hash_h): $(srctree)/arch/x86/include/asm/orc_types.h $(orc_hash_sh) FORCE + $(call if_changed,orc_hash) +archprepare: $(orc_hash_h) +endif + archclean: $(Q)rm -rf $(objtree)/arch/i386 $(Q)rm -rf $(objtree)/arch/x86_64 diff --git a/arch/x86/crypto/aria-aesni-avx-asm_64.S b/arch/x86/crypto/aria-aesni-avx-asm_64.S index 7c1abc513f34621eff4d0ee72db6a6c3dbb291f5..9556dacd984154a2b6ede8808321b8c6c692f347 100644 --- a/arch/x86/crypto/aria-aesni-avx-asm_64.S +++ b/arch/x86/crypto/aria-aesni-avx-asm_64.S @@ -773,8 +773,6 @@ .octa 0x3F893781E95FE1576CDA64D2BA0CB204 #ifdef CONFIG_AS_GFNI -.section .rodata.cst8, "aM", @progbits, 8 -.align 8 /* AES affine: */ #define tf_aff_const BV8(1, 1, 0, 0, 0, 1, 1, 0) .Ltf_aff_bitmatrix: diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 070cc4ef267210b4dc7a490f43cad45d5d3ab5c7..27f3a7b34bd52821def0a7d47b32fd059bd818b4 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -349,6 +349,16 @@ static struct event_constraint intel_spr_event_constraints[] = { EVENT_CONSTRAINT_END }; +static struct extra_reg intel_gnr_extra_regs[] __read_mostly = { + INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0), + INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1), + INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), + INTEL_UEVENT_EXTRA_REG(0x02c6, MSR_PEBS_FRONTEND, 0x9, FE), + INTEL_UEVENT_EXTRA_REG(0x03c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE), + INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE), + INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE), + EVENT_EXTRA_END +}; EVENT_ATTR_STR(mem-loads, mem_ld_nhm, 
"event=0x0b,umask=0x10,ldlat=3"); EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3"); @@ -4074,7 +4084,7 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data) if (x86_pmu.intel_cap.pebs_baseline) { arr[(*nr)++] = (struct perf_guest_switch_msr){ .msr = MSR_PEBS_DATA_CFG, - .host = cpuc->pebs_data_cfg, + .host = cpuc->active_pebs_data_cfg, .guest = kvm_pmu->pebs_data_cfg, }; } @@ -6496,6 +6506,7 @@ __init int intel_pmu_init(void) case INTEL_FAM6_SAPPHIRERAPIDS_X: case INTEL_FAM6_EMERALDRAPIDS_X: x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX; + x86_pmu.extra_regs = intel_spr_extra_regs; fallthrough; case INTEL_FAM6_GRANITERAPIDS_X: case INTEL_FAM6_GRANITERAPIDS_D: @@ -6506,7 +6517,8 @@ __init int intel_pmu_init(void) x86_pmu.event_constraints = intel_spr_event_constraints; x86_pmu.pebs_constraints = intel_spr_pebs_event_constraints; - x86_pmu.extra_regs = intel_spr_extra_regs; + if (!x86_pmu.extra_regs) + x86_pmu.extra_regs = intel_gnr_extra_regs; x86_pmu.limit_period = spr_limit_period; x86_pmu.pebs_ept = 1; x86_pmu.pebs_aliases = NULL; @@ -6650,6 +6662,7 @@ __init int intel_pmu_init(void) pmu->pebs_constraints = intel_grt_pebs_event_constraints; pmu->extra_regs = intel_grt_extra_regs; if (is_mtl(boot_cpu_data.x86_model)) { + x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].extra_regs = intel_gnr_extra_regs; x86_pmu.pebs_latency_data = mtl_latency_data_small; extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr; diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index fa9b209a11fabce2f57d585e9e08219182d947dd..d49e90dc04a4ccac259485982384f994c728c122 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -6150,6 +6150,7 @@ static struct intel_uncore_type spr_uncore_mdf = { }; #define UNCORE_SPR_NUM_UNCORE_TYPES 12 +#define UNCORE_SPR_CHA 0 #define UNCORE_SPR_IIO 1 #define UNCORE_SPR_IMC 6 #define UNCORE_SPR_UPI 8 @@ -6460,12 +6461,22 @@ static int uncore_type_max_boxes(struct intel_uncore_type **types, return max + 1; } +#define SPR_MSR_UNC_CBO_CONFIG 0x2FFE + void spr_uncore_cpu_init(void) { + struct intel_uncore_type *type; + u64 num_cbo; + uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR, UNCORE_SPR_MSR_EXTRA_UNCORES, spr_msr_uncores); + type = uncore_find_type_by_id(uncore_msr_uncores, UNCORE_SPR_CHA); + if (type) { + rdmsrl(SPR_MSR_UNC_CBO_CONFIG, num_cbo); + type->num_boxes = num_cbo; + } spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO); } diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index a5f9474f08e123fa6c703e29ecfe2049763fd2cc..6c04b52f139b5e54ec7e3363cdd3eba3cc3a16b8 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c @@ -416,7 +416,7 @@ void __init hyperv_init(void) goto free_vp_assist_page; } - cpuhp = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/hyperv_init:online", + cpuhp = cpuhp_setup_state(CPUHP_AP_HYPERV_ONLINE, "x86/hyperv_init:online", hv_cpu_init, hv_cpu_die); if (cpuhp < 0) goto free_ghcb_page; diff --git a/arch/x86/hyperv/hv_vtl.c b/arch/x86/hyperv/hv_vtl.c index 1ba5d3b99b1630bc373fe2f44cd9ffdae926b873..85d38b9f35861b65eafe3b4323bfae2f99e5fab3 100644 --- a/arch/x86/hyperv/hv_vtl.c +++ b/arch/x86/hyperv/hv_vtl.c @@ -20,6 +20,8 @@ void __init hv_vtl_init_platform(void) { pr_info("Linux runs in Hyper-V Virtual Trust Level\n"); + x86_platform.realmode_reserve = x86_init_noop; + x86_platform.realmode_init = x86_init_noop; 
x86_init.irqs.pre_vector_init = x86_init_noop; x86_init.timers.timer_init = x86_init_noop; diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild index 1e51650b79d7c87a9497ab79d9906ebae1ff83d1..4f1ce5fc4e194fc03ae6e36c03253f5f95ac1f95 100644 --- a/arch/x86/include/asm/Kbuild +++ b/arch/x86/include/asm/Kbuild @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 +generated-y += orc_hash.h generated-y += syscalls_32.h generated-y += syscalls_64.h generated-y += syscalls_x32.h diff --git a/arch/x86/include/asm/fpu/sched.h b/arch/x86/include/asm/fpu/sched.h index c2d6cd78ed0c2986181217a184e8d52f7e72c534..78fcde7b1f0700ee1e12cba4871edf7d3efed5f1 100644 --- a/arch/x86/include/asm/fpu/sched.h +++ b/arch/x86/include/asm/fpu/sched.h @@ -39,7 +39,7 @@ extern void fpu_flush_thread(void); static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu) { if (cpu_feature_enabled(X86_FEATURE_FPU) && - !(current->flags & (PF_KTHREAD | PF_IO_WORKER))) { + !(current->flags & (PF_KTHREAD | PF_USER_WORKER))) { save_fpregs_to_fpstate(old_fpu); /* * The save operation preserved register state, so the diff --git a/arch/x86/include/asm/orc_header.h b/arch/x86/include/asm/orc_header.h new file mode 100644 index 0000000000000000000000000000000000000000..07bacf3e160eaceecbcb5a49ae70c5bce804847a --- /dev/null +++ b/arch/x86/include/asm/orc_header.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ + +#ifndef _ORC_HEADER_H +#define _ORC_HEADER_H + +#include +#include +#include + +/* + * The header is currently a 20-byte hash of the ORC entry definition; see + * scripts/orc_hash.sh. + */ +#define ORC_HEADER \ + __used __section(".orc_header") __aligned(4) \ + static const u8 orc_header[] = { ORC_HASH } + +#endif /* _ORC_HEADER_H */ diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 498dc600bd5c8ef55a4de29ecb87396d4d192859..0d02c4aafa6f49b2e981ac975610095844a815d3 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -13,7 +13,9 @@ #include +#include #include + #include #include diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index dd61752f4c966bf0db57abb92472f8c4ae4c0c82..4070a01c11b72af04f429dee966c495275a7dfb6 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -17,6 +17,7 @@ CFLAGS_REMOVE_ftrace.o = -pg CFLAGS_REMOVE_early_printk.o = -pg CFLAGS_REMOVE_head64.o = -pg CFLAGS_REMOVE_sev.o = -pg +CFLAGS_REMOVE_rethook.o = -pg endif KASAN_SANITIZE_head$(BITS).o := n diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index 6bde05a86b4edde5b0ea91b4f088424a97c4a216..896bc41cb2ba7fe6ab89377297e37c4954e39895 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c @@ -97,7 +97,10 @@ static void init_x2apic_ldr(void) static int x2apic_phys_probe(void) { - if (x2apic_mode && (x2apic_phys || x2apic_fadt_phys())) + if (!x2apic_mode) + return 0; + + if (x2apic_phys || x2apic_fadt_phys()) return 1; return apic == &apic_x2apic_phys; diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c index 5e868b62a7c4e4c466fadd8f858e004a9ba76e27..0270925fe013bcf9bbe76e589c93a35a6ddbfde5 100644 --- a/arch/x86/kernel/cpu/topology.c +++ b/arch/x86/kernel/cpu/topology.c @@ -79,7 +79,7 @@ int detect_extended_topology_early(struct cpuinfo_x86 *c) * initial apic id, which also represents 32-bit extended x2apic id. 
*/ c->initial_apicid = edx; - smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx); + smp_num_siblings = max_t(int, smp_num_siblings, LEVEL_MAX_SIBLINGS(ebx)); #endif return 0; } @@ -109,7 +109,8 @@ int detect_extended_topology(struct cpuinfo_x86 *c) */ cpuid_count(leaf, SMT_LEVEL, &eax, &ebx, &ecx, &edx); c->initial_apicid = edx; - core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx); + core_level_siblings = LEVEL_MAX_SIBLINGS(ebx); + smp_num_siblings = max_t(int, smp_num_siblings, LEVEL_MAX_SIBLINGS(ebx)); core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); die_level_siblings = LEVEL_MAX_SIBLINGS(ebx); pkg_mask_width = die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 0bf6779187dda158a8deba21e14215d2b0cbddb7..f18ca44c904b747e68a3fdf0f7926a7025da71b7 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -195,7 +195,6 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, printk("%sCall Trace:\n", log_lvl); unwind_start(&state, task, regs, stack); - stack = stack ? : get_stack_pointer(task, regs); regs = unwind_get_entry_regs(&state, &partial); /* @@ -214,9 +213,13 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, * - hardirq stack * - entry stack */ - for ( ; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) { + for (stack = stack ?: get_stack_pointer(task, regs); + stack; + stack = stack_info.next_sp) { const char *stack_name; + stack = PTR_ALIGN(stack, sizeof(long)); + if (get_stack_info(stack, task, &stack_info, &visit_mask)) { /* * We weren't on a valid stack. It's possible that diff --git a/arch/x86/kernel/fpu/context.h b/arch/x86/kernel/fpu/context.h index 9fcfa5c4dad799c99cafff1a0b6c4b9ad958af74..af5cbdd9bd29a2b496d349a439071b75b88f8d61 100644 --- a/arch/x86/kernel/fpu/context.h +++ b/arch/x86/kernel/fpu/context.h @@ -57,7 +57,7 @@ static inline void fpregs_restore_userregs(void) struct fpu *fpu = ¤t->thread.fpu; int cpu = smp_processor_id(); - if (WARN_ON_ONCE(current->flags & (PF_KTHREAD | PF_IO_WORKER))) + if (WARN_ON_ONCE(current->flags & (PF_KTHREAD | PF_USER_WORKER))) return; if (!fpregs_state_valid(fpu, cpu)) { diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index caf33486dc5ee5b39bc4bfb1032d233cc1f0e09d..1015af1ae562bff33925f2e3266517d8c365296c 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -426,7 +426,7 @@ void kernel_fpu_begin_mask(unsigned int kfpu_mask) this_cpu_write(in_kernel_fpu, true); - if (!(current->flags & (PF_KTHREAD | PF_IO_WORKER)) && + if (!(current->flags & (PF_KTHREAD | PF_USER_WORKER)) && !test_thread_flag(TIF_NEED_FPU_LOAD)) { set_thread_flag(TIF_NEED_FPU_LOAD); save_fpregs_to_fpstate(¤t->thread.fpu); diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index a5df3e994f04f10f2c9fb814bf6f952a6452ae7a..113c13376e5125c05212dc1808ff44c9bf9df43c 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -77,6 +77,15 @@ SYM_CODE_START_NOALIGN(startup_64) call startup_64_setup_env popq %rsi + /* Now switch to __KERNEL_CS so IRET works reliably */ + pushq $__KERNEL_CS + leaq .Lon_kernel_cs(%rip), %rax + pushq %rax + lretq + +.Lon_kernel_cs: + UNWIND_HINT_END_OF_STACK + #ifdef CONFIG_AMD_MEM_ENCRYPT /* * Activate SEV/SME memory encryption if supported/enabled. 
This needs to @@ -90,15 +99,6 @@ SYM_CODE_START_NOALIGN(startup_64) popq %rsi #endif - /* Now switch to __KERNEL_CS so IRET works reliably */ - pushq $__KERNEL_CS - leaq .Lon_kernel_cs(%rip), %rax - pushq %rax - lretq - -.Lon_kernel_cs: - UNWIND_HINT_END_OF_STACK - /* Sanitize CPU configuration */ call verify_cpu diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index 3ac50b7298d15a82114663effa26cf8c13d99a8a..4d8e518365f44dd92529c35b0e727ae3c35c52ed 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c @@ -7,6 +7,9 @@ #include #include #include +#include + +ORC_HEADER; #define orc_warn(fmt, ...) \ printk_deferred_once(KERN_WARNING "WARNING: " fmt, ##__VA_ARGS__) diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 123bf8b97a4b21098a8271320f711cecf2eae5ec..0c9660a07b233ff404147fa6acda76668241f7ee 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -253,7 +253,6 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e int nent) { struct kvm_cpuid_entry2 *best; - u64 guest_supported_xcr0 = cpuid_get_supported_xcr0(entries, nent); best = cpuid_entry2_find(entries, nent, 1, KVM_CPUID_INDEX_NOT_SIGNIFICANT); if (best) { @@ -292,21 +291,6 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e vcpu->arch.ia32_misc_enable_msr & MSR_IA32_MISC_ENABLE_MWAIT); } - - /* - * Bits 127:0 of the allowed SECS.ATTRIBUTES (CPUID.0x12.0x1) enumerate - * the supported XSAVE Feature Request Mask (XFRM), i.e. the enclave's - * requested XCR0 value. The enclave's XFRM must be a subset of XCRO - * at the time of EENTER, thus adjust the allowed XFRM by the guest's - * supported XCR0. Similar to XCR0 handling, FP and SSE are forced to - * '1' even on CPUs that don't support XSAVE. - */ - best = cpuid_entry2_find(entries, nent, 0x12, 0x1); - if (best) { - best->ecx &= guest_supported_xcr0 & 0xffffffff; - best->edx &= guest_supported_xcr0 >> 32; - best->ecx |= XFEATURE_MASK_FPSSE; - } } void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index e542cf285b51842099332f594f4836836bda56d4..3c300a196bdf945ea7354cc370b4cd0aad515c61 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -228,6 +228,23 @@ static int kvm_recalculate_phys_map(struct kvm_apic_map *new, u32 xapic_id = kvm_xapic_id(apic); u32 physical_id; + /* + * For simplicity, KVM always allocates enough space for all possible + * xAPIC IDs. Yell, but don't kill the VM, as KVM can continue on + * without the optimized map. + */ + if (WARN_ON_ONCE(xapic_id > new->max_apic_id)) + return -EINVAL; + + /* + * Bail if a vCPU was added and/or enabled its APIC between allocating + * the map and doing the actual calculations for the map. Note, KVM + * hardcodes the x2APIC ID to vcpu_id, i.e. there's no TOCTOU bug if + * the compiler decides to reload x2apic_id after this check. + */ + if (x2apic_id > new->max_apic_id) + return -E2BIG; + /* * Deliberately truncate the vCPU ID when detecting a mismatched APIC * ID to avoid false positives if the vCPU ID, i.e. x2APIC ID, is a @@ -253,8 +270,7 @@ static int kvm_recalculate_phys_map(struct kvm_apic_map *new, */ if (vcpu->kvm->arch.x2apic_format) { /* See also kvm_apic_match_physical_addr(). 
*/ - if ((apic_x2apic_mode(apic) || x2apic_id > 0xff) && - x2apic_id <= new->max_apic_id) + if (apic_x2apic_mode(apic) || x2apic_id > 0xff) new->phys_map[x2apic_id] = apic; if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id]) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index c8961f45e3b1c1c01108f2830da0aad9624601b2..6eaa3d6994aebea7ecc344626adce9d0d062a31b 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -7091,7 +7091,10 @@ static void kvm_recover_nx_huge_pages(struct kvm *kvm) */ slot = NULL; if (atomic_read(&kvm->nr_memslots_dirty_logging)) { - slot = gfn_to_memslot(kvm, sp->gfn); + struct kvm_memslots *slots; + + slots = kvm_memslots_for_spte_role(kvm, sp->role); + slot = __gfn_to_memslot(slots, sp->gfn); WARN_ON_ONCE(!slot); } diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index ca32389f3c36bae9d4c2d29c7d810fde12bb82ff..54089f990c8f83292eecede362b19269c8756900 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -3510,7 +3510,7 @@ static bool svm_is_vnmi_pending(struct kvm_vcpu *vcpu) if (!is_vnmi_enabled(svm)) return false; - return !!(svm->vmcb->control.int_ctl & V_NMI_BLOCKING_MASK); + return !!(svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK); } static bool svm_set_vnmi_pending(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c index 0574030b071fbfea9ed7943da13c5d32942b4b14..2261b684a7d4475ab78107114aa5cb8dd5b2179e 100644 --- a/arch/x86/kvm/vmx/sgx.c +++ b/arch/x86/kvm/vmx/sgx.c @@ -170,12 +170,19 @@ static int __handle_encls_ecreate(struct kvm_vcpu *vcpu, return 1; } - /* Enforce CPUID restrictions on MISCSELECT, ATTRIBUTES and XFRM. */ + /* + * Enforce CPUID restrictions on MISCSELECT, ATTRIBUTES and XFRM. Note + * that the allowed XFRM (XFeature Request Mask) isn't strictly bound + * by the supported XCR0. FP+SSE *must* be set in XFRM, even if XSAVE + * is unsupported, i.e. even if XCR0 itself is completely unsupported. + */ if ((u32)miscselect & ~sgx_12_0->ebx || (u32)attributes & ~sgx_12_1->eax || (u32)(attributes >> 32) & ~sgx_12_1->ebx || (u32)xfrm & ~sgx_12_1->ecx || - (u32)(xfrm >> 32) & ~sgx_12_1->edx) { + (u32)(xfrm >> 32) & ~sgx_12_1->edx || + xfrm & ~(vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE) || + (xfrm & XFEATURE_MASK_FPSSE) != XFEATURE_MASK_FPSSE) { kvm_inject_gp(vcpu, 0); return 1; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ceb7c5e9cf9e9235a63cee5f909ba2a0ae41d5b3..04b57a336b34e5fc5224c47e5e8f23f794e98729 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1446,7 +1446,7 @@ static const u32 msrs_to_save_base[] = { #endif MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA, MSR_IA32_FEAT_CTL, MSR_IA32_BNDCFGS, MSR_TSC_AUX, - MSR_IA32_SPEC_CTRL, + MSR_IA32_SPEC_CTRL, MSR_IA32_TSX_CTRL, MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH, MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK, MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B, @@ -7155,6 +7155,10 @@ static void kvm_probe_msr_to_save(u32 msr_index) if (!kvm_cpu_cap_has(X86_FEATURE_XFD)) return; break; + case MSR_IA32_TSX_CTRL: + if (!(kvm_get_arch_capabilities() & ARCH_CAP_TSX_CTRL_MSR)) + return; + break; default: break; } @@ -10754,6 +10758,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED; break; } + + /* Note, VM-Exits that go down the "slow" path are accounted below. 
*/ + ++vcpu->stat.exits; } /* diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index 4fc5c2de2de467c689ddf91eb1684eeb00aeb9db..01c5de4c279b8fe8cf0fcd914f4cb4ec31945bc8 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S @@ -7,6 +7,8 @@ */ #include +#include +#include #include #include @@ -29,7 +31,7 @@ */ SYM_FUNC_START(rep_movs_alternative) cmpq $64,%rcx - jae .Lunrolled + jae .Llarge cmp $8,%ecx jae .Lword @@ -65,6 +67,12 @@ SYM_FUNC_START(rep_movs_alternative) _ASM_EXTABLE_UA( 2b, .Lcopy_user_tail) _ASM_EXTABLE_UA( 3b, .Lcopy_user_tail) +.Llarge: +0: ALTERNATIVE "jmp .Lunrolled", "rep movsb", X86_FEATURE_ERMS +1: RET + + _ASM_EXTABLE_UA( 0b, 1b) + .p2align 4 .Lunrolled: 10: movq (%rsi),%r8 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 3cdac0f0055deef186eb5a84d751f0ecb7058b9d..8192452d1d2d3569dddfb109e74e6b83d8b5570d 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -9,6 +9,7 @@ #include #include +#include #include #include #include @@ -261,6 +262,24 @@ static void __init probe_page_size_mask(void) } } +#define INTEL_MATCH(_model) { .vendor = X86_VENDOR_INTEL, \ + .family = 6, \ + .model = _model, \ + } +/* + * INVLPG may not properly flush Global entries + * on these CPUs when PCIDs are enabled. + */ +static const struct x86_cpu_id invlpg_miss_ids[] = { + INTEL_MATCH(INTEL_FAM6_ALDERLAKE ), + INTEL_MATCH(INTEL_FAM6_ALDERLAKE_L ), + INTEL_MATCH(INTEL_FAM6_ALDERLAKE_N ), + INTEL_MATCH(INTEL_FAM6_RAPTORLAKE ), + INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_P), + INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_S), + {} +}; + static void setup_pcid(void) { if (!IS_ENABLED(CONFIG_X86_64)) @@ -269,6 +288,12 @@ static void setup_pcid(void) if (!boot_cpu_has(X86_FEATURE_PCID)) return; + if (x86_match_cpu(invlpg_miss_ids)) { + pr_info("Incomplete global flushes, disabling PCID"); + setup_clear_cpu_cap(X86_FEATURE_PCID); + return; + } + if (boot_cpu_has(X86_FEATURE_PGE)) { /* * This can't be cr4_set_bits_and_update_boot() -- the diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c index 557f0fe25dff47641f03a86e5166a03f19627901..37db264866b6489ef99325d2fe1604d6cd3c9885 100644 --- a/arch/x86/mm/kaslr.c +++ b/arch/x86/mm/kaslr.c @@ -172,10 +172,10 @@ void __meminit init_trampoline_kaslr(void) set_p4d(p4d_tramp, __p4d(_KERNPG_TABLE | __pa(pud_page_tramp))); - set_pgd(&trampoline_pgd_entry, - __pgd(_KERNPG_TABLE | __pa(p4d_page_tramp))); + trampoline_pgd_entry = + __pgd(_KERNPG_TABLE | __pa(p4d_page_tramp)); } else { - set_pgd(&trampoline_pgd_entry, - __pgd(_KERNPG_TABLE | __pa(pud_page_tramp))); + trampoline_pgd_entry = + __pgd(_KERNPG_TABLE | __pa(pud_page_tramp)); } } diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 1056bbf55b172c4e1ad1992be7b53b34ceef9156..438adb695daab16cb60cc9239ce8107796a836d7 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -2570,7 +2570,7 @@ out_image: } if (bpf_jit_enable > 1) - bpf_jit_dump(prog->len, proglen, pass + 1, image); + bpf_jit_dump(prog->len, proglen, pass + 1, rw_image); if (image) { if (!prog->is_func || extra_pass) { diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index 8babce71915fe514aa24cfdb25f36c98ddc90369..014c508e914d9816bb062621554c65231468870a 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c @@ -198,7 +198,7 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) i++; } kfree(v); - return 0; + return msi_device_populate_sysfs(&dev->dev); error: if (ret == -ENOSYS) @@ -254,7 +254,7 @@ static int 
xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) dev_dbg(&dev->dev, "xen: msi --> pirq=%d --> irq=%d\n", pirq, irq); } - return 0; + return msi_device_populate_sysfs(&dev->dev); error: dev_err(&dev->dev, "Failed to create MSI%s! ret=%d!\n", @@ -346,7 +346,7 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) if (ret < 0) goto out; } - ret = 0; + ret = msi_device_populate_sysfs(&dev->dev); out: return ret; } @@ -394,6 +394,8 @@ static void xen_teardown_msi_irqs(struct pci_dev *dev) xen_destroy_irq(msidesc->irq + i); msidesc->irq = 0; } + + msi_device_destroy_sysfs(&dev->dev); } static void xen_pv_teardown_msi_irqs(struct pci_dev *dev) diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile index 82fec66d46d29ee7e41e392fd5f22c524ad4f727..42abd6af119846977f7e8eea870bef761876a480 100644 --- a/arch/x86/purgatory/Makefile +++ b/arch/x86/purgatory/Makefile @@ -14,6 +14,11 @@ $(obj)/sha256.o: $(srctree)/lib/crypto/sha256.c FORCE CFLAGS_sha256.o := -D__DISABLE_EXPORTS +# When profile-guided optimization is enabled, llvm emits two different +# overlapping text sections, which is not supported by kexec. Remove profile +# optimization flags. +KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%,$(KBUILD_CFLAGS)) + # When linking purgatory.ro with -r unresolved symbols are not checked, # also link a purgatory.chk binary without -r to check for unresolved symbols. PURGATORY_LDFLAGS := -e purgatory_start -z nodefaultlib diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c index 876d5df157ed986ff3d919aba15fca177806d56e..5c01d7e70d90da4a0f495273180005cf235f4a7c 100644 --- a/arch/xtensa/kernel/signal.c +++ b/arch/xtensa/kernel/signal.c @@ -343,7 +343,19 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set, struct rt_sigframe *frame; int err = 0, sig = ksig->sig; unsigned long sp, ra, tp, ps; + unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler; + unsigned long handler_fdpic_GOT = 0; unsigned int base; + bool fdpic = IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) && + (current->personality & FDPIC_FUNCPTRS); + + if (fdpic) { + unsigned long __user *fdpic_func_desc = + (unsigned long __user *)handler; + if (__get_user(handler, &fdpic_func_desc[0]) || + __get_user(handler_fdpic_GOT, &fdpic_func_desc[1])) + return -EFAULT; + } sp = regs->areg[1]; @@ -373,20 +385,26 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set, err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (ksig->ka.sa.sa_flags & SA_RESTORER) { - ra = (unsigned long)ksig->ka.sa.sa_restorer; + if (fdpic) { + unsigned long __user *fdpic_func_desc = + (unsigned long __user *)ksig->ka.sa.sa_restorer; + + err |= __get_user(ra, fdpic_func_desc); + } else { + ra = (unsigned long)ksig->ka.sa.sa_restorer; + } } else { /* Create sys_rt_sigreturn syscall in stack frame */ err |= gen_return_code(frame->retcode); - - if (err) { - return -EFAULT; - } ra = (unsigned long) frame->retcode; } - /* + if (err) + return -EFAULT; + + /* * Create signal handler execution context. * Return context not modified until this point. 
*/ @@ -394,8 +412,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set, /* Set up registers for signal handler; preserve the threadptr */ tp = regs->threadptr; ps = regs->ps; - start_thread(regs, (unsigned long) ksig->ka.sa.sa_handler, - (unsigned long) frame); + start_thread(regs, handler, (unsigned long)frame); /* Set up a stack frame for a call4 if userspace uses windowed ABI */ if (ps & PS_WOE_MASK) { @@ -413,6 +430,8 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set, regs->areg[base + 4] = (unsigned long) &frame->uc; regs->threadptr = tp; regs->ps = ps; + if (fdpic) + regs->areg[base + 11] = handler_fdpic_GOT; pr_debug("SIG rt deliver (%s:%d): signal=%d sp=%p pc=%08lx\n", current->comm, current->pid, sig, frame, regs->pc); diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c index 2a31b1ab0c9f20ed800b3f2490afe5e33b9bd579..17a7ef86fd0dd0c67c4ef0d25576ac1ce106620f 100644 --- a/arch/xtensa/kernel/xtensa_ksyms.c +++ b/arch/xtensa/kernel/xtensa_ksyms.c @@ -56,6 +56,8 @@ EXPORT_SYMBOL(empty_zero_page); */ extern long long __ashrdi3(long long, int); extern long long __ashldi3(long long, int); +extern long long __bswapdi2(long long); +extern int __bswapsi2(int); extern long long __lshrdi3(long long, int); extern int __divsi3(int, int); extern int __modsi3(int, int); @@ -66,6 +68,8 @@ extern unsigned long long __umulsidi3(unsigned int, unsigned int); EXPORT_SYMBOL(__ashldi3); EXPORT_SYMBOL(__ashrdi3); +EXPORT_SYMBOL(__bswapdi2); +EXPORT_SYMBOL(__bswapsi2); EXPORT_SYMBOL(__lshrdi3); EXPORT_SYMBOL(__divsi3); EXPORT_SYMBOL(__modsi3); diff --git a/arch/xtensa/lib/Makefile b/arch/xtensa/lib/Makefile index 7ecef0519a27caac48855896aa6042959f315c56..c9c2614188f74c788a566a1c58b24f3b68721163 100644 --- a/arch/xtensa/lib/Makefile +++ b/arch/xtensa/lib/Makefile @@ -4,7 +4,7 @@ # lib-y += memcopy.o memset.o checksum.o \ - ashldi3.o ashrdi3.o lshrdi3.o \ + ashldi3.o ashrdi3.o bswapdi2.o bswapsi2.o lshrdi3.o \ divsi3.o udivsi3.o modsi3.o umodsi3.o mulsi3.o umulsidi3.o \ usercopy.o strncpy_user.o strnlen_user.o lib-$(CONFIG_PCI) += pci-auto.o diff --git a/arch/xtensa/lib/bswapdi2.S b/arch/xtensa/lib/bswapdi2.S new file mode 100644 index 0000000000000000000000000000000000000000..d8e52e05eba664fe80a41a6ee20468eab878b1a2 --- /dev/null +++ b/arch/xtensa/lib/bswapdi2.S @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */ +#include +#include +#include + +ENTRY(__bswapdi2) + + abi_entry_default + ssai 8 + srli a4, a2, 16 + src a4, a4, a2 + src a4, a4, a4 + src a4, a2, a4 + srli a2, a3, 16 + src a2, a2, a3 + src a2, a2, a2 + src a2, a3, a2 + mov a3, a4 + abi_ret_default + +ENDPROC(__bswapdi2) diff --git a/arch/xtensa/lib/bswapsi2.S b/arch/xtensa/lib/bswapsi2.S new file mode 100644 index 0000000000000000000000000000000000000000..9c1de1344f79ad7c71a6dcfcb73a0dd72c302328 --- /dev/null +++ b/arch/xtensa/lib/bswapsi2.S @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */ +#include +#include +#include + +ENTRY(__bswapsi2) + + abi_entry_default + ssai 8 + srli a3, a2, 16 + src a3, a3, a2 + src a3, a3, a3 + src a2, a2, a3 + abi_ret_default + +ENDPROC(__bswapsi2) diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 0ce64dd73cfe71489cce5dba5672a66b0052d4e0..dce1548a7a0c3e3160c3660189e01399bab402e5 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -34,6 +34,8 @@ #include "blk-ioprio.h" #include "blk-throttle.h" +static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu); + /* * 
blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation. * blkcg_pol_register_mutex nests outside of it and synchronizes entire @@ -56,6 +58,8 @@ static LIST_HEAD(all_blkcgs); /* protected by blkcg_pol_mutex */ bool blkcg_debug_stats = false; +static DEFINE_RAW_SPINLOCK(blkg_stat_lock); + #define BLKG_DESTROY_BATCH_SIZE 64 /* @@ -163,10 +167,20 @@ static void blkg_free(struct blkcg_gq *blkg) static void __blkg_release(struct rcu_head *rcu) { struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head); + struct blkcg *blkcg = blkg->blkcg; + int cpu; #ifdef CONFIG_BLK_CGROUP_PUNT_BIO WARN_ON(!bio_list_empty(&blkg->async_bios)); #endif + /* + * Flush all the non-empty percpu lockless lists before releasing + * us, given these stat belongs to us. + * + * blkg_stat_lock is for serializing blkg stat update + */ + for_each_possible_cpu(cpu) + __blkcg_rstat_flush(blkcg, cpu); /* release the blkcg and parent blkg refs this blkg has been holding */ css_put(&blkg->blkcg->css); @@ -951,16 +965,12 @@ static void blkcg_iostat_update(struct blkcg_gq *blkg, struct blkg_iostat *cur, u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags); } -static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu) +static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu) { - struct blkcg *blkcg = css_to_blkcg(css); struct llist_head *lhead = per_cpu_ptr(blkcg->lhead, cpu); struct llist_node *lnode; struct blkg_iostat_set *bisc, *next_bisc; - - /* Root-level stats are sourced from system-wide IO stats */ - if (!cgroup_parent(css->cgroup)) - return; + unsigned long flags; rcu_read_lock(); @@ -968,6 +978,14 @@ static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu) if (!lnode) goto out; + /* + * For covering concurrent parent blkg update from blkg_release(). + * + * When flushing from cgroup, cgroup_rstat_lock is always held, so + * this lock won't cause contention most of time. + */ + raw_spin_lock_irqsave(&blkg_stat_lock, flags); + /* * Iterate only the iostat_cpu's queued in the lockless list. 
*/ @@ -991,13 +1009,19 @@ static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu) if (parent && parent->parent) blkcg_iostat_update(parent, &blkg->iostat.cur, &blkg->iostat.last); - percpu_ref_put(&blkg->refcnt); } - + raw_spin_unlock_irqrestore(&blkg_stat_lock, flags); out: rcu_read_unlock(); } +static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu) +{ + /* Root-level stats are sourced from system-wide IO stats */ + if (cgroup_parent(css->cgroup)) + __blkcg_rstat_flush(css_to_blkcg(css), cpu); +} + /* * We source root cgroup stats from the system-wide stats to avoid * tracking the same information twice and incurring overhead when no @@ -2075,7 +2099,6 @@ void blk_cgroup_bio_start(struct bio *bio) llist_add(&bis->lnode, lhead); WRITE_ONCE(bis->lqueued, true); - percpu_ref_get(&bis->blkg->refcnt); } u64_stats_update_end_irqrestore(&bis->sync, flags); diff --git a/block/blk-core.c b/block/blk-core.c index 00c74330fa92c221422a629361607068a65eda99..1da77e7d62894651d3e307dbd1be1930983baad8 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -520,7 +520,7 @@ static inline int bio_check_eod(struct bio *bio) sector_t maxsector = bdev_nr_sectors(bio->bi_bdev); unsigned int nr_sectors = bio_sectors(bio); - if (nr_sectors && maxsector && + if (nr_sectors && (nr_sectors > maxsector || bio->bi_iter.bi_sector > maxsector - nr_sectors)) { pr_info_ratelimited("%s: attempt to access beyond end of device\n" diff --git a/block/blk-map.c b/block/blk-map.c index 04c55f1c492eb144b332139478b5cc390dbbf075..46eed2e627c363a4ab6a3797b538380be6a45ef6 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -248,7 +248,7 @@ static struct bio *blk_rq_map_bio_alloc(struct request *rq, { struct bio *bio; - if (rq->cmd_flags & REQ_ALLOC_CACHE) { + if (rq->cmd_flags & REQ_ALLOC_CACHE && (nr_vecs <= BIO_INLINE_VECS)) { bio = bio_alloc_bioset(NULL, nr_vecs, rq->cmd_flags, gfp_mask, &fs_bio_set); if (!bio) diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index d6af9d431dc631989dce6709971d1fc042e89dc1..dfd81cab57888b33a39b7b8b8e7d8d1c3bae0506 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -39,16 +39,20 @@ void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) { unsigned int users; + /* + * calling test_bit() prior to test_and_set_bit() is intentional, + * it avoids dirtying the cacheline if the queue is already active. 
+ */ if (blk_mq_is_shared_tags(hctx->flags)) { struct request_queue *q = hctx->queue; - if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags)) + if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) || + test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags)) return; - set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags); } else { - if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) + if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) || + test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) return; - set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state); } users = atomic_inc_return(&hctx->tags->active_queues); diff --git a/block/blk-mq.c b/block/blk-mq.c index f6dad0886a2fa1bacac778c093717ba21e24b33e..850bfb844ed2f75b7c37774fe541a08cd59294c5 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -683,6 +683,10 @@ static void __blk_mq_free_request(struct request *rq) blk_crypto_free_request(rq); blk_pm_mark_last_busy(rq); rq->mq_hctx = NULL; + + if (rq->rq_flags & RQF_MQ_INFLIGHT) + __blk_mq_dec_active_requests(hctx); + if (rq->tag != BLK_MQ_NO_TAG) blk_mq_put_tag(hctx->tags, ctx, rq->tag); if (sched_tag != BLK_MQ_NO_TAG) @@ -694,15 +698,11 @@ static void __blk_mq_free_request(struct request *rq) void blk_mq_free_request(struct request *rq) { struct request_queue *q = rq->q; - struct blk_mq_hw_ctx *hctx = rq->mq_hctx; if ((rq->rq_flags & RQF_ELVPRIV) && q->elevator->type->ops.finish_request) q->elevator->type->ops.finish_request(rq); - if (rq->rq_flags & RQF_MQ_INFLIGHT) - __blk_mq_dec_active_requests(hctx); - if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq))) laptop_io_completion(q->disk->bdi); diff --git a/block/blk-settings.c b/block/blk-settings.c index 896b4654ab00fa700146678f2a0c1c4371f2ed7b..4dd59059b788eb3b8b45cc555716eec11bfcd995 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -915,6 +915,7 @@ static bool disk_has_partitions(struct gendisk *disk) void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model) { struct request_queue *q = disk->queue; + unsigned int old_model = q->limits.zoned; switch (model) { case BLK_ZONED_HM: @@ -952,7 +953,7 @@ void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model) */ blk_queue_zone_write_granularity(q, queue_logical_block_size(q)); - } else { + } else if (old_model != BLK_ZONED_NONE) { disk_clear_zone_settings(disk); } } diff --git a/block/blk-wbt.c b/block/blk-wbt.c index e49a4868453276744e02561c6bfef1dfe68aa2c2..9ec2a2f1eda3882b8d174b90c2e5f14ae5d07679 100644 --- a/block/blk-wbt.c +++ b/block/blk-wbt.c @@ -730,14 +730,16 @@ void wbt_enable_default(struct gendisk *disk) { struct request_queue *q = disk->queue; struct rq_qos *rqos; - bool disable_flag = q->elevator && - test_bit(ELEVATOR_FLAG_DISABLE_WBT, &q->elevator->flags); + bool enable = IS_ENABLED(CONFIG_BLK_WBT_MQ); + + if (q->elevator && + test_bit(ELEVATOR_FLAG_DISABLE_WBT, &q->elevator->flags)) + enable = false; /* Throttling already enabled? 
*/ rqos = wbt_rq_qos(q); if (rqos) { - if (!disable_flag && - RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT) + if (enable && RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT) RQWB(rqos)->enable_state = WBT_STATE_ON_DEFAULT; return; } @@ -746,7 +748,7 @@ void wbt_enable_default(struct gendisk *disk) if (!blk_queue_registered(q)) return; - if (queue_is_mq(q) && !disable_flag) + if (queue_is_mq(q) && enable) wbt_init(disk); } EXPORT_SYMBOL_GPL(wbt_enable_default); diff --git a/block/fops.c b/block/fops.c index d2e6be4e3d1c7da029d0a38bd18cf9b069f901e2..58d0aebc7313a838d4d895fd73f7c383290d75da 100644 --- a/block/fops.c +++ b/block/fops.c @@ -678,6 +678,16 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start, return error; } +static int blkdev_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct inode *bd_inode = bdev_file_inode(file); + + if (bdev_read_only(I_BDEV(bd_inode))) + return generic_file_readonly_mmap(file, vma); + + return generic_file_mmap(file, vma); +} + const struct file_operations def_blk_fops = { .open = blkdev_open, .release = blkdev_close, @@ -685,7 +695,7 @@ const struct file_operations def_blk_fops = { .read_iter = blkdev_read_iter, .write_iter = blkdev_write_iter, .iopoll = iocb_bio_iopoll, - .mmap = generic_file_mmap, + .mmap = blkdev_mmap, .fsync = blkdev_fsync, .unlocked_ioctl = blkdev_ioctl, #ifdef CONFIG_COMPAT diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index eca5671ad3f2288eefe2a07b31d50b1a5728a8ed..50c933f86b218cca0ffe07756272dadbcf9854a2 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -380,9 +380,10 @@ int public_key_verify_signature(const struct public_key *pkey, struct crypto_wait cwait; struct crypto_akcipher *tfm; struct akcipher_request *req; - struct scatterlist src_sg[2]; + struct scatterlist src_sg; char alg_name[CRYPTO_MAX_ALG_NAME]; - char *key, *ptr; + char *buf, *ptr; + size_t buf_len; int ret; pr_devel("==>%s()\n", __func__); @@ -420,34 +421,37 @@ int public_key_verify_signature(const struct public_key *pkey, if (!req) goto error_free_tfm; - key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen, - GFP_KERNEL); - if (!key) + buf_len = max_t(size_t, pkey->keylen + sizeof(u32) * 2 + pkey->paramlen, + sig->s_size + sig->digest_size); + + buf = kmalloc(buf_len, GFP_KERNEL); + if (!buf) goto error_free_req; - memcpy(key, pkey->key, pkey->keylen); - ptr = key + pkey->keylen; + memcpy(buf, pkey->key, pkey->keylen); + ptr = buf + pkey->keylen; ptr = pkey_pack_u32(ptr, pkey->algo); ptr = pkey_pack_u32(ptr, pkey->paramlen); memcpy(ptr, pkey->params, pkey->paramlen); if (pkey->key_is_private) - ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen); + ret = crypto_akcipher_set_priv_key(tfm, buf, pkey->keylen); else - ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen); + ret = crypto_akcipher_set_pub_key(tfm, buf, pkey->keylen); if (ret) - goto error_free_key; + goto error_free_buf; if (strcmp(pkey->pkey_algo, "sm2") == 0 && sig->data_size) { ret = cert_sig_digest_update(sig, tfm); if (ret) - goto error_free_key; + goto error_free_buf; } - sg_init_table(src_sg, 2); - sg_set_buf(&src_sg[0], sig->s, sig->s_size); - sg_set_buf(&src_sg[1], sig->digest, sig->digest_size); - akcipher_request_set_crypt(req, src_sg, NULL, sig->s_size, + memcpy(buf, sig->s, sig->s_size); + memcpy(buf + sig->s_size, sig->digest, sig->digest_size); + + sg_init_one(&src_sg, buf, sig->s_size + sig->digest_size); + akcipher_request_set_crypt(req, 
&src_sg, NULL, sig->s_size, sig->digest_size); crypto_init_wait(&cwait); akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | @@ -455,8 +459,8 @@ int public_key_verify_signature(const struct public_key *pkey, crypto_req_done, &cwait); ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait); -error_free_key: - kfree(key); +error_free_buf: + kfree(buf); error_free_req: akcipher_request_free(req); error_free_tfm: diff --git a/drivers/accel/ivpu/Kconfig b/drivers/accel/ivpu/Kconfig index 9bdf168bf1d0e789236f19d48970d304db5c1392..1a4c4ed9d11368fffd381d66f486aa53aa5d440b 100644 --- a/drivers/accel/ivpu/Kconfig +++ b/drivers/accel/ivpu/Kconfig @@ -7,6 +7,7 @@ config DRM_ACCEL_IVPU depends on PCI && PCI_MSI select FW_LOADER select SHMEM + select GENERIC_ALLOCATOR help Choose this option if you have a system that has an 14th generation Intel CPU or newer. VPU stands for Versatile Processing Unit and it's a CPU-integrated diff --git a/drivers/accel/ivpu/ivpu_hw_mtl.c b/drivers/accel/ivpu/ivpu_hw_mtl.c index 382ec127be8ea1081f3a4446a82553c536b40f0e..fef35422c6f0d17156eac76abc5f85f99e1b0a89 100644 --- a/drivers/accel/ivpu/ivpu_hw_mtl.c +++ b/drivers/accel/ivpu/ivpu_hw_mtl.c @@ -197,6 +197,11 @@ static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev) hw->pll.pn_ratio = clamp_t(u8, fuse_pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio); } +static int ivpu_hw_mtl_wait_for_vpuip_bar(struct ivpu_device *vdev) +{ + return REGV_POLL_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, AON, 0, 100); +} + static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable) { struct ivpu_hw_info *hw = vdev->hw; @@ -239,6 +244,12 @@ static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable) ivpu_err(vdev, "Timed out waiting for PLL ready status\n"); return ret; } + + ret = ivpu_hw_mtl_wait_for_vpuip_bar(vdev); + if (ret) { + ivpu_err(vdev, "Timed out waiting for VPUIP bar\n"); + return ret; + } } return 0; @@ -256,7 +267,7 @@ static int ivpu_pll_disable(struct ivpu_device *vdev) static void ivpu_boot_host_ss_rst_clr_assert(struct ivpu_device *vdev) { - u32 val = REGV_RD32(MTL_VPU_HOST_SS_CPR_RST_CLR); + u32 val = 0; val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, TOP_NOC, val); val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, DSS_MAS, val); @@ -754,9 +765,8 @@ static int ivpu_hw_mtl_power_down(struct ivpu_device *vdev) { int ret = 0; - if (ivpu_hw_mtl_reset(vdev)) { + if (!ivpu_hw_mtl_is_idle(vdev) && ivpu_hw_mtl_reset(vdev)) { ivpu_err(vdev, "Failed to reset the VPU\n"); - ret = -EIO; } if (ivpu_pll_disable(vdev)) { @@ -764,8 +774,10 @@ static int ivpu_hw_mtl_power_down(struct ivpu_device *vdev) ret = -EIO; } - if (ivpu_hw_mtl_d0i3_enable(vdev)) - ivpu_warn(vdev, "Failed to enable D0I3\n"); + if (ivpu_hw_mtl_d0i3_enable(vdev)) { + ivpu_err(vdev, "Failed to enter D0I3\n"); + ret = -EIO; + } return ret; } diff --git a/drivers/accel/ivpu/ivpu_hw_mtl_reg.h b/drivers/accel/ivpu/ivpu_hw_mtl_reg.h index d83ccfd9a871b6bde38b886797cc3b2a10f75060..593b8ff0741701a1006545882c37d0ff649c584c 100644 --- a/drivers/accel/ivpu/ivpu_hw_mtl_reg.h +++ b/drivers/accel/ivpu/ivpu_hw_mtl_reg.h @@ -91,6 +91,7 @@ #define MTL_VPU_HOST_SS_CPR_RST_SET_MSS_MAS_MASK BIT_MASK(11) #define MTL_VPU_HOST_SS_CPR_RST_CLR 0x00000098u +#define MTL_VPU_HOST_SS_CPR_RST_CLR_AON_MASK BIT_MASK(0) #define MTL_VPU_HOST_SS_CPR_RST_CLR_TOP_NOC_MASK BIT_MASK(1) #define MTL_VPU_HOST_SS_CPR_RST_CLR_DSS_MAS_MASK BIT_MASK(10) #define MTL_VPU_HOST_SS_CPR_RST_CLR_MSS_MAS_MASK BIT_MASK(11) diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c 
index 3adcfa80fc0e556a9e48c08e1a8869474d5a18a0..fa0af59e39ab66f0a0c20d8e4bfc4892c2ff7426 100644 --- a/drivers/accel/ivpu/ivpu_ipc.c +++ b/drivers/accel/ivpu/ivpu_ipc.c @@ -183,9 +183,7 @@ ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct v struct ivpu_ipc_info *ipc = vdev->ipc; int ret; - ret = mutex_lock_interruptible(&ipc->lock); - if (ret) - return ret; + mutex_lock(&ipc->lock); if (!ipc->on) { ret = -EAGAIN; diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c index 3c6f1e16cf2ff7a1090cd471cb84f93d26feba21..d45be0615b47621a2913be826c6cd6905f5250af 100644 --- a/drivers/accel/ivpu/ivpu_job.c +++ b/drivers/accel/ivpu/ivpu_job.c @@ -431,6 +431,7 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 struct ivpu_file_priv *file_priv = file->driver_priv; struct ivpu_device *vdev = file_priv->vdev; struct ww_acquire_ctx acquire_ctx; + enum dma_resv_usage usage; struct ivpu_bo *bo; int ret; u32 i; @@ -461,22 +462,28 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 job->cmd_buf_vpu_addr = bo->vpu_addr + commands_offset; - ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, 1, &acquire_ctx); + ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, buf_count, + &acquire_ctx); if (ret) { ivpu_warn(vdev, "Failed to lock reservations: %d\n", ret); return ret; } - ret = dma_resv_reserve_fences(bo->base.resv, 1); - if (ret) { - ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret); - goto unlock_reservations; + for (i = 0; i < buf_count; i++) { + ret = dma_resv_reserve_fences(job->bos[i]->base.resv, 1); + if (ret) { + ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret); + goto unlock_reservations; + } } - dma_resv_add_fence(bo->base.resv, job->done_fence, DMA_RESV_USAGE_WRITE); + for (i = 0; i < buf_count; i++) { + usage = (i == CMD_BUF_IDX) ? DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_BOOKKEEP; + dma_resv_add_fence(job->bos[i]->base.resv, job->done_fence, usage); + } unlock_reservations: - drm_gem_unlock_reservations((struct drm_gem_object **)job->bos, 1, &acquire_ctx); + drm_gem_unlock_reservations((struct drm_gem_object **)job->bos, buf_count, &acquire_ctx); wmb(); /* Flush write combining buffers */ diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c index 694e978aba6637bbc979e06343a32a1f36dda30b..b8b259b3aa635d02e53e5a127d05f9ab093fa5fa 100644 --- a/drivers/accel/ivpu/ivpu_mmu.c +++ b/drivers/accel/ivpu/ivpu_mmu.c @@ -587,16 +587,11 @@ static int ivpu_mmu_strtab_init(struct ivpu_device *vdev) int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid) { struct ivpu_mmu_info *mmu = vdev->mmu; - int ret; - - ret = mutex_lock_interruptible(&mmu->lock); - if (ret) - return ret; + int ret = 0; - if (!mmu->on) { - ret = 0; + mutex_lock(&mmu->lock); + if (!mmu->on) goto unlock; - } ret = ivpu_mmu_cmdq_write_tlbi_nh_asid(vdev, ssid); if (ret) @@ -614,7 +609,7 @@ static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma) struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab; u64 *entry; u64 cd[4]; - int ret; + int ret = 0; if (ssid > IVPU_MMU_CDTAB_ENT_COUNT) return -EINVAL; @@ -655,14 +650,9 @@ static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma) ivpu_dbg(vdev, MMU, "CDTAB %s entry (SSID=%u, dma=%pad): 0x%llx, 0x%llx, 0x%llx, 0x%llx\n", cd_dma ? 
"write" : "clear", ssid, &cd_dma, cd[0], cd[1], cd[2], cd[3]); - ret = mutex_lock_interruptible(&mmu->lock); - if (ret) - return ret; - - if (!mmu->on) { - ret = 0; + mutex_lock(&mmu->lock); + if (!mmu->on) goto unlock; - } ret = ivpu_mmu_cmdq_write_cfgi_all(vdev); if (ret) diff --git a/drivers/accel/qaic/qaic_control.c b/drivers/accel/qaic/qaic_control.c index 9f216eb6f76ed80eb0fa3706e83e291898374782..5c57f7b4494e482f46d920e34f265ae1cac0c1ba 100644 --- a/drivers/accel/qaic/qaic_control.c +++ b/drivers/accel/qaic/qaic_control.c @@ -997,14 +997,34 @@ static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u struct xfer_queue_elem elem; struct wire_msg *out_buf; struct wrapper_msg *w; + long ret = -EAGAIN; + int xfer_count = 0; int retry_count; - long ret; if (qdev->in_reset) { mutex_unlock(&qdev->cntl_mutex); return ERR_PTR(-ENODEV); } + /* Attempt to avoid a partial commit of a message */ + list_for_each_entry(w, &wrappers->list, list) + xfer_count++; + + for (retry_count = 0; retry_count < QAIC_MHI_RETRY_MAX; retry_count++) { + if (xfer_count <= mhi_get_free_desc_count(qdev->cntl_ch, DMA_TO_DEVICE)) { + ret = 0; + break; + } + msleep_interruptible(QAIC_MHI_RETRY_WAIT_MS); + if (signal_pending(current)) + break; + } + + if (ret) { + mutex_unlock(&qdev->cntl_mutex); + return ERR_PTR(ret); + } + elem.seq_num = seq_num; elem.buf = NULL; init_completion(&elem.xfer_done); @@ -1038,16 +1058,9 @@ static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u list_for_each_entry(w, &wrappers->list, list) { kref_get(&w->ref_count); retry_count = 0; -retry: ret = mhi_queue_buf(qdev->cntl_ch, DMA_TO_DEVICE, &w->msg, w->len, list_is_last(&w->list, &wrappers->list) ? MHI_EOT : MHI_CHAIN); if (ret) { - if (ret == -EAGAIN && retry_count++ < QAIC_MHI_RETRY_MAX) { - msleep_interruptible(QAIC_MHI_RETRY_WAIT_MS); - if (!signal_pending(current)) - goto retry; - } - qdev->cntl_lost_buf = true; kref_put(&w->ref_count, free_wrapper); mutex_unlock(&qdev->cntl_mutex); @@ -1249,7 +1262,7 @@ dma_cont_failed: int qaic_manage_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct qaic_manage_msg *user_msg; + struct qaic_manage_msg *user_msg = data; struct qaic_device *qdev; struct manage_msg *msg; struct qaic_user *usr; @@ -1258,6 +1271,9 @@ int qaic_manage_ioctl(struct drm_device *dev, void *data, struct drm_file *file_ int usr_rcu_id; int ret; + if (user_msg->len > QAIC_MANAGE_MAX_MSG_LENGTH) + return -EINVAL; + usr = file_priv->driver_priv; usr_rcu_id = srcu_read_lock(&usr->qddev_lock); @@ -1275,13 +1291,6 @@ int qaic_manage_ioctl(struct drm_device *dev, void *data, struct drm_file *file_ return -ENODEV; } - user_msg = data; - - if (user_msg->len > QAIC_MANAGE_MAX_MSG_LENGTH) { - ret = -EINVAL; - goto out; - } - msg = kzalloc(QAIC_MANAGE_MAX_MSG_LENGTH + sizeof(*msg), GFP_KERNEL); if (!msg) { ret = -ENOMEM; diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c index c0a574cd1b35c6b80696e0b3d46380eef5008e6f..e9a1cb779b3056aedb3cb561926d5472aab56d11 100644 --- a/drivers/accel/qaic/qaic_data.c +++ b/drivers/accel/qaic/qaic_data.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -591,7 +592,7 @@ static int qaic_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struc struct qaic_bo *bo = to_qaic_bo(obj); unsigned long offset = 0; struct scatterlist *sg; - int ret; + int ret = 0; if (obj->import_attach) return -EINVAL; @@ -616,8 +617,7 @@ static void qaic_free_object(struct drm_gem_object *obj) if 
(obj->import_attach) { /* DMABUF/PRIME Path */ - dma_buf_detach(obj->import_attach->dmabuf, obj->import_attach); - dma_buf_put(obj->import_attach->dmabuf); + drm_prime_gem_destroy(obj, NULL); } else { /* Private buffer allocation path */ qaic_free_sgt(bo->sgt); @@ -663,6 +663,10 @@ int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *fi if (args->pad) return -EINVAL; + size = PAGE_ALIGN(args->size); + if (size == 0) + return -EINVAL; + usr = file_priv->driver_priv; usr_rcu_id = srcu_read_lock(&usr->qddev_lock); if (!usr->qddev) { @@ -677,12 +681,6 @@ int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *fi goto unlock_dev_srcu; } - size = PAGE_ALIGN(args->size); - if (size == 0) { - ret = -EINVAL; - goto unlock_dev_srcu; - } - bo = qaic_alloc_init_bo(); if (IS_ERR(bo)) { ret = PTR_ERR(bo); @@ -926,8 +924,8 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi { struct qaic_attach_slice_entry *slice_ent; struct qaic_attach_slice *args = data; + int rcu_id, usr_rcu_id, qdev_rcu_id; struct dma_bridge_chan *dbc; - int usr_rcu_id, qdev_rcu_id; struct drm_gem_object *obj; struct qaic_device *qdev; unsigned long arg_size; @@ -936,6 +934,22 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi struct qaic_bo *bo; int ret; + if (args->hdr.count == 0) + return -EINVAL; + + arg_size = args->hdr.count * sizeof(*slice_ent); + if (arg_size / args->hdr.count != sizeof(*slice_ent)) + return -EINVAL; + + if (args->hdr.size == 0) + return -EINVAL; + + if (!(args->hdr.dir == DMA_TO_DEVICE || args->hdr.dir == DMA_FROM_DEVICE)) + return -EINVAL; + + if (args->data == 0) + return -EINVAL; + usr = file_priv->driver_priv; usr_rcu_id = srcu_read_lock(&usr->qddev_lock); if (!usr->qddev) { @@ -950,43 +964,11 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi goto unlock_dev_srcu; } - if (args->hdr.count == 0) { - ret = -EINVAL; - goto unlock_dev_srcu; - } - - arg_size = args->hdr.count * sizeof(*slice_ent); - if (arg_size / args->hdr.count != sizeof(*slice_ent)) { - ret = -EINVAL; - goto unlock_dev_srcu; - } - if (args->hdr.dbc_id >= qdev->num_dbc) { ret = -EINVAL; goto unlock_dev_srcu; } - if (args->hdr.size == 0) { - ret = -EINVAL; - goto unlock_dev_srcu; - } - - if (!(args->hdr.dir == DMA_TO_DEVICE || args->hdr.dir == DMA_FROM_DEVICE)) { - ret = -EINVAL; - goto unlock_dev_srcu; - } - - dbc = &qdev->dbc[args->hdr.dbc_id]; - if (dbc->usr != usr) { - ret = -EINVAL; - goto unlock_dev_srcu; - } - - if (args->data == 0) { - ret = -EINVAL; - goto unlock_dev_srcu; - } - user_data = u64_to_user_ptr(args->data); slice_ent = kzalloc(arg_size, GFP_KERNEL); @@ -1013,9 +995,21 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi bo = to_qaic_bo(obj); + if (bo->sliced) { + ret = -EINVAL; + goto put_bo; + } + + dbc = &qdev->dbc[args->hdr.dbc_id]; + rcu_id = srcu_read_lock(&dbc->ch_lock); + if (dbc->usr != usr) { + ret = -EINVAL; + goto unlock_ch_srcu; + } + ret = qaic_prepare_bo(qdev, bo, &args->hdr); if (ret) - goto put_bo; + goto unlock_ch_srcu; ret = qaic_attach_slicing_bo(qdev, bo, &args->hdr, slice_ent); if (ret) @@ -1025,6 +1019,7 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, args->hdr.dir); bo->dbc = dbc; + srcu_read_unlock(&dbc->ch_lock, rcu_id); drm_gem_object_put(obj); srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id); srcu_read_unlock(&usr->qddev_lock, 
usr_rcu_id); @@ -1033,6 +1028,8 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi unprepare_bo: qaic_unprepare_bo(qdev, bo); +unlock_ch_srcu: + srcu_read_unlock(&dbc->ch_lock, rcu_id); put_bo: drm_gem_object_put(obj); free_slice_ent: @@ -1316,7 +1313,6 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr received_ts = ktime_get_ns(); size = is_partial ? sizeof(*pexec) : sizeof(*exec); - n = (unsigned long)size * args->hdr.count; if (args->hdr.count == 0 || n / args->hdr.count != size) return -EINVAL; @@ -1665,6 +1661,9 @@ int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file int rcu_id; int ret; + if (args->pad != 0) + return -EINVAL; + usr = file_priv->driver_priv; usr_rcu_id = srcu_read_lock(&usr->qddev_lock); if (!usr->qddev) { @@ -1679,11 +1678,6 @@ int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file goto unlock_dev_srcu; } - if (args->pad != 0) { - ret = -EINVAL; - goto unlock_dev_srcu; - } - if (args->dbc_id >= qdev->num_dbc) { ret = -EINVAL; goto unlock_dev_srcu; @@ -1855,6 +1849,11 @@ void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id) dbc->usr = NULL; empty_xfer_list(qdev, dbc); synchronize_srcu(&dbc->ch_lock); + /* + * Threads holding the channel lock may add more elements to the xfer_list. + * Flush out these elements from the xfer_list. + */ + empty_xfer_list(qdev, dbc); } void release_dbc(struct qaic_device *qdev, u32 dbc_id) diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c index ff80eb5717290c7d72ed3585bd0dd982dfe0f9ce..b5ba550a0c0400e03d3875bcbc5095c73582c06d 100644 --- a/drivers/accel/qaic/qaic_drv.c +++ b/drivers/accel/qaic/qaic_drv.c @@ -97,6 +97,7 @@ static int qaic_open(struct drm_device *dev, struct drm_file *file) cleanup_usr: cleanup_srcu_struct(&usr->qddev_lock); + ida_free(&qaic_usrs, usr->handle); free_usr: kfree(usr); dev_unlock: @@ -224,6 +225,9 @@ static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id) struct qaic_user *usr; qddev = qdev->qddev; + qdev->qddev = NULL; + if (!qddev) + return; /* * Existing users get unresolvable errors till they close FDs.
@@ -262,8 +266,8 @@ static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id) static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id) { + u16 major = -1, minor = -1; struct qaic_device *qdev; - u16 major, minor; int ret; /* diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h index ebf8fd373cf7ea0e37cbd7f159acbb7ab7ac37e9..79bbfe00d241f94b73d7c777a6e80f66b65c6075 100644 --- a/drivers/acpi/acpica/achware.h +++ b/drivers/acpi/acpica/achware.h @@ -101,8 +101,6 @@ acpi_status acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info, acpi_event_status *event_status); -acpi_status acpi_hw_disable_all_gpes(void); - acpi_status acpi_hw_enable_all_runtime_gpes(void); acpi_status acpi_hw_enable_all_wakeup_gpes(void); diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h index 1d6ef96547252d510b570858d937bc7c222586fe..67c2c3b959e1538a645d43bda3449378c0051ed8 100644 --- a/drivers/acpi/apei/apei-internal.h +++ b/drivers/acpi/apei/apei-internal.h @@ -7,7 +7,6 @@ #ifndef APEI_INTERNAL_H #define APEI_INTERNAL_H -#include #include struct apei_exec_context; @@ -130,10 +129,5 @@ static inline u32 cper_estatus_len(struct acpi_hest_generic_status *estatus) return sizeof(*estatus) + estatus->data_length; } -void cper_estatus_print(const char *pfx, - const struct acpi_hest_generic_status *estatus); -int cper_estatus_check_header(const struct acpi_hest_generic_status *estatus); -int cper_estatus_check(const struct acpi_hest_generic_status *estatus); - int apei_osc_setup(void); #endif diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c index c23eb75866d0214fa98688e1206d106effb7cca7..7514e38d5640eed18483897f9ddfd867061d9fa9 100644 --- a/drivers/acpi/apei/bert.c +++ b/drivers/acpi/apei/bert.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include "apei-internal.h" diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index e8492b3a393ab6932c1f6c791bee1c442688fb64..0800a9d77558069edc16243098183b16f5f78e39 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -516,6 +516,17 @@ static const struct dmi_system_id maingear_laptop[] = { { } }; +static const struct dmi_system_id lg_laptop[] = { + { + .ident = "LG Electronics 17U70P", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"), + DMI_MATCH(DMI_BOARD_NAME, "17U70P"), + }, + }, + { } +}; + struct irq_override_cmp { const struct dmi_system_id *system; unsigned char irq; @@ -532,6 +543,7 @@ static const struct irq_override_cmp override_table[] = { { lenovo_laptop, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true }, { tongfang_gm_rg, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true }, { maingear_laptop, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true }, + { lg_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false }, }; static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity, diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 72470b9f16c4577fa8fe6ec5037c5e7ce0e85f75..f32570f72b90cb9b64de15127295a4ad1ba211b7 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -636,11 +636,19 @@ static int acpi_suspend_enter(suspend_state_t pm_state) } /* - * Disable and clear GPE status before interrupt is enabled. Some GPEs - * (like wakeup GPE) haven't handler, this can avoid such GPE misfire. - * acpi_leave_sleep_state will reenable specific GPEs later + * Disable all GPE and clear their status bits before interrupts are + * enabled. 
Some GPEs (like wakeup GPEs) have no handlers and this can + prevent them from producing spurious interrupts. + * + * acpi_leave_sleep_state() will reenable specific GPEs later. + * + * Because this code runs on one CPU with disabled interrupts (all of + * the other CPUs are offline at this time), it need not acquire any + * sleeping locks which may trigger an implicit preemption point even + * if there is no contention, so avoid doing that by using a low-level + * library routine here. */ - acpi_disable_all_gpes(); + acpi_hw_disable_all_gpes(); /* Allow EC transactions to happen. */ acpi_ec_unblock_transactions(); diff --git a/drivers/android/binder.c b/drivers/android/binder.c index fb56bfc45096d4a8a645dc30f7441b77c6c20c33..8fb7672021ee2d2fc06570c29dbcb2b6f09bc993 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -1934,24 +1934,23 @@ static void binder_deferred_fd_close(int fd) static void binder_transaction_buffer_release(struct binder_proc *proc, struct binder_thread *thread, struct binder_buffer *buffer, - binder_size_t failed_at, + binder_size_t off_end_offset, bool is_failure) { int debug_id = buffer->debug_id; - binder_size_t off_start_offset, buffer_offset, off_end_offset; + binder_size_t off_start_offset, buffer_offset; binder_debug(BINDER_DEBUG_TRANSACTION, "%d buffer release %d, size %zd-%zd, failed at %llx\n", proc->pid, buffer->debug_id, buffer->data_size, buffer->offsets_size, - (unsigned long long)failed_at); + (unsigned long long)off_end_offset); if (buffer->target_node) binder_dec_node(buffer->target_node, 1, 0); off_start_offset = ALIGN(buffer->data_size, sizeof(void *)); - off_end_offset = is_failure && failed_at ? failed_at : - off_start_offset + buffer->offsets_size; + for (buffer_offset = off_start_offset; buffer_offset < off_end_offset; buffer_offset += sizeof(binder_size_t)) { struct binder_object_header *hdr; @@ -2111,6 +2110,21 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, } } +/* Clean up all the objects in the buffer */ +static inline void binder_release_entire_buffer(struct binder_proc *proc, + struct binder_thread *thread, + struct binder_buffer *buffer, + bool is_failure) +{ + binder_size_t off_end_offset; + + off_end_offset = ALIGN(buffer->data_size, sizeof(void *)); + off_end_offset += buffer->offsets_size; + + binder_transaction_buffer_release(proc, thread, buffer, + off_end_offset, is_failure); +} + static int binder_translate_binder(struct flat_binder_object *fp, struct binder_transaction *t, struct binder_thread *thread) @@ -2806,7 +2820,7 @@ static int binder_proc_transaction(struct binder_transaction *t, t_outdated->buffer = NULL; buffer->transaction = NULL; trace_binder_transaction_update_buffer_release(buffer); - binder_transaction_buffer_release(proc, NULL, buffer, 0, 0); + binder_release_entire_buffer(proc, NULL, buffer, false); binder_alloc_free_buf(&proc->alloc, buffer); kfree(t_outdated); binder_stats_deleted(BINDER_STAT_TRANSACTION); @@ -3775,7 +3789,7 @@ binder_free_buf(struct binder_proc *proc, binder_node_inner_unlock(buf_node); } trace_binder_transaction_buffer_release(buffer); - binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure); + binder_release_entire_buffer(proc, thread, buffer, is_failure); binder_alloc_free_buf(&proc->alloc, buffer); } diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 55a3c3c2409f0108ccbff79307e45036ad7c7701..662a2a2e2e84a77b6fc9294d34c376f3e27712f2 100644 --- a/drivers/android/binder_alloc.c +++
b/drivers/android/binder_alloc.c @@ -212,8 +212,8 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, mm = alloc->mm; if (mm) { - mmap_read_lock(mm); - vma = vma_lookup(mm, alloc->vma_addr); + mmap_write_lock(mm); + vma = alloc->vma; } if (!vma && need_mm) { @@ -270,7 +270,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, trace_binder_alloc_page_end(alloc, index); } if (mm) { - mmap_read_unlock(mm); + mmap_write_unlock(mm); mmput(mm); } return 0; @@ -303,21 +303,24 @@ err_page_ptr_cleared: } err_no_vma: if (mm) { - mmap_read_unlock(mm); + mmap_write_unlock(mm); mmput(mm); } return vma ? -ENOMEM : -ESRCH; } +static inline void binder_alloc_set_vma(struct binder_alloc *alloc, + struct vm_area_struct *vma) +{ + /* pairs with smp_load_acquire in binder_alloc_get_vma() */ + smp_store_release(&alloc->vma, vma); +} + static inline struct vm_area_struct *binder_alloc_get_vma( struct binder_alloc *alloc) { - struct vm_area_struct *vma = NULL; - - if (alloc->vma_addr) - vma = vma_lookup(alloc->mm, alloc->vma_addr); - - return vma; + /* pairs with smp_store_release in binder_alloc_set_vma() */ + return smp_load_acquire(&alloc->vma); } static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid) @@ -380,15 +383,13 @@ static struct binder_buffer *binder_alloc_new_buf_locked( size_t size, data_offsets_size; int ret; - mmap_read_lock(alloc->mm); + /* Check binder_alloc is fully initialized */ if (!binder_alloc_get_vma(alloc)) { - mmap_read_unlock(alloc->mm); binder_alloc_debug(BINDER_DEBUG_USER_ERROR, "%d: binder_alloc_buf, no vma\n", alloc->pid); return ERR_PTR(-ESRCH); } - mmap_read_unlock(alloc->mm); data_offsets_size = ALIGN(data_size, sizeof(void *)) + ALIGN(offsets_size, sizeof(void *)); @@ -778,7 +779,9 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, buffer->free = 1; binder_insert_free_buffer(alloc, buffer); alloc->free_async_space = alloc->buffer_size / 2; - alloc->vma_addr = vma->vm_start; + + /* Signal binder_alloc is fully initialized */ + binder_alloc_set_vma(alloc, vma); return 0; @@ -808,8 +811,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc) buffers = 0; mutex_lock(&alloc->mutex); - BUG_ON(alloc->vma_addr && - vma_lookup(alloc->mm, alloc->vma_addr)); + BUG_ON(alloc->vma); while ((n = rb_first(&alloc->allocated_buffers))) { buffer = rb_entry(n, struct binder_buffer, rb_node); @@ -916,25 +918,17 @@ void binder_alloc_print_pages(struct seq_file *m, * Make sure the binder_alloc is fully initialized, otherwise we might * read inconsistent state. 
*/ - - mmap_read_lock(alloc->mm); - if (binder_alloc_get_vma(alloc) == NULL) { - mmap_read_unlock(alloc->mm); - goto uninitialized; - } - - mmap_read_unlock(alloc->mm); - for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { - page = &alloc->pages[i]; - if (!page->page_ptr) - free++; - else if (list_empty(&page->lru)) - active++; - else - lru++; + if (binder_alloc_get_vma(alloc) != NULL) { + for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { + page = &alloc->pages[i]; + if (!page->page_ptr) + free++; + else if (list_empty(&page->lru)) + active++; + else + lru++; + } } - -uninitialized: mutex_unlock(&alloc->mutex); seq_printf(m, " pages: %d:%d:%d\n", active, lru, free); seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high); @@ -969,7 +963,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc) */ void binder_alloc_vma_close(struct binder_alloc *alloc) { - alloc->vma_addr = 0; + binder_alloc_set_vma(alloc, NULL); } /** diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h index 0f811ac4bcffd1f922a6bd895cd5682ad9acd08c..138d1d5af9ce36051893ee50a8e728d7bfbc2940 100644 --- a/drivers/android/binder_alloc.h +++ b/drivers/android/binder_alloc.h @@ -75,7 +75,7 @@ struct binder_lru_page { /** * struct binder_alloc - per-binder proc state for binder allocator * @mutex: protects binder_alloc fields - * @vma_addr: vm_area_struct->vm_start passed to mmap_handler + * @vma: vm_area_struct passed to mmap_handler * (invariant after mmap) * @mm: copy of task->mm (invariant after open) * @buffer: base of per-proc address space mapped via mmap @@ -99,7 +99,7 @@ struct binder_lru_page { */ struct binder_alloc { struct mutex mutex; - unsigned long vma_addr; + struct vm_area_struct *vma; struct mm_struct *mm; void __user *buffer; struct list_head buffers; diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c index 43a881073a42832feb96c906c53e0ad34d2b2970..c2b323bc3b3a53043fa5783fb4f809de93b24644 100644 --- a/drivers/android/binder_alloc_selftest.c +++ b/drivers/android/binder_alloc_selftest.c @@ -287,7 +287,7 @@ void binder_selftest_alloc(struct binder_alloc *alloc) if (!binder_selftest_run) return; mutex_lock(&binder_selftest_lock); - if (!binder_selftest_run || !alloc->vma_addr) + if (!binder_selftest_run || !alloc->vma) goto done; pr_info("STARTED\n"); binder_selftest_alloc_offset(alloc, end_offset, 0); diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 8bf612bdd61a5a1d8d04d70fd8579249ede7c728..b4f246f0cac7f05a6942d41526af51d6f65eb766 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -5348,7 +5348,7 @@ struct ata_port *ata_port_alloc(struct ata_host *host) mutex_init(&ap->scsi_scan_mutex); INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); - INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); + INIT_DELAYED_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); INIT_LIST_HEAD(&ap->eh_done_q); init_waitqueue_head(&ap->eh_wait_q); init_completion(&ap->park_req_pending); @@ -5954,6 +5954,7 @@ static void ata_port_detach(struct ata_port *ap) WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED)); cancel_delayed_work_sync(&ap->hotplug_task); + cancel_delayed_work_sync(&ap->scsi_rescan_task); skip_eh: /* clean up zpodd on port removal */ diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index a6c90181180271bd0c435a658b4dc71a57f3968c..6f8d141915939441dafd1a9852e2172da128cadf 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -2984,7 +2984,7 @@ static int 
ata_eh_revalidate_and_attach(struct ata_link *link, ehc->i.flags |= ATA_EHI_SETMODE; /* schedule the scsi_rescan_device() here */ - schedule_work(&(ap->scsi_rescan_task)); + schedule_delayed_work(&ap->scsi_rescan_task, 0); } else if (dev->class == ATA_DEV_UNKNOWN && ehc->tries[dev->devno] && ata_class_enabled(ehc->classes[dev->devno])) { diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 7bb12deab70c465bb2a701548bdc044cd8bcb11f..551077cea4e4e023e20e913e37ee7b939488083a 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -2694,18 +2694,36 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) return 0; } -static struct ata_device *ata_find_dev(struct ata_port *ap, int devno) +static struct ata_device *ata_find_dev(struct ata_port *ap, unsigned int devno) { - if (!sata_pmp_attached(ap)) { - if (likely(devno >= 0 && - devno < ata_link_max_devices(&ap->link))) + /* + * For the non-PMP case, ata_link_max_devices() returns 1 (SATA case), + * or 2 (IDE master + slave case). However, the former case includes + * libsas hosted devices which are numbered per scsi host, leading + * to devno potentially being larger than 0 but with each struct + * ata_device having its own struct ata_port and struct ata_link. + * To accommodate these, ignore devno and always use device number 0. + */ + if (likely(!sata_pmp_attached(ap))) { + int link_max_devices = ata_link_max_devices(&ap->link); + + if (link_max_devices == 1) + return &ap->link.device[0]; + + if (devno < link_max_devices) return &ap->link.device[devno]; - } else { - if (likely(devno >= 0 && - devno < ap->nr_pmp_links)) - return &ap->pmp_link[devno].device[0]; + + return NULL; } + /* + * For PMP-attached devices, the device number corresponds to C + * (channel) of SCSI [H:C:I:L], indicating the port pmp link + * for the device. + */ + if (devno < ap->nr_pmp_links) + return &ap->pmp_link[devno].device[0]; + return NULL; } @@ -4579,10 +4597,11 @@ int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, void ata_scsi_dev_rescan(struct work_struct *work) { struct ata_port *ap = - container_of(work, struct ata_port, scsi_rescan_task); + container_of(work, struct ata_port, scsi_rescan_task.work); struct ata_link *link; struct ata_device *dev; unsigned long flags; + bool delay_rescan = false; mutex_lock(&ap->scsi_scan_mutex); spin_lock_irqsave(ap->lock, flags); @@ -4596,6 +4615,21 @@ void ata_scsi_dev_rescan(struct work_struct *work) if (scsi_device_get(sdev)) continue; + /* + * If the rescan work was scheduled because of a resume + * event, the port is already fully resumed, but the + * SCSI device may not yet be fully resumed. In such + * case, executing scsi_rescan_device() may cause a + * deadlock with the PM code on device_lock(). Prevent + * this by giving up and retrying rescan after a short + * delay. 
+ */ + delay_rescan = sdev->sdev_gendev.power.is_suspended; + if (delay_rescan) { + scsi_device_put(sdev); + break; + } + spin_unlock_irqrestore(ap->lock, flags); scsi_rescan_device(&(sdev->sdev_gendev)); scsi_device_put(sdev); @@ -4605,4 +4639,8 @@ void ata_scsi_dev_rescan(struct work_struct *work) spin_unlock_irqrestore(ap->lock, flags); mutex_unlock(&ap->scsi_scan_mutex); + + if (delay_rescan) + schedule_delayed_work(&ap->scsi_rescan_task, + msecs_to_jiffies(5)); } diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index bba3482ddeb82e72b8578f4db36bdb5d78511a6f..cbae8be1fe52005553e3d9dded77a21595d2cafc 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -388,6 +388,16 @@ static int cache_shared_cpu_map_setup(unsigned int cpu) continue;/* skip if itself or no cacheinfo */ for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) { sib_leaf = per_cpu_cacheinfo_idx(i, sib_index); + + /* + * Comparing cache IDs only makes sense if the leaves + * belong to the same cache level of same type. Skip + * the check if level and type do not match. + */ + if (sib_leaf->level != this_leaf->level || + sib_leaf->type != this_leaf->type) + continue; + if (cache_leaves_are_shared(this_leaf, sib_leaf)) { cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map); cpumask_set_cpu(i, &this_leaf->shared_cpu_map); @@ -400,11 +410,14 @@ static int cache_shared_cpu_map_setup(unsigned int cpu) coherency_max_size = this_leaf->coherency_line_size; } + /* shared_cpu_map is now populated for the cpu */ + this_cpu_ci->cpu_map_populated = true; return 0; } static void cache_shared_cpu_map_remove(unsigned int cpu) { + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); struct cacheinfo *this_leaf, *sib_leaf; unsigned int sibling, index, sib_index; @@ -419,6 +432,16 @@ static void cache_shared_cpu_map_remove(unsigned int cpu) for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) { sib_leaf = per_cpu_cacheinfo_idx(sibling, sib_index); + + /* + * Comparing cache IDs only makes sense if the leaves + * belong to the same cache level of same type. Skip + * the check if level and type do not match. 
+ */ + if (sib_leaf->level != this_leaf->level || + sib_leaf->type != this_leaf->type) + continue; + if (cache_leaves_are_shared(this_leaf, sib_leaf)) { cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map); cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map); @@ -427,6 +450,9 @@ static void cache_shared_cpu_map_remove(unsigned int cpu) } } } + + /* cpu is no longer populated in the shared map */ + this_cpu_ci->cpu_map_populated = false; } static void free_cache_attributes(unsigned int cpu) diff --git a/drivers/base/class.c b/drivers/base/class.c index ac1808d1a2e8f0012beb816b97a7101b048b688c..05d9df90f621be89cdbd7d1ecee888c18a6b688b 100644 --- a/drivers/base/class.c +++ b/drivers/base/class.c @@ -320,6 +320,7 @@ void class_dev_iter_init(struct class_dev_iter *iter, const struct class *class, start_knode = &start->p->knode_class; klist_iter_init_node(&sp->klist_devices, &iter->ki, start_knode); iter->type = type; + iter->sp = sp; } EXPORT_SYMBOL_GPL(class_dev_iter_init); @@ -361,6 +362,7 @@ EXPORT_SYMBOL_GPL(class_dev_iter_next); void class_dev_iter_exit(struct class_dev_iter *iter) { klist_iter_exit(&iter->ki); + subsys_put(iter->sp); } EXPORT_SYMBOL_GPL(class_dev_iter_exit); diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c index 9d79d5ad9102417386de44356c0d6d2af45f8e47..b58c42f1b1ce658157c2c832bca8ea929286b139 100644 --- a/drivers/base/firmware_loader/main.c +++ b/drivers/base/firmware_loader/main.c @@ -812,7 +812,7 @@ static void fw_log_firmware_info(const struct firmware *fw, const char *name, st char *outbuf; alg = crypto_alloc_shash("sha256", 0, 0); - if (!alg) + if (IS_ERR(alg)) return; sha256buf = kmalloc(SHA256_DIGEST_SIZE, GFP_KERNEL); diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig index 33a8366e22a584a5681b9434cfac240e238d750c..0db2021f7477f2abd40ee69ff23dd759dbc6a1a7 100644 --- a/drivers/base/regmap/Kconfig +++ b/drivers/base/regmap/Kconfig @@ -4,16 +4,23 @@ # subsystems should select the appropriate symbols. config REGMAP + bool "Register Map support" if KUNIT_ALL_TESTS default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SOUNDWIRE || REGMAP_SOUNDWIRE_MBQ || REGMAP_SCCB || REGMAP_I3C || REGMAP_SPI_AVMM || REGMAP_MDIO || REGMAP_FSI) select IRQ_DOMAIN if REGMAP_IRQ select MDIO_BUS if REGMAP_MDIO - bool + help + Enable support for the Register Map (regmap) access API. + + Usually, this option is automatically selected when needed. + However, you may want to enable it manually for running the regmap + KUnit tests. + + If unsure, say N. 
config REGMAP_KUNIT tristate "KUnit tests for regmap" - depends on KUNIT + depends on KUNIT && REGMAP default KUNIT_ALL_TESTS - select REGMAP select REGMAP_RAM config REGMAP_AC97 diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c index 9b1b559107ef4d8016536ab8ba589be4b02aa725..c2e3a0f6c2183bd2c3cc59561d162317dfea8d50 100644 --- a/drivers/base/regmap/regcache-maple.c +++ b/drivers/base/regmap/regcache-maple.c @@ -203,15 +203,18 @@ static int regcache_maple_sync(struct regmap *map, unsigned int min, mas_for_each(&mas, entry, max) { for (r = max(mas.index, lmin); r <= min(mas.last, lmax); r++) { + mas_pause(&mas); + rcu_read_unlock(); ret = regcache_sync_val(map, r, entry[r - mas.index]); if (ret != 0) goto out; + rcu_read_lock(); } } -out: rcu_read_unlock(); +out: map->cache_bypass = false; return ret; diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c index 029564695dbbb1ba03964837efb4375eda41ea5b..97c681fcf9f6ee4cd4e5bb0c709fabb9faf07b50 100644 --- a/drivers/base/regmap/regcache.c +++ b/drivers/base/regmap/regcache.c @@ -284,6 +284,9 @@ static bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg, { int ret; + if (!regmap_writeable(map, reg)) + return false; + /* If we don't know the chip just got reset, then sync everything. */ if (!map->no_sync_defaults) return true; diff --git a/drivers/base/regmap/regmap-sdw.c b/drivers/base/regmap/regmap-sdw.c index 09899ae99fc199b0585c7f417c1f76043dced7d0..159c0b740b001c18f1c9510d956c97316f9689b6 100644 --- a/drivers/base/regmap/regmap-sdw.c +++ b/drivers/base/regmap/regmap-sdw.c @@ -59,6 +59,10 @@ static int regmap_sdw_config_check(const struct regmap_config *config) if (config->pad_bits != 0) return -ENOTSUPP; + /* Only bulk writes are supported not multi-register writes */ + if (config->can_multi_write) + return -ENOTSUPP; + return 0; } diff --git a/drivers/base/regmap/regmap-spi-avmm.c b/drivers/base/regmap/regmap-spi-avmm.c index 4c2b94b3e30be588cac339d30a21b3b01ea879ff..6af692844c19681832b8580095d93301cd29d990 100644 --- a/drivers/base/regmap/regmap-spi-avmm.c +++ b/drivers/base/regmap/regmap-spi-avmm.c @@ -660,7 +660,7 @@ static const struct regmap_bus regmap_spi_avmm_bus = { .reg_format_endian_default = REGMAP_ENDIAN_NATIVE, .val_format_endian_default = REGMAP_ENDIAN_NATIVE, .max_raw_read = SPI_AVMM_VAL_SIZE * MAX_READ_CNT, - .max_raw_write = SPI_AVMM_VAL_SIZE * MAX_WRITE_CNT, + .max_raw_write = SPI_AVMM_REG_SIZE + SPI_AVMM_VAL_SIZE * MAX_WRITE_CNT, .free_context = spi_avmm_bridge_ctx_free, }; diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index db7851f0e3b8cd7542b93a09d9a9224557868fd7..fa2d3fba6ac9d1d9df6f3a8ef63506c16f1e0cb0 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -2082,6 +2082,8 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg, size_t val_count = val_len / val_bytes; size_t chunk_count, chunk_bytes; size_t chunk_regs = val_count; + size_t max_data = map->max_raw_write - map->format.reg_bytes - + map->format.pad_bytes; int ret, i; if (!val_count) @@ -2089,8 +2091,8 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg, if (map->use_single_write) chunk_regs = 1; - else if (map->max_raw_write && val_len > map->max_raw_write) - chunk_regs = map->max_raw_write / val_bytes; + else if (map->max_raw_write && val_len > max_data) + chunk_regs = max_data / val_bytes; chunk_count = val_count / chunk_regs; chunk_bytes = chunk_regs * val_bytes; diff --git a/drivers/block/null_blk/main.c 
b/drivers/block/null_blk/main.c index b3fedafe301e837206d6a0c2a22f40231dc3aeb5..864013019d6b6508d8750c7f19c10a11b710e9ca 100644 --- a/drivers/block/null_blk/main.c +++ b/drivers/block/null_blk/main.c @@ -2244,6 +2244,7 @@ static void null_destroy_dev(struct nullb *nullb) struct nullb_device *dev = nullb->dev; null_del_dev(nullb); + null_free_device_storage(dev, false); null_free_dev(dev); } diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 84ad3b17956f67ddd55508e6eb7d9b23c67ecb1e..632751ddb2870846926391f77986ca1e44a0a5bc 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1334,14 +1334,30 @@ static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req) /* * Must be called after rbd_obj_calc_img_extents(). */ -static bool rbd_obj_copyup_enabled(struct rbd_obj_request *obj_req) +static void rbd_obj_set_copyup_enabled(struct rbd_obj_request *obj_req) { - if (!obj_req->num_img_extents || - (rbd_obj_is_entire(obj_req) && - !obj_req->img_request->snapc->num_snaps)) - return false; + rbd_assert(obj_req->img_request->snapc); - return true; + if (obj_req->img_request->op_type == OBJ_OP_DISCARD) { + dout("%s %p objno %llu discard\n", __func__, obj_req, + obj_req->ex.oe_objno); + return; + } + + if (!obj_req->num_img_extents) { + dout("%s %p objno %llu not overlapping\n", __func__, obj_req, + obj_req->ex.oe_objno); + return; + } + + if (rbd_obj_is_entire(obj_req) && + !obj_req->img_request->snapc->num_snaps) { + dout("%s %p objno %llu entire\n", __func__, obj_req, + obj_req->ex.oe_objno); + return; + } + + obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED; } static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req) @@ -1442,6 +1458,7 @@ __rbd_obj_add_osd_request(struct rbd_obj_request *obj_req, static struct ceph_osd_request * rbd_obj_add_osd_request(struct rbd_obj_request *obj_req, int num_ops) { + rbd_assert(obj_req->img_request->snapc); return __rbd_obj_add_osd_request(obj_req, obj_req->img_request->snapc, num_ops); } @@ -1578,15 +1595,18 @@ static void rbd_img_request_init(struct rbd_img_request *img_request, mutex_init(&img_request->state_mutex); } +/* + * Only snap_id is captured here, for reads. For writes, snapshot + * context is captured in rbd_img_object_requests() after exclusive + * lock is ensured to be held. 
+ */ static void rbd_img_capture_header(struct rbd_img_request *img_req) { struct rbd_device *rbd_dev = img_req->rbd_dev; lockdep_assert_held(&rbd_dev->header_rwsem); - if (rbd_img_is_write(img_req)) - img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc); - else + if (!rbd_img_is_write(img_req)) img_req->snap_id = rbd_dev->spec->snap_id; if (rbd_dev_parent_get(rbd_dev)) @@ -2233,9 +2253,6 @@ static int rbd_obj_init_write(struct rbd_obj_request *obj_req) if (ret) return ret; - if (rbd_obj_copyup_enabled(obj_req)) - obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED; - obj_req->write_state = RBD_OBJ_WRITE_START; return 0; } @@ -2341,8 +2358,6 @@ static int rbd_obj_init_zeroout(struct rbd_obj_request *obj_req) if (ret) return ret; - if (rbd_obj_copyup_enabled(obj_req)) - obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED; if (!obj_req->num_img_extents) { obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT; if (rbd_obj_is_entire(obj_req)) @@ -3286,6 +3301,7 @@ again: case RBD_OBJ_WRITE_START: rbd_assert(!*result); + rbd_obj_set_copyup_enabled(obj_req); if (rbd_obj_write_is_noop(obj_req)) return true; @@ -3472,9 +3488,19 @@ static int rbd_img_exclusive_lock(struct rbd_img_request *img_req) static void rbd_img_object_requests(struct rbd_img_request *img_req) { + struct rbd_device *rbd_dev = img_req->rbd_dev; struct rbd_obj_request *obj_req; rbd_assert(!img_req->pending.result && !img_req->pending.num_pending); + rbd_assert(!need_exclusive_lock(img_req) || + __rbd_is_lock_owner(rbd_dev)); + + if (rbd_img_is_write(img_req)) { + rbd_assert(!img_req->snapc); + down_read(&rbd_dev->header_rwsem); + img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc); + up_read(&rbd_dev->header_rwsem); + } for_each_obj_request(img_req, obj_req) { int result = 0; @@ -3492,7 +3518,6 @@ static void rbd_img_object_requests(struct rbd_img_request *img_req) static bool rbd_img_advance(struct rbd_img_request *img_req, int *result) { - struct rbd_device *rbd_dev = img_req->rbd_dev; int ret; again: @@ -3513,9 +3538,6 @@ again: if (*result) return true; - rbd_assert(!need_exclusive_lock(img_req) || - __rbd_is_lock_owner(rbd_dev)); - rbd_img_object_requests(img_req); if (!img_req->pending.num_pending) { *result = img_req->pending.result; @@ -3977,6 +3999,10 @@ static int rbd_post_acquire_action(struct rbd_device *rbd_dev) { int ret; + ret = rbd_dev_refresh(rbd_dev); + if (ret) + return ret; + if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) { ret = rbd_object_map_open(rbd_dev); if (ret) diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index c7ed5d69e9eee2545c0f4458ce6f4a1847060cde..33d3298a0da16f1f70ecc87495f77746f2e6a4af 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -1120,6 +1120,11 @@ static inline bool ublk_queue_ready(struct ublk_queue *ubq) return ubq->nr_io_ready == ubq->q_depth; } +static void ublk_cmd_cancel_cb(struct io_uring_cmd *cmd, unsigned issue_flags) +{ + io_uring_cmd_done(cmd, UBLK_IO_RES_ABORT, 0, issue_flags); +} + static void ublk_cancel_queue(struct ublk_queue *ubq) { int i; @@ -1131,8 +1136,8 @@ static void ublk_cancel_queue(struct ublk_queue *ubq) struct ublk_io *io = &ubq->ios[i]; if (io->flags & UBLK_IO_FLAG_ACTIVE) - io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0, - IO_URING_F_UNLOCKED); + io_uring_cmd_complete_in_task(io->cmd, + ublk_cmd_cancel_cb); } /* all io commands are canceled */ diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 2b918e28acaacd973221e56344977053072f7f7d..b47358da92a231ee15bf5452c236df93452fe15d 
100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -348,63 +348,33 @@ static inline void virtblk_request_done(struct request *req) blk_mq_end_request(req, status); } -static void virtblk_complete_batch(struct io_comp_batch *iob) -{ - struct request *req; - - rq_list_for_each(&iob->req_list, req) { - virtblk_unmap_data(req, blk_mq_rq_to_pdu(req)); - virtblk_cleanup_cmd(req); - } - blk_mq_end_request_batch(iob); -} - -static int virtblk_handle_req(struct virtio_blk_vq *vq, - struct io_comp_batch *iob) -{ - struct virtblk_req *vbr; - int req_done = 0; - unsigned int len; - - while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) { - struct request *req = blk_mq_rq_from_pdu(vbr); - - if (likely(!blk_should_fake_timeout(req->q)) && - !blk_mq_complete_request_remote(req) && - !blk_mq_add_to_batch(req, iob, virtblk_vbr_status(vbr), - virtblk_complete_batch)) - virtblk_request_done(req); - req_done++; - } - - return req_done; -} - static void virtblk_done(struct virtqueue *vq) { struct virtio_blk *vblk = vq->vdev->priv; - struct virtio_blk_vq *vblk_vq = &vblk->vqs[vq->index]; - int req_done = 0; + bool req_done = false; + int qid = vq->index; + struct virtblk_req *vbr; unsigned long flags; - DEFINE_IO_COMP_BATCH(iob); + unsigned int len; - spin_lock_irqsave(&vblk_vq->lock, flags); + spin_lock_irqsave(&vblk->vqs[qid].lock, flags); do { virtqueue_disable_cb(vq); - req_done += virtblk_handle_req(vblk_vq, &iob); + while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { + struct request *req = blk_mq_rq_from_pdu(vbr); + if (likely(!blk_should_fake_timeout(req->q))) + blk_mq_complete_request(req); + req_done = true; + } if (unlikely(virtqueue_is_broken(vq))) break; } while (!virtqueue_enable_cb(vq)); - if (req_done) { - if (!rq_list_empty(iob.req_list)) - iob.complete(&iob); - - /* In case queue is stopped waiting for more buffers. */ + /* In case queue is stopped waiting for more buffers. 
*/ + if (req_done) blk_mq_start_stopped_hw_queues(vblk->disk->queue, true); - } - spin_unlock_irqrestore(&vblk_vq->lock, flags); + spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); } static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx) @@ -1283,15 +1253,37 @@ static void virtblk_map_queues(struct blk_mq_tag_set *set) } } +static void virtblk_complete_batch(struct io_comp_batch *iob) +{ + struct request *req; + + rq_list_for_each(&iob->req_list, req) { + virtblk_unmap_data(req, blk_mq_rq_to_pdu(req)); + virtblk_cleanup_cmd(req); + } + blk_mq_end_request_batch(iob); +} + static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) { struct virtio_blk *vblk = hctx->queue->queuedata; struct virtio_blk_vq *vq = get_virtio_blk_vq(hctx); + struct virtblk_req *vbr; unsigned long flags; + unsigned int len; int found = 0; spin_lock_irqsave(&vq->lock, flags); - found = virtblk_handle_req(vq, iob); + + while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) { + struct request *req = blk_mq_rq_from_pdu(vbr); + + found++; + if (!blk_mq_complete_request_remote(req) && + !blk_mq_add_to_batch(req, iob, virtblk_vbr_status(vbr), + virtblk_complete_batch)) + virtblk_request_done(req); + } if (found) blk_mq_start_stopped_hw_queues(vblk->disk->queue, true); diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 23ed258b57f0e585e122a60d6d4dba7cc2972f02..c1890c8a9f6e7640d874237a988437ff4aa264d7 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -780,7 +780,8 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri ring_req->u.rw.handle = info->handle; ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE : BLKIF_OP_READ; - if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) { + if (req_op(req) == REQ_OP_FLUSH || + (req_op(req) == REQ_OP_WRITE && (req->cmd_flags & REQ_FUA))) { /* * Ideally we can do an unordered flush-to-disk. * In case the backend onlysupports barriers, use that. 
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c index 3a34d7c1475bceeeca3a3c5d10a10243d4d9e0f4..52ef44688d38a3685c7974feaa60af7ace37dac6 100644 --- a/drivers/bluetooth/btnxpuart.c +++ b/drivers/bluetooth/btnxpuart.c @@ -1319,17 +1319,17 @@ static void nxp_serdev_remove(struct serdev_device *serdev) hci_free_dev(hdev); } -static struct btnxpuart_data w8987_data = { +static struct btnxpuart_data w8987_data __maybe_unused = { .helper_fw_name = NULL, .fw_name = FIRMWARE_W8987, }; -static struct btnxpuart_data w8997_data = { +static struct btnxpuart_data w8997_data __maybe_unused = { .helper_fw_name = FIRMWARE_HELPER, .fw_name = FIRMWARE_W8997, }; -static const struct of_device_id nxpuart_of_match_table[] = { +static const struct of_device_id nxpuart_of_match_table[] __maybe_unused = { { .compatible = "nxp,88w8987-bt", .data = &w8987_data }, { .compatible = "nxp,88w8997-bt", .data = &w8997_data }, { } diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 1b064504b3887522f50ecc19b5d578863399baed..e30c979535b1dae8df06b4a4a158730bc83a9ba8 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -78,7 +78,8 @@ enum qca_flags { QCA_HW_ERROR_EVENT, QCA_SSR_TRIGGERED, QCA_BT_OFF, - QCA_ROM_FW + QCA_ROM_FW, + QCA_DEBUGFS_CREATED, }; enum qca_capabilities { @@ -635,6 +636,9 @@ static void qca_debugfs_init(struct hci_dev *hdev) if (!hdev->debugfs) return; + if (test_and_set_bit(QCA_DEBUGFS_CREATED, &qca->flags)) + return; + ibs_dir = debugfs_create_dir("ibs", hdev->debugfs); /* read only */ diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c index d68d05d5d38388523b0fecfdd45cb6e32488c784..514f9f287a781dd910de9da4034a6ea74529bc11 100644 --- a/drivers/char/agp/parisc-agp.c +++ b/drivers/char/agp/parisc-agp.c @@ -90,6 +90,9 @@ parisc_agp_tlbflush(struct agp_memory *mem) { struct _parisc_agp_info *info = &parisc_agp_info; + /* force fdc ops to be visible to IOMMU */ + asm_io_sync(); + writeq(info->gart_base | ilog2(info->gart_size), info->ioc_regs+IOC_PCOM); readq(info->ioc_regs+IOC_PCOM); /* flush */ } @@ -158,6 +161,7 @@ parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type) info->gatt[j] = parisc_agp_mask_memory(agp_bridge, paddr, type); + asm_io_fdc(&info->gatt[j]); } } @@ -191,7 +195,16 @@ static unsigned long parisc_agp_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr, int type) { - return SBA_PDIR_VALID_BIT | addr; + unsigned ci; /* coherent index */ + dma_addr_t pa; + + pa = addr & IOVP_MASK; + asm("lci 0(%1), %0" : "=r" (ci) : "r" (phys_to_virt(pa))); + + pa |= (ci >> PAGE_SHIFT) & 0xff;/* move CI (8 bits) into lowest byte */ + pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */ + + return cpu_to_le64(pa); } static void diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index c10a4aa9737366f949d3fb0602971ef85ae3d5a6..cd48033b804a3871529ce9d31acb34d7ec64819e 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -571,6 +571,10 @@ static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait) { struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng); + /* Give back zero bytes, as TPM chip has not yet fully resumed: */ + if (chip->flags & TPM_CHIP_FLAG_SUSPENDED) + return 0; + return tpm_get_random(chip, data, max); } diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index 4463d00182909b37773101c0b826b95e4e92c8bf..586ca10b0d72e5a0ed6fb84171b8ba1754def0d8 100644 --- 
a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c @@ -412,6 +412,8 @@ int tpm_pm_suspend(struct device *dev) } suspended: + chip->flags |= TPM_CHIP_FLAG_SUSPENDED; + if (rc) dev_err(dev, "Ignoring error %d while suspending\n", rc); return 0; @@ -429,6 +431,14 @@ int tpm_pm_resume(struct device *dev) if (chip == NULL) return -ENODEV; + chip->flags &= ~TPM_CHIP_FLAG_SUSPENDED; + + /* + * Guarantee that SUSPENDED is written last, so that hwrng does not + * activate before the chip has been fully resumed. + */ + wmb(); + return 0; } EXPORT_SYMBOL_GPL(tpm_pm_resume); diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index 7af389806643379fa76507755550fdae92871de9..7db3593941eaa0bce84ac5b5c217b814e8ae2772 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -122,6 +122,29 @@ static const struct dmi_system_id tpm_tis_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T490s"), }, }, + { + .callback = tpm_tis_disable_irq, + .ident = "ThinkStation P360 Tiny", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkStation P360 Tiny"), + }, + }, + { + .callback = tpm_tis_disable_irq, + .ident = "ThinkPad L490", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L490"), + }, + }, + { + .callback = tpm_tis_disable_irq, + .ident = "UPX-TGL", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "AAEON"), + }, + }, {} }; diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index 02945d53fcefafae4e0c72ef3b2a4cf6a8cb85e5..558144fa707ae7ad0937d1ae2a16ac3afba7e310 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -1209,25 +1209,20 @@ static void tpm_tis_reenable_interrupts(struct tpm_chip *chip) u32 intmask; int rc; - if (chip->ops->clk_enable != NULL) - chip->ops->clk_enable(chip, true); - - /* reenable interrupts that device may have lost or - * BIOS/firmware may have disabled + /* + * Re-enable interrupts that device may have lost or BIOS/firmware may + * have disabled. */ rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), priv->irq); - if (rc < 0) - goto out; + if (rc < 0) { + dev_err(&chip->dev, "Setting IRQ failed.\n"); + return; + } intmask = priv->int_mask | TPM_GLOBAL_INT_ENABLE; - - tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask); - -out: - if (chip->ops->clk_enable != NULL) - chip->ops->clk_enable(chip, false); - - return; + rc = tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask); + if (rc < 0) + dev_err(&chip->dev, "Enabling interrupts failed.\n"); } int tpm_tis_resume(struct device *dev) @@ -1235,27 +1230,27 @@ int tpm_tis_resume(struct device *dev) struct tpm_chip *chip = dev_get_drvdata(dev); int ret; - ret = tpm_tis_request_locality(chip, 0); - if (ret < 0) + ret = tpm_chip_start(chip); + if (ret) return ret; if (chip->flags & TPM_CHIP_FLAG_IRQ) tpm_tis_reenable_interrupts(chip); - ret = tpm_pm_resume(dev); - if (ret) - goto out; - /* * TPM 1.2 requires self-test on resume. This function actually returns * an error code but for unknown reason it isn't handled. 
*/ if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) tpm1_do_selftest(chip); -out: - tpm_tis_relinquish_locality(chip, 0); - return ret; + tpm_chip_stop(chip); + + ret = tpm_pm_resume(dev); + if (ret) + return ret; + + return 0; } EXPORT_SYMBOL_GPL(tpm_tis_resume); #endif diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h index e978f457fd4d4ce083a7a9f9689df5f2d7a8ed9d..610bfadb6acf1c1731575842eca18f1d8c6ea395 100644 --- a/drivers/char/tpm/tpm_tis_core.h +++ b/drivers/char/tpm/tpm_tis_core.h @@ -84,10 +84,10 @@ enum tis_defaults { #define ILB_REMAP_SIZE 0x100 enum tpm_tis_flags { - TPM_TIS_ITPM_WORKAROUND = BIT(0), - TPM_TIS_INVALID_STATUS = BIT(1), - TPM_TIS_DEFAULT_CANCELLATION = BIT(2), - TPM_TIS_IRQ_TESTED = BIT(3), + TPM_TIS_ITPM_WORKAROUND = 0, + TPM_TIS_INVALID_STATUS = 1, + TPM_TIS_DEFAULT_CANCELLATION = 2, + TPM_TIS_IRQ_TESTED = 3, }; struct tpm_tis_data { diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c index edfa94641bbfed8f0a9d4e575f73c82b21598c2b..66759fe28fad67f98b6c723ce70849490a022e26 100644 --- a/drivers/clk/clk-composite.c +++ b/drivers/clk/clk-composite.c @@ -119,7 +119,10 @@ static int clk_composite_determine_rate(struct clk_hw *hw, if (ret) continue; - rate_diff = abs(req->rate - tmp_req.rate); + if (req->rate >= tmp_req.rate) + rate_diff = req->rate - tmp_req.rate; + else + rate_diff = tmp_req.rate - req->rate; if (!rate_diff || !req->best_parent_hw || best_rate_diff > rate_diff) { diff --git a/drivers/clk/clk-loongson2.c b/drivers/clk/clk-loongson2.c index 70ae1dd2e474cc401a270a8df3b26b4b63633f25..bacdcbb287ac618b57c236b1103ebbe2c31e7107 100644 --- a/drivers/clk/clk-loongson2.c +++ b/drivers/clk/clk-loongson2.c @@ -40,7 +40,7 @@ static struct clk_hw *loongson2_clk_register(struct device *dev, { int ret; struct clk_hw *hw; - struct clk_init_data init; + struct clk_init_data init = { }; hw = devm_kzalloc(dev, sizeof(*hw), GFP_KERNEL); if (!hw) diff --git a/drivers/clk/mediatek/clk-mt8365.c b/drivers/clk/mediatek/clk-mt8365.c index 6b4e193f648d81e3181abaef3fc80a7929ce8c14..c87a6c4a79678c1e29c2bd8e67eb984313f484f0 100644 --- a/drivers/clk/mediatek/clk-mt8365.c +++ b/drivers/clk/mediatek/clk-mt8365.c @@ -23,6 +23,7 @@ static DEFINE_SPINLOCK(mt8365_clk_lock); static const struct mtk_fixed_clk top_fixed_clks[] = { + FIXED_CLK(CLK_TOP_CLK_NULL, "clk_null", NULL, 0), FIXED_CLK(CLK_TOP_I2S0_BCK, "i2s0_bck", NULL, 26000000), FIXED_CLK(CLK_TOP_DSI0_LNTC_DSICK, "dsi0_lntc_dsick", "clk26m", 75000000), @@ -559,6 +560,14 @@ static const struct mtk_clk_divider top_adj_divs[] = { 0x324, 16, 8, CLK_DIVIDER_ROUND_CLOSEST), DIV_ADJ_F(CLK_TOP_APLL12_CK_DIV3, "apll12_ck_div3", "apll_i2s3_sel", 0x324, 24, 8, CLK_DIVIDER_ROUND_CLOSEST), + DIV_ADJ_F(CLK_TOP_APLL12_CK_DIV4, "apll12_ck_div4", "apll_tdmout_sel", + 0x328, 0, 8, CLK_DIVIDER_ROUND_CLOSEST), + DIV_ADJ_F(CLK_TOP_APLL12_CK_DIV4B, "apll12_ck_div4b", "apll_tdmout_sel", + 0x328, 8, 8, CLK_DIVIDER_ROUND_CLOSEST), + DIV_ADJ_F(CLK_TOP_APLL12_CK_DIV5, "apll12_ck_div5", "apll_tdmin_sel", + 0x328, 16, 8, CLK_DIVIDER_ROUND_CLOSEST), + DIV_ADJ_F(CLK_TOP_APLL12_CK_DIV5B, "apll12_ck_div5b", "apll_tdmin_sel", + 0x328, 24, 8, CLK_DIVIDER_ROUND_CLOSEST), DIV_ADJ_F(CLK_TOP_APLL12_CK_DIV6, "apll12_ck_div6", "apll_spdif_sel", 0x32c, 0, 8, CLK_DIVIDER_ROUND_CLOSEST), }; @@ -583,15 +592,15 @@ static const struct mtk_gate_regs top2_cg_regs = { #define GATE_TOP0(_id, _name, _parent, _shift) \ GATE_MTK(_id, _name, _parent, &top0_cg_regs, \ - _shift, &mtk_clk_gate_ops_no_setclr_inv) + _shift, 
&mtk_clk_gate_ops_no_setclr) #define GATE_TOP1(_id, _name, _parent, _shift) \ GATE_MTK(_id, _name, _parent, &top1_cg_regs, \ - _shift, &mtk_clk_gate_ops_no_setclr) + _shift, &mtk_clk_gate_ops_no_setclr_inv) #define GATE_TOP2(_id, _name, _parent, _shift) \ GATE_MTK(_id, _name, _parent, &top2_cg_regs, \ - _shift, &mtk_clk_gate_ops_no_setclr) + _shift, &mtk_clk_gate_ops_no_setclr_inv) static const struct mtk_gate top_clk_gates[] = { GATE_TOP0(CLK_TOP_CONN_32K, "conn_32k", "clk32k", 10), @@ -696,6 +705,7 @@ static const struct mtk_gate ifr_clks[] = { GATE_IFR3(CLK_IFR_GCPU, "ifr_gcpu", "axi_sel", 8), GATE_IFR3(CLK_IFR_TRNG, "ifr_trng", "axi_sel", 9), GATE_IFR3(CLK_IFR_AUXADC, "ifr_auxadc", "clk26m", 10), + GATE_IFR3(CLK_IFR_CPUM, "ifr_cpum", "clk26m", 11), GATE_IFR3(CLK_IFR_AUXADC_MD, "ifr_auxadc_md", "clk26m", 14), GATE_IFR3(CLK_IFR_AP_DMA, "ifr_ap_dma", "axi_sel", 18), GATE_IFR3(CLK_IFR_DEBUGSYS, "ifr_debugsys", "axi_sel", 24), @@ -717,6 +727,8 @@ static const struct mtk_gate ifr_clks[] = { GATE_IFR5(CLK_IFR_PWRAP_TMR, "ifr_pwrap_tmr", "clk26m", 12), GATE_IFR5(CLK_IFR_PWRAP_SPI, "ifr_pwrap_spi", "clk26m", 13), GATE_IFR5(CLK_IFR_PWRAP_SYS, "ifr_pwrap_sys", "clk26m", 14), + GATE_MTK_FLAGS(CLK_IFR_MCU_PM_BK, "ifr_mcu_pm_bk", NULL, &ifr5_cg_regs, + 17, &mtk_clk_gate_ops_setclr, CLK_IGNORE_UNUSED), GATE_IFR5(CLK_IFR_IRRX_26M, "ifr_irrx_26m", "clk26m", 22), GATE_IFR5(CLK_IFR_IRRX_32K, "ifr_irrx_32k", "clk32k", 23), GATE_IFR5(CLK_IFR_I2C0_AXI, "ifr_i2c0_axi", "i2c_sel", 24), diff --git a/drivers/clk/pxa/clk-pxa3xx.c b/drivers/clk/pxa/clk-pxa3xx.c index 42958a5426625a85af8935484bb13bd0bb576b20..621e298f101a039bcc99f14f42288951b55dae52 100644 --- a/drivers/clk/pxa/clk-pxa3xx.c +++ b/drivers/clk/pxa/clk-pxa3xx.c @@ -164,7 +164,7 @@ void pxa3xx_clk_update_accr(u32 disable, u32 enable, u32 xclkcfg, u32 mask) accr &= ~disable; accr |= enable; - writel(accr, ACCR); + writel(accr, clk_regs + ACCR); if (xclkcfg) __asm__("mcr p14, 0, %0, c6, c0, 0\n" : : "r"(xclkcfg)); diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 29904395e95f9ca3b6c24d845584a5da141a99b0..b2f05d27167e393c92b63bda89b17a9b48cad923 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -975,7 +975,7 @@ static int __init acpi_cpufreq_probe(struct platform_device *pdev) /* don't keep reloading if cpufreq_driver exists */ if (cpufreq_get_current_driver()) - return -EEXIST; + return -ENODEV; pr_debug("%s\n", __func__); diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c index 5a3d4aa0f45a6a18e1abb3cceb6c972c4b3f3e19..ddd346a239e0b55acfaef158cb12ea1a8612f948 100644 --- a/drivers/cpufreq/amd-pstate.c +++ b/drivers/cpufreq/amd-pstate.c @@ -444,9 +444,8 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy) return 0; } -static int amd_pstate_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) +static int amd_pstate_update_freq(struct cpufreq_policy *policy, + unsigned int target_freq, bool fast_switch) { struct cpufreq_freqs freqs; struct amd_cpudata *cpudata = policy->driver_data; @@ -465,26 +464,51 @@ static int amd_pstate_target(struct cpufreq_policy *policy, des_perf = DIV_ROUND_CLOSEST(target_freq * cap_perf, cpudata->max_freq); - cpufreq_freq_transition_begin(policy, &freqs); + WARN_ON(fast_switch && !policy->fast_switch_enabled); + /* + * If fast_switch is desired, then there aren't any registered + * transition notifiers. See comment for + * cpufreq_enable_fast_switch(). 
+ */ + if (!fast_switch) + cpufreq_freq_transition_begin(policy, &freqs); + amd_pstate_update(cpudata, min_perf, des_perf, - max_perf, false, policy->governor->flags); - cpufreq_freq_transition_end(policy, &freqs, false); + max_perf, fast_switch, policy->governor->flags); + + if (!fast_switch) + cpufreq_freq_transition_end(policy, &freqs, false); return 0; } +static int amd_pstate_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + return amd_pstate_update_freq(policy, target_freq, false); +} + +static unsigned int amd_pstate_fast_switch(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + return amd_pstate_update_freq(policy, target_freq, true); +} + static void amd_pstate_adjust_perf(unsigned int cpu, unsigned long _min_perf, unsigned long target_perf, unsigned long capacity) { unsigned long max_perf, min_perf, des_perf, - cap_perf, lowest_nonlinear_perf; + cap_perf, lowest_nonlinear_perf, max_freq; struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); struct amd_cpudata *cpudata = policy->driver_data; + unsigned int target_freq; cap_perf = READ_ONCE(cpudata->highest_perf); lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf); + max_freq = READ_ONCE(cpudata->max_freq); des_perf = cap_perf; if (target_perf < capacity) @@ -501,6 +525,10 @@ static void amd_pstate_adjust_perf(unsigned int cpu, if (max_perf < min_perf) max_perf = min_perf; + des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf); + target_freq = div_u64(des_perf * max_freq, max_perf); + policy->cur = target_freq; + amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true, policy->governor->flags); cpufreq_cpu_put(policy); @@ -715,6 +743,7 @@ static int amd_pstate_cpu_exit(struct cpufreq_policy *policy) freq_qos_remove_request(&cpudata->req[1]); freq_qos_remove_request(&cpudata->req[0]); + policy->fast_switch_possible = false; kfree(cpudata); return 0; @@ -1079,7 +1108,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy) policy->policy = CPUFREQ_POLICY_POWERSAVE; if (boot_cpu_has(X86_FEATURE_CPPC)) { - policy->fast_switch_possible = true; ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value); if (ret) return ret; @@ -1102,7 +1130,6 @@ free_cpudata1: static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy) { pr_debug("CPU %d exiting\n", policy->cpu); - policy->fast_switch_possible = false; return 0; } @@ -1309,6 +1336,7 @@ static struct cpufreq_driver amd_pstate_driver = { .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS, .verify = amd_pstate_verify, .target = amd_pstate_target, + .fast_switch = amd_pstate_fast_switch, .init = amd_pstate_cpu_init, .exit = amd_pstate_cpu_exit, .suspend = amd_pstate_cpu_suspend, diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c index 1d2cfea9858afc121d8ecf4048d5bf2bfc9afe4a..73efbcf5513b2cde60faa7d3223479c1c5efd1bd 100644 --- a/drivers/cpufreq/pcc-cpufreq.c +++ b/drivers/cpufreq/pcc-cpufreq.c @@ -583,7 +583,7 @@ static int __init pcc_cpufreq_probe(struct platform_device *pdev) /* Skip initialization if another cpufreq driver is there. 
*/ if (cpufreq_get_current_driver()) - return -EEXIST; + return -ENODEV; if (acpi_disabled) return -ENODEV; diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index 23b9ff920d7e85c8f347fc11bb8fe738635c31a4..bea9cf31a12df9a3f258e47e98ca5af3582ea856 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -1028,7 +1028,7 @@ static int cxl_mem_get_partition_info(struct cxl_dev_state *cxlds) * cxl_dev_state_identify() - Send the IDENTIFY command to the device. * @cxlds: The device data for the operation * - * Return: 0 if identify was executed successfully. + * Return: 0 if identify was executed successfully or media not ready. * * This will dispatch the identify command to the device and on success populate * structures to be exported to sysfs. @@ -1041,6 +1041,9 @@ int cxl_dev_state_identify(struct cxl_dev_state *cxlds) u32 val; int rc; + if (!cxlds->media_ready) + return 0; + mbox_cmd = (struct cxl_mbox_cmd) { .opcode = CXL_MBOX_OP_IDENTIFY, .size_out = sizeof(id), @@ -1102,6 +1105,13 @@ int cxl_mem_create_range_info(struct cxl_dev_state *cxlds) struct device *dev = cxlds->dev; int rc; + if (!cxlds->media_ready) { + cxlds->dpa_res = DEFINE_RES_MEM(0, 0); + cxlds->ram_res = DEFINE_RES_MEM(0, 0); + cxlds->pmem_res = DEFINE_RES_MEM(0, 0); + return 0; + } + cxlds->dpa_res = (struct resource)DEFINE_RES_MEM(0, cxlds->total_bytes); diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index f332fe7af92ba3f86833ca9f3517723b855e53a2..67f4ab6daa34f0144380c27d41d9ca252c52ef04 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -101,23 +101,57 @@ int devm_cxl_port_enumerate_dports(struct cxl_port *port) } EXPORT_SYMBOL_NS_GPL(devm_cxl_port_enumerate_dports, CXL); -/* - * Wait up to @media_ready_timeout for the device to report memory - * active. - */ -int cxl_await_media_ready(struct cxl_dev_state *cxlds) +static int cxl_dvsec_mem_range_valid(struct cxl_dev_state *cxlds, int id) +{ + struct pci_dev *pdev = to_pci_dev(cxlds->dev); + int d = cxlds->cxl_dvsec; + bool valid = false; + int rc, i; + u32 temp; + + if (id > CXL_DVSEC_RANGE_MAX) + return -EINVAL; + + /* Check MEM INFO VALID bit first, give up after 1s */ + i = 1; + do { + rc = pci_read_config_dword(pdev, + d + CXL_DVSEC_RANGE_SIZE_LOW(id), + &temp); + if (rc) + return rc; + + valid = FIELD_GET(CXL_DVSEC_MEM_INFO_VALID, temp); + if (valid) + break; + msleep(1000); + } while (i--); + + if (!valid) { + dev_err(&pdev->dev, + "Timeout awaiting memory range %d valid after 1s.\n", + id); + return -ETIMEDOUT; + } + + return 0; +} + +static int cxl_dvsec_mem_range_active(struct cxl_dev_state *cxlds, int id) { struct pci_dev *pdev = to_pci_dev(cxlds->dev); int d = cxlds->cxl_dvsec; bool active = false; - u64 md_status; int rc, i; + u32 temp; - for (i = media_ready_timeout; i; i--) { - u32 temp; + if (id > CXL_DVSEC_RANGE_MAX) + return -EINVAL; + /* Check MEM ACTIVE bit, up to 60s timeout by default */ + for (i = media_ready_timeout; i; i--) { rc = pci_read_config_dword( - pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &temp); + pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(id), &temp); if (rc) return rc; @@ -134,6 +168,39 @@ int cxl_await_media_ready(struct cxl_dev_state *cxlds) return -ETIMEDOUT; } + return 0; +} + +/* + * Wait up to @media_ready_timeout for the device to report memory + * active. 
+ */ +int cxl_await_media_ready(struct cxl_dev_state *cxlds) +{ + struct pci_dev *pdev = to_pci_dev(cxlds->dev); + int d = cxlds->cxl_dvsec; + int rc, i, hdm_count; + u64 md_status; + u16 cap; + + rc = pci_read_config_word(pdev, + d + CXL_DVSEC_CAP_OFFSET, &cap); + if (rc) + return rc; + + hdm_count = FIELD_GET(CXL_DVSEC_HDM_COUNT_MASK, cap); + for (i = 0; i < hdm_count; i++) { + rc = cxl_dvsec_mem_range_valid(cxlds, i); + if (rc) + return rc; + } + + for (i = 0; i < hdm_count; i++) { + rc = cxl_dvsec_mem_range_active(cxlds, i); + if (rc) + return rc; + } + md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET); if (!CXLMDEV_READY(md_status)) return -EIO; @@ -241,17 +308,36 @@ static void disable_hdm(void *_cxlhdm) hdm + CXL_HDM_DECODER_CTRL_OFFSET); } -static int devm_cxl_enable_hdm(struct device *host, struct cxl_hdm *cxlhdm) +int devm_cxl_enable_hdm(struct cxl_port *port, struct cxl_hdm *cxlhdm) { - void __iomem *hdm = cxlhdm->regs.hdm_decoder; + void __iomem *hdm; u32 global_ctrl; + /* + * If the hdm capability was not mapped there is nothing to enable and + * the caller is responsible for what happens next. For example, + * emulate a passthrough decoder. + */ + if (IS_ERR(cxlhdm)) + return 0; + + hdm = cxlhdm->regs.hdm_decoder; global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET); + + /* + * If the HDM decoder capability was enabled on entry, skip + * registering disable_hdm() since this decode capability may be + * owned by platform firmware. + */ + if (global_ctrl & CXL_HDM_DECODER_ENABLE) + return 0; + writel(global_ctrl | CXL_HDM_DECODER_ENABLE, hdm + CXL_HDM_DECODER_CTRL_OFFSET); - return devm_add_action_or_reset(host, disable_hdm, cxlhdm); + return devm_add_action_or_reset(&port->dev, disable_hdm, cxlhdm); } +EXPORT_SYMBOL_NS_GPL(devm_cxl_enable_hdm, CXL); int cxl_dvsec_rr_decode(struct device *dev, int d, struct cxl_endpoint_dvsec_info *info) @@ -425,7 +511,7 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm, if (info->mem_enabled) return 0; - rc = devm_cxl_enable_hdm(&port->dev, cxlhdm); + rc = devm_cxl_enable_hdm(port, cxlhdm); if (rc) return rc; diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c index da2068475fa26c5b4cea086e8433bcec77299f07..e7c284c890bc111eeefd19e369e996b4b6966f3f 100644 --- a/drivers/cxl/core/port.c +++ b/drivers/cxl/core/port.c @@ -750,11 +750,10 @@ struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport, parent_port = parent_dport ? parent_dport->port : NULL; if (IS_ERR(port)) { - dev_dbg(uport, "Failed to add %s%s%s%s: %ld\n", - dev_name(&port->dev), - parent_port ? " to " : "", + dev_dbg(uport, "Failed to add%s%s%s: %ld\n", + parent_port ? " port to " : "", parent_port ? dev_name(&parent_port->dev) : "", - parent_port ? "" : " (root port)", + parent_port ? 
"" : " root port", PTR_ERR(port)); } else { dev_dbg(uport, "%s added%s%s%s\n", diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index 044a92d9813e239d1b97e7a4935f36850c30e154..f93a285389621cc77e86ae792c095b1671f30c85 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -710,6 +710,7 @@ struct cxl_endpoint_dvsec_info { struct cxl_hdm; struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port, struct cxl_endpoint_dvsec_info *info); +int devm_cxl_enable_hdm(struct cxl_port *port, struct cxl_hdm *cxlhdm); int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm, struct cxl_endpoint_dvsec_info *info); int devm_cxl_add_passthrough_decoder(struct cxl_port *port); diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index db12b6313afbf734fba7c38302a28c56c44950a1..a2845a7a69d820d9b4f04ae6a681839728d0f31c 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -266,6 +266,7 @@ struct cxl_poison_state { * @regs: Parsed register blocks * @cxl_dvsec: Offset to the PCIe device DVSEC * @rcd: operating in RCD mode (CXL 3.0 9.11.8 CXL Devices Attached to an RCH) + * @media_ready: Indicate whether the device media is usable * @payload_size: Size of space for payload * (CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register) * @lsa_size: Size of Label Storage Area @@ -303,6 +304,7 @@ struct cxl_dev_state { int cxl_dvsec; bool rcd; + bool media_ready; size_t payload_size; size_t lsa_size; struct mutex mbox_mutex; /* Protects device mailbox and firmware */ diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h index 0465ef963cd6a0b23dae3cfb7177d9a033dcde18..7c02e55b80429a9390d3322407701033bb6c01ce 100644 --- a/drivers/cxl/cxlpci.h +++ b/drivers/cxl/cxlpci.h @@ -31,6 +31,8 @@ #define CXL_DVSEC_RANGE_BASE_LOW(i) (0x24 + (i * 0x10)) #define CXL_DVSEC_MEM_BASE_LOW_MASK GENMASK(31, 28) +#define CXL_DVSEC_RANGE_MAX 2 + /* CXL 2.0 8.1.4: Non-CXL Function Map DVSEC */ #define CXL_DVSEC_FUNCTION_MAP 2 diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c index 10caf180b3fa0bff3e1363c269ad4fe77cc7cff8..519edd0eb1967d8c0d0ec7bb5e934b267e37c1ff 100644 --- a/drivers/cxl/mem.c +++ b/drivers/cxl/mem.c @@ -124,6 +124,9 @@ static int cxl_mem_probe(struct device *dev) struct dentry *dentry; int rc; + if (!cxlds->media_ready) + return -EBUSY; + /* * Someone is trying to reattach this device after it lost its port * connection (an endpoint port previously registered by this memdev was diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index f7a5b8e9c102411ca5bfde2f1995488498aa7e91..0872f2233ed0c88aa9fec7e9e93935dfa1f0e740 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -708,6 +708,12 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (rc) dev_dbg(&pdev->dev, "Failed to map RAS capability.\n"); + rc = cxl_await_media_ready(cxlds); + if (rc == 0) + cxlds->media_ready = true; + else + dev_warn(&pdev->dev, "Media not active (%d)\n", rc); + rc = cxl_pci_setup_mailbox(cxlds); if (rc) return rc; diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c index eb57324c4ad4a0e54c9a35500df9c3a216f603a2..c23b6164e1c0fa5c1e9a9c671c2e1d29da604c69 100644 --- a/drivers/cxl/port.c +++ b/drivers/cxl/port.c @@ -60,13 +60,17 @@ static int discover_region(struct device *dev, void *root) static int cxl_switch_port_probe(struct cxl_port *port) { struct cxl_hdm *cxlhdm; - int rc; + int rc, nr_dports; - rc = devm_cxl_port_enumerate_dports(port); - if (rc < 0) - return rc; + nr_dports = devm_cxl_port_enumerate_dports(port); + if (nr_dports < 0) + return nr_dports; cxlhdm = devm_cxl_setup_hdm(port, NULL); + rc 
= devm_cxl_enable_hdm(port, cxlhdm); + if (rc) + return rc; + if (!IS_ERR(cxlhdm)) return devm_cxl_enumerate_decoders(cxlhdm, NULL); @@ -75,7 +79,7 @@ static int cxl_switch_port_probe(struct cxl_port *port) return PTR_ERR(cxlhdm); } - if (rc == 1) { + if (nr_dports == 1) { dev_dbg(&port->dev, "Fallback to passthrough decoder\n"); return devm_cxl_add_passthrough_decoder(port); } @@ -113,12 +117,6 @@ static int cxl_endpoint_port_probe(struct cxl_port *port) if (rc) return rc; - rc = cxl_await_media_ready(cxlds); - if (rc) { - dev_err(&port->dev, "Media not active (%d)\n", rc); - return rc; - } - rc = devm_cxl_enumerate_decoders(cxlhdm, &info); if (rc) return rc; diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c index 01f2e86f3f7ce69f3e1aab1c336173a1b6de9c9d..12cf6bb2e3ce370043968935728d25b371894862 100644 --- a/drivers/dma-buf/udmabuf.c +++ b/drivers/dma-buf/udmabuf.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include @@ -207,9 +206,7 @@ static long udmabuf_create(struct miscdevice *device, struct udmabuf *ubuf; struct dma_buf *buf; pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit; - struct page *page, *hpage = NULL; - pgoff_t subpgoff, maxsubpgs; - struct hstate *hpstate; + struct page *page; int seals, ret = -EINVAL; u32 i, flags; @@ -245,7 +242,7 @@ static long udmabuf_create(struct miscdevice *device, if (!memfd) goto err; mapping = memfd->f_mapping; - if (!shmem_mapping(mapping) && !is_file_hugepages(memfd)) + if (!shmem_mapping(mapping)) goto err; seals = memfd_fcntl(memfd, F_GET_SEALS, 0); if (seals == -EINVAL) @@ -256,48 +253,16 @@ static long udmabuf_create(struct miscdevice *device, goto err; pgoff = list[i].offset >> PAGE_SHIFT; pgcnt = list[i].size >> PAGE_SHIFT; - if (is_file_hugepages(memfd)) { - hpstate = hstate_file(memfd); - pgoff = list[i].offset >> huge_page_shift(hpstate); - subpgoff = (list[i].offset & - ~huge_page_mask(hpstate)) >> PAGE_SHIFT; - maxsubpgs = huge_page_size(hpstate) >> PAGE_SHIFT; - } for (pgidx = 0; pgidx < pgcnt; pgidx++) { - if (is_file_hugepages(memfd)) { - if (!hpage) { - hpage = find_get_page_flags(mapping, pgoff, - FGP_ACCESSED); - if (!hpage) { - ret = -EINVAL; - goto err; - } - } - page = hpage + subpgoff; - get_page(page); - subpgoff++; - if (subpgoff == maxsubpgs) { - put_page(hpage); - hpage = NULL; - subpgoff = 0; - pgoff++; - } - } else { - page = shmem_read_mapping_page(mapping, - pgoff + pgidx); - if (IS_ERR(page)) { - ret = PTR_ERR(page); - goto err; - } + page = shmem_read_mapping_page(mapping, pgoff + pgidx); + if (IS_ERR(page)) { + ret = PTR_ERR(page); + goto err; } ubuf->pages[pgbuf++] = page; } fput(memfd); memfd = NULL; - if (hpage) { - put_page(hpage); - hpage = NULL; - } } exp_info.ops = &udmabuf_ops; diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 8858470246e1552e5e278a2b254f8ae874df894b..ee3a219e3a897c56ac69d7a902ef90ccc0262822 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -132,7 +132,7 @@ #define ATC_DST_PIP BIT(12) /* Destination Picture-in-Picture enabled */ #define ATC_SRC_DSCR_DIS BIT(16) /* Src Descriptor fetch disable */ #define ATC_DST_DSCR_DIS BIT(20) /* Dst Descriptor fetch disable */ -#define ATC_FC GENMASK(22, 21) /* Choose Flow Controller */ +#define ATC_FC GENMASK(23, 21) /* Choose Flow Controller */ #define ATC_FC_MEM2MEM 0x0 /* Mem-to-Mem (DMA) */ #define ATC_FC_MEM2PER 0x1 /* Mem-to-Periph (DMA) */ #define ATC_FC_PER2MEM 0x2 /* Periph-to-Mem (DMA) */ @@ -153,8 +153,6 @@ #define ATC_AUTO BIT(31) /* Auto multiple buffer tx enable */ /* 
Bitfields in CFG */ -#define ATC_PER_MSB(h) ((0x30U & (h)) >> 4) /* Extract most significant bits of a handshaking identifier */ - #define ATC_SRC_PER GENMASK(3, 0) /* Channel src rq associated with periph handshaking ifc h */ #define ATC_DST_PER GENMASK(7, 4) /* Channel dst rq associated with periph handshaking ifc h */ #define ATC_SRC_REP BIT(8) /* Source Replay Mod */ @@ -181,10 +179,15 @@ #define ATC_DPIP_HOLE GENMASK(15, 0) #define ATC_DPIP_BOUNDARY GENMASK(25, 16) -#define ATC_SRC_PER_ID(id) (FIELD_PREP(ATC_SRC_PER_MSB, (id)) | \ - FIELD_PREP(ATC_SRC_PER, (id))) -#define ATC_DST_PER_ID(id) (FIELD_PREP(ATC_DST_PER_MSB, (id)) | \ - FIELD_PREP(ATC_DST_PER, (id))) +#define ATC_PER_MSB GENMASK(5, 4) /* Extract MSBs of a handshaking identifier */ +#define ATC_SRC_PER_ID(id) \ + ({ typeof(id) _id = (id); \ + FIELD_PREP(ATC_SRC_PER_MSB, FIELD_GET(ATC_PER_MSB, _id)) | \ + FIELD_PREP(ATC_SRC_PER, _id); }) +#define ATC_DST_PER_ID(id) \ + ({ typeof(id) _id = (id); \ + FIELD_PREP(ATC_DST_PER_MSB, FIELD_GET(ATC_PER_MSB, _id)) | \ + FIELD_PREP(ATC_DST_PER, _id); }) diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 7da6d9b6098e719e531052c9aa938b669dfc3149..c3b37168b21f166e9690076a1692d965392b37f5 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -1102,6 +1102,8 @@ at_xdmac_prep_interleaved(struct dma_chan *chan, NULL, src_addr, dst_addr, xt, xt->sgl); + if (!first) + return NULL; /* Length of the block is (BLEN+1) microblocks. */ for (i = 0; i < xt->numf - 1; i++) @@ -1132,8 +1134,9 @@ at_xdmac_prep_interleaved(struct dma_chan *chan, src_addr, dst_addr, xt, chunk); if (!desc) { - list_splice_tail_init(&first->descs_list, - &atchan->free_descs_list); + if (first) + list_splice_tail_init(&first->descs_list, + &atchan->free_descs_list); return NULL; } diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c index ecbf67c2ad2b07d51ec6b05764be8dc4c51068db..d32deb9b4e3dee466fd056230596aa42198d8285 100644 --- a/drivers/dma/idxd/cdev.c +++ b/drivers/dma/idxd/cdev.c @@ -277,7 +277,6 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp) if (wq_dedicated(wq)) { rc = idxd_wq_set_pasid(wq, pasid); if (rc < 0) { - iommu_sva_unbind_device(sva); dev_err(dev, "wq set pasid failed: %d\n", rc); goto failed_set_pasid; } diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 0d9257fbdfb0d90d902761406de722eff92b1888..b4731fe6bbc14fc7360723726b535a6194383429 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -1050,7 +1050,7 @@ static bool _trigger(struct pl330_thread *thrd) return true; } -static bool _start(struct pl330_thread *thrd) +static bool pl330_start_thread(struct pl330_thread *thrd) { switch (_state(thrd)) { case PL330_STATE_FAULT_COMPLETING: @@ -1702,7 +1702,7 @@ static int pl330_update(struct pl330_dmac *pl330) thrd->req_running = -1; /* Get going again ASAP */ - _start(thrd); + pl330_start_thread(thrd); /* For now, just make a list of callbacks to be done */ list_add_tail(&descdone->rqd, &pl330->req_done); @@ -2089,7 +2089,7 @@ static void pl330_tasklet(struct tasklet_struct *t) } else { /* Make sure the PL330 Channel thread is active */ spin_lock(&pch->thread->dmac->lock); - _start(pch->thread); + pl330_start_thread(pch->thread); spin_unlock(&pch->thread->dmac->lock); } @@ -2107,7 +2107,7 @@ static void pl330_tasklet(struct tasklet_struct *t) if (power_down) { pch->active = true; spin_lock(&pch->thread->dmac->lock); - _start(pch->thread); + pl330_start_thread(pch->thread); spin_unlock(&pch->thread->dmac->lock); power_down = false; } 
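The new ATC_SRC_PER_ID()/ATC_DST_PER_ID() macros above re-encode a 6-bit handshaking identifier with FIELD_GET()/FIELD_PREP() from <linux/bitfield.h>: bits 3:0 of the id land in the *_PER register field and bits 5:4 are extracted and re-packed into the separate *_PER_MSB field, with the ({ ... }) statement expression evaluating the id argument only once. A small stand-alone sketch of the same pattern is below; the DEMO_* masks, their bit positions and the helper name are invented for illustration and do not correspond to the at_hdmac register layout.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_ID_LSB		GENMASK(3, 0)	/* low four bits of the id */
#define DEMO_ID_MSB		GENMASK(5, 4)	/* top two bits of the id */
#define DEMO_SRC_PER		GENMASK(3, 0)	/* register field holding the low bits */
#define DEMO_SRC_PER_MSB	GENMASK(11, 10)	/* register field holding the two MSBs */

static inline u32 demo_src_per_id(u32 id)
{
	/*
	 * FIELD_GET() pulls each slice out of the id, FIELD_PREP() shifts it
	 * into place in the register word; the two slices are then OR'ed
	 * together.
	 */
	return FIELD_PREP(DEMO_SRC_PER_MSB, FIELD_GET(DEMO_ID_MSB, id)) |
	       FIELD_PREP(DEMO_SRC_PER, FIELD_GET(DEMO_ID_LSB, id));
}

For example, an id of 0x2a (binary 10 1010) gives FIELD_GET(DEMO_ID_MSB, id) == 0x2 and FIELD_GET(DEMO_ID_LSB, id) == 0xa, so demo_src_per_id() returns 0x80a: 0xa in bits 3:0 and 0x2 in bits 11:10.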
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index fc3a2a05ab7b70f33ab2c81e618a245941e1222a..b8329a23728d73f2a8a29e1c3965181b2712d635 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -5527,7 +5527,7 @@ static int udma_probe(struct platform_device *pdev) return ret; } -static int udma_pm_suspend(struct device *dev) +static int __maybe_unused udma_pm_suspend(struct device *dev) { struct udma_dev *ud = dev_get_drvdata(dev); struct dma_device *dma_dev = &ud->ddev; @@ -5549,7 +5549,7 @@ static int udma_pm_suspend(struct device *dev) return 0; } -static int udma_pm_resume(struct device *dev) +static int __maybe_unused udma_pm_resume(struct device *dev) { struct udma_dev *ud = dev_get_drvdata(dev); struct dma_device *dma_dev = &ud->ddev; diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c index 265e0fb39bc7dcf87f682f5145d7c1274602f00a..b2db545c681031e649883f2c81cc33aecc9a31dd 100644 --- a/drivers/edac/qcom_edac.c +++ b/drivers/edac/qcom_edac.c @@ -21,30 +21,9 @@ #define TRP_SYN_REG_CNT 6 #define DRP_SYN_REG_CNT 8 -#define LLCC_COMMON_STATUS0 0x0003000c #define LLCC_LB_CNT_MASK GENMASK(31, 28) #define LLCC_LB_CNT_SHIFT 28 -/* Single & double bit syndrome register offsets */ -#define TRP_ECC_SB_ERR_SYN0 0x0002304c -#define TRP_ECC_DB_ERR_SYN0 0x00020370 -#define DRP_ECC_SB_ERR_SYN0 0x0004204c -#define DRP_ECC_DB_ERR_SYN0 0x00042070 - -/* Error register offsets */ -#define TRP_ECC_ERROR_STATUS1 0x00020348 -#define TRP_ECC_ERROR_STATUS0 0x00020344 -#define DRP_ECC_ERROR_STATUS1 0x00042048 -#define DRP_ECC_ERROR_STATUS0 0x00042044 - -/* TRP, DRP interrupt register offsets */ -#define DRP_INTERRUPT_STATUS 0x00041000 -#define TRP_INTERRUPT_0_STATUS 0x00020480 -#define DRP_INTERRUPT_CLEAR 0x00041008 -#define DRP_ECC_ERROR_CNTR_CLEAR 0x00040004 -#define TRP_INTERRUPT_0_CLEAR 0x00020484 -#define TRP_ECC_ERROR_CNTR_CLEAR 0x00020440 - /* Mask and shift macros */ #define ECC_DB_ERR_COUNT_MASK GENMASK(4, 0) #define ECC_DB_ERR_WAYS_MASK GENMASK(31, 16) @@ -60,15 +39,6 @@ #define DRP_TRP_INT_CLEAR GENMASK(1, 0) #define DRP_TRP_CNT_CLEAR GENMASK(1, 0) -/* Config registers offsets*/ -#define DRP_ECC_ERROR_CFG 0x00040000 - -/* Tag RAM, Data RAM interrupt register offsets */ -#define CMN_INTERRUPT_0_ENABLE 0x0003001c -#define CMN_INTERRUPT_2_ENABLE 0x0003003c -#define TRP_INTERRUPT_0_ENABLE 0x00020488 -#define DRP_INTERRUPT_ENABLE 0x0004100c - #define SB_ERROR_THRESHOLD 0x1 #define SB_ERROR_THRESHOLD_SHIFT 24 #define SB_DB_TRP_INTERRUPT_ENABLE 0x3 @@ -88,9 +58,6 @@ enum { static const struct llcc_edac_reg_data edac_reg_data[] = { [LLCC_DRAM_CE] = { .name = "DRAM Single-bit", - .synd_reg = DRP_ECC_SB_ERR_SYN0, - .count_status_reg = DRP_ECC_ERROR_STATUS1, - .ways_status_reg = DRP_ECC_ERROR_STATUS0, .reg_cnt = DRP_SYN_REG_CNT, .count_mask = ECC_SB_ERR_COUNT_MASK, .ways_mask = ECC_SB_ERR_WAYS_MASK, @@ -98,9 +65,6 @@ static const struct llcc_edac_reg_data edac_reg_data[] = { }, [LLCC_DRAM_UE] = { .name = "DRAM Double-bit", - .synd_reg = DRP_ECC_DB_ERR_SYN0, - .count_status_reg = DRP_ECC_ERROR_STATUS1, - .ways_status_reg = DRP_ECC_ERROR_STATUS0, .reg_cnt = DRP_SYN_REG_CNT, .count_mask = ECC_DB_ERR_COUNT_MASK, .ways_mask = ECC_DB_ERR_WAYS_MASK, @@ -108,9 +72,6 @@ static const struct llcc_edac_reg_data edac_reg_data[] = { }, [LLCC_TRAM_CE] = { .name = "TRAM Single-bit", - .synd_reg = TRP_ECC_SB_ERR_SYN0, - .count_status_reg = TRP_ECC_ERROR_STATUS1, - .ways_status_reg = TRP_ECC_ERROR_STATUS0, .reg_cnt = TRP_SYN_REG_CNT, .count_mask = ECC_SB_ERR_COUNT_MASK, .ways_mask = 
ECC_SB_ERR_WAYS_MASK, @@ -118,9 +79,6 @@ static const struct llcc_edac_reg_data edac_reg_data[] = { }, [LLCC_TRAM_UE] = { .name = "TRAM Double-bit", - .synd_reg = TRP_ECC_DB_ERR_SYN0, - .count_status_reg = TRP_ECC_ERROR_STATUS1, - .ways_status_reg = TRP_ECC_ERROR_STATUS0, .reg_cnt = TRP_SYN_REG_CNT, .count_mask = ECC_DB_ERR_COUNT_MASK, .ways_mask = ECC_DB_ERR_WAYS_MASK, @@ -128,7 +86,7 @@ static const struct llcc_edac_reg_data edac_reg_data[] = { }, }; -static int qcom_llcc_core_setup(struct regmap *llcc_bcast_regmap) +static int qcom_llcc_core_setup(struct llcc_drv_data *drv, struct regmap *llcc_bcast_regmap) { u32 sb_err_threshold; int ret; @@ -137,31 +95,31 @@ static int qcom_llcc_core_setup(struct regmap *llcc_bcast_regmap) * Configure interrupt enable registers such that Tag, Data RAM related * interrupts are propagated to interrupt controller for servicing */ - ret = regmap_update_bits(llcc_bcast_regmap, CMN_INTERRUPT_2_ENABLE, + ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_2_enable, TRP0_INTERRUPT_ENABLE, TRP0_INTERRUPT_ENABLE); if (ret) return ret; - ret = regmap_update_bits(llcc_bcast_regmap, TRP_INTERRUPT_0_ENABLE, + ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->trp_interrupt_0_enable, SB_DB_TRP_INTERRUPT_ENABLE, SB_DB_TRP_INTERRUPT_ENABLE); if (ret) return ret; sb_err_threshold = (SB_ERROR_THRESHOLD << SB_ERROR_THRESHOLD_SHIFT); - ret = regmap_write(llcc_bcast_regmap, DRP_ECC_ERROR_CFG, + ret = regmap_write(llcc_bcast_regmap, drv->edac_reg_offset->drp_ecc_error_cfg, sb_err_threshold); if (ret) return ret; - ret = regmap_update_bits(llcc_bcast_regmap, CMN_INTERRUPT_2_ENABLE, + ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_2_enable, DRP0_INTERRUPT_ENABLE, DRP0_INTERRUPT_ENABLE); if (ret) return ret; - ret = regmap_write(llcc_bcast_regmap, DRP_INTERRUPT_ENABLE, + ret = regmap_write(llcc_bcast_regmap, drv->edac_reg_offset->drp_interrupt_enable, SB_DB_DRP_INTERRUPT_ENABLE); return ret; } @@ -170,29 +128,33 @@ static int qcom_llcc_core_setup(struct regmap *llcc_bcast_regmap) static int qcom_llcc_clear_error_status(int err_type, struct llcc_drv_data *drv) { - int ret = 0; + int ret; switch (err_type) { case LLCC_DRAM_CE: case LLCC_DRAM_UE: - ret = regmap_write(drv->bcast_regmap, DRP_INTERRUPT_CLEAR, + ret = regmap_write(drv->bcast_regmap, + drv->edac_reg_offset->drp_interrupt_clear, DRP_TRP_INT_CLEAR); if (ret) return ret; - ret = regmap_write(drv->bcast_regmap, DRP_ECC_ERROR_CNTR_CLEAR, + ret = regmap_write(drv->bcast_regmap, + drv->edac_reg_offset->drp_ecc_error_cntr_clear, DRP_TRP_CNT_CLEAR); if (ret) return ret; break; case LLCC_TRAM_CE: case LLCC_TRAM_UE: - ret = regmap_write(drv->bcast_regmap, TRP_INTERRUPT_0_CLEAR, + ret = regmap_write(drv->bcast_regmap, + drv->edac_reg_offset->trp_interrupt_0_clear, DRP_TRP_INT_CLEAR); if (ret) return ret; - ret = regmap_write(drv->bcast_regmap, TRP_ECC_ERROR_CNTR_CLEAR, + ret = regmap_write(drv->bcast_regmap, + drv->edac_reg_offset->trp_ecc_error_cntr_clear, DRP_TRP_CNT_CLEAR); if (ret) return ret; @@ -205,16 +167,54 @@ qcom_llcc_clear_error_status(int err_type, struct llcc_drv_data *drv) return ret; } +struct qcom_llcc_syn_regs { + u32 synd_reg; + u32 count_status_reg; + u32 ways_status_reg; +}; + +static void get_reg_offsets(struct llcc_drv_data *drv, int err_type, + struct qcom_llcc_syn_regs *syn_regs) +{ + const struct llcc_edac_reg_offset *edac_reg_offset = drv->edac_reg_offset; + + switch (err_type) { + case LLCC_DRAM_CE: + syn_regs->synd_reg = 
edac_reg_offset->drp_ecc_sb_err_syn0; + syn_regs->count_status_reg = edac_reg_offset->drp_ecc_error_status1; + syn_regs->ways_status_reg = edac_reg_offset->drp_ecc_error_status0; + break; + case LLCC_DRAM_UE: + syn_regs->synd_reg = edac_reg_offset->drp_ecc_db_err_syn0; + syn_regs->count_status_reg = edac_reg_offset->drp_ecc_error_status1; + syn_regs->ways_status_reg = edac_reg_offset->drp_ecc_error_status0; + break; + case LLCC_TRAM_CE: + syn_regs->synd_reg = edac_reg_offset->trp_ecc_sb_err_syn0; + syn_regs->count_status_reg = edac_reg_offset->trp_ecc_error_status1; + syn_regs->ways_status_reg = edac_reg_offset->trp_ecc_error_status0; + break; + case LLCC_TRAM_UE: + syn_regs->synd_reg = edac_reg_offset->trp_ecc_db_err_syn0; + syn_regs->count_status_reg = edac_reg_offset->trp_ecc_error_status1; + syn_regs->ways_status_reg = edac_reg_offset->trp_ecc_error_status0; + break; + } +} + /* Dump Syndrome registers data for Tag RAM, Data RAM bit errors*/ static int dump_syn_reg_values(struct llcc_drv_data *drv, u32 bank, int err_type) { struct llcc_edac_reg_data reg_data = edac_reg_data[err_type]; + struct qcom_llcc_syn_regs regs = { }; int err_cnt, err_ways, ret, i; u32 synd_reg, synd_val; + get_reg_offsets(drv, err_type, ®s); + for (i = 0; i < reg_data.reg_cnt; i++) { - synd_reg = reg_data.synd_reg + (i * 4); + synd_reg = regs.synd_reg + (i * 4); ret = regmap_read(drv->regmaps[bank], synd_reg, &synd_val); if (ret) @@ -224,7 +224,7 @@ dump_syn_reg_values(struct llcc_drv_data *drv, u32 bank, int err_type) reg_data.name, i, synd_val); } - ret = regmap_read(drv->regmaps[bank], reg_data.count_status_reg, + ret = regmap_read(drv->regmaps[bank], regs.count_status_reg, &err_cnt); if (ret) goto clear; @@ -234,7 +234,7 @@ dump_syn_reg_values(struct llcc_drv_data *drv, u32 bank, int err_type) edac_printk(KERN_CRIT, EDAC_LLCC, "%s: Error count: 0x%4x\n", reg_data.name, err_cnt); - ret = regmap_read(drv->regmaps[bank], reg_data.ways_status_reg, + ret = regmap_read(drv->regmaps[bank], regs.ways_status_reg, &err_ways); if (ret) goto clear; @@ -295,7 +295,7 @@ static irqreturn_t llcc_ecc_irq_handler(int irq, void *edev_ctl) /* Iterate over the banks and look for Tag RAM or Data RAM errors */ for (i = 0; i < drv->num_banks; i++) { - ret = regmap_read(drv->regmaps[i], DRP_INTERRUPT_STATUS, + ret = regmap_read(drv->regmaps[i], drv->edac_reg_offset->drp_interrupt_status, &drp_error); if (!ret && (drp_error & SB_ECC_ERROR)) { @@ -310,7 +310,7 @@ static irqreturn_t llcc_ecc_irq_handler(int irq, void *edev_ctl) if (!ret) irq_rc = IRQ_HANDLED; - ret = regmap_read(drv->regmaps[i], TRP_INTERRUPT_0_STATUS, + ret = regmap_read(drv->regmaps[i], drv->edac_reg_offset->trp_interrupt_0_status, &trp_error); if (!ret && (trp_error & SB_ECC_ERROR)) { @@ -342,7 +342,7 @@ static int qcom_llcc_edac_probe(struct platform_device *pdev) int ecc_irq; int rc; - rc = qcom_llcc_core_setup(llcc_driv_data->bcast_regmap); + rc = qcom_llcc_core_setup(llcc_driv_data, llcc_driv_data->bcast_regmap); if (rc) return rc; diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c index f29d77ecf72db613724dc175cdba1a001fc0ac60..2b8bfcd010f5fd34e59b3eacdec0677e6692f020 100644 --- a/drivers/firmware/arm_ffa/bus.c +++ b/drivers/firmware/arm_ffa/bus.c @@ -15,6 +15,8 @@ #include "common.h" +static DEFINE_IDA(ffa_bus_id); + static int ffa_device_match(struct device *dev, struct device_driver *drv) { const struct ffa_device_id *id_table; @@ -53,7 +55,8 @@ static void ffa_device_remove(struct device *dev) { struct ffa_driver *ffa_drv = 
to_ffa_driver(dev->driver); - ffa_drv->remove(to_ffa_dev(dev)); + if (ffa_drv->remove) + ffa_drv->remove(to_ffa_dev(dev)); } static int ffa_device_uevent(const struct device *dev, struct kobj_uevent_env *env) @@ -130,6 +133,7 @@ static void ffa_release_device(struct device *dev) { struct ffa_device *ffa_dev = to_ffa_dev(dev); + ida_free(&ffa_bus_id, ffa_dev->id); kfree(ffa_dev); } @@ -170,18 +174,24 @@ bool ffa_device_is_valid(struct ffa_device *ffa_dev) struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id, const struct ffa_ops *ops) { - int ret; + int id, ret; struct device *dev; struct ffa_device *ffa_dev; + id = ida_alloc_min(&ffa_bus_id, 1, GFP_KERNEL); + if (id < 0) + return NULL; + ffa_dev = kzalloc(sizeof(*ffa_dev), GFP_KERNEL); - if (!ffa_dev) + if (!ffa_dev) { + ida_free(&ffa_bus_id, id); return NULL; + } dev = &ffa_dev->dev; dev->bus = &ffa_bus_type; dev->release = ffa_release_device; - dev_set_name(&ffa_dev->dev, "arm-ffa-%04x", vm_id); + dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id); ffa_dev->vm_id = vm_id; ffa_dev->ops = ops; @@ -217,4 +227,5 @@ void arm_ffa_bus_exit(void) { ffa_devices_unregister(); bus_unregister(&ffa_bus_type); + ida_destroy(&ffa_bus_id); } diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c index fa85c64d3dede7edcc6c35b3d9959b868405d7a2..2109cd178ff706f3ec7b0db0735d18519ba639cb 100644 --- a/drivers/firmware/arm_ffa/driver.c +++ b/drivers/firmware/arm_ffa/driver.c @@ -193,7 +193,8 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3, int idx, count, flags = 0, sz, buf_sz; ffa_value_t partition_info; - if (!buffer || !num_partitions) /* Just get the count for now */ + if (drv_info->version > FFA_VERSION_1_0 && + (!buffer || !num_partitions)) /* Just get the count for now */ flags = PARTITION_INFO_GET_RETURN_COUNT_ONLY; mutex_lock(&drv_info->rx_lock); @@ -420,12 +421,18 @@ ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize, ep_mem_access->receiver = args->attrs[idx].receiver; ep_mem_access->attrs = args->attrs[idx].attrs; ep_mem_access->composite_off = COMPOSITE_OFFSET(args->nattrs); + ep_mem_access->flag = 0; + ep_mem_access->reserved = 0; } + mem_region->handle = 0; + mem_region->reserved_0 = 0; + mem_region->reserved_1 = 0; mem_region->ep_count = args->nattrs; composite = buffer + COMPOSITE_OFFSET(args->nattrs); composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg); composite->addr_range_cnt = num_entries; + composite->reserved = 0; length = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, num_entries); frag_len = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, 0); @@ -460,6 +467,7 @@ ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize, constituents->address = sg_phys(args->sg); constituents->pg_cnt = args->sg->length / FFA_PAGE_SIZE; + constituents->reserved = 0; constituents++; frag_len += sizeof(struct ffa_mem_region_addr_range); } while ((args->sg = sg_next(args->sg))); diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c index d40df099fd515276b87cdde144271c60954b46be..6971dcf72fb998425655865fd6ccf4e1ecb44976 100644 --- a/drivers/firmware/arm_scmi/raw_mode.c +++ b/drivers/firmware/arm_scmi/raw_mode.c @@ -1066,7 +1066,7 @@ static int scmi_xfer_raw_worker_init(struct scmi_raw_mode_info *raw) raw->wait_wq = alloc_workqueue("scmi-raw-wait-wq-%d", WQ_UNBOUND | WQ_FREEZABLE | - WQ_HIGHPRI, WQ_SYSFS, raw->id); + WQ_HIGHPRI | WQ_SYSFS, 0, raw->id); if (!raw->wait_wq) return -ENOMEM; diff --git a/drivers/firmware/cirrus/cs_dsp.c 
b/drivers/firmware/cirrus/cs_dsp.c index e4ccfb6a8fa52fefa852b009e161afb3ba23c264..ec056f6f40ce81ddd5a09578193811e9b9d1b9c1 100644 --- a/drivers/firmware/cirrus/cs_dsp.c +++ b/drivers/firmware/cirrus/cs_dsp.c @@ -2124,6 +2124,7 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware file, blocks, le32_to_cpu(blk->len), type, le32_to_cpu(blk->id)); + region_name = cs_dsp_mem_region_name(type); mem = cs_dsp_find_region(dsp, type); if (!mem) { cs_dsp_err(dsp, "No base for region %x\n", type); @@ -2147,8 +2148,8 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware reg = dsp->ops->region_to_reg(mem, reg); reg += offset; } else { - cs_dsp_err(dsp, "No %x for algorithm %x\n", - type, le32_to_cpu(blk->id)); + cs_dsp_err(dsp, "No %s for algorithm %x\n", + region_name, le32_to_cpu(blk->id)); } break; diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index abeff7dc0b581f8661b2d6a22edeb108d36497a6..34b9e787653865ee6834fcbe7416ccd6fcf08395 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -361,24 +361,6 @@ static void __init efi_debugfs_init(void) static inline void efi_debugfs_init(void) {} #endif -static void refresh_nv_rng_seed(struct work_struct *work) -{ - u8 seed[EFI_RANDOM_SEED_SIZE]; - - get_random_bytes(seed, sizeof(seed)); - efi.set_variable(L"RandomSeed", &LINUX_EFI_RANDOM_SEED_TABLE_GUID, - EFI_VARIABLE_NON_VOLATILE | EFI_VARIABLE_BOOTSERVICE_ACCESS | - EFI_VARIABLE_RUNTIME_ACCESS, sizeof(seed), seed); - memzero_explicit(seed, sizeof(seed)); -} -static int refresh_nv_rng_seed_notification(struct notifier_block *nb, unsigned long action, void *data) -{ - static DECLARE_WORK(work, refresh_nv_rng_seed); - schedule_work(&work); - return NOTIFY_DONE; -} -static struct notifier_block refresh_nv_rng_seed_nb = { .notifier_call = refresh_nv_rng_seed_notification }; - /* * We register the efi subsystem with the firmware subsystem and the * efivars subsystem with the efi subsystem, if the system was booted with @@ -451,9 +433,6 @@ static int __init efisubsys_init(void) platform_device_register_simple("efi_secret", 0, NULL, 0); #endif - if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) - execute_with_initialized_rng(&refresh_nv_rng_seed_nb); - return 0; err_remove_group: diff --git a/drivers/firmware/efi/libstub/Makefile.zboot b/drivers/firmware/efi/libstub/Makefile.zboot index 89ef820f3b34483a55cd6f9b30043910922da75e..2c489627a8078945f26071644cc5e5390c2ee0d9 100644 --- a/drivers/firmware/efi/libstub/Makefile.zboot +++ b/drivers/firmware/efi/libstub/Makefile.zboot @@ -32,7 +32,8 @@ zboot-size-len-$(CONFIG_KERNEL_GZIP) := 0 $(obj)/vmlinuz: $(obj)/vmlinux.bin FORCE $(call if_changed,$(zboot-method-y)) -OBJCOPYFLAGS_vmlinuz.o := -I binary -O $(EFI_ZBOOT_BFD_TARGET) $(EFI_ZBOOT_OBJCOPY_FLAGS) \ +# avoid eager evaluation to prevent references to non-existent build artifacts +OBJCOPYFLAGS_vmlinuz.o = -I binary -O $(EFI_ZBOOT_BFD_TARGET) $(EFI_ZBOOT_OBJCOPY_FLAGS) \ --rename-section .data=.gzdata,load,alloc,readonly,contents $(obj)/vmlinuz.o: $(obj)/vmlinuz FORCE $(call if_changed,objcopy) diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index 67d5a20802e0b7c6bff01a8da5c083f1768b6e58..54a2822cae7714584c668dd9e0650350e30922df 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h @@ -1133,4 +1133,7 @@ const u8 *__efi_get_smbios_string(const struct efi_smbios_record *record, void efi_remap_image(unsigned long 
image_base, unsigned alloc_size, unsigned long code_size); +asmlinkage efi_status_t __efiapi +efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab); + #endif diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 5521f060d58e9df3c45ab9bef89295eb3efac6aa..f45c6a36551c75533a92d6c6bf5897162b9ee5e8 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -897,7 +897,7 @@ config GPIO_F7188X help This option enables support for GPIOs found on Fintek Super-I/O chips F71869, F71869A, F71882FG, F71889F and F81866. - As well as Nuvoton Super-I/O chip NCT6116D. + As well as Nuvoton Super-I/O chip NCT6126D. To compile this driver as a module, choose M here: the module will be called f7188x-gpio. diff --git a/drivers/gpio/gpio-f7188x.c b/drivers/gpio/gpio-f7188x.c index 9effa7769bef5bcdea5865b964f78f261da40dfe..f54ca5a1775ea2178004e747286660bcf83900d1 100644 --- a/drivers/gpio/gpio-f7188x.c +++ b/drivers/gpio/gpio-f7188x.c @@ -48,7 +48,7 @@ /* * Nuvoton devices. */ -#define SIO_NCT6116D_ID 0xD283 /* NCT6116D chipset ID */ +#define SIO_NCT6126D_ID 0xD283 /* NCT6126D chipset ID */ #define SIO_LD_GPIO_NUVOTON 0x07 /* GPIO logical device */ @@ -62,7 +62,7 @@ enum chips { f81866, f81804, f81865, - nct6116d, + nct6126d, }; static const char * const f7188x_names[] = { @@ -74,7 +74,7 @@ static const char * const f7188x_names[] = { "f81866", "f81804", "f81865", - "nct6116d", + "nct6126d", }; struct f7188x_sio { @@ -187,8 +187,8 @@ static int f7188x_gpio_set_config(struct gpio_chip *chip, unsigned offset, /* Output mode register (0:open drain 1:push-pull). */ #define f7188x_gpio_out_mode(base) ((base) + 3) -#define f7188x_gpio_dir_invert(type) ((type) == nct6116d) -#define f7188x_gpio_data_single(type) ((type) == nct6116d) +#define f7188x_gpio_dir_invert(type) ((type) == nct6126d) +#define f7188x_gpio_data_single(type) ((type) == nct6126d) static struct f7188x_gpio_bank f71869_gpio_bank[] = { F7188X_GPIO_BANK(0, 6, 0xF0, DRVNAME "-0"), @@ -274,7 +274,7 @@ static struct f7188x_gpio_bank f81865_gpio_bank[] = { F7188X_GPIO_BANK(60, 5, 0x90, DRVNAME "-6"), }; -static struct f7188x_gpio_bank nct6116d_gpio_bank[] = { +static struct f7188x_gpio_bank nct6126d_gpio_bank[] = { F7188X_GPIO_BANK(0, 8, 0xE0, DRVNAME "-0"), F7188X_GPIO_BANK(10, 8, 0xE4, DRVNAME "-1"), F7188X_GPIO_BANK(20, 8, 0xE8, DRVNAME "-2"), @@ -282,7 +282,7 @@ static struct f7188x_gpio_bank nct6116d_gpio_bank[] = { F7188X_GPIO_BANK(40, 8, 0xF0, DRVNAME "-4"), F7188X_GPIO_BANK(50, 8, 0xF4, DRVNAME "-5"), F7188X_GPIO_BANK(60, 8, 0xF8, DRVNAME "-6"), - F7188X_GPIO_BANK(70, 1, 0xFC, DRVNAME "-7"), + F7188X_GPIO_BANK(70, 8, 0xFC, DRVNAME "-7"), }; static int f7188x_gpio_get_direction(struct gpio_chip *chip, unsigned offset) @@ -490,9 +490,9 @@ static int f7188x_gpio_probe(struct platform_device *pdev) data->nr_bank = ARRAY_SIZE(f81865_gpio_bank); data->bank = f81865_gpio_bank; break; - case nct6116d: - data->nr_bank = ARRAY_SIZE(nct6116d_gpio_bank); - data->bank = nct6116d_gpio_bank; + case nct6126d: + data->nr_bank = ARRAY_SIZE(nct6126d_gpio_bank); + data->bank = nct6126d_gpio_bank; break; default: return -ENODEV; @@ -559,9 +559,9 @@ static int __init f7188x_find(int addr, struct f7188x_sio *sio) case SIO_F81865_ID: sio->type = f81865; break; - case SIO_NCT6116D_ID: + case SIO_NCT6126D_ID: sio->device = SIO_LD_GPIO_NUVOTON; - sio->type = nct6116d; + sio->type = nct6126d; break; default: pr_info("Unsupported Fintek device 0x%04x\n", devid); @@ -569,7 +569,7 @@ static int __init f7188x_find(int addr, struct f7188x_sio *sio) } /* 
double check manufacturer where possible */ - if (sio->type != nct6116d) { + if (sio->type != nct6126d) { manid = superio_inw(addr, SIO_FINTEK_MANID); if (manid != SIO_FINTEK_ID) { pr_debug("Not a Fintek device at 0x%08x\n", addr); @@ -581,7 +581,7 @@ static int __init f7188x_find(int addr, struct f7188x_sio *sio) err = 0; pr_info("Found %s at %#x\n", f7188x_names[sio->type], (unsigned int)addr); - if (sio->type != nct6116d) + if (sio->type != nct6126d) pr_info(" revision %d\n", superio_inb(addr, SIO_FINTEK_DEVREV)); err: diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c index e6a7049bef6417affd16c79982d3eeb7c4e6c255..b32063ac845a5073f67593c433f2fcc7694362c7 100644 --- a/drivers/gpio/gpio-mockup.c +++ b/drivers/gpio/gpio-mockup.c @@ -369,7 +369,7 @@ static void gpio_mockup_debugfs_setup(struct device *dev, priv->offset = i; priv->desc = gpiochip_get_desc(gc, i); - debugfs_create_file(name, 0200, chip->dbg_dir, priv, + debugfs_create_file(name, 0600, chip->dbg_dir, priv, &gpio_mockup_debugfs_ops); } } diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c index 98939cd4a71e65777ed88c073da4ecba4f7233bc..745e5f67254ea2715ae7d3241468bd7ffe85fed1 100644 --- a/drivers/gpio/gpio-sifive.c +++ b/drivers/gpio/gpio-sifive.c @@ -221,8 +221,12 @@ static int sifive_gpio_probe(struct platform_device *pdev) return -ENODEV; } - for (i = 0; i < ngpio; i++) - chip->irq_number[i] = platform_get_irq(pdev, i); + for (i = 0; i < ngpio; i++) { + ret = platform_get_irq(pdev, i); + if (ret < 0) + return ret; + chip->irq_number[i] = ret; + } ret = bgpio_init(&chip->gc, dev, 4, chip->base + SIFIVE_GPIO_INPUT_VAL, diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c index a1c8702f362ce76ce7b1ae96ad0b3d378eec0ab6..8b49b0abacd518448ab7a49629a413d4388a35c1 100644 --- a/drivers/gpio/gpio-sim.c +++ b/drivers/gpio/gpio-sim.c @@ -696,6 +696,9 @@ static char **gpio_sim_make_line_names(struct gpio_sim_bank *bank, char **line_names; list_for_each_entry(line, &bank->line_list, siblings) { + if (line->offset >= bank->num_lines) + continue; + if (line->name) { if (line->offset > max_offset) max_offset = line->offset; @@ -721,8 +724,13 @@ static char **gpio_sim_make_line_names(struct gpio_sim_bank *bank, if (!line_names) return ERR_PTR(-ENOMEM); - list_for_each_entry(line, &bank->line_list, siblings) - line_names[line->offset] = line->name; + list_for_each_entry(line, &bank->line_list, siblings) { + if (line->offset >= bank->num_lines) + continue; + + if (line->name && (line->offset <= max_offset)) + line_names[line->offset] = line->name; + } return line_names; } @@ -754,6 +762,9 @@ static int gpio_sim_add_hogs(struct gpio_sim_device *dev) list_for_each_entry(bank, &dev->bank_list, siblings) { list_for_each_entry(line, &bank->line_list, siblings) { + if (line->offset >= bank->num_lines) + continue; + if (line->hog) num_hogs++; } @@ -769,6 +780,9 @@ static int gpio_sim_add_hogs(struct gpio_sim_device *dev) list_for_each_entry(bank, &dev->bank_list, siblings) { list_for_each_entry(line, &bank->line_list, siblings) { + if (line->offset >= bank->num_lines) + continue; + if (!line->hog) continue; diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 04fb05df805b016a08b8b70703ddcb7675795ff4..5be8ad61523eb591cafd35336960cf9ba59dd39a 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -209,6 +209,8 @@ static int gpiochip_find_base(int ngpio) break; /* nope, check the space right after the chip */ base = gdev->base + gdev->ngpio; + if (base < GPIO_DYNAMIC_BASE) + base = 
GPIO_DYNAMIC_BASE; } if (gpio_is_valid(base)) { @@ -1743,7 +1745,7 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gc) } /* Remove all IRQ mappings and delete the domain */ - if (gc->irq.domain) { + if (!gc->irq.domain_is_allocated_externally && gc->irq.domain) { unsigned int irq; for (offset = 0; offset < gc->ngpio; offset++) { @@ -1789,6 +1791,15 @@ int gpiochip_irqchip_add_domain(struct gpio_chip *gc, gc->to_irq = gpiochip_to_irq; gc->irq.domain = domain; + gc->irq.domain_is_allocated_externally = true; + + /* + * Using barrier() here to prevent compiler from reordering + * gc->irq.initialized before adding irqdomain. + */ + barrier(); + + gc->irq.initialized = true; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index aeeec211861c434bae58aae748586e4a608360d5..fd6e83795873b4a6622aedc2052916c2e8f6015a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -1092,16 +1092,20 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) * S0ix even though the system is suspending to idle, so return false * in that case. */ - if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) - dev_warn_once(adev->dev, + if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) { + dev_err_once(adev->dev, "Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n" "To use suspend-to-idle change the sleep mode in BIOS setup.\n"); + return false; + } #if !IS_ENABLED(CONFIG_AMD_PMC) - dev_warn_once(adev->dev, + dev_err_once(adev->dev, "Power consumption will be higher as the kernel has not been compiled with CONFIG_AMD_PMC.\n"); -#endif /* CONFIG_AMD_PMC */ + return false; +#else return true; +#endif /* CONFIG_AMD_PMC */ } #endif /* CONFIG_SUSPEND */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index b1ca1ab6d6ad305cf3ff4b5858ea875bca0e253b..393b6fb7a71d3686e3fe0518ed25b44139c06a6c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1615,6 +1615,7 @@ static const u16 amdgpu_unsupported_pciidlist[] = { 0x5874, 0x5940, 0x5941, + 0x5b70, 0x5b72, 0x5b73, 0x5b74, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index f52d0ba91a770a4b437a1137aa88cf6a7113cf53..a7d250809da99e9e13c95c07e57bdb7da4283b08 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -582,7 +582,8 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev) if (r) amdgpu_fence_driver_force_completion(ring); - if (ring->fence_drv.irq_src) + if (!drm_dev_is_unplugged(adev_to_drm(adev)) && + ring->fence_drv.irq_src) amdgpu_irq_put(adev, ring->fence_drv.irq_src, ring->fence_drv.irq_type); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 4e2531758866c6e370622152661efcddf218a31b..95b0f984acbfd8eecf062627b30c841e96e621af 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -593,6 +593,8 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev) case IP_VERSION(9, 3, 0): /* GC 10.3.7 */ case IP_VERSION(10, 3, 7): + /* GC 11.0.1 */ + case IP_VERSION(11, 0, 1): if (amdgpu_tmz == 0) { adev->gmc.tmz_enabled = false; dev_info(adev->dev, @@ -616,7 +618,6 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev) case IP_VERSION(10, 3, 1): /* YELLOW_CARP*/ case IP_VERSION(10, 3, 3): - case IP_VERSION(11, 0, 1): case 
IP_VERSION(11, 0, 4): /* Don't enable it by default yet. */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c index b07c000fc8ba39ee60540312eb8c0e6ee439d8e0..4fa019c8aefc4ddf730fb69f8fa9b7185c9c2c9b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c @@ -241,6 +241,31 @@ int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev, return 0; } +int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) +{ + int r, i; + + r = amdgpu_ras_block_late_init(adev, ras_block); + if (r) + return r; + + if (amdgpu_ras_is_supported(adev, ras_block->block)) { + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + r = amdgpu_irq_get(adev, &adev->jpeg.inst[i].ras_poison_irq, 0); + if (r) + goto late_fini; + } + } + return 0; + +late_fini: + amdgpu_ras_block_late_fini(adev, ras_block); + return r; +} + int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev) { int err; @@ -262,7 +287,7 @@ int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev) adev->jpeg.ras_if = &ras->ras_block.ras_comm; if (!ras->ras_block.ras_late_init) - ras->ras_block.ras_late_init = amdgpu_ras_block_late_init; + ras->ras_block.ras_late_init = amdgpu_jpeg_ras_late_init; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h index 0ca76f0f23e9c8a0b980050cfbf27afa7307ef34..1471a1ebb03449fd939cbd8aa6a6501fd9c9b9c3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h @@ -38,6 +38,7 @@ struct amdgpu_jpeg_reg{ struct amdgpu_jpeg_inst { struct amdgpu_ring ring_dec; struct amdgpu_irq_src irq; + struct amdgpu_irq_src ras_poison_irq; struct amdgpu_jpeg_reg external; }; @@ -72,6 +73,8 @@ int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout); int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry); +int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev, + struct ras_common_if *ras_block); int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev); #endif /*__AMDGPU_JPEG_H__*/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 2bd1a54ee86656bcf06c2e135c58a9f3a9b9d8ba..a70103ac0026aefb815cafdd25df77e209e720dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -79,9 +79,10 @@ static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo) static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo) { struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); - struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo); + struct amdgpu_bo *shadow_bo = ttm_to_amdgpu_bo(tbo), *bo; struct amdgpu_bo_vm *vmbo; + bo = shadow_bo->parent; vmbo = to_amdgpu_bo_vm(bo); /* in case amdgpu_device_recover_vram got NULL of bo->parent */ if (!list_empty(&vmbo->shadow_list)) { @@ -139,7 +140,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) places[c].lpfn = visible_pfn; - else if (adev->gmc.real_vram_size != adev->gmc.visible_vram_size) + else places[c].flags |= TTM_PL_FLAG_TOPDOWN; if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) @@ -694,11 +695,6 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev, return r; *vmbo_ptr = to_amdgpu_bo_vm(bo_ptr); - INIT_LIST_HEAD(&(*vmbo_ptr)->shadow_list); - /* Set destroy callback to 
amdgpu_bo_vm_destroy after vmbo->shadow_list - * is initialized. - */ - bo_ptr->tbo.destroy = &amdgpu_bo_vm_destroy; return r; } @@ -715,6 +711,8 @@ void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo) mutex_lock(&adev->shadow_list_lock); list_add_tail(&vmbo->shadow_list, &adev->shadow_list); + vmbo->shadow->parent = amdgpu_bo_ref(&vmbo->bo); + vmbo->shadow->tbo.destroy = &amdgpu_bo_vm_destroy; mutex_unlock(&adev->shadow_list_lock); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 9d7e6e0e73edb9722233d3bff87769e92c8434a8..a150b7a4b4aae7780124b679093bca139570ad26 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -3548,6 +3548,9 @@ static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj, void *fw_pri_cpu_addr; int ret; + if (adev->psp.vbflash_image_size == 0) + return -EINVAL; + dev_info(adev->dev, "VBIOS flash to PSP started"); ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size, @@ -3599,13 +3602,13 @@ static ssize_t amdgpu_psp_vbflash_status(struct device *dev, } static const struct bin_attribute psp_vbflash_bin_attr = { - .attr = {.name = "psp_vbflash", .mode = 0664}, + .attr = {.name = "psp_vbflash", .mode = 0660}, .size = 0, .write = amdgpu_psp_vbflash_write, .read = amdgpu_psp_vbflash_read, }; -static DEVICE_ATTR(psp_vbflash_status, 0444, amdgpu_psp_vbflash_status, NULL); +static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL); int amdgpu_psp_sysfs_init(struct amdgpu_device *adev) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index dc474b8096040bfdcc9c40dbf9426ca2ae24bdb6..49de3a3eebc78fdd4c65d3445902e3a8d5e35abe 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -581,3 +581,21 @@ void amdgpu_ring_ib_end(struct amdgpu_ring *ring) if (ring->is_sw_ring) amdgpu_sw_ring_ib_end(ring); } + +void amdgpu_ring_ib_on_emit_cntl(struct amdgpu_ring *ring) +{ + if (ring->is_sw_ring) + amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_CONTROL); +} + +void amdgpu_ring_ib_on_emit_ce(struct amdgpu_ring *ring) +{ + if (ring->is_sw_ring) + amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_CE); +} + +void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring) +{ + if (ring->is_sw_ring) + amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_DE); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index d8749444b6891fb774b20df6f5a36b5a428ef21a..2474cb71e47674ac2d048de5164b5cb58eb23737 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -227,6 +227,9 @@ struct amdgpu_ring_funcs { int (*preempt_ib)(struct amdgpu_ring *ring); void (*emit_mem_sync)(struct amdgpu_ring *ring); void (*emit_wave_limit)(struct amdgpu_ring *ring, bool enable); + void (*patch_cntl)(struct amdgpu_ring *ring, unsigned offset); + void (*patch_ce)(struct amdgpu_ring *ring, unsigned offset); + void (*patch_de)(struct amdgpu_ring *ring, unsigned offset); }; struct amdgpu_ring { @@ -318,10 +321,16 @@ struct amdgpu_ring { #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r)) #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o)) #define amdgpu_ring_preempt_ib(r) (r)->funcs->preempt_ib(r) +#define amdgpu_ring_patch_cntl(r, o) ((r)->funcs->patch_cntl((r), (o))) +#define amdgpu_ring_patch_ce(r, o) 
((r)->funcs->patch_ce((r), (o))) +#define amdgpu_ring_patch_de(r, o) ((r)->funcs->patch_de((r), (o))) int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw); void amdgpu_ring_ib_begin(struct amdgpu_ring *ring); void amdgpu_ring_ib_end(struct amdgpu_ring *ring); +void amdgpu_ring_ib_on_emit_cntl(struct amdgpu_ring *ring); +void amdgpu_ring_ib_on_emit_ce(struct amdgpu_ring *ring); +void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring); void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count); void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c index 62079f0e3ee8f4f3cf0189b40b1401766766bf91..73516abef662f00f8b418a78a757adefb7bf7eef 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c @@ -105,6 +105,16 @@ static void amdgpu_mux_resubmit_chunks(struct amdgpu_ring_mux *mux) amdgpu_fence_update_start_timestamp(e->ring, chunk->sync_seq, ktime_get()); + if (chunk->sync_seq == + le32_to_cpu(*(e->ring->fence_drv.cpu_addr + 2))) { + if (chunk->cntl_offset <= e->ring->buf_mask) + amdgpu_ring_patch_cntl(e->ring, + chunk->cntl_offset); + if (chunk->ce_offset <= e->ring->buf_mask) + amdgpu_ring_patch_ce(e->ring, chunk->ce_offset); + if (chunk->de_offset <= e->ring->buf_mask) + amdgpu_ring_patch_de(e->ring, chunk->de_offset); + } amdgpu_ring_mux_copy_pkt_from_sw_ring(mux, e->ring, chunk->start, chunk->end); @@ -407,6 +417,17 @@ void amdgpu_sw_ring_ib_end(struct amdgpu_ring *ring) amdgpu_ring_mux_end_ib(mux, ring); } +void amdgpu_sw_ring_ib_mark_offset(struct amdgpu_ring *ring, enum amdgpu_ring_mux_offset_type type) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_ring_mux *mux = &adev->gfx.muxer; + unsigned offset; + + offset = ring->wptr & ring->buf_mask; + + amdgpu_ring_mux_ib_mark_offset(mux, ring, offset, type); +} + void amdgpu_ring_mux_start_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring) { struct amdgpu_mux_entry *e; @@ -429,6 +450,10 @@ void amdgpu_ring_mux_start_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *r } chunk->start = ring->wptr; + /* the initialized value used to check if they are set by the ib submission*/ + chunk->cntl_offset = ring->buf_mask + 1; + chunk->de_offset = ring->buf_mask + 1; + chunk->ce_offset = ring->buf_mask + 1; list_add_tail(&chunk->entry, &e->list); } @@ -454,6 +479,41 @@ static void scan_and_remove_signaled_chunk(struct amdgpu_ring_mux *mux, struct a } } +void amdgpu_ring_mux_ib_mark_offset(struct amdgpu_ring_mux *mux, + struct amdgpu_ring *ring, u64 offset, + enum amdgpu_ring_mux_offset_type type) +{ + struct amdgpu_mux_entry *e; + struct amdgpu_mux_chunk *chunk; + + e = amdgpu_ring_mux_sw_entry(mux, ring); + if (!e) { + DRM_ERROR("cannot find entry!\n"); + return; + } + + chunk = list_last_entry(&e->list, struct amdgpu_mux_chunk, entry); + if (!chunk) { + DRM_ERROR("cannot find chunk!\n"); + return; + } + + switch (type) { + case AMDGPU_MUX_OFFSET_TYPE_CONTROL: + chunk->cntl_offset = offset; + break; + case AMDGPU_MUX_OFFSET_TYPE_DE: + chunk->de_offset = offset; + break; + case AMDGPU_MUX_OFFSET_TYPE_CE: + chunk->ce_offset = offset; + break; + default: + DRM_ERROR("invalid type (%d)\n", type); + break; + } +} + void amdgpu_ring_mux_end_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring) { struct amdgpu_mux_entry *e; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h 
index 4be45fc14954c9cf5a683641a4ef0d1a1e319eeb..b22d4fb2a8470e457ba5f0db4a9dcda352865716 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h @@ -50,6 +50,12 @@ struct amdgpu_mux_entry { struct list_head list; }; +enum amdgpu_ring_mux_offset_type { + AMDGPU_MUX_OFFSET_TYPE_CONTROL, + AMDGPU_MUX_OFFSET_TYPE_DE, + AMDGPU_MUX_OFFSET_TYPE_CE, +}; + struct amdgpu_ring_mux { struct amdgpu_ring *real_ring; @@ -72,12 +78,18 @@ struct amdgpu_ring_mux { * @sync_seq: the fence seqno related with the saved IB. * @start:- start location on the software ring. * @end:- end location on the software ring. + * @control_offset:- the PRE_RESUME bit position used for resubmission. + * @de_offset:- the anchor in write_data for de meta of resubmission. + * @ce_offset:- the anchor in write_data for ce meta of resubmission. */ struct amdgpu_mux_chunk { struct list_head entry; uint32_t sync_seq; u64 start; u64 end; + u64 cntl_offset; + u64 de_offset; + u64 ce_offset; }; int amdgpu_ring_mux_init(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring, @@ -89,6 +101,8 @@ u64 amdgpu_ring_mux_get_wptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ri u64 amdgpu_ring_mux_get_rptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring); void amdgpu_ring_mux_start_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring); void amdgpu_ring_mux_end_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring); +void amdgpu_ring_mux_ib_mark_offset(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring, + u64 offset, enum amdgpu_ring_mux_offset_type type); bool amdgpu_mcbp_handle_trailing_fence_irq(struct amdgpu_ring_mux *mux); u64 amdgpu_sw_ring_get_rptr_gfx(struct amdgpu_ring *ring); @@ -97,6 +111,7 @@ void amdgpu_sw_ring_set_wptr_gfx(struct amdgpu_ring *ring); void amdgpu_sw_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count); void amdgpu_sw_ring_ib_begin(struct amdgpu_ring *ring); void amdgpu_sw_ring_ib_end(struct amdgpu_ring *ring); +void amdgpu_sw_ring_ib_mark_offset(struct amdgpu_ring *ring, enum amdgpu_ring_mux_offset_type type); const char *amdgpu_sw_ring_name(int idx); unsigned int amdgpu_sw_ring_priority(int idx); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index e63fcc58e8e0622f90599733b862a3953f902622..2d94f1b63bd6c4463ceef185b7726c27ad250ec5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -1181,6 +1181,31 @@ int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev, return 0; } +int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) +{ + int r, i; + + r = amdgpu_ras_block_late_init(adev, ras_block); + if (r) + return r; + + if (amdgpu_ras_is_supported(adev, ras_block->block)) { + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + + r = amdgpu_irq_get(adev, &adev->vcn.inst[i].ras_poison_irq, 0); + if (r) + goto late_fini; + } + } + return 0; + +late_fini: + amdgpu_ras_block_late_fini(adev, ras_block); + return r; +} + int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev) { int err; @@ -1202,7 +1227,7 @@ int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev) adev->vcn.ras_if = &ras->ras_block.ras_comm; if (!ras->ras_block.ras_late_init) - ras->ras_block.ras_late_init = amdgpu_ras_block_late_init; + ras->ras_block.ras_late_init = amdgpu_vcn_ras_late_init; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index 
c730949ece7d9b27c62b483d1844ddb4aa6d0319..f1397ef66fd77edb31989d28e4a50f19f06fbecd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -234,6 +234,7 @@ struct amdgpu_vcn_inst { struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS]; atomic_t sched_score; struct amdgpu_irq_src irq; + struct amdgpu_irq_src ras_poison_irq; struct amdgpu_vcn_reg external; struct amdgpu_bo *dpg_sram_bo; struct dpg_pause_state pause_state; @@ -400,6 +401,8 @@ void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry); +int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev, + struct ras_common_if *ras_block); int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c index df63dc3bca18cd2829575ef6a592e21dcaee0db0..051c7194ab4fb99ec11023d6c28086469a39933b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c @@ -564,7 +564,6 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm, return r; } - (*vmbo)->shadow->parent = amdgpu_bo_ref(bo); amdgpu_bo_add_to_shadow_list(*vmbo); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 43d6a9d6a5384f886b4460a539bb97450cf1a0e2..afacfb9b5bf6c952eb1440dc74ed6a3154526023 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -800,7 +800,7 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man, { struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); struct drm_buddy *mm = &mgr->mm; - struct drm_buddy_block *block; + struct amdgpu_vram_reservation *rsv; drm_printf(printer, " vis usage:%llu\n", amdgpu_vram_mgr_vis_usage(mgr)); @@ -812,8 +812,9 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man, drm_buddy_print(mm, printer); drm_printf(printer, "reserved:\n"); - list_for_each_entry(block, &mgr->reserved_pages, link) - drm_buddy_block_print(mm, block, printer); + list_for_each_entry(rsv, &mgr->reserved_pages, blocks) + drm_printf(printer, "%#018llx-%#018llx: %llu\n", + rsv->start, rsv->start + rsv->size, rsv->size); mutex_unlock(&mgr->lock); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index f5b5ce1051a2864651245a776c7d8b7881dbfeb1..ab44c1391d526594974bf8fc631913d8c8f4bb6a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -6892,8 +6892,10 @@ static int gfx_v10_0_kiq_resume(struct amdgpu_device *adev) return r; r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); - if (unlikely(r != 0)) + if (unlikely(r != 0)) { + amdgpu_bo_unreserve(ring->mqd_obj); return r; + } gfx_v10_0_kiq_init_queue(ring); amdgpu_bo_kunmap(ring->mqd_obj); @@ -8152,8 +8154,14 @@ static int gfx_v10_0_set_powergating_state(void *handle, case IP_VERSION(10, 3, 3): case IP_VERSION(10, 3, 6): case IP_VERSION(10, 3, 7): + if (!enable) + amdgpu_gfx_off_ctrl(adev, false); + gfx_v10_cntl_pg(adev, enable); - amdgpu_gfx_off_ctrl(adev, enable); + + if (enable) + amdgpu_gfx_off_ctrl(adev, true); + break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index f5c3762769843a19e9fdca3a6fdb85d22c6df57f..c4940b6ea1c4ccd8b82a6df1d91fcf0fedacce47 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -4667,24 +4667,27 @@ static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev) uint64_t clock; uint64_t clock_counter_lo, clock_counter_hi_pre, clock_counter_hi_after; - amdgpu_gfx_off_ctrl(adev, false); - mutex_lock(&adev->gfx.gpu_clock_mutex); if (amdgpu_sriov_vf(adev)) { + amdgpu_gfx_off_ctrl(adev, false); + mutex_lock(&adev->gfx.gpu_clock_mutex); clock_counter_hi_pre = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI); clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO); clock_counter_hi_after = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI); if (clock_counter_hi_pre != clock_counter_hi_after) clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO); + mutex_unlock(&adev->gfx.gpu_clock_mutex); + amdgpu_gfx_off_ctrl(adev, true); } else { + preempt_disable(); clock_counter_hi_pre = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER); clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER); clock_counter_hi_after = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER); if (clock_counter_hi_pre != clock_counter_hi_after) clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER); + preempt_enable(); } clock = clock_counter_lo | (clock_counter_hi_after << 32ULL); - mutex_unlock(&adev->gfx.gpu_clock_mutex); - amdgpu_gfx_off_ctrl(adev, true); + return clock; } @@ -5150,8 +5153,14 @@ static int gfx_v11_0_set_powergating_state(void *handle, break; case IP_VERSION(11, 0, 1): case IP_VERSION(11, 0, 4): + if (!enable) + amdgpu_gfx_off_ctrl(adev, false); + gfx_v11_cntl_pg(adev, enable); - amdgpu_gfx_off_ctrl(adev, enable); + + if (enable) + amdgpu_gfx_off_ctrl(adev, true); + break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index f46d4b18a3fa35f9be4f18fcb0924e01f0beedde..a674c8a58dc23477d24e7368d246396b85766e00 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -149,16 +149,6 @@ MODULE_FIRMWARE("amdgpu/aldebaran_sjt_mec2.bin"); #define mmGOLDEN_TSC_COUNT_LOWER_Renoir 0x0026 #define mmGOLDEN_TSC_COUNT_LOWER_Renoir_BASE_IDX 1 -#define mmGOLDEN_TSC_COUNT_UPPER_Raven 0x007a -#define mmGOLDEN_TSC_COUNT_UPPER_Raven_BASE_IDX 0 -#define mmGOLDEN_TSC_COUNT_LOWER_Raven 0x007b -#define mmGOLDEN_TSC_COUNT_LOWER_Raven_BASE_IDX 0 - -#define mmGOLDEN_TSC_COUNT_UPPER_Raven2 0x0068 -#define mmGOLDEN_TSC_COUNT_UPPER_Raven2_BASE_IDX 0 -#define mmGOLDEN_TSC_COUNT_LOWER_Raven2 0x0069 -#define mmGOLDEN_TSC_COUNT_LOWER_Raven2_BASE_IDX 0 - enum ta_ras_gfx_subblock { /*CPC*/ TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0, @@ -765,7 +755,7 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev); static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev, struct amdgpu_cu_info *cu_info); static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev); -static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume); +static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume, bool usegds); static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring); static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status); @@ -3617,8 +3607,10 @@ static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev) return r; r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); - if (unlikely(r != 0)) + if (unlikely(r != 0)) { + 
amdgpu_bo_unreserve(ring->mqd_obj); return r; + } gfx_v9_0_kiq_init_queue(ring); amdgpu_bo_kunmap(ring->mqd_obj); @@ -4002,36 +3994,6 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev) preempt_enable(); clock = clock_lo | (clock_hi << 32ULL); break; - case IP_VERSION(9, 1, 0): - preempt_disable(); - clock_hi = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven); - clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven); - hi_check = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven); - /* The PWR TSC clock frequency is 100MHz, which sets 32-bit carry over - * roughly every 42 seconds. - */ - if (hi_check != clock_hi) { - clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven); - clock_hi = hi_check; - } - preempt_enable(); - clock = clock_lo | (clock_hi << 32ULL); - break; - case IP_VERSION(9, 2, 2): - preempt_disable(); - clock_hi = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven2); - clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven2); - hi_check = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven2); - /* The PWR TSC clock frequency is 100MHz, which sets 32-bit carry over - * roughly every 42 seconds. - */ - if (hi_check != clock_hi) { - clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven2); - clock_hi = hi_check; - } - preempt_enable(); - clock = clock_lo | (clock_hi << 32ULL); - break; default: amdgpu_gfx_off_ctrl(adev, false); mutex_lock(&adev->gfx.gpu_clock_mutex); @@ -5165,7 +5127,8 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, gfx_v9_0_ring_emit_de_meta(ring, (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? - true : false); + true : false, + job->gds_size > 0 && job->gds_base != 0); } amdgpu_ring_write(ring, header); @@ -5176,9 +5139,83 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, #endif lower_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); + amdgpu_ring_ib_on_emit_cntl(ring); amdgpu_ring_write(ring, control); } +static void gfx_v9_0_ring_patch_cntl(struct amdgpu_ring *ring, + unsigned offset) +{ + u32 control = ring->ring[offset]; + + control |= INDIRECT_BUFFER_PRE_RESUME(1); + ring->ring[offset] = control; +} + +static void gfx_v9_0_ring_patch_ce_meta(struct amdgpu_ring *ring, + unsigned offset) +{ + struct amdgpu_device *adev = ring->adev; + void *ce_payload_cpu_addr; + uint64_t payload_offset, payload_size; + + payload_size = sizeof(struct v9_ce_ib_state); + + if (ring->is_mes_queue) { + payload_offset = offsetof(struct amdgpu_mes_ctx_meta_data, + gfx[0].gfx_meta_data) + + offsetof(struct v9_gfx_meta_data, ce_payload); + ce_payload_cpu_addr = + amdgpu_mes_ctx_get_offs_cpu_addr(ring, payload_offset); + } else { + payload_offset = offsetof(struct v9_gfx_meta_data, ce_payload); + ce_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset; + } + + if (offset + (payload_size >> 2) <= ring->buf_mask + 1) { + memcpy((void *)&ring->ring[offset], ce_payload_cpu_addr, payload_size); + } else { + memcpy((void *)&ring->ring[offset], ce_payload_cpu_addr, + (ring->buf_mask + 1 - offset) << 2); + payload_size -= (ring->buf_mask + 1 - offset) << 2; + memcpy((void *)&ring->ring[0], + ce_payload_cpu_addr + ((ring->buf_mask + 1 - offset) << 2), + payload_size); + } +} + +static void gfx_v9_0_ring_patch_de_meta(struct amdgpu_ring *ring, + unsigned offset) +{ + struct amdgpu_device *adev = ring->adev; + void *de_payload_cpu_addr; + uint64_t payload_offset, payload_size; + + 
payload_size = sizeof(struct v9_de_ib_state); + + if (ring->is_mes_queue) { + payload_offset = offsetof(struct amdgpu_mes_ctx_meta_data, + gfx[0].gfx_meta_data) + + offsetof(struct v9_gfx_meta_data, de_payload); + de_payload_cpu_addr = + amdgpu_mes_ctx_get_offs_cpu_addr(ring, payload_offset); + } else { + payload_offset = offsetof(struct v9_gfx_meta_data, de_payload); + de_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset; + } + + if (offset + (payload_size >> 2) <= ring->buf_mask + 1) { + memcpy((void *)&ring->ring[offset], de_payload_cpu_addr, payload_size); + } else { + memcpy((void *)&ring->ring[offset], de_payload_cpu_addr, + (ring->buf_mask + 1 - offset) << 2); + payload_size -= (ring->buf_mask + 1 - offset) << 2; + memcpy((void *)&ring->ring[0], + de_payload_cpu_addr + ((ring->buf_mask + 1 - offset) << 2), + payload_size); + } +} + static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, @@ -5374,6 +5411,8 @@ static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume) amdgpu_ring_write(ring, lower_32_bits(ce_payload_gpu_addr)); amdgpu_ring_write(ring, upper_32_bits(ce_payload_gpu_addr)); + amdgpu_ring_ib_on_emit_ce(ring); + if (resume) amdgpu_ring_write_multiple(ring, ce_payload_cpu_addr, sizeof(ce_payload) >> 2); @@ -5407,10 +5446,6 @@ static int gfx_v9_0_ring_preempt_ib(struct amdgpu_ring *ring) amdgpu_ring_alloc(ring, 13); gfx_v9_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr, ring->trail_seq, AMDGPU_FENCE_FLAG_EXEC | AMDGPU_FENCE_FLAG_INT); - /*reset the CP_VMID_PREEMPT after trailing fence*/ - amdgpu_ring_emit_wreg(ring, - SOC15_REG_OFFSET(GC, 0, mmCP_VMID_PREEMPT), - 0x0); /* assert IB preemption, emit the trailing fence */ kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP, @@ -5433,6 +5468,10 @@ static int gfx_v9_0_ring_preempt_ib(struct amdgpu_ring *ring) DRM_WARN("ring %d timeout to preempt ib\n", ring->idx); } + /*reset the CP_VMID_PREEMPT after trailing fence*/ + amdgpu_ring_emit_wreg(ring, + SOC15_REG_OFFSET(GC, 0, mmCP_VMID_PREEMPT), + 0x0); amdgpu_ring_commit(ring); /* deassert preemption condition */ @@ -5440,7 +5479,7 @@ static int gfx_v9_0_ring_preempt_ib(struct amdgpu_ring *ring) return r; } -static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume) +static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume, bool usegds) { struct amdgpu_device *adev = ring->adev; struct v9_de_ib_state de_payload = {0}; @@ -5471,8 +5510,10 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume) PAGE_SIZE); } - de_payload.gds_backup_addrlo = lower_32_bits(gds_addr); - de_payload.gds_backup_addrhi = upper_32_bits(gds_addr); + if (usegds) { + de_payload.gds_backup_addrlo = lower_32_bits(gds_addr); + de_payload.gds_backup_addrhi = upper_32_bits(gds_addr); + } cnt = (sizeof(de_payload) >> 2) + 4 - 2; amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt)); @@ -5483,6 +5524,7 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume) amdgpu_ring_write(ring, lower_32_bits(de_payload_gpu_addr)); amdgpu_ring_write(ring, upper_32_bits(de_payload_gpu_addr)); + amdgpu_ring_ib_on_emit_de(ring); if (resume) amdgpu_ring_write_multiple(ring, de_payload_cpu_addr, sizeof(de_payload) >> 2); @@ -6893,6 +6935,9 @@ static const struct amdgpu_ring_funcs gfx_v9_0_sw_ring_funcs_gfx = { .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait, .soft_recovery = gfx_v9_0_ring_soft_recovery, .emit_mem_sync = 
gfx_v9_0_emit_mem_sync, + .patch_cntl = gfx_v9_0_ring_patch_cntl, + .patch_de = gfx_v9_0_ring_patch_de_meta, + .patch_ce = gfx_v9_0_ring_patch_ce_meta, }; static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index d95f9fe8f1c54ef351d36a567fd8a4572038b339..4116c112e8a01d00fee4306e737bf7a1cc92252e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -31,6 +31,8 @@ #include "umc_v8_10.h" #include "athub/athub_3_0_0_sh_mask.h" #include "athub/athub_3_0_0_offset.h" +#include "dcn/dcn_3_2_0_offset.h" +#include "dcn/dcn_3_2_0_sh_mask.h" #include "oss/osssys_6_0_0_offset.h" #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h" #include "navi10_enum.h" @@ -546,7 +548,24 @@ static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev, static unsigned gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev) { - return 0; + u32 d1vga_control = RREG32_SOC15(DCE, 0, regD1VGA_CONTROL); + unsigned size; + + if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { + size = AMDGPU_VBIOS_VGA_ALLOCATION; + } else { + u32 viewport; + u32 pitch; + + viewport = RREG32_SOC15(DCE, 0, regHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION); + pitch = RREG32_SOC15(DCE, 0, regHUBPREQ0_DCSURF_SURFACE_PITCH); + size = (REG_GET_FIELD(viewport, + HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) * + REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) * + 4); + } + + return size; } static const struct amdgpu_gmc_funcs gmc_v11_0_gmc_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c index b040f51d9aa9d2847ea7f0c1023a15de8e0ed381..73e0dc5a10cd4a586aa6f4c35a2ddc7be56bb09f 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c @@ -102,13 +102,13 @@ static int jpeg_v2_5_sw_init(void *handle) /* JPEG DJPEG POISON EVENT */ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i], - VCN_2_6__SRCID_DJPEG0_POISON, &adev->jpeg.inst[i].irq); + VCN_2_6__SRCID_DJPEG0_POISON, &adev->jpeg.inst[i].ras_poison_irq); if (r) return r; /* JPEG EJPEG POISON EVENT */ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i], - VCN_2_6__SRCID_EJPEG0_POISON, &adev->jpeg.inst[i].irq); + VCN_2_6__SRCID_EJPEG0_POISON, &adev->jpeg.inst[i].ras_poison_irq); if (r) return r; } @@ -221,6 +221,9 @@ static int jpeg_v2_5_hw_fini(void *handle) if (adev->jpeg.cur_state != AMD_PG_STATE_GATE && RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS)) jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE); + + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) + amdgpu_irq_put(adev, &adev->jpeg.inst[i].ras_poison_irq, 0); } return 0; @@ -569,6 +572,14 @@ static int jpeg_v2_5_set_interrupt_state(struct amdgpu_device *adev, return 0; } +static int jpeg_v2_6_set_ras_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + return 0; +} + static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -593,10 +604,6 @@ static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev, case VCN_2_0__SRCID__JPEG_DECODE: amdgpu_fence_process(&adev->jpeg.inst[ip_instance].ring_dec); break; - case VCN_2_6__SRCID_DJPEG0_POISON: - case VCN_2_6__SRCID_EJPEG0_POISON: - amdgpu_jpeg_process_poison_irq(adev, source, entry); - break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", entry->src_id, 
entry->src_data[0]); @@ -725,6 +732,11 @@ static const struct amdgpu_irq_src_funcs jpeg_v2_5_irq_funcs = { .process = jpeg_v2_5_process_interrupt, }; +static const struct amdgpu_irq_src_funcs jpeg_v2_6_ras_irq_funcs = { + .set = jpeg_v2_6_set_ras_interrupt_state, + .process = amdgpu_jpeg_process_poison_irq, +}; + static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev) { int i; @@ -735,6 +747,9 @@ static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev) adev->jpeg.inst[i].irq.num_types = 1; adev->jpeg.inst[i].irq.funcs = &jpeg_v2_5_irq_funcs; + + adev->jpeg.inst[i].ras_poison_irq.num_types = 1; + adev->jpeg.inst[i].ras_poison_irq.funcs = &jpeg_v2_6_ras_irq_funcs; } } @@ -800,6 +815,7 @@ const struct amdgpu_ras_block_hw_ops jpeg_v2_6_ras_hw_ops = { static struct amdgpu_jpeg_ras jpeg_v2_6_ras = { .ras_block = { .hw_ops = &jpeg_v2_6_ras_hw_ops, + .ras_late_init = amdgpu_jpeg_ras_late_init, }, }; diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c index 77e1e64aa1d1c7b7987d15aa5a771b553754a230..a3d83c9f2c9a3cca84e07aa65732c43ab8d2bfc2 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c @@ -87,13 +87,13 @@ static int jpeg_v4_0_sw_init(void *handle) /* JPEG DJPEG POISON EVENT */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, - VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->irq); + VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq); if (r) return r; /* JPEG EJPEG POISON EVENT */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, - VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->irq); + VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq); if (r) return r; @@ -202,7 +202,8 @@ static int jpeg_v4_0_hw_fini(void *handle) RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS)) jpeg_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE); } - amdgpu_irq_put(adev, &adev->jpeg.inst->irq, 0); + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) + amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0); return 0; } @@ -670,6 +671,14 @@ static int jpeg_v4_0_set_interrupt_state(struct amdgpu_device *adev, return 0; } +static int jpeg_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + return 0; +} + static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -680,10 +689,6 @@ static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev, case VCN_4_0__SRCID__JPEG_DECODE: amdgpu_fence_process(&adev->jpeg.inst->ring_dec); break; - case VCN_4_0__SRCID_DJPEG0_POISON: - case VCN_4_0__SRCID_EJPEG0_POISON: - amdgpu_jpeg_process_poison_irq(adev, source, entry); - break; default: DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); @@ -753,10 +758,18 @@ static const struct amdgpu_irq_src_funcs jpeg_v4_0_irq_funcs = { .process = jpeg_v4_0_process_interrupt, }; +static const struct amdgpu_irq_src_funcs jpeg_v4_0_ras_irq_funcs = { + .set = jpeg_v4_0_set_ras_interrupt_state, + .process = amdgpu_jpeg_process_poison_irq, +}; + static void jpeg_v4_0_set_irq_funcs(struct amdgpu_device *adev) { adev->jpeg.inst->irq.num_types = 1; adev->jpeg.inst->irq.funcs = &jpeg_v4_0_irq_funcs; + + adev->jpeg.inst->ras_poison_irq.num_types = 1; + adev->jpeg.inst->ras_poison_irq.funcs = &jpeg_v4_0_ras_irq_funcs; } const struct amdgpu_ip_block_version jpeg_v4_0_ip_block = { @@ -811,6 +824,7 @@ const struct 
amdgpu_ras_block_hw_ops jpeg_v4_0_ras_hw_ops = { static struct amdgpu_jpeg_ras jpeg_v4_0_ras = { .ras_block = { .hw_ops = &jpeg_v4_0_ras_hw_ops, + .ras_late_init = amdgpu_jpeg_ras_late_init, }, }; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index e1b7fca096660a99387079c8fd73a99053de916b..5f10883da6a2388ef7f8b1354f5e3eddb0d970dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -57,7 +57,13 @@ static int psp_v10_0_init_microcode(struct psp_context *psp) if (err) return err; - return psp_init_ta_microcode(psp, ucode_prefix); + err = psp_init_ta_microcode(psp, ucode_prefix); + if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 1, 0)) && + (adev->pdev->revision == 0xa1) && + (psp->securedisplay_context.context.bin_desc.fw_version >= 0x27000008)) { + adev->psp.securedisplay_context.context.bin_desc.size_bytes = 0; + } + return err; } static int psp_v10_0_ring_create(struct psp_context *psp, diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 6d15d5cd9e07f64cac8c6961f65b68e55ad86409..a2fd1ffadb59e90d421cf619947b92d6064b37ca 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -301,10 +301,11 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev) u32 reference_clock = adev->clock.spll.reference_freq; if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0) || - adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1) || - adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 0) || - adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 1)) + adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1)) return 10000; + if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 0) || + adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 1)) + return reference_clock / 4; return reference_clock; } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index ab0b45d0ead18ae6f315a8b59be901247f05aefe..515681c88dcbca3b19c2e20d2dff4421a9cca9a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -143,7 +143,7 @@ static int vcn_v2_5_sw_init(void *handle) /* VCN POISON TRAP */ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j], - VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].irq); + VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].ras_poison_irq); if (r) return r; } @@ -354,6 +354,9 @@ static int vcn_v2_5_hw_fini(void *handle) (adev->vcn.cur_state != AMD_PG_STATE_GATE && RREG32_SOC15(VCN, i, mmUVD_STATUS))) vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE); + + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) + amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0); } return 0; @@ -1807,6 +1810,14 @@ static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev, return 0; } +static int vcn_v2_6_set_ras_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + return 0; +} + static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -1837,9 +1848,6 @@ static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev, case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY: amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]); break; - case VCN_2_6__SRCID_UVD_POISON: - amdgpu_vcn_process_poison_irq(adev, source, entry); - break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", entry->src_id, 
entry->src_data[0]); @@ -1854,6 +1862,11 @@ static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = { .process = vcn_v2_5_process_interrupt, }; +static const struct amdgpu_irq_src_funcs vcn_v2_6_ras_irq_funcs = { + .set = vcn_v2_6_set_ras_interrupt_state, + .process = amdgpu_vcn_process_poison_irq, +}; + static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev) { int i; @@ -1863,6 +1876,9 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev) continue; adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1; adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs; + + adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1; + adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v2_6_ras_irq_funcs; } } @@ -1965,6 +1981,7 @@ const struct amdgpu_ras_block_hw_ops vcn_v2_6_ras_hw_ops = { static struct amdgpu_vcn_ras vcn_v2_6_ras = { .ras_block = { .hw_ops = &vcn_v2_6_ras_hw_ops, + .ras_late_init = amdgpu_vcn_ras_late_init, }, }; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c index bf0674039598d3154776c682400d10dfd5db5f29..da126ff8bcbc266d08b64e73ccecd5b20c94caa1 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c @@ -129,7 +129,11 @@ static int vcn_v4_0_sw_init(void *handle) if (adev->vcn.harvest_config & (1 << i)) continue; - atomic_set(&adev->vcn.inst[i].sched_score, 0); + /* Init instance 0 sched_score to 1, so it's scheduled after other instances */ + if (i == 0) + atomic_set(&adev->vcn.inst[i].sched_score, 1); + else + atomic_set(&adev->vcn.inst[i].sched_score, 0); /* VCN UNIFIED TRAP */ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i], @@ -139,7 +143,7 @@ static int vcn_v4_0_sw_init(void *handle) /* VCN POISON TRAP */ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i], - VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].irq); + VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].ras_poison_irq); if (r) return r; @@ -305,8 +309,8 @@ static int vcn_v4_0_hw_fini(void *handle) vcn_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE); } } - - amdgpu_irq_put(adev, &adev->vcn.inst[i].irq, 0); + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) + amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0); } return 0; @@ -1975,6 +1979,24 @@ static int vcn_v4_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgp return 0; } +/** + * vcn_v4_0_set_ras_interrupt_state - set VCN block RAS interrupt state + * + * @adev: amdgpu_device pointer + * @source: interrupt sources + * @type: interrupt types + * @state: interrupt states + * + * Set VCN block RAS interrupt state + */ +static int vcn_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + return 0; +} + /** * vcn_v4_0_process_interrupt - process VCN block interrupt * @@ -2007,9 +2029,6 @@ static int vcn_v4_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_ case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE: amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]); break; - case VCN_4_0__SRCID_UVD_POISON: - amdgpu_vcn_process_poison_irq(adev, source, entry); - break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); @@ -2024,6 +2043,11 @@ static const struct amdgpu_irq_src_funcs vcn_v4_0_irq_funcs = { .process = vcn_v4_0_process_interrupt, }; +static const struct amdgpu_irq_src_funcs vcn_v4_0_ras_irq_funcs = { + .set = vcn_v4_0_set_ras_interrupt_state, + .process = 
amdgpu_vcn_process_poison_irq, +}; + /** * vcn_v4_0_set_irq_funcs - set VCN block interrupt irq functions * @@ -2041,6 +2065,9 @@ static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev) adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1; adev->vcn.inst[i].irq.funcs = &vcn_v4_0_irq_funcs; + + adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1; + adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v4_0_ras_irq_funcs; } } @@ -2114,6 +2141,7 @@ const struct amdgpu_ras_block_hw_ops vcn_v4_0_ras_hw_ops = { static struct amdgpu_vcn_ras vcn_v4_0_ras = { .ras_block = { .hw_ops = &vcn_v4_0_ras_hw_ops, + .ras_late_init = amdgpu_vcn_ras_late_init, }, }; diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 531f173ade2d62c1f1ced0ef3a83596b181efb87..c0360dbebd4bdb0454115a94b6176dbfba4cea06 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -542,8 +542,15 @@ static u32 vi_get_xclk(struct amdgpu_device *adev) u32 reference_clock = adev->clock.spll.reference_freq; u32 tmp; - if (adev->flags & AMD_IS_APU) - return reference_clock; + if (adev->flags & AMD_IS_APU) { + switch (adev->asic_type) { + case CHIP_STONEY: + /* vbios says 48Mhz, but the actual freq is 100Mhz */ + return 10000; + default: + return reference_clock; + } + } tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2); if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK)) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 8b4b186c57f515fd541eaa15ce55e0b3ebf2dadb..7acd73e5004fb6cb42f9bbcb7bde011428617924 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2479,20 +2479,25 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev, if (acrtc && state->stream_status[i].plane_count != 0) { irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst; rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; - DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n", - acrtc->crtc_id, enable ? "en" : "dis", rc); if (rc) DRM_WARN("Failed to %s pflip interrupts\n", enable ? "enable" : "disable"); if (enable) { - rc = amdgpu_dm_crtc_enable_vblank(&acrtc->base); - if (rc) - DRM_WARN("Failed to enable vblank interrupts\n"); - } else { - amdgpu_dm_crtc_disable_vblank(&acrtc->base); - } + if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state))) + rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true); + } else + rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false); + + if (rc) + DRM_WARN("Failed to %sable vupdate interrupt\n", enable ? "en" : "dis"); + irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; + /* During gpu-reset we disable and then enable vblank irq, so + * don't use amdgpu_irq_get/put() to avoid refcount change. + */ + if (!dc_interrupt_set(adev->dm.dc, irq_source, enable)) + DRM_WARN("Failed to %sable vblank interrupt\n", enable ? 
"en" : "dis"); } } @@ -2852,7 +2857,7 @@ static int dm_resume(void *handle) * this is the case when traversing through already created * MST connectors, should be skipped */ - if (aconnector->dc_link->type == dc_connection_mst_branch) + if (aconnector && aconnector->mst_root) continue; mutex_lock(&aconnector->hpd_lock); @@ -6737,7 +6742,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, int clock, bpp = 0; bool is_y420 = false; - if (!aconnector->mst_output_port || !aconnector->dc_sink) + if (!aconnector->mst_output_port) return 0; mst_port = aconnector->mst_output_port; @@ -7191,7 +7196,13 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) drm_add_modes_noedid(connector, 1920, 1080); } else { amdgpu_dm_connector_ddc_get_modes(connector, edid); - amdgpu_dm_connector_add_common_modes(encoder, connector); + /* most eDP supports only timings from its edid, + * usually only detailed timings are available + * from eDP edid. timings which are not from edid + * may damage eDP + */ + if (connector->connector_type != DRM_MODE_CONNECTOR_eDP) + amdgpu_dm_connector_add_common_modes(encoder, connector); amdgpu_dm_connector_add_freesync_modes(connector, edid); } amdgpu_dm_fbc_init(connector); @@ -8193,6 +8204,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, if (acrtc_state->abm_level != dm_old_crtc_state->abm_level) bundle->stream_update.abm_level = &acrtc_state->abm_level; + mutex_lock(&dm->dc_lock); + if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && + acrtc_state->stream->link->psr_settings.psr_allow_active) + amdgpu_dm_psr_disable(acrtc_state->stream); + mutex_unlock(&dm->dc_lock); + /* * If FreeSync state on the stream has changed then we need to * re-adjust the min/max bounds now that DC doesn't handle this @@ -8206,10 +8223,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); } mutex_lock(&dm->dc_lock); - if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && - acrtc_state->stream->link->psr_settings.psr_allow_active) - amdgpu_dm_psr_disable(acrtc_state->stream); - update_planes_and_stream_adapter(dm->dc, acrtc_state->update_type, planes_count, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index e3762e806617c11a724e34e3954ecbedfea49352..440fc0869a34ba631c84df42df35318ed37aeabb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -146,7 +146,6 @@ static void vblank_control_worker(struct work_struct *work) static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable) { - enum dc_irq_source irq_source; struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); struct amdgpu_device *adev = drm_to_adev(crtc->dev); struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state); @@ -169,18 +168,9 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable) if (rc) return rc; - if (amdgpu_in_reset(adev)) { - irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; - /* During gpu-reset we disable and then enable vblank irq, so - * don't use amdgpu_irq_get/put() to avoid refcount change. - */ - if (!dc_interrupt_set(adev->dm.dc, irq_source, enable)) - rc = -EBUSY; - } else { - rc = (enable) - ? amdgpu_irq_get(adev, &adev->crtc_irq, acrtc->crtc_id) - : amdgpu_irq_put(adev, &adev->crtc_irq, acrtc->crtc_id); - } + rc = (enable) + ? 
amdgpu_irq_get(adev, &adev->crtc_irq, acrtc->crtc_id) + : amdgpu_irq_put(adev, &adev->crtc_irq, acrtc->crtc_id); if (rc) return rc; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 52564b93f7eb2dc7539bd184efeea5031e9a149a..7cde67b7f0c33dc66e54ed0d76d38276b8a2055f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1981,6 +1981,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c return result; } +static bool commit_minimal_transition_state(struct dc *dc, + struct dc_state *transition_base_context); + /** * dc_commit_streams - Commit current stream state * @@ -2002,6 +2005,8 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_state *context; enum dc_status res = DC_OK; struct dc_validation_set set[MAX_STREAMS] = {0}; + struct pipe_ctx *pipe; + bool handle_exit_odm2to1 = false; if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW) return res; @@ -2026,6 +2031,22 @@ enum dc_status dc_commit_streams(struct dc *dc, } } + /* Check for case where we are going from odm 2:1 to max + * pipe scenario. For these cases, we will call + * commit_minimal_transition_state() to exit out of odm 2:1 + * first before processing new streams + */ + if (stream_count == dc->res_pool->pipe_count) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe->next_odm_pipe) + handle_exit_odm2to1 = true; + } + } + + if (handle_exit_odm2to1) + res = commit_minimal_transition_state(dc, dc->current_state); + context = dc_create_state(dc); if (!context) goto context_alloc_fail; @@ -3872,6 +3893,7 @@ static bool commit_minimal_transition_state(struct dc *dc, unsigned int i, j; unsigned int pipe_in_use = 0; bool subvp_in_use = false; + bool odm_in_use = false; if (!transition_context) return false; @@ -3900,6 +3922,18 @@ static bool commit_minimal_transition_state(struct dc *dc, } } + /* If ODM is enabled and we are adding or removing planes from any ODM + * pipe, we must use the minimal transition. + */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (pipe->stream && pipe->next_odm_pipe) { + odm_in_use = true; + break; + } + } + /* When the OS add a new surface if we have been used all of pipes with odm combine * and mpc split feature, it need use commit_minimal_transition_state to transition safely. * After OS exit MPO, it will back to use odm and mpc split with all of pipes, we need @@ -3908,7 +3942,7 @@ static bool commit_minimal_transition_state(struct dc *dc, * Reduce the scenarios to use dc_commit_state_no_check in the stage of flip. Especially * enter/exit MPO when DCN still have enough resources. 
*/ - if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use) { + if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use) { dc_release_state(transition_context); return true; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 117d80cb36fbb5e245ce0dd8b27e076cc3dc75fd..fe1551393b26461f584d61d6e0950830da72a37b 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1444,6 +1444,26 @@ static int acquire_first_split_pipe( split_pipe->plane_res.mpcc_inst = pool->dpps[i]->inst; split_pipe->pipe_idx = i; + split_pipe->stream = stream; + return i; + } else if (split_pipe->prev_odm_pipe && + split_pipe->prev_odm_pipe->plane_state == split_pipe->plane_state) { + split_pipe->prev_odm_pipe->next_odm_pipe = split_pipe->next_odm_pipe; + if (split_pipe->next_odm_pipe) + split_pipe->next_odm_pipe->prev_odm_pipe = split_pipe->prev_odm_pipe; + + if (split_pipe->prev_odm_pipe->plane_state) + resource_build_scaling_params(split_pipe->prev_odm_pipe); + + memset(split_pipe, 0, sizeof(*split_pipe)); + split_pipe->stream_res.tg = pool->timing_generators[i]; + split_pipe->plane_res.hubp = pool->hubps[i]; + split_pipe->plane_res.ipp = pool->ipps[i]; + split_pipe->plane_res.dpp = pool->dpps[i]; + split_pipe->stream_res.opp = pool->opps[i]; + split_pipe->plane_res.mpcc_inst = pool->dpps[i]->inst; + split_pipe->pipe_idx = i; + split_pipe->stream = stream; return i; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 422fbf79da64fb95eda7627455870a0dbdb351cb..5403e9399a465c948c81e40f352dcb437751660d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -2113,15 +2113,6 @@ void dcn20_optimize_bandwidth( if (hubbub->funcs->program_compbuf_size) hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true); - if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { - dc_dmub_srv_p_state_delegate(dc, - true, context); - context->bw_ctx.bw.dcn.clk.p_state_change_support = true; - dc->clk_mgr->clks.fw_based_mclk_switching = true; - } else { - dc->clk_mgr->clks.fw_based_mclk_switching = false; - } - dc->clk_mgr->funcs->update_clocks( dc->clk_mgr, context, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index 8263a07f265f2157cb72afce81dcddb0e9af572f..32121db2851e665a587d7a7855cf9805f2be11a8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -983,36 +983,13 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc, } void dcn30_prepare_bandwidth(struct dc *dc, - struct dc_state *context) + struct dc_state *context) { - bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support; - /* Any transition into an FPO config should disable MCLK switching first to avoid - * driver and FW P-State synchronization issues. 
- */ - if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) { - dc->optimized_required = true; - context->bw_ctx.bw.dcn.clk.p_state_change_support = false; - } - if (dc->clk_mgr->dc_mode_softmax_enabled) if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 && context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000) dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz); dcn20_prepare_bandwidth(dc, context); - /* - * enabled -> enabled: do not disable - * enabled -> disabled: disable - * disabled -> enabled: don't care - * disabled -> disabled: don't care - */ - if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) - dc_dmub_srv_p_state_delegate(dc, false, context); - - if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) { - /* After disabling P-State, restore the original value to ensure we get the correct P-State - * on the next optimize. */ - context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support; - } } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index 47beb4ea779d30df03b6e5daf638eee79f9e7f05..0c4c3208def17f129bfc74c09bf4d8586382e806 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -138,7 +138,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = { .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, - .pct_ideal_sdp_bw_after_urgent = 100.0, + .pct_ideal_sdp_bw_after_urgent = 90.0, .pct_ideal_fabric_bw_after_urgent = 67.0, .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0, .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, // N/A, for now keep as is until DML implemented diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c index a131e30fd7d6a01036b55598662b50345dbc34e1..d471d58aba92cba441915c0757072136ca0f8c64 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c @@ -980,6 +980,11 @@ static bool detect_link_and_local_sink(struct dc_link *link, (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER)) converter_disable_audio = true; + + /* limited link rate to HBR3 for DPIA until we implement USB4 V2 */ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && + link->reported_link_cap.link_rate > LINK_RATE_HIGH3) + link->reported_link_cap.link_rate = LINK_RATE_HIGH3; break; } diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c index d4b7da526f0a55eeb7736edb5f99a84826110126..e8b2fc4002a52d07dc91e7cee0c3250acd23dc51 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c @@ -359,5 +359,8 @@ bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const un link[i] = stream[i].link; bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing); } + + ret = dpia_validate_usb4_bw(link, bw_needed, num_streams); + return ret; } diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c 
b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 58c2246918fdabd1de4d9f91262c0ac20b2bc587..f4f40459f22b97e0cfc8b4fa7df6a7a911a21878 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -871,13 +871,11 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, } if (ret == -ENOENT) { size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf); - if (size > 0) { - size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size); - size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size); - size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size); - size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size); - size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size); - } + size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size); + size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size); + size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size); + size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size); + size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size); } if (size == 0) diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index d6d9e3b1b2c0e4511707680dbb1286d13188977a..02e69ccff3bac47cefb29b4369e196bd09dfae9d 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -6925,23 +6925,6 @@ static int si_dpm_enable(struct amdgpu_device *adev) return 0; } -static int si_set_temperature_range(struct amdgpu_device *adev) -{ - int ret; - - ret = si_thermal_enable_alert(adev, false); - if (ret) - return ret; - ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); - if (ret) - return ret; - ret = si_thermal_enable_alert(adev, true); - if (ret) - return ret; - - return ret; -} - static void si_dpm_disable(struct amdgpu_device *adev) { struct rv7xx_power_info *pi = rv770_get_pi(adev); @@ -7626,18 +7609,6 @@ static int si_dpm_process_interrupt(struct amdgpu_device *adev, static int si_dpm_late_init(void *handle) { - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!adev->pm.dpm_enabled) - return 0; - - ret = si_set_temperature_range(adev); - if (ret) - return ret; -#if 0 //TODO ? - si_dpm_powergate_uvd(adev, true); -#endif return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 5633c5797e85a4c7651301e06cd1c06eea90d6da..2ddf5198e5c4860f86db5ce824f35ae051fb6bed 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -733,6 +733,24 @@ static int smu_late_init(void *handle) return ret; } + /* + * Explicitly notify PMFW of the power mode the system is in, + * since the PMFW may boot the ASIC with a different mode. + * For those supporting ACDC switch via gpio, PMFW will + * handle the switch automatically. Driver involvement + * is unnecessary. + */ + if (!smu->dc_controlled_by_gpio) { + ret = smu_set_power_source(smu, + adev->pm.ac_power ? SMU_POWER_SOURCE_AC : + SMU_POWER_SOURCE_DC); + if (ret) { + dev_err(adev->dev, "Failed to switch to %s mode!\n", + adev->pm.ac_power ?
"AC" : "DC"); + return ret; + } + } + if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 1)) || (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 3))) return 0; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index c4000518dc56d84da730a439eb192671d18f503f..275f708db63626183e2a45b14782a7b54ac12328 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -3413,26 +3413,8 @@ static int navi10_post_smu_init(struct smu_context *smu) return 0; ret = navi10_run_umc_cdr_workaround(smu); - if (ret) { + if (ret) dev_err(adev->dev, "Failed to apply umc cdr workaround!\n"); - return ret; - } - - if (!smu->dc_controlled_by_gpio) { - /* - * For Navi1X, manually switch it to AC mode as PMFW - * may boot it with DC mode. - */ - ret = smu_v11_0_set_power_source(smu, - adev->pm.ac_power ? - SMU_POWER_SOURCE_AC : - SMU_POWER_SOURCE_DC); - if (ret) { - dev_err(adev->dev, "Failed to switch to %s mode!\n", - adev->pm.ac_power ? "AC" : "DC"); - return ret; - } - } return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 75f18681e984c33a5c7cf83d7a22aa92b794f3c9..85d53597eb07ae4721bb1ec3750b695d87ad97fa 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -2067,33 +2067,94 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context return ret; } +static void sienna_cichlid_get_override_pcie_settings(struct smu_context *smu, + uint32_t *gen_speed_override, + uint32_t *lane_width_override) +{ + struct amdgpu_device *adev = smu->adev; + + *gen_speed_override = 0xff; + *lane_width_override = 0xff; + + switch (adev->pdev->device) { + case 0x73A0: + case 0x73A1: + case 0x73A2: + case 0x73A3: + case 0x73AB: + case 0x73AE: + /* Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 */ + *lane_width_override = 6; + break; + case 0x73E0: + case 0x73E1: + case 0x73E3: + *lane_width_override = 4; + break; + case 0x7420: + case 0x7421: + case 0x7422: + case 0x7423: + case 0x7424: + *lane_width_override = 3; + break; + default: + break; + } +} + +#define MAX(a, b) ((a) > (b) ? 
(a) : (b)) + static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap) { struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; - - uint32_t smu_pcie_arg; + struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table; + uint32_t gen_speed_override, lane_width_override; uint8_t *table_member1, *table_member2; + uint32_t min_gen_speed, max_gen_speed; + uint32_t min_lane_width, max_lane_width; + uint32_t smu_pcie_arg; int ret, i; GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1); GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2); - /* lclk dpm table setup */ - for (i = 0; i < MAX_PCIE_CONF; i++) { - dpm_context->dpm_tables.pcie_table.pcie_gen[i] = table_member1[i]; - dpm_context->dpm_tables.pcie_table.pcie_lane[i] = table_member2[i]; + sienna_cichlid_get_override_pcie_settings(smu, + &gen_speed_override, + &lane_width_override); + + /* PCIE gen speed override */ + if (gen_speed_override != 0xff) { + min_gen_speed = MIN(pcie_gen_cap, gen_speed_override); + max_gen_speed = MIN(pcie_gen_cap, gen_speed_override); + } else { + min_gen_speed = MAX(0, table_member1[0]); + max_gen_speed = MIN(pcie_gen_cap, table_member1[1]); + min_gen_speed = min_gen_speed > max_gen_speed ? + max_gen_speed : min_gen_speed; } + pcie_table->pcie_gen[0] = min_gen_speed; + pcie_table->pcie_gen[1] = max_gen_speed; + + /* PCIE lane width override */ + if (lane_width_override != 0xff) { + min_lane_width = MIN(pcie_width_cap, lane_width_override); + max_lane_width = MIN(pcie_width_cap, lane_width_override); + } else { + min_lane_width = MAX(1, table_member2[0]); + max_lane_width = MIN(pcie_width_cap, table_member2[1]); + min_lane_width = min_lane_width > max_lane_width ? + max_lane_width : min_lane_width; + } + pcie_table->pcie_lane[0] = min_lane_width; + pcie_table->pcie_lane[1] = max_lane_width; for (i = 0; i < NUM_LINK_LEVELS; i++) { - smu_pcie_arg = (i << 16) | - ((table_member1[i] <= pcie_gen_cap) ? - (table_member1[i] << 8) : - (pcie_gen_cap << 8)) | - ((table_member2[i] <= pcie_width_cap) ? 
- table_member2[i] : - pcie_width_cap); + smu_pcie_arg = (i << 16 | + pcie_table->pcie_gen[i] << 8 | + pcie_table->pcie_lane[i]); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_OverridePcieParameters, @@ -2101,11 +2162,6 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu, NULL); if (ret) return ret; - - if (table_member1[i] > pcie_gen_cap) - dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap; - if (table_member2[i] > pcie_width_cap) - dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap; } return 0; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 7433dcaa16e046af9c311276c0a04f5f7214928d..067b4e0b026c0b9ce76391723bc45a6492b7afcb 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -582,7 +582,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu, DpmClocks_t *clk_table = smu->smu_table.clocks_table; SmuMetrics_legacy_t metrics; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); - int i, size = 0, ret = 0; + int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; bool cur_value_match_level = false; @@ -656,7 +656,8 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu, case SMU_MCLK: case SMU_FCLK: for (i = 0; i < count; i++) { - ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value); + idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; + ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value); if (ret) return ret; if (!value) @@ -683,7 +684,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu, DpmClocks_t *clk_table = smu->smu_table.clocks_table; SmuMetrics_t metrics; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); - int i, size = 0, ret = 0; + int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; bool cur_value_match_level = false; uint32_t min, max; @@ -765,7 +766,8 @@ static int vangogh_print_clk_levels(struct smu_context *smu, case SMU_MCLK: case SMU_FCLK: for (i = 0; i < count; i++) { - ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value); + idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; + ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value); if (ret) return ret; if (!value) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index 5cdc07165480b5a3be51ec46c722699d34d52b84..8a8ba25c9ad7cc29437103b22d58555cd8afbe31 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -494,7 +494,7 @@ static int renoir_set_fine_grain_gfx_freq_parameters(struct smu_context *smu) static int renoir_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, size = 0, ret = 0; + int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0; SmuMetrics_t metrics; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); @@ -594,7 +594,8 @@ static int renoir_print_clk_levels(struct smu_context *smu, case SMU_VCLK: case SMU_DCLK: for (i = 0; i < count; i++) { - ret = renoir_get_dpm_clk_limited(smu, clk_type, i, &value); + idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? 
(count - i - 1) : i; + ret = renoir_get_dpm_clk_limited(smu, clk_type, idx, &value); if (ret) return ret; if (!value) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 393c6a7b960962603a756c4e4651f52ce98735d5..ca379181081c1ff3b591746c49d6d67f381ac3c5 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -573,11 +573,11 @@ int smu_v13_0_init_power(struct smu_context *smu) if (smu_power->power_context || smu_power->power_context_size != 0) return -EINVAL; - smu_power->power_context = kzalloc(sizeof(struct smu_13_0_dpm_context), + smu_power->power_context = kzalloc(sizeof(struct smu_13_0_power_context), GFP_KERNEL); if (!smu_power->power_context) return -ENOMEM; - smu_power->power_context_size = sizeof(struct smu_13_0_dpm_context); + smu_power->power_context_size = sizeof(struct smu_13_0_power_context); return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 09405ef1e3c83a8f7bf0263cc8e1c0794a6454dd..08577d1b84eca16134c6480dd05f401908649b5c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -1696,10 +1696,39 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, } } - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = smu_cmn_to_asic_specific_index(smu, + if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE && + (((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xC8)) || + ((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xCC)))) { + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_COMPUTE_BIT, + (void *)(&activity_monitor_external), + false); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); + return ret; + } + + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), + true); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + return ret; + } + + workload_type = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_WORKLOAD, + PP_SMC_POWER_PROFILE_CUSTOM); + } else { + /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ + workload_type = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_WORKLOAD, smu->power_profile_mode); + } + if (workload_type < 0) return -EINVAL; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c index 8fa9a36c38b64e5b0281561308c70a22887adb5c..6d9760eac16d8de73480ac9448390b3806a5f671 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c @@ -478,7 +478,7 @@ static int smu_v13_0_4_get_dpm_level_count(struct smu_context *smu, static int smu_v13_0_4_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, size = 0, ret = 0; + int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t min, max; @@ -512,7 +512,8 @@ static int smu_v13_0_4_print_clk_levels(struct smu_context *smu, break; for (i = 0; i < count; i++) { - ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, i, &value); + idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? 
(count - i - 1) : i; + ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, idx, &value); if (ret) break; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c index 66445964efbd1e5a94c7cfa3d2bbfca7b76e8c89..0081fa607e02e4d4b8578cf1b696855b97a871a6 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c @@ -866,7 +866,7 @@ out: static int smu_v13_0_5_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, size = 0, ret = 0; + int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t min = 0, max = 0; @@ -898,7 +898,8 @@ static int smu_v13_0_5_print_clk_levels(struct smu_context *smu, goto print_clk_out; for (i = 0; i < count; i++) { - ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, i, &value); + idx = (clk_type == SMU_MCLK) ? (count - i - 1) : i; + ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, idx, &value); if (ret) goto print_clk_out; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index 3d9ff46706fb704855615f353165a353b43adf96..bba621615abf0948bf9ce4d0d560fd3d860b0354 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -125,6 +125,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0), MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0), MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0), + MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0), }; static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = { @@ -1770,6 +1771,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .enable_mgpu_fan_boost = smu_v13_0_7_enable_mgpu_fan_boost, .get_power_limit = smu_v13_0_7_get_power_limit, .set_power_limit = smu_v13_0_set_power_limit, + .set_power_source = smu_v13_0_set_power_source, .get_power_profile_mode = smu_v13_0_7_get_power_profile_mode, .set_power_profile_mode = smu_v13_0_7_set_power_profile_mode, .set_tool_table_location = smu_v13_0_set_tool_table_location, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index 04e56b0b3033eaef681e9b3c9d2b14a18853cb3e..798f36cfcebd387c484590509b3a58642fa113b4 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -1000,7 +1000,7 @@ out: static int yellow_carp_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, size = 0, ret = 0; + int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t min, max; @@ -1033,7 +1033,8 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu, goto print_clk_out; for (i = 0; i < count; i++) { - ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, i, &value); + idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? 
(count - i - 1) : i; + ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, idx, &value); if (ret) goto print_clk_out; diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c index fbb070f63e36b8656bedfadea9b629dbad310f20..6dc1a09504e13ec2bad4bbf095c28a00b710d9e4 100644 --- a/drivers/gpu/drm/ast/ast_dp.c +++ b/drivers/gpu/drm/ast/ast_dp.c @@ -119,53 +119,32 @@ err_astdp_edid_not_ready: /* * Launch Aspeed DP */ -void ast_dp_launch(struct drm_device *dev, u8 bPower) +void ast_dp_launch(struct drm_device *dev) { - u32 i = 0, j = 0, WaitCount = 1; - u8 bDPTX = 0; + u32 i = 0; u8 bDPExecute = 1; - struct ast_device *ast = to_ast_device(dev); - // S3 come back, need more time to wait BMC ready. - if (bPower) - WaitCount = 300; - - - // Wait total count by different condition. - for (j = 0; j < WaitCount; j++) { - bDPTX = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, TX_TYPE_MASK); - - if (bDPTX) - break; + // Wait one second then timeout. + while (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, ASTDP_MCU_FW_EXECUTING) != + ASTDP_MCU_FW_EXECUTING) { + i++; + // wait 100 ms msleep(100); - } - // 0xE : ASTDP with DPMCU FW handling - if (bDPTX == ASTDP_DPMCU_TX) { - // Wait one second then timeout. - i = 0; - - while (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, COPROCESSOR_LAUNCH) != - COPROCESSOR_LAUNCH) { - i++; - // wait 100 ms - msleep(100); - - if (i >= 10) { - // DP would not be ready. - bDPExecute = 0; - break; - } + if (i >= 10) { + // DP would not be ready. + bDPExecute = 0; + break; } + } - if (bDPExecute) - ast->tx_chip_types |= BIT(AST_TX_ASTDP); + if (!bDPExecute) + drm_err(dev, "Wait DPMCU executing timeout\n"); - ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5, - (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK, - ASTDP_HOST_EDID_READ_DONE); - } + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5, + (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK, + ASTDP_HOST_EDID_READ_DONE); } diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index a501169cddad890c0c845cf9217d8b517e16af94..5498a6676f2e8f2eede492b53c34e875ccf81635 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -350,9 +350,6 @@ int ast_mode_config_init(struct ast_device *ast); #define AST_DP501_LINKRATE 0xf014 #define AST_DP501_EDID_DATA 0xf020 -/* Define for Soc scratched reg */ -#define COPROCESSOR_LAUNCH BIT(5) - /* * Display Transmitter Type: */ @@ -480,7 +477,7 @@ struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev); /* aspeed DP */ int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata); -void ast_dp_launch(struct drm_device *dev, u8 bPower); +void ast_dp_launch(struct drm_device *dev); void ast_dp_power_on_off(struct drm_device *dev, bool no); void ast_dp_set_on_off(struct drm_device *dev, bool no); void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mode); diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index f32ce29edba726a903d7b082f770ae896bfce3cc..1f35438f614a7ba441af81e1e564da3ec1dd5d52 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -254,8 +254,13 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) case 0x0c: ast->tx_chip_types = AST_TX_DP501_BIT; } - } else if (ast->chip == AST2600) - ast_dp_launch(&ast->base, 0); + } else if (ast->chip == AST2600) { + if (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, TX_TYPE_MASK) == + ASTDP_DPMCU_TX) { + ast->tx_chip_types = AST_TX_ASTDP_BIT; + ast_dp_launch(&ast->base); + } + } /* 
Print stuff for diagnostic purposes */ if (ast->tx_chip_types & AST_TX_NONE_BIT) @@ -264,6 +269,8 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) drm_info(dev, "Using Sil164 TMDS transmitter\n"); if (ast->tx_chip_types & AST_TX_DP501_BIT) drm_info(dev, "Using DP501 DisplayPort transmitter\n"); + if (ast->tx_chip_types & AST_TX_ASTDP_BIT) + drm_info(dev, "Using ASPEED DisplayPort transmitter\n"); return 0; } diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 36374828f6c8fa790d89028a5a26e53722048baf..b3c670af6ef2bc309051457d292f6919200e3f5c 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -1647,6 +1647,8 @@ static int ast_dp501_output_init(struct ast_device *ast) static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector) { void *edid; + struct drm_device *dev = connector->dev; + struct ast_device *ast = to_ast_device(dev); int succ; int count; @@ -1655,9 +1657,17 @@ static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector) if (!edid) goto err_drm_connector_update_edid_property; + /* + * Protect access to I/O registers from concurrent modesetting + * by acquiring the I/O-register lock. + */ + mutex_lock(&ast->ioregs_lock); + succ = ast_astdp_read_edid(connector->dev, edid); if (succ < 0) - goto err_kfree; + goto err_mutex_unlock; + + mutex_unlock(&ast->ioregs_lock); drm_connector_update_edid_property(connector, edid); count = drm_add_edid_modes(connector, edid); @@ -1665,7 +1675,8 @@ static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector) return count; -err_kfree: +err_mutex_unlock: + mutex_unlock(&ast->ioregs_lock); kfree(edid); err_drm_connector_update_edid_property: drm_connector_update_edid_property(connector, NULL); diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index 71bb36b865fdac627ac846ac210d7d1f3c0da5e4..a005aec18a0209a9bdb6e0dec252a750d3c99409 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c @@ -380,7 +380,8 @@ void ast_post_gpu(struct drm_device *dev) ast_set_def_ext_reg(dev); if (ast->chip == AST2600) { - ast_dp_launch(dev, 1); + if (ast->tx_chip_types & AST_TX_ASTDP_BIT) + ast_dp_launch(dev); } else if (ast->config_mode == ast_use_p2a) { if (ast->chip == AST2500) ast_post_chip_2500(dev); diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index 7a748785c54596b04b9a7101fd7bd332827a2dc5..4676cf2900dfdc3f9d71dd176692144fd31e644f 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -298,6 +298,10 @@ static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata) if (refclk_lut[i] == refclk_rate) break; + /* avoid buffer overflow and "1" is the default rate in the datasheet. 
*/ + if (i >= refclk_lut_size) + i = 1; + regmap_update_bits(pdata->regmap, SN_DPPLL_SRC_REG, REFCLK_FREQ_MASK, REFCLK_FREQ(i)); diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c index 38dab76ae69ea6232062be9b431d52a57ed4015c..e2e21ce79510e25415a1133c76e0b858b0d84514 100644 --- a/drivers/gpu/drm/display/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c @@ -3404,7 +3404,7 @@ int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr, /* Skip failed payloads */ if (payload->vc_start_slot == -1) { - drm_dbg_kms(state->dev, "Part 1 of payload creation for %s failed, skipping part 2\n", + drm_dbg_kms(mgr->dev, "Part 1 of payload creation for %s failed, skipping part 2\n", payload->port->connector->name); return -EIO; } diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 6bb1b8b27d7a102b8ed634a5014980d666762816..fd27f19786352da77ab7cbd5218a352d03cb7e60 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1545,17 +1545,19 @@ static void drm_fb_helper_fill_pixel_fmt(struct fb_var_screeninfo *var, } } -static void __fill_var(struct fb_var_screeninfo *var, +static void __fill_var(struct fb_var_screeninfo *var, struct fb_info *info, struct drm_framebuffer *fb) { int i; var->xres_virtual = fb->width; var->yres_virtual = fb->height; - var->accel_flags = FB_ACCELF_TEXT; + var->accel_flags = 0; var->bits_per_pixel = drm_format_info_bpp(fb->format, 0); - var->height = var->width = 0; + var->height = info->var.height; + var->width = info->var.width; + var->left_margin = var->right_margin = 0; var->upper_margin = var->lower_margin = 0; var->hsync_len = var->vsync_len = 0; @@ -1618,7 +1620,7 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var, return -EINVAL; } - __fill_var(var, fb); + __fill_var(var, info, fb); /* * fb_pan_display() validates this, but fb_set_par() doesn't and just @@ -2074,7 +2076,7 @@ static void drm_fb_helper_fill_var(struct fb_info *info, info->pseudo_palette = fb_helper->pseudo_palette; info->var.xoffset = 0; info->var.yoffset = 0; - __fill_var(&info->var, fb); + __fill_var(&info->var, info, fb); info->var.activate = FB_ACTIVATE_NOW; drm_fb_helper_fill_pixel_fmt(&info->var, format); diff --git a/drivers/gpu/drm/drm_managed.c b/drivers/gpu/drm/drm_managed.c index 4cf214de50c4037ab659fda9097f2e6e34da7d06..c21c3f6230335f4a93a2c9cf0c4d2768138a72bb 100644 --- a/drivers/gpu/drm/drm_managed.c +++ b/drivers/gpu/drm/drm_managed.c @@ -264,28 +264,10 @@ void drmm_kfree(struct drm_device *dev, void *data) } EXPORT_SYMBOL(drmm_kfree); -static void drmm_mutex_release(struct drm_device *dev, void *res) +void __drmm_mutex_release(struct drm_device *dev, void *res) { struct mutex *lock = res; mutex_destroy(lock); } - -/** - * drmm_mutex_init - &drm_device-managed mutex_init() - * @dev: DRM device - * @lock: lock to be initialized - * - * Returns: - * 0 on success, or a negative errno code otherwise. - * - * This is a &drm_device-managed version of mutex_init(). The initialized - * lock is automatically destroyed on the final drm_dev_put(). 
- */ -int drmm_mutex_init(struct drm_device *dev, struct mutex *lock) -{ - mutex_init(lock); - - return drmm_add_action_or_reset(dev, drmm_mutex_release, lock); -} -EXPORT_SYMBOL(drmm_mutex_init); +EXPORT_SYMBOL(__drmm_mutex_release); diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index b1a38e6ce2f8fad28bbef491a02c29b7257beaa1..0cb646cb04ee1ae5028aa03439df18f06925760a 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -179,7 +179,7 @@ static const struct dmi_system_id orientation_data[] = { }, { /* AYA NEO AIR */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"), - DMI_MATCH(DMI_BOARD_NAME, "AIR"), + DMI_MATCH(DMI_PRODUCT_NAME, "AIR"), }, .driver_data = (void *)&lcd1080x1920_leftside_up, }, { /* AYA NEO NEXT */ diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index ec784e58da5c1cd612059c5f80daea4316410309..414e585ec7dd0b0561f338da1b24f623fe4462b0 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -1335,7 +1335,7 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data, /* Let the runqueue know that there is work to do. */ queue_work(g2d->g2d_workq, &g2d->runqueue_work); - if (runqueue_node->async) + if (req->async) goto out; wait_for_completion(&runqueue_node->complete); diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.h b/drivers/gpu/drm/exynos/exynos_drm_g2d.h index 74ea3c26deadcebabc7cae82b4e0d261269a2761..1a5ae781b56c60e41af91a5725e8cffa3e6f7b46 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.h +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.h @@ -34,11 +34,11 @@ static inline int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data, return -ENODEV; } -int g2d_open(struct drm_device *drm_dev, struct drm_file *file) +static inline int g2d_open(struct drm_device *drm_dev, struct drm_file *file) { return 0; } -void g2d_close(struct drm_device *drm_dev, struct drm_file *file) +static inline void g2d_close(struct drm_device *drm_dev, struct drm_file *file) { } #endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index 4d56c8c799c5a9d5a7faa2133a5e21cd6c18536f..f5e1adfcaa514ee0a84b00b1c7357e7d43a64ecf 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -469,8 +469,6 @@ static int vidi_remove(struct platform_device *pdev) if (ctx->raw_edid != (struct edid *)fake_edid_info) { kfree(ctx->raw_edid); ctx->raw_edid = NULL; - - return -EINVAL; } component_del(&pdev->dev, &vidi_component_ops); diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 084a483f97766165472c2f6b4fb71f57e2b07c00..2aaaba090cc00eac6009ffafa6b21fc5ddf3bd83 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -1453,6 +1453,18 @@ static u8 tgl_calc_voltage_level(int cdclk) return 0; } +static u8 rplu_calc_voltage_level(int cdclk) +{ + if (cdclk > 556800) + return 3; + else if (cdclk > 480000) + return 2; + else if (cdclk > 312000) + return 1; + else + return 0; +} + static void icl_readout_refclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { @@ -3242,6 +3254,13 @@ static const struct intel_cdclk_funcs mtl_cdclk_funcs = { .calc_voltage_level = tgl_calc_voltage_level, }; +static const struct intel_cdclk_funcs rplu_cdclk_funcs = { + .get_cdclk = 
bxt_get_cdclk, + .set_cdclk = bxt_set_cdclk, + .modeset_calc_cdclk = bxt_modeset_calc_cdclk, + .calc_voltage_level = rplu_calc_voltage_level, +}; + static const struct intel_cdclk_funcs tgl_cdclk_funcs = { .get_cdclk = bxt_get_cdclk, .set_cdclk = bxt_set_cdclk, .modeset_calc_cdclk = bxt_modeset_calc_cdclk, @@ -3384,14 +3403,17 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv) dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs; dev_priv->display.cdclk.table = dg2_cdclk_table; } else if (IS_ALDERLAKE_P(dev_priv)) { - dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs; /* Wa_22011320316:adl-p[a0] */ - if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) + if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) { dev_priv->display.cdclk.table = adlp_a_step_cdclk_table; - else if (IS_ADLP_RPLU(dev_priv)) + dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs; + } else if (IS_ADLP_RPLU(dev_priv)) { dev_priv->display.cdclk.table = rplu_cdclk_table; - else + dev_priv->display.funcs.cdclk = &rplu_cdclk_funcs; + } else { dev_priv->display.cdclk.table = adlp_cdclk_table; + dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs; + } } else if (IS_ROCKETLAKE(dev_priv)) { dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs; dev_priv->display.cdclk.table = rkl_cdclk_table; diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 3c29792137a56d06e2c287b6c37a1e6671bfaf91..0aae9a1eb3d58438820d8a7daf940d7a9651c73b 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -1851,9 +1851,17 @@ static void hsw_crtc_disable(struct intel_atomic_state *state, intel_disable_shared_dpll(old_crtc_state); - intel_encoders_post_pll_disable(state, crtc); + if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) { + struct intel_crtc *slave_crtc; + + intel_encoders_post_pll_disable(state, crtc); - intel_dmc_disable_pipe(i915, crtc->pipe); + intel_dmc_disable_pipe(i915, crtc->pipe); + + for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc, + intel_crtc_bigjoiner_slave_pipes(old_crtc_state)) + intel_dmc_disable_pipe(i915, slave_crtc->pipe); + } } static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c index 705915d505652553179a832a11427fd6c06ec08f..524bd6da260ced4a63bfcae07a5fd2366a1edffd 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c @@ -129,7 +129,7 @@ static int intel_dp_aux_sync_len(void) static int intel_dp_aux_fw_sync_len(void) { - int precharge = 16; /* 10-16 */ + int precharge = 10; /* 10-16 */ int preamble = 8; return precharge + preamble; diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 650232c4892b183800fa86adbe518e964cc0c29b..b183efab04a1d4c06f0fc05a5e96f0ccb8eca7ca 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -204,8 +204,6 @@ bool intel_hdcp2_capable(struct intel_connector *connector) struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; - struct intel_gt *gt = dev_priv->media_gt; - struct intel_gsc_uc *gsc = &gt->uc.gsc; bool capable = false; /* I915 support for HDCP2.2 */ @@ -213,9 +211,13 @@ bool intel_hdcp2_capable(struct intel_connector *connector) return false; /* If MTL+ make sure gsc is loaded and proxy is setup */ - if
(intel_hdcp_gsc_cs_required(dev_priv)) - if (!intel_uc_fw_is_running(&gsc->fw)) + if (intel_hdcp_gsc_cs_required(dev_priv)) { + struct intel_gt *gt = dev_priv->media_gt; + struct intel_gsc_uc *gsc = gt ? &gt->uc.gsc : NULL; + + if (!gsc || !intel_uc_fw_is_running(&gsc->fw)) return false; + } /* MEI/GSC interface is solid depending on which is used */ mutex_lock(&dev_priv->display.hdcp.comp_mutex); diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index a81fa6a20f5aac7c6e347f17723ccd8c0c633687..7b516b1a4915bd876cfc4211f991a34b986acbc9 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -346,8 +346,10 @@ static int live_parallel_switch(void *arg) continue; ce = intel_context_create(data[m].ce[0]->engine); - if (IS_ERR(ce)) + if (IS_ERR(ce)) { + err = PTR_ERR(ce); goto out; + } err = intel_context_pin(ce); if (err) { @@ -367,8 +369,10 @@ static int live_parallel_switch(void *arg) worker = kthread_create_worker(0, "igt/parallel:%s", data[n].ce[0]->engine->name); - if (IS_ERR(worker)) + if (IS_ERR(worker)) { + err = PTR_ERR(worker); goto out; + } data[n].worker = worker; } @@ -397,8 +401,10 @@ static int live_parallel_switch(void *arg) } } - if (igt_live_test_end(&t)) - err = -EIO; + if (igt_live_test_end(&t)) { + err = err ?: -EIO; + break; + } } out: diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c index 736b89a8ecf54fa5dffdb1e349d87c748adfe20e..4202df5b8c122f9a2edb9203bfe7f5eb882cea3b 100644 --- a/drivers/gpu/drm/i915/gt/selftest_execlists.c +++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c @@ -1530,8 +1530,8 @@ static int live_busywait_preempt(void *arg) struct drm_i915_gem_object *obj; struct i915_vma *vma; enum intel_engine_id id; - int err = -ENOMEM; u32 *map; + int err; /* * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can @@ -1539,13 +1539,17 @@ static int live_busywait_preempt(void *arg) */ ctx_hi = kernel_context(gt->i915, NULL); - if (!ctx_hi) - return -ENOMEM; + if (IS_ERR(ctx_hi)) + return PTR_ERR(ctx_hi); + ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY; ctx_lo = kernel_context(gt->i915, NULL); - if (!ctx_lo) + if (IS_ERR(ctx_lo)) { + err = PTR_ERR(ctx_lo); goto err_ctx_hi; + } + ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY; obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 050b8ae7b8e70b7b58a9844fcf7c4ca5b53cc908..3035cba2c6a29e16999dc308d96510d42c3206ae 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -877,12 +877,17 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream, stream->oa_buffer.last_ctx_id = ctx_id; } - /* - * Clear out the report id and timestamp as a means to detect unlanded - * reports. - */ - oa_report_id_clear(stream, report32); - oa_timestamp_clear(stream, report32); + if (is_power_of_2(report_size)) { + /* + * Clear out the report id and timestamp as a means + * to detect unlanded reports.
+ */ + oa_report_id_clear(stream, report32); + oa_timestamp_clear(stream, report32); + } else { + /* Zero out the entire report */ + memset(report32, 0, report_size); + } } if (start_offset != *offset) { diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c index ff003403fbbc7ffd04cd2986b5dbe833a32e7db6..ffd91a5ee29901e934eae58e2e6b42e9a5959457 100644 --- a/drivers/gpu/drm/lima/lima_sched.c +++ b/drivers/gpu/drm/lima/lima_sched.c @@ -165,7 +165,7 @@ int lima_sched_context_init(struct lima_sched_pipe *pipe, void lima_sched_context_fini(struct lima_sched_pipe *pipe, struct lima_sched_context *context) { - drm_sched_entity_fini(&context->base); + drm_sched_entity_destroy(&context->base); } struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task) diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index 0f2dd26755df98e9052484a5fafd208b645eefa6..af3ce5a6a636ac7bceb83539b66cbfc0a66d45b4 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -642,6 +642,11 @@ void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_ if (funcs->pixpllc_atomic_update) funcs->pixpllc_atomic_update(crtc, old_state); + if (crtc_state->gamma_lut) + mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data); + else + mgag200_crtc_set_gamma_linear(mdev, format); + mgag200_enable_display(mdev); if (funcs->enable_vidrst) diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index e16b4b3f8535f718e8a6a0ba221a47189e613962..8914992378f21707e4a55f0452abe05ef10795ce 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -1526,8 +1526,6 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) if (!pdev) return -ENODEV; - mutex_init(&gmu->lock); - gmu->dev = &pdev->dev; of_dma_configure(gmu->dev, node, true); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 9fb214f150dd47d98ba9dfb0786a366a3e45d5ce..52da3795b175d0e9d9c0036bd9a2e050f7de973a 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -1981,6 +1981,8 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) adreno_gpu = &a6xx_gpu->base; gpu = &adreno_gpu->base; + mutex_init(&a6xx_gpu->gmu.lock); + adreno_gpu->registers = NULL; /* diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h index 2b3ae84057dfe0fdc87ef73b40da378dd06979e3..bdcd554fc8a80d5c45ce34070ad9236523b507d1 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h @@ -98,17 +98,17 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = { static const struct dpu_lm_cfg msm8998_lm[] = { LM_BLK("lm_0", LM_0, 0x44000, MIXER_MSM8998_MASK, - &msm8998_lm_sblk, PINGPONG_0, LM_2, DSPP_0), + &msm8998_lm_sblk, PINGPONG_0, LM_1, DSPP_0), LM_BLK("lm_1", LM_1, 0x45000, MIXER_MSM8998_MASK, - &msm8998_lm_sblk, PINGPONG_1, LM_5, DSPP_1), + &msm8998_lm_sblk, PINGPONG_1, LM_0, DSPP_1), LM_BLK("lm_2", LM_2, 0x46000, MIXER_MSM8998_MASK, - &msm8998_lm_sblk, PINGPONG_2, LM_0, 0), + &msm8998_lm_sblk, PINGPONG_2, LM_5, 0), LM_BLK("lm_3", LM_3, 0x47000, MIXER_MSM8998_MASK, &msm8998_lm_sblk, PINGPONG_MAX, 0, 0), LM_BLK("lm_4", LM_4, 0x48000, MIXER_MSM8998_MASK, &msm8998_lm_sblk, PINGPONG_MAX, 0, 0), LM_BLK("lm_5", LM_5, 0x49000, MIXER_MSM8998_MASK, 
- &msm8998_lm_sblk, PINGPONG_3, LM_1, 0), + &msm8998_lm_sblk, PINGPONG_3, LM_2, 0), }; static const struct dpu_pingpong_cfg msm8998_pp[] = { @@ -134,10 +134,10 @@ static const struct dpu_dspp_cfg msm8998_dspp[] = { }; static const struct dpu_intf_cfg msm8998_intf[] = { - INTF_BLK("intf_0", INTF_0, 0x6a000, 0x280, INTF_DP, 0, 25, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 24, 25), - INTF_BLK("intf_1", INTF_1, 0x6a800, 0x280, INTF_DSI, 0, 25, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 26, 27), - INTF_BLK("intf_2", INTF_2, 0x6b000, 0x280, INTF_DSI, 1, 25, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 28, 29), - INTF_BLK("intf_3", INTF_3, 0x6b800, 0x280, INTF_HDMI, 0, 25, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 30, 31), + INTF_BLK("intf_0", INTF_0, 0x6a000, 0x280, INTF_DP, 0, 21, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 24, 25), + INTF_BLK("intf_1", INTF_1, 0x6a800, 0x280, INTF_DSI, 0, 21, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 26, 27), + INTF_BLK("intf_2", INTF_2, 0x6b000, 0x280, INTF_DSI, 1, 21, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 28, 29), + INTF_BLK("intf_3", INTF_3, 0x6b800, 0x280, INTF_HDMI, 0, 21, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 30, 31), }; static const struct dpu_perf_cfg msm8998_perf_data = { diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h index 282d410269ff62b55108794a0c8479e665c855e6..42b0e58624d0087c5760f710021c1758e46b3fdd 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h @@ -128,10 +128,10 @@ static const struct dpu_dspp_cfg sm8150_dspp[] = { }; static const struct dpu_pingpong_cfg sm8150_pp[] = { - PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk_te, + PP_BLK("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)), - PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk_te, + PP_BLK("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)), PP_BLK("pingpong_2", PINGPONG_2, 0x71000, MERGE_3D_1, sdm845_pp_sblk, diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h index c57400265f288403f39b11d4f87fd489a248eca6..e3bdfe7b30f1f19552128e468f6d9ebf5d1fa5a2 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h @@ -116,10 +116,10 @@ static const struct dpu_lm_cfg sc8180x_lm[] = { }; static const struct dpu_pingpong_cfg sc8180x_pp[] = { - PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk_te, + PP_BLK("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)), - PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk_te, + PP_BLK("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)), PP_BLK("pingpong_2", PINGPONG_2, 0x71000, MERGE_3D_1, sdm845_pp_sblk, diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h index 2c40229ea51594439a02acb7c16f04d05cfccb49..ed130582873c7037294dccdd6798fa66bc0948de 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h @@ -129,10 +129,10 @@ static 
const struct dpu_dspp_cfg sm8250_dspp[] = { }; static const struct dpu_pingpong_cfg sm8250_pp[] = { - PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk_te, + PP_BLK("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)), - PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk_te, + PP_BLK("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)), PP_BLK("pingpong_2", PINGPONG_2, 0x71000, MERGE_3D_1, sdm845_pp_sblk, diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h index 8799ed7571190fb7a4726d72a04a31b6dcf26458..a46b11730a4d4a2c43a295ade916b75b158328a4 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h @@ -80,8 +80,8 @@ static const struct dpu_dspp_cfg sc7180_dspp[] = { }; static const struct dpu_pingpong_cfg sc7180_pp[] = { - PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, 0, sdm845_pp_sblk_te, -1, -1), - PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, 0, sdm845_pp_sblk_te, -1, -1), + PP_BLK("pingpong_0", PINGPONG_0, 0x70000, 0, sdm845_pp_sblk, -1, -1), + PP_BLK("pingpong_1", PINGPONG_1, 0x70800, 0, sdm845_pp_sblk, -1, -1), }; static const struct dpu_intf_cfg sc7180_intf[] = { diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h index 6f04d8f85c9250ef65aa7b2bc3e628d497a0c275..988d820f7ef2e5e7059e368cdfed0777dcd516f8 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h @@ -122,7 +122,6 @@ const struct dpu_mdss_cfg dpu_sm6115_cfg = { .mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \ BIT(MDP_SSPP_TOP0_INTR2) | \ BIT(MDP_SSPP_TOP0_HIST_INTR) | \ - BIT(MDP_INTF0_INTR) | \ BIT(MDP_INTF1_INTR), }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h index 303492d62a5caaa38e39921289f35b672909a9c8..c9003dcc1a59b6221110b58bb9d4a0f930d30c0e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h @@ -112,7 +112,6 @@ const struct dpu_mdss_cfg dpu_qcm2290_cfg = { .mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \ BIT(MDP_SSPP_TOP0_INTR2) | \ BIT(MDP_SSPP_TOP0_HIST_INTR) | \ - BIT(MDP_INTF0_INTR) | \ BIT(MDP_INTF1_INTR), }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h index ca107ca8de462482278c3799cf01b953504293ac..4f6a965bcd90b187a99146d74bdc9f3c4ca32dd6 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h @@ -127,22 +127,22 @@ static const struct dpu_dspp_cfg sm8350_dspp[] = { }; static const struct dpu_pingpong_cfg sm8350_pp[] = { - PP_BLK_TE("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sdm845_pp_sblk_te, + PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)), - PP_BLK_TE("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sdm845_pp_sblk_te, + PP_BLK_DITHER("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)), - PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, 
MERGE_3D_1, sdm845_pp_sblk, + PP_BLK_DITHER("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10), DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14)), - PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sdm845_pp_sblk, + PP_BLK_DITHER("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11), DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15)), - PP_BLK("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sdm845_pp_sblk, + PP_BLK_DITHER("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30), -1), - PP_BLK("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sdm845_pp_sblk, + PP_BLK_DITHER("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31), -1), }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h index 5957de1859844f633cc414d3e9059ab3203f78be..6b2c7eae71d998ed1beb3238d4f15a64784b76bd 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h @@ -87,10 +87,10 @@ static const struct dpu_dspp_cfg sc7280_dspp[] = { }; static const struct dpu_pingpong_cfg sc7280_pp[] = { - PP_BLK("pingpong_0", PINGPONG_0, 0x69000, 0, sc7280_pp_sblk, -1, -1), - PP_BLK("pingpong_1", PINGPONG_1, 0x6a000, 0, sc7280_pp_sblk, -1, -1), - PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, 0, sc7280_pp_sblk, -1, -1), - PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, 0, sc7280_pp_sblk, -1, -1), + PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, 0, sc7280_pp_sblk, -1, -1), + PP_BLK_DITHER("pingpong_1", PINGPONG_1, 0x6a000, 0, sc7280_pp_sblk, -1, -1), + PP_BLK_DITHER("pingpong_2", PINGPONG_2, 0x6b000, 0, sc7280_pp_sblk, -1, -1), + PP_BLK_DITHER("pingpong_3", PINGPONG_3, 0x6c000, 0, sc7280_pp_sblk, -1, -1), }; static const struct dpu_intf_cfg sc7280_intf[] = { diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h index 9aab110b8c44d67458eb507a0bea99b54ed73553..706d0f13b598e53772eafeb1b6a455c2ee488dff 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h @@ -121,18 +121,18 @@ static const struct dpu_dspp_cfg sc8280xp_dspp[] = { }; static const struct dpu_pingpong_cfg sc8280xp_pp[] = { - PP_BLK_TE("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sdm845_pp_sblk_te, - DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), -1), - PP_BLK_TE("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sdm845_pp_sblk_te, - DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), -1), - PP_BLK_TE("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sdm845_pp_sblk_te, - DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10), -1), - PP_BLK_TE("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sdm845_pp_sblk_te, - DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11), -1), - PP_BLK_TE("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sdm845_pp_sblk_te, - DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30), -1), - PP_BLK_TE("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sdm845_pp_sblk_te, - DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31), -1), + PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sc7280_pp_sblk, + DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), -1), + PP_BLK_DITHER("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sc7280_pp_sblk, + DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), -1), + PP_BLK_DITHER("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sc7280_pp_sblk, + DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10), -1), + PP_BLK_DITHER("pingpong_3", PINGPONG_3, 
0x6c000, MERGE_3D_1, sc7280_pp_sblk, + DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11), -1), + PP_BLK_DITHER("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sc7280_pp_sblk, + DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30), -1), + PP_BLK_DITHER("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sc7280_pp_sblk, + DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31), -1), }; static const struct dpu_merge_3d_cfg sc8280xp_merge_3d[] = { diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h index 02a259b6b4268e8ac521e7b5069b5e0f024a01f7..4ecb3df5cbc022e68ba9cfb127c8da829a6d9fdb 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h @@ -128,28 +128,28 @@ static const struct dpu_dspp_cfg sm8450_dspp[] = { }; /* FIXME: interrupts */ static const struct dpu_pingpong_cfg sm8450_pp[] = { - PP_BLK_TE("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sdm845_pp_sblk_te, + PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)), - PP_BLK_TE("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sdm845_pp_sblk_te, + PP_BLK_DITHER("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)), - PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sdm845_pp_sblk, + PP_BLK_DITHER("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10), DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14)), - PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sdm845_pp_sblk, + PP_BLK_DITHER("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11), DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15)), - PP_BLK("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sdm845_pp_sblk, + PP_BLK_DITHER("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30), -1), - PP_BLK("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sdm845_pp_sblk, + PP_BLK_DITHER("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31), -1), - PP_BLK("pingpong_6", PINGPONG_6, 0x65800, MERGE_3D_3, sdm845_pp_sblk, + PP_BLK_DITHER("pingpong_6", PINGPONG_6, 0x65800, MERGE_3D_3, sc7280_pp_sblk, -1, -1), - PP_BLK("pingpong_7", PINGPONG_7, 0x65c00, MERGE_3D_3, sdm845_pp_sblk, + PP_BLK_DITHER("pingpong_7", PINGPONG_7, 0x65c00, MERGE_3D_3, sc7280_pp_sblk, -1, -1), }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h index 9e403034093fdc81cf04c90bce561afe14d3bca0..d0ab351b6a8b934226fded7bdcf9db3ee30ac80a 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h @@ -132,28 +132,28 @@ static const struct dpu_dspp_cfg sm8550_dspp[] = { &sm8150_dspp_sblk), }; static const struct dpu_pingpong_cfg sm8550_pp[] = { - PP_BLK_DIPHER("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sc7280_pp_sblk, + PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), -1), - PP_BLK_DIPHER("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sc7280_pp_sblk, + PP_BLK_DITHER("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), -1), - PP_BLK_DIPHER("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sc7280_pp_sblk, + PP_BLK_DITHER("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, 
sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10), -1), - PP_BLK_DIPHER("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sc7280_pp_sblk, + PP_BLK_DITHER("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11), -1), - PP_BLK_DIPHER("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sc7280_pp_sblk, + PP_BLK_DITHER("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30), -1), - PP_BLK_DIPHER("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sc7280_pp_sblk, + PP_BLK_DITHER("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sc7280_pp_sblk, DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31), -1), - PP_BLK_DIPHER("pingpong_6", PINGPONG_6, 0x66000, MERGE_3D_3, sc7280_pp_sblk, + PP_BLK_DITHER("pingpong_6", PINGPONG_6, 0x66000, MERGE_3D_3, sc7280_pp_sblk, -1, -1), - PP_BLK_DIPHER("pingpong_7", PINGPONG_7, 0x66400, MERGE_3D_3, sc7280_pp_sblk, + PP_BLK_DITHER("pingpong_7", PINGPONG_7, 0x66400, MERGE_3D_3, sc7280_pp_sblk, -1, -1), }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c index 03f162af1a50bcaa996576f99dc6c3ac23bbc6d5..5d994bce696f9e76e6387aa1bf66bc8c6cbac298 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c @@ -491,7 +491,7 @@ static const struct dpu_pingpong_sub_blks sc7280_pp_sblk = { .len = 0x20, .version = 0x20000}, }; -#define PP_BLK_DIPHER(_name, _id, _base, _merge_3d, _sblk, _done, _rdptr) \ +#define PP_BLK_DITHER(_name, _id, _base, _merge_3d, _sblk, _done, _rdptr) \ {\ .name = _name, .id = _id, \ .base = _base, .len = 0, \ @@ -587,12 +587,12 @@ static const u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3}; static const struct dpu_vbif_dynamic_ot_cfg msm8998_ot_rdwr_cfg[] = { { - .pps = 1088 * 1920 * 30, + .pps = 1920 * 1080 * 30, .ot_limit = 2, }, { - .pps = 1088 * 1920 * 60, - .ot_limit = 6, + .pps = 1920 * 1080 * 60, + .ot_limit = 4, }, { .pps = 3840 * 2160 * 30, @@ -705,10 +705,7 @@ static const struct dpu_qos_lut_entry msm8998_qos_linear[] = { {.fl = 10, .lut = 0x1555b}, {.fl = 11, .lut = 0x5555b}, {.fl = 12, .lut = 0x15555b}, - {.fl = 13, .lut = 0x55555b}, - {.fl = 14, .lut = 0}, - {.fl = 1, .lut = 0x1b}, - {.fl = 0, .lut = 0} + {.fl = 0, .lut = 0x55555b} }; static const struct dpu_qos_lut_entry sdm845_qos_linear[] = { @@ -730,9 +727,7 @@ static const struct dpu_qos_lut_entry msm8998_qos_macrotile[] = { {.fl = 10, .lut = 0x1aaff}, {.fl = 11, .lut = 0x5aaff}, {.fl = 12, .lut = 0x15aaff}, - {.fl = 13, .lut = 0x55aaff}, - {.fl = 1, .lut = 0x1aaff}, - {.fl = 0, .lut = 0}, + {.fl = 0, .lut = 0x55aaff}, }; static const struct dpu_qos_lut_entry sc7180_qos_linear[] = { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c index 53326f25e40ef10b22bb09da70b829d4defa0553..17f3e7e4f1941052ddebbc562bcea0ba5f7b91f4 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c @@ -15,7 +15,7 @@ /* * Register offsets in MDSS register file for the interrupt registers - * w.r.t. to the MDP base + * w.r.t. 
the MDP base */ #define MDP_SSPP_TOP0_OFF 0x0 #define MDP_INTF_0_OFF 0x6A000 @@ -24,20 +24,23 @@ #define MDP_INTF_3_OFF 0x6B800 #define MDP_INTF_4_OFF 0x6C000 #define MDP_INTF_5_OFF 0x6C800 +#define INTF_INTR_EN 0x1c0 +#define INTF_INTR_STATUS 0x1c4 +#define INTF_INTR_CLEAR 0x1c8 #define MDP_AD4_0_OFF 0x7C000 #define MDP_AD4_1_OFF 0x7D000 #define MDP_AD4_INTR_EN_OFF 0x41c #define MDP_AD4_INTR_CLEAR_OFF 0x424 #define MDP_AD4_INTR_STATUS_OFF 0x420 -#define MDP_INTF_0_OFF_REV_7xxx 0x34000 -#define MDP_INTF_1_OFF_REV_7xxx 0x35000 -#define MDP_INTF_2_OFF_REV_7xxx 0x36000 -#define MDP_INTF_3_OFF_REV_7xxx 0x37000 -#define MDP_INTF_4_OFF_REV_7xxx 0x38000 -#define MDP_INTF_5_OFF_REV_7xxx 0x39000 -#define MDP_INTF_6_OFF_REV_7xxx 0x3a000 -#define MDP_INTF_7_OFF_REV_7xxx 0x3b000 -#define MDP_INTF_8_OFF_REV_7xxx 0x3c000 +#define MDP_INTF_0_OFF_REV_7xxx 0x34000 +#define MDP_INTF_1_OFF_REV_7xxx 0x35000 +#define MDP_INTF_2_OFF_REV_7xxx 0x36000 +#define MDP_INTF_3_OFF_REV_7xxx 0x37000 +#define MDP_INTF_4_OFF_REV_7xxx 0x38000 +#define MDP_INTF_5_OFF_REV_7xxx 0x39000 +#define MDP_INTF_6_OFF_REV_7xxx 0x3a000 +#define MDP_INTF_7_OFF_REV_7xxx 0x3b000 +#define MDP_INTF_8_OFF_REV_7xxx 0x3c000 /** * struct dpu_intr_reg - array of DPU register sets diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c index 84ee2efa9c664e04c89e3d898cd11914c99cc275..b9dddf576c029f0d136ce281eff897f7f041953b 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c @@ -56,11 +56,6 @@ #define INTF_TPG_RGB_MAPPING 0x11C #define INTF_PROG_FETCH_START 0x170 #define INTF_PROG_ROT_START 0x174 - -#define INTF_FRAME_LINE_COUNT_EN 0x0A8 -#define INTF_FRAME_COUNT 0x0AC -#define INTF_LINE_COUNT 0x0B0 - #define INTF_MUX 0x25C #define INTF_STATUS 0x26C diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c index 2d28afdf860efc43709e1c648be6607763ef4603..a3e413d277175e360a96557197cb4ba3ab2a6bf7 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c @@ -61,6 +61,7 @@ static const struct dpu_wb_cfg *_wb_offset(enum dpu_wb wb, for (i = 0; i < m->wb_count; i++) { if (wb == m->wb[i].id) { b->blk_addr = addr + m->wb[i].base; + b->log_mask = DPU_DBG_MASK_WB; return &m->wb[i]; } } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h index feb9a729844a3c1488419e360382e48ac3f16e1b..5acd5683d25a4021950a80d341d732ac6d7fc856 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h @@ -21,9 +21,6 @@ #define HIST_INTR_EN 0x01c #define HIST_INTR_STATUS 0x020 #define HIST_INTR_CLEAR 0x024 -#define INTF_INTR_EN 0x1C0 -#define INTF_INTR_STATUS 0x1C4 -#define INTF_INTR_CLEAR 0x1C8 #define SPLIT_DISPLAY_EN 0x2F4 #define SPLIT_DISPLAY_UPPER_PIPE_CTRL 0x2F8 #define DSPP_IGC_COLOR0_RAM_LUTN 0x300 diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c index 6666783e1468e33f3ddaefcfd51789de7588649f..1245c7aa49df844ce293207c6a74374eb0bee805 100644 --- a/drivers/gpu/drm/msm/dp/dp_audio.c +++ b/drivers/gpu/drm/msm/dp/dp_audio.c @@ -593,6 +593,18 @@ static struct hdmi_codec_pdata codec_data = { .i2s = 1, }; +void dp_unregister_audio_driver(struct device *dev, struct dp_audio *dp_audio) +{ + struct dp_audio_private *audio_priv; + + audio_priv = container_of(dp_audio, struct dp_audio_private, dp_audio); + + if (audio_priv->audio_pdev) { + 
platform_device_unregister(audio_priv->audio_pdev); + audio_priv->audio_pdev = NULL; + } +} + int dp_register_audio_driver(struct device *dev, struct dp_audio *dp_audio) { diff --git a/drivers/gpu/drm/msm/dp/dp_audio.h b/drivers/gpu/drm/msm/dp/dp_audio.h index 84e5f4a5d26badb0bfc8d5f8858681ab5e310ec1..4ab78880af8296efd50654967e297eccf83adc17 100644 --- a/drivers/gpu/drm/msm/dp/dp_audio.h +++ b/drivers/gpu/drm/msm/dp/dp_audio.h @@ -53,6 +53,8 @@ struct dp_audio *dp_audio_get(struct platform_device *pdev, int dp_register_audio_driver(struct device *dev, struct dp_audio *dp_audio); +void dp_unregister_audio_driver(struct device *dev, struct dp_audio *dp_audio); + /** * dp_audio_put() * diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c index 7a8cf1c8233d07b64594485ee87ca778e4dbfb02..5142aeb705a44dd0891d875ba401d3ea735da71d 100644 --- a/drivers/gpu/drm/msm/dp/dp_catalog.c +++ b/drivers/gpu/drm/msm/dp/dp_catalog.c @@ -620,7 +620,7 @@ void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog, config & DP_DP_HPD_INT_MASK); } -void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog) +void dp_catalog_ctrl_hpd_enable(struct dp_catalog *dp_catalog) { struct dp_catalog_private *catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog); @@ -635,6 +635,19 @@ void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog) dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN); } +void dp_catalog_ctrl_hpd_disable(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + u32 reftimer = dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER); + + reftimer &= ~DP_DP_HPD_REFTIMER_ENABLE; + dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer); + + dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, 0); +} + static void dp_catalog_enable_sdp(struct dp_catalog_private *catalog) { /* trigger sdp */ diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h index 82376a2697eff4e14f7b8d9315f19f8cfd5bf35e..38786e855b51a0439445f1571fb0979bfb7f4556 100644 --- a/drivers/gpu/drm/msm/dp/dp_catalog.h +++ b/drivers/gpu/drm/msm/dp/dp_catalog.h @@ -104,7 +104,8 @@ bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog); void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog, bool enable); void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog, u32 intr_mask, bool en); -void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog); +void dp_catalog_ctrl_hpd_enable(struct dp_catalog *dp_catalog); +void dp_catalog_ctrl_hpd_disable(struct dp_catalog *dp_catalog); void dp_catalog_ctrl_config_psr(struct dp_catalog *dp_catalog); void dp_catalog_ctrl_set_psr(struct dp_catalog *dp_catalog, bool enter); u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog); diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index 3e13acdfa7e533825f5991ee8470a595d32d1327..03b0eda6df54a550aec7d7d51129a47f9eee8154 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -28,6 +28,10 @@ #include "dp_audio.h" #include "dp_debug.h" +static bool psr_enabled = false; +module_param(psr_enabled, bool, 0); +MODULE_PARM_DESC(psr_enabled, "enable PSR for eDP and DP displays"); + #define HPD_STRING_SIZE 30 enum { @@ -326,6 +330,7 @@ static void dp_display_unbind(struct device *dev, struct device *master, kthread_stop(dp->ev_tsk); dp_power_client_deinit(dp->power); + 
dp_unregister_audio_driver(dev, dp->audio); dp_aux_unregister(dp->aux); dp->drm_dev = NULL; dp->aux->drm_dev = NULL; @@ -406,7 +411,7 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp) edid = dp->panel->edid; - dp->dp_display.psr_supported = dp->panel->psr_cap.version; + dp->dp_display.psr_supported = dp->panel->psr_cap.version && psr_enabled; dp->audio_supported = drm_detect_monitor_audio(edid); dp_panel_handle_sink_request(dp->panel); @@ -615,12 +620,6 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data) dp->hpd_state = ST_MAINLINK_READY; } - /* enable HDP irq_hpd/replug interrupt */ - if (dp->dp_display.internal_hpd) - dp_catalog_hpd_config_intr(dp->catalog, - DP_DP_IRQ_HPD_INT_MASK | DP_DP_HPD_REPLUG_INT_MASK, - true); - drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n", dp->dp_display.connector_type, state); mutex_unlock(&dp->event_mutex); @@ -658,12 +657,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n", dp->dp_display.connector_type, state); - /* disable irq_hpd/replug interrupts */ - if (dp->dp_display.internal_hpd) - dp_catalog_hpd_config_intr(dp->catalog, - DP_DP_IRQ_HPD_INT_MASK | DP_DP_HPD_REPLUG_INT_MASK, - false); - /* unplugged, no more irq_hpd handle */ dp_del_event(dp, EV_IRQ_HPD_INT); @@ -687,10 +680,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) return 0; } - /* disable HPD plug interrupts */ - if (dp->dp_display.internal_hpd) - dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK, false); - /* * We don't need separate work for disconnect as * connect/attention interrupts are disabled @@ -706,10 +695,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) /* signal the disconnect event early to ensure proper teardown */ dp_display_handle_plugged_change(&dp->dp_display, false); - /* enable HDP plug interrupt to prepare for next plugin */ - if (dp->dp_display.internal_hpd) - dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK, true); - drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n", dp->dp_display.connector_type, state); @@ -1082,26 +1067,6 @@ void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp) mutex_unlock(&dp_display->event_mutex); } -static void dp_display_config_hpd(struct dp_display_private *dp) -{ - - dp_display_host_init(dp); - dp_catalog_ctrl_hpd_config(dp->catalog); - - /* Enable plug and unplug interrupts only if requested */ - if (dp->dp_display.internal_hpd) - dp_catalog_hpd_config_intr(dp->catalog, - DP_DP_HPD_PLUG_INT_MASK | - DP_DP_HPD_UNPLUG_INT_MASK, - true); - - /* Enable interrupt first time - * we are leaving dp clocks on during disconnect - * and never disable interrupt - */ - enable_irq(dp->irq); -} - void dp_display_set_psr(struct msm_dp *dp_display, bool enter) { struct dp_display_private *dp; @@ -1176,7 +1141,7 @@ static int hpd_event_thread(void *data) switch (todo->event_id) { case EV_HPD_INIT_SETUP: - dp_display_config_hpd(dp_priv); + dp_display_host_init(dp_priv); break; case EV_HPD_PLUG_INT: dp_hpd_plug_handle(dp_priv, todo->data); @@ -1282,7 +1247,6 @@ int dp_display_request_irq(struct msm_dp *dp_display) dp->irq, rc); return rc; } - disable_irq(dp->irq); return 0; } @@ -1394,13 +1358,8 @@ static int dp_pm_resume(struct device *dev) /* turn on dp ctrl/phy */ dp_display_host_init(dp); - dp_catalog_ctrl_hpd_config(dp->catalog); - - if (dp->dp_display.internal_hpd) - dp_catalog_hpd_config_intr(dp->catalog, - 
DP_DP_HPD_PLUG_INT_MASK | - DP_DP_HPD_UNPLUG_INT_MASK, - true); + if (dp_display->is_edp) + dp_catalog_ctrl_hpd_enable(dp->catalog); if (dp_catalog_link_is_connected(dp->catalog)) { /* @@ -1568,9 +1527,8 @@ static int dp_display_get_next_bridge(struct msm_dp *dp) if (aux_bus && dp->is_edp) { dp_display_host_init(dp_priv); - dp_catalog_ctrl_hpd_config(dp_priv->catalog); + dp_catalog_ctrl_hpd_enable(dp_priv->catalog); dp_display_host_phy_init(dp_priv); - enable_irq(dp_priv->irq); /* * The code below assumes that the panel will finish probing @@ -1612,7 +1570,6 @@ static int dp_display_get_next_bridge(struct msm_dp *dp) error: if (dp->is_edp) { - disable_irq(dp_priv->irq); dp_display_host_phy_exit(dp_priv); dp_display_host_deinit(dp_priv); } @@ -1801,16 +1758,31 @@ void dp_bridge_hpd_enable(struct drm_bridge *bridge) { struct msm_dp_bridge *dp_bridge = to_dp_bridge(bridge); struct msm_dp *dp_display = dp_bridge->dp_display; + struct dp_display_private *dp = container_of(dp_display, struct dp_display_private, dp_display); + + mutex_lock(&dp->event_mutex); + dp_catalog_ctrl_hpd_enable(dp->catalog); + + /* enable HDP interrupts */ + dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, true); dp_display->internal_hpd = true; + mutex_unlock(&dp->event_mutex); } void dp_bridge_hpd_disable(struct drm_bridge *bridge) { struct msm_dp_bridge *dp_bridge = to_dp_bridge(bridge); struct msm_dp *dp_display = dp_bridge->dp_display; + struct dp_display_private *dp = container_of(dp_display, struct dp_display_private, dp_display); + + mutex_lock(&dp->event_mutex); + /* disable HDP interrupts */ + dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false); + dp_catalog_ctrl_hpd_disable(dp->catalog); dp_display->internal_hpd = false; + mutex_unlock(&dp->event_mutex); } void dp_bridge_hpd_notify(struct drm_bridge *bridge, diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index d77fa9793c54dc7adb001f9e81dd12ecad891f2a..9c45d641b5212c11078ab38c13a519663d85e10a 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -155,6 +155,8 @@ static bool can_do_async(struct drm_atomic_state *state, for_each_new_crtc_in_state(state, crtc, crtc_state, i) { if (drm_atomic_crtc_needs_modeset(crtc_state)) return false; + if (!crtc_state->active) + return false; if (++num_crtcs > 1) return false; *async_crtc = crtc; diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index b4cfa44a8a5c479618d8e92fa871fbad42b1b929..463ca4164f5f8d344fad8ca929c8e4c758a476f0 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -449,6 +449,8 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv) if (ret) goto err_cleanup_mode_config; + dma_set_max_seg_size(dev, UINT_MAX); + /* Bind all our sub-components: */ ret = component_bind_all(dev, ddev); if (ret) @@ -459,8 +461,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv) if (ret) goto err_msm_uninit; - dma_set_max_seg_size(dev, UINT_MAX); - msm_gem_shrinker_init(ddev); if (priv->kms_init) { diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index db6c4e281d75d06dc169c0e48d7c112413db682e..cd39b9d8abdbeb96aad9495552f3f113aa3a008e 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -219,7 +219,8 @@ static void put_pages(struct drm_gem_object *obj) } } -static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj) +static struct page **msm_gem_pin_pages_locked(struct 
drm_gem_object *obj, + unsigned madv) { struct msm_drm_private *priv = obj->dev->dev_private; struct msm_gem_object *msm_obj = to_msm_bo(obj); @@ -227,7 +228,9 @@ static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj) msm_gem_assert_locked(obj); - if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) { + if (GEM_WARN_ON(msm_obj->madv > madv)) { + DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n", + msm_obj->madv, madv); return ERR_PTR(-EBUSY); } @@ -248,7 +251,7 @@ struct page **msm_gem_pin_pages(struct drm_gem_object *obj) struct page **p; msm_gem_lock(obj); - p = msm_gem_pin_pages_locked(obj); + p = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED); msm_gem_unlock(obj); return p; @@ -473,10 +476,7 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma) msm_gem_assert_locked(obj); - if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) - return -EBUSY; - - pages = msm_gem_pin_pages_locked(obj); + pages = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED); if (IS_ERR(pages)) return PTR_ERR(pages); @@ -699,13 +699,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv) if (obj->import_attach) return ERR_PTR(-ENODEV); - if (GEM_WARN_ON(msm_obj->madv > madv)) { - DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n", - msm_obj->madv, madv); - return ERR_PTR(-EBUSY); - } - - pages = msm_gem_pin_pages_locked(obj); + pages = msm_gem_pin_pages_locked(obj, madv); if (IS_ERR(pages)) return ERR_CAST(pages); diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index aff18c2f600ab2050d52cd05ba924d3b4ae68a20..9f5933c75e3df981177c9b38edcadab411b60d38 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -722,7 +722,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, struct msm_drm_private *priv = dev->dev_private; struct drm_msm_gem_submit *args = data; struct msm_file_private *ctx = file->driver_priv; - struct msm_gem_submit *submit; + struct msm_gem_submit *submit = NULL; struct msm_gpu *gpu = priv->gpu; struct msm_gpu_submitqueue *queue; struct msm_ringbuffer *ring; @@ -769,13 +769,15 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, out_fence_fd = get_unused_fd_flags(O_CLOEXEC); if (out_fence_fd < 0) { ret = out_fence_fd; - return ret; + goto out_post_unlock; } } submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds); - if (IS_ERR(submit)) - return PTR_ERR(submit); + if (IS_ERR(submit)) { + ret = PTR_ERR(submit); + goto out_post_unlock; + } trace_msm_gpu_submit(pid_nr(submit->pid), ring->id, submit->ident, args->nr_bos, args->nr_cmds); @@ -962,11 +964,20 @@ out: if (has_ww_ticket) ww_acquire_fini(&submit->ticket); out_unlock: - if (ret && (out_fence_fd >= 0)) - put_unused_fd(out_fence_fd); mutex_unlock(&queue->lock); out_post_unlock: - msm_gem_submit_put(submit); + if (ret && (out_fence_fd >= 0)) + put_unused_fd(out_fence_fd); + + if (!IS_ERR_OR_NULL(submit)) { + msm_gem_submit_put(submit); + } else { + /* + * If the submit hasn't yet taken ownership of the queue + * then we need to drop the reference ourself: + */ + msm_submitqueue_put(queue); + } if (!IS_ERR_OR_NULL(post_deps)) { for (i = 0; i < args->nr_out_syncobjs; ++i) { kfree(post_deps[i].chain); diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index 418e1e06cddefd83b7cfd5aa57f2197de2f4f489..5cc8d358cc9759307a444cd62bce83c62b3dcdb7 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ 
-234,7 +234,12 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent) /* Get the pagetable configuration from the domain */ if (adreno_smmu->cookie) ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie); - if (!ttbr1_cfg) + + /* + * If you hit this WARN_ONCE() you are probably missing an entry in + * qcom_smmu_impl_of_match[] in arm-smmu-qcom.c + */ + if (WARN_ONCE(!ttbr1_cfg, "No per-process page tables")) return ERR_PTR(-ENODEV); pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL); @@ -410,7 +415,7 @@ struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsig struct msm_mmu *mmu; mmu = msm_iommu_new(dev, quirks); - if (IS_ERR(mmu)) + if (IS_ERR_OR_NULL(mmu)) return mmu; iommu = to_msm_iommu(mmu); diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 8cf096f841a908f9403a75b640526c24939020c0..a2ae8c21e4dcea81c18d87913c5961e46f36234d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -220,6 +220,9 @@ static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out int optimus_funcs; struct pci_dev *parent_pdev; + if (pdev->vendor != PCI_VENDOR_ID_NVIDIA) + return; + *has_pr3 = false; parent_pdev = pci_upstream_bridge(pdev); if (parent_pdev) { diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 086b66b60d918a488dcda36db23d816e281ebebf..f75c6f09dd2af5fca8431f1144eb3439511f81c0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -730,7 +730,8 @@ out: #endif nouveau_connector_set_edid(nv_connector, edid); - nouveau_connector_set_encoder(connector, nv_encoder); + if (nv_encoder) + nouveau_connector_set_encoder(connector, nv_encoder); return status; } @@ -966,7 +967,7 @@ nouveau_connector_get_modes(struct drm_connector *connector) /* Determine display colour depth for everything except LVDS now, * DP requires this before mode_valid() is called. */ - if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS) + if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && nv_connector->native_mode) nouveau_connector_detect_depth(connector); /* Find the native mode if this is a digital panel, if we didn't @@ -987,7 +988,7 @@ nouveau_connector_get_modes(struct drm_connector *connector) * "native" mode as some VBIOS tables require us to use the * pixel clock as part of the lookup... 
*/ - if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) + if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS && nv_connector->native_mode) nouveau_connector_detect_depth(connector); if (nv_encoder->dcb->type == DCB_OUTPUT_TV) diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index cc7c5b4a05fd875413593686ff42470017e8e50f..7aac9384600ed43863ded2165441c731886c36de 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -137,10 +137,16 @@ nouveau_name(struct drm_device *dev) static inline bool nouveau_cli_work_ready(struct dma_fence *fence) { - if (!dma_fence_is_signaled(fence)) - return false; - dma_fence_put(fence); - return true; + bool ret = true; + + spin_lock_irq(fence->lock); + if (!dma_fence_is_signaled_locked(fence)) + ret = false; + spin_unlock_irq(fence->lock); + + if (ret == true) + dma_fence_put(fence); + return ret; } static void diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c index 6afdf260a4e226492637fa1f27f5a6b33de95918..b9fe926a49e8bb42116ac382010829e333edbf8f 100644 --- a/drivers/gpu/drm/pl111/pl111_display.c +++ b/drivers/gpu/drm/pl111/pl111_display.c @@ -53,7 +53,7 @@ pl111_mode_valid(struct drm_simple_display_pipe *pipe, { struct drm_device *drm = pipe->crtc.dev; struct pl111_drm_dev_private *priv = drm->dev_private; - u32 cpp = priv->variant->fb_bpp / 8; + u32 cpp = DIV_ROUND_UP(priv->variant->fb_depth, 8); u64 bw; /* diff --git a/drivers/gpu/drm/pl111/pl111_drm.h b/drivers/gpu/drm/pl111/pl111_drm.h index 2a46b5bd857649eb811e5d27b122ca3515397d2e..d1fe756444ee52056da07b6189d39d6fa441cb8d 100644 --- a/drivers/gpu/drm/pl111/pl111_drm.h +++ b/drivers/gpu/drm/pl111/pl111_drm.h @@ -114,7 +114,7 @@ struct drm_minor; * extensions to the control register * @formats: array of supported pixel formats on this variant * @nformats: the length of the array of supported pixel formats - * @fb_bpp: desired bits per pixel on the default framebuffer + * @fb_depth: desired depth per pixel on the default framebuffer */ struct pl111_variant_data { const char *name; @@ -126,7 +126,7 @@ struct pl111_variant_data { bool st_bitmux_control; const u32 *formats; unsigned int nformats; - unsigned int fb_bpp; + unsigned int fb_depth; }; struct pl111_drm_dev_private { diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c index 4b2a9e9753f6da9cf1bbab087ff7533a6c0ac01b..43049c8028b21649c232cf33ddfe5681cac29644 100644 --- a/drivers/gpu/drm/pl111/pl111_drv.c +++ b/drivers/gpu/drm/pl111/pl111_drv.c @@ -308,7 +308,7 @@ static int pl111_amba_probe(struct amba_device *amba_dev, if (ret < 0) goto dev_put; - drm_fbdev_dma_setup(drm, priv->variant->fb_bpp); + drm_fbdev_dma_setup(drm, priv->variant->fb_depth); return 0; @@ -351,7 +351,7 @@ static const struct pl111_variant_data pl110_variant = { .is_pl110 = true, .formats = pl110_pixel_formats, .nformats = ARRAY_SIZE(pl110_pixel_formats), - .fb_bpp = 16, + .fb_depth = 16, }; /* RealView, Versatile Express etc use this modern variant */ @@ -376,7 +376,7 @@ static const struct pl111_variant_data pl111_variant = { .name = "PL111", .formats = pl111_pixel_formats, .nformats = ARRAY_SIZE(pl111_pixel_formats), - .fb_bpp = 32, + .fb_depth = 32, }; static const u32 pl110_nomadik_pixel_formats[] = { @@ -405,7 +405,7 @@ static const struct pl111_variant_data pl110_nomadik_variant = { .is_lcdc = true, .st_bitmux_control = true, .broken_vblank = true, - .fb_bpp = 16, + .fb_depth = 16, }; static const struct 
amba_id pl111_id_table[] = { diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c index 1b436b75fd396f2bf71ab067a873873ccd484fd5..00c3ebd32359b975c553e1b275c7e3b64d727879 100644 --- a/drivers/gpu/drm/pl111/pl111_versatile.c +++ b/drivers/gpu/drm/pl111/pl111_versatile.c @@ -316,7 +316,7 @@ static const struct pl111_variant_data pl110_integrator = { .broken_vblank = true, .formats = pl110_integrator_pixel_formats, .nformats = ARRAY_SIZE(pl110_integrator_pixel_formats), - .fb_bpp = 16, + .fb_depth = 16, }; /* @@ -330,7 +330,7 @@ static const struct pl111_variant_data pl110_impd1 = { .broken_vblank = true, .formats = pl110_integrator_pixel_formats, .nformats = ARRAY_SIZE(pl110_integrator_pixel_formats), - .fb_bpp = 16, + .fb_depth = 15, }; /* @@ -343,7 +343,7 @@ static const struct pl111_variant_data pl110_versatile = { .external_bgr = true, .formats = pl110_versatile_pixel_formats, .nformats = ARRAY_SIZE(pl110_versatile_pixel_formats), - .fb_bpp = 16, + .fb_depth = 16, }; /* @@ -355,7 +355,7 @@ static const struct pl111_variant_data pl111_realview = { .name = "PL111 RealView", .formats = pl111_realview_pixel_formats, .nformats = ARRAY_SIZE(pl111_realview_pixel_formats), - .fb_bpp = 16, + .fb_depth = 16, }; /* @@ -367,7 +367,7 @@ static const struct pl111_variant_data pl111_vexpress = { .name = "PL111 Versatile Express", .formats = pl111_realview_pixel_formats, .nformats = ARRAY_SIZE(pl111_realview_pixel_formats), - .fb_bpp = 16, + .fb_depth = 16, .broken_clockdivider = true, }; diff --git a/drivers/gpu/drm/radeon/radeon_fbdev.c b/drivers/gpu/drm/radeon/radeon_fbdev.c index fe76e29910efadd8022ef2edebd6f48ef7017301..8f6c3aef096285476cc3e0bc0467b45f870f9337 100644 --- a/drivers/gpu/drm/radeon/radeon_fbdev.c +++ b/drivers/gpu/drm/radeon/radeon_fbdev.c @@ -307,6 +307,7 @@ static void radeon_fbdev_client_unregister(struct drm_client_dev *client) if (fb_helper->info) { vga_switcheroo_client_fb_set(rdev->pdev, NULL); + drm_helper_force_disable_all(dev); drm_fb_helper_unregister_info(fb_helper); } else { drm_client_release(&fb_helper->client); diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index bdc5af23f005accc418ea77a9fe2d59e9ae409db..d3f5ddbc1704327b41f3b6cce5ac0c5612a775dd 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -459,7 +459,6 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, struct radeon_device *rdev = dev->dev_private; struct drm_radeon_gem_set_domain *args = data; struct drm_gem_object *gobj; - struct radeon_bo *robj; int r; /* for now if someone requests domain CPU - @@ -472,13 +471,12 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, up_read(&rdev->exclusive_lock); return -ENOENT; } - robj = gem_to_radeon_bo(gobj); r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain); drm_gem_object_put(gobj); up_read(&rdev->exclusive_lock); - r = radeon_gem_handle_lockup(robj->rdev, r); + r = radeon_gem_handle_lockup(rdev, r); return r; } diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 3377fbc71f65478b630085fccdbc786306f0157e..c4dda908666cfc4f9a312517698c7c0c7870af1f 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -99,6 +99,16 @@ static void radeon_hotplug_work_func(struct work_struct *work) static void radeon_dp_work_func(struct work_struct *work) { + struct radeon_device *rdev = container_of(work, struct 
radeon_device, + dp_work); + struct drm_device *dev = rdev->ddev; + struct drm_mode_config *mode_config = &dev->mode_config; + struct drm_connector *connector; + + mutex_lock(&mode_config->mutex); + list_for_each_entry(connector, &mode_config->connector_list, head) + radeon_connector_hotplug(connector); + mutex_unlock(&mode_config->mutex); } /** diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 8c183639603ec49a4efbcfa9b296ea9cea65d6f9..aea5a90ff98b98f8836417b46cbe628fc40aa680 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -1141,9 +1141,6 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched) for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) { struct drm_sched_rq *rq = &sched->sched_rq[i]; - if (!rq) - continue; - spin_lock(&rq->lock); list_for_each_entry(s_entity, &rq->entities, list) /* diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c index 7ae5f27df54dd6731b1dcef653a6153323cdbfc2..c6bdb9c4ef3e004cf27fba53b6f5e0eccbe21b55 100644 --- a/drivers/hid/hid-google-hammer.c +++ b/drivers/hid/hid-google-hammer.c @@ -586,6 +586,8 @@ static const struct hid_device_id hammer_devices[] = { USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_EEL) }, { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) }, + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, + USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_JEWEL) }, { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MAGNEMITE) }, { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index d79e946acdcbed9fe7afe132ada5366b0a784dbb..5d29abac2300e40c75b0b6eedb803969cac1a84a 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -529,6 +529,7 @@ #define USB_DEVICE_ID_GOOGLE_MOONBALL 0x5044 #define USB_DEVICE_ID_GOOGLE_DON 0x5050 #define USB_DEVICE_ID_GOOGLE_EEL 0x5057 +#define USB_DEVICE_ID_GOOGLE_JEWEL 0x5061 #define USB_VENDOR_ID_GOTOP 0x08f2 #define USB_DEVICE_ID_SUPER_Q2 0x007f diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index 0fcfd85fea0feff39a650348d31380daa0f26706..5e1a412fd28fa125ba84ee35ce570cc4827d755f 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -286,7 +286,7 @@ static int hidpp_send_message_sync(struct hidpp_device *hidpp, struct hidpp_report *message, struct hidpp_report *response) { - int ret; + int ret = -1; int max_retries = 3; mutex_lock(&hidpp->send_mutex); @@ -300,13 +300,13 @@ static int hidpp_send_message_sync(struct hidpp_device *hidpp, */ *response = *message; - for (; max_retries != 0; max_retries--) { + for (; max_retries != 0 && ret; max_retries--) { ret = __hidpp_send_report(hidpp->hid_dev, message); if (ret) { dbg_hid("__hidpp_send_report returned err: %d\n", ret); memset(response, 0, sizeof(struct hidpp_report)); - goto exit; + break; } if (!wait_event_timeout(hidpp->wait, hidpp->answer_available, @@ -314,13 +314,14 @@ static int hidpp_send_message_sync(struct hidpp_device *hidpp, dbg_hid("%s:timeout waiting for response\n", __func__); memset(response, 0, sizeof(struct hidpp_report)); ret = -ETIMEDOUT; + break; } if (response->report_id == REPORT_ID_HIDPP_SHORT && response->rap.sub_id == HIDPP_ERROR) { ret = response->rap.params[1]; dbg_hid("%s:got hidpp error %02X\n", __func__, ret); - goto exit; + break; } if ((response->report_id == REPORT_ID_HIDPP_LONG || @@ -329,13 +330,12 @@ static int 
hidpp_send_message_sync(struct hidpp_device *hidpp, ret = response->fap.params[1]; if (ret != HIDPP20_ERROR_BUSY) { dbg_hid("%s:got hidpp 2.0 error %02X\n", __func__, ret); - goto exit; + break; } dbg_hid("%s:got busy hidpp 2.0 error %02X, retrying\n", __func__, ret); } } -exit: mutex_unlock(&hidpp->send_mutex); return ret; diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index 8214896adadaddcb17ebccfc644d2c0f8ad496f3..76e5353aca0c7440253d407ceacd41513e18158a 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -2224,7 +2224,9 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix) } else if (strstr(product_name, "Wacom") || strstr(product_name, "wacom") || strstr(product_name, "WACOM")) { - strscpy(name, product_name, sizeof(name)); + if (strscpy(name, product_name, sizeof(name)) < 0) { + hid_warn(wacom->hdev, "String overflow while assembling device name"); + } } else { snprintf(name, sizeof(name), "Wacom %s", product_name); } @@ -2242,7 +2244,9 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix) if (name[strlen(name)-1] == ' ') name[strlen(name)-1] = '\0'; } else { - strscpy(name, features->name, sizeof(name)); + if (strscpy(name, features->name, sizeof(name)) < 0) { + hid_warn(wacom->hdev, "String overflow while assembling device name"); + } } snprintf(wacom_wac->name, sizeof(wacom_wac->name), "%s%s", @@ -2410,8 +2414,13 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless) goto fail_quirks; } - if (features->device_type & WACOM_DEVICETYPE_WL_MONITOR) + if (features->device_type & WACOM_DEVICETYPE_WL_MONITOR) { error = hid_hw_open(hdev); + if (error) { + hid_err(hdev, "hw open failed\n"); + goto fail_quirks; + } + } wacom_set_shared_values(wacom_wac); devres_close_group(&hdev->dev, wacom); @@ -2500,8 +2509,10 @@ static void wacom_wireless_work(struct work_struct *work) goto fail; } - strscpy(wacom_wac->name, wacom_wac1->name, - sizeof(wacom_wac->name)); + if (strscpy(wacom_wac->name, wacom_wac1->name, + sizeof(wacom_wac->name)) < 0) { + hid_warn(wacom->hdev, "String overflow while assembling device name"); + } } return; diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index dc0f7d9a992c10977a5bc3bcaf100cb5c548129b..2ccf838371343da1628bec555896caba7843d2eb 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -831,7 +831,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) /* Enter report */ if ((data[1] & 0xfc) == 0xc0) { /* serial number of the tool */ - wacom->serial[idx] = ((data[3] & 0x0f) << 28) + + wacom->serial[idx] = ((__u64)(data[3] & 0x0f) << 28) + (data[4] << 20) + (data[5] << 12) + (data[6] << 4) + (data[7] >> 4); diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 007f26d5f1a45214710559af9529f88fc878ab68..2f4d09ce027a3f6b07132da55c6b31cdbbfaf25b 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -829,11 +829,22 @@ static void vmbus_wait_for_unload(void) if (completion_done(&vmbus_connection.unload_event)) goto completed; - for_each_online_cpu(cpu) { + for_each_present_cpu(cpu) { struct hv_per_cpu_context *hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu); + /* + * In a CoCo VM the synic_message_page is not allocated + * in hv_synic_alloc(). Instead it is set/cleared in + * hv_synic_enable_regs() and hv_synic_disable_regs() + * such that it is set only when the CPU is online. If + * not all present CPUs are online, the message page + * might be NULL, so skip such CPUs. 
+ */ page_addr = hv_cpu->synic_message_page; + if (!page_addr) + continue; + msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT; @@ -867,11 +878,14 @@ completed: * maybe-pending messages on all CPUs to be able to receive new * messages after we reconnect. */ - for_each_online_cpu(cpu) { + for_each_present_cpu(cpu) { struct hv_per_cpu_context *hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu); page_addr = hv_cpu->synic_message_page; + if (!page_addr) + continue; + msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT; msg->header.message_type = HVMSG_NONE; } diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c index 64f9ceca887b9ea628489d0625d2e87f90cf698d..542a1d53b303e295f026a46d7cd2bab7ee4e206d 100644 --- a/drivers/hv/hv_common.c +++ b/drivers/hv/hv_common.c @@ -364,13 +364,20 @@ int hv_common_cpu_init(unsigned int cpu) flags = irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL; inputarg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg); - *inputarg = kmalloc(pgcount * HV_HYP_PAGE_SIZE, flags); - if (!(*inputarg)) - return -ENOMEM; - if (hv_root_partition) { - outputarg = (void **)this_cpu_ptr(hyperv_pcpu_output_arg); - *outputarg = (char *)(*inputarg) + HV_HYP_PAGE_SIZE; + /* + * hyperv_pcpu_input_arg and hyperv_pcpu_output_arg memory is already + * allocated if this CPU was previously online and then taken offline + */ + if (!*inputarg) { + *inputarg = kmalloc(pgcount * HV_HYP_PAGE_SIZE, flags); + if (!(*inputarg)) + return -ENOMEM; + + if (hv_root_partition) { + outputarg = (void **)this_cpu_ptr(hyperv_pcpu_output_arg); + *outputarg = (char *)(*inputarg) + HV_HYP_PAGE_SIZE; + } } msr_vp_index = hv_get_register(HV_REGISTER_VP_INDEX); @@ -385,24 +392,17 @@ int hv_common_cpu_init(unsigned int cpu) int hv_common_cpu_die(unsigned int cpu) { - unsigned long flags; - void **inputarg, **outputarg; - void *mem; - - local_irq_save(flags); - - inputarg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg); - mem = *inputarg; - *inputarg = NULL; - - if (hv_root_partition) { - outputarg = (void **)this_cpu_ptr(hyperv_pcpu_output_arg); - *outputarg = NULL; - } - - local_irq_restore(flags); - - kfree(mem); + /* + * The hyperv_pcpu_input_arg and hyperv_pcpu_output_arg memory + * is not freed when the CPU goes offline as the hyperv_pcpu_input_arg + * may be used by the Hyper-V vPCI driver in reassigning interrupts + * as part of the offlining process. The interrupt reassignment + * happens *after* the CPUHP_AP_HYPERV_ONLINE state has run and + * called this function. + * + * If a previously offlined CPU is brought back online again, the + * originally allocated memory is reused in hv_common_cpu_init(). 
+ */ return 0; } diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 1c65a6dfb9fae58c4cd29c7f6f028776caf54e9c..67f95a29aeca5b8e0b7f3755448724f4c7e726fb 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -1372,7 +1372,7 @@ static int vmbus_bus_init(void) ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online", hv_synic_init, hv_synic_cleanup); if (ret < 0) - goto err_cpuhp; + goto err_alloc; hyperv_cpuhp_online = ret; ret = vmbus_connect(); @@ -1392,9 +1392,8 @@ static int vmbus_bus_init(void) err_connect: cpuhp_remove_state(hyperv_cpuhp_online); -err_cpuhp: - hv_synic_free(); err_alloc: + hv_synic_free(); if (vmbus_irq == -1) { hv_remove_vmbus_handler(); } else { diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c index 711f451b69469404cc4a0835c266e9854860e3be..89e8ed214ea4967620c20ad1c1a1e39f9d068400 100644 --- a/drivers/hwtracing/coresight/coresight-etm-perf.c +++ b/drivers/hwtracing/coresight/coresight-etm-perf.c @@ -402,6 +402,7 @@ static void *etm_setup_aux(struct perf_event *event, void **pages, trace_id = coresight_trace_id_get_cpu_id(cpu); if (!IS_VALID_CS_TRACE_ID(trace_id)) { cpumask_clear_cpu(cpu, mask); + coresight_release_path(path); continue; } diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c index 918d461fcf4a68637cea80d6079ebb2e0f8eccb3..eaa296ced167893770a3695a1e9b8fb24098d861 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etr.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c @@ -942,7 +942,7 @@ tmc_etr_buf_insert_barrier_packet(struct etr_buf *etr_buf, u64 offset) len = tmc_etr_buf_get_data(etr_buf, offset, CORESIGHT_BARRIER_PKT_SIZE, &bufp); - if (WARN_ON(len < CORESIGHT_BARRIER_PKT_SIZE)) + if (WARN_ON(len < 0 || len < CORESIGHT_BARRIER_PKT_SIZE)) return -EINVAL; coresight_insert_barrier_packet(bufp); return offset + CORESIGHT_BARRIER_PKT_SIZE; diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index c5d87aae39c666db553afe3f57c18b27288f462d..bf23bfb51aead1e45c00cb2e44e28d609ecb40d1 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -40,6 +40,7 @@ #define DW_IC_CON_BUS_CLEAR_CTRL BIT(11) #define DW_IC_DATA_CMD_DAT GENMASK(7, 0) +#define DW_IC_DATA_CMD_FIRST_DATA_BYTE BIT(11) /* * Registers offset diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c index cec25054bb244fadd946d36dc234d499aa831260..2e079cf20bb5b40c09004bdc02a8124352fab958 100644 --- a/drivers/i2c/busses/i2c-designware-slave.c +++ b/drivers/i2c/busses/i2c-designware-slave.c @@ -176,6 +176,10 @@ static irqreturn_t i2c_dw_isr_slave(int this_irq, void *dev_id) do { regmap_read(dev->map, DW_IC_DATA_CMD, &tmp); + if (tmp & DW_IC_DATA_CMD_FIRST_DATA_BYTE) + i2c_slave_event(dev->slave, + I2C_SLAVE_WRITE_REQUESTED, + &val); val = tmp; i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_RECEIVED, &val); diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c index 8e987945ed4503d641bd44bcdf2fe36e6e078c66..39c479f96eb58564b75e91f4650cf31e1345433f 100644 --- a/drivers/i2c/busses/i2c-img-scb.c +++ b/drivers/i2c/busses/i2c-img-scb.c @@ -257,7 +257,7 @@ #define IMG_I2C_TIMEOUT (msecs_to_jiffies(1000)) /* - * Worst incs are 1 (innacurate) and 16*256 (irregular). + * Worst incs are 1 (inaccurate) and 16*256 (irregular). 
* So a sensible inc is the logarithmic mean: 64 (2^6), which is * in the middle of the valid range (0-127). */ diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c index 1af0a637d7f1414b033f5c0231c75a4ece2353bd..4d24ceb57ee7400938ac6886614012914353c204 100644 --- a/drivers/i2c/busses/i2c-imx-lpi2c.c +++ b/drivers/i2c/busses/i2c-imx-lpi2c.c @@ -201,8 +201,8 @@ static void lpi2c_imx_stop(struct lpi2c_imx_struct *lpi2c_imx) /* CLKLO = I2C_CLK_RATIO * CLKHI, SETHOLD = CLKHI, DATAVD = CLKHI/2 */ static int lpi2c_imx_config(struct lpi2c_imx_struct *lpi2c_imx) { - u8 prescale, filt, sethold, clkhi, clklo, datavd; - unsigned int clk_rate, clk_cycle; + u8 prescale, filt, sethold, datavd; + unsigned int clk_rate, clk_cycle, clkhi, clklo; enum lpi2c_imx_pincfg pincfg; unsigned int temp; diff --git a/drivers/i2c/busses/i2c-mchp-pci1xxxx.c b/drivers/i2c/busses/i2c-mchp-pci1xxxx.c index b21ffd6df9276b34ec14267d6db6fe9cd93c9c66..5ef136c3ecb1262144f36be9e54336740d9e5528 100644 --- a/drivers/i2c/busses/i2c-mchp-pci1xxxx.c +++ b/drivers/i2c/busses/i2c-mchp-pci1xxxx.c @@ -1118,8 +1118,10 @@ static int pci1xxxx_i2c_resume(struct device *dev) static DEFINE_SIMPLE_DEV_PM_OPS(pci1xxxx_i2c_pm_ops, pci1xxxx_i2c_suspend, pci1xxxx_i2c_resume); -static void pci1xxxx_i2c_shutdown(struct pci1xxxx_i2c *i2c) +static void pci1xxxx_i2c_shutdown(void *data) { + struct pci1xxxx_i2c *i2c = data; + pci1xxxx_i2c_config_padctrl(i2c, false); pci1xxxx_i2c_configure_core_reg(i2c, false); } @@ -1156,7 +1158,7 @@ static int pci1xxxx_i2c_probe_pci(struct pci_dev *pdev, init_completion(&i2c->i2c_xfer_done); pci1xxxx_i2c_init(i2c); - ret = devm_add_action(dev, (void (*)(void *))pci1xxxx_i2c_shutdown, i2c); + ret = devm_add_action(dev, pci1xxxx_i2c_shutdown, i2c); if (ret) return ret; diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c index 047dfef7a657750ef7ef75646b026d4ea3c168b9..878c076ebdc6b2ab5e30f8441094db198da28fcd 100644 --- a/drivers/i2c/busses/i2c-mv64xxx.c +++ b/drivers/i2c/busses/i2c-mv64xxx.c @@ -520,6 +520,17 @@ mv64xxx_i2c_intr(int irq, void *dev_id) while (readl(drv_data->reg_base + drv_data->reg_offsets.control) & MV64XXX_I2C_REG_CONTROL_IFLG) { + /* + * It seems that sometime the controller updates the status + * register only after it asserts IFLG in control register. + * This may result in weird bugs when in atomic mode. A delay + * of 100 ns before reading the status register solves this + * issue. This bug does not seem to appear when using + * interrupts. 
+ */ + if (drv_data->atomic) + ndelay(100); + status = readl(drv_data->reg_base + drv_data->reg_offsets.status); mv64xxx_i2c_fsm(drv_data, status); mv64xxx_i2c_do_action(drv_data); diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c index 2e153f2f71b6d8e9cde863759033d46c3c850ad7..78682388e02ed4ef05315bb1a8b3d8f58efb4385 100644 --- a/drivers/i2c/busses/i2c-qup.c +++ b/drivers/i2c/busses/i2c-qup.c @@ -1752,16 +1752,21 @@ nodma: if (!clk_freq || clk_freq > I2C_MAX_FAST_MODE_PLUS_FREQ) { dev_err(qup->dev, "clock frequency not supported %d\n", clk_freq); - return -EINVAL; + ret = -EINVAL; + goto fail_dma; } qup->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(qup->base)) - return PTR_ERR(qup->base); + if (IS_ERR(qup->base)) { + ret = PTR_ERR(qup->base); + goto fail_dma; + } qup->irq = platform_get_irq(pdev, 0); - if (qup->irq < 0) - return qup->irq; + if (qup->irq < 0) { + ret = qup->irq; + goto fail_dma; + } if (has_acpi_companion(qup->dev)) { ret = device_property_read_u32(qup->dev, @@ -1775,13 +1780,15 @@ nodma: qup->clk = devm_clk_get(qup->dev, "core"); if (IS_ERR(qup->clk)) { dev_err(qup->dev, "Could not get core clock\n"); - return PTR_ERR(qup->clk); + ret = PTR_ERR(qup->clk); + goto fail_dma; } qup->pclk = devm_clk_get(qup->dev, "iface"); if (IS_ERR(qup->pclk)) { dev_err(qup->dev, "Could not get iface clock\n"); - return PTR_ERR(qup->pclk); + ret = PTR_ERR(qup->pclk); + goto fail_dma; } qup_i2c_enable_clocks(qup); src_clk_freq = clk_get_rate(qup->clk); diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c index 4fe15cd78907e065a496724a0b84c7c644c21eb4..ffc54fbf814dde0a8a9ada780d811ea6138edbe6 100644 --- a/drivers/i2c/busses/i2c-sprd.c +++ b/drivers/i2c/busses/i2c-sprd.c @@ -576,12 +576,14 @@ static int sprd_i2c_remove(struct platform_device *pdev) struct sprd_i2c *i2c_dev = platform_get_drvdata(pdev); int ret; - ret = pm_runtime_resume_and_get(i2c_dev->dev); + ret = pm_runtime_get_sync(i2c_dev->dev); if (ret < 0) - return ret; + dev_err(&pdev->dev, "Failed to resume device (%pe)\n", ERR_PTR(ret)); i2c_del_adapter(&i2c_dev->adap); - clk_disable_unprepare(i2c_dev->clk); + + if (ret >= 0) + clk_disable_unprepare(i2c_dev->clk); pm_runtime_put_noidle(i2c_dev->dev); pm_runtime_disable(i2c_dev->dev); diff --git a/drivers/iio/accel/kionix-kx022a.c b/drivers/iio/accel/kionix-kx022a.c index f98393d746668f0ebbf3ba7d5d909adf25b1d680..b8636fa8eaeb68d740300d75e06365f9e887dd25 100644 --- a/drivers/iio/accel/kionix-kx022a.c +++ b/drivers/iio/accel/kionix-kx022a.c @@ -1048,7 +1048,7 @@ int kx022a_probe_internal(struct device *dev) data->ien_reg = KX022A_REG_INC4; } else { irq = fwnode_irq_get_byname(fwnode, "INT2"); - if (irq <= 0) + if (irq < 0) return dev_err_probe(dev, irq, "No suitable IRQ\n"); data->inc_reg = KX022A_REG_INC5; diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c index 5f7d81b44b1d28ce2eab297ec16c459bb9feed33..282e539157f87cb223c2ad985d964b0cd5a7a0b3 100644 --- a/drivers/iio/accel/st_accel_core.c +++ b/drivers/iio/accel/st_accel_core.c @@ -1291,12 +1291,12 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev) adev = ACPI_COMPANION(indio_dev->dev.parent); if (!adev) - return 0; + return -ENXIO; /* Read _ONT data, which should be a package of 6 integers. 
*/ status = acpi_evaluate_object(adev->handle, "_ONT", NULL, &buffer); if (status == AE_NOT_FOUND) { - return 0; + return -ENXIO; } else if (ACPI_FAILURE(status)) { dev_warn(&indio_dev->dev, "failed to execute _ONT: %d\n", status); diff --git a/drivers/iio/adc/ad4130.c b/drivers/iio/adc/ad4130.c index 38394341fd6e7401a94662fd5524eb512ea402b4..5a5dd5e87ffc4481c9776d509d32c319ba83135a 100644 --- a/drivers/iio/adc/ad4130.c +++ b/drivers/iio/adc/ad4130.c @@ -1817,6 +1817,11 @@ static const struct clk_ops ad4130_int_clk_ops = { .unprepare = ad4130_int_clk_unprepare, }; +static void ad4130_clk_del_provider(void *of_node) +{ + of_clk_del_provider(of_node); +} + static int ad4130_setup_int_clk(struct ad4130_state *st) { struct device *dev = &st->spi->dev; @@ -1824,6 +1829,7 @@ static int ad4130_setup_int_clk(struct ad4130_state *st) struct clk_init_data init; const char *clk_name; struct clk *clk; + int ret; if (st->int_pin_sel == AD4130_INT_PIN_CLK || st->mclk_sel != AD4130_MCLK_76_8KHZ) @@ -1843,7 +1849,11 @@ static int ad4130_setup_int_clk(struct ad4130_state *st) if (IS_ERR(clk)) return PTR_ERR(clk); - return of_clk_add_provider(of_node, of_clk_src_simple_get, clk); + ret = of_clk_add_provider(of_node, of_clk_src_simple_get, clk); + if (ret) + return ret; + + return devm_add_action_or_reset(dev, ad4130_clk_del_provider, of_node); } static int ad4130_setup(struct iio_dev *indio_dev) diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c index 55a6ab5910160725415b004ab523f453c97b5337..99bb604b78c8cf904271b1832bde8a91a1c13bc3 100644 --- a/drivers/iio/adc/ad7192.c +++ b/drivers/iio/adc/ad7192.c @@ -897,10 +897,6 @@ static const struct iio_info ad7195_info = { __AD719x_CHANNEL(_si, _channel1, -1, _address, NULL, IIO_VOLTAGE, \ BIT(IIO_CHAN_INFO_SCALE), ad7192_calibsys_ext_info) -#define AD719x_SHORTED_CHANNEL(_si, _channel1, _address) \ - __AD719x_CHANNEL(_si, _channel1, -1, _address, "shorted", IIO_VOLTAGE, \ - BIT(IIO_CHAN_INFO_SCALE), ad7192_calibsys_ext_info) - #define AD719x_TEMP_CHANNEL(_si, _address) \ __AD719x_CHANNEL(_si, 0, -1, _address, NULL, IIO_TEMP, 0, NULL) @@ -908,7 +904,7 @@ static const struct iio_chan_spec ad7192_channels[] = { AD719x_DIFF_CHANNEL(0, 1, 2, AD7192_CH_AIN1P_AIN2M), AD719x_DIFF_CHANNEL(1, 3, 4, AD7192_CH_AIN3P_AIN4M), AD719x_TEMP_CHANNEL(2, AD7192_CH_TEMP), - AD719x_SHORTED_CHANNEL(3, 2, AD7192_CH_AIN2P_AIN2M), + AD719x_DIFF_CHANNEL(3, 2, 2, AD7192_CH_AIN2P_AIN2M), AD719x_CHANNEL(4, 1, AD7192_CH_AIN1), AD719x_CHANNEL(5, 2, AD7192_CH_AIN2), AD719x_CHANNEL(6, 3, AD7192_CH_AIN3), @@ -922,7 +918,7 @@ static const struct iio_chan_spec ad7193_channels[] = { AD719x_DIFF_CHANNEL(2, 5, 6, AD7193_CH_AIN5P_AIN6M), AD719x_DIFF_CHANNEL(3, 7, 8, AD7193_CH_AIN7P_AIN8M), AD719x_TEMP_CHANNEL(4, AD7193_CH_TEMP), - AD719x_SHORTED_CHANNEL(5, 2, AD7193_CH_AIN2P_AIN2M), + AD719x_DIFF_CHANNEL(5, 2, 2, AD7193_CH_AIN2P_AIN2M), AD719x_CHANNEL(6, 1, AD7193_CH_AIN1), AD719x_CHANNEL(7, 2, AD7193_CH_AIN2), AD719x_CHANNEL(8, 3, AD7193_CH_AIN3), diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c index d8570f620785af8d8b6dfb1a11ee3b63ef579526..7e21928707437baf9a2522f265d72194745205e6 100644 --- a/drivers/iio/adc/ad_sigma_delta.c +++ b/drivers/iio/adc/ad_sigma_delta.c @@ -584,6 +584,10 @@ static int devm_ad_sd_probe_trigger(struct device *dev, struct iio_dev *indio_de init_completion(&sigma_delta->completion); sigma_delta->irq_dis = true; + + /* the IRQ core clears IRQ_DISABLE_UNLAZY flag when freeing an IRQ */ + irq_set_status_flags(sigma_delta->spi->irq, 
IRQ_DISABLE_UNLAZY); + ret = devm_request_irq(dev, sigma_delta->spi->irq, ad_sd_data_rdy_trig_poll, sigma_delta->info->irq_flags | IRQF_NO_AUTOEN, diff --git a/drivers/iio/adc/imx93_adc.c b/drivers/iio/adc/imx93_adc.c index a775d2e4056718c0f4b83239617e71d26a14cb1d..dce9ec91e4a778db91f866b6adac6d606496b786 100644 --- a/drivers/iio/adc/imx93_adc.c +++ b/drivers/iio/adc/imx93_adc.c @@ -236,8 +236,7 @@ static int imx93_adc_read_raw(struct iio_dev *indio_dev, { struct imx93_adc *adc = iio_priv(indio_dev); struct device *dev = adc->dev; - long ret; - u32 vref_uv; + int ret; switch (mask) { case IIO_CHAN_INFO_RAW: @@ -253,10 +252,10 @@ static int imx93_adc_read_raw(struct iio_dev *indio_dev, return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: - ret = vref_uv = regulator_get_voltage(adc->vref); + ret = regulator_get_voltage(adc->vref); if (ret < 0) return ret; - *val = vref_uv / 1000; + *val = ret / 1000; *val2 = 12; return IIO_VAL_FRACTIONAL_LOG2; diff --git a/drivers/iio/adc/mt6370-adc.c b/drivers/iio/adc/mt6370-adc.c index bc62e5a9d50d11d42e6791065f2a3266d4dc2b06..0bc112135bca1e5922ae19a7bd591f555e863240 100644 --- a/drivers/iio/adc/mt6370-adc.c +++ b/drivers/iio/adc/mt6370-adc.c @@ -19,6 +19,7 @@ #include +#define MT6370_REG_DEV_INFO 0x100 #define MT6370_REG_CHG_CTRL3 0x113 #define MT6370_REG_CHG_CTRL7 0x117 #define MT6370_REG_CHG_ADC 0x121 @@ -27,6 +28,7 @@ #define MT6370_ADC_START_MASK BIT(0) #define MT6370_ADC_IN_SEL_MASK GENMASK(7, 4) #define MT6370_AICR_ICHG_MASK GENMASK(7, 2) +#define MT6370_VENID_MASK GENMASK(7, 4) #define MT6370_AICR_100_mA 0x0 #define MT6370_AICR_150_mA 0x1 @@ -47,6 +49,10 @@ #define ADC_CONV_TIME_MS 35 #define ADC_CONV_POLLING_TIME_US 1000 +#define MT6370_VID_RT5081 0x8 +#define MT6370_VID_RT5081A 0xA +#define MT6370_VID_MT6370 0xE + struct mt6370_adc_data { struct device *dev; struct regmap *regmap; @@ -55,6 +61,7 @@ struct mt6370_adc_data { * from being read at the same time. 
*/ struct mutex adc_lock; + unsigned int vid; }; static int mt6370_adc_read_channel(struct mt6370_adc_data *priv, int chan, @@ -98,6 +105,30 @@ adc_unlock: return ret; } +static int mt6370_adc_get_ibus_scale(struct mt6370_adc_data *priv) +{ + switch (priv->vid) { + case MT6370_VID_RT5081: + case MT6370_VID_RT5081A: + case MT6370_VID_MT6370: + return 3350; + default: + return 3875; + } +} + +static int mt6370_adc_get_ibat_scale(struct mt6370_adc_data *priv) +{ + switch (priv->vid) { + case MT6370_VID_RT5081: + case MT6370_VID_RT5081A: + case MT6370_VID_MT6370: + return 2680; + default: + return 3870; + } +} + static int mt6370_adc_read_scale(struct mt6370_adc_data *priv, int chan, int *val1, int *val2) { @@ -123,7 +154,7 @@ static int mt6370_adc_read_scale(struct mt6370_adc_data *priv, case MT6370_AICR_250_mA: case MT6370_AICR_300_mA: case MT6370_AICR_350_mA: - *val1 = 3350; + *val1 = mt6370_adc_get_ibus_scale(priv); break; default: *val1 = 5000; @@ -150,7 +181,7 @@ static int mt6370_adc_read_scale(struct mt6370_adc_data *priv, case MT6370_ICHG_600_mA: case MT6370_ICHG_700_mA: case MT6370_ICHG_800_mA: - *val1 = 2680; + *val1 = mt6370_adc_get_ibat_scale(priv); break; default: *val1 = 5000; @@ -251,6 +282,20 @@ static const struct iio_chan_spec mt6370_adc_channels[] = { MT6370_ADC_CHAN(TEMP_JC, IIO_TEMP, 12, BIT(IIO_CHAN_INFO_OFFSET)), }; +static int mt6370_get_vendor_info(struct mt6370_adc_data *priv) +{ + unsigned int dev_info; + int ret; + + ret = regmap_read(priv->regmap, MT6370_REG_DEV_INFO, &dev_info); + if (ret) + return ret; + + priv->vid = FIELD_GET(MT6370_VENID_MASK, dev_info); + + return 0; +} + static int mt6370_adc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -272,6 +317,10 @@ static int mt6370_adc_probe(struct platform_device *pdev) priv->regmap = regmap; mutex_init(&priv->adc_lock); + ret = mt6370_get_vendor_info(priv); + if (ret) + return dev_err_probe(dev, ret, "Failed to get vid\n"); + ret = regmap_write(priv->regmap, MT6370_REG_CHG_ADC, 0); if (ret) return dev_err_probe(dev, ret, "Failed to reset ADC\n"); diff --git a/drivers/iio/adc/mxs-lradc-adc.c b/drivers/iio/adc/mxs-lradc-adc.c index bca79a93cbe43186fb8de002ab2f8a63bba54499..a50f39143d3ea3d7f23cf353081049b7069ca64b 100644 --- a/drivers/iio/adc/mxs-lradc-adc.c +++ b/drivers/iio/adc/mxs-lradc-adc.c @@ -757,13 +757,13 @@ static int mxs_lradc_adc_probe(struct platform_device *pdev) ret = mxs_lradc_adc_trigger_init(iio); if (ret) - goto err_trig; + return ret; ret = iio_triggered_buffer_setup(iio, &iio_pollfunc_store_time, &mxs_lradc_adc_trigger_handler, &mxs_lradc_adc_buffer_ops); if (ret) - return ret; + goto err_trig; adc->vref_mv = mxs_lradc_adc_vref_mv[lradc->soc]; @@ -801,9 +801,9 @@ static int mxs_lradc_adc_probe(struct platform_device *pdev) err_dev: mxs_lradc_adc_hw_stop(adc); - mxs_lradc_adc_trigger_remove(iio); -err_trig: iio_triggered_buffer_cleanup(iio); +err_trig: + mxs_lradc_adc_trigger_remove(iio); return ret; } @@ -814,8 +814,8 @@ static int mxs_lradc_adc_remove(struct platform_device *pdev) iio_device_unregister(iio); mxs_lradc_adc_hw_stop(adc); - mxs_lradc_adc_trigger_remove(iio); iio_triggered_buffer_cleanup(iio); + mxs_lradc_adc_trigger_remove(iio); return 0; } diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c index c1c439215aeb5fc5a20313572e2b7e6b2ec41716..7dfc9c927a23f121e6ea08ddb0b06a15601d4aa3 100644 --- a/drivers/iio/adc/palmas_gpadc.c +++ b/drivers/iio/adc/palmas_gpadc.c @@ -547,7 +547,7 @@ static int palmas_gpadc_read_raw(struct iio_dev 
*indio_dev, int adc_chan = chan->channel; int ret = 0; - if (adc_chan > PALMAS_ADC_CH_MAX) + if (adc_chan >= PALMAS_ADC_CH_MAX) return -EINVAL; mutex_lock(&adc->lock); @@ -595,7 +595,7 @@ static int palmas_gpadc_read_event_config(struct iio_dev *indio_dev, int adc_chan = chan->channel; int ret = 0; - if (adc_chan > PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH) + if (adc_chan >= PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH) return -EINVAL; mutex_lock(&adc->lock); @@ -684,7 +684,7 @@ static int palmas_gpadc_write_event_config(struct iio_dev *indio_dev, int adc_chan = chan->channel; int ret; - if (adc_chan > PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH) + if (adc_chan >= PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH) return -EINVAL; mutex_lock(&adc->lock); @@ -710,7 +710,7 @@ static int palmas_gpadc_read_event_value(struct iio_dev *indio_dev, int adc_chan = chan->channel; int ret; - if (adc_chan > PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH) + if (adc_chan >= PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH) return -EINVAL; mutex_lock(&adc->lock); @@ -744,7 +744,7 @@ static int palmas_gpadc_write_event_value(struct iio_dev *indio_dev, int old; int ret; - if (adc_chan > PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH) + if (adc_chan >= PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH) return -EINVAL; mutex_lock(&adc->lock); diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c index 1aadb2ad2cabdf12130030d4bf8b341b0fb93c42..bd7e2408bf281da5a90e40472cf7a2da3703f2bd 100644 --- a/drivers/iio/adc/stm32-adc.c +++ b/drivers/iio/adc/stm32-adc.c @@ -2006,16 +2006,15 @@ static int stm32_adc_get_legacy_chan_count(struct iio_dev *indio_dev, struct stm * to get the *real* number of channels. */ ret = device_property_count_u32(dev, "st,adc-diff-channels"); - if (ret < 0) - return ret; - - ret /= (int)(sizeof(struct stm32_adc_diff_channel) / sizeof(u32)); - if (ret > adc_info->max_channels) { - dev_err(&indio_dev->dev, "Bad st,adc-diff-channels?\n"); - return -EINVAL; - } else if (ret > 0) { - adc->num_diff = ret; - num_channels += ret; + if (ret > 0) { + ret /= (int)(sizeof(struct stm32_adc_diff_channel) / sizeof(u32)); + if (ret > adc_info->max_channels) { + dev_err(&indio_dev->dev, "Bad st,adc-diff-channels?\n"); + return -EINVAL; + } else if (ret > 0) { + adc->num_diff = ret; + num_channels += ret; + } } /* Optional sample time is provided either for each, or all channels */ @@ -2037,6 +2036,7 @@ static int stm32_adc_legacy_chan_init(struct iio_dev *indio_dev, struct stm32_adc_diff_channel diff[STM32_ADC_CH_MAX]; struct device *dev = &indio_dev->dev; u32 num_diff = adc->num_diff; + int num_se = nchans - num_diff; int size = num_diff * sizeof(*diff) / sizeof(u32); int scan_index = 0, ret, i, c; u32 smp = 0, smps[STM32_ADC_CH_MAX], chans[STM32_ADC_CH_MAX]; @@ -2063,29 +2063,32 @@ static int stm32_adc_legacy_chan_init(struct iio_dev *indio_dev, scan_index++; } } - - ret = device_property_read_u32_array(dev, "st,adc-channels", chans, - nchans); - if (ret) - return ret; - - for (c = 0; c < nchans; c++) { - if (chans[c] >= adc_info->max_channels) { - dev_err(&indio_dev->dev, "Invalid channel %d\n", - chans[c]); - return -EINVAL; + if (num_se > 0) { + ret = device_property_read_u32_array(dev, "st,adc-channels", chans, num_se); + if (ret) { + dev_err(&indio_dev->dev, "Failed to get st,adc-channels %d\n", ret); + return ret; } - /* Channel can't be configured both as single-ended & diff */ - for (i = 0; i < num_diff; i++) { - if (chans[c] == diff[i].vinp) { - 
dev_err(&indio_dev->dev, "channel %d misconfigured\n", chans[c]); + for (c = 0; c < num_se; c++) { + if (chans[c] >= adc_info->max_channels) { + dev_err(&indio_dev->dev, "Invalid channel %d\n", + chans[c]); return -EINVAL; } + + /* Channel can't be configured both as single-ended & diff */ + for (i = 0; i < num_diff; i++) { + if (chans[c] == diff[i].vinp) { + dev_err(&indio_dev->dev, "channel %d misconfigured\n", + chans[c]); + return -EINVAL; + } + } + stm32_adc_chan_init_one(indio_dev, &channels[scan_index], + chans[c], 0, scan_index, false); + scan_index++; } - stm32_adc_chan_init_one(indio_dev, &channels[scan_index], - chans[c], 0, scan_index, false); - scan_index++; } if (adc->nsmps > 0) { @@ -2306,7 +2309,7 @@ static int stm32_adc_chan_fw_init(struct iio_dev *indio_dev, bool timestamping) if (legacy) ret = stm32_adc_legacy_chan_init(indio_dev, adc, channels, - num_channels); + timestamping ? num_channels - 1 : num_channels); else ret = stm32_adc_generic_chan_init(indio_dev, adc, channels); if (ret < 0) diff --git a/drivers/iio/addac/ad74413r.c b/drivers/iio/addac/ad74413r.c index 07e9f6ae16a83dc612dafbb212a63e888c934fda..e3366cf5eb31943c006e059eda6fc26558073431 100644 --- a/drivers/iio/addac/ad74413r.c +++ b/drivers/iio/addac/ad74413r.c @@ -1007,7 +1007,7 @@ static int ad74413r_read_raw(struct iio_dev *indio_dev, ret = ad74413r_get_single_adc_result(indio_dev, chan->channel, val); - if (ret) + if (ret < 0) return ret; ad74413r_adc_to_resistance_result(*val, val); diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile index 6c74fea21736a426414689aabdb19422e5258356..addd97a788389cfb1305d41e872111534898c047 100644 --- a/drivers/iio/dac/Makefile +++ b/drivers/iio/dac/Makefile @@ -17,7 +17,7 @@ obj-$(CONFIG_AD5592R_BASE) += ad5592r-base.o obj-$(CONFIG_AD5592R) += ad5592r.o obj-$(CONFIG_AD5593R) += ad5593r.o obj-$(CONFIG_AD5755) += ad5755.o -obj-$(CONFIG_AD5755) += ad5758.o +obj-$(CONFIG_AD5758) += ad5758.o obj-$(CONFIG_AD5761) += ad5761.o obj-$(CONFIG_AD5764) += ad5764.o obj-$(CONFIG_AD5766) += ad5766.o diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c index 46bf758760f85bbd9500da020c3740fef1529e11..3f5661a3718fe87577d30c0ac039fdbfda4c9292 100644 --- a/drivers/iio/dac/mcp4725.c +++ b/drivers/iio/dac/mcp4725.c @@ -47,12 +47,18 @@ static int mcp4725_suspend(struct device *dev) struct mcp4725_data *data = iio_priv(i2c_get_clientdata( to_i2c_client(dev))); u8 outbuf[2]; + int ret; outbuf[0] = (data->powerdown_mode + 1) << 4; outbuf[1] = 0; data->powerdown = true; - return i2c_master_send(data->client, outbuf, 2); + ret = i2c_master_send(data->client, outbuf, 2); + if (ret < 0) + return ret; + else if (ret != 2) + return -EIO; + return 0; } static int mcp4725_resume(struct device *dev) @@ -60,13 +66,19 @@ static int mcp4725_resume(struct device *dev) struct mcp4725_data *data = iio_priv(i2c_get_clientdata( to_i2c_client(dev))); u8 outbuf[2]; + int ret; /* restore previous DAC value */ outbuf[0] = (data->dac_value >> 8) & 0xf; outbuf[1] = data->dac_value & 0xff; data->powerdown = false; - return i2c_master_send(data->client, outbuf, 2); + ret = i2c_master_send(data->client, outbuf, 2); + if (ret < 0) + return ret; + else if (ret != 2) + return -EIO; + return 0; } static DEFINE_SIMPLE_DEV_PM_OPS(mcp4725_pm_ops, mcp4725_suspend, mcp4725_resume); diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c index 99576b2c171f4d3d47f18de4704511ec69d1842e..32d7f836423036851b33493ded7e12007eadb47d 100644 --- 
a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c @@ -275,9 +275,14 @@ static int inv_icm42600_buffer_preenable(struct iio_dev *indio_dev) { struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev); struct device *dev = regmap_get_device(st->map); + struct inv_icm42600_timestamp *ts = iio_priv(indio_dev); pm_runtime_get_sync(dev); + mutex_lock(&st->lock); + inv_icm42600_timestamp_reset(ts); + mutex_unlock(&st->lock); + return 0; } @@ -375,7 +380,6 @@ static int inv_icm42600_buffer_postdisable(struct iio_dev *indio_dev) struct device *dev = regmap_get_device(st->map); unsigned int sensor; unsigned int *watermark; - struct inv_icm42600_timestamp *ts; struct inv_icm42600_sensor_conf conf = INV_ICM42600_SENSOR_CONF_INIT; unsigned int sleep_temp = 0; unsigned int sleep_sensor = 0; @@ -385,11 +389,9 @@ static int inv_icm42600_buffer_postdisable(struct iio_dev *indio_dev) if (indio_dev == st->indio_gyro) { sensor = INV_ICM42600_SENSOR_GYRO; watermark = &st->fifo.watermark.gyro; - ts = iio_priv(st->indio_gyro); } else if (indio_dev == st->indio_accel) { sensor = INV_ICM42600_SENSOR_ACCEL; watermark = &st->fifo.watermark.accel; - ts = iio_priv(st->indio_accel); } else { return -EINVAL; } @@ -417,8 +419,6 @@ static int inv_icm42600_buffer_postdisable(struct iio_dev *indio_dev) if (!st->fifo.on) ret = inv_icm42600_set_temp_conf(st, false, &sleep_temp); - inv_icm42600_timestamp_reset(ts); - out_unlock: mutex_unlock(&st->lock); diff --git a/drivers/iio/industrialio-gts-helper.c b/drivers/iio/industrialio-gts-helper.c index 8bb68975b2598a48682af26d798b0af9340de311..7653261d2dc2bf4cb916f9ca30b239261597a018 100644 --- a/drivers/iio/industrialio-gts-helper.c +++ b/drivers/iio/industrialio-gts-helper.c @@ -337,6 +337,17 @@ free_gains: return ret; } +static void iio_gts_us_to_int_micro(int *time_us, int *int_micro_times, + int num_times) +{ + int i; + + for (i = 0; i < num_times; i++) { + int_micro_times[i * 2] = time_us[i] / 1000000; + int_micro_times[i * 2 + 1] = time_us[i] % 1000000; + } +} + /** * iio_gts_build_avail_time_table - build table of available integration times * @gts: Gain time scale descriptor @@ -351,7 +362,7 @@ free_gains: */ static int iio_gts_build_avail_time_table(struct iio_gts *gts) { - int *times, i, j, idx = 0; + int *times, i, j, idx = 0, *int_micro_times; if (!gts->num_itime) return 0; @@ -378,13 +389,24 @@ static int iio_gts_build_avail_time_table(struct iio_gts *gts) } } } - gts->avail_time_tables = times; - /* - * This is just to survive a unlikely corner-case where times in the - * given time table were not unique. Else we could just trust the - * gts->num_itime. - */ - gts->num_avail_time_tables = idx; + + /* create a list of times formatted as list of IIO_VAL_INT_PLUS_MICRO */ + int_micro_times = kcalloc(idx, sizeof(int) * 2, GFP_KERNEL); + if (int_micro_times) { + /* + * This is just to survive a unlikely corner-case where times in + * the given time table were not unique. Else we could just + * trust the gts->num_itime. 
+ */ + gts->num_avail_time_tables = idx; + iio_gts_us_to_int_micro(times, int_micro_times, idx); + } + + gts->avail_time_tables = int_micro_times; + kfree(times); + + if (!int_micro_times) + return -ENOMEM; return 0; } @@ -683,8 +705,8 @@ int iio_gts_avail_times(struct iio_gts *gts, const int **vals, int *type, return -EINVAL; *vals = gts->avail_time_tables; - *type = IIO_VAL_INT; - *length = gts->num_avail_time_tables; + *type = IIO_VAL_INT_PLUS_MICRO; + *length = gts->num_avail_time_tables * 2; return IIO_AVAIL_LIST; } diff --git a/drivers/iio/light/rohm-bu27034.c b/drivers/iio/light/rohm-bu27034.c index e486dcf35eba7d60fc84e52f75fc566e52193a72..f85194fda6b093d5de2c487fb268e89ea3b2b14d 100644 --- a/drivers/iio/light/rohm-bu27034.c +++ b/drivers/iio/light/rohm-bu27034.c @@ -231,6 +231,9 @@ struct bu27034_result { static const struct regmap_range bu27034_volatile_ranges[] = { { + .range_min = BU27034_REG_SYSTEM_CONTROL, + .range_max = BU27034_REG_SYSTEM_CONTROL, + }, { .range_min = BU27034_REG_MODE_CONTROL4, .range_max = BU27034_REG_MODE_CONTROL4, }, { @@ -1167,11 +1170,12 @@ static int bu27034_read_raw(struct iio_dev *idev, switch (mask) { case IIO_CHAN_INFO_INT_TIME: - *val = bu27034_get_int_time(data); - if (*val < 0) - return *val; + *val = 0; + *val2 = bu27034_get_int_time(data); + if (*val2 < 0) + return *val2; - return IIO_VAL_INT; + return IIO_VAL_INT_PLUS_MICRO; case IIO_CHAN_INFO_SCALE: return bu27034_get_scale(data, chan->channel, val, val2); @@ -1229,7 +1233,10 @@ static int bu27034_write_raw(struct iio_dev *idev, ret = bu27034_set_scale(data, chan->channel, val, val2); break; case IIO_CHAN_INFO_INT_TIME: - ret = bu27034_try_set_int_time(data, val); + if (!val) + ret = bu27034_try_set_int_time(data, val2); + else + ret = -EINVAL; break; default: ret = -EINVAL; @@ -1268,12 +1275,19 @@ static int bu27034_chip_init(struct bu27034_data *data) int ret, sel; /* Reset */ - ret = regmap_update_bits(data->regmap, BU27034_REG_SYSTEM_CONTROL, + ret = regmap_write_bits(data->regmap, BU27034_REG_SYSTEM_CONTROL, BU27034_MASK_SW_RESET, BU27034_MASK_SW_RESET); if (ret) return dev_err_probe(data->dev, ret, "Sensor reset failed\n"); msleep(1); + + ret = regmap_reinit_cache(data->regmap, &bu27034_regmap); + if (ret) { + dev_err(data->dev, "Failed to reinit reg cache\n"); + return ret; + } + /* * Read integration time here to ensure it is in regmap cache. 
We do * this to speed-up the int-time acquisition in the start of the buffer diff --git a/drivers/iio/light/vcnl4035.c b/drivers/iio/light/vcnl4035.c index 14e29330e972eb91f094a284913fb57e79e50572..94f5d611e98c3c80225737dd837a3d25d834aa9d 100644 --- a/drivers/iio/light/vcnl4035.c +++ b/drivers/iio/light/vcnl4035.c @@ -8,6 +8,7 @@ * TODO: Proximity */ #include +#include #include #include #include @@ -42,6 +43,7 @@ #define VCNL4035_ALS_PERS_MASK GENMASK(3, 2) #define VCNL4035_INT_ALS_IF_H_MASK BIT(12) #define VCNL4035_INT_ALS_IF_L_MASK BIT(13) +#define VCNL4035_DEV_ID_MASK GENMASK(7, 0) /* Default values */ #define VCNL4035_MODE_ALS_ENABLE BIT(0) @@ -413,6 +415,7 @@ static int vcnl4035_init(struct vcnl4035_data *data) return ret; } + id = FIELD_GET(VCNL4035_DEV_ID_MASK, id); if (id != VCNL4035_DEV_ID_VAL) { dev_err(&data->client->dev, "Wrong id, got %x, expected %x\n", id, VCNL4035_DEV_ID_VAL); diff --git a/drivers/iio/magnetometer/tmag5273.c b/drivers/iio/magnetometer/tmag5273.c index 28bb7efe8df8c07bba0b4810c9beae3e60c5ef8c..e155a75b3cd29d3563b59b8a9fd98889f0ac9bf9 100644 --- a/drivers/iio/magnetometer/tmag5273.c +++ b/drivers/iio/magnetometer/tmag5273.c @@ -296,12 +296,13 @@ static int tmag5273_read_raw(struct iio_dev *indio_dev, return ret; ret = tmag5273_get_measure(data, &t, &x, &y, &z, &angle, &magnitude); - if (ret) - return ret; pm_runtime_mark_last_busy(data->dev); pm_runtime_put_autosuspend(data->dev); + if (ret) + return ret; + switch (chan->address) { case TEMPERATURE: *val = t; diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 5146ef2dbfd9e97af0114e34d0e9999814909d50..1ee87c3aaeabc9ceec26c8ef8f6b06762233d1c0 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -3295,7 +3295,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) route->path_rec->traffic_class = tos; route->path_rec->mtu = iboe_get_mtu(ndev->mtu); route->path_rec->rate_selector = IB_SA_EQ; - route->path_rec->rate = iboe_get_rate(ndev); + route->path_rec->rate = IB_RATE_PORT_CURRENT; dev_put(ndev); route->path_rec->packet_life_time_selector = IB_SA_EQ; /* In case ACK timeout is set, use this value to calculate @@ -4963,7 +4963,7 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv, if (!ndev) return -ENODEV; - ib.rec.rate = iboe_get_rate(ndev); + ib.rec.rate = IB_RATE_PORT_CURRENT; ib.rec.hop_limit = 1; ib.rec.mtu = iboe_get_mtu(ndev->mtu); diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 4796f6a8828caa47ba0f1b980cdee3b1ec1fb97f..e836c9c477f6755e97a469b5f0cf588d8c1aa21c 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -1850,8 +1850,13 @@ static int modify_qp(struct uverbs_attr_bundle *attrs, attr->path_mtu = cmd->base.path_mtu; if (cmd->base.attr_mask & IB_QP_PATH_MIG_STATE) attr->path_mig_state = cmd->base.path_mig_state; - if (cmd->base.attr_mask & IB_QP_QKEY) + if (cmd->base.attr_mask & IB_QP_QKEY) { + if (cmd->base.qkey & IB_QP_SET_QKEY && !capable(CAP_NET_RAW)) { + ret = -EPERM; + goto release_qp; + } attr->qkey = cmd->base.qkey; + } if (cmd->base.attr_mask & IB_QP_RQ_PSN) attr->rq_psn = cmd->base.rq_psn; if (cmd->base.attr_mask & IB_QP_SQ_PSN) diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index fbace69672ca921f5d13ba9ab64462dda8d95e21..7c9c79c139411cc9efca2b12ad4ed56935f14491 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -222,8 
+222,12 @@ static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue, spin_lock_irq(&ev_queue->lock); while (list_empty(&ev_queue->event_list)) { - spin_unlock_irq(&ev_queue->lock); + if (ev_queue->is_closed) { + spin_unlock_irq(&ev_queue->lock); + return -EIO; + } + spin_unlock_irq(&ev_queue->lock); if (filp->f_flags & O_NONBLOCK) return -EAGAIN; @@ -233,12 +237,6 @@ static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue, return -ERESTARTSYS; spin_lock_irq(&ev_queue->lock); - - /* If device was disassociated and no event exists set an error */ - if (list_empty(&ev_queue->event_list) && ev_queue->is_closed) { - spin_unlock_irq(&ev_queue->lock); - return -EIO; - } } event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list); diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h index 9e278d23eb82b3aa1e7902a04bbf4e6453f956ad..ea81b2497511a4ced31f7e65846d3bb877710855 100644 --- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h @@ -136,8 +136,6 @@ struct bnxt_re_dev { struct delayed_work worker; u8 cur_prio_map; - u16 active_speed; - u8 active_width; /* FP Notification Queue (CQ & SRQ) */ struct tasklet_struct nq_task; diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index ef47c32a53cb514b387e33da33df53eab89072a3..abef0b8baa7c31b43b0e8f93bf4d7f48e5466a3d 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -208,6 +208,7 @@ int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num, { struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; + int rc; memset(port_attr, 0, sizeof(*port_attr)); @@ -237,10 +238,10 @@ int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num, port_attr->sm_sl = 0; port_attr->subnet_timeout = 0; port_attr->init_type_reply = 0; - port_attr->active_speed = rdev->active_speed; - port_attr->active_width = rdev->active_width; + rc = ib_get_eth_speed(&rdev->ibdev, port_num, &port_attr->active_speed, + &port_attr->active_width); - return 0; + return rc; } int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num, @@ -3413,9 +3414,7 @@ static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp, udwr.remote_qkey = gsi_sqp->qplib_qp.qkey; /* post data received in the send queue */ - rc = bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr); - - return 0; + return bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr); } static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc, diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 729a2f5318ccfdd2c349760ad912145bb6201b95..b42166fe7454045c87a442d62695129ec7e83b7a 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -1178,8 +1178,6 @@ static int bnxt_re_ib_init(struct bnxt_re_dev *rdev) return rc; } dev_info(rdev_to_dev(rdev), "Device registered with IB successfully"); - ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed, - &rdev->active_width); set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags); event = netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev) ? 
@@ -1437,6 +1435,10 @@ static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable) { struct bnxt_qplib_cc_param cc_param = {}; + /* Do not enable congestion control on VFs */ + if (rdev->is_virtfn) + return; + /* Currently enabling only for GenP5 adapters */ if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) return; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index d5d418a8b003a6372b3159e29ca1f72529ccb91b..91aed77ce40d5f91f4622588ba691c100bc520bc 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -2057,6 +2057,12 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) u32 pg_sz_lvl; int rc; + if (!cq->dpi) { + dev_err(&rcfw->pdev->dev, + "FP: CREATE_CQ failed due to NULL DPI\n"); + return -EINVAL; + } + hwq_attr.res = res; hwq_attr.depth = cq->max_wqe; hwq_attr.stride = sizeof(struct cq_base); @@ -2070,11 +2076,6 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) CMDQ_BASE_OPCODE_CREATE_CQ, sizeof(req)); - if (!cq->dpi) { - dev_err(&rcfw->pdev->dev, - "FP: CREATE_CQ failed due to NULL DPI\n"); - return -EINVAL; - } req.dpi = cpu_to_le32(cq->dpi->dpi); req.cq_handle = cpu_to_le64(cq->cq_handle); req.cq_size = cpu_to_le32(cq->hwq.max_elements); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c index 7674136c08b25afd19dfc6532e72b4b883c25dd0..5fd8f7c90bb06a8b734f568c1c24c06a70457282 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c @@ -215,17 +215,9 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq, return -EINVAL; hwq_attr->sginfo->npages = npages; } else { - unsigned long sginfo_num_pages = ib_umem_num_dma_blocks( - hwq_attr->sginfo->umem, hwq_attr->sginfo->pgsize); - + npages = ib_umem_num_dma_blocks(hwq_attr->sginfo->umem, + hwq_attr->sginfo->pgsize); hwq->is_user = true; - npages = sginfo_num_pages; - npages = (npages * PAGE_SIZE) / - BIT_ULL(hwq_attr->sginfo->pgshft); - if ((sginfo_num_pages * PAGE_SIZE) % - BIT_ULL(hwq_attr->sginfo->pgshft)) - if (!npages) - npages++; } if (npages == MAX_PBL_LVL_0_PGS && !hwq_attr->sginfo->nopte) { diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index d5ad0861c537fe2a947cd173e7d15f52cee87fc5..ab45f9d4bb02fecfb448136875a6addf0286a12d 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -614,16 +614,15 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr, /* Free the hwq if it already exist, must be a rereg */ if (mr->hwq.max_elements) bnxt_qplib_free_hwq(res, &mr->hwq); - /* Use system PAGE_SIZE */ hwq_attr.res = res; hwq_attr.depth = pages; - hwq_attr.stride = buf_pg_size; + hwq_attr.stride = sizeof(dma_addr_t); hwq_attr.type = HWQ_TYPE_MR; hwq_attr.sginfo = &sginfo; hwq_attr.sginfo->umem = umem; hwq_attr.sginfo->npages = pages; - hwq_attr.sginfo->pgsize = PAGE_SIZE; - hwq_attr.sginfo->pgshft = PAGE_SHIFT; + hwq_attr.sginfo->pgsize = buf_pg_size; + hwq_attr.sginfo->pgshft = ilog2(buf_pg_size); rc = bnxt_qplib_alloc_init_hwq(&mr->hwq, &hwq_attr); if (rc) { dev_err(&res->pdev->dev, diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c index 8eca6c14d0cfca3db186810789058aaeecbcae21..2a195c4b0f17de077a87b6ff7a8fed4e837f03ad 100644 --- a/drivers/infiniband/hw/efa/efa_verbs.c +++ b/drivers/infiniband/hw/efa/efa_verbs.c @@ 
-1403,7 +1403,7 @@ static int pbl_continuous_initialize(struct efa_dev *dev, */ static int pbl_indirect_initialize(struct efa_dev *dev, struct pbl_context *pbl) { - u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, PAGE_SIZE); + u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, EFA_CHUNK_PAYLOAD_SIZE); struct scatterlist *sgl; int sg_dma_cnt, err; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index eba5fdc107036238c821a1996704e9948ac4a9a1..8f7eb11066b43e8a12f9db59cbb5dafe6a6de7ca 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -4565,11 +4565,9 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, mtu = ib_mtu_enum_to_int(ib_mtu); if (WARN_ON(mtu <= 0)) return -EINVAL; -#define MAX_LP_MSG_LEN 16384 - /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 16KB */ - lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu); - if (WARN_ON(lp_pktn_ini >= 0xF)) - return -EINVAL; +#define MIN_LP_MSG_LEN 1024 + /* mtu * (2 ^ lp_pktn_ini) should be in the range of 1024 to mtu */ + lp_pktn_ini = ilog2(max(mtu, MIN_LP_MSG_LEN) / mtu); if (attr_mask & IB_QP_PATH_MTU) { hr_reg_write(context, QPC_MTU, ib_mtu); @@ -4994,7 +4992,6 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp, static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout) { #define QP_ACK_TIMEOUT_MAX_HIP08 20 -#define QP_ACK_TIMEOUT_OFFSET 10 #define QP_ACK_TIMEOUT_MAX 31 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) { @@ -5003,7 +5000,7 @@ static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout) "local ACK timeout shall be 0 to 20.\n"); return false; } - *timeout += QP_ACK_TIMEOUT_OFFSET; + *timeout += HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08; } else if (hr_dev->pci_dev->revision > PCI_REVISION_ID_HIP08) { if (*timeout > QP_ACK_TIMEOUT_MAX) { ibdev_warn(&hr_dev->ib_dev, @@ -5289,6 +5286,18 @@ out: return ret; } +static u8 get_qp_timeout_attr(struct hns_roce_dev *hr_dev, + struct hns_roce_v2_qp_context *context) +{ + u8 timeout; + + timeout = (u8)hr_reg_read(context, QPC_AT); + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) + timeout -= HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08; + + return timeout; +} + static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) @@ -5366,7 +5375,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, qp_attr->max_dest_rd_atomic = 1 << hr_reg_read(&context, QPC_RR_MAX); qp_attr->min_rnr_timer = (u8)hr_reg_read(&context, QPC_MIN_RNR_TIME); - qp_attr->timeout = (u8)hr_reg_read(&context, QPC_AT); + qp_attr->timeout = get_qp_timeout_attr(hr_dev, &context); qp_attr->retry_cnt = hr_reg_read(&context, QPC_RETRY_NUM_INIT); qp_attr->rnr_retry = hr_reg_read(&context, QPC_RNR_NUM_INIT); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index 1b44d2434ab415a756cb36fe6d3df6ee5d8bf637..7033eae2407c541c88a07d4a8543678c1fb630f7 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -44,6 +44,8 @@ #define HNS_ROCE_V2_MAX_XRCD_NUM 0x1000000 #define HNS_ROCE_V2_RSV_XRCD_NUM 0 +#define HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08 10 + #define HNS_ROCE_V3_SCCC_SZ 64 #define HNS_ROCE_V3_GMV_ENTRY_SZ 32 diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 
37a5cf62f88b48b12293946e311bdd5d067edbaf..14376490ac226abce81b3f4f27f58726f5994a29 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -33,6 +33,7 @@ #include #include +#include #include "hns_roce_device.h" #include "hns_roce_cmd.h" #include "hns_roce_hem.h" @@ -909,6 +910,44 @@ static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev, return page_cnt; } +static u64 cal_pages_per_l1ba(unsigned int ba_per_bt, unsigned int hopnum) +{ + return int_pow(ba_per_bt, hopnum - 1); +} + +static unsigned int cal_best_bt_pg_sz(struct hns_roce_dev *hr_dev, + struct hns_roce_mtr *mtr, + unsigned int pg_shift) +{ + unsigned long cap = hr_dev->caps.page_size_cap; + struct hns_roce_buf_region *re; + unsigned int pgs_per_l1ba; + unsigned int ba_per_bt; + unsigned int ba_num; + int i; + + for_each_set_bit_from(pg_shift, &cap, sizeof(cap) * BITS_PER_BYTE) { + if (!(BIT(pg_shift) & cap)) + continue; + + ba_per_bt = BIT(pg_shift) / BA_BYTE_LEN; + ba_num = 0; + for (i = 0; i < mtr->hem_cfg.region_count; i++) { + re = &mtr->hem_cfg.region[i]; + if (re->hopnum == 0) + continue; + + pgs_per_l1ba = cal_pages_per_l1ba(ba_per_bt, re->hopnum); + ba_num += DIV_ROUND_UP(re->count, pgs_per_l1ba); + } + + if (ba_num <= ba_per_bt) + return pg_shift; + } + + return 0; +} + static int mtr_alloc_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, unsigned int ba_page_shift) { @@ -917,6 +956,10 @@ static int mtr_alloc_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, hns_roce_hem_list_init(&mtr->hem_list); if (!cfg->is_direct) { + ba_page_shift = cal_best_bt_pg_sz(hr_dev, mtr, ba_page_shift); + if (!ba_page_shift) + return -ERANGE; + ret = hns_roce_hem_list_request(hr_dev, &mtr->hem_list, cfg->region, cfg->region_count, ba_page_shift); diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c index 6242ab6af77fa9aaadec27418d49ee552a46bb98..9c4fe4fa90018142f67fd0bd7bd56a9c57b8cdce 100644 --- a/drivers/infiniband/hw/irdma/verbs.c +++ b/drivers/infiniband/hw/irdma/verbs.c @@ -522,11 +522,6 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) if (!iwqp->user_mode) cancel_delayed_work_sync(&iwqp->dwork_flush); - irdma_qp_rem_ref(&iwqp->ibqp); - wait_for_completion(&iwqp->free_qp); - irdma_free_lsmm_rsrc(iwqp); - irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp); - if (!iwqp->user_mode) { if (iwqp->iwscq) { irdma_clean_cqes(iwqp, iwqp->iwscq); @@ -534,6 +529,12 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) irdma_clean_cqes(iwqp, iwqp->iwrcq); } } + + irdma_qp_rem_ref(&iwqp->ibqp); + wait_for_completion(&iwqp->free_qp); + irdma_free_lsmm_rsrc(iwqp); + irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp); + irdma_remove_push_mmap_entries(iwqp); irdma_free_qp_rsrc(iwqp); @@ -3291,6 +3292,7 @@ static int irdma_post_send(struct ib_qp *ibqp, break; case IB_WR_LOCAL_INV: info.op_type = IRDMA_OP_TYPE_INV_STAG; + info.local_fence = info.read_fence; info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey; err = irdma_uk_stag_local_invalidate(ukqp, &info, true); break; diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c index 1c06920505d25fb8f975bcac64f25eadbb610aa2..93257fa5aae8eb6f59b5cf58cda945246933c681 100644 --- a/drivers/infiniband/hw/mlx5/counters.c +++ b/drivers/infiniband/hw/mlx5/counters.c @@ -209,7 +209,8 @@ static const struct mlx5_ib_counters *get_counters(struct mlx5_ib_dev *dev, !vport_qcounters_supported(dev)) || !port_num) 
return &dev->port[0].cnts; - return &dev->port[port_num - 1].cnts; + return is_mdev_switchdev_mode(dev->mdev) ? + &dev->port[1].cnts : &dev->port[port_num - 1].cnts; } /** @@ -262,7 +263,7 @@ static struct rdma_hw_stats * mlx5_ib_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num) { struct mlx5_ib_dev *dev = to_mdev(ibdev); - const struct mlx5_ib_counters *cnts = &dev->port[port_num - 1].cnts; + const struct mlx5_ib_counters *cnts = get_counters(dev, port_num); return do_alloc_stats(cnts); } @@ -329,6 +330,7 @@ static int mlx5_ib_query_q_counters_vport(struct mlx5_ib_dev *dev, { u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {}; u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {}; + struct mlx5_core_dev *mdev; __be32 val; int ret, i; @@ -336,12 +338,16 @@ static int mlx5_ib_query_q_counters_vport(struct mlx5_ib_dev *dev, dev->port[port_num].rep->vport == MLX5_VPORT_UPLINK) return 0; + mdev = mlx5_eswitch_get_core_dev(dev->port[port_num].rep->esw); + if (!mdev) + return -EOPNOTSUPP; + MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER); MLX5_SET(query_q_counter_in, in, other_vport, 1); MLX5_SET(query_q_counter_in, in, vport_number, dev->port[port_num].rep->vport); MLX5_SET(query_q_counter_in, in, aggregate, 1); - ret = mlx5_cmd_exec_inout(dev->mdev, query_q_counter, in, out); + ret = mlx5_cmd_exec_inout(mdev, query_q_counter, in, out); if (ret) return ret; @@ -575,43 +581,53 @@ static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev, bool is_vport = is_mdev_switchdev_mode(dev->mdev) && port_num != MLX5_VPORT_PF; const struct mlx5_ib_counter *names; - int j = 0, i; + int j = 0, i, size; names = is_vport ? vport_basic_q_cnts : basic_q_cnts; - for (i = 0; i < ARRAY_SIZE(basic_q_cnts); i++, j++) { + size = is_vport ? ARRAY_SIZE(vport_basic_q_cnts) : + ARRAY_SIZE(basic_q_cnts); + for (i = 0; i < size; i++, j++) { descs[j].name = names[i].name; - offsets[j] = basic_q_cnts[i].offset; + offsets[j] = names[i].offset; } names = is_vport ? vport_out_of_seq_q_cnts : out_of_seq_q_cnts; + size = is_vport ? ARRAY_SIZE(vport_out_of_seq_q_cnts) : + ARRAY_SIZE(out_of_seq_q_cnts); if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) { - for (i = 0; i < ARRAY_SIZE(out_of_seq_q_cnts); i++, j++) { + for (i = 0; i < size; i++, j++) { descs[j].name = names[i].name; - offsets[j] = out_of_seq_q_cnts[i].offset; + offsets[j] = names[i].offset; } } names = is_vport ? vport_retrans_q_cnts : retrans_q_cnts; + size = is_vport ? ARRAY_SIZE(vport_retrans_q_cnts) : + ARRAY_SIZE(retrans_q_cnts); if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) { - for (i = 0; i < ARRAY_SIZE(retrans_q_cnts); i++, j++) { + for (i = 0; i < size; i++, j++) { descs[j].name = names[i].name; - offsets[j] = retrans_q_cnts[i].offset; + offsets[j] = names[i].offset; } } names = is_vport ? vport_extended_err_cnts : extended_err_cnts; + size = is_vport ? ARRAY_SIZE(vport_extended_err_cnts) : + ARRAY_SIZE(extended_err_cnts); if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) { - for (i = 0; i < ARRAY_SIZE(extended_err_cnts); i++, j++) { + for (i = 0; i < size; i++, j++) { descs[j].name = names[i].name; - offsets[j] = extended_err_cnts[i].offset; + offsets[j] = names[i].offset; } } names = is_vport ? vport_roce_accl_cnts : roce_accl_cnts; + size = is_vport ? 
ARRAY_SIZE(vport_roce_accl_cnts) : + ARRAY_SIZE(roce_accl_cnts); if (MLX5_CAP_GEN(dev->mdev, roce_accl)) { - for (i = 0; i < ARRAY_SIZE(roce_accl_cnts); i++, j++) { + for (i = 0; i < size; i++, j++) { descs[j].name = names[i].name; - offsets[j] = roce_accl_cnts[i].offset; + offsets[j] = names[i].offset; } } @@ -661,25 +677,37 @@ static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev, static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev, struct mlx5_ib_counters *cnts, u32 port_num) { - u32 num_counters, num_op_counters = 0; + bool is_vport = is_mdev_switchdev_mode(dev->mdev) && + port_num != MLX5_VPORT_PF; + u32 num_counters, num_op_counters = 0, size; - num_counters = ARRAY_SIZE(basic_q_cnts); + size = is_vport ? ARRAY_SIZE(vport_basic_q_cnts) : + ARRAY_SIZE(basic_q_cnts); + num_counters = size; + size = is_vport ? ARRAY_SIZE(vport_out_of_seq_q_cnts) : + ARRAY_SIZE(out_of_seq_q_cnts); if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) - num_counters += ARRAY_SIZE(out_of_seq_q_cnts); + num_counters += size; + size = is_vport ? ARRAY_SIZE(vport_retrans_q_cnts) : + ARRAY_SIZE(retrans_q_cnts); if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) - num_counters += ARRAY_SIZE(retrans_q_cnts); + num_counters += size; + size = is_vport ? ARRAY_SIZE(vport_extended_err_cnts) : + ARRAY_SIZE(extended_err_cnts); if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) - num_counters += ARRAY_SIZE(extended_err_cnts); + num_counters += size; + size = is_vport ? ARRAY_SIZE(vport_roce_accl_cnts) : + ARRAY_SIZE(roce_accl_cnts); if (MLX5_CAP_GEN(dev->mdev, roce_accl)) - num_counters += ARRAY_SIZE(roce_accl_cnts); + num_counters += size; cnts->num_q_counters = num_counters; - if (is_mdev_switchdev_mode(dev->mdev) && port_num != MLX5_VPORT_PF) + if (is_vport) goto skip_non_qcounters; if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) { @@ -725,11 +753,11 @@ err: static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev) { u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {}; - int num_cnt_ports; + int num_cnt_ports = dev->num_ports; int i, j; - num_cnt_ports = (!is_mdev_switchdev_mode(dev->mdev) || - vport_qcounters_supported(dev)) ? dev->num_ports : 1; + if (is_mdev_switchdev_mode(dev->mdev)) + num_cnt_ports = min(2, num_cnt_ports); MLX5_SET(dealloc_q_counter_in, in, opcode, MLX5_CMD_OP_DEALLOC_Q_COUNTER); @@ -761,15 +789,22 @@ static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev) { u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {}; u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {}; - int num_cnt_ports; + int num_cnt_ports = dev->num_ports; int err = 0; int i; bool is_shared; MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER); is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0; - num_cnt_ports = (!is_mdev_switchdev_mode(dev->mdev) || - vport_qcounters_supported(dev)) ? dev->num_ports : 1; + + /* + * In switchdev we need to allocate two ports, one that is used for + * the device Q_counters and it is essentially the real Q_counters of + * this device, while the other is used as a helper for PF to be able to + * query all other vports. 
+ */ + if (is_mdev_switchdev_mode(dev->mdev)) + num_cnt_ports = min(2, num_cnt_ports); for (i = 0; i < num_cnt_ports; i++) { err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts, i); diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c index 3008632a6c2064a81c5c8d5ded5e2f98385319de..1e419e080b5352bbcdf3c7faa4277cca97f6e3ce 100644 --- a/drivers/infiniband/hw/mlx5/fs.c +++ b/drivers/infiniband/hw/mlx5/fs.c @@ -695,8 +695,6 @@ static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_ib_dev *dev, struct mlx5_flow_table_attr ft_attr = {}; struct mlx5_flow_table *ft; - if (mlx5_ib_shared_ft_allowed(&dev->ib_dev)) - ft_attr.uid = MLX5_SHARED_RESOURCE_UID; ft_attr.prio = priority; ft_attr.max_fte = num_entries; ft_attr.flags = flags; @@ -2025,6 +2023,237 @@ static int flow_matcher_cleanup(struct ib_uobject *uobject, return 0; } +static int steering_anchor_create_ft(struct mlx5_ib_dev *dev, + struct mlx5_ib_flow_prio *ft_prio, + enum mlx5_flow_namespace_type ns_type) +{ + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_namespace *ns; + struct mlx5_flow_table *ft; + + if (ft_prio->anchor.ft) + return 0; + + ns = mlx5_get_flow_namespace(dev->mdev, ns_type); + if (!ns) + return -EOPNOTSUPP; + + ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED; + ft_attr.uid = MLX5_SHARED_RESOURCE_UID; + ft_attr.prio = 0; + ft_attr.max_fte = 2; + ft_attr.level = 1; + + ft = mlx5_create_flow_table(ns, &ft_attr); + if (IS_ERR(ft)) + return PTR_ERR(ft); + + ft_prio->anchor.ft = ft; + + return 0; +} + +static void steering_anchor_destroy_ft(struct mlx5_ib_flow_prio *ft_prio) +{ + if (ft_prio->anchor.ft) { + mlx5_destroy_flow_table(ft_prio->anchor.ft); + ft_prio->anchor.ft = NULL; + } +} + +static int +steering_anchor_create_fg_drop(struct mlx5_ib_flow_prio *ft_prio) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_flow_group *fg; + void *flow_group_in; + int err = 0; + + if (ft_prio->anchor.fg_drop) + return 0; + + flow_group_in = kvzalloc(inlen, GFP_KERNEL); + if (!flow_group_in) + return -ENOMEM; + + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); + + fg = mlx5_create_flow_group(ft_prio->anchor.ft, flow_group_in); + if (IS_ERR(fg)) { + err = PTR_ERR(fg); + goto out; + } + + ft_prio->anchor.fg_drop = fg; + +out: + kvfree(flow_group_in); + + return err; +} + +static void +steering_anchor_destroy_fg_drop(struct mlx5_ib_flow_prio *ft_prio) +{ + if (ft_prio->anchor.fg_drop) { + mlx5_destroy_flow_group(ft_prio->anchor.fg_drop); + ft_prio->anchor.fg_drop = NULL; + } +} + +static int +steering_anchor_create_fg_goto_table(struct mlx5_ib_flow_prio *ft_prio) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_flow_group *fg; + void *flow_group_in; + int err = 0; + + if (ft_prio->anchor.fg_goto_table) + return 0; + + flow_group_in = kvzalloc(inlen, GFP_KERNEL); + if (!flow_group_in) + return -ENOMEM; + + fg = mlx5_create_flow_group(ft_prio->anchor.ft, flow_group_in); + if (IS_ERR(fg)) { + err = PTR_ERR(fg); + goto out; + } + ft_prio->anchor.fg_goto_table = fg; + +out: + kvfree(flow_group_in); + + return err; +} + +static void +steering_anchor_destroy_fg_goto_table(struct mlx5_ib_flow_prio *ft_prio) +{ + if (ft_prio->anchor.fg_goto_table) { + mlx5_destroy_flow_group(ft_prio->anchor.fg_goto_table); + ft_prio->anchor.fg_goto_table = NULL; + } +} + +static int +steering_anchor_create_rule_drop(struct mlx5_ib_flow_prio *ft_prio) +{ + struct mlx5_flow_act flow_act = {}; + struct 
mlx5_flow_handle *handle; + + if (ft_prio->anchor.rule_drop) + return 0; + + flow_act.fg = ft_prio->anchor.fg_drop; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; + + handle = mlx5_add_flow_rules(ft_prio->anchor.ft, NULL, &flow_act, + NULL, 0); + if (IS_ERR(handle)) + return PTR_ERR(handle); + + ft_prio->anchor.rule_drop = handle; + + return 0; +} + +static void steering_anchor_destroy_rule_drop(struct mlx5_ib_flow_prio *ft_prio) +{ + if (ft_prio->anchor.rule_drop) { + mlx5_del_flow_rules(ft_prio->anchor.rule_drop); + ft_prio->anchor.rule_drop = NULL; + } +} + +static int +steering_anchor_create_rule_goto_table(struct mlx5_ib_flow_prio *ft_prio) +{ + struct mlx5_flow_destination dest = {}; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_handle *handle; + + if (ft_prio->anchor.rule_goto_table) + return 0; + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; + flow_act.fg = ft_prio->anchor.fg_goto_table; + + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest.ft = ft_prio->flow_table; + + handle = mlx5_add_flow_rules(ft_prio->anchor.ft, NULL, &flow_act, + &dest, 1); + if (IS_ERR(handle)) + return PTR_ERR(handle); + + ft_prio->anchor.rule_goto_table = handle; + + return 0; +} + +static void +steering_anchor_destroy_rule_goto_table(struct mlx5_ib_flow_prio *ft_prio) +{ + if (ft_prio->anchor.rule_goto_table) { + mlx5_del_flow_rules(ft_prio->anchor.rule_goto_table); + ft_prio->anchor.rule_goto_table = NULL; + } +} + +static int steering_anchor_create_res(struct mlx5_ib_dev *dev, + struct mlx5_ib_flow_prio *ft_prio, + enum mlx5_flow_namespace_type ns_type) +{ + int err; + + err = steering_anchor_create_ft(dev, ft_prio, ns_type); + if (err) + return err; + + err = steering_anchor_create_fg_drop(ft_prio); + if (err) + goto destroy_ft; + + err = steering_anchor_create_fg_goto_table(ft_prio); + if (err) + goto destroy_fg_drop; + + err = steering_anchor_create_rule_drop(ft_prio); + if (err) + goto destroy_fg_goto_table; + + err = steering_anchor_create_rule_goto_table(ft_prio); + if (err) + goto destroy_rule_drop; + + return 0; + +destroy_rule_drop: + steering_anchor_destroy_rule_drop(ft_prio); +destroy_fg_goto_table: + steering_anchor_destroy_fg_goto_table(ft_prio); +destroy_fg_drop: + steering_anchor_destroy_fg_drop(ft_prio); +destroy_ft: + steering_anchor_destroy_ft(ft_prio); + + return err; +} + +static void mlx5_steering_anchor_destroy_res(struct mlx5_ib_flow_prio *ft_prio) +{ + steering_anchor_destroy_rule_goto_table(ft_prio); + steering_anchor_destroy_rule_drop(ft_prio); + steering_anchor_destroy_fg_goto_table(ft_prio); + steering_anchor_destroy_fg_drop(ft_prio); + steering_anchor_destroy_ft(ft_prio); +} + static int steering_anchor_cleanup(struct ib_uobject *uobject, enum rdma_remove_reason why, struct uverbs_attr_bundle *attrs) @@ -2035,6 +2264,9 @@ static int steering_anchor_cleanup(struct ib_uobject *uobject, return -EBUSY; mutex_lock(&obj->dev->flow_db->lock); + if (!--obj->ft_prio->anchor.rule_goto_table_ref) + steering_anchor_destroy_rule_goto_table(obj->ft_prio); + put_flow_table(obj->dev, obj->ft_prio, true); mutex_unlock(&obj->dev->flow_db->lock); @@ -2042,6 +2274,24 @@ static int steering_anchor_cleanup(struct ib_uobject *uobject, return 0; } +static void fs_cleanup_anchor(struct mlx5_ib_flow_prio *prio, + int count) +{ + while (count--) + mlx5_steering_anchor_destroy_res(&prio[count]); +} + +void mlx5_ib_fs_cleanup_anchor(struct mlx5_ib_dev *dev) +{ + fs_cleanup_anchor(dev->flow_db->prios, MLX5_IB_NUM_FLOW_FT); + 
fs_cleanup_anchor(dev->flow_db->egress_prios, MLX5_IB_NUM_FLOW_FT); + fs_cleanup_anchor(dev->flow_db->sniffer, MLX5_IB_NUM_SNIFFER_FTS); + fs_cleanup_anchor(dev->flow_db->egress, MLX5_IB_NUM_EGRESS_FTS); + fs_cleanup_anchor(dev->flow_db->fdb, MLX5_IB_NUM_FDB_FTS); + fs_cleanup_anchor(dev->flow_db->rdma_rx, MLX5_IB_NUM_FLOW_FT); + fs_cleanup_anchor(dev->flow_db->rdma_tx, MLX5_IB_NUM_FLOW_FT); +} + static int mlx5_ib_matcher_ns(struct uverbs_attr_bundle *attrs, struct mlx5_ib_flow_matcher *obj) { @@ -2182,21 +2432,31 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_STEERING_ANCHOR_CREATE)( return -ENOMEM; mutex_lock(&dev->flow_db->lock); + ft_prio = _get_flow_table(dev, priority, ns_type, 0); if (IS_ERR(ft_prio)) { - mutex_unlock(&dev->flow_db->lock); err = PTR_ERR(ft_prio); goto free_obj; } ft_prio->refcount++; - ft_id = mlx5_flow_table_id(ft_prio->flow_table); - mutex_unlock(&dev->flow_db->lock); + + if (!ft_prio->anchor.rule_goto_table_ref) { + err = steering_anchor_create_res(dev, ft_prio, ns_type); + if (err) + goto put_flow_table; + } + + ft_prio->anchor.rule_goto_table_ref++; + + ft_id = mlx5_flow_table_id(ft_prio->anchor.ft); err = uverbs_copy_to(attrs, MLX5_IB_ATTR_STEERING_ANCHOR_FT_ID, &ft_id, sizeof(ft_id)); if (err) - goto put_flow_table; + goto destroy_res; + + mutex_unlock(&dev->flow_db->lock); uobj->object = obj; obj->dev = dev; @@ -2205,8 +2465,10 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_STEERING_ANCHOR_CREATE)( return 0; +destroy_res: + --ft_prio->anchor.rule_goto_table_ref; + mlx5_steering_anchor_destroy_res(ft_prio); put_flow_table: - mutex_lock(&dev->flow_db->lock); put_flow_table(dev, ft_prio, true); mutex_unlock(&dev->flow_db->lock); free_obj: diff --git a/drivers/infiniband/hw/mlx5/fs.h b/drivers/infiniband/hw/mlx5/fs.h index ad320adaf32172e50a67401ac33eec9441b3be5a..b9734904f5f01ca89427c79c4851e0fc378318dd 100644 --- a/drivers/infiniband/hw/mlx5/fs.h +++ b/drivers/infiniband/hw/mlx5/fs.h @@ -10,6 +10,7 @@ #if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS) int mlx5_ib_fs_init(struct mlx5_ib_dev *dev); +void mlx5_ib_fs_cleanup_anchor(struct mlx5_ib_dev *dev); #else static inline int mlx5_ib_fs_init(struct mlx5_ib_dev *dev) { @@ -21,9 +22,24 @@ static inline int mlx5_ib_fs_init(struct mlx5_ib_dev *dev) mutex_init(&dev->flow_db->lock); return 0; } + +inline void mlx5_ib_fs_cleanup_anchor(struct mlx5_ib_dev *dev) {} #endif + static inline void mlx5_ib_fs_cleanup(struct mlx5_ib_dev *dev) { + /* When a steering anchor is created, a special flow table is also + * created for the user to reference. Since the user can reference it, + * the kernel cannot trust that when the user destroys the steering + * anchor, they no longer reference the flow table. + * + * To address this issue, when a user destroys a steering anchor, only + * the flow steering rule in the table is destroyed, but the table + * itself is kept to deal with the above scenario. The remaining + * resources are only removed when the RDMA device is destroyed, which + * is a safe assumption that all references are gone. 
+ */ + mlx5_ib_fs_cleanup_anchor(dev); kfree(dev->flow_db); } #endif /* _MLX5_IB_FS_H */ diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 5d45de223c43a9afed62a70265b9e32d75db4e10..f0b394ed7452a652461c42c99ef47f0e2d540a37 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -4275,6 +4275,9 @@ const struct mlx5_ib_profile raw_eth_profile = { STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR, mlx5_ib_stage_post_ib_reg_umr_init, NULL), + STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP, + mlx5_ib_stage_delay_drop_init, + mlx5_ib_stage_delay_drop_cleanup), STAGE_CREATE(MLX5_IB_STAGE_RESTRACK, mlx5_ib_restrack_init, NULL), diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 4bceef878c437c96e8169ba61c4d3b85288cce37..9c33d960af3c5cd3887be7546ad496f2666c1394 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -238,8 +238,19 @@ enum { #define MLX5_IB_NUM_SNIFFER_FTS 2 #define MLX5_IB_NUM_EGRESS_FTS 1 #define MLX5_IB_NUM_FDB_FTS MLX5_BY_PASS_NUM_REGULAR_PRIOS + +struct mlx5_ib_anchor { + struct mlx5_flow_table *ft; + struct mlx5_flow_group *fg_goto_table; + struct mlx5_flow_group *fg_drop; + struct mlx5_flow_handle *rule_goto_table; + struct mlx5_flow_handle *rule_drop; + unsigned int rule_goto_table_ref; +}; + struct mlx5_ib_flow_prio { struct mlx5_flow_table *flow_table; + struct mlx5_ib_anchor anchor; unsigned int refcount; }; @@ -1588,6 +1599,9 @@ static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev) MLX5_CAP_PORT_SELECTION(dev->mdev, port_select_flow_table_bypass)) return 0; + if (mlx5_lag_is_lacp_owner(dev->mdev) && !dev->lag_active) + return 0; + return dev->lag_active || (MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 && MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity)); diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 70ca8ffa9256b1d11a48bbc5d50a6506a5813671..78b96bfb4e6ac9df9bb24f0f5a29a76ed4520b0a 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -1237,6 +1237,9 @@ static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev, MLX5_SET(create_tis_in, in, uid, to_mpd(pd)->uid); MLX5_SET(tisc, tisc, transport_domain, tdn); + if (!mlx5_ib_lag_should_assign_affinity(dev) && + mlx5_lag_is_lacp_owner(dev->mdev)) + MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1); if (qp->flags & IB_QP_CREATE_SOURCE_QPN) MLX5_SET(tisc, tisc, underlay_qpn, qp->underlay_qpn); diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c index 0c0ae214c3a9b7ca30d346e1b29ab6d1ceb1ab5d..5111735aafaed8286a72a4972750a11e2c4cedbc 100644 --- a/drivers/infiniband/sw/rxe/rxe_comp.c +++ b/drivers/infiniband/sw/rxe/rxe_comp.c @@ -115,15 +115,16 @@ static enum ib_wc_opcode wr_to_wc_opcode(enum ib_wr_opcode opcode) void retransmit_timer(struct timer_list *t) { struct rxe_qp *qp = from_timer(qp, t, retrans_timer); + unsigned long flags; rxe_dbg_qp(qp, "retransmit timer fired\n"); - spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); if (qp->valid) { qp->comp.timeout = 1; rxe_sched_task(&qp->comp.task); } - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); } void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb) @@ -481,11 +482,13 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe) static void comp_check_sq_drain_done(struct rxe_qp *qp) { - spin_lock_bh(&qp->state_lock); + unsigned 
long flags; + + spin_lock_irqsave(&qp->state_lock, flags); if (unlikely(qp_state(qp) == IB_QPS_SQD)) { if (qp->attr.sq_draining && qp->comp.psn == qp->req.psn) { qp->attr.sq_draining = 0; - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); if (qp->ibqp.event_handler) { struct ib_event ev; @@ -499,7 +502,7 @@ static void comp_check_sq_drain_done(struct rxe_qp *qp) return; } } - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); } static inline enum comp_state complete_ack(struct rxe_qp *qp, @@ -625,13 +628,15 @@ static void free_pkt(struct rxe_pkt_info *pkt) */ static void reset_retry_timer(struct rxe_qp *qp) { + unsigned long flags; + if (qp_type(qp) == IB_QPT_RC && qp->qp_timeout_jiffies) { - spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); if (qp_state(qp) >= IB_QPS_RTS && psn_compare(qp->req.psn, qp->comp.psn) > 0) mod_timer(&qp->retrans_timer, jiffies + qp->qp_timeout_jiffies); - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); } } @@ -643,18 +648,19 @@ int rxe_completer(struct rxe_qp *qp) struct rxe_pkt_info *pkt = NULL; enum comp_state state; int ret; + unsigned long flags; - spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); if (!qp->valid || qp_state(qp) == IB_QPS_ERR || qp_state(qp) == IB_QPS_RESET) { bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR); drain_resp_pkts(qp); flush_send_queue(qp, notify); - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); goto exit; } - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); if (qp->comp.timeout) { qp->comp.timeout_retry = 1; diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c index 31a25aaa44a07806f8464e9de8d502f2762c6543..d5486cbb3f100404808e1e1df5b68452239292cc 100644 --- a/drivers/infiniband/sw/rxe/rxe_cq.c +++ b/drivers/infiniband/sw/rxe/rxe_cq.c @@ -113,14 +113,14 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited) queue_advance_producer(cq->queue, QUEUE_TYPE_TO_CLIENT); - spin_unlock_irqrestore(&cq->cq_lock, flags); - if ((cq->notify & IB_CQ_NEXT_COMP) || (cq->notify & IB_CQ_SOLICITED && solicited)) { cq->notify = 0; cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); } + spin_unlock_irqrestore(&cq->cq_lock, flags); + return 0; } diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index 2bc7361152ea70f95c78f743098c4b6cc547fa16..cd59666158b18aa4a035f0e898e70d591274eed9 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c @@ -159,6 +159,9 @@ static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb) pkt->mask = RXE_GRH_MASK; pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph); + /* remove udp header */ + skb_pull(skb, sizeof(struct udphdr)); + rxe_rcv(skb); return 0; @@ -401,6 +404,9 @@ static int rxe_loopback(struct sk_buff *skb, struct rxe_pkt_info *pkt) return -EIO; } + /* remove udp header */ + skb_pull(skb, sizeof(struct udphdr)); + rxe_rcv(skb); return 0; @@ -412,15 +418,16 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt, int err; int is_request = pkt->mask & RXE_REQ_MASK; struct rxe_dev *rxe = to_rdev(qp->ibqp.device); + unsigned long flags; - spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); if ((is_request && (qp_state(qp) < IB_QPS_RTS)) || (!is_request && (qp_state(qp) < IB_QPS_RTR))) { - spin_unlock_bh(&qp->state_lock); + 
spin_unlock_irqrestore(&qp->state_lock, flags); rxe_dbg_qp(qp, "Packet dropped. QP is not in ready state\n"); goto drop; } - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); rxe_icrc_generate(skb, pkt); diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index 95d4a6760c3354f231417ce91d443dcde4e552cc..a569b111a9d2a88c79b5a99350e5d261942b45a5 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -176,6 +176,9 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp, spin_lock_init(&qp->rq.producer_lock); spin_lock_init(&qp->rq.consumer_lock); + skb_queue_head_init(&qp->req_pkts); + skb_queue_head_init(&qp->resp_pkts); + atomic_set(&qp->ssn, 0); atomic_set(&qp->skb_out, 0); } @@ -234,8 +237,6 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp, qp->req.opcode = -1; qp->comp.opcode = -1; - skb_queue_head_init(&qp->req_pkts); - rxe_init_task(&qp->req.task, qp, rxe_requester); rxe_init_task(&qp->comp.task, qp, rxe_completer); @@ -279,8 +280,6 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp, } } - skb_queue_head_init(&qp->resp_pkts); - rxe_init_task(&qp->resp.task, qp, rxe_responder); qp->resp.opcode = OPCODE_NONE; @@ -300,6 +299,7 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd, struct rxe_cq *rcq = to_rcq(init->recv_cq); struct rxe_cq *scq = to_rcq(init->send_cq); struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL; + unsigned long flags; rxe_get(pd); rxe_get(rcq); @@ -325,10 +325,10 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd, if (err) goto err2; - spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); qp->attr.qp_state = IB_QPS_RESET; qp->valid = 1; - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); return 0; @@ -499,24 +499,28 @@ static void rxe_qp_reset(struct rxe_qp *qp) /* move the qp to the error state */ void rxe_qp_error(struct rxe_qp *qp) { - spin_lock_bh(&qp->state_lock); + unsigned long flags; + + spin_lock_irqsave(&qp->state_lock, flags); qp->attr.qp_state = IB_QPS_ERR; /* drain work and packet queues */ rxe_sched_task(&qp->resp.task); rxe_sched_task(&qp->comp.task); rxe_sched_task(&qp->req.task); - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); } static void rxe_qp_sqd(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask) { - spin_lock_bh(&qp->state_lock); + unsigned long flags; + + spin_lock_irqsave(&qp->state_lock, flags); qp->attr.sq_draining = 1; rxe_sched_task(&qp->comp.task); rxe_sched_task(&qp->req.task); - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); } /* caller should hold qp->state_lock */ @@ -562,14 +566,16 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask, qp->attr.cur_qp_state = attr->qp_state; if (mask & IB_QP_STATE) { - spin_lock_bh(&qp->state_lock); + unsigned long flags; + + spin_lock_irqsave(&qp->state_lock, flags); err = __qp_chk_state(qp, attr, mask); if (!err) { qp->attr.qp_state = attr->qp_state; rxe_dbg_qp(qp, "state -> %s\n", qps2str[attr->qp_state]); } - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); if (err) return err; @@ -695,6 +701,8 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask, /* called by the query qp verb */ int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask) { + unsigned 
long flags; + *attr = qp->attr; attr->rq_psn = qp->resp.psn; @@ -715,12 +723,13 @@ int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask) /* Applications that get this state typically spin on it. * Yield the processor */ - spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); if (qp->attr.sq_draining) { - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); cond_resched(); + } else { + spin_unlock_irqrestore(&qp->state_lock, flags); } - spin_unlock_bh(&qp->state_lock); return 0; } @@ -743,10 +752,11 @@ int rxe_qp_chk_destroy(struct rxe_qp *qp) static void rxe_qp_do_cleanup(struct work_struct *work) { struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work); + unsigned long flags; - spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); qp->valid = 0; - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); qp->qp_timeout_jiffies = 0; if (qp_type(qp) == IB_QPT_RC) { diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c index 2f953cc74256d90c9015bd413520be8369a6ff2b..5861e42440490dd1014efad9c0a52c2d9ebb5bc4 100644 --- a/drivers/infiniband/sw/rxe/rxe_recv.c +++ b/drivers/infiniband/sw/rxe/rxe_recv.c @@ -14,6 +14,7 @@ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct rxe_qp *qp) { unsigned int pkt_type; + unsigned long flags; if (unlikely(!qp->valid)) return -EINVAL; @@ -38,19 +39,19 @@ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, return -EINVAL; } - spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); if (pkt->mask & RXE_REQ_MASK) { if (unlikely(qp_state(qp) < IB_QPS_RTR)) { - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); return -EINVAL; } } else { if (unlikely(qp_state(qp) < IB_QPS_RTS)) { - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); return -EINVAL; } } - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); return 0; } diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c index 400840c913a9520a8da6bce4a8c82257ff26a528..2171f19494bca131971d3cfaee6dba78281d1276 100644 --- a/drivers/infiniband/sw/rxe/rxe_req.c +++ b/drivers/infiniband/sw/rxe/rxe_req.c @@ -99,17 +99,18 @@ static void req_retry(struct rxe_qp *qp) void rnr_nak_timer(struct timer_list *t) { struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer); + unsigned long flags; rxe_dbg_qp(qp, "nak timer fired\n"); - spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); if (qp->valid) { /* request a send queue retry */ qp->req.need_retry = 1; qp->req.wait_for_rnr_timer = 0; rxe_sched_task(&qp->req.task); } - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); } static void req_check_sq_drain_done(struct rxe_qp *qp) @@ -118,8 +119,9 @@ static void req_check_sq_drain_done(struct rxe_qp *qp) unsigned int index; unsigned int cons; struct rxe_send_wqe *wqe; + unsigned long flags; - spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); if (qp_state(qp) == IB_QPS_SQD) { q = qp->sq.queue; index = qp->req.wqe_index; @@ -140,7 +142,7 @@ static void req_check_sq_drain_done(struct rxe_qp *qp) break; qp->attr.sq_draining = 0; - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); if (qp->ibqp.event_handler) { struct ib_event ev; @@ -154,7 +156,7 @@ static void 
req_check_sq_drain_done(struct rxe_qp *qp) return; } while (0); } - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); } static struct rxe_send_wqe *__req_next_wqe(struct rxe_qp *qp) @@ -173,6 +175,7 @@ static struct rxe_send_wqe *__req_next_wqe(struct rxe_qp *qp) static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp) { struct rxe_send_wqe *wqe; + unsigned long flags; req_check_sq_drain_done(qp); @@ -180,13 +183,13 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp) if (wqe == NULL) return NULL; - spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); if (unlikely((qp_state(qp) == IB_QPS_SQD) && (wqe->state != wqe_state_processing))) { - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); return NULL; } - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp); return wqe; @@ -676,16 +679,17 @@ int rxe_requester(struct rxe_qp *qp) struct rxe_queue *q = qp->sq.queue; struct rxe_ah *ah; struct rxe_av *av; + unsigned long flags; - spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); if (unlikely(!qp->valid)) { - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); goto exit; } if (unlikely(qp_state(qp) == IB_QPS_ERR)) { wqe = __req_next_wqe(qp); - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); if (wqe) goto err; else @@ -700,10 +704,10 @@ int rxe_requester(struct rxe_qp *qp) qp->req.wait_psn = 0; qp->req.need_retry = 0; qp->req.wait_for_rnr_timer = 0; - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); goto exit; } - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); /* we come here if the retransmit timer has fired * or if the rnr timer has fired. 
If the retransmit diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index 8a3c9c2c5a2d48d554d5a4360343158c86b18af5..64c64f5f36a810addfa84284268fb8699a6be772 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ -497,8 +497,9 @@ static enum resp_states check_rkey(struct rxe_qp *qp, if (mw->access & IB_ZERO_BASED) qp->resp.offset = mw->addr; - rxe_put(mw); rxe_get(mr); + rxe_put(mw); + mw = NULL; } else { mr = lookup_mr(qp->pd, access, rkey, RXE_LOOKUP_REMOTE); if (!mr) { @@ -1055,6 +1056,7 @@ static enum resp_states do_complete(struct rxe_qp *qp, struct ib_uverbs_wc *uwc = &cqe.uibwc; struct rxe_recv_wqe *wqe = qp->resp.wqe; struct rxe_dev *rxe = to_rdev(qp->ibqp.device); + unsigned long flags; if (!wqe) goto finish; @@ -1145,12 +1147,12 @@ static enum resp_states do_complete(struct rxe_qp *qp, return RESPST_ERR_CQ_OVERFLOW; finish: - spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); if (unlikely(qp_state(qp) == IB_QPS_ERR)) { - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); return RESPST_CHK_RESOURCE; } - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); if (unlikely(!pkt)) return RESPST_DONE; @@ -1485,18 +1487,19 @@ int rxe_responder(struct rxe_qp *qp) enum resp_states state; struct rxe_pkt_info *pkt = NULL; int ret; + unsigned long flags; - spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); if (!qp->valid || qp_state(qp) == IB_QPS_ERR || qp_state(qp) == IB_QPS_RESET) { bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR); drain_req_pkts(qp); flush_recv_queue(qp, notify); - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); goto exit; } - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED; diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 515f9ff72d18d636a4159109dcb91acd953cb751..f4321a1720000039fa88ded10d76b99d28d8a1f4 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -904,10 +904,10 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, if (!err) rxe_sched_task(&qp->req.task); - spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); if (qp_state(qp) == IB_QPS_ERR) rxe_sched_task(&qp->comp.task); - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); return err; } @@ -917,22 +917,23 @@ static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, { struct rxe_qp *qp = to_rqp(ibqp); int err; + unsigned long flags; - spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); /* caller has already called destroy_qp */ if (WARN_ON_ONCE(!qp->valid)) { - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); rxe_err_qp(qp, "qp has been destroyed"); return -EINVAL; } if (unlikely(qp_state(qp) < IB_QPS_RTS)) { - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); *bad_wr = wr; rxe_err_qp(qp, "qp not ready to send"); return -EINVAL; } - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); if (qp->is_user) { /* Utilize process context to do protocol processing */ @@ -1008,22 +1009,22 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, struct rxe_rq *rq = &qp->rq; unsigned long flags; - 
spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); /* caller has already called destroy_qp */ if (WARN_ON_ONCE(!qp->valid)) { - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); rxe_err_qp(qp, "qp has been destroyed"); return -EINVAL; } /* see C10-97.2.1 */ if (unlikely((qp_state(qp) < IB_QPS_INIT))) { - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); *bad_wr = wr; rxe_dbg_qp(qp, "qp not ready to post recv"); return -EINVAL; } - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); if (unlikely(qp->srq)) { *bad_wr = wr; @@ -1044,10 +1045,10 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, spin_unlock_irqrestore(&rq->producer_lock, flags); - spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); if (qp_state(qp) == IB_QPS_ERR) rxe_sched_task(&qp->resp.task); - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); return err; } diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index f290cd49698ea27b1f143587fbc666c89c28dcc8..92e1e7587af8b4131f37e7c6887747f7649b86c1 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -657,9 +657,13 @@ static int isert_connect_error(struct rdma_cm_id *cma_id) { struct isert_conn *isert_conn = cma_id->qp->qp_context; + struct isert_np *isert_np = cma_id->context; ib_drain_qp(isert_conn->qp); + + mutex_lock(&isert_np->mutex); list_del_init(&isert_conn->node); + mutex_unlock(&isert_np->mutex); isert_conn->cm_id = NULL; isert_put_conn(isert_conn); @@ -2431,6 +2435,7 @@ isert_free_np(struct iscsi_np *np) { struct isert_np *isert_np = np->np_context; struct isert_conn *isert_conn, *n; + LIST_HEAD(drop_conn_list); if (isert_np->cm_id) rdma_destroy_id(isert_np->cm_id); @@ -2450,7 +2455,7 @@ isert_free_np(struct iscsi_np *np) node) { isert_info("cleaning isert_conn %p state (%d)\n", isert_conn, isert_conn->state); - isert_connect_release(isert_conn); + list_move_tail(&isert_conn->node, &drop_conn_list); } } @@ -2461,11 +2466,16 @@ isert_free_np(struct iscsi_np *np) node) { isert_info("cleaning isert_conn %p state (%d)\n", isert_conn, isert_conn->state); - isert_connect_release(isert_conn); + list_move_tail(&isert_conn->node, &drop_conn_list); } } mutex_unlock(&isert_np->mutex); + list_for_each_entry_safe(isert_conn, n, &drop_conn_list, node) { + list_del_init(&isert_conn->node); + isert_connect_release(isert_conn); + } + np->np_context = NULL; kfree(isert_np); } @@ -2560,8 +2570,6 @@ static void isert_wait_conn(struct iscsit_conn *conn) isert_put_unsol_pending_cmds(conn); isert_wait4cmds(conn); isert_wait4logout(isert_conn); - - queue_work(isert_release_wq, &isert_conn->release_work); } static void isert_free_conn(struct iscsit_conn *conn) diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c index 3738aaefbe1316ef7e11af1ea41813fbc691cc2d..b32941dd67cb90d5b1260369d227316b418a1ec3 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c @@ -2039,6 +2039,7 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id, return 0; } +/* The caller should do the cleanup in case of error */ static int create_cm(struct rtrs_clt_con *con) { struct rtrs_path *s = con->c.path; @@ -2061,14 +2062,14 @@ static int create_cm(struct rtrs_clt_con *con) err = rdma_set_reuseaddr(cm_id, 1); if (err != 0) { rtrs_err(s, 
"Set address reuse failed, err: %d\n", err); - goto destroy_cm; + return err; } err = rdma_resolve_addr(cm_id, (struct sockaddr *)&clt_path->s.src_addr, (struct sockaddr *)&clt_path->s.dst_addr, RTRS_CONNECT_TIMEOUT_MS); if (err) { rtrs_err(s, "Failed to resolve address, err: %d\n", err); - goto destroy_cm; + return err; } /* * Combine connection status and session events. This is needed @@ -2083,29 +2084,15 @@ static int create_cm(struct rtrs_clt_con *con) if (err == 0) err = -ETIMEDOUT; /* Timedout or interrupted */ - goto errr; - } - if (con->cm_err < 0) { - err = con->cm_err; - goto errr; + return err; } - if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING) { + if (con->cm_err < 0) + return con->cm_err; + if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING) /* Device removal */ - err = -ECONNABORTED; - goto errr; - } + return -ECONNABORTED; return 0; - -errr: - stop_cm(con); - mutex_lock(&con->con_mutex); - destroy_con_cq_qp(con); - mutex_unlock(&con->con_mutex); -destroy_cm: - destroy_cm(con); - - return err; } static void rtrs_clt_path_up(struct rtrs_clt_path *clt_path) @@ -2333,7 +2320,7 @@ static void rtrs_clt_close_work(struct work_struct *work) static int init_conns(struct rtrs_clt_path *clt_path) { unsigned int cid; - int err; + int err, i; /* * On every new session connections increase reconnect counter @@ -2349,10 +2336,8 @@ static int init_conns(struct rtrs_clt_path *clt_path) goto destroy; err = create_cm(to_clt_con(clt_path->s.con[cid])); - if (err) { - destroy_con(to_clt_con(clt_path->s.con[cid])); + if (err) goto destroy; - } } err = alloc_path_reqs(clt_path); if (err) @@ -2363,15 +2348,21 @@ static int init_conns(struct rtrs_clt_path *clt_path) return 0; destroy: - while (cid--) { - struct rtrs_clt_con *con = to_clt_con(clt_path->s.con[cid]); + /* Make sure we do the cleanup in the order they are created */ + for (i = 0; i <= cid; i++) { + struct rtrs_clt_con *con; - stop_cm(con); + if (!clt_path->s.con[i]) + break; - mutex_lock(&con->con_mutex); - destroy_con_cq_qp(con); - mutex_unlock(&con->con_mutex); - destroy_cm(con); + con = to_clt_con(clt_path->s.con[i]); + if (con->c.cm_id) { + stop_cm(con); + mutex_lock(&con->con_mutex); + destroy_con_cq_qp(con); + mutex_unlock(&con->con_mutex); + destroy_cm(con); + } destroy_con(con); } /* diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c index 4bf9d868cc522bd8b7631f0f28aba9199385800c..3696f367ff5151333234c707ccad326385fdd59c 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs.c +++ b/drivers/infiniband/ulp/rtrs/rtrs.c @@ -37,8 +37,10 @@ struct rtrs_iu *rtrs_iu_alloc(u32 iu_num, size_t size, gfp_t gfp_mask, goto err; iu->dma_addr = ib_dma_map_single(dma_dev, iu->buf, size, dir); - if (ib_dma_mapping_error(dma_dev, iu->dma_addr)) + if (ib_dma_mapping_error(dma_dev, iu->dma_addr)) { + kfree(iu->buf); goto err; + } iu->cqe.done = done; iu->size = size; diff --git a/drivers/input/input.c b/drivers/input/input.c index 37e876d45eb9c23775465da445860bd20aa961aa..641eb86f276e6da05e86ea1ddd9ae10069cefe7a 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -703,7 +703,7 @@ void input_close_device(struct input_handle *handle) __input_release_device(handle); - if (!dev->inhibited && !--dev->users) { + if (!--dev->users && !dev->inhibited) { if (dev->poller) input_dev_poller_stop(dev->poller); if (dev->close) diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 28be88e0e96abed47f0c0453e741b934e53ba15b..f33622fe946f657380155fd1db111ab9d23e39f3 100644 --- 
a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -281,7 +281,6 @@ static const struct xpad_device { { 0x1430, 0xf801, "RedOctane Controller", 0, XTYPE_XBOX360 }, { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 }, { 0x146b, 0x0604, "Bigben Interactive DAIJA Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, - { 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 }, { 0x1532, 0x0a00, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, { 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE }, { 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 }, diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c index 09489380afda7a78d1f331e1c092f6d90d7b0f66..e79f5497948b8c047039c29c504317d009b5ef4c 100644 --- a/drivers/input/misc/soc_button_array.c +++ b/drivers/input/misc/soc_button_array.c @@ -108,6 +108,27 @@ static const struct dmi_system_id dmi_use_low_level_irq[] = { {} /* Terminating entry */ }; +/* + * Some devices have a wrong entry which points to a GPIO which is + * required in another driver, so this driver must not claim it. + */ +static const struct dmi_system_id dmi_invalid_acpi_index[] = { + { + /* + * Lenovo Yoga Book X90F / X90L, the PNP0C40 home button entry + * points to a GPIO which is not a home button and which is + * required by the lenovo-yogabook driver. + */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"), + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"), + }, + .driver_data = (void *)1l, + }, + {} /* Terminating entry */ +}; + /* * Get the Nth GPIO number from the ACPI object. */ @@ -137,6 +158,8 @@ soc_button_device_create(struct platform_device *pdev, struct platform_device *pd; struct gpio_keys_button *gpio_keys; struct gpio_keys_platform_data *gpio_keys_pdata; + const struct dmi_system_id *dmi_id; + int invalid_acpi_index = -1; int error, gpio, irq; int n_buttons = 0; @@ -154,10 +177,17 @@ soc_button_device_create(struct platform_device *pdev, gpio_keys = (void *)(gpio_keys_pdata + 1); n_buttons = 0; + dmi_id = dmi_first_match(dmi_invalid_acpi_index); + if (dmi_id) + invalid_acpi_index = (long)dmi_id->driver_data; + for (info = button_info; info->name; info++) { if (info->autorepeat != autorepeat) continue; + if (info->acpi_index == invalid_acpi_index) + continue; + error = soc_button_lookup_gpio(&pdev->dev, info->acpi_index, &gpio, &irq); if (error || irq < 0) { /* diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index ece97f8c6a3e399365b3f7c6a7e5d3f923c06ca7..2118b2075f437c2bfda8d3ec68b97ff831c3d74a 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -674,10 +674,11 @@ static void process_packet_head_v4(struct psmouse *psmouse) struct input_dev *dev = psmouse->dev; struct elantech_data *etd = psmouse->private; unsigned char *packet = psmouse->packet; - int id = ((packet[3] & 0xe0) >> 5) - 1; + int id; int pres, traces; - if (id < 0) + id = ((packet[3] & 0xe0) >> 5) - 1; + if (id < 0 || id >= ETP_MAX_FINGERS) return; etd->mt[id].x = ((packet[1] & 0x0f) << 8) | packet[2]; @@ -707,7 +708,7 @@ static void process_packet_motion_v4(struct psmouse *psmouse) int id, sid; id = ((packet[0] & 0xe0) >> 5) - 1; - if (id < 0) + if (id < 0 || id >= ETP_MAX_FINGERS) return; sid = ((packet[3] & 0xe0) >> 5) - 1; @@ -728,7 +729,7 @@ static void process_packet_motion_v4(struct psmouse *psmouse) input_report_abs(dev, 
ABS_MT_POSITION_X, etd->mt[id].x); input_report_abs(dev, ABS_MT_POSITION_Y, etd->mt[id].y); - if (sid >= 0) { + if (sid >= 0 && sid < ETP_MAX_FINGERS) { etd->mt[sid].x += delta_x2 * weight; etd->mt[sid].y -= delta_y2 * weight; input_mt_slot(dev, sid); diff --git a/drivers/input/touchscreen/cyttsp5.c b/drivers/input/touchscreen/cyttsp5.c index 30102cb80fac823ae47a27cc64a540a78211e656..3c9d07218f48dfc52bb59fe0be008d8e4915f586 100644 --- a/drivers/input/touchscreen/cyttsp5.c +++ b/drivers/input/touchscreen/cyttsp5.c @@ -560,7 +560,7 @@ static int cyttsp5_hid_output_get_sysinfo(struct cyttsp5 *ts) static int cyttsp5_hid_output_bl_launch_app(struct cyttsp5 *ts) { int rc; - u8 cmd[HID_OUTPUT_BL_LAUNCH_APP]; + u8 cmd[HID_OUTPUT_BL_LAUNCH_APP_SIZE]; u16 crc; put_unaligned_le16(HID_OUTPUT_BL_LAUNCH_APP_SIZE, cmd); diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index db98c3f86e8c8b87f4c9c7e45b1f1b775f72c2f0..4d800601e8ecd6002c462383e4fc6d2cc186cb03 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -282,6 +282,7 @@ config EXYNOS_IOMMU_DEBUG config IPMMU_VMSA bool "Renesas VMSA-compatible IPMMU" depends on ARCH_RENESAS || COMPILE_TEST + depends on ARM || ARM64 || COMPILE_TEST depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE select IOMMU_API select IOMMU_IO_PGTABLE_LPAE @@ -417,22 +418,6 @@ config S390_IOMMU help Support for the IOMMU API for s390 PCI devices. -config S390_CCW_IOMMU - bool "S390 CCW IOMMU Support" - depends on S390 && CCW || COMPILE_TEST - select IOMMU_API - help - Enables bits of IOMMU API required by VFIO. The iommu_ops - is not implemented as it is not necessary for VFIO. - -config S390_AP_IOMMU - bool "S390 AP IOMMU Support" - depends on S390 && ZCRYPT || COMPILE_TEST - select IOMMU_API - help - Enables bits of IOMMU API required by VFIO. The iommu_ops - is not implemented as it is not necessary for VFIO. - config MTK_IOMMU tristate "MediaTek IOMMU Support" depends on ARCH_MEDIATEK || COMPILE_TEST diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h index e98f20a9bdd82e6be58e4d9938094f18979482b6..9beeceb9d825dfeae9e2aa9229e031513680ba09 100644 --- a/drivers/iommu/amd/amd_iommu.h +++ b/drivers/iommu/amd/amd_iommu.h @@ -15,9 +15,7 @@ extern irqreturn_t amd_iommu_int_thread(int irq, void *data); extern irqreturn_t amd_iommu_int_handler(int irq, void *data); extern void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid); extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu); -extern int amd_iommu_init_devices(void); -extern void amd_iommu_uninit_devices(void); -extern void amd_iommu_init_notifier(void); +extern void amd_iommu_restart_ga_log(struct amd_iommu *iommu); extern void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid); #ifdef CONFIG_AMD_IOMMU_DEBUGFS diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index 329a406cc37de8f84e88acde441eea20ce65599b..c2d80a4e5fb066c69cfe0f8cab1856894bfbb2e3 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -758,6 +758,30 @@ void amd_iommu_restart_event_logging(struct amd_iommu *iommu) iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN); } +/* + * This function restarts event logging in case the IOMMU experienced + * an GA log overflow. 
+ */ +void amd_iommu_restart_ga_log(struct amd_iommu *iommu) +{ + u32 status; + + status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); + if (status & MMIO_STATUS_GALOG_RUN_MASK) + return; + + pr_info_ratelimited("IOMMU GA Log restarting\n"); + + iommu_feature_disable(iommu, CONTROL_GALOG_EN); + iommu_feature_disable(iommu, CONTROL_GAINT_EN); + + writel(MMIO_STATUS_GALOG_OVERFLOW_MASK, + iommu->mmio_base + MMIO_STATUS_OFFSET); + + iommu_feature_enable(iommu, CONTROL_GAINT_EN); + iommu_feature_enable(iommu, CONTROL_GALOG_EN); +} + /* * This function resets the command buffer if the IOMMU stopped fetching * commands from it. diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 4a314647d1f792e3ef74538baedb654b8e8a2f42..e8a2e5984acb7c97821db25a6689f8ea3c099de0 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -845,6 +845,7 @@ amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { } (MMIO_STATUS_EVT_OVERFLOW_INT_MASK | \ MMIO_STATUS_EVT_INT_MASK | \ MMIO_STATUS_PPR_INT_MASK | \ + MMIO_STATUS_GALOG_OVERFLOW_MASK | \ MMIO_STATUS_GALOG_INT_MASK) irqreturn_t amd_iommu_int_thread(int irq, void *data) @@ -868,10 +869,16 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data) } #ifdef CONFIG_IRQ_REMAP - if (status & MMIO_STATUS_GALOG_INT_MASK) { + if (status & (MMIO_STATUS_GALOG_INT_MASK | + MMIO_STATUS_GALOG_OVERFLOW_MASK)) { pr_devel("Processing IOMMU GA Log\n"); iommu_poll_ga_log(iommu); } + + if (status & MMIO_STATUS_GALOG_OVERFLOW_MASK) { + pr_info_ratelimited("IOMMU GA Log overflow\n"); + amd_iommu_restart_ga_log(iommu); + } #endif if (status & MMIO_STATUS_EVT_OVERFLOW_INT_MASK) { @@ -2067,14 +2074,10 @@ static struct protection_domain *protection_domain_alloc(unsigned int type) { struct io_pgtable_ops *pgtbl_ops; struct protection_domain *domain; - int pgtable = amd_iommu_pgtable; + int pgtable; int mode = DEFAULT_PGTABLE_LEVEL; int ret; - domain = kzalloc(sizeof(*domain), GFP_KERNEL); - if (!domain) - return NULL; - /* * Force IOMMU v1 page table when iommu=pt and * when allocating domain for pass-through devices. 
@@ -2084,8 +2087,16 @@ static struct protection_domain *protection_domain_alloc(unsigned int type) mode = PAGE_MODE_NONE; } else if (type == IOMMU_DOMAIN_UNMANAGED) { pgtable = AMD_IOMMU_V1; + } else if (type == IOMMU_DOMAIN_DMA || type == IOMMU_DOMAIN_DMA_FQ) { + pgtable = amd_iommu_pgtable; + } else { + return NULL; } + domain = kzalloc(sizeof(*domain), GFP_KERNEL); + if (!domain) + return NULL; + switch (pgtable) { case AMD_IOMMU_V1: ret = protection_domain_init_v1(domain, mode); @@ -2118,6 +2129,15 @@ out_err: return NULL; } +static inline u64 dma_max_address(void) +{ + if (amd_iommu_pgtable == AMD_IOMMU_V1) + return ~0ULL; + + /* V2 with 4/5 level page table */ + return ((1ULL << PM_LEVEL_SHIFT(amd_iommu_gpt_level)) - 1); +} + static struct iommu_domain *amd_iommu_domain_alloc(unsigned type) { struct protection_domain *domain; @@ -2134,7 +2154,7 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type) return NULL; domain->domain.geometry.aperture_start = 0; - domain->domain.geometry.aperture_end = ~0ULL; + domain->domain.geometry.aperture_end = dma_max_address(); domain->domain.geometry.force_aperture = true; return &domain->domain; @@ -2387,7 +2407,7 @@ static void amd_iommu_iotlb_sync(struct iommu_domain *domain, unsigned long flags; spin_lock_irqsave(&dom->lock, flags); - domain_flush_pages(dom, gather->start, gather->end - gather->start, 1); + domain_flush_pages(dom, gather->start, gather->end - gather->start + 1, 1); amd_iommu_domain_flush_complete(dom); spin_unlock_irqrestore(&dom->lock, flags); } @@ -3493,8 +3513,7 @@ int amd_iommu_activate_guest_mode(void *data) struct irte_ga *entry = (struct irte_ga *) ir_data->entry; u64 valid; - if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) || - !entry || entry->lo.fields_vapic.guest_mode) + if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) || !entry) return 0; valid = entry->lo.fields_vapic.valid; diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c index ae09c627bc8448b962f95f581ef213ff06b0ab58..c71afda79d6442f57026b945fd7ff8fe08cd35a4 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c @@ -517,6 +517,7 @@ static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = { { .compatible = "qcom,qcm2290-smmu-500", .data = &qcom_smmu_500_impl0_data }, { .compatible = "qcom,qdu1000-smmu-500", .data = &qcom_smmu_500_impl0_data }, { .compatible = "qcom,sc7180-smmu-500", .data = &qcom_smmu_500_impl0_data }, + { .compatible = "qcom,sc7180-smmu-v2", .data = &qcom_smmu_v2_data }, { .compatible = "qcom,sc7280-smmu-500", .data = &qcom_smmu_500_impl0_data }, { .compatible = "qcom,sc8180x-smmu-500", .data = &qcom_smmu_500_impl0_data }, { .compatible = "qcom,sc8280xp-smmu-500", .data = &qcom_smmu_500_impl0_data }, @@ -561,5 +562,14 @@ struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu) if (match) return qcom_smmu_create(smmu, match->data); + /* + * If you hit this WARN_ON() you are missing an entry in the + * qcom_smmu_impl_of_match[] table, and GPU per-process page- + * tables will be broken. 
+ */ + WARN(of_device_is_compatible(np, "qcom,adreno-smmu"), + "Missing qcom_smmu_impl_of_match entry for: %s", + dev_name(smmu->dev)); + return smmu; } diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index aecc7d154f28eeb1cfb493b9e61686f1fba2f4bf..e93906d6e112e826facaea508e118e36b78fb666 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -781,7 +781,8 @@ static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain) { struct mtk_iommu_domain *dom = to_mtk_domain(domain); - mtk_iommu_tlb_flush_all(dom->bank->parent_data); + if (dom->bank) + mtk_iommu_tlb_flush_all(dom->bank->parent_data); } static void mtk_iommu_iotlb_sync(struct iommu_domain *domain, diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index ea5a3088bb7e8a566ab9285d644009eb2bcd1d6a..4054030c32379533eca2e96003016a8a6a51481a 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -1335,20 +1335,22 @@ static int rk_iommu_probe(struct platform_device *pdev) for (i = 0; i < iommu->num_irq; i++) { int irq = platform_get_irq(pdev, i); - if (irq < 0) - return irq; + if (irq < 0) { + err = irq; + goto err_pm_disable; + } err = devm_request_irq(iommu->dev, irq, rk_iommu_irq, IRQF_SHARED, dev_name(dev), iommu); - if (err) { - pm_runtime_disable(dev); - goto err_remove_sysfs; - } + if (err) + goto err_pm_disable; } dma_set_mask_and_coherent(dev, rk_ops->dma_bit_mask); return 0; +err_pm_disable: + pm_runtime_disable(dev); err_remove_sysfs: iommu_device_sysfs_remove(&iommu->iommu); err_put_group: diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c index a610821c8ff2af1e8485046480b047c6d3538a27..afd6a1841715a152afd572dac15e4366bfd96d1f 100644 --- a/drivers/irqchip/irq-gic-common.c +++ b/drivers/irqchip/irq-gic-common.c @@ -16,7 +16,13 @@ void gic_enable_of_quirks(const struct device_node *np, const struct gic_quirk *quirks, void *data) { for (; quirks->desc; quirks++) { - if (!of_device_is_compatible(np, quirks->compatible)) + if (!quirks->compatible && !quirks->property) + continue; + if (quirks->compatible && + !of_device_is_compatible(np, quirks->compatible)) + continue; + if (quirks->property && + !of_property_read_bool(np, quirks->property)) continue; if (quirks->init(data)) pr_info("GIC: enabling workaround for %s\n", @@ -28,7 +34,7 @@ void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks, void *data) { for (; quirks->desc; quirks++) { - if (quirks->compatible) + if (quirks->compatible || quirks->property) continue; if (quirks->iidr != (quirks->mask & iidr)) continue; diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h index 27e3d4ed4f32861a4c46eebcc4634eb835a8e45b..3db4592cda1c01ad8a27e73f62b9d0890019667d 100644 --- a/drivers/irqchip/irq-gic-common.h +++ b/drivers/irqchip/irq-gic-common.h @@ -13,6 +13,7 @@ struct gic_quirk { const char *desc; const char *compatible; + const char *property; bool (*init)(void *data); u32 iidr; u32 mask; diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 6fcee221f201720929f1481aa64d3367d6395737..a605aa79435a4dca64dd1eb012f0f1fe7f394ff3 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -39,6 +39,7 @@ #define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0) #define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1) +#define FLAGS_WORKAROUND_MTK_GICR_SAVE (1ULL << 2) #define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1) @@ -1720,6 +1721,15 @@ static bool gic_enable_quirk_msm8996(void 
*data) return true; } +static bool gic_enable_quirk_mtk_gicr(void *data) +{ + struct gic_chip_data *d = data; + + d->flags |= FLAGS_WORKAROUND_MTK_GICR_SAVE; + + return true; +} + static bool gic_enable_quirk_cavium_38539(void *data) { struct gic_chip_data *d = data; @@ -1792,6 +1802,11 @@ static const struct gic_quirk gic_quirks[] = { .compatible = "qcom,msm8996-gic-v3", .init = gic_enable_quirk_msm8996, }, + { + .desc = "GICv3: Mediatek Chromebook GICR save problem", + .property = "mediatek,broken-save-restore-fw", + .init = gic_enable_quirk_mtk_gicr, + }, { .desc = "GICv3: HIP06 erratum 161010803", .iidr = 0x0204043b, @@ -1834,6 +1849,11 @@ static void gic_enable_nmi_support(void) if (!gic_prio_masking_enabled()) return; + if (gic_data.flags & FLAGS_WORKAROUND_MTK_GICR_SAVE) { + pr_warn("Skipping NMI enable due to firmware issues\n"); + return; + } + ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL); if (!ppi_nmi_refs) return; diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c index eada5e0e3eb956c939be7d6eef06e1a0a4aedd39..5101a3fb11df5bef53122db9db3c194669d754e7 100644 --- a/drivers/irqchip/irq-mbigen.c +++ b/drivers/irqchip/irq-mbigen.c @@ -240,26 +240,27 @@ static int mbigen_of_create_domain(struct platform_device *pdev, struct irq_domain *domain; struct device_node *np; u32 num_pins; + int ret = 0; + + parent = bus_get_dev_root(&platform_bus_type); + if (!parent) + return -ENODEV; for_each_child_of_node(pdev->dev.of_node, np) { if (!of_property_read_bool(np, "interrupt-controller")) continue; - parent = bus_get_dev_root(&platform_bus_type); - if (parent) { - child = of_platform_device_create(np, NULL, parent); - put_device(parent); - if (!child) { - of_node_put(np); - return -ENOMEM; - } + child = of_platform_device_create(np, NULL, parent); + if (!child) { + ret = -ENOMEM; + break; } if (of_property_read_u32(child->dev.of_node, "num-pins", &num_pins) < 0) { dev_err(&pdev->dev, "No num-pins property\n"); - of_node_put(np); - return -EINVAL; + ret = -EINVAL; + break; } domain = platform_msi_create_device_domain(&child->dev, num_pins, @@ -267,12 +268,16 @@ static int mbigen_of_create_domain(struct platform_device *pdev, &mbigen_domain_ops, mgn_chip); if (!domain) { - of_node_put(np); - return -ENOMEM; + ret = -ENOMEM; + break; } } - return 0; + put_device(parent); + if (ret) + of_node_put(np); + + return ret; } #ifdef CONFIG_ACPI diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c index 2aaa9aad3e87acf666e7cf1fbe97feb9a175d797..7da18ef95211984bd2998af59e3bac07b5bedf98 100644 --- a/drivers/irqchip/irq-meson-gpio.c +++ b/drivers/irqchip/irq-meson-gpio.c @@ -150,7 +150,7 @@ static const struct meson_gpio_irq_params s4_params = { INIT_MESON_S4_COMMON_DATA(82) }; -static const struct of_device_id meson_irq_gpio_matches[] = { +static const struct of_device_id meson_irq_gpio_matches[] __maybe_unused = { { .compatible = "amlogic,meson8-gpio-intc", .data = &meson8_params }, { .compatible = "amlogic,meson8b-gpio-intc", .data = &meson8b_params }, { .compatible = "amlogic,meson-gxbb-gpio-intc", .data = &gxbb_params }, diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index 046c355e120b6485abf5266490670af9ce21be21..6d5ecc10a87032dfccb0835964999bfb07d912bd 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -50,7 +50,7 @@ void __iomem *mips_gic_base; static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks); -static DEFINE_SPINLOCK(gic_lock); 
+static DEFINE_RAW_SPINLOCK(gic_lock); static struct irq_domain *gic_irq_domain; static int gic_shared_intrs; static unsigned int gic_cpu_pin; @@ -210,7 +210,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type) irq = GIC_HWIRQ_TO_SHARED(d->hwirq); - spin_lock_irqsave(&gic_lock, flags); + raw_spin_lock_irqsave(&gic_lock, flags); switch (type & IRQ_TYPE_SENSE_MASK) { case IRQ_TYPE_EDGE_FALLING: pol = GIC_POL_FALLING_EDGE; @@ -250,7 +250,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type) else irq_set_chip_handler_name_locked(d, &gic_level_irq_controller, handle_level_irq, NULL); - spin_unlock_irqrestore(&gic_lock, flags); + raw_spin_unlock_irqrestore(&gic_lock, flags); return 0; } @@ -268,7 +268,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, return -EINVAL; /* Assumption : cpumask refers to a single CPU */ - spin_lock_irqsave(&gic_lock, flags); + raw_spin_lock_irqsave(&gic_lock, flags); /* Re-route this IRQ */ write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu))); @@ -279,7 +279,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, set_bit(irq, per_cpu_ptr(pcpu_masks, cpu)); irq_data_update_effective_affinity(d, cpumask_of(cpu)); - spin_unlock_irqrestore(&gic_lock, flags); + raw_spin_unlock_irqrestore(&gic_lock, flags); return IRQ_SET_MASK_OK; } @@ -357,12 +357,12 @@ static void gic_mask_local_irq_all_vpes(struct irq_data *d) cd = irq_data_get_irq_chip_data(d); cd->mask = false; - spin_lock_irqsave(&gic_lock, flags); + raw_spin_lock_irqsave(&gic_lock, flags); for_each_online_cpu(cpu) { write_gic_vl_other(mips_cm_vp_id(cpu)); write_gic_vo_rmask(BIT(intr)); } - spin_unlock_irqrestore(&gic_lock, flags); + raw_spin_unlock_irqrestore(&gic_lock, flags); } static void gic_unmask_local_irq_all_vpes(struct irq_data *d) @@ -375,12 +375,12 @@ static void gic_unmask_local_irq_all_vpes(struct irq_data *d) cd = irq_data_get_irq_chip_data(d); cd->mask = true; - spin_lock_irqsave(&gic_lock, flags); + raw_spin_lock_irqsave(&gic_lock, flags); for_each_online_cpu(cpu) { write_gic_vl_other(mips_cm_vp_id(cpu)); write_gic_vo_smask(BIT(intr)); } - spin_unlock_irqrestore(&gic_lock, flags); + raw_spin_unlock_irqrestore(&gic_lock, flags); } static void gic_all_vpes_irq_cpu_online(void) @@ -393,19 +393,21 @@ static void gic_all_vpes_irq_cpu_online(void) unsigned long flags; int i; - spin_lock_irqsave(&gic_lock, flags); + raw_spin_lock_irqsave(&gic_lock, flags); for (i = 0; i < ARRAY_SIZE(local_intrs); i++) { unsigned int intr = local_intrs[i]; struct gic_all_vpes_chip_data *cd; + if (!gic_local_irq_is_routable(intr)) + continue; cd = &gic_all_vpes_chip_data[intr]; write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map); if (cd->mask) write_gic_vl_smask(BIT(intr)); } - spin_unlock_irqrestore(&gic_lock, flags); + raw_spin_unlock_irqrestore(&gic_lock, flags); } static struct irq_chip gic_all_vpes_local_irq_controller = { @@ -435,11 +437,11 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq, data = irq_get_irq_data(virq); - spin_lock_irqsave(&gic_lock, flags); + raw_spin_lock_irqsave(&gic_lock, flags); write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin); write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu))); irq_data_update_effective_affinity(data, cpumask_of(cpu)); - spin_unlock_irqrestore(&gic_lock, flags); + raw_spin_unlock_irqrestore(&gic_lock, flags); return 0; } @@ -531,12 +533,12 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq, if (!gic_local_irq_is_routable(intr)) return 
-EPERM; - spin_lock_irqsave(&gic_lock, flags); + raw_spin_lock_irqsave(&gic_lock, flags); for_each_online_cpu(cpu) { write_gic_vl_other(mips_cm_vp_id(cpu)); write_gic_vo_map(mips_gic_vx_map_reg(intr), map); } - spin_unlock_irqrestore(&gic_lock, flags); + raw_spin_unlock_irqrestore(&gic_lock, flags); return 0; } diff --git a/drivers/leds/rgb/leds-qcom-lpg.c b/drivers/leds/rgb/leds-qcom-lpg.c index 55a037234df1b07c44fab18846bfa17e705c1af1..1c849814a4917fe5805a3f1aa8bf87831366a759 100644 --- a/drivers/leds/rgb/leds-qcom-lpg.c +++ b/drivers/leds/rgb/leds-qcom-lpg.c @@ -312,14 +312,14 @@ static int lpg_calc_freq(struct lpg_channel *chan, uint64_t period) max_res = LPG_RESOLUTION_9BIT; } - min_period = (u64)NSEC_PER_SEC * - div64_u64((1 << pwm_resolution_arr[0]), clk_rate_arr[clk_len - 1]); + min_period = div64_u64((u64)NSEC_PER_SEC * (1 << pwm_resolution_arr[0]), + clk_rate_arr[clk_len - 1]); if (period <= min_period) return -EINVAL; /* Limit period to largest possible value, to avoid overflows */ - max_period = (u64)NSEC_PER_SEC * max_res * LPG_MAX_PREDIV * - div64_u64((1 << LPG_MAX_M), 1024); + max_period = div64_u64((u64)NSEC_PER_SEC * max_res * LPG_MAX_PREDIV * (1 << LPG_MAX_M), + 1024); if (period > max_period) period = max_period; diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c index c4a705c30331421e08e31606a12285d56a6d8682..fc6a12a51b403863d7dcb36a63a54b5a76c2f135 100644 --- a/drivers/mailbox/mailbox-test.c +++ b/drivers/mailbox/mailbox-test.c @@ -98,6 +98,7 @@ static ssize_t mbox_test_message_write(struct file *filp, size_t count, loff_t *ppos) { struct mbox_test_device *tdev = filp->private_data; + char *message; void *data; int ret; @@ -113,12 +114,13 @@ static ssize_t mbox_test_message_write(struct file *filp, return -EINVAL; } - mutex_lock(&tdev->mutex); - - tdev->message = kzalloc(MBOX_MAX_MSG_LEN, GFP_KERNEL); - if (!tdev->message) + message = kzalloc(MBOX_MAX_MSG_LEN, GFP_KERNEL); + if (!message) return -ENOMEM; + mutex_lock(&tdev->mutex); + + tdev->message = message; ret = copy_from_user(tdev->message, userbuf, count); if (ret) { ret = -EFAULT; diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 9e0c69958587209a0fb3af47c42446f4d2b58910..acffed750e3edc8d248a409626e49ba57172e5c6 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -1828,7 +1828,7 @@ int dm_cache_metadata_abort(struct dm_cache_metadata *cmd) * Replacement block manager (new_bm) is created and old_bm destroyed outside of * cmd root_lock to avoid ABBA deadlock that would result (due to life-cycle of * shrinker associated with the block manager's bufio client vs cmd root_lock). - * - must take shrinker_mutex without holding cmd->root_lock + * - must take shrinker_rwsem without holding cmd->root_lock */ new_bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT, CACHE_MAX_CONCURRENT_LOCKS); diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index cc77cf3d410921432eb0c62cdede7d55b9aa674a..7d5c9c582ed2d639f244ad998836c1e49fbbaad5 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -1168,13 +1168,10 @@ static int do_resume(struct dm_ioctl *param) /* Do we need to load a new map ? 
*/ if (new_map) { sector_t old_size, new_size; - int srcu_idx; /* Suspend if it isn't already suspended */ - old_map = dm_get_live_table(md, &srcu_idx); - if ((param->flags & DM_SKIP_LOCKFS_FLAG) || !old_map) + if (param->flags & DM_SKIP_LOCKFS_FLAG) suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG; - dm_put_live_table(md, srcu_idx); if (param->flags & DM_NOFLUSH_FLAG) suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG; if (!dm_suspended_md(md)) diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 9f5cb52c57632cac44c9f182cef25dac606bcc97..9dd0409848abe0d889cd67f704deb02b25f4471f 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -1756,13 +1756,15 @@ int dm_thin_remove_range(struct dm_thin_device *td, int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result) { - int r; + int r = -EINVAL; uint32_t ref_count; down_read(&pmd->root_lock); - r = dm_sm_get_count(pmd->data_sm, b, &ref_count); - if (!r) - *result = (ref_count > 1); + if (!pmd->fail_io) { + r = dm_sm_get_count(pmd->data_sm, b, &ref_count); + if (!r) + *result = (ref_count > 1); + } up_read(&pmd->root_lock); return r; @@ -1770,10 +1772,11 @@ int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *re int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e) { - int r = 0; + int r = -EINVAL; pmd_write_lock(pmd); - r = dm_sm_inc_blocks(pmd->data_sm, b, e); + if (!pmd->fail_io) + r = dm_sm_inc_blocks(pmd->data_sm, b, e); pmd_write_unlock(pmd); return r; @@ -1781,10 +1784,11 @@ int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_ int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e) { - int r = 0; + int r = -EINVAL; pmd_write_lock(pmd); - r = dm_sm_dec_blocks(pmd->data_sm, b, e); + if (!pmd->fail_io) + r = dm_sm_dec_blocks(pmd->data_sm, b, e); pmd_write_unlock(pmd); return r; @@ -1887,7 +1891,7 @@ int dm_pool_abort_metadata(struct dm_pool_metadata *pmd) * Replacement block manager (new_bm) is created and old_bm destroyed outside of * pmd root_lock to avoid ABBA deadlock that would result (due to life-cycle of * shrinker associated with the block manager's bufio client vs pmd root_lock). 
- * - must take shrinker_mutex without holding pmd->root_lock + * - must take shrinker_rwsem without holding pmd->root_lock */ new_bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT, THIN_MAX_CONCURRENT_LOCKS); diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 2b13c949bd726bf13cc82c08e9d10676d32e4f47..39410bf186cf6ecbf4056814b271f4b3ce80330b 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -401,8 +401,7 @@ static int issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t da sector_t s = block_to_sectors(tc->pool, data_b); sector_t len = block_to_sectors(tc->pool, data_e - data_b); - return __blkdev_issue_discard(tc->pool_dev->bdev, s, len, GFP_NOWAIT, - &op->bio); + return __blkdev_issue_discard(tc->pool_dev->bdev, s, len, GFP_NOIO, &op->bio); } static void end_discard(struct discard_op *op, int r) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 3b694ba3a106e68d4c0d5e64cd9136cf7abce237..fffb0cbe2ac8a14fe9bc0c9dc442ce6e4a0a02aa 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1172,7 +1172,8 @@ static inline sector_t max_io_len_target_boundary(struct dm_target *ti, } static sector_t __max_io_len(struct dm_target *ti, sector_t sector, - unsigned int max_granularity) + unsigned int max_granularity, + unsigned int max_sectors) { sector_t target_offset = dm_target_offset(ti, sector); sector_t len = max_io_len_target_boundary(ti, target_offset); @@ -1186,13 +1187,13 @@ static sector_t __max_io_len(struct dm_target *ti, sector_t sector, if (!max_granularity) return len; return min_t(sector_t, len, - min(queue_max_sectors(ti->table->md->queue), + min(max_sectors ? : queue_max_sectors(ti->table->md->queue), blk_chunk_sectors_left(target_offset, max_granularity))); } static inline sector_t max_io_len(struct dm_target *ti, sector_t sector) { - return __max_io_len(ti, sector, ti->max_io_len); + return __max_io_len(ti, sector, ti->max_io_len, 0); } int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) @@ -1581,12 +1582,13 @@ static void __send_empty_flush(struct clone_info *ci) static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti, unsigned int num_bios, - unsigned int max_granularity) + unsigned int max_granularity, + unsigned int max_sectors) { unsigned int len, bios; len = min_t(sector_t, ci->sector_count, - __max_io_len(ti, ci->sector, max_granularity)); + __max_io_len(ti, ci->sector, max_granularity, max_sectors)); atomic_add(num_bios, &ci->io->io_count); bios = __send_duplicate_bios(ci, ti, num_bios, &len); @@ -1623,23 +1625,27 @@ static blk_status_t __process_abnormal_io(struct clone_info *ci, { unsigned int num_bios = 0; unsigned int max_granularity = 0; + unsigned int max_sectors = 0; struct queue_limits *limits = dm_get_queue_limits(ti->table->md); switch (bio_op(ci->bio)) { case REQ_OP_DISCARD: num_bios = ti->num_discard_bios; + max_sectors = limits->max_discard_sectors; if (ti->max_discard_granularity) - max_granularity = limits->max_discard_sectors; + max_granularity = max_sectors; break; case REQ_OP_SECURE_ERASE: num_bios = ti->num_secure_erase_bios; + max_sectors = limits->max_secure_erase_sectors; if (ti->max_secure_erase_granularity) - max_granularity = limits->max_secure_erase_sectors; + max_granularity = max_sectors; break; case REQ_OP_WRITE_ZEROES: num_bios = ti->num_write_zeroes_bios; + max_sectors = limits->max_write_zeroes_sectors; if (ti->max_write_zeroes_granularity) - max_granularity = limits->max_write_zeroes_sectors; + max_granularity = max_sectors; break; 
default: break; @@ -1654,7 +1660,8 @@ static blk_status_t __process_abnormal_io(struct clone_info *ci, if (unlikely(!num_bios)) return BLK_STS_NOTSUPP; - __send_changing_extent_only(ci, ti, num_bios, max_granularity); + __send_changing_extent_only(ci, ti, num_bios, + max_granularity, max_sectors); return BLK_STS_OK; } @@ -2808,6 +2815,10 @@ retry: } map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); + if (!map) { + /* avoid deadlock with fs/namespace.c:do_mount() */ + suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG; + } r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); if (r) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 4739ed891e756eb3a8af3e7c48c6be0950b82db7..9ea285fbc4a654703f363f7d450c903d5e8c6d77 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -5516,7 +5516,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio) sector = raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, 0, &dd_idx, NULL); - end_sector = bio_end_sector(raid_bio); + end_sector = sector + bio_sectors(raid_bio); rcu_read_lock(); if (r5c_big_stripe_cached(conf, sector)) diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c index 769ea6b2e1d0b6776f16e78a0ccb71d4c03e8668..241b1621b197c4f64677a6a16108f72b7322db11 100644 --- a/drivers/media/cec/core/cec-adap.c +++ b/drivers/media/cec/core/cec-adap.c @@ -1091,7 +1091,8 @@ void cec_received_msg_ts(struct cec_adapter *adap, mutex_lock(&adap->lock); dprintk(2, "%s: %*ph\n", __func__, msg->len, msg->msg); - adap->last_initiator = 0xff; + if (!adap->transmit_in_progress) + adap->last_initiator = 0xff; /* Check if this message was for us (directed or broadcast). */ if (!cec_msg_is_broadcast(msg)) { @@ -1585,7 +1586,7 @@ static void cec_claim_log_addrs(struct cec_adapter *adap, bool block) * * This function is called with adap->lock held. 
*/ -static int cec_adap_enable(struct cec_adapter *adap) +int cec_adap_enable(struct cec_adapter *adap) { bool enable; int ret = 0; @@ -1595,6 +1596,9 @@ static int cec_adap_enable(struct cec_adapter *adap) if (adap->needs_hpd) enable = enable && adap->phys_addr != CEC_PHYS_ADDR_INVALID; + if (adap->devnode.unregistered) + enable = false; + if (enable == adap->is_enabled) return 0; diff --git a/drivers/media/cec/core/cec-core.c b/drivers/media/cec/core/cec-core.c index af358e901b5f35c6cf05f7a7bc5fd954ae3b5854..7e153c5cad04f50d58824da520cc019409bd4a87 100644 --- a/drivers/media/cec/core/cec-core.c +++ b/drivers/media/cec/core/cec-core.c @@ -191,6 +191,8 @@ static void cec_devnode_unregister(struct cec_adapter *adap) mutex_lock(&adap->lock); __cec_s_phys_addr(adap, CEC_PHYS_ADDR_INVALID, false); __cec_s_log_addrs(adap, NULL, false); + // Disable the adapter (since adap->devnode.unregistered is true) + cec_adap_enable(adap); mutex_unlock(&adap->lock); cdev_device_del(&devnode->cdev, &devnode->dev); diff --git a/drivers/media/cec/core/cec-priv.h b/drivers/media/cec/core/cec-priv.h index b78df931aa74b04a2699bbb11f0c140cccf7de46..ed1f8c67626bf9cee44f4596307b9c95aa62e4e5 100644 --- a/drivers/media/cec/core/cec-priv.h +++ b/drivers/media/cec/core/cec-priv.h @@ -47,6 +47,7 @@ int cec_monitor_pin_cnt_inc(struct cec_adapter *adap); void cec_monitor_pin_cnt_dec(struct cec_adapter *adap); int cec_adap_status(struct seq_file *file, void *priv); int cec_thread_func(void *_adap); +int cec_adap_enable(struct cec_adapter *adap); void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block); int __cec_s_log_addrs(struct cec_adapter *adap, struct cec_log_addrs *log_addrs, bool block); diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c index c2d2792227f865ff934f0effa3eda98ecef7802d..baf64540dc00a127c5f781034b1fec192e7e4ca2 100644 --- a/drivers/media/dvb-core/dvb_ca_en50221.c +++ b/drivers/media/dvb-core/dvb_ca_en50221.c @@ -151,6 +151,12 @@ struct dvb_ca_private { /* mutex serializing ioctls */ struct mutex ioctl_mutex; + + /* A mutex used when a device is disconnected */ + struct mutex remove_mutex; + + /* Whether the device is disconnected */ + int exit; }; static void dvb_ca_private_free(struct dvb_ca_private *ca) @@ -187,7 +193,7 @@ static void dvb_ca_en50221_thread_wakeup(struct dvb_ca_private *ca); static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 *ebuf, int ecount); static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot, - u8 *ebuf, int ecount); + u8 *ebuf, int ecount, int size_write_flag); /** * findstr - Safely find needle in haystack. @@ -370,7 +376,7 @@ static int dvb_ca_en50221_link_init(struct dvb_ca_private *ca, int slot) ret = dvb_ca_en50221_wait_if_status(ca, slot, STATUSREG_FR, HZ / 10); if (ret) return ret; - ret = dvb_ca_en50221_write_data(ca, slot, buf, 2); + ret = dvb_ca_en50221_write_data(ca, slot, buf, 2, CMDREG_SW); if (ret != 2) return -EIO; ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN); @@ -778,11 +784,13 @@ exit: * @buf: The data in this buffer is treated as a complete link-level packet to * be written. * @bytes_write: Size of ebuf. + * @size_write_flag: A flag on Command Register which says whether the link size + * information will be writen or not. * * return: Number of bytes written, or < 0 on error. 
*/ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot, - u8 *buf, int bytes_write) + u8 *buf, int bytes_write, int size_write_flag) { struct dvb_ca_slot *sl = &ca->slot_info[slot]; int status; @@ -817,7 +825,7 @@ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot, /* OK, set HC bit */ status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, - IRQEN | CMDREG_HC); + IRQEN | CMDREG_HC | size_write_flag); if (status) goto exit; @@ -1508,7 +1516,7 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file, mutex_lock(&sl->slot_lock); status = dvb_ca_en50221_write_data(ca, slot, fragbuf, - fraglen + 2); + fraglen + 2, 0); mutex_unlock(&sl->slot_lock); if (status == (fraglen + 2)) { written = 1; @@ -1709,12 +1717,22 @@ static int dvb_ca_en50221_io_open(struct inode *inode, struct file *file) dprintk("%s\n", __func__); - if (!try_module_get(ca->pub->owner)) + mutex_lock(&ca->remove_mutex); + + if (ca->exit) { + mutex_unlock(&ca->remove_mutex); + return -ENODEV; + } + + if (!try_module_get(ca->pub->owner)) { + mutex_unlock(&ca->remove_mutex); return -EIO; + } err = dvb_generic_open(inode, file); if (err < 0) { module_put(ca->pub->owner); + mutex_unlock(&ca->remove_mutex); return err; } @@ -1739,6 +1757,7 @@ static int dvb_ca_en50221_io_open(struct inode *inode, struct file *file) dvb_ca_private_get(ca); + mutex_unlock(&ca->remove_mutex); return 0; } @@ -1758,6 +1777,8 @@ static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file) dprintk("%s\n", __func__); + mutex_lock(&ca->remove_mutex); + /* mark the CA device as closed */ ca->open = 0; dvb_ca_en50221_thread_update_delay(ca); @@ -1768,6 +1789,13 @@ static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file) dvb_ca_private_put(ca); + if (dvbdev->users == 1 && ca->exit == 1) { + mutex_unlock(&ca->remove_mutex); + wake_up(&dvbdev->wait_queue); + } else { + mutex_unlock(&ca->remove_mutex); + } + return err; } @@ -1891,6 +1919,7 @@ int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter, } mutex_init(&ca->ioctl_mutex); + mutex_init(&ca->remove_mutex); if (signal_pending(current)) { ret = -EINTR; @@ -1933,6 +1962,14 @@ void dvb_ca_en50221_release(struct dvb_ca_en50221 *pubca) dprintk("%s\n", __func__); + mutex_lock(&ca->remove_mutex); + ca->exit = 1; + mutex_unlock(&ca->remove_mutex); + + if (ca->dvbdev->users < 1) + wait_event(ca->dvbdev->wait_queue, + ca->dvbdev->users == 1); + /* shutdown the thread if there was one */ kthread_stop(ca->thread); diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c index 398c86279b5b09555590c5c3ac4d3a8798c9c103..7c4d86bfdd6c9d259ab99b57cd3ce355f3b341ad 100644 --- a/drivers/media/dvb-core/dvb_demux.c +++ b/drivers/media/dvb-core/dvb_demux.c @@ -115,12 +115,12 @@ static inline int dvb_dmx_swfilter_payload(struct dvb_demux_feed *feed, cc = buf[3] & 0x0f; ccok = ((feed->cc + 1) & 0x0f) == cc; - feed->cc = cc; if (!ccok) { set_buf_flags(feed, DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED); dprintk_sect_loss("missed packet: %d instead of %d!\n", cc, (feed->cc + 1) & 0x0f); } + feed->cc = cc; if (buf[1] & 0x40) // PUSI ? 
 	feed->peslen = 0xfffa;
@@ -300,7 +300,6 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
 
 	cc = buf[3] & 0x0f;
 	ccok = ((feed->cc + 1) & 0x0f) == cc;
-	feed->cc = cc;
 
 	if (buf[3] & 0x20) {
 		/* adaption field present, check for discontinuity_indicator */
@@ -336,6 +335,7 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
 		feed->pusi_seen = false;
 		dvb_dmx_swfilter_section_new(feed);
 	}
+	feed->cc = cc;
 
 	if (buf[1] & 0x40) {
 		/* PUSI=1 (is set), section boundary is here */
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index cc0a789f09ae5360c43aaaa8d989704790ff6d17..9293b058ab9974885da8938095597dabd96e603c 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -293,14 +293,22 @@ static int dvb_frontend_get_event(struct dvb_frontend *fe,
 	}
 
 	if (events->eventw == events->eventr) {
-		int ret;
+		struct wait_queue_entry wait;
+		int ret = 0;
 
 		if (flags & O_NONBLOCK)
 			return -EWOULDBLOCK;
 
-		ret = wait_event_interruptible(events->wait_queue,
-					       dvb_frontend_test_event(fepriv, events));
-
+		init_waitqueue_entry(&wait, current);
+		add_wait_queue(&events->wait_queue, &wait);
+		while (!dvb_frontend_test_event(fepriv, events)) {
+			wait_woken(&wait, TASK_INTERRUPTIBLE, 0);
+			if (signal_pending(current)) {
+				ret = -ERESTARTSYS;
+				break;
+			}
+		}
+		remove_wait_queue(&events->wait_queue, &wait);
 		if (ret < 0)
 			return ret;
 	}
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index 8a2febf33ce28fde43676e66f7381e59d032f4d1..8bb8dd34c223e91119059965851de57cac9af8e5 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -1564,15 +1564,43 @@ static long dvb_net_ioctl(struct file *file,
 	return dvb_usercopy(file, cmd, arg, dvb_net_do_ioctl);
 }
 
+static int locked_dvb_net_open(struct inode *inode, struct file *file)
+{
+	struct dvb_device *dvbdev = file->private_data;
+	struct dvb_net *dvbnet = dvbdev->priv;
+	int ret;
+
+	if (mutex_lock_interruptible(&dvbnet->remove_mutex))
+		return -ERESTARTSYS;
+
+	if (dvbnet->exit) {
+		mutex_unlock(&dvbnet->remove_mutex);
+		return -ENODEV;
+	}
+
+	ret = dvb_generic_open(inode, file);
+
+	mutex_unlock(&dvbnet->remove_mutex);
+
+	return ret;
+}
+
 static int dvb_net_close(struct inode *inode, struct file *file)
 {
 	struct dvb_device *dvbdev = file->private_data;
 	struct dvb_net *dvbnet = dvbdev->priv;
 
+	mutex_lock(&dvbnet->remove_mutex);
+
 	dvb_generic_release(inode, file);
 
-	if(dvbdev->users == 1 && dvbnet->exit == 1)
+	if (dvbdev->users == 1 && dvbnet->exit == 1) {
+		mutex_unlock(&dvbnet->remove_mutex);
 		wake_up(&dvbdev->wait_queue);
+	} else {
+		mutex_unlock(&dvbnet->remove_mutex);
+	}
+
 	return 0;
 }
 
@@ -1580,7 +1608,7 @@ static int dvb_net_close(struct inode *inode, struct file *file)
 static const struct file_operations dvb_net_fops = {
 	.owner = THIS_MODULE,
 	.unlocked_ioctl = dvb_net_ioctl,
-	.open = dvb_generic_open,
+	.open = locked_dvb_net_open,
 	.release = dvb_net_close,
 	.llseek = noop_llseek,
 };
@@ -1599,10 +1627,13 @@ void dvb_net_release (struct dvb_net *dvbnet)
 {
 	int i;
 
+	mutex_lock(&dvbnet->remove_mutex);
 	dvbnet->exit = 1;
+	mutex_unlock(&dvbnet->remove_mutex);
+
 	if (dvbnet->dvbdev->users < 1)
 		wait_event(dvbnet->dvbdev->wait_queue,
-				dvbnet->dvbdev->users==1);
+			   dvbnet->dvbdev->users == 1);
 
 	dvb_unregister_device(dvbnet->dvbdev);
 
@@ -1621,6 +1652,7 @@ int dvb_net_init (struct dvb_adapter *adap, struct dvb_net *dvbnet,
 	int i;
 
 	mutex_init(&dvbnet->ioctl_mutex);
+	mutex_init(&dvbnet->remove_mutex);
 
 	dvbnet->demux = dmx;
 	for (i=0; i
 static DEFINE_MUTEX(dvbdev_mutex);
+static LIST_HEAD(dvbdevfops_list);
 static int dvbdev_debug;
 module_param(dvbdev_debug, int, 0644);
@@ -453,14 +454,15 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
 			enum dvb_device_type type, int demux_sink_pads)
 {
 	struct dvb_device *dvbdev;
-	struct file_operations *dvbdevfops;
+	struct file_operations *dvbdevfops = NULL;
+	struct dvbdevfops_node *node = NULL, *new_node = NULL;
 	struct device *clsdev;
 	int minor;
 	int id, ret;
 
 	mutex_lock(&dvbdev_register_lock);
 
-	if ((id = dvbdev_get_free_id (adap, type)) < 0){
+	if ((id = dvbdev_get_free_id (adap, type)) < 0) {
 		mutex_unlock(&dvbdev_register_lock);
 		*pdvbdev = NULL;
 		pr_err("%s: couldn't find free device id\n", __func__);
@@ -468,18 +470,45 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
 	}
 
 	*pdvbdev = dvbdev = kzalloc(sizeof(*dvbdev), GFP_KERNEL);
-
 	if (!dvbdev){
 		mutex_unlock(&dvbdev_register_lock);
 		return -ENOMEM;
 	}
 
-	dvbdevfops = kmemdup(template->fops, sizeof(*dvbdevfops), GFP_KERNEL);
+	/*
+	 * When a device of the same type is probe()d more than once,
+	 * the first allocated fops are used. This prevents memory leaks
+	 * that can occur when the same device is probe()d repeatedly.
+	 */
+	list_for_each_entry(node, &dvbdevfops_list, list_head) {
+		if (node->fops->owner == adap->module &&
+		    node->type == type &&
+		    node->template == template) {
+			dvbdevfops = node->fops;
+			break;
+		}
+	}
 
-	if (!dvbdevfops){
-		kfree (dvbdev);
-		mutex_unlock(&dvbdev_register_lock);
-		return -ENOMEM;
+	if (dvbdevfops == NULL) {
+		dvbdevfops = kmemdup(template->fops, sizeof(*dvbdevfops), GFP_KERNEL);
+		if (!dvbdevfops) {
+			kfree(dvbdev);
+			mutex_unlock(&dvbdev_register_lock);
+			return -ENOMEM;
+		}
+
+		new_node = kzalloc(sizeof(struct dvbdevfops_node), GFP_KERNEL);
+		if (!new_node) {
+			kfree(dvbdevfops);
+			kfree(dvbdev);
+			mutex_unlock(&dvbdev_register_lock);
+			return -ENOMEM;
+		}
+
+		new_node->fops = dvbdevfops;
+		new_node->type = type;
+		new_node->template = template;
+		list_add_tail (&new_node->list_head, &dvbdevfops_list);
 	}
 
 	memcpy(dvbdev, template, sizeof(struct dvb_device));
@@ -490,20 +519,20 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
 	dvbdev->priv = priv;
 	dvbdev->fops = dvbdevfops;
 	init_waitqueue_head (&dvbdev->wait_queue);
-
 	dvbdevfops->owner = adap->module;
-
 	list_add_tail (&dvbdev->list_head, &adap->device_list);
-
 	down_write(&minor_rwsem);
 #ifdef CONFIG_DVB_DYNAMIC_MINORS
 	for (minor = 0; minor < MAX_DVB_MINORS; minor++)
 		if (dvb_minors[minor] == NULL)
 			break;
-
 	if (minor == MAX_DVB_MINORS) {
+		if (new_node) {
+			list_del (&new_node->list_head);
+			kfree(dvbdevfops);
+			kfree(new_node);
+		}
 		list_del (&dvbdev->list_head);
-		kfree(dvbdevfops);
 		kfree(dvbdev);
 		up_write(&minor_rwsem);
 		mutex_unlock(&dvbdev_register_lock);
@@ -512,41 +541,47 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
 #else
 	minor = nums2minor(adap->num, type, id);
 #endif
-
 	dvbdev->minor = minor;
 	dvb_minors[minor] = dvb_device_get(dvbdev);
 	up_write(&minor_rwsem);
-
 	ret = dvb_register_media_device(dvbdev, type, minor, demux_sink_pads);
 	if (ret) {
 		pr_err("%s: dvb_register_media_device failed to create the mediagraph\n",
		      __func__);
-
+		if (new_node) {
+			list_del (&new_node->list_head);
+			kfree(dvbdevfops);
+			kfree(new_node);
+		}
 		dvb_media_device_free(dvbdev);
 		list_del (&dvbdev->list_head);
-		kfree(dvbdevfops);
 		kfree(dvbdev);
 		mutex_unlock(&dvbdev_register_lock);
 		return ret;
 	}
-	mutex_unlock(&dvbdev_register_lock);
-
 	clsdev = device_create(dvb_class, adap->device,
			       MKDEV(DVB_MAJOR, minor),
			       dvbdev, "dvb%d.%s%d", adap->num, dnames[type], id);
 	if (IS_ERR(clsdev)) {
 		pr_err("%s: failed to create device dvb%d.%s%d (%ld)\n",
		       __func__, adap->num, dnames[type], id, PTR_ERR(clsdev));
+		if (new_node) {
+			list_del (&new_node->list_head);
+			kfree(dvbdevfops);
+			kfree(new_node);
+		}
 		dvb_media_device_free(dvbdev);
 		list_del (&dvbdev->list_head);
-		kfree(dvbdevfops);
 		kfree(dvbdev);
+		mutex_unlock(&dvbdev_register_lock);
 		return PTR_ERR(clsdev);
 	}
+
 	dprintk("DVB: register adapter%d/%s%d @ minor: %i (0x%02x)\n",
		adap->num, dnames[type], id, minor, minor);
+	mutex_unlock(&dvbdev_register_lock);
 	return 0;
 }
 EXPORT_SYMBOL(dvb_register_device);
@@ -575,7 +610,6 @@ static void dvb_free_device(struct kref *ref)
 {
 	struct dvb_device *dvbdev = container_of(ref, struct dvb_device, ref);
 
-	kfree (dvbdev->fops);
 	kfree (dvbdev);
 }
 
@@ -1081,9 +1115,17 @@ error:
 
 static void __exit exit_dvbdev(void)
 {
+	struct dvbdevfops_node *node, *next;
+
 	class_destroy(dvb_class);
 	cdev_del(&dvb_device_cdev);
 	unregister_chrdev_region(MKDEV(DVB_MAJOR, 0), MAX_DVB_MINORS);
+
+	list_for_each_entry_safe(node, next, &dvbdevfops_list, list_head) {
+		list_del (&node->list_head);
+		kfree(node->fops);
+		kfree(node);
+	}
 }
 
 subsys_initcall(init_dvbdev);
diff --git a/drivers/media/dvb-frontends/mn88443x.c b/drivers/media/dvb-frontends/mn88443x.c
index 1f1753f2ab1a3ed5f1b52a87d9848e132cb94645..0782f8377eb2f333731fe93de70f0804d95ffcc5 100644
--- a/drivers/media/dvb-frontends/mn88443x.c
+++ b/drivers/media/dvb-frontends/mn88443x.c
@@ -798,7 +798,7 @@ MODULE_DEVICE_TABLE(i2c, mn88443x_i2c_id);
 static struct i2c_driver mn88443x_driver = {
 	.driver = {
 		.name = "mn88443x",
-		.of_match_table = of_match_ptr(mn88443x_of_match),
+		.of_match_table = mn88443x_of_match,
 	},
 	.probe_new = mn88443x_probe,
 	.remove = mn88443x_remove,
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
index 8287851b5ffdc8d591bc2f1354eaa136168e057d..d85bfbb77a2509eaf5da9b4716d86eb3d05e3bc1 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
@@ -697,7 +697,7 @@ static void netup_unidvb_dma_fini(struct netup_unidvb_dev *ndev, int num)
 	netup_unidvb_dma_enable(dma, 0);
 	msleep(50);
 	cancel_work_sync(&dma->work);
-	del_timer(&dma->timeout);
+	del_timer_sync(&dma->timeout);
 }
 
 static int netup_unidvb_dma_setup(struct netup_unidvb_dev *ndev)
@@ -887,12 +887,7 @@ static int netup_unidvb_initdev(struct pci_dev *pci_dev,
 		ndev->lmmio0, (u32)pci_resource_len(pci_dev, 0),
 		ndev->lmmio1, (u32)pci_resource_len(pci_dev, 1),
 		pci_dev->irq);
-	if (request_irq(pci_dev->irq, netup_unidvb_isr, IRQF_SHARED,
-			"netup_unidvb", pci_dev) < 0) {
-		dev_err(&pci_dev->dev,
-			"%s(): can't get IRQ %d\n", __func__, pci_dev->irq);
-		goto irq_request_err;
-	}
+
 	ndev->dma_size = 2 * 188 *
 		NETUP_DMA_BLOCKS_COUNT * NETUP_DMA_PACKETS_COUNT;
 	ndev->dma_virt = dma_alloc_coherent(&pci_dev->dev,
@@ -933,6 +928,14 @@ static int netup_unidvb_initdev(struct pci_dev *pci_dev,
 		dev_err(&pci_dev->dev, "netup_unidvb: DMA setup failed\n");
 		goto dma_setup_err;
 	}
+
+	if (request_irq(pci_dev->irq, netup_unidvb_isr, IRQF_SHARED,
+			"netup_unidvb", pci_dev) < 0) {
+		dev_err(&pci_dev->dev,
+			"%s(): can't get IRQ %d\n", __func__, pci_dev->irq);
+		goto dma_setup_err;
+	}
+
 	dev_info(&pci_dev->dev,
 		 "netup_unidvb: device has been initialized\n");
 	return 0;
@@ -951,8 +954,6 @@ spi_setup_err:
dma_free_coherent(&pci_dev->dev, ndev->dma_size, ndev->dma_virt, ndev->dma_phys); dma_alloc_err: - free_irq(pci_dev->irq, pci_dev); -irq_request_err: iounmap(ndev->lmmio1); pci_bar1_error: iounmap(ndev->lmmio0); diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c index 29991551cf6140671e486490ff648871b0860250..0fbd030026c729620314e7f15355bb9c24f2797a 100644 --- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c +++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c @@ -584,6 +584,9 @@ static void mtk_init_vdec_params(struct mtk_vcodec_ctx *ctx) if (!(ctx->dev->dec_capability & VCODEC_CAPABILITY_4K_DISABLED)) { for (i = 0; i < num_supported_formats; i++) { + if (mtk_video_formats[i].type != MTK_FMT_DEC) + continue; + mtk_video_formats[i].frmsize.max_width = VCODEC_DEC_4K_CODED_WIDTH; mtk_video_formats[i].frmsize.max_height = diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c index 898f32177b12a5cc2b6aa6a65b9c7f1b8719bd45..8640db306026804270e82cb4642f9592f51b5c0b 100644 --- a/drivers/media/platform/qcom/camss/camss-video.c +++ b/drivers/media/platform/qcom/camss/camss-video.c @@ -353,7 +353,6 @@ static int video_get_subdev_format(struct camss_video *video, if (subdev == NULL) return -EPIPE; - memset(&fmt, 0, sizeof(fmt)); fmt.pad = pad; ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt); diff --git a/drivers/media/platform/verisilicon/hantro_v4l2.c b/drivers/media/platform/verisilicon/hantro_v4l2.c index 835518534e3b94dc6f352cf3a8fe0330db4d79c5..61cfaaf4e927b35a81e55ccae6e2d4b8b2f13dd5 100644 --- a/drivers/media/platform/verisilicon/hantro_v4l2.c +++ b/drivers/media/platform/verisilicon/hantro_v4l2.c @@ -397,10 +397,12 @@ hantro_reset_raw_fmt(struct hantro_ctx *ctx, int bit_depth) if (!raw_vpu_fmt) return -EINVAL; - if (ctx->is_encoder) + if (ctx->is_encoder) { encoded_fmt = &ctx->dst_fmt; - else + ctx->vpu_src_fmt = raw_vpu_fmt; + } else { encoded_fmt = &ctx->src_fmt; + } hantro_reset_fmt(&raw_fmt, raw_vpu_fmt); raw_fmt.width = encoded_fmt->width; diff --git a/drivers/media/usb/dvb-usb-v2/ce6230.c b/drivers/media/usb/dvb-usb-v2/ce6230.c index 44540de1a206686fcc366a2a8701bf15fe5b221b..d3b5cb4a24daf9c47a4903a3618aee977abeafd3 100644 --- a/drivers/media/usb/dvb-usb-v2/ce6230.c +++ b/drivers/media/usb/dvb-usb-v2/ce6230.c @@ -101,6 +101,10 @@ static int ce6230_i2c_master_xfer(struct i2c_adapter *adap, if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) { if (msg[i].addr == ce6230_zl10353_config.demod_address) { + if (msg[i].len < 1) { + i = -EOPNOTSUPP; + break; + } req.cmd = DEMOD_READ; req.value = msg[i].addr >> 1; req.index = msg[i].buf[0]; @@ -117,6 +121,10 @@ static int ce6230_i2c_master_xfer(struct i2c_adapter *adap, } else { if (msg[i].addr == ce6230_zl10353_config.demod_address) { + if (msg[i].len < 1) { + i = -EOPNOTSUPP; + break; + } req.cmd = DEMOD_WRITE; req.value = msg[i].addr >> 1; req.index = msg[i].buf[0]; diff --git a/drivers/media/usb/dvb-usb-v2/ec168.c b/drivers/media/usb/dvb-usb-v2/ec168.c index 7ed0ab9e429b1457abc5e91b9f4795e96cdda631..0e4773fc025c9e74f8c86a451e9df60b513ccc8f 100644 --- a/drivers/media/usb/dvb-usb-v2/ec168.c +++ b/drivers/media/usb/dvb-usb-v2/ec168.c @@ -115,6 +115,10 @@ static int ec168_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], while (i < num) { if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) { if (msg[i].addr == ec168_ec100_config.demod_address) { + if 
(msg[i].len < 1) { + i = -EOPNOTSUPP; + break; + } req.cmd = READ_DEMOD; req.value = 0; req.index = 0xff00 + msg[i].buf[0]; /* reg */ @@ -131,6 +135,10 @@ static int ec168_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], } } else { if (msg[i].addr == ec168_ec100_config.demod_address) { + if (msg[i].len < 1) { + i = -EOPNOTSUPP; + break; + } req.cmd = WRITE_DEMOD; req.value = msg[i].buf[1]; /* val */ req.index = 0xff00 + msg[i].buf[0]; /* reg */ @@ -139,6 +147,10 @@ static int ec168_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], ret = ec168_ctrl_msg(d, &req); i += 1; } else { + if (msg[i].len < 1) { + i = -EOPNOTSUPP; + break; + } req.cmd = WRITE_I2C; req.value = msg[i].buf[0]; /* val */ req.index = 0x0100 + msg[i].addr; /* I2C addr */ diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c index 795a012d402002de74559c0aab4e0d80dc826e3e..f7884bb56fccff29205c95f20b00fd098cec3ee3 100644 --- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c +++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c @@ -176,6 +176,10 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], ret = -EOPNOTSUPP; goto err_mutex_unlock; } else if (msg[0].addr == 0x10) { + if (msg[0].len < 1 || msg[1].len < 1) { + ret = -EOPNOTSUPP; + goto err_mutex_unlock; + } /* method 1 - integrated demod */ if (msg[0].buf[0] == 0x00) { /* return demod page from driver cache */ @@ -189,6 +193,10 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], ret = rtl28xxu_ctrl_msg(d, &req); } } else if (msg[0].len < 2) { + if (msg[0].len < 1) { + ret = -EOPNOTSUPP; + goto err_mutex_unlock; + } /* method 2 - old I2C */ req.value = (msg[0].buf[0] << 8) | (msg[0].addr << 1); req.index = CMD_I2C_RD; @@ -217,8 +225,16 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], ret = -EOPNOTSUPP; goto err_mutex_unlock; } else if (msg[0].addr == 0x10) { + if (msg[0].len < 1) { + ret = -EOPNOTSUPP; + goto err_mutex_unlock; + } /* method 1 - integrated demod */ if (msg[0].buf[0] == 0x00) { + if (msg[0].len < 2) { + ret = -EOPNOTSUPP; + goto err_mutex_unlock; + } /* save demod page for later demod access */ dev->page = msg[0].buf[1]; ret = 0; @@ -231,6 +247,10 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], ret = rtl28xxu_ctrl_msg(d, &req); } } else if ((msg[0].len < 23) && (!dev->new_i2c_write)) { + if (msg[0].len < 1) { + ret = -EOPNOTSUPP; + goto err_mutex_unlock; + } /* method 2 - old I2C */ req.value = (msg[0].buf[0] << 8) | (msg[0].addr << 1); req.index = CMD_I2C_WR; diff --git a/drivers/media/usb/dvb-usb/az6027.c b/drivers/media/usb/dvb-usb/az6027.c index 7d78ee09be5e1dc40d813777d5e26e2abeb8aba3..a31c6f82f4e90fbcd08bc600ada4948180a9970f 100644 --- a/drivers/media/usb/dvb-usb/az6027.c +++ b/drivers/media/usb/dvb-usb/az6027.c @@ -988,6 +988,10 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n /* write/read request */ if (i + 1 < num && (msg[i + 1].flags & I2C_M_RD)) { req = 0xB9; + if (msg[i].len < 1) { + i = -EOPNOTSUPP; + break; + } index = (((msg[i].buf[0] << 8) & 0xff00) | (msg[i].buf[1] & 0x00ff)); value = msg[i].addr + (msg[i].len << 8); length = msg[i + 1].len + 6; @@ -1001,6 +1005,10 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n /* demod 16bit addr */ req = 0xBD; + if (msg[i].len < 1) { + i = -EOPNOTSUPP; + break; + } index = (((msg[i].buf[0] << 8) & 0xff00) | (msg[i].buf[1] & 0x00ff)); value = msg[i].addr + (2 << 8); length = 
msg[i].len - 2; @@ -1026,6 +1034,10 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n } else { req = 0xBD; + if (msg[i].len < 1) { + i = -EOPNOTSUPP; + break; + } index = msg[i].buf[0] & 0x00FF; value = msg[i].addr + (1 << 8); length = msg[i].len - 1; diff --git a/drivers/media/usb/dvb-usb/digitv.c b/drivers/media/usb/dvb-usb/digitv.c index 2756815a780bca7e2c2fc07f549d0c3ca99651a4..32134be1691489fa3bed90cc97f68f8eea8179a0 100644 --- a/drivers/media/usb/dvb-usb/digitv.c +++ b/drivers/media/usb/dvb-usb/digitv.c @@ -63,6 +63,10 @@ static int digitv_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num warn("more than 2 i2c messages at a time is not handled yet. TODO."); for (i = 0; i < num; i++) { + if (msg[i].len < 1) { + i = -EOPNOTSUPP; + break; + } /* write/read request */ if (i+1 < num && (msg[i+1].flags & I2C_M_RD)) { if (digitv_ctrl_msg(d, USB_READ_COFDM, msg[i].buf[0], NULL, 0, diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c index 0ca764282c767eb1b12aa7cb515453068fb2d4a7..8747960e614611a71f2d5ec78ecac77f633207f9 100644 --- a/drivers/media/usb/dvb-usb/dw2102.c +++ b/drivers/media/usb/dvb-usb/dw2102.c @@ -946,7 +946,7 @@ static int su3000_read_mac_address(struct dvb_usb_device *d, u8 mac[6]) for (i = 0; i < 6; i++) { obuf[1] = 0xf0 + i; if (i2c_transfer(&d->i2c_adap, msg, 2) != 2) - break; + return -1; else mac[i] = ibuf[0]; } diff --git a/drivers/media/usb/pvrusb2/Kconfig b/drivers/media/usb/pvrusb2/Kconfig index 9501b10b31aa57d1683f7ace1a0c05ebc7c604e8..0df10270dbdfcf47a1e428d095182e42cfc686ee 100644 --- a/drivers/media/usb/pvrusb2/Kconfig +++ b/drivers/media/usb/pvrusb2/Kconfig @@ -37,6 +37,7 @@ config VIDEO_PVRUSB2_DVB bool "pvrusb2 ATSC/DVB support" default y depends on VIDEO_PVRUSB2 && DVB_CORE + depends on VIDEO_PVRUSB2=m || DVB_CORE=y select DVB_LGDT330X if MEDIA_SUBDRV_AUTOSELECT select DVB_S5H1409 if MEDIA_SUBDRV_AUTOSELECT select DVB_S5H1411 if MEDIA_SUBDRV_AUTOSELECT diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c index 38822cedd93a97c23486de57fa8931f9723f92fa..c4474d4c44e28adc84a26265ac45dd619bef3df2 100644 --- a/drivers/media/usb/ttusb-dec/ttusb_dec.c +++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c @@ -1544,8 +1544,7 @@ static void ttusb_dec_exit_dvb(struct ttusb_dec *dec) dvb_dmx_release(&dec->demux); if (dec->fe) { dvb_unregister_frontend(dec->fe); - if (dec->fe->ops.release) - dec->fe->ops.release(dec->fe); + dvb_frontend_detach(dec->fe); } dvb_unregister_adapter(&dec->adapter); } diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c index 7aefa76a42b316f30cb6643d833a92b94547e9bc..d631ce4f9f7bb298d2b4c309d0bf242cdbd61a93 100644 --- a/drivers/media/usb/uvc/uvc_driver.c +++ b/drivers/media/usb/uvc/uvc_driver.c @@ -251,14 +251,17 @@ static int uvc_parse_format(struct uvc_device *dev, /* Find the format descriptor from its GUID. */ fmtdesc = uvc_format_by_guid(&buffer[5]); - if (fmtdesc != NULL) { - format->fcc = fmtdesc->fcc; - } else { + if (!fmtdesc) { + /* + * Unknown video formats are not fatal errors, the + * caller will skip this descriptor. 
+ */ dev_info(&streaming->intf->dev, "Unknown video format %pUl\n", &buffer[5]); - format->fcc = 0; + return 0; } + format->fcc = fmtdesc->fcc; format->bpp = buffer[21]; /* @@ -675,7 +678,7 @@ static int uvc_parse_streaming(struct uvc_device *dev, interval = (u32 *)&frame[nframes]; streaming->format = format; - streaming->nformats = nformats; + streaming->nformats = 0; /* Parse the format descriptors. */ while (buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE) { @@ -689,7 +692,10 @@ static int uvc_parse_streaming(struct uvc_device *dev, &interval, buffer, buflen); if (ret < 0) goto error; + if (!ret) + break; + streaming->nformats++; frame += format->nframes; format++; diff --git a/drivers/media/v4l2-core/v4l2-mc.c b/drivers/media/v4l2-core/v4l2-mc.c index bf0c18100664f56366a188ee22a1f354184d3b39..22fe08fce0a9abe8a3fb359a014b2471acab2ac3 100644 --- a/drivers/media/v4l2-core/v4l2-mc.c +++ b/drivers/media/v4l2-core/v4l2-mc.c @@ -314,8 +314,7 @@ int v4l2_create_fwnode_links_to_pad(struct v4l2_subdev *src_sd, { struct fwnode_handle *endpoint; - if (!(sink->flags & MEDIA_PAD_FL_SINK) || - !is_media_entity_v4l2_subdev(sink->entity)) + if (!(sink->flags & MEDIA_PAD_FL_SINK)) return -EINVAL; fwnode_graph_for_each_endpoint(dev_fwnode(src_sd->dev), endpoint) { diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig index f0a7531f354c1215df6c25b55590cb5e925a5290..2d240bfa819f8bd8bdd6f854609a91adb737f5b3 100644 --- a/drivers/misc/eeprom/Kconfig +++ b/drivers/misc/eeprom/Kconfig @@ -6,6 +6,7 @@ config EEPROM_AT24 depends on I2C && SYSFS select NVMEM select NVMEM_SYSFS + select REGMAP select REGMAP_I2C help Enable this driver to get read/write support to most I2C EEPROMs diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index f48466960f1b9c0f9b78ff4c6d0038c7719e124f..30d4d0476248ffdfeec0f5c2c2c7a71840e26d20 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -316,12 +316,14 @@ static void fastrpc_free_map(struct kref *ref) if (map->table) { if (map->attr & FASTRPC_ATTR_SECUREMAP) { struct qcom_scm_vmperm perm; + int vmid = map->fl->cctx->vmperms[0].vmid; + u64 src_perms = BIT(QCOM_SCM_VMID_HLOS) | BIT(vmid); int err = 0; perm.vmid = QCOM_SCM_VMID_HLOS; perm.perm = QCOM_SCM_PERM_RWX; err = qcom_scm_assign_mem(map->phys, map->size, - &map->fl->cctx->perms, &perm, 1); + &src_perms, &perm, 1); if (err) { dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d", map->phys, map->size, err); @@ -787,8 +789,12 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd, goto map_err; } - map->phys = sg_dma_address(map->table->sgl); - map->phys += ((u64)fl->sctx->sid << 32); + if (attr & FASTRPC_ATTR_SECUREMAP) { + map->phys = sg_phys(map->table->sgl); + } else { + map->phys = sg_dma_address(map->table->sgl); + map->phys += ((u64)fl->sctx->sid << 32); + } map->size = len; map->va = sg_virt(map->table->sgl); map->len = len; @@ -798,9 +804,15 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd, * If subsystem VMIDs are defined in DTSI, then do * hyp_assign from HLOS to those VM(s) */ + u64 src_perms = BIT(QCOM_SCM_VMID_HLOS); + struct qcom_scm_vmperm dst_perms[2] = {0}; + + dst_perms[0].vmid = QCOM_SCM_VMID_HLOS; + dst_perms[0].perm = QCOM_SCM_PERM_RW; + dst_perms[1].vmid = fl->cctx->vmperms[0].vmid; + dst_perms[1].perm = QCOM_SCM_PERM_RWX; map->attr = attr; - err = qcom_scm_assign_mem(map->phys, (u64)map->size, &fl->cctx->perms, - fl->cctx->vmperms, fl->cctx->vmcount); + err = qcom_scm_assign_mem(map->phys, (u64)map->size, 
&src_perms, dst_perms, 2); if (err) { dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d", map->phys, map->size, err); @@ -1892,7 +1904,7 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp) req.vaddrout = rsp_msg.vaddr; /* Add memory to static PD pool, protection thru hypervisor */ - if (req.flags != ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) { + if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) { struct qcom_scm_vmperm perm; perm.vmid = QCOM_SCM_VMID_HLOS; @@ -2337,8 +2349,10 @@ static void fastrpc_notify_users(struct fastrpc_user *user) struct fastrpc_invoke_ctx *ctx; spin_lock(&user->lock); - list_for_each_entry(ctx, &user->pending, node) + list_for_each_entry(ctx, &user->pending, node) { + ctx->retval = -EPIPE; complete(&ctx->work); + } spin_unlock(&user->lock); } @@ -2349,7 +2363,9 @@ static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev) struct fastrpc_user *user; unsigned long flags; + /* No invocations past this point */ spin_lock_irqsave(&cctx->lock, flags); + cctx->rpdev = NULL; list_for_each_entry(user, &cctx->users, user) fastrpc_notify_users(user); spin_unlock_irqrestore(&cctx->lock, flags); @@ -2368,7 +2384,6 @@ static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev) of_platform_depopulate(&rpdev->dev); - cctx->rpdev = NULL; fastrpc_channel_ctx_put(cctx); } diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 00c33edb9fb94ed4c6cd7c4f0830ea5d71d901e1..d920c417838930214b7d9c77fc4208d66c177e6e 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -264,6 +264,7 @@ static ssize_t power_ro_lock_store(struct device *dev, goto out_put; } req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP; + req_to_mmc_queue_req(req)->drv_op_result = -EIO; blk_execute_rq(req, false); ret = req_to_mmc_queue_req(req)->drv_op_result; blk_mq_free_request(req); @@ -651,6 +652,7 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md, idatas[0] = idata; req_to_mmc_queue_req(req)->drv_op = rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL; + req_to_mmc_queue_req(req)->drv_op_result = -EIO; req_to_mmc_queue_req(req)->drv_op_data = idatas; req_to_mmc_queue_req(req)->ioc_count = 1; blk_execute_rq(req, false); @@ -722,6 +724,7 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md, } req_to_mmc_queue_req(req)->drv_op = rpmb ? 
MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL; + req_to_mmc_queue_req(req)->drv_op_result = -EIO; req_to_mmc_queue_req(req)->drv_op_data = idata; req_to_mmc_queue_req(req)->ioc_count = n; blk_execute_rq(req, false); @@ -2806,6 +2809,7 @@ static int mmc_dbg_card_status_get(void *data, u64 *val) if (IS_ERR(req)) return PTR_ERR(req); req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS; + req_to_mmc_queue_req(req)->drv_op_result = -EIO; blk_execute_rq(req, false); ret = req_to_mmc_queue_req(req)->drv_op_result; if (ret >= 0) { @@ -2844,6 +2848,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp) goto out_free; } req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD; + req_to_mmc_queue_req(req)->drv_op_result = -EIO; req_to_mmc_queue_req(req)->drv_op_data = &ext_csd; blk_execute_rq(req, false); err = req_to_mmc_queue_req(req)->drv_op_result; diff --git a/drivers/mmc/core/pwrseq_sd8787.c b/drivers/mmc/core/pwrseq_sd8787.c index 2e120ad83020f8f51d4bb529b1a470f9de65bf78..0c5f5e371e1f8311f3454e1fc0c462cde2841787 100644 --- a/drivers/mmc/core/pwrseq_sd8787.c +++ b/drivers/mmc/core/pwrseq_sd8787.c @@ -28,7 +28,6 @@ struct mmc_pwrseq_sd8787 { struct mmc_pwrseq pwrseq; struct gpio_desc *reset_gpio; struct gpio_desc *pwrdn_gpio; - u32 reset_pwrdwn_delay_ms; }; #define to_pwrseq_sd8787(p) container_of(p, struct mmc_pwrseq_sd8787, pwrseq) @@ -39,7 +38,7 @@ static void mmc_pwrseq_sd8787_pre_power_on(struct mmc_host *host) gpiod_set_value_cansleep(pwrseq->reset_gpio, 1); - msleep(pwrseq->reset_pwrdwn_delay_ms); + msleep(300); gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 1); } @@ -51,17 +50,37 @@ static void mmc_pwrseq_sd8787_power_off(struct mmc_host *host) gpiod_set_value_cansleep(pwrseq->reset_gpio, 0); } +static void mmc_pwrseq_wilc1000_pre_power_on(struct mmc_host *host) +{ + struct mmc_pwrseq_sd8787 *pwrseq = to_pwrseq_sd8787(host->pwrseq); + + /* The pwrdn_gpio is really CHIP_EN, reset_gpio is RESETN */ + gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 1); + msleep(5); + gpiod_set_value_cansleep(pwrseq->reset_gpio, 1); +} + +static void mmc_pwrseq_wilc1000_power_off(struct mmc_host *host) +{ + struct mmc_pwrseq_sd8787 *pwrseq = to_pwrseq_sd8787(host->pwrseq); + + gpiod_set_value_cansleep(pwrseq->reset_gpio, 0); + gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 0); +} + static const struct mmc_pwrseq_ops mmc_pwrseq_sd8787_ops = { .pre_power_on = mmc_pwrseq_sd8787_pre_power_on, .power_off = mmc_pwrseq_sd8787_power_off, }; -static const u32 sd8787_delay_ms = 300; -static const u32 wilc1000_delay_ms = 5; +static const struct mmc_pwrseq_ops mmc_pwrseq_wilc1000_ops = { + .pre_power_on = mmc_pwrseq_wilc1000_pre_power_on, + .power_off = mmc_pwrseq_wilc1000_power_off, +}; static const struct of_device_id mmc_pwrseq_sd8787_of_match[] = { - { .compatible = "mmc-pwrseq-sd8787", .data = &sd8787_delay_ms }, - { .compatible = "mmc-pwrseq-wilc1000", .data = &wilc1000_delay_ms }, + { .compatible = "mmc-pwrseq-sd8787", .data = &mmc_pwrseq_sd8787_ops }, + { .compatible = "mmc-pwrseq-wilc1000", .data = &mmc_pwrseq_wilc1000_ops }, {/* sentinel */}, }; MODULE_DEVICE_TABLE(of, mmc_pwrseq_sd8787_of_match); @@ -77,7 +96,6 @@ static int mmc_pwrseq_sd8787_probe(struct platform_device *pdev) return -ENOMEM; match = of_match_node(mmc_pwrseq_sd8787_of_match, pdev->dev.of_node); - pwrseq->reset_pwrdwn_delay_ms = *(u32 *)match->data; pwrseq->pwrdn_gpio = devm_gpiod_get(dev, "powerdown", GPIOD_OUT_LOW); if (IS_ERR(pwrseq->pwrdn_gpio)) @@ -88,7 +106,7 @@ static int mmc_pwrseq_sd8787_probe(struct 
platform_device *pdev) return PTR_ERR(pwrseq->reset_gpio); pwrseq->pwrseq.dev = dev; - pwrseq->pwrseq.ops = &mmc_pwrseq_sd8787_ops; + pwrseq->pwrseq.ops = match->data; pwrseq->pwrseq.owner = THIS_MODULE; platform_set_drvdata(pdev, pwrseq); diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c index 8648f7e63ca1a6554ea40071c244778f1d9fb8de..eea208856ce0d0de6b5cc3cfd8c50474d4e9f317 100644 --- a/drivers/mmc/host/bcm2835.c +++ b/drivers/mmc/host/bcm2835.c @@ -1403,8 +1403,8 @@ static int bcm2835_probe(struct platform_device *pdev) host->max_clk = clk_get_rate(clk); host->irq = platform_get_irq(pdev, 0); - if (host->irq <= 0) { - ret = -EINVAL; + if (host->irq < 0) { + ret = host->irq; goto err; } diff --git a/drivers/mmc/host/litex_mmc.c b/drivers/mmc/host/litex_mmc.c index 39c6707fdfdbcdb160902abcc377794d471bd3bf..9af6b0902efe1462080ee492ba9b12ff998eec64 100644 --- a/drivers/mmc/host/litex_mmc.c +++ b/drivers/mmc/host/litex_mmc.c @@ -649,6 +649,7 @@ static struct platform_driver litex_mmc_driver = { .driver = { .name = "litex-mmc", .of_match_table = litex_match, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, }, }; module_platform_driver(litex_mmc_driver); diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index b8514d9d5e736d6ed1a3805940722957cb8fb56c..ee9a25b900aec98f5c008b74adb42a7b2e80d6f0 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -991,11 +991,8 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id) if (data && !cmd->error) data->bytes_xfered = data->blksz * data->blocks; - if (meson_mmc_bounce_buf_read(data) || - meson_mmc_get_next_command(cmd)) - ret = IRQ_WAKE_THREAD; - else - ret = IRQ_HANDLED; + + return IRQ_WAKE_THREAD; } out: @@ -1007,9 +1004,6 @@ out: writel(start, host->regs + SD_EMMC_START); } - if (ret == IRQ_HANDLED) - meson_mmc_request_done(host->mmc, cmd->mrq); - return ret; } @@ -1192,8 +1186,8 @@ static int meson_mmc_probe(struct platform_device *pdev) return PTR_ERR(host->regs); host->irq = platform_get_irq(pdev, 0); - if (host->irq <= 0) - return -EINVAL; + if (host->irq < 0) + return host->irq; cd_irq = platform_get_irq_optional(pdev, 1); mmc_gpio_set_cd_irq(mmc, cd_irq); diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index f2b2e8b0574e8a8c02bcce349e9b0e7d307f3ee1..696cbef3ff7def13bda84d3a40b0d7a857cf13bc 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -1735,7 +1735,8 @@ static void mmci_set_max_busy_timeout(struct mmc_host *mmc) return; if (host->variant->busy_timeout && mmc->actual_clock) - max_busy_timeout = ~0UL / (mmc->actual_clock / MSEC_PER_SEC); + max_busy_timeout = U32_MAX / DIV_ROUND_UP(mmc->actual_clock, + MSEC_PER_SEC); mmc->max_busy_timeout = max_busy_timeout; } diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index edade0e54a0c284c70ffc2056483779a2764c8a9..9785ec91654f76aef4cbb4a5dbab48f9648527f1 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -2680,7 +2680,7 @@ static int msdc_drv_probe(struct platform_device *pdev) host->irq = platform_get_irq(pdev, 0); if (host->irq < 0) { - ret = -EINVAL; + ret = host->irq; goto host_free; } diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c index 629efbe639c4f6e52548bfad9e2af52a291a6bc3..b4f6a0a2fcb51c540bd735fbdf80235ac7568b6e 100644 --- a/drivers/mmc/host/mvsdio.c +++ b/drivers/mmc/host/mvsdio.c @@ -704,7 +704,7 @@ static int mvsd_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); if (irq < 0) - return -ENXIO; + 
return irq; mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev); if (!mmc) { diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index ce78edfb402b2cacc41a10931ff62e433f44c216..86454f1182bb1968f30053a2a2791c3f48621277 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c @@ -1343,7 +1343,7 @@ static int mmc_omap_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) - return -ENXIO; + return irq; host->virt_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(host->virt_base)) diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 517dde777413409a0300dcc138cff55e0ad1acdc..1e0f2d7774bd10c467cef1897f14c79874d55e5b 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -1791,9 +1791,11 @@ static int omap_hsmmc_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - irq = platform_get_irq(pdev, 0); - if (res == NULL || irq < 0) + if (!res) return -ENXIO; + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(base)) diff --git a/drivers/mmc/host/owl-mmc.c b/drivers/mmc/host/owl-mmc.c index 6f9d31a886ba1a306afe3a30af24b07c71ff58fe..1bf22b08b373d2dd7541b63dbd10b9e9c73dc692 100644 --- a/drivers/mmc/host/owl-mmc.c +++ b/drivers/mmc/host/owl-mmc.c @@ -637,7 +637,7 @@ static int owl_mmc_probe(struct platform_device *pdev) owl_host->irq = platform_get_irq(pdev, 0); if (owl_host->irq < 0) { - ret = -EINVAL; + ret = owl_host->irq; goto err_release_channel; } diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index 8f0e639236b15c4cbb725f52196c52a00483b1d6..edf2e6c14dc6f09970d5583f6369faf05a33153c 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -829,7 +829,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev) host->ops = &sdhci_acpi_ops_dflt; host->irq = platform_get_irq(pdev, 0); if (host->irq < 0) { - err = -EINVAL; + err = host->irq; goto err_free; } diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c index b24aa27da50c40dd861212d8b7e7cfc848e962cd..d2f62505468932b069e3411f2a4b7418ffece517 100644 --- a/drivers/mmc/host/sdhci-cadence.c +++ b/drivers/mmc/host/sdhci-cadence.c @@ -540,9 +540,11 @@ static int sdhci_cdns_probe(struct platform_device *pdev) if (host->mmc->caps & MMC_CAP_HW_RESET) { priv->rst_hw = devm_reset_control_get_optional_exclusive(dev, NULL); - if (IS_ERR(priv->rst_hw)) - return dev_err_probe(mmc_dev(host->mmc), PTR_ERR(priv->rst_hw), - "reset controller error\n"); + if (IS_ERR(priv->rst_hw)) { + ret = dev_err_probe(mmc_dev(host->mmc), PTR_ERR(priv->rst_hw), + "reset controller error\n"); + goto free; + } if (priv->rst_hw) host->mmc_host_ops.card_hw_reset = sdhci_cdns_mmc_hw_reset; } diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index d7c0c0b9e26c5e10563337cfbfca23d739956dd6..eebf94604a7fdd45dc578bb1fa7d4217ad996927 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -1634,6 +1634,10 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, if (ret) return ret; + /* HS400/HS400ES require 8 bit bus */ + if (!(host->mmc->caps & MMC_CAP_8_BIT_DATA)) + host->mmc->caps2 &= ~(MMC_CAP2_HS400 | MMC_CAP2_HS400_ES); + if (mmc_gpio_get_cd(host->mmc) >= 0) host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; @@ -1724,10 +1728,6 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) 
host->mmc_host_ops.init_card = usdhc_init_card; } - err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data); - if (err) - goto disable_ahb_clk; - if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) sdhci_esdhc_ops.platform_execute_tuning = esdhc_executing_tuning; @@ -1735,15 +1735,13 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536) host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; - if (host->mmc->caps & MMC_CAP_8_BIT_DATA && - imx_data->socdata->flags & ESDHC_FLAG_HS400) + if (imx_data->socdata->flags & ESDHC_FLAG_HS400) host->mmc->caps2 |= MMC_CAP2_HS400; if (imx_data->socdata->flags & ESDHC_FLAG_BROKEN_AUTO_CMD23) host->quirks2 |= SDHCI_QUIRK2_ACMD23_BROKEN; - if (host->mmc->caps & MMC_CAP_8_BIT_DATA && - imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) { + if (imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) { host->mmc->caps2 |= MMC_CAP2_HS400_ES; host->mmc_host_ops.hs400_enhanced_strobe = esdhc_hs400_enhanced_strobe; @@ -1765,6 +1763,10 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) goto disable_ahb_clk; } + err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data); + if (err) + goto disable_ahb_clk; + sdhci_esdhc_imx_hwinit(host); err = sdhci_add_host(host); diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 8ac81d57a3dfe5ceffb767cdab0449a5998ac5d1..1877d583fe8c371fed0935bc6845c04a90423d47 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -2479,6 +2479,9 @@ static inline void sdhci_msm_get_of_property(struct platform_device *pdev, msm_host->ddr_config = DDR_CONFIG_POR_VAL; of_property_read_u32(node, "qcom,dll-config", &msm_host->dll_config); + + if (of_device_is_compatible(node, "qcom,msm8916-sdhci")) + host->quirks2 |= SDHCI_QUIRK2_BROKEN_64_BIT_DMA; } static int sdhci_msm_gcc_reset(struct device *dev, struct sdhci_host *host) diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c index d463e2fd5b1a8bfe38bd09f825f135dde485fb8c..c79035727b20b36b7d49a5088e2356d01c805746 100644 --- a/drivers/mmc/host/sdhci-spear.c +++ b/drivers/mmc/host/sdhci-spear.c @@ -65,8 +65,8 @@ static int sdhci_probe(struct platform_device *pdev) host->hw_name = "sdhci"; host->ops = &sdhci_pltfm_ops; host->irq = platform_get_irq(pdev, 0); - if (host->irq <= 0) { - ret = -EINVAL; + if (host->irq < 0) { + ret = host->irq; goto err_host; } host->quirks = SDHCI_QUIRK_BROKEN_ADMA; diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index 0fd4c9d644dd5149357cf40c3d110d1b0e417fae..5cf53348372a4a1b8a2d2c49545d6ab545436330 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c @@ -1400,7 +1400,7 @@ static int sh_mmcif_probe(struct platform_device *pdev) irq[0] = platform_get_irq(pdev, 0); irq[1] = platform_get_irq_optional(pdev, 1); if (irq[0] < 0) - return -ENXIO; + return irq[0]; reg = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(reg)) diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index 3db9f32d6a7b9fcd51672ed299e7456c0ac079d7..69dcb8805e05fef0b48f161e8d5aaa4a81b5f6f3 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -1350,8 +1350,8 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, return ret; host->irq = platform_get_irq(pdev, 0); - if (host->irq <= 0) { - ret = -EINVAL; + if (host->irq < 0) { + ret = host->irq; goto error_disable_mmc; } diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c index 
2f59917b105eb44e025d77a75a7292ca93050fd5..2e17903658fc8347cfbc3a99aaa5bf1d3ee27c6c 100644 --- a/drivers/mmc/host/usdhi6rol0.c +++ b/drivers/mmc/host/usdhi6rol0.c @@ -1757,8 +1757,10 @@ static int usdhi6_probe(struct platform_device *pdev) irq_cd = platform_get_irq_byname(pdev, "card detect"); irq_sd = platform_get_irq_byname(pdev, "data"); irq_sdio = platform_get_irq_byname(pdev, "SDIO"); - if (irq_sd < 0 || irq_sdio < 0) - return -ENODEV; + if (irq_sd < 0) + return irq_sd; + if (irq_sdio < 0) + return irq_sdio; mmc = mmc_alloc_host(sizeof(struct usdhi6_host), dev); if (!mmc) diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c index e4c4bfac3763bae1ec808dcdb3d06bb9fb049769..9ec593d52f0fa930daee67dbf2bf6979695e7e5e 100644 --- a/drivers/mmc/host/vub300.c +++ b/drivers/mmc/host/vub300.c @@ -1713,6 +1713,9 @@ static void construct_request_response(struct vub300_mmc_host *vub300, int bytes = 3 & less_cmd; int words = less_cmd >> 2; u8 *r = vub300->resp.response.command_response; + + if (!resp_len) + return; if (bytes == 3) { cmd->resp[words] = (r[1 + (words << 2)] << 24) | (r[2 + (words << 2)] << 16) diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index 01f1c6792df9c790622d2b039ee0fab992e6d0da..8dc4f5c493fcbac3abe17818cd5f7b2adcc70dd9 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c @@ -590,8 +590,8 @@ static void adjust_oob_length(struct mtd_info *mtd, uint64_t start, (end_page - start_page + 1) * oob_per_page); } -static int mtdchar_write_ioctl(struct mtd_info *mtd, - struct mtd_write_req __user *argp) +static noinline_for_stack int +mtdchar_write_ioctl(struct mtd_info *mtd, struct mtd_write_req __user *argp) { struct mtd_info *master = mtd_get_master(mtd); struct mtd_write_req req; @@ -688,8 +688,8 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd, return ret; } -static int mtdchar_read_ioctl(struct mtd_info *mtd, - struct mtd_read_req __user *argp) +static noinline_for_stack int +mtdchar_read_ioctl(struct mtd_info *mtd, struct mtd_read_req __user *argp) { struct mtd_info *master = mtd_get_master(mtd); struct mtd_read_req req; diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.h b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.h index 2cda439b5e11bff0fc091a4ba31109b27cfdbb98..017868f59f2221611256b3f34edc09a196ea2581 100644 --- a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.h +++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.h @@ -36,25 +36,25 @@ int ingenic_ecc_correct(struct ingenic_ecc *ecc, void ingenic_ecc_release(struct ingenic_ecc *ecc); struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *np); #else /* CONFIG_MTD_NAND_INGENIC_ECC */ -int ingenic_ecc_calculate(struct ingenic_ecc *ecc, +static inline int ingenic_ecc_calculate(struct ingenic_ecc *ecc, struct ingenic_ecc_params *params, const u8 *buf, u8 *ecc_code) { return -ENODEV; } -int ingenic_ecc_correct(struct ingenic_ecc *ecc, +static inline int ingenic_ecc_correct(struct ingenic_ecc *ecc, struct ingenic_ecc_params *params, u8 *buf, u8 *ecc_code) { return -ENODEV; } -void ingenic_ecc_release(struct ingenic_ecc *ecc) +static inline void ingenic_ecc_release(struct ingenic_ecc *ecc) { } -struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *np) +static inline struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *np) { return ERR_PTR(-ENODEV); } diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index afb424579f0b89777037726f54060fd21c2e0e73..30c15e4e1cc0d2899bf2fa943c1f7b95cc1d0e79 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ 
b/drivers/mtd/nand/raw/marvell_nand.c @@ -2457,6 +2457,12 @@ static int marvell_nfc_setup_interface(struct nand_chip *chip, int chipnr, NDTR1_WAIT_MODE; } + /* + * Reset nfc->selected_chip so the next command will cause the timing + * registers to be updated in marvell_nfc_select_target(). + */ + nfc->selected_chip = NULL; + return 0; } @@ -2894,10 +2900,6 @@ static int marvell_nfc_init(struct marvell_nfc *nfc) regmap_update_bits(sysctrl_base, GENCONF_CLK_GATING_CTRL, GENCONF_CLK_GATING_CTRL_ND_GATE, GENCONF_CLK_GATING_CTRL_ND_GATE); - - regmap_update_bits(sysctrl_base, GENCONF_ND_CLK_CTRL, - GENCONF_ND_CLK_CTRL_EN, - GENCONF_ND_CLK_CTRL_EN); } /* Configure the DMA if appropriate */ diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c index 0bb0ad14a2fc01c97d8e20144f8844186aea17df..5f29fac8669a301e8010566eee02edfdcdb66620 100644 --- a/drivers/mtd/spi-nor/core.c +++ b/drivers/mtd/spi-nor/core.c @@ -2018,6 +2018,7 @@ static const struct spi_nor_manufacturer *manufacturers[] = { static const struct flash_info spi_nor_generic_flash = { .name = "spi-nor-generic", + .n_banks = 1, /* * JESD216 rev A doesn't specify the page size, therefore we need a * sane default. @@ -2921,7 +2922,8 @@ static void spi_nor_late_init_params(struct spi_nor *nor) if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops) spi_nor_init_default_locking_ops(nor); - nor->params->bank_size = div64_u64(nor->params->size, nor->info->n_banks); + if (nor->info->n_banks > 1) + params->bank_size = div64_u64(params->size, nor->info->n_banks); } /** @@ -2987,6 +2989,7 @@ static void spi_nor_init_default_params(struct spi_nor *nor) /* Set SPI NOR sizes. */ params->writesize = 1; params->size = (u64)info->sector_size * info->n_sectors; + params->bank_size = params->size; params->page_size = info->page_size; if (!(info->flags & SPI_NOR_NO_FR)) { diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c index 15f9a80c10b9bd58096d95214b921dee5c72f48f..36876aa849ede7cda8d9900589fccbcb879c6d93 100644 --- a/drivers/mtd/spi-nor/spansion.c +++ b/drivers/mtd/spi-nor/spansion.c @@ -361,7 +361,7 @@ static int cypress_nor_determine_addr_mode_by_sr1(struct spi_nor *nor, */ static int cypress_nor_set_addr_mode_nbytes(struct spi_nor *nor) { - struct spi_mem_op op; + struct spi_mem_op op = {}; u8 addr_mode; int ret; @@ -492,7 +492,7 @@ s25fs256t_post_bfpt_fixup(struct spi_nor *nor, const struct sfdp_parameter_header *bfpt_header, const struct sfdp_bfpt *bfpt) { - struct spi_mem_op op; + struct spi_mem_op op = {}; int ret; ret = cypress_nor_set_addr_mode_nbytes(nor); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 3fed888629f7b5d08fede3187fb5639d74185c13..edbaa1444f8ecd9bf344a50f6f599d7eaaf4ff3e 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3947,7 +3947,11 @@ static int bond_slave_netdev_event(unsigned long event, unblock_netpoll_tx(); break; case NETDEV_FEAT_CHANGE: - bond_compute_features(bond); + if (!bond->notifier_ctx) { + bond->notifier_ctx = true; + bond_compute_features(bond); + bond->notifier_ctx = false; + } break; case NETDEV_RESEND_IGMP: /* Propagate to master device */ @@ -6342,6 +6346,8 @@ static int bond_init(struct net_device *bond_dev) if (!bond->wq) return -ENOMEM; + bond->notifier_ctx = false; + spin_lock_init(&bond->stats_lock); netdev_lockdep_set_classes(bond_dev); diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index 
3ceccafd701b2a319535791edd73d69513df45bc..b190007c01bec5f4e3b6bf11a60026560e09f333 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -95,7 +95,7 @@ config CAN_AT91 config CAN_BXCAN tristate "STM32 Basic Extended CAN (bxCAN) devices" - depends on OF || ARCH_STM32 || COMPILE_TEST + depends on ARCH_STM32 || COMPILE_TEST depends on HAS_IOMEM select CAN_RX_OFFLOAD help diff --git a/drivers/net/can/bxcan.c b/drivers/net/can/bxcan.c index e26ccd41e3cba5ae4a13256e3b4dd3bca4ba3b0b..027a8a162fe49fb3457dd943b8d84cd55e4f319e 100644 --- a/drivers/net/can/bxcan.c +++ b/drivers/net/can/bxcan.c @@ -118,7 +118,7 @@ #define BXCAN_FiR1_REG(b) (0x40 + (b) * 8) #define BXCAN_FiR2_REG(b) (0x44 + (b) * 8) -#define BXCAN_FILTER_ID(primary) (primary ? 0 : 14) +#define BXCAN_FILTER_ID(cfg) ((cfg) == BXCAN_CFG_DUAL_SECONDARY ? 14 : 0) /* Filter primary register (FMR) bits */ #define BXCAN_FMR_CANSB_MASK GENMASK(13, 8) @@ -135,6 +135,12 @@ enum bxcan_lec_code { BXCAN_LEC_UNUSED }; +enum bxcan_cfg { + BXCAN_CFG_SINGLE = 0, + BXCAN_CFG_DUAL_PRIMARY, + BXCAN_CFG_DUAL_SECONDARY +}; + /* Structure of the message buffer */ struct bxcan_mb { u32 id; /* can identifier */ @@ -167,7 +173,7 @@ struct bxcan_priv { struct regmap *gcan; int tx_irq; int sce_irq; - bool primary; + enum bxcan_cfg cfg; struct clk *clk; spinlock_t rmw_lock; /* lock for read-modify-write operations */ unsigned int tx_head; @@ -202,17 +208,17 @@ static inline void bxcan_rmw(struct bxcan_priv *priv, void __iomem *addr, spin_unlock_irqrestore(&priv->rmw_lock, flags); } -static void bxcan_disable_filters(struct bxcan_priv *priv, bool primary) +static void bxcan_disable_filters(struct bxcan_priv *priv, enum bxcan_cfg cfg) { - unsigned int fid = BXCAN_FILTER_ID(primary); + unsigned int fid = BXCAN_FILTER_ID(cfg); u32 fmask = BIT(fid); regmap_update_bits(priv->gcan, BXCAN_FA1R_REG, fmask, 0); } -static void bxcan_enable_filters(struct bxcan_priv *priv, bool primary) +static void bxcan_enable_filters(struct bxcan_priv *priv, enum bxcan_cfg cfg) { - unsigned int fid = BXCAN_FILTER_ID(primary); + unsigned int fid = BXCAN_FILTER_ID(cfg); u32 fmask = BIT(fid); /* Filter settings: @@ -680,7 +686,7 @@ static int bxcan_chip_start(struct net_device *ndev) BXCAN_BTR_BRP_MASK | BXCAN_BTR_TS1_MASK | BXCAN_BTR_TS2_MASK | BXCAN_BTR_SJW_MASK, set); - bxcan_enable_filters(priv, priv->primary); + bxcan_enable_filters(priv, priv->cfg); /* Clear all internal status */ priv->tx_head = 0; @@ -806,7 +812,7 @@ static void bxcan_chip_stop(struct net_device *ndev) BXCAN_IER_EPVIE | BXCAN_IER_EWGIE | BXCAN_IER_FOVIE1 | BXCAN_IER_FFIE1 | BXCAN_IER_FMPIE1 | BXCAN_IER_FOVIE0 | BXCAN_IER_FFIE0 | BXCAN_IER_FMPIE0 | BXCAN_IER_TMEIE, 0); - bxcan_disable_filters(priv, priv->primary); + bxcan_disable_filters(priv, priv->cfg); bxcan_enter_sleep_mode(priv); priv->can.state = CAN_STATE_STOPPED; } @@ -931,7 +937,7 @@ static int bxcan_probe(struct platform_device *pdev) struct clk *clk = NULL; void __iomem *regs; struct regmap *gcan; - bool primary; + enum bxcan_cfg cfg; int err, rx_irq, tx_irq, sce_irq; regs = devm_platform_ioremap_resource(pdev, 0); @@ -946,7 +952,13 @@ static int bxcan_probe(struct platform_device *pdev) return PTR_ERR(gcan); } - primary = of_property_read_bool(np, "st,can-primary"); + if (of_property_read_bool(np, "st,can-primary")) + cfg = BXCAN_CFG_DUAL_PRIMARY; + else if (of_property_read_bool(np, "st,can-secondary")) + cfg = BXCAN_CFG_DUAL_SECONDARY; + else + cfg = BXCAN_CFG_SINGLE; + clk = devm_clk_get(dev, NULL); if (IS_ERR(clk)) { dev_err(dev, "failed 
to get clock\n"); @@ -992,7 +1004,7 @@ static int bxcan_probe(struct platform_device *pdev) priv->clk = clk; priv->tx_irq = tx_irq; priv->sce_irq = sce_irq; - priv->primary = primary; + priv->cfg = cfg; priv->can.clock.freq = clk_get_rate(clk); spin_lock_init(&priv->rmw_lock); priv->tx_head = 0; diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c index 241ec636e91fd09f5006b694a3f32fec46662af6..f6d05b3ef59abf85a0bd7f40f9333983b44e4a90 100644 --- a/drivers/net/can/dev/skb.c +++ b/drivers/net/can/dev/skb.c @@ -54,7 +54,8 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, /* check flag whether this packet has to be looped back */ if (!(dev->flags & IFF_ECHO) || (skb->protocol != htons(ETH_P_CAN) && - skb->protocol != htons(ETH_P_CANFD))) { + skb->protocol != htons(ETH_P_CANFD) && + skb->protocol != htons(ETH_P_CANXL))) { kfree_skb(skb); return 0; } diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c index 53e8a914c88b57884b76a81c9a8750535550ca92..be189edb256ceb13590b170e3294ef72264a5092 100644 --- a/drivers/net/can/kvaser_pciefd.c +++ b/drivers/net/can/kvaser_pciefd.c @@ -71,10 +71,12 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices"); #define KVASER_PCIEFD_SYSID_BUILD_REG (KVASER_PCIEFD_SYSID_BASE + 0x14) /* Shared receive buffer registers */ #define KVASER_PCIEFD_SRB_BASE 0x1f200 +#define KVASER_PCIEFD_SRB_FIFO_LAST_REG (KVASER_PCIEFD_SRB_BASE + 0x1f4) #define KVASER_PCIEFD_SRB_CMD_REG (KVASER_PCIEFD_SRB_BASE + 0x200) #define KVASER_PCIEFD_SRB_IEN_REG (KVASER_PCIEFD_SRB_BASE + 0x204) #define KVASER_PCIEFD_SRB_IRQ_REG (KVASER_PCIEFD_SRB_BASE + 0x20c) #define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x210) +#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG (KVASER_PCIEFD_SRB_BASE + 0x214) #define KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x218) /* EPCS flash controller registers */ #define KVASER_PCIEFD_SPI_BASE 0x1fc00 @@ -111,6 +113,9 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices"); /* DMA support */ #define KVASER_PCIEFD_SRB_STAT_DMA BIT(24) +/* SRB current packet level */ +#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK 0xff + /* DMA Enable */ #define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0) @@ -526,7 +531,7 @@ static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can) KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL | KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP | - KVASER_PCIEFD_KCAN_IRQ_TAR | KVASER_PCIEFD_KCAN_IRQ_TFD; + KVASER_PCIEFD_KCAN_IRQ_TAR; iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); @@ -554,6 +559,8 @@ static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can) if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) mode |= KVASER_PCIEFD_KCAN_MODE_LOM; + else + mode &= ~KVASER_PCIEFD_KCAN_MODE_LOM; mode |= KVASER_PCIEFD_KCAN_MODE_EEN; mode |= KVASER_PCIEFD_KCAN_MODE_EPEN; @@ -572,7 +579,7 @@ static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can) spin_lock_irqsave(&can->lock, irq); iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); - iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD, + iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG); @@ -615,7 +622,7 @@ static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can) iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); iowrite32(-1, can->reg_base + 
KVASER_PCIEFD_KCAN_IRQ_REG); - iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD, + iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); @@ -719,6 +726,7 @@ static int kvaser_pciefd_stop(struct net_device *netdev) iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); del_timer(&can->bec_poll_timer); } + can->can.state = CAN_STATE_STOPPED; close_candev(netdev); return ret; @@ -1007,8 +1015,7 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie) SET_NETDEV_DEV(netdev, &pcie->pci->dev); iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); - iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | - KVASER_PCIEFD_KCAN_IRQ_TFD, + iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); pcie->can[i] = can; @@ -1058,6 +1065,7 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie) { int i; u32 srb_status; + u32 srb_packet_count; dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT]; /* Disable the DMA */ @@ -1085,6 +1093,15 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie) KVASER_PCIEFD_SRB_CMD_RDB1, pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG); + /* Empty Rx FIFO */ + srb_packet_count = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG) & + KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK; + while (srb_packet_count) { + /* Drop current packet in FIFO */ + ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_FIFO_LAST_REG); + srb_packet_count--; + } + srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG); if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) { dev_err(&pcie->pci->dev, "DMA not idle before enabling\n"); @@ -1425,9 +1442,6 @@ static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie, cmd = KVASER_PCIEFD_KCAN_CMD_AT; cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT; iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG); - - iowrite32(KVASER_PCIEFD_KCAN_IRQ_TFD, - can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); } else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET && p->header[0] & KVASER_PCIEFD_SPACK_IRM && cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) && @@ -1714,15 +1728,6 @@ static int kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can) if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF) netdev_err(can->can.dev, "Tx FIFO overflow\n"); - if (irq & KVASER_PCIEFD_KCAN_IRQ_TFD) { - u8 count = ioread32(can->reg_base + - KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff; - - if (count == 0) - iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH, - can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG); - } - if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP) netdev_err(can->can.dev, "Fail to change bittiming, when not in reset mode\n"); @@ -1824,6 +1829,11 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev, if (err) goto err_teardown_can_ctrls; + err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler, + IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie); + if (err) + goto err_teardown_can_ctrls; + iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1, pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG); @@ -1844,11 +1854,6 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev, iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG); - err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler, - IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie); - if (err) - goto err_teardown_can_ctrls; - err = kvaser_pciefd_reg_candev(pcie); if (err) goto err_free_irq; @@ -1856,6 +1861,8 @@ static int kvaser_pciefd_probe(struct pci_dev 
*pdev, return 0; err_free_irq: + /* Disable PCI interrupts */ + iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG); free_irq(pcie->pci->irq, pcie); err_teardown_can_ctrls: diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index cbe831875347170dee071996a6d9e10a5992d3dd..c0215a8770f49d71c9847853fe38492a8a01d80e 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -1188,8 +1188,6 @@ static int lan9303_port_fdb_add(struct dsa_switch *ds, int port, struct lan9303 *chip = ds->priv; dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, addr, vid); - if (vid) - return -EOPNOTSUPP; return lan9303_alr_add_port(chip, addr, port, false); } @@ -1201,8 +1199,6 @@ static int lan9303_port_fdb_del(struct dsa_switch *ds, int port, struct lan9303 *chip = ds->priv; dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, addr, vid); - if (vid) - return -EOPNOTSUPP; lan9303_alr_del_port(chip, addr, port); return 0; diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 9bc54e1348cb9bb8164f74a54bc76f345c90411a..7e773c4ba04638dd5f27b627a522c12391f856f9 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -399,6 +399,20 @@ static void mt7530_pll_setup(struct mt7530_priv *priv) core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); } +/* If port 6 is available as a CPU port, always prefer that as the default, + * otherwise don't care. + */ +static struct dsa_port * +mt753x_preferred_default_local_cpu_port(struct dsa_switch *ds) +{ + struct dsa_port *cpu_dp = dsa_to_port(ds, 6); + + if (dsa_port_is_cpu(cpu_dp)) + return cpu_dp; + + return NULL; +} + /* Setup port 6 interface mode and TRGMII TX circuit */ static int mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface) @@ -985,6 +999,18 @@ unlock_exit: mutex_unlock(&priv->reg_mutex); } +static void +mt753x_trap_frames(struct mt7530_priv *priv) +{ + /* Trap BPDUs to the CPU port(s) */ + mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK, + MT753X_BPDU_CPU_ONLY); + + /* Trap LLDP frames with :0E MAC DA to the CPU port(s) */ + mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_PORT_FW_MASK, + MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY)); +} + static int mt753x_cpu_port_enable(struct dsa_switch *ds, int port) { @@ -1007,9 +1033,16 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port) UNU_FFP(BIT(port))); /* Set CPU port number */ - if (priv->id == ID_MT7621) + if (priv->id == ID_MT7530 || priv->id == ID_MT7621) mt7530_rmw(priv, MT7530_MFC, CPU_MASK, CPU_EN | CPU_PORT(port)); + /* Add the CPU port to the CPU port bitmap for MT7531 and the switch on + * the MT7988 SoC. Trapped frames will be forwarded to the CPU port that + * is affine to the inbound user port. + */ + if (priv->id == ID_MT7531 || priv->id == ID_MT7988) + mt7530_set(priv, MT7531_CFC, MT7531_CPU_PMAP(BIT(port))); + /* CPU port gets connected to all user ports of * the switch. 
*/ @@ -2255,6 +2288,8 @@ mt7530_setup(struct dsa_switch *ds) priv->p6_interface = PHY_INTERFACE_MODE_NA; + mt753x_trap_frames(priv); + /* Enable and reset MIB counters */ mt7530_mib_reset(ds); @@ -2352,17 +2387,9 @@ static int mt7531_setup_common(struct dsa_switch *ds) { struct mt7530_priv *priv = ds->priv; - struct dsa_port *cpu_dp; int ret, i; - /* BPDU to CPU port */ - dsa_switch_for_each_cpu_port(cpu_dp, ds) { - mt7530_rmw(priv, MT7531_CFC, MT7531_CPU_PMAP_MASK, - BIT(cpu_dp->index)); - break; - } - mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK, - MT753X_BPDU_CPU_ONLY); + mt753x_trap_frames(priv); /* Enable and reset MIB counters */ mt7530_mib_reset(ds); @@ -3085,6 +3112,7 @@ static int mt7988_setup(struct dsa_switch *ds) const struct dsa_switch_ops mt7530_switch_ops = { .get_tag_protocol = mtk_get_tag_protocol, .setup = mt753x_setup, + .preferred_default_local_cpu_port = mt753x_preferred_default_local_cpu_port, .get_strings = mt7530_get_strings, .get_ethtool_stats = mt7530_get_ethtool_stats, .get_sset_count = mt7530_get_sset_count, diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index 5084f48a886913821cff0ba2e14fbba802e54bdf..08045b035e6ab02cd0396cca9e6d7aaa9d294fb4 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h @@ -54,6 +54,7 @@ enum mt753x_id { #define MT7531_MIRROR_PORT_GET(x) (((x) >> 16) & MIRROR_MASK) #define MT7531_MIRROR_PORT_SET(x) (((x) & MIRROR_MASK) << 16) #define MT7531_CPU_PMAP_MASK GENMASK(7, 0) +#define MT7531_CPU_PMAP(x) FIELD_PREP(MT7531_CPU_PMAP_MASK, x) #define MT753X_MIRROR_REG(id) ((((id) == ID_MT7531) || ((id) == ID_MT7988)) ? \ MT7531_CFC : MT7530_MFC) @@ -66,6 +67,11 @@ enum mt753x_id { #define MT753X_BPC 0x24 #define MT753X_BPDU_PORT_FW_MASK GENMASK(2, 0) +/* Register for :03 and :0E MAC DA frame control */ +#define MT753X_RGAC2 0x2c +#define MT753X_R0E_PORT_FW_MASK GENMASK(18, 16) +#define MT753X_R0E_PORT_FW(x) FIELD_PREP(MT753X_R0E_PORT_FW_MASK, x) + enum mt753x_bpdu_port_fw { MT753X_BPDU_FOLLOW_MFC, MT753X_BPDU_CPU_EXCLUDE = 4, diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 64a2f2f83735d04fdaa343cc80e24de8f1f3b97e..08a46ffd53af99524dc751dd05f2dbece4a6d71f 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -7170,7 +7170,7 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) goto out; } if (chip->reset) - usleep_range(1000, 2000); + usleep_range(10000, 20000); /* Detect if the device is configured in single chip addressing mode, * otherwise continue with address specific smi init/detection. 
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index aec9d4fd20e3631cbb9cc6eb3951b8e2d74ff644..d19b6303b91f0aafc6924d6098a93f93fc07213c 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -276,7 +276,7 @@
 /* Offset 0x10: Extended Port Control Command */
 #define MV88E6393X_PORT_EPC_CMD 0x10
 #define MV88E6393X_PORT_EPC_CMD_BUSY 0x8000
-#define MV88E6393X_PORT_EPC_CMD_WRITE 0x0300
+#define MV88E6393X_PORT_EPC_CMD_WRITE 0x3000
 #define MV88E6393X_PORT_EPC_INDEX_PORT_ETYPE 0x02
 
 /* Offset 0x11: Extended Port Control Data */
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index cfb3faeaa5bfa9ee282fe28e3084bf8dbc5d1e5c..d172a3e9736c426516e687e6f99ae1302c2c73d6 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -1263,7 +1263,7 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
 	/* Consider the standard Ethernet overhead of 8 octets preamble+SFD,
 	 * 4 octets FCS, 12 octets IFG.
 	 */
-	needed_bit_time_ps = (maxlen + 24) * picos_per_byte;
+	needed_bit_time_ps = (u64)(maxlen + 24) * picos_per_byte;
 
 	dev_dbg(ocelot->dev,
 		"port %d: max frame size %d needs %llu ps at speed %d\n",
diff --git a/drivers/net/dsa/qca/Kconfig b/drivers/net/dsa/qca/Kconfig
index 4347b42c50fd24762c64b5f41e262570a53ba60c..de9da469908bcff10cf658c3b9f46c4770d015ad 100644
--- a/drivers/net/dsa/qca/Kconfig
+++ b/drivers/net/dsa/qca/Kconfig
@@ -20,6 +20,7 @@ config NET_DSA_QCA8K_LEDS_SUPPORT
 	bool "Qualcomm Atheros QCA8K Ethernet switch family LEDs support"
 	depends on NET_DSA_QCA8K
 	depends on LEDS_CLASS=y || LEDS_CLASS=NET_DSA_QCA8K
+	depends on LEDS_TRIGGERS
 	help
 	  This enabled support for LEDs present on the Qualcomm Atheros
 	  QCA8K Ethernet switch chips.
diff --git a/drivers/net/dsa/rzn1_a5psw.c b/drivers/net/dsa/rzn1_a5psw.c
index 919027cf20124350e31fcc9ca13f0a19071fee11..c37d2e5372302bd62a9f3c14cd0c768033946ec8 100644
--- a/drivers/net/dsa/rzn1_a5psw.c
+++ b/drivers/net/dsa/rzn1_a5psw.c
@@ -120,6 +120,22 @@ static void a5psw_port_mgmtfwd_set(struct a5psw *a5psw, int port, bool enable)
 	a5psw_port_pattern_set(a5psw, port, A5PSW_PATTERN_MGMTFWD, enable);
 }
 
+static void a5psw_port_tx_enable(struct a5psw *a5psw, int port, bool enable)
+{
+	u32 mask = A5PSW_PORT_ENA_TX(port);
+	u32 reg = enable ? mask : 0;
+
+	/* Even though the port TX is disabled through TXENA bit in the
+	 * PORT_ENA register, it can still send BPDUs. This depends on the tag
+	 * configuration added when sending packets from the CPU port to the
+	 * switch port. Indeed, when using forced forwarding without filtering,
+	 * even disabled ports will be able to send packets that are tagged.
+	 * This allows to implement STP support when ports are in a state where
+	 * forwarding traffic should be stopped but BPDUs should still be sent.
+	 */
+	a5psw_reg_rmw(a5psw, A5PSW_PORT_ENA, mask, reg);
+}
+
 static void a5psw_port_enable_set(struct a5psw *a5psw, int port, bool enable)
 {
 	u32 port_ena = 0;
@@ -292,6 +308,22 @@ static int a5psw_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
 	return 0;
 }
 
+static void a5psw_port_learning_set(struct a5psw *a5psw, int port, bool learn)
+{
+	u32 mask = A5PSW_INPUT_LEARN_DIS(port);
+	u32 reg = !learn ? mask : 0;
+
+	a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
+}
+
+static void a5psw_port_rx_block_set(struct a5psw *a5psw, int port, bool block)
+{
+	u32 mask = A5PSW_INPUT_LEARN_BLOCK(port);
+	u32 reg = block ?
mask : 0; + + a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg); +} + static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port, bool set) { @@ -308,6 +340,14 @@ static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port, a5psw_reg_writel(a5psw, offsets[i], a5psw->bridged_ports); } +static void a5psw_port_set_standalone(struct a5psw *a5psw, int port, + bool standalone) +{ + a5psw_port_learning_set(a5psw, port, !standalone); + a5psw_flooding_set_resolution(a5psw, port, !standalone); + a5psw_port_mgmtfwd_set(a5psw, port, standalone); +} + static int a5psw_port_bridge_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge, bool *tx_fwd_offload, @@ -323,8 +363,7 @@ static int a5psw_port_bridge_join(struct dsa_switch *ds, int port, } a5psw->br_dev = bridge.dev; - a5psw_flooding_set_resolution(a5psw, port, true); - a5psw_port_mgmtfwd_set(a5psw, port, false); + a5psw_port_set_standalone(a5psw, port, false); return 0; } @@ -334,8 +373,7 @@ static void a5psw_port_bridge_leave(struct dsa_switch *ds, int port, { struct a5psw *a5psw = ds->priv; - a5psw_flooding_set_resolution(a5psw, port, false); - a5psw_port_mgmtfwd_set(a5psw, port, true); + a5psw_port_set_standalone(a5psw, port, true); /* No more ports bridged */ if (a5psw->bridged_ports == BIT(A5PSW_CPU_PORT)) @@ -344,28 +382,35 @@ static void a5psw_port_bridge_leave(struct dsa_switch *ds, int port, static void a5psw_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) { - u32 mask = A5PSW_INPUT_LEARN_DIS(port) | A5PSW_INPUT_LEARN_BLOCK(port); + bool learning_enabled, rx_enabled, tx_enabled; struct a5psw *a5psw = ds->priv; - u32 reg = 0; switch (state) { case BR_STATE_DISABLED: case BR_STATE_BLOCKING: - reg |= A5PSW_INPUT_LEARN_DIS(port); - reg |= A5PSW_INPUT_LEARN_BLOCK(port); - break; case BR_STATE_LISTENING: - reg |= A5PSW_INPUT_LEARN_DIS(port); + rx_enabled = false; + tx_enabled = false; + learning_enabled = false; break; case BR_STATE_LEARNING: - reg |= A5PSW_INPUT_LEARN_BLOCK(port); + rx_enabled = false; + tx_enabled = false; + learning_enabled = true; break; case BR_STATE_FORWARDING: - default: + rx_enabled = true; + tx_enabled = true; + learning_enabled = true; break; + default: + dev_err(ds->dev, "invalid STP state: %d\n", state); + return; } - a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg); + a5psw_port_learning_set(a5psw, port, learning_enabled); + a5psw_port_rx_block_set(a5psw, port, !rx_enabled); + a5psw_port_tx_enable(a5psw, port, tx_enabled); } static void a5psw_port_fast_age(struct dsa_switch *ds, int port) @@ -673,7 +718,7 @@ static int a5psw_setup(struct dsa_switch *ds) } /* Configure management port */ - reg = A5PSW_CPU_PORT | A5PSW_MGMT_CFG_DISCARD; + reg = A5PSW_CPU_PORT | A5PSW_MGMT_CFG_ENABLE; a5psw_reg_writel(a5psw, A5PSW_MGMT_CFG, reg); /* Set pattern 0 to forward all frame to mgmt port */ @@ -722,13 +767,15 @@ static int a5psw_setup(struct dsa_switch *ds) if (dsa_port_is_unused(dp)) continue; - /* Enable egress flooding for CPU port */ - if (dsa_port_is_cpu(dp)) + /* Enable egress flooding and learning for CPU port */ + if (dsa_port_is_cpu(dp)) { a5psw_flooding_set_resolution(a5psw, port, true); + a5psw_port_learning_set(a5psw, port, true); + } - /* Enable management forward only for user ports */ + /* Enable standalone mode for user ports */ if (dsa_port_is_user(dp)) - a5psw_port_mgmtfwd_set(a5psw, port, true); + a5psw_port_set_standalone(a5psw, port, true); } return 0; diff --git a/drivers/net/dsa/rzn1_a5psw.h b/drivers/net/dsa/rzn1_a5psw.h index 
c67abd49c013d356a5d00c293070c4cdde8e1ce9..b869192eef3f71bb224b8308f3f6301e1efa5218 100644 --- a/drivers/net/dsa/rzn1_a5psw.h +++ b/drivers/net/dsa/rzn1_a5psw.h @@ -19,6 +19,7 @@ #define A5PSW_PORT_OFFSET(port) (0x400 * (port)) #define A5PSW_PORT_ENA 0x8 +#define A5PSW_PORT_ENA_TX(port) BIT(port) #define A5PSW_PORT_ENA_RX_SHIFT 16 #define A5PSW_PORT_ENA_TX_RX(port) (BIT((port) + A5PSW_PORT_ENA_RX_SHIFT) | \ BIT(port)) @@ -36,7 +37,7 @@ #define A5PSW_INPUT_LEARN_BLOCK(p) BIT(p) #define A5PSW_MGMT_CFG 0x20 -#define A5PSW_MGMT_CFG_DISCARD BIT(7) +#define A5PSW_MGMT_CFG_ENABLE BIT(6) #define A5PSW_MODE_CFG 0x24 #define A5PSW_MODE_STATS_RESET BIT(31) diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c index d2f4358cc5503e939d1fd23fbb37933bb494aa36..ba3e7aa1a28fb577185be24a6592d37f298e9eef 100644 --- a/drivers/net/ethernet/3com/3c515.c +++ b/drivers/net/ethernet/3com/3c515.c @@ -66,8 +66,10 @@ static int max_interrupt_work = 20; #include #include #include - #include + +#include + #include #include diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c index 82f94b1635bf816150e5f3c4f7d3e71417d81263..5267e9dcd87ef90927e7806206dba7d108e96ddc 100644 --- a/drivers/net/ethernet/3com/3c589_cs.c +++ b/drivers/net/ethernet/3com/3c589_cs.c @@ -195,6 +195,7 @@ static int tc589_probe(struct pcmcia_device *link) { struct el3_private *lp; struct net_device *dev; + int ret; dev_dbg(&link->dev, "3c589_attach()\n"); @@ -218,7 +219,15 @@ static int tc589_probe(struct pcmcia_device *link) dev->ethtool_ops = &netdev_ethtool_ops; - return tc589_config(link); + ret = tc589_config(link); + if (ret) + goto err_free_netdev; + + return 0; + +err_free_netdev: + free_netdev(dev); + return ret; } static void tc589_detach(struct pcmcia_device *link) diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c index 0a9118b8be0c64b481f8e43988f5a8679c461e5c..bc9c81dc00fd20c37311da008e9d6bc04dc8f864 100644 --- a/drivers/net/ethernet/8390/ne.c +++ b/drivers/net/ethernet/8390/ne.c @@ -52,6 +52,7 @@ static const char version2[] = #include #include #include +#include #include diff --git a/drivers/net/ethernet/8390/smc-ultra.c b/drivers/net/ethernet/8390/smc-ultra.c index 6e62c37c9400565616fd35a061aece8eb4beca8f..7465650c80780594203516d5a4915a9122d04448 100644 --- a/drivers/net/ethernet/8390/smc-ultra.c +++ b/drivers/net/ethernet/8390/smc-ultra.c @@ -66,6 +66,7 @@ static const char version[] = #include #include #include +#include #include #include diff --git a/drivers/net/ethernet/8390/wd.c b/drivers/net/ethernet/8390/wd.c index 5b00c452bede64a71519d34dfab79a0d50e05f64..119021d41451e88726af07852b587b4d33673efd 100644 --- a/drivers/net/ethernet/8390/wd.c +++ b/drivers/net/ethernet/8390/wd.c @@ -37,6 +37,7 @@ static const char version[] = #include #include #include +#include #include diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c index 8971665a4b2ac3adc53de5fd292dfe479deec304..6cf38180cc01934f6252d2dcc4a3b3b9c8fecfe1 100644 --- a/drivers/net/ethernet/amd/lance.c +++ b/drivers/net/ethernet/amd/lance.c @@ -59,6 +59,7 @@ static const char version[] = "lance.c:v1.16 2006/11/09 dplatt@3do.com, becker@c #include #include #include +#include #include #include diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c index f7c597ea5daf3dd0582b5fc98737a8b20f0a0f63..debe5216fe29e2e8ada260ffd3fd4887f5b161d6 100644 --- a/drivers/net/ethernet/amd/pds_core/dev.c +++ 
b/drivers/net/ethernet/amd/pds_core/dev.c @@ -68,9 +68,15 @@ bool pdsc_is_fw_running(struct pdsc *pdsc) bool pdsc_is_fw_good(struct pdsc *pdsc) { - u8 gen = pdsc->fw_status & PDS_CORE_FW_STS_F_GENERATION; + bool fw_running = pdsc_is_fw_running(pdsc); + u8 gen; - return pdsc_is_fw_running(pdsc) && gen == pdsc->fw_generation; + /* Make sure to update the cached fw_status by calling + * pdsc_is_fw_running() before getting the generation + */ + gen = pdsc->fw_status & PDS_CORE_FW_STS_F_GENERATION; + + return fw_running && gen == pdsc->fw_generation; } static u8 pdsc_devcmd_status(struct pdsc *pdsc) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 33a9574e9e0434fbdfcfd022b9731329c6fbad8f..32d2c6fac65266baee9bc36477f21418b50f67e0 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -1329,7 +1329,7 @@ static enum xgbe_mode xgbe_phy_status_aneg(struct xgbe_prv_data *pdata) return pdata->phy_if.phy_impl.an_outcome(pdata); } -static void xgbe_phy_status_result(struct xgbe_prv_data *pdata) +static bool xgbe_phy_status_result(struct xgbe_prv_data *pdata) { struct ethtool_link_ksettings *lks = &pdata->phy.lks; enum xgbe_mode mode; @@ -1367,8 +1367,13 @@ static void xgbe_phy_status_result(struct xgbe_prv_data *pdata) pdata->phy.duplex = DUPLEX_FULL; - if (xgbe_set_mode(pdata, mode) && pdata->an_again) + if (!xgbe_set_mode(pdata, mode)) + return false; + + if (pdata->an_again) xgbe_phy_reconfig_aneg(pdata); + + return true; } static void xgbe_phy_status(struct xgbe_prv_data *pdata) @@ -1398,7 +1403,8 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata) return; } - xgbe_phy_status_result(pdata); + if (xgbe_phy_status_result(pdata)) + return; if (test_bit(XGBE_LINK_INIT, &pdata->dev_state)) clear_bit(XGBE_LINK_INIT, &pdata->dev_state); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 38d0cdaf22a5b3bd9d5ae396d02f54609e1254d7..bf1611cce974a95d6e53df6cf52e0a47ecf25311 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -2531,9 +2531,9 @@ static int bcm_sysport_probe(struct platform_device *pdev) priv->irq0 = platform_get_irq(pdev, 0); if (!priv->is_lite) { priv->irq1 = platform_get_irq(pdev, 1); - priv->wol_irq = platform_get_irq(pdev, 2); + priv->wol_irq = platform_get_irq_optional(pdev, 2); } else { - priv->wol_irq = platform_get_irq(pdev, 1); + priv->wol_irq = platform_get_irq_optional(pdev, 1); } if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) { ret = -EINVAL; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 637d162bbcfaf662407c819468e7e2379918b9f2..1e7a6f1d422337f855fc5d0e578f9bfe137017f5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -14294,11 +14294,16 @@ static void bnx2x_io_resume(struct pci_dev *pdev) bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & DRV_MSG_SEQ_NUMBER_MASK; - if (netif_running(dev)) - bnx2x_nic_load(bp, LOAD_NORMAL); + if (netif_running(dev)) { + if (bnx2x_nic_load(bp, LOAD_NORMAL)) { + netdev_err(bp->dev, "Error during driver initialization, try unloading/reloading the driver\n"); + goto done; + } + } netif_device_attach(dev); +done: rtnl_unlock(); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 
dcd9367f05afd476afa1294a2fb18902e2d08ffd..b499bc9c4e06701c3d5bc02e0898d5c79ee1b267 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -692,7 +692,7 @@ next_tx_int: __netif_txq_completed_wake(txq, nr_pkts, tx_bytes, bnxt_tx_avail(bp, txr), bp->tx_wake_thresh, - READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING); + READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING); } static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, @@ -2365,6 +2365,9 @@ static int bnxt_async_event_process(struct bnxt *bp, struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; u64 ns; + if (!ptp) + goto async_event_process_exit; + spin_lock_bh(&ptp->ptp_lock); bnxt_ptp_update_current_time(bp); ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) << @@ -4763,6 +4766,9 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY && !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) continue; + if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE && + !bp->ptp_cfg) + continue; __set_bit(bnxt_async_events_arr[i], async_events_bmap); } if (bmap && bmap_size) { @@ -5350,6 +5356,7 @@ static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp) if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG)) return; + req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); /* all contexts configured to same hash_type, zero always exists */ req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); resp = hwrm_req_hold(bp, req); @@ -8812,6 +8819,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) goto err_out; } + if (BNXT_VF(bp)) + bnxt_hwrm_func_qcfg(bp); + rc = bnxt_setup_vnic(bp, 0); if (rc) goto err_out; @@ -11598,6 +11608,7 @@ static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue) static void bnxt_fw_health_check(struct bnxt *bp) { struct bnxt_fw_health *fw_health = bp->fw_health; + struct pci_dev *pdev = bp->pdev; u32 val; if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) @@ -11611,7 +11622,7 @@ static void bnxt_fw_health_check(struct bnxt *bp) } val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); - if (val == fw_health->last_fw_heartbeat) { + if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) { fw_health->arrests++; goto fw_reset; } @@ -11619,7 +11630,7 @@ static void bnxt_fw_health_check(struct bnxt *bp) fw_health->last_fw_heartbeat = val; val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); - if (val != fw_health->last_fw_reset_cnt) { + if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) { fw_health->discoveries++; goto fw_reset; } @@ -13025,26 +13036,37 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp) #endif /* CONFIG_RFS_ACCEL */ -static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table) +static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table, + unsigned int entry, struct udp_tunnel_info *ti) { struct bnxt *bp = netdev_priv(netdev); - struct udp_tunnel_info ti; unsigned int cmd; - udp_tunnel_nic_get_port(netdev, table, 0, &ti); - if (ti.type == UDP_TUNNEL_TYPE_VXLAN) + if (ti->type == UDP_TUNNEL_TYPE_VXLAN) cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN; else cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE; - if (ti.port) - return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd); + return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd); +} + +static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table, + unsigned int entry, struct 
udp_tunnel_info *ti) +{ + struct bnxt *bp = netdev_priv(netdev); + unsigned int cmd; + + if (ti->type == UDP_TUNNEL_TYPE_VXLAN) + cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN; + else + cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE; return bnxt_hwrm_tunnel_dst_port_free(bp, cmd); } static const struct udp_tunnel_nic_info bnxt_udp_tunnels = { - .sync_table = bnxt_udp_tunnel_sync, + .set_port = bnxt_udp_tunnel_set_port, + .unset_port = bnxt_udp_tunnel_unset_port, .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | UDP_TUNNEL_NIC_INFO_OPEN_ONLY, .tables = { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 2dd8ee4a6f75b76b0ea794925a979400bac85930..8fd5071d8b09951402d217946300a1fed40912d7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -3831,7 +3831,7 @@ static int bnxt_reset(struct net_device *dev, u32 *flags) } } - if (req & BNXT_FW_RESET_AP) { + if (!BNXT_CHIP_P4_PLUS(bp) && (req & BNXT_FW_RESET_AP)) { /* This feature is not supported in older firmware versions */ if (bp->hwrm_spec_code >= 0x10803) { if (!bnxt_firmware_reset_ap(dev)) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index e46689128e323fd1d7748c2d509a38b308a27f3d..f3886710e77873a83e8b8aad3eb2f3e2ba7d465e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -952,6 +952,7 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg) bnxt_ptp_timecounter_init(bp, true); bnxt_ptp_adjfine_rtc(bp, 0); } + bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true); ptp->ptp_info = bnxt_ptp_caps; if ((bp->fw_cap & BNXT_FW_CAP_PTP_PPS)) { diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index f28ffc31df2200660954cbebea08a5e06232b947..2b5761ad2f92f1f0be680e56981a6ca8266c7d9b 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1272,7 +1272,8 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev, } } -static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable) +void bcmgenet_eee_enable_set(struct net_device *dev, bool enable, + bool tx_lpi_enabled) { struct bcmgenet_priv *priv = netdev_priv(dev); u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL; @@ -1292,7 +1293,7 @@ static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable) /* Enable EEE and switch to a 27Mhz clock automatically */ reg = bcmgenet_readl(priv->base + off); - if (enable) + if (tx_lpi_enabled) reg |= TBUF_EEE_EN | TBUF_PM_EN; else reg &= ~(TBUF_EEE_EN | TBUF_PM_EN); @@ -1313,6 +1314,7 @@ static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable) priv->eee.eee_enabled = enable; priv->eee.eee_active = enable; + priv->eee.tx_lpi_enabled = tx_lpi_enabled; } static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e) @@ -1328,6 +1330,7 @@ static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e) e->eee_enabled = p->eee_enabled; e->eee_active = p->eee_active; + e->tx_lpi_enabled = p->tx_lpi_enabled; e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER); return phy_ethtool_get_eee(dev->phydev, e); @@ -1337,7 +1340,6 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e) { struct bcmgenet_priv *priv = netdev_priv(dev); struct ethtool_eee *p = &priv->eee; - int ret = 0; if (GENET_IS_V1(priv)) return 
-EOPNOTSUPP; @@ -1348,16 +1350,11 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e) p->eee_enabled = e->eee_enabled; if (!p->eee_enabled) { - bcmgenet_eee_enable_set(dev, false); + bcmgenet_eee_enable_set(dev, false, false); } else { - ret = phy_init_eee(dev->phydev, false); - if (ret) { - netif_err(priv, hw, dev, "EEE initialization failed\n"); - return ret; - } - + p->eee_active = phy_init_eee(dev->phydev, false) >= 0; bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER); - bcmgenet_eee_enable_set(dev, true); + bcmgenet_eee_enable_set(dev, p->eee_active, e->tx_lpi_enabled); } return phy_ethtool_set_eee(dev->phydev, e); @@ -3450,7 +3447,7 @@ err_clk_disable: return ret; } -static void bcmgenet_netif_stop(struct net_device *dev) +static void bcmgenet_netif_stop(struct net_device *dev, bool stop_phy) { struct bcmgenet_priv *priv = netdev_priv(dev); @@ -3465,6 +3462,8 @@ static void bcmgenet_netif_stop(struct net_device *dev) /* Disable MAC transmit. TX DMA disabled must be done before this */ umac_enable_set(priv, CMD_TX_EN, false); + if (stop_phy) + phy_stop(dev->phydev); bcmgenet_disable_rx_napi(priv); bcmgenet_intr_disable(priv); @@ -3485,7 +3484,7 @@ static int bcmgenet_close(struct net_device *dev) netif_dbg(priv, ifdown, dev, "bcmgenet_close\n"); - bcmgenet_netif_stop(dev); + bcmgenet_netif_stop(dev, false); /* Really kill the PHY state machine and disconnect from it */ phy_disconnect(dev->phydev); @@ -4277,9 +4276,6 @@ static int bcmgenet_resume(struct device *d) if (!device_may_wakeup(d)) phy_resume(dev->phydev); - if (priv->eee.eee_enabled) - bcmgenet_eee_enable_set(dev, true); - bcmgenet_netif_start(dev); netif_device_attach(dev); @@ -4303,7 +4299,7 @@ static int bcmgenet_suspend(struct device *d) netif_device_detach(dev); - bcmgenet_netif_stop(dev); + bcmgenet_netif_stop(dev, true); if (!device_may_wakeup(d)) phy_suspend(dev->phydev); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 946f6e283c4e6b24d4588ae6f42687ed58d84f41..1985c0ec4da2ab05af224113b2b57c6cf33b7b74 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -703,4 +703,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, enum bcmgenet_power_mode mode); +void bcmgenet_eee_enable_set(struct net_device *dev, bool enable, + bool tx_lpi_enabled); + #endif /* __BCMGENET_H__ */ diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index be042905ada2a41a35250aafde6b8c984d865c3d..c15ed0acdb777deef01816683e4024dcc27e5277 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -87,6 +87,11 @@ static void bcmgenet_mac_config(struct net_device *dev) reg |= CMD_TX_EN | CMD_RX_EN; } bcmgenet_umac_writel(priv, reg, UMAC_CMD); + + priv->eee.eee_active = phy_init_eee(phydev, 0) >= 0; + bcmgenet_eee_enable_set(dev, + priv->eee.eee_enabled && priv->eee.eee_active, + priv->eee.tx_lpi_enabled); } /* setup netdev link state when PHY link status change and diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c index 06a0c00af99c7562d334c4e391686233267b4671..276c32c3926a7d9ae12991cacf2ae1ee87e0f0d5 100644 --- a/drivers/net/ethernet/cirrus/cs89x0.c +++ b/drivers/net/ethernet/cirrus/cs89x0.c @@ -72,6 +72,8 @@ #include #include +#include + #include #include #if ALLOW_DMA diff 
--git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 7e408bcc88deda638a53102401773d49af8a63d5..0defd519ba62e8c8185121715f5bf36e3db56c49 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1135,8 +1135,8 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ? VLAN_ETH_HLEN : ETH_HLEN; if (skb->len <= 60 && - (lancer_chip(adapter) || skb_vlan_tag_present(skb)) && - is_ipv4_pkt(skb)) { + (lancer_chip(adapter) || BE3_chip(adapter) || + skb_vlan_tag_present(skb)) && is_ipv4_pkt(skb)) { ip = (struct iphdr *)ip_hdr(skb); pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len)); } diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c index b1871e6c4006946995718dc4290281b01a11d45c..00e50bd30189eea82203fa670bb68a36a1f9c268 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c @@ -54,6 +54,9 @@ static int phy_mode(enum dpmac_eth_if eth_if, phy_interface_t *if_mode) case DPMAC_ETH_IF_XFI: *if_mode = PHY_INTERFACE_MODE_10GBASER; break; + case DPMAC_ETH_IF_CAUI: + *if_mode = PHY_INTERFACE_MODE_25GBASER; + break; default: return -EINVAL; } @@ -79,6 +82,8 @@ static enum dpmac_eth_if dpmac_eth_if_mode(phy_interface_t if_mode) return DPMAC_ETH_IF_XFI; case PHY_INTERFACE_MODE_1000BASEX: return DPMAC_ETH_IF_1000BASEX; + case PHY_INTERFACE_MODE_25GBASER: + return DPMAC_ETH_IF_CAUI; default: return DPMAC_ETH_IF_MII; } @@ -418,7 +423,7 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac) mac->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_10FD | MAC_100FD | MAC_1000FD | MAC_2500FD | MAC_5000FD | - MAC_10000FD; + MAC_10000FD | MAC_25000FD; dpaa2_mac_set_supported_interfaces(mac); diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 3c4fa26f0f9b1c55a46fceae945234e5dfe068bc..9e1b2536e9a916f5541fbe3f9fbfd51109767e2c 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -1229,7 +1229,13 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, if (!skb) break; - rx_byte_cnt += skb->len; + /* When set, the outer VLAN header is extracted and reported + * in the receive buffer descriptor. So rx_byte_cnt should + * add the length of the extracted VLAN header. + */ + if (bd_status & ENETC_RXBD_FLAG_VLAN) + rx_byte_cnt += VLAN_HLEN; + rx_byte_cnt += skb->len + ETH_HLEN; rx_frm_cnt++; napi_gro_receive(napi, skb); @@ -1565,6 +1571,14 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i, &cleaned_cnt, &xdp_buff); + /* When set, the outer VLAN header is extracted and reported + * in the receive buffer descriptor. So rx_byte_cnt should + * add the length of the extracted VLAN header. 
+ */ + if (bd_status & ENETC_RXBD_FLAG_VLAN) + rx_byte_cnt += VLAN_HLEN; + rx_byte_cnt += xdp_get_buff_len(&xdp_buff); + xdp_act = bpf_prog_run_xdp(prog, &xdp_buff); switch (xdp_act) { diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index 83c27bbbc6edffa294d9f20618583622aeb031e2..126007ab70f619f3497e0d1ad76e2317fb78a71c 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -181,8 +181,8 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data) int bw_sum = 0; u8 bw; - prio_top = netdev_get_prio_tc_map(ndev, tc_nums - 1); - prio_next = netdev_get_prio_tc_map(ndev, tc_nums - 2); + prio_top = tc_nums - 1; + prio_next = tc_nums - 2; /* Support highest prio and second prio tc in cbs mode */ if (tc != prio_top && tc != prio_next) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 42ec6ca3bf035a87e11a7dfc92b19483e843cae1..38e5b5abe067cf6ce8a2bf8144c9dd7245ae2e8c 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3798,7 +3798,6 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep, entries_free = fec_enet_get_free_txdesc_num(txq); if (entries_free < MAX_SKB_FRAGS + 1) { netdev_err(fep->netdev, "NOT enough BD for SG!\n"); - xdp_return_frame(frame); return NETDEV_TX_BUSY; } @@ -3835,6 +3834,11 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep, index = fec_enet_get_bd_index(last_bdp, &txq->bd); txq->tx_skbuff[index] = NULL; + /* Make sure the updates to rest of the descriptor are performed before + * transferring ownership. + */ + dma_wmb(); + /* Send it on its way. Tell FEC it's ready, interrupt when done, * it's the last BD of the frame, and to put the CRC on the end. */ @@ -3844,8 +3848,14 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep, /* If this was the last BD in the ring, start at the beginning again. */ bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd); + /* Make sure the update to bdp are performed before txq->bd.cur. */ + dma_wmb(); + txq->bd.cur = bdp; + /* Trigger transmission start */ + writel(0, txq->bd.reg_desc_active); + return 0; } @@ -3874,12 +3884,6 @@ static int fec_enet_xdp_xmit(struct net_device *dev, sent_frames++; } - /* Make sure the update to bdp and tx_skbuff are performed. */ - wmb(); - - /* Trigger transmission start */ - writel(0, txq->bd.reg_desc_active); - __netif_tx_unlock(nq); return sent_frames; @@ -4478,9 +4482,11 @@ fec_drv_remove(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; int ret; - ret = pm_runtime_resume_and_get(&pdev->dev); + ret = pm_runtime_get_sync(&pdev->dev); if (ret < 0) - return ret; + dev_err(&pdev->dev, + "Failed to resume device in remove callback (%pe)\n", + ERR_PTR(ret)); cancel_work_sync(&fep->tx_timeout_work); fec_ptp_stop(pdev); @@ -4493,8 +4499,13 @@ fec_drv_remove(struct platform_device *pdev) of_phy_deregister_fixed_link(np); of_node_put(fep->phy_node); - clk_disable_unprepare(fep->clk_ahb); - clk_disable_unprepare(fep->clk_ipg); + /* After pm_runtime_get_sync() failed, the clks are still off, so skip + * disabling them again. 
+ */ + if (ret >= 0) { + clk_disable_unprepare(fep->clk_ahb); + clk_disable_unprepare(fep->clk_ipg); + } pm_runtime_put_noidle(&pdev->dev); pm_runtime_disable(&pdev->dev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c index cbbab5b2b402b70d4b1a2f680fa8b773c1cb7744..b85c412683ddc227ad06bc71b5357f80e9d50c93 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c @@ -331,9 +331,25 @@ static int hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw) return head == hw->cmq.csq.next_to_use; } -static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw, +static u32 hclge_get_cmdq_tx_timeout(u16 opcode, u32 tx_timeout) +{ + static const struct hclge_cmdq_tx_timeout_map cmdq_tx_timeout_map[] = { + {HCLGE_OPC_CFG_RST_TRIGGER, HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS}, + }; + u32 i; + + for (i = 0; i < ARRAY_SIZE(cmdq_tx_timeout_map); i++) + if (cmdq_tx_timeout_map[i].opcode == opcode) + return cmdq_tx_timeout_map[i].tx_timeout; + + return tx_timeout; +} + +static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw, u16 opcode, bool *is_completed) { + u32 cmdq_tx_timeout = hclge_get_cmdq_tx_timeout(opcode, + hw->cmq.tx_timeout); u32 timeout = 0; do { @@ -343,7 +359,7 @@ static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw, } udelay(1); timeout++; - } while (timeout < hw->cmq.tx_timeout); + } while (timeout < cmdq_tx_timeout); } static int hclge_comm_cmd_convert_err_code(u16 desc_ret) @@ -407,7 +423,8 @@ static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw, * if multi descriptors to be sent, use the first one to check */ if (HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag))) - hclge_comm_wait_for_resp(hw, &is_completed); + hclge_comm_wait_for_resp(hw, le16_to_cpu(desc->opcode), + &is_completed); if (!is_completed) ret = -EBADE; @@ -529,7 +546,7 @@ int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw) cmdq->crq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM; /* Setup Tx write back timeout */ - cmdq->tx_timeout = HCLGE_COMM_CMDQ_TX_TIMEOUT; + cmdq->tx_timeout = HCLGE_COMM_CMDQ_TX_TIMEOUT_DEFAULT; /* Setup queue rings */ ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CSQ); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h index de72ecbfd5ad8549653dc730e986a1b3a0f37af9..18f1b4bf362da9b83f4fe9fd8d4885842ff4d7dc 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h @@ -54,7 +54,8 @@ #define HCLGE_COMM_NIC_SW_RST_RDY BIT(HCLGE_COMM_NIC_SW_RST_RDY_B) #define HCLGE_COMM_NIC_CMQ_DESC_NUM_S 3 #define HCLGE_COMM_NIC_CMQ_DESC_NUM 1024 -#define HCLGE_COMM_CMDQ_TX_TIMEOUT 30000 +#define HCLGE_COMM_CMDQ_TX_TIMEOUT_DEFAULT 30000 +#define HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS 500000 enum hclge_opcode_type { /* Generic commands */ @@ -360,6 +361,11 @@ struct hclge_comm_caps_bit_map { u16 local_bit; }; +struct hclge_cmdq_tx_timeout_map { + u32 opcode; + u32 tx_timeout; +}; + struct hclge_comm_firmware_compat_cmd { __le32 compat; u8 rsv[20]; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index 4c3e90a1c4d07368ad4757926aeced0ea8832808..d385ffc21876601d94022104c545fa95be60ddcb 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ 
b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -130,7 +130,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = { .name = "tx_bd_queue", .cmd = HNAE3_DBG_CMD_TX_BD, .dentry = HNS3_DBG_DENTRY_TX_BD, - .buf_len = HNS3_DBG_READ_LEN_4MB, + .buf_len = HNS3_DBG_READ_LEN_5MB, .init = hns3_dbg_bd_file_init, }, { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h index 97578eabb7d8b7a2defc1ac1422bb77f29b1f6bf..4a5ef8a90a1046883416f00648e5f47ff2a42721 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h @@ -10,6 +10,7 @@ #define HNS3_DBG_READ_LEN_128KB 0x20000 #define HNS3_DBG_READ_LEN_1MB 0x100000 #define HNS3_DBG_READ_LEN_4MB 0x400000 +#define HNS3_DBG_READ_LEN_5MB 0x500000 #define HNS3_DBG_WRITE_LEN 1024 #define HNS3_DBG_DATA_STR_LEN 32 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 4fb5406c1951dce7095010a4e7c7528d8e59091b..2689b108f7df7b4e94efdeec45135b8b297a15d7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -8053,12 +8053,15 @@ static void hclge_ae_stop(struct hnae3_handle *handle) /* If it is not PF reset or FLR, the firmware will disable the MAC, * so it only need to stop phy here. */ - if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && - hdev->reset_type != HNAE3_FUNC_RESET && - hdev->reset_type != HNAE3_FLR_RESET) { - hclge_mac_stop_phy(hdev); - hclge_update_link_status(hdev); - return; + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) { + hclge_pfc_pause_en_cfg(hdev, HCLGE_PFC_TX_RX_DISABLE, + HCLGE_PFC_DISABLE); + if (hdev->reset_type != HNAE3_FUNC_RESET && + hdev->reset_type != HNAE3_FLR_RESET) { + hclge_mac_stop_phy(hdev); + hclge_update_link_status(hdev); + return; + } } hclge_reset_tqp(handle); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 4a33f65190e2b431ea201c42ce81b86ed5c99d54..922c0da3660c7b9ff466828c1896676da7206ca2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -171,8 +171,8 @@ int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx) return hclge_cmd_send(&hdev->hw, &desc, 1); } -static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap, - u8 pfc_bitmap) +int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap, + u8 pfc_bitmap) { struct hclge_desc desc; struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index 68f28a98e380bc529b1808e39d74a679decbbc43..dd6f1fd486cf24d326c808e4d902732ca9329cf7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -164,6 +164,9 @@ struct hclge_bp_to_qs_map_cmd { u32 rsvd1; }; +#define HCLGE_PFC_DISABLE 0 +#define HCLGE_PFC_TX_RX_DISABLE 0 + struct hclge_pfc_en_cmd { u8 tx_rx_en_bitmap; u8 pri_en_bitmap; @@ -235,6 +238,8 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc); void hclge_tm_pfc_info_update(struct hclge_dev *hdev); int hclge_tm_dwrr_cfg(struct hclge_dev *hdev); int hclge_tm_init_hw(struct hclge_dev *hdev, bool init); +int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap, 
+ u8 pfc_bitmap); int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx); int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr); void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index f240462503419035c7b2e733434b66969372adee..dd08989a4c7c1d122955c8b2839f07b32ff2283d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -1436,7 +1436,10 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev) * might happen in case reset assertion was made by PF. Yes, this also * means we might end up waiting bit more even for VF reset. */ - msleep(5000); + if (hdev->reset_type == HNAE3_VF_FULL_RESET) + msleep(5000); + else + msleep(500); return 0; } diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 9abaff1f2affbc456545a2ccdeb2e7b2da6cf062..39d0fe76a38ff80e7ce895a48473d0f676dc6a35 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -525,7 +525,7 @@ void iavf_set_ethtool_ops(struct net_device *netdev); void iavf_update_stats(struct iavf_adapter *adapter); void iavf_reset_interrupt_capability(struct iavf_adapter *adapter); int iavf_init_interrupt_scheme(struct iavf_adapter *adapter); -void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask); +void iavf_irq_enable_queues(struct iavf_adapter *adapter); void iavf_free_all_tx_resources(struct iavf_adapter *adapter); void iavf_free_all_rx_resources(struct iavf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 2de4baff4c20501baab55b7ffddb84b6f8e20aee..4a66873882d12a70b6d79eedd40684d3f5a77b11 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -359,21 +359,18 @@ static void iavf_irq_disable(struct iavf_adapter *adapter) } /** - * iavf_irq_enable_queues - Enable interrupt for specified queues + * iavf_irq_enable_queues - Enable interrupt for all queues * @adapter: board private structure - * @mask: bitmap of queues to enable **/ -void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask) +void iavf_irq_enable_queues(struct iavf_adapter *adapter) { struct iavf_hw *hw = &adapter->hw; int i; for (i = 1; i < adapter->num_msix_vectors; i++) { - if (mask & BIT(i - 1)) { - wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), - IAVF_VFINT_DYN_CTLN1_INTENA_MASK | - IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK); - } + wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), + IAVF_VFINT_DYN_CTLN1_INTENA_MASK | + IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK); } } @@ -387,7 +384,7 @@ void iavf_irq_enable(struct iavf_adapter *adapter, bool flush) struct iavf_hw *hw = &adapter->hw; iavf_misc_irq_enable(adapter); - iavf_irq_enable_queues(adapter, ~0); + iavf_irq_enable_queues(adapter); if (flush) iavf_flush(hw); diff --git a/drivers/net/ethernet/intel/iavf/iavf_register.h b/drivers/net/ethernet/intel/iavf/iavf_register.h index bf793332fc9d562cc09772f665a470865ce97552..a19e88898a0bb0a9d1755abaa4ff6439b9c4a373 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_register.h +++ b/drivers/net/ethernet/intel/iavf/iavf_register.h @@ -40,7 +40,7 @@ #define IAVF_VFINT_DYN_CTL01_INTENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTL01_INTENA_SHIFT) #define IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3 #define IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK IAVF_MASK(0x3, 
IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) -#define IAVF_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */ +#define IAVF_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...63 */ /* Reset: VFR */ #define IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT 0 #define IAVF_VFINT_DYN_CTLN1_INTENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT) #define IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2 diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 9afbbdac35903f1ae018c00fe28213ece3227d3f..7c0578b5457b96bd9d4b4f53a19f64cb4ca8560b 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -2238,11 +2238,6 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, iavf_process_config(adapter); adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES; - /* Request VLAN offload settings */ - if (VLAN_V2_ALLOWED(adapter)) - iavf_set_vlan_offload_features(adapter, 0, - netdev->features); - iavf_set_queue_vlan_tag_loc(adapter); was_mac_changed = !ether_addr_equal(netdev->dev_addr, diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 0157f6e98d3e24e3e77224e45a92d8698e3fc84a..eb2dc09837769ce33a7d5d1efdfae4a23fcf7a0c 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -5160,7 +5160,7 @@ ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, */ int ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, - u16 bus_addr, __le16 addr, u8 params, u8 *data, + u16 bus_addr, __le16 addr, u8 params, const u8 *data, struct ice_sq_cd *cd) { struct ice_aq_desc desc = { 0 }; diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 8ba5f935a092bf1525137c0467ae63b4c8ac16bd..81961a7d65985cae2068ed366512040ebc501303 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -229,7 +229,7 @@ ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, struct ice_sq_cd *cd); int ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, - u16 bus_addr, __le16 addr, u8 params, u8 *data, + u16 bus_addr, __le16 addr, u8 params, const u8 *data, struct ice_sq_cd *cd); bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw); #endif /* _ICE_COMMON_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index c6d4926f0fcf50df80bec065e8b150ffe45b0831..850db8e0e6b0021ab0f70718634c4dfb496f0928 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -932,10 +932,9 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring, if ((first->tx_flags & ICE_TX_FLAGS_HW_VLAN || first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) || skb->priority != TC_PRIO_CONTROL) { - first->tx_flags &= ~ICE_TX_FLAGS_VLAN_PR_M; + first->vid &= ~VLAN_PRIO_MASK; /* Mask the lower 3 bits to set the 802.1p priority */ - first->tx_flags |= (skb->priority & 0x7) << - ICE_TX_FLAGS_VLAN_PR_S; + first->vid |= (skb->priority << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK; /* if this is not already set it means a VLAN 0 + priority needs * to be offloaded */ diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c index 
2ea8a2b11bcda7d905d3f06c12c4eab78c698043..75c9de675f2028611c2d91d1b2ed055401dbeadd 100644 --- a/drivers/net/ethernet/intel/ice/ice_gnss.c +++ b/drivers/net/ethernet/intel/ice/ice_gnss.c @@ -16,8 +16,8 @@ * * number of bytes written - success * * negative - error code */ -static unsigned int -ice_gnss_do_write(struct ice_pf *pf, unsigned char *buf, unsigned int size) +static int +ice_gnss_do_write(struct ice_pf *pf, const unsigned char *buf, unsigned int size) { struct ice_aqc_link_topo_addr link_topo; struct ice_hw *hw = &pf->hw; @@ -72,39 +72,7 @@ err_out: dev_err(ice_pf_to_dev(pf), "GNSS failed to write, offset=%u, size=%u, err=%d\n", offset, size, err); - return offset; -} - -/** - * ice_gnss_write_pending - Write all pending data to internal GNSS - * @work: GNSS write work structure - */ -static void ice_gnss_write_pending(struct kthread_work *work) -{ - struct gnss_serial *gnss = container_of(work, struct gnss_serial, - write_work); - struct ice_pf *pf = gnss->back; - - if (!pf) - return; - - if (!test_bit(ICE_FLAG_GNSS, pf->flags)) - return; - - if (!list_empty(&gnss->queue)) { - struct gnss_write_buf *write_buf = NULL; - unsigned int bytes; - - write_buf = list_first_entry(&gnss->queue, - struct gnss_write_buf, queue); - - bytes = ice_gnss_do_write(pf, write_buf->buf, write_buf->size); - dev_dbg(ice_pf_to_dev(pf), "%u bytes written to GNSS\n", bytes); - - list_del(&write_buf->queue); - kfree(write_buf->buf); - kfree(write_buf); - } + return err; } /** @@ -128,12 +96,7 @@ static void ice_gnss_read(struct kthread_work *work) int err = 0; pf = gnss->back; - if (!pf) { - err = -EFAULT; - goto exit; - } - - if (!test_bit(ICE_FLAG_GNSS, pf->flags)) + if (!pf || !test_bit(ICE_FLAG_GNSS, pf->flags)) return; hw = &pf->hw; @@ -191,7 +154,6 @@ free_buf: free_page((unsigned long)buf); requeue: kthread_queue_delayed_work(gnss->kworker, &gnss->read_work, delay); -exit: if (err) dev_dbg(ice_pf_to_dev(pf), "GNSS failed to read err=%d\n", err); } @@ -220,8 +182,6 @@ static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf) pf->gnss_serial = gnss; kthread_init_delayed_work(&gnss->read_work, ice_gnss_read); - INIT_LIST_HEAD(&gnss->queue); - kthread_init_work(&gnss->write_work, ice_gnss_write_pending); kworker = kthread_create_worker(0, "ice-gnss-%s", dev_name(dev)); if (IS_ERR(kworker)) { kfree(gnss); @@ -281,7 +241,6 @@ static void ice_gnss_close(struct gnss_device *gdev) if (!gnss) return; - kthread_cancel_work_sync(&gnss->write_work); kthread_cancel_delayed_work_sync(&gnss->read_work); } @@ -300,10 +259,7 @@ ice_gnss_write(struct gnss_device *gdev, const unsigned char *buf, size_t count) { struct ice_pf *pf = gnss_get_drvdata(gdev); - struct gnss_write_buf *write_buf; struct gnss_serial *gnss; - unsigned char *cmd_buf; - int err = count; /* We cannot write a single byte using our I2C implementation. 
*/ if (count <= 1 || count > ICE_GNSS_TTY_WRITE_BUF) @@ -319,24 +275,7 @@ ice_gnss_write(struct gnss_device *gdev, const unsigned char *buf, if (!gnss) return -ENODEV; - cmd_buf = kcalloc(count, sizeof(*buf), GFP_KERNEL); - if (!cmd_buf) - return -ENOMEM; - - memcpy(cmd_buf, buf, count); - write_buf = kzalloc(sizeof(*write_buf), GFP_KERNEL); - if (!write_buf) { - kfree(cmd_buf); - return -ENOMEM; - } - - write_buf->buf = cmd_buf; - write_buf->size = count; - INIT_LIST_HEAD(&write_buf->queue); - list_add_tail(&write_buf->queue, &gnss->queue); - kthread_queue_work(gnss->kworker, &gnss->write_work); - - return err; + return ice_gnss_do_write(pf, buf, count); } static const struct gnss_operations ice_gnss_ops = { @@ -432,7 +371,6 @@ void ice_gnss_exit(struct ice_pf *pf) if (pf->gnss_serial) { struct gnss_serial *gnss = pf->gnss_serial; - kthread_cancel_work_sync(&gnss->write_work); kthread_cancel_delayed_work_sync(&gnss->read_work); kthread_destroy_worker(gnss->kworker); gnss->kworker = NULL; diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.h b/drivers/net/ethernet/intel/ice/ice_gnss.h index b8bb8b63d081723b13719e75fb974df2e865c143..75e567ad70594528c9cb3cb2aa966b9c7353ad7c 100644 --- a/drivers/net/ethernet/intel/ice/ice_gnss.h +++ b/drivers/net/ethernet/intel/ice/ice_gnss.h @@ -22,26 +22,16 @@ */ #define ICE_GNSS_UBX_WRITE_BYTES (ICE_MAX_I2C_WRITE_BYTES + 1) -struct gnss_write_buf { - struct list_head queue; - unsigned int size; - unsigned char *buf; -}; - /** * struct gnss_serial - data used to initialize GNSS TTY port * @back: back pointer to PF * @kworker: kwork thread for handling periodic work * @read_work: read_work function for handling GNSS reads - * @write_work: write_work function for handling GNSS writes - * @queue: write buffers queue */ struct gnss_serial { struct ice_pf *back; struct kthread_worker *kworker; struct kthread_delayed_work read_work; - struct kthread_work write_work; - struct list_head queue; }; #if IS_ENABLED(CONFIG_GNSS) diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 450317dfcca7360cb1acdfca470bd4295d11e3db..11ae0e41f518a13b1c77c56a15404338495b746a 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -2745,6 +2745,8 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) goto unroll_vector_base; ice_vsi_map_rings_to_vectors(vsi); + vsi->stat_offsets_loaded = false; + if (ice_is_xdp_ena_vsi(vsi)) { ret = ice_vsi_determine_xdp_res(vsi); if (ret) @@ -2793,6 +2795,9 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) ret = ice_vsi_alloc_ring_stats(vsi); if (ret) goto unroll_vector_base; + + vsi->stat_offsets_loaded = false; + /* Do not exit if configuring RSS had an issue, at least * receive traffic on first queue. Hence no need to capture * return value diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index a1f7c8edc22f341eb68bb30a507981c7c4baaa2d..42c318ceff61837495c33fa4e19ecc3db0e9bf63 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -4802,9 +4802,13 @@ err_init_pf: static void ice_deinit_dev(struct ice_pf *pf) { ice_free_irq_msix_misc(pf); - ice_clear_interrupt_scheme(pf); ice_deinit_pf(pf); ice_deinit_hw(&pf->hw); + + /* Service task is already stopped, so call reset directly. 
*/ + ice_reset(&pf->hw, ICE_RESET_PFR); + pci_wait_for_pending_transaction(pf->pdev); + ice_clear_interrupt_scheme(pf); } static void ice_init_features(struct ice_pf *pf) @@ -5094,10 +5098,6 @@ int ice_load(struct ice_pf *pf) struct ice_vsi *vsi; int err; - err = ice_reset(&pf->hw, ICE_RESET_PFR); - if (err) - return err; - err = ice_init_dev(pf); if (err) return err; @@ -5354,12 +5354,6 @@ static void ice_remove(struct pci_dev *pdev) ice_setup_mc_magic_wake(pf); ice_set_wake(pf); - /* Issue a PFR as part of the prescribed driver unload flow. Do not - * do it via ice_schedule_reset() since there is no need to rebuild - * and the service task is already stopped. - */ - ice_reset(&pf->hw, ICE_RESET_PFR); - pci_wait_for_pending_transaction(pdev); pci_disable_device(pdev); } @@ -7056,6 +7050,10 @@ int ice_down(struct ice_vsi *vsi) ice_for_each_txq(vsi, i) ice_clean_tx_ring(vsi->tx_rings[i]); + if (ice_is_xdp_ena_vsi(vsi)) + ice_for_each_xdp_txq(vsi, i) + ice_clean_tx_ring(vsi->xdp_rings[i]); + ice_for_each_rxq(vsi, i) ice_clean_rx_ring(vsi->rx_rings[i]); diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index f1dca59bd8449ea91706451c3226fe4ff912cc40..588ad8696756defb6154022fe4fc9b7c75eed366 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -1171,7 +1171,7 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) if (!vf) return -EINVAL; - ret = ice_check_vf_ready_for_cfg(vf); + ret = ice_check_vf_ready_for_reset(vf); if (ret) goto out_put_vf; @@ -1286,7 +1286,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) goto out_put_vf; } - ret = ice_check_vf_ready_for_cfg(vf); + ret = ice_check_vf_ready_for_reset(vf); if (ret) goto out_put_vf; @@ -1340,7 +1340,7 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) return -EOPNOTSUPP; } - ret = ice_check_vf_ready_for_cfg(vf); + ret = ice_check_vf_ready_for_reset(vf); if (ret) goto out_put_vf; @@ -1653,7 +1653,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, if (!vf) return -EINVAL; - ret = ice_check_vf_ready_for_cfg(vf); + ret = ice_check_vf_ready_for_reset(vf); if (ret) goto out_put_vf; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 4fcf2d07eb853b10be4b5b483b33e0f95db9446a..52d0a126eb6161852bb96457b9025a35e5641578 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -1152,11 +1152,11 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) unsigned int total_rx_bytes = 0, total_rx_pkts = 0; unsigned int offset = rx_ring->rx_offset; struct xdp_buff *xdp = &rx_ring->xdp; + u32 cached_ntc = rx_ring->first_desc; struct ice_tx_ring *xdp_ring = NULL; struct bpf_prog *xdp_prog = NULL; u32 ntc = rx_ring->next_to_clean; u32 cnt = rx_ring->count; - u32 cached_ntc = ntc; u32 xdp_xmit = 0; u32 cached_ntu; bool failure; @@ -1664,8 +1664,7 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first, if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) { td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1; - td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >> - ICE_TX_FLAGS_VLAN_S; + td_tag = first->vid; } dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); @@ -1998,7 +1997,7 @@ ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first) * VLAN offloads exclusively so we only care about the VLAN ID here */ if 
(skb_vlan_tag_present(skb)) { - first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S; + first->vid = skb_vlan_tag_get(skb); if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2) first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN; else @@ -2388,8 +2387,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring) offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | (ICE_TX_CTX_DESC_IL2TAG2 << ICE_TXD_CTX_QW1_CMD_S)); - offload.cd_l2tag2 = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >> - ICE_TX_FLAGS_VLAN_S; + offload.cd_l2tag2 = first->vid; } /* set up TSO offload */ diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index fff0efe28373affc555994c335f3c412d8844cdb..166413fc33f48f71a459009819a75d000601b03f 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -127,10 +127,6 @@ static inline int ice_skb_pad(void) #define ICE_TX_FLAGS_IPV6 BIT(6) #define ICE_TX_FLAGS_TUNNEL BIT(7) #define ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN BIT(8) -#define ICE_TX_FLAGS_VLAN_M 0xffff0000 -#define ICE_TX_FLAGS_VLAN_PR_M 0xe0000000 -#define ICE_TX_FLAGS_VLAN_PR_S 29 -#define ICE_TX_FLAGS_VLAN_S 16 #define ICE_XDP_PASS 0 #define ICE_XDP_CONSUMED BIT(0) @@ -182,8 +178,9 @@ struct ice_tx_buf { unsigned int gso_segs; unsigned int nr_frags; /* used for mbuf XDP */ }; - u32 type:16; /* &ice_tx_buf_type */ - u32 tx_flags:16; + u32 tx_flags:12; + u32 type:4; /* &ice_tx_buf_type */ + u32 vid:16; DEFINE_DMA_UNMAP_LEN(len); DEFINE_DMA_UNMAP_ADDR(dma); }; diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index 89fd6982df0934b53b27a538856bdc7b3c30c75b..bf74a2f3a4f8c3d8ebf5a1492782ec06c79b3ddd 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -185,6 +185,25 @@ int ice_check_vf_ready_for_cfg(struct ice_vf *vf) return 0; } +/** + * ice_check_vf_ready_for_reset - check if VF is ready to be reset + * @vf: VF to check if it's ready to be reset + * + * The purpose of this function is to ensure that the VF is not in reset, + * disabled, and is both initialized and active, thus enabling us to safely + * initialize another reset. 
+ */ +int ice_check_vf_ready_for_reset(struct ice_vf *vf) +{ + int ret; + + ret = ice_check_vf_ready_for_cfg(vf); + if (!ret && !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) + ret = -EAGAIN; + + return ret; +} + /** * ice_trigger_vf_reset - Reset a VF on HW * @vf: pointer to the VF structure diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h index e3cda6fb71ab1e61221557abb0692ad9c1b19d14..a38ef00a367942b52f45c8b9d87abe4d95f52fef 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h @@ -215,6 +215,7 @@ u16 ice_get_num_vfs(struct ice_pf *pf); struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf); bool ice_is_vf_disabled(struct ice_vf *vf); int ice_check_vf_ready_for_cfg(struct ice_vf *vf); +int ice_check_vf_ready_for_reset(struct ice_vf *vf); void ice_set_vf_state_dis(struct ice_vf *vf); bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf); void diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index 97243c616d5d6d939196fcca9b4ea04724dbbf0a..f4a524f80b110f1dc3d60d706eedb31a20740000 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -3955,6 +3955,7 @@ error_handler: ice_vc_notify_vf_link_state(vf); break; case VIRTCHNL_OP_RESET_VF: + clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states); ops->reset_vf(vf); break; case VIRTCHNL_OP_ADD_ETH_ADDR: diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c index 205d577bdbbaad57af46694858f6d4f28617e3a7..caf91c6f52b4d03cabef2cc7fe6a4d545a0cc899 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.c +++ b/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -426,7 +426,7 @@ void igb_mta_set(struct e1000_hw *hw, u32 hash_value) static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) { u32 hash_value, hash_mask; - u8 bit_shift = 0; + u8 bit_shift = 1; /* Register count multiplied by bits per register */ hash_mask = (hw->mac.mta_reg_count * 32) - 1; @@ -434,7 +434,7 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) /* For a mc_filter_type of 0, bit_shift is the number of left-shifts * where 0xFF would still fall within the hash mask. 
*/ - while (hash_mask >> bit_shift != 0xFF) + while (hash_mask >> bit_shift != 0xFF && bit_shift < 4) bit_shift++; /* The portion of the address that is used for the hash table diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 7d60da1b7bf4182ffebdcbd3ff2c4dbe7df47d6b..319ed601eaa1e9827e7fab48c6d13b01e2bedff1 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -822,6 +822,8 @@ static int igb_set_eeprom(struct net_device *netdev, */ ret_val = hw->nvm.ops.read(hw, last_word, 1, &eeprom_buff[last_word - first_word]); + if (ret_val) + goto out; } /* Device's eeprom is always little-endian, word addressable */ @@ -841,6 +843,7 @@ static int igb_set_eeprom(struct net_device *netdev, hw->nvm.ops.update(hw); igb_set_fw_version(adapter); +out: kfree(eeprom_buff); return ret_val; } diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 58872a4c254015f527093076a9124b615d63243e..bb3db387d49cf6de899fec0acbcb1882a5f2452e 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -6947,6 +6947,7 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt) struct e1000_hw *hw = &adapter->hw; struct ptp_clock_event event; struct timespec64 ts; + unsigned long flags; if (pin < 0 || pin >= IGB_N_SDP) return; @@ -6954,9 +6955,12 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt) if (hw->mac.type == e1000_82580 || hw->mac.type == e1000_i354 || hw->mac.type == e1000_i350) { - s64 ns = rd32(auxstmpl); + u64 ns = rd32(auxstmpl); - ns += ((s64)(rd32(auxstmph) & 0xFF)) << 32; + ns += ((u64)(rd32(auxstmph) & 0xFF)) << 32; + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_cyc2time(&adapter->tc, ns); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); ts = ns_to_timespec64(ns); } else { ts.tv_nsec = rd32(auxstmpl); diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 1c46768820825ff20bb401b6c4a519fabbcdd429..fa764190f27050a28dc0778e426eec18467c9389 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -254,6 +254,13 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring) /* reset BQL for queue */ netdev_tx_reset_queue(txring_txq(tx_ring)); + /* Zero out the buffer ring */ + memset(tx_ring->tx_buffer_info, 0, + sizeof(*tx_ring->tx_buffer_info) * tx_ring->count); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + /* reset next_to_use and next_to_clean */ tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; @@ -267,7 +274,7 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring) */ void igc_free_tx_resources(struct igc_ring *tx_ring) { - igc_clean_tx_ring(tx_ring); + igc_disable_tx_ring(tx_ring); vfree(tx_ring->tx_buffer_info); tx_ring->tx_buffer_info = NULL; @@ -6723,6 +6730,9 @@ static void igc_remove(struct pci_dev *pdev) igc_ptp_stop(adapter); + pci_disable_ptm(pdev); + pci_clear_master(pdev); + set_bit(__IGC_DOWN, &adapter->state); del_timer_sync(&adapter->watchdog_timer); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 5d83c887a3fceb9f131abfc7c41798a982772fd2..1726297f2e0df09e92e8e5b327162b8a87571040 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1256,7 +1256,7 @@ static bool 
ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, if (!__netif_txq_completed_wake(txq, total_packets, total_bytes, ixgbe_desc_unused(tx_ring), TX_WAKE_THRESHOLD, - netif_carrier_ok(tx_ring->netdev) && + !netif_carrier_ok(tx_ring->netdev) || test_bit(__IXGBE_DOWN, &adapter->state))) ++tx_ring->tx_stats.restart_queue; diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c index e1853da280f961b3f14099fa2ff51bc4b2fb7312..43eb6e871351101f5f917ec90105f1202b21f50e 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -981,6 +981,9 @@ int octep_device_setup(struct octep_device *oct) oct->mmio[i].hw_addr = ioremap(pci_resource_start(oct->pdev, i * 2), pci_resource_len(oct->pdev, i * 2)); + if (!oct->mmio[i].hw_addr) + goto unmap_prev; + oct->mmio[i].mapped = 1; } @@ -1015,7 +1018,9 @@ int octep_device_setup(struct octep_device *oct) return 0; unsupported_dev: - for (i = 0; i < OCTEP_MMIO_REGIONS; i++) + i = OCTEP_MMIO_REGIONS; +unmap_prev: + while (i--) iounmap(oct->mmio[i].hw_addr); kfree(oct->conf); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 4ad707e758b9f700cd675f0f093b59f5b088779d..f01d057ad025ae7f2aca0d06c16238bc2d005227 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -1878,7 +1878,8 @@ static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc, free_cnt = rvu_rsrc_free_count(&txsch->schq); } - if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC) + if (free_cnt < req_schq || req->schq[lvl] > MAX_TXSCHQ_PER_FUNC || + req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC) return NIX_AF_ERR_TLX_ALLOC_FAIL; /* If contiguous queues are needed, check for availability */ @@ -4080,10 +4081,6 @@ int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs) { - /* CN10k supports 72KB FIFO size and max packet size of 64k */ - if (rvu->hw->lbk_bufsize == 0x12000) - return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16; - return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */ } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c index 51209119f0f2faf431ab46df5ee71e706472c9e0..9f11c1e4073737f3f8d187da7f29497d925e2b8b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c @@ -1164,10 +1164,8 @@ static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_i { struct npc_exact_table *table; u16 *cnt, old_cnt; - bool promisc; table = rvu->hw->table; - promisc = table->promisc_mode[drop_mcam_idx]; cnt = &table->cnt_cmd_rules[drop_mcam_idx]; old_cnt = *cnt; @@ -1179,16 +1177,13 @@ static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_i *enable_or_disable_cam = false; - if (promisc) - goto done; - - /* If all rules are deleted and not already in promisc mode; disable cam */ + /* If all rules are deleted, disable cam */ if (!*cnt && val < 0) { *enable_or_disable_cam = true; goto done; } - /* If rule got added and not already in promisc mode; enable cam */ + /* If rule got added, enable cam */ if (!old_cnt && val > 0) { *enable_or_disable_cam = true; goto done; @@ -1443,7 +1438,6 @@ int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 
pcifunc) u32 drop_mcam_idx; bool *promisc; bool rc; - u32 cnt; table = rvu->hw->table; @@ -1466,17 +1460,8 @@ int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc) return LMAC_AF_ERR_INVALID_PARAM; } *promisc = false; - cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL); mutex_unlock(&table->lock); - /* If no dmac filter entries configured, disable drop rule */ - if (!cnt) - rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false); - else - rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc); - - dev_dbg(rvu->dev, "%s: disabled promisc mode (cgx=%d lmac=%d, cnt=%d)\n", - __func__, cgx_id, lmac_id, cnt); return 0; } @@ -1494,7 +1479,6 @@ int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc) u32 drop_mcam_idx; bool *promisc; bool rc; - u32 cnt; table = rvu->hw->table; @@ -1517,17 +1501,8 @@ int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc) return LMAC_AF_ERR_INVALID_PARAM; } *promisc = true; - cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL); mutex_unlock(&table->lock); - /* If no dmac filter entries configured, disable drop rule */ - if (!cnt) - rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false); - else - rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc); - - dev_dbg(rvu->dev, "%s: Enabled promisc mode (cgx=%d lmac=%d cnt=%d)\n", - __func__, cgx_id, lmac_id, cnt); return 0; } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index 7045fedfd73a04ba63cdb2d657754722f5291de8..7af223b0a37f51e129c5d28d9d500262a4c0b620 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -652,9 +652,7 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, htons(ext->lso_sb - skb_network_offset(skb)); } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { ext->lso_format = pfvf->hw.lso_tsov6_idx; - - ipv6_hdr(skb)->payload_len = - htons(ext->lso_sb - skb_network_offset(skb)); + ipv6_hdr(skb)->payload_len = htons(tcp_hdrlen(skb)); } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { __be16 l3_proto = vlan_get_protocol(skb); struct udphdr *udph = udp_hdr(skb); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index a75fd072082c6e415043398a8293323c246ac4a1..834c644b67db51b69724d4b8671f3bb65821e7b0 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -3269,18 +3269,14 @@ static int mtk_open(struct net_device *dev) eth->dsa_meta[i] = md_dst; } } else { - /* Hardware special tag parsing needs to be disabled if at least - * one MAC does not use DSA. + /* Hardware DSA untagging and VLAN RX offloading need to be + * disabled if at least one MAC does not use DSA. 
*/ u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL); val &= ~MTK_CDMP_STAG_EN; mtk_w32(eth, val, MTK_CDMP_IG_CTRL); - val = mtk_r32(eth, MTK_CDMQ_IG_CTRL); - val &= ~MTK_CDMQ_STAG_EN; - mtk_w32(eth, val, MTK_CDMQ_IG_CTRL); - mtk_w32(eth, 0, MTK_CDMP_EG_CTRL); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index d53de39539a8fd90d455fbe83f87dba0225dde14..d532883b42d7c30bf2f1230097a1cd55d46e4cf4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -1920,9 +1920,10 @@ static void mlx5_cmd_err_trace(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status, u32 syndrome, int err) { + const char *namep = mlx5_command_str(opcode); struct mlx5_cmd_stats *stats; - if (!err) + if (!err || !(strcmp(namep, "unknown command opcode"))) return; stats = &dev->cmd.stats[opcode]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index f40497823e65fada81f0d289e0b4c68842ffc940..7c0f2adbea000d2453add53348a48c49ccc75110 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -490,7 +490,7 @@ static void poll_trace(struct mlx5_fw_tracer *tracer, (u64)timestamp_low; break; default: - if (tracer_event->event_id >= tracer->str_db.first_string_trace || + if (tracer_event->event_id >= tracer->str_db.first_string_trace && tracer_event->event_id <= tracer->str_db.first_string_trace + tracer->str_db.num_string_trace) { tracer_event->type = TRACER_EVENT_TYPE_STRING; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index b8987a404d7526d2f6fe3cd48c0a5f94e22d9aee..8e999f238194812bb44e5b57e1f3b22cf08c67dd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -327,6 +327,7 @@ struct mlx5e_params { unsigned int sw_mtu; int hard_mtu; bool ptp_rx; + __be32 terminate_lkey_be; }; static inline u8 mlx5e_get_dcb_num_tc(struct mlx5e_params *params) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 9c94807097cb84463ef7a5b6dfc8b3af416f880a..5ce28ff7685fcedd66994c3cc98340eda7eef513 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -732,7 +732,8 @@ static void mlx5e_rx_compute_wqe_bulk_params(struct mlx5e_params *params, static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_xsk_param *xsk, - struct mlx5e_rq_frags_info *info) + struct mlx5e_rq_frags_info *info, + u32 *xdp_frag_size) { u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu); int frag_size_max = DEFAULT_FRAG_SIZE; @@ -845,6 +846,8 @@ out: info->log_num_frags = order_base_2(info->num_frags); + *xdp_frag_size = info->num_frags > 1 && params->xdp_prog ? 
PAGE_SIZE : 0; + + return 0; } @@ -989,7 +992,8 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev, } default: /* MLX5_WQ_TYPE_CYCLIC */ MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames); - err = mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info); + err = mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info, + &param->xdp_frag_size); if (err) return err; ndsegs = param->frags_info.num_frags; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h index a5d20f6d6d9c703c8828f4c11f4d19be86a688f9..6800949dafbc99b0b530ec9d8dcda89179dd1e1e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h @@ -24,6 +24,7 @@ struct mlx5e_rq_param { u32 rqc[MLX5_ST_SZ_DW(rqc)]; struct mlx5_wq_param wq; struct mlx5e_rq_frags_info frags_info; + u32 xdp_frag_size; }; struct mlx5e_sq_param { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c index 7ac1ad9c46de0352a0a323f815f8a0b28f2281d2..7e8e96cc5cd083e220e36334dd499220f2ed542a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c @@ -51,7 +51,7 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv, if (err) goto out; - for (i = 0; i < MLX5E_MAX_BUFFER; i++) { + for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) { buffer = MLX5_ADDR_OF(pbmc_reg, out, buffer[i]); port_buffer->buffer[i].lossy = MLX5_GET(bufferx_reg, buffer, lossy); @@ -73,14 +73,24 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv, port_buffer->buffer[i].lossy); } - port_buffer->headroom_size = total_used; + port_buffer->internal_buffers_size = 0; + for (i = MLX5E_MAX_NETWORK_BUFFER; i < MLX5E_TOTAL_BUFFERS; i++) { + buffer = MLX5_ADDR_OF(pbmc_reg, out, buffer[i]); + port_buffer->internal_buffers_size += + MLX5_GET(bufferx_reg, buffer, size) * port_buff_cell_sz; + } + port_buffer->port_buffer_size = MLX5_GET(pbmc_reg, out, port_buffer_size) * port_buff_cell_sz; - port_buffer->spare_buffer_size = - port_buffer->port_buffer_size - total_used; - - mlx5e_dbg(HW, priv, "total buffer size=%d, spare buffer size=%d\n", - port_buffer->port_buffer_size, + port_buffer->headroom_size = total_used; + port_buffer->spare_buffer_size = port_buffer->port_buffer_size - + port_buffer->internal_buffers_size - + port_buffer->headroom_size; + + mlx5e_dbg(HW, priv, + "total buffer size=%u, headroom buffer size=%u, internal buffers size=%u, spare buffer size=%u\n", + port_buffer->port_buffer_size, port_buffer->headroom_size, + port_buffer->internal_buffers_size, port_buffer->spare_buffer_size); out: kfree(out); @@ -206,11 +216,11 @@ static int port_update_pool_cfg(struct mlx5_core_dev *mdev, if (!MLX5_CAP_GEN(mdev, sbcam_reg)) return 0; - for (i = 0; i < MLX5E_MAX_BUFFER; i++) + for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) lossless_buff_count += ((port_buffer->buffer[i].size) && (!(port_buffer->buffer[i].lossy))); - for (i = 0; i < MLX5E_MAX_BUFFER; i++) { + for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) { p = select_sbcm_params(&port_buffer->buffer[i], lossless_buff_count); err = mlx5e_port_set_sbcm(mdev, 0, i, MLX5_INGRESS_DIR, @@ -293,7 +303,7 @@ static int port_set_buffer(struct mlx5e_priv *priv, if (err) goto out; - for (i = 0; i < MLX5E_MAX_BUFFER; i++) { + for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) { void *buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]); u64 size = port_buffer->buffer[i].size; u64 xoff =
port_buffer->buffer[i].xoff; @@ -351,7 +361,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, { int i; - for (i = 0; i < MLX5E_MAX_BUFFER; i++) { + for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) { if (port_buffer->buffer[i].lossy) { port_buffer->buffer[i].xoff = 0; port_buffer->buffer[i].xon = 0; @@ -408,7 +418,7 @@ static int update_buffer_lossy(struct mlx5_core_dev *mdev, int err; int i; - for (i = 0; i < MLX5E_MAX_BUFFER; i++) { + for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) { prio_count = 0; lossy_count = 0; @@ -432,11 +442,11 @@ static int update_buffer_lossy(struct mlx5_core_dev *mdev, } if (changed) { - err = port_update_pool_cfg(mdev, port_buffer); + err = update_xoff_threshold(port_buffer, xoff, max_mtu, port_buff_cell_sz); if (err) return err; - err = update_xoff_threshold(port_buffer, xoff, max_mtu, port_buff_cell_sz); + err = port_update_pool_cfg(mdev, port_buffer); if (err) return err; @@ -515,7 +525,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, if (change & MLX5E_PORT_BUFFER_PRIO2BUFFER) { update_prio2buffer = true; - for (i = 0; i < MLX5E_MAX_BUFFER; i++) + for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) mlx5e_dbg(HW, priv, "%s: requested to map prio[%d] to buffer %d\n", __func__, i, prio2buffer[i]); @@ -530,7 +540,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, } if (change & MLX5E_PORT_BUFFER_SIZE) { - for (i = 0; i < MLX5E_MAX_BUFFER; i++) { + for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) { mlx5e_dbg(HW, priv, "%s: buffer[%d]=%d\n", __func__, i, buffer_size[i]); if (!port_buffer.buffer[i].lossy && !buffer_size[i]) { mlx5e_dbg(HW, priv, "%s: lossless buffer[%d] size cannot be zero\n", @@ -544,7 +554,9 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, mlx5e_dbg(HW, priv, "%s: total buffer requested=%d\n", __func__, total_used); - if (total_used > port_buffer.port_buffer_size) + if (total_used > port_buffer.headroom_size && + (total_used - port_buffer.headroom_size) > + port_buffer.spare_buffer_size) return -EINVAL; update_buffer = true; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h index a6ef118de758f88591181d521d83cd2493a08bda..f4a19ffbb641c07b82291091e4f67f8524378e82 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h @@ -35,7 +35,8 @@ #include "en.h" #include "port.h" -#define MLX5E_MAX_BUFFER 8 +#define MLX5E_MAX_NETWORK_BUFFER 8 +#define MLX5E_TOTAL_BUFFERS 10 #define MLX5E_DEFAULT_CABLE_LEN 7 /* 7 meters */ #define MLX5_BUFFER_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, pcam_reg) && \ @@ -60,8 +61,9 @@ struct mlx5e_bufferx_reg { struct mlx5e_port_buffer { u32 port_buffer_size; u32 spare_buffer_size; - u32 headroom_size; - struct mlx5e_bufferx_reg buffer[MLX5E_MAX_BUFFER]; + u32 headroom_size; /* Buffers 0-7 */ + u32 internal_buffers_size; /* Buffers 8-9 */ + struct mlx5e_bufferx_reg buffer[MLX5E_MAX_NETWORK_BUFFER]; }; int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c index eb5abd0e55d9060ae62ea7542eeb2b111128c4ab..3cbebfba582bdbce79741c4c7d798dd13846af64 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c @@ -175,6 +175,8 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget) /* ensure cq space is freed before enabling more cqes */ 
wmb(); + mlx5e_txqsq_wake(&ptpsq->txqsq); + return work_done == budget; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c index fc923a99b6a483d5eaddba1f01c76d66549b74a3..0380a04c3691cbc72d13c873886a4ffd3ec96d01 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c @@ -84,7 +84,7 @@ mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state, int mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state, - struct flow_action *flow_action, + struct flow_action *flow_action, int from, int to, struct mlx5_flow_attr *attr, enum mlx5_flow_namespace_type ns_type) { @@ -96,6 +96,11 @@ mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state, priv = parse_state->flow->priv; flow_action_for_each(i, act, flow_action) { + if (i < from) + continue; + else if (i > to) + break; + tc_act = mlx5e_tc_act_get(act->id, ns_type); if (!tc_act || !tc_act->post_parse) continue; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h index 0e6e1872ac62e291aef03dcafd4d413fcb9d6bb7..d6c12d0ea55ba5458bda9d0ec1199ab2b4891c2d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h @@ -112,7 +112,7 @@ mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state, int mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state, - struct flow_action *flow_action, + struct flow_action *flow_action, int from, int to, struct mlx5_flow_attr *attr, enum mlx5_flow_namespace_type ns_type); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index ead38ef6948313dd96366ac70c1d2d1f0c7cf0c3..a254e728ac95442cbbccde0d02e6616a11b97d72 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -2021,6 +2021,8 @@ void mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv, struct mlx5_flow_attr *attr) { + if (!attr->ct_attr.ft) /* no ct action, return */ + return; if (!attr->ct_attr.nf_ft) /* means only ct clear action, and not ct_clear,ct() */ return; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c index 20c2d2ecaf9356da8d253e8e33a9b45add25bf73..f0c3464f037f4a0d0bc37047e7ace1b50ba8ff6c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c @@ -492,6 +492,19 @@ void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e) mlx5e_encap_dealloc(priv, e); } +static void mlx5e_encap_put_locked(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + + lockdep_assert_held(&esw->offloads.encap_tbl_lock); + + if (!refcount_dec_and_test(&e->refcnt)) + return; + list_del(&e->route_list); + hash_del_rcu(&e->encap_hlist); + mlx5e_encap_dealloc(priv, e); +} + static void mlx5e_decap_put(struct mlx5e_priv *priv, struct mlx5e_decap_entry *d) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; @@ -816,6 +829,8 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv, uintptr_t hash_key; int err = 0; + lockdep_assert_held(&esw->offloads.encap_tbl_lock); + parse_attr = attr->parse_attr; tun_info = parse_attr->tun_info[out_index]; mpls_info = 
&parse_attr->mpls_info[out_index]; @@ -829,7 +844,6 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv, hash_key = hash_encap_info(&key); - mutex_lock(&esw->offloads.encap_tbl_lock); e = mlx5e_encap_get(priv, &key, hash_key); /* must verify if encap is valid or not */ @@ -840,15 +854,6 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv, goto out_err; } - mutex_unlock(&esw->offloads.encap_tbl_lock); - wait_for_completion(&e->res_ready); - - /* Protect against concurrent neigh update. */ - mutex_lock(&esw->offloads.encap_tbl_lock); - if (e->compl_result < 0) { - err = -EREMOTEIO; - goto out_err; - } goto attach_flow; } @@ -877,15 +882,12 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv, INIT_LIST_HEAD(&e->flows); hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key); tbl_time_before = mlx5e_route_tbl_get_last_update(priv); - mutex_unlock(&esw->offloads.encap_tbl_lock); if (family == AF_INET) err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e); else if (family == AF_INET6) err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e); - /* Protect against concurrent neigh update. */ - mutex_lock(&esw->offloads.encap_tbl_lock); complete_all(&e->res_ready); if (err) { e->compl_result = err; @@ -920,18 +922,15 @@ attach_flow: } else { flow_flag_set(flow, SLOW); } - mutex_unlock(&esw->offloads.encap_tbl_lock); return err; out_err: - mutex_unlock(&esw->offloads.encap_tbl_lock); if (e) - mlx5e_encap_put(priv, e); + mlx5e_encap_put_locked(priv, e); return err; out_err_init: - mutex_unlock(&esw->offloads.encap_tbl_lock); kfree(tun_info); kfree(e); return err; @@ -1016,6 +1015,93 @@ out_err: return err; } +int mlx5e_tc_tun_encap_dests_set(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct mlx5_flow_attr *attr, + struct netlink_ext_ack *extack, + bool *vf_tun) +{ + struct mlx5e_tc_flow_parse_attr *parse_attr; + struct mlx5_esw_flow_attr *esw_attr; + struct net_device *encap_dev = NULL; + struct mlx5e_rep_priv *rpriv; + struct mlx5e_priv *out_priv; + struct mlx5_eswitch *esw; + int out_index; + int err = 0; + + if (!mlx5e_is_eswitch_flow(flow)) + return 0; + + parse_attr = attr->parse_attr; + esw_attr = attr->esw_attr; + *vf_tun = false; + + esw = priv->mdev->priv.eswitch; + mutex_lock(&esw->offloads.encap_tbl_lock); + for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) { + struct net_device *out_dev; + int mirred_ifindex; + + if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)) + continue; + + mirred_ifindex = parse_attr->mirred_ifindex[out_index]; + out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex); + if (!out_dev) { + NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found"); + err = -ENODEV; + goto out; + } + err = mlx5e_attach_encap(priv, flow, attr, out_dev, out_index, + extack, &encap_dev); + dev_put(out_dev); + if (err) + goto out; + + if (esw_attr->dests[out_index].flags & + MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE && + !esw_attr->dest_int_port) + *vf_tun = true; + + out_priv = netdev_priv(encap_dev); + rpriv = out_priv->ppriv; + esw_attr->dests[out_index].rep = rpriv->rep; + esw_attr->dests[out_index].mdev = out_priv->mdev; + } + + if (*vf_tun && esw_attr->out_count > 1) { + NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported"); + err = -EOPNOTSUPP; + goto out; + } + +out: + mutex_unlock(&esw->offloads.encap_tbl_lock); + return err; +} + +void mlx5e_tc_tun_encap_dests_unset(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct mlx5_flow_attr *attr) +{ + struct mlx5_esw_flow_attr 
*esw_attr; + int out_index; + + if (!mlx5e_is_eswitch_flow(flow)) + return; + + esw_attr = attr->esw_attr; + + for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) { + if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)) + continue; + + mlx5e_detach_encap(flow->priv, flow, attr, out_index); + kfree(attr->parse_attr->tun_info[out_index]); + } +} + static int cmp_route_info(struct mlx5e_route_key *a, struct mlx5e_route_key *b) { @@ -1369,11 +1455,13 @@ static void mlx5e_invalidate_encap(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow; list_for_each_entry(flow, encap_flows, tmp_list) { - struct mlx5_flow_attr *attr = flow->attr; struct mlx5_esw_flow_attr *esw_attr; + struct mlx5_flow_attr *attr; if (!mlx5e_is_offloaded_flow(flow)) continue; + + attr = mlx5e_tc_get_encap_attr(flow); esw_attr = attr->esw_attr; if (flow_flag_test(flow, SLOW)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h index 8ad273dde40ee5b41ad1cdc4963ce8ad9d10fccb..5d7d67687cbcd29204ae121e4af99b1292335b02 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h @@ -30,6 +30,15 @@ int mlx5e_attach_decap_route(struct mlx5e_priv *priv, void mlx5e_detach_decap_route(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow); +int mlx5e_tc_tun_encap_dests_set(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct mlx5_flow_attr *attr, + struct netlink_ext_ack *extack, + bool *vf_tun); +void mlx5e_tc_tun_encap_dests_unset(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct mlx5_flow_attr *attr); + struct ip_tunnel_info *mlx5e_dup_tun_info(const struct ip_tunnel_info *tun_info); int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index 47381e949f1f5fc7006fddeb2b5e19e1dee7a354..879d698b6119303836915d5dc19f6f910d506890 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -193,6 +193,8 @@ static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size) return pi; } +void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq); + static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { return be16_to_cpu(cqe->shampo.header_entry_index) & (rq->mpwqe.shampo->hd_per_wq - 1); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c index ed279f4509761b5b3acd298d35477e83bd248647..36826b58248478a4150818093ab29b4e04574c01 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c @@ -86,7 +86,7 @@ static int mlx5e_init_xsk_rq(struct mlx5e_channel *c, if (err) return err; - return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix, 0); + return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix, c->napi.napi_id); } static int mlx5e_open_xsk_rq(struct mlx5e_channel *c, struct mlx5e_params *params, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 55b38544422f3e11365c94d103d83dd321dc4faf..891d39b4bfd4f77fc68357990ce36ecf54cb1ff4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -61,16 +61,19 @@ static void 
mlx5e_ipsec_handle_tx_limit(struct work_struct *_work) struct mlx5e_ipsec_sa_entry *sa_entry = dwork->sa_entry; struct xfrm_state *x = sa_entry->x; - spin_lock(&x->lock); + if (sa_entry->attrs.drop) + return; + + spin_lock_bh(&x->lock); xfrm_state_check_expire(x); if (x->km.state == XFRM_STATE_EXPIRED) { sa_entry->attrs.drop = true; - mlx5e_accel_ipsec_fs_modify(sa_entry); - } - spin_unlock(&x->lock); + spin_unlock_bh(&x->lock); - if (sa_entry->attrs.drop) + mlx5e_accel_ipsec_fs_modify(sa_entry); return; + } + spin_unlock_bh(&x->lock); queue_delayed_work(sa_entry->ipsec->wq, &dwork->dwork, MLX5_IPSEC_RESCHED); @@ -1040,11 +1043,17 @@ err_fs: return err; } -static void mlx5e_xfrm_free_policy(struct xfrm_policy *x) +static void mlx5e_xfrm_del_policy(struct xfrm_policy *x) { struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(x); mlx5e_accel_ipsec_fs_del_pol(pol_entry); +} + +static void mlx5e_xfrm_free_policy(struct xfrm_policy *x) +{ + struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(x); + kfree(pol_entry); } @@ -1065,6 +1074,7 @@ static const struct xfrmdev_ops mlx5e_ipsec_packet_xfrmdev_ops = { .xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft, .xdo_dev_policy_add = mlx5e_xfrm_add_policy, + .xdo_dev_policy_delete = mlx5e_xfrm_del_policy, .xdo_dev_policy_free = mlx5e_xfrm_free_policy, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c index df90e19066bcc58085373d58342a83e708a82e8f..a3554bde3e0758deaa7c9a4e03a133fabc9db84e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c @@ -305,7 +305,17 @@ static void mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry, } mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs); + + /* It is safe to execute the modify below unlocked since the only flows + * that could affect this HW object, are create, destroy and this work. + * + * Creation flow can't co-exist with this modify work, the destruction + * flow would cancel this work, and this work is a single entity that + * can't conflict with it self. 
+ */ + spin_unlock_bh(&sa_entry->x->lock); mlx5_accel_esp_modify_xfrm(sa_entry, &attrs); + spin_lock_bh(&sa_entry->x->lock); data.data_offset_condition_operand = MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET; @@ -431,7 +441,7 @@ static void mlx5e_ipsec_handle_event(struct work_struct *_work) aso = sa_entry->ipsec->aso; attrs = &sa_entry->attrs; - spin_lock(&sa_entry->x->lock); + spin_lock_bh(&sa_entry->x->lock); ret = mlx5e_ipsec_aso_query(sa_entry, NULL); if (ret) goto unlock; @@ -447,7 +457,7 @@ static void mlx5e_ipsec_handle_event(struct work_struct *_work) mlx5e_ipsec_handle_limits(sa_entry); unlock: - spin_unlock(&sa_entry->x->lock); + spin_unlock_bh(&sa_entry->x->lock); kfree(work); } @@ -596,7 +606,8 @@ int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry, do { ret = mlx5_aso_poll_cq(aso->aso, false); if (ret) - usleep_range(2, 10); + /* We are in atomic context */ + udelay(10); } while (ret && time_is_after_jiffies(expires)); spin_unlock_bh(&aso->lock); return ret; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index 1f90594499c6098d06f37f1beed887d200b1a1a4..41c396e764579492b21b2f1cedb3f54a471894c4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -150,10 +150,8 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb, inlen = MLX5_ST_SZ_BYTES(modify_tir_in); in = kvzalloc(inlen, GFP_KERNEL); - if (!in) { - err = -ENOMEM; - goto out; - } + if (!in) + return -ENOMEM; if (enable_uc_lb) lb_flags = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST; @@ -171,14 +169,13 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb, tirn = tir->tirn; err = mlx5_core_modify_tir(mdev, tirn, in); if (err) - goto out; + break; } + mutex_unlock(&mdev->mlx5e_res.hw_objs.td.list_lock); -out: kvfree(in); if (err) netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err); - mutex_unlock(&mdev->mlx5e_res.hw_objs.td.list_lock); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index 89de92d0648363a899793f1aa5ea4fb324a9ddc8..ebee52a8361aa7f7102683c4d62dbc4571ed1a71 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -926,9 +926,10 @@ static int mlx5e_dcbnl_getbuffer(struct net_device *dev, if (err) return err; - for (i = 0; i < MLX5E_MAX_BUFFER; i++) + for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) dcb_buffer->buffer_size[i] = port_buffer.buffer[i].size; - dcb_buffer->total_size = port_buffer.port_buffer_size; + dcb_buffer->total_size = port_buffer.port_buffer_size - + port_buffer.internal_buffers_size; return 0; } @@ -970,7 +971,7 @@ static int mlx5e_dcbnl_setbuffer(struct net_device *dev, if (err) return err; - for (i = 0; i < MLX5E_MAX_BUFFER; i++) { + for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) { if (port_buffer.buffer[i].size != dcb_buffer->buffer_size[i]) { changed |= MLX5E_PORT_BUFFER_SIZE; buffer_size = dcb_buffer->buffer_size; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 2944691f06added0b8420c962f514a7bee413e49..a5bdf78955d765f5276dd0ddee9e38bf35117f26 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -641,7 +641,7 @@ static void mlx5e_free_mpwqe_rq_drop_page(struct mlx5e_rq *rq) } static int mlx5e_init_rxq_rq(struct 
mlx5e_channel *c, struct mlx5e_params *params, - struct mlx5e_rq *rq) + u32 xdp_frag_size, struct mlx5e_rq *rq) { struct mlx5_core_dev *mdev = c->mdev; int err; @@ -665,7 +665,8 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param if (err) return err; - return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, c->napi.napi_id); + return __xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, c->napi.napi_id, + xdp_frag_size); } static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev, @@ -727,26 +728,6 @@ static void mlx5e_rq_free_shampo(struct mlx5e_rq *rq) mlx5e_rq_shampo_hd_free(rq); } -static __be32 mlx5e_get_terminate_scatter_list_mkey(struct mlx5_core_dev *dev) -{ - u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {}; - u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {}; - int res; - - if (!MLX5_CAP_GEN(dev, terminate_scatter_list_mkey)) - return MLX5_TERMINATE_SCATTER_LIST_LKEY; - - MLX5_SET(query_special_contexts_in, in, opcode, - MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS); - res = mlx5_cmd_exec_inout(dev, query_special_contexts, in, out); - if (res) - return MLX5_TERMINATE_SCATTER_LIST_LKEY; - - res = MLX5_GET(query_special_contexts_out, out, - terminate_scatter_list_mkey); - return cpu_to_be32(res); -} - static int mlx5e_alloc_rq(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk, struct mlx5e_rq_param *rqp, @@ -908,7 +889,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params, /* check if num_frags is not a pow of two */ if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) { wqe->data[f].byte_count = 0; - wqe->data[f].lkey = mlx5e_get_terminate_scatter_list_mkey(mdev); + wqe->data[f].lkey = params->terminate_lkey_be; wqe->data[f].addr = 0; } } @@ -2260,7 +2241,7 @@ static int mlx5e_open_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param { int err; - err = mlx5e_init_rxq_rq(c, params, &c->rq); + err = mlx5e_init_rxq_rq(c, params, rq_params->xdp_frag_size, &c->rq); if (err) return err; @@ -5007,6 +4988,8 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 /* RQ */ mlx5e_build_rq_params(mdev, params); + params->terminate_lkey_be = mlx5_core_get_terminate_scatter_list_mkey(mdev); + params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT); /* CQ moderation params */ @@ -5279,12 +5262,16 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, mlx5e_timestamp_init(priv); + priv->dfs_root = debugfs_create_dir("nic", + mlx5_debugfs_get_dev_root(mdev)); + fs = mlx5e_fs_init(priv->profile, mdev, !test_bit(MLX5E_STATE_DESTROYING, &priv->state), priv->dfs_root); if (!fs) { err = -ENOMEM; mlx5_core_err(mdev, "FS initialization failed, %d\n", err); + debugfs_remove_recursive(priv->dfs_root); return err; } priv->fs = fs; @@ -5305,6 +5292,7 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) mlx5e_health_destroy_reporters(priv); mlx5e_ktls_cleanup(priv); mlx5e_fs_cleanup(priv->fs); + debugfs_remove_recursive(priv->dfs_root); priv->fs = NULL; } @@ -5851,8 +5839,8 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv) } static int -mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mdev, - const struct mlx5e_profile *new_profile, void *new_ppriv) +mlx5e_netdev_init_profile(struct net_device *netdev, struct mlx5_core_dev *mdev, + const struct mlx5e_profile *new_profile, void *new_ppriv) { struct mlx5e_priv *priv = netdev_priv(netdev); int err; @@ -5868,6 +5856,25 @@ mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mde 
err = new_profile->init(priv->mdev, priv->netdev); if (err) goto priv_cleanup; + + return 0; + +priv_cleanup: + mlx5e_priv_cleanup(priv); + return err; +} + +static int +mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mdev, + const struct mlx5e_profile *new_profile, void *new_ppriv) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + int err; + + err = mlx5e_netdev_init_profile(netdev, mdev, new_profile, new_ppriv); + if (err) + return err; + err = mlx5e_attach_netdev(priv); if (err) goto profile_cleanup; @@ -5875,7 +5882,6 @@ mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mde profile_cleanup: new_profile->cleanup(priv); -priv_cleanup: mlx5e_priv_cleanup(priv); return err; } @@ -5894,6 +5900,12 @@ int mlx5e_netdev_change_profile(struct mlx5e_priv *priv, priv->profile->cleanup(priv); mlx5e_priv_cleanup(priv); + if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { + mlx5e_netdev_init_profile(netdev, mdev, new_profile, new_ppriv); + set_bit(MLX5E_STATE_DESTROYING, &priv->state); + return -EIO; + } + err = mlx5e_netdev_attach_profile(netdev, mdev, new_profile, new_ppriv); if (err) { /* roll back to original profile */ netdev_warn(netdev, "%s: new profile init failed, %d\n", __func__, err); @@ -5955,8 +5967,11 @@ static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state) struct net_device *netdev = priv->netdev; struct mlx5_core_dev *mdev = priv->mdev; - if (!netif_device_present(netdev)) + if (!netif_device_present(netdev)) { + if (test_bit(MLX5E_STATE_DESTROYING, &priv->state)) + mlx5e_destroy_mdev_resources(mdev); return -ENODEV; + } mlx5e_detach_netdev(priv); mlx5e_destroy_mdev_resources(mdev); @@ -6002,9 +6017,6 @@ static int mlx5e_probe(struct auxiliary_device *adev, priv->profile = profile; priv->ppriv = NULL; - priv->dfs_root = debugfs_create_dir("nic", - mlx5_debugfs_get_dev_root(priv->mdev)); - err = profile->init(mdev, netdev); if (err) { mlx5_core_err(mdev, "mlx5e_nic_profile init failed, %d\n", err); @@ -6033,7 +6045,6 @@ err_resume: err_profile_cleanup: profile->cleanup(priv); err_destroy_netdev: - debugfs_remove_recursive(priv->dfs_root); mlx5e_destroy_netdev(priv); err_devlink_port_unregister: mlx5e_devlink_port_unregister(mlx5e_dev); @@ -6053,7 +6064,6 @@ static void mlx5e_remove(struct auxiliary_device *adev) unregister_netdev(priv->netdev); mlx5e_suspend(adev, state); priv->profile->cleanup(priv); - debugfs_remove_recursive(priv->dfs_root); mlx5e_destroy_netdev(priv); mlx5e_devlink_port_unregister(mlx5e_dev); mlx5e_destroy_devlink(mlx5e_dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 1fc386eccaf8f9f1345d1a2c640b1039781ca0e1..3e7041bd5705e8761258d3513c5bc358a602cf68 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -30,6 +30,7 @@ * SOFTWARE. 
*/ +#include #include #include #include @@ -812,11 +813,15 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev, { struct mlx5e_priv *priv = netdev_priv(netdev); + priv->dfs_root = debugfs_create_dir("nic", + mlx5_debugfs_get_dev_root(mdev)); + priv->fs = mlx5e_fs_init(priv->profile, mdev, !test_bit(MLX5E_STATE_DESTROYING, &priv->state), priv->dfs_root); if (!priv->fs) { netdev_err(priv->netdev, "FS allocation failed\n"); + debugfs_remove_recursive(priv->dfs_root); return -ENOMEM; } @@ -829,6 +834,7 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev, static void mlx5e_cleanup_rep(struct mlx5e_priv *priv) { mlx5e_fs_cleanup(priv->fs); + debugfs_remove_recursive(priv->dfs_root); priv->fs = NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 728b82ce4031a2b6ce3e0434064980f39985ff2e..b9b1da751a3b8c762c69eb27d7991037c5711d04 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1439,6 +1439,7 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv, mlx5e_hairpin_flow_del(priv, flow); free_flow_post_acts(flow); + mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), attr); kvfree(attr->parse_attr); kfree(flow->attr); @@ -1665,11 +1666,9 @@ bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport) { struct mlx5e_priv *out_priv, *route_priv; - struct mlx5_devcom *devcom = NULL; struct mlx5_core_dev *route_mdev; struct mlx5_eswitch *esw; u16 vhca_id; - int err; out_priv = netdev_priv(out_dev); esw = out_priv->mdev->priv.eswitch; @@ -1678,6 +1677,9 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id); if (mlx5_lag_is_active(out_priv->mdev)) { + struct mlx5_devcom *devcom; + int err; + /* In lag case we may get devices from different eswitch instances. * If we failed to get vport num, it means, mostly, that we on the wrong * eswitch. 
@@ -1686,101 +1688,16 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro if (err != -ENOENT) return err; + rcu_read_lock(); devcom = out_priv->mdev->priv.devcom; - esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); - if (!esw) - return -ENODEV; - } - - err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport); - if (devcom) - mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); - return err; -} - -static int -set_encap_dests(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow, - struct mlx5_flow_attr *attr, - struct netlink_ext_ack *extack, - bool *vf_tun) -{ - struct mlx5e_tc_flow_parse_attr *parse_attr; - struct mlx5_esw_flow_attr *esw_attr; - struct net_device *encap_dev = NULL; - struct mlx5e_rep_priv *rpriv; - struct mlx5e_priv *out_priv; - int out_index; - int err = 0; - - if (!mlx5e_is_eswitch_flow(flow)) - return 0; - - parse_attr = attr->parse_attr; - esw_attr = attr->esw_attr; - *vf_tun = false; - - for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) { - struct net_device *out_dev; - int mirred_ifindex; - - if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)) - continue; - - mirred_ifindex = parse_attr->mirred_ifindex[out_index]; - out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex); - if (!out_dev) { - NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found"); - err = -ENODEV; - goto out; - } - err = mlx5e_attach_encap(priv, flow, attr, out_dev, out_index, - extack, &encap_dev); - dev_put(out_dev); - if (err) - goto out; - - if (esw_attr->dests[out_index].flags & - MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE && - !esw_attr->dest_int_port) - *vf_tun = true; + esw = mlx5_devcom_get_peer_data_rcu(devcom, MLX5_DEVCOM_ESW_OFFLOADS); + err = esw ? 
mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport) : -ENODEV; + rcu_read_unlock(); - out_priv = netdev_priv(encap_dev); - rpriv = out_priv->ppriv; - esw_attr->dests[out_index].rep = rpriv->rep; - esw_attr->dests[out_index].mdev = out_priv->mdev; - } - - if (*vf_tun && esw_attr->out_count > 1) { - NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported"); - err = -EOPNOTSUPP; - goto out; + return err; } -out: - return err; -} - -static void -clean_encap_dests(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow, - struct mlx5_flow_attr *attr) -{ - struct mlx5_esw_flow_attr *esw_attr; - int out_index; - - if (!mlx5e_is_eswitch_flow(flow)) - return; - - esw_attr = attr->esw_attr; - - for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) { - if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)) - continue; - - mlx5e_detach_encap(priv, flow, attr, out_index); - kfree(attr->parse_attr->tun_info[out_index]); - } + return mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport); } static int @@ -1819,7 +1736,7 @@ post_process_attr(struct mlx5e_tc_flow *flow, if (err) goto err_out; - err = set_encap_dests(flow->priv, flow, attr, extack, &vf_tun); + err = mlx5e_tc_tun_encap_dests_set(flow->priv, flow, attr, extack, &vf_tun); if (err) goto err_out; @@ -3943,8 +3860,8 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state, struct mlx5_flow_attr *prev_attr; struct flow_action_entry *act; struct mlx5e_tc_act *tc_act; + int err, i, i_split = 0; bool is_missable; - int err, i; ns_type = mlx5e_get_flow_namespace(flow); list_add(&attr->list, &flow->attrs); @@ -3985,7 +3902,8 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state, i < flow_action->num_entries - 1)) { is_missable = tc_act->is_missable ? tc_act->is_missable(act) : false; - err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type); + err = mlx5e_tc_act_post_parse(parse_state, flow_action, i_split, i, attr, + ns_type); if (err) goto out_free_post_acts; @@ -3995,6 +3913,7 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state, goto out_free_post_acts; } + i_split = i + 1; list_add(&attr->list, &flow->attrs); } @@ -4009,7 +3928,7 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state, } } - err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type); + err = mlx5e_tc_act_post_parse(parse_state, flow_action, i_split, i, attr, ns_type); if (err) goto out_free_post_acts; @@ -4323,7 +4242,7 @@ mlx5_free_flow_attr_actions(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *a if (attr->post_act_handle) mlx5e_tc_post_act_del(get_post_action(flow->priv), attr->post_act_handle); - clean_encap_dests(flow->priv, flow, attr); + mlx5e_tc_tun_encap_dests_unset(flow->priv, flow, attr); if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) mlx5_fc_destroy(counter_dev, attr->counter); @@ -5301,6 +5220,8 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv) goto err_action_counter; } + mlx5_esw_offloads_devcom_init(esw); + return 0; err_action_counter: @@ -5329,7 +5250,7 @@ void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv) priv = netdev_priv(rpriv->netdev); esw = priv->mdev->priv.eswitch; - mlx5e_tc_clean_fdb_peer_flows(esw); + mlx5_esw_offloads_devcom_cleanup(esw); mlx5e_tc_tun_cleanup(uplink_priv->encap); @@ -5643,22 +5564,43 @@ bool mlx5e_tc_update_skb_nic(struct mlx5_cqe64 *cqe, struct sk_buff *skb) 0, NULL); } +static struct mapping_ctx * +mlx5e_get_priv_obj_mapping(struct mlx5e_priv *priv) +{ + struct mlx5e_tc_table *tc; + struct 
mlx5_eswitch *esw; + struct mapping_ctx *ctx; + + if (is_mdev_switchdev_mode(priv->mdev)) { + esw = priv->mdev->priv.eswitch; + ctx = esw->offloads.reg_c0_obj_pool; + } else { + tc = mlx5e_fs_get_tc(priv->fs); + ctx = tc->mapping; + } + + return ctx; +} + int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr, u64 act_miss_cookie, u32 *act_miss_mapping) { - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5_mapped_obj mapped_obj = {}; + struct mlx5_eswitch *esw; struct mapping_ctx *ctx; int err; - ctx = esw->offloads.reg_c0_obj_pool; - + ctx = mlx5e_get_priv_obj_mapping(priv); mapped_obj.type = MLX5_MAPPED_OBJ_ACT_MISS; mapped_obj.act_miss_cookie = act_miss_cookie; err = mapping_add(ctx, &mapped_obj, act_miss_mapping); if (err) return err; + if (!is_mdev_switchdev_mode(priv->mdev)) + return 0; + + esw = priv->mdev->priv.eswitch; attr->act_id_restore_rule = esw_add_restore_rule(esw, *act_miss_mapping); if (IS_ERR(attr->act_id_restore_rule)) goto err_rule; @@ -5673,10 +5615,9 @@ err_rule: void mlx5e_tc_action_miss_mapping_put(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr, u32 act_miss_mapping) { - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct mapping_ctx *ctx; + struct mapping_ctx *ctx = mlx5e_get_priv_obj_mapping(priv); - ctx = esw->offloads.reg_c0_obj_pool; - mlx5_del_flow_rules(attr->act_id_restore_rule); + if (is_mdev_switchdev_mode(priv->mdev)) + mlx5_del_flow_rules(attr->act_id_restore_rule); mapping_remove(ctx, act_miss_mapping); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index df5e780e8e6a6537b23c8fc8b0fb18234480e108..c7eb6b238c2ba15510533a904920afd9790dca5e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -762,6 +762,17 @@ static void mlx5e_tx_wi_consume_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_t } } +void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq) +{ + if (netif_tx_queue_stopped(sq->txq) && + mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) && + mlx5e_ptpsq_fifo_has_room(sq) && + !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) { + netif_tx_wake_queue(sq->txq); + sq->stats->wake++; + } +} + bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) { struct mlx5e_sq_stats *stats; @@ -861,13 +872,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) netdev_tx_completed_queue(sq->txq, npkts, nbytes); - if (netif_tx_queue_stopped(sq->txq) && - mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) && - mlx5e_ptpsq_fifo_has_room(sq) && - !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) { - netif_tx_wake_queue(sq->txq); - stats->wake++; - } + mlx5e_txqsq_wake(sq); return (i == MLX5E_TX_CQ_POLL_BUDGET); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index a50bfda18e96d95796ecb10a80c839d17455d569..fbb2d963fb7e1bc3afce053980f360f2467d1bf1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -161,20 +161,22 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) } } + /* budget=0 means we may be in IRQ context, do as little as possible */ + if (unlikely(!budget)) + goto out; + busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq); if (c->xdp) busy |= mlx5e_poll_xdpsq_cq(&c->rq_xdpsq.cq); - if (likely(budget)) { /* budget=0 means: don't poll rx rings */ - if (xsk_open) - work_done = mlx5e_poll_rx_cq(&xskrq->cq, 
budget); + if (xsk_open) + work_done = mlx5e_poll_rx_cq(&xskrq->cq, budget); - if (likely(budget - work_done)) - work_done += mlx5e_poll_rx_cq(&rq->cq, budget - work_done); + if (likely(budget - work_done)) + work_done += mlx5e_poll_rx_cq(&rq->cq, budget - work_done); - busy |= work_done == budget; - } + busy |= work_done == budget; mlx5e_poll_ico_cq(&c->icosq.cq); if (mlx5e_poll_ico_cq(&c->async_icosq.cq)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 1c35d721a31d7d250705aa7630ea666cffedc639..3db4866d7880fc4c41b05971e8fc4224e8f391ac 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -824,7 +824,7 @@ static int comp_irqs_request_pci(struct mlx5_core_dev *dev) ncomp_eqs = table->num_comp_eqs; cpus = kcalloc(ncomp_eqs, sizeof(*cpus), GFP_KERNEL); if (!cpus) - ret = -ENOMEM; + return -ENOMEM; i = 0; rcu_read_lock(); @@ -1104,7 +1104,7 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev) struct mlx5_eq_table *table = dev->priv.eq_table; mutex_lock(&table->lock); /* sync with create/destroy_async_eq */ - mlx5_irq_table_destroy(dev); + mlx5_irq_table_free_irqs(dev); mutex_unlock(&table->lock); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 1a042c9817131e7860a046c77ed7cc18b0b6e7e6..add6cfa432a5b7dd2c5cd4830a576d3ec9a00e7e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -342,6 +342,7 @@ struct mlx5_eswitch { u32 large_group_num; } params; struct blocking_notifier_head n_head; + bool paired[MLX5_MAX_PORTS]; }; void esw_offloads_disable(struct mlx5_eswitch *esw); @@ -369,6 +370,8 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs); void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf); void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw); void mlx5_eswitch_disable(struct mlx5_eswitch *esw); +void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw); +void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw); int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, u16 vport, const u8 *mac); int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, @@ -767,6 +770,8 @@ static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {} static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; } static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {} static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {} +static inline void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw) {} +static inline void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) {} static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; } static inline int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 69215ffb9999532d839f5c21f7881f716ee35c96..8d19c20d3447e67cb63b4e7b4ead294ba3147b66 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -2742,6 +2742,9 @@ static int mlx5_esw_offloads_devcom_event(int event, mlx5_eswitch_vport_match_metadata_enabled(peer_esw)) break; + if 
(esw->paired[mlx5_get_dev_index(peer_esw->dev)]) + break; + err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true); if (err) goto err_out; @@ -2753,14 +2756,18 @@ static int mlx5_esw_offloads_devcom_event(int event, if (err) goto err_pair; + esw->paired[mlx5_get_dev_index(peer_esw->dev)] = true; + peer_esw->paired[mlx5_get_dev_index(esw->dev)] = true; mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true); break; case ESW_OFFLOADS_DEVCOM_UNPAIR: - if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) + if (!esw->paired[mlx5_get_dev_index(peer_esw->dev)]) break; mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false); + esw->paired[mlx5_get_dev_index(peer_esw->dev)] = false; + peer_esw->paired[mlx5_get_dev_index(esw->dev)] = false; mlx5_esw_offloads_unpair(peer_esw); mlx5_esw_offloads_unpair(esw); mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false); @@ -2779,7 +2786,7 @@ err_out: return err; } -static void esw_offloads_devcom_init(struct mlx5_eswitch *esw) +void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw) { struct mlx5_devcom *devcom = esw->dev->priv.devcom; @@ -2802,7 +2809,7 @@ static void esw_offloads_devcom_init(struct mlx5_eswitch *esw) ESW_OFFLOADS_DEVCOM_PAIR, esw); } -static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) +void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) { struct mlx5_devcom *devcom = esw->dev->priv.devcom; @@ -3250,8 +3257,6 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) if (err) goto err_vports; - esw_offloads_devcom_init(esw); - return 0; err_vports: @@ -3292,7 +3297,6 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw, void esw_offloads_disable(struct mlx5_eswitch *esw) { - esw_offloads_devcom_cleanup(esw); mlx5_eswitch_disable_pf_vf_vports(esw); esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK); esw_set_passing_vport_metadata(esw, false); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 144e59480686447de7c74d60f28b5efa9066dce6..ec83e6483d1a2acb5b33f44cd514ed96e0efb3da 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -511,10 +511,11 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, struct mlx5_flow_rule *dst; void *in_flow_context, *vlan; void *in_match_value; + int reformat_id = 0; unsigned int inlen; int dst_cnt_size; + u32 *in, action; void *in_dests; - u32 *in; int err; if (mlx5_set_extended_dest(dev, fte, &extended_dest)) @@ -553,22 +554,42 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, MLX5_SET(flow_context, in_flow_context, extended_destination, extended_dest); - if (extended_dest) { - u32 action; - action = fte->action.action & - ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; - MLX5_SET(flow_context, in_flow_context, action, action); - } else { - MLX5_SET(flow_context, in_flow_context, action, - fte->action.action); - if (fte->action.pkt_reformat) - MLX5_SET(flow_context, in_flow_context, packet_reformat_id, - fte->action.pkt_reformat->id); + action = fte->action.action; + if (extended_dest) + action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; + + MLX5_SET(flow_context, in_flow_context, action, action); + + if (!extended_dest && fte->action.pkt_reformat) { + struct mlx5_pkt_reformat *pkt_reformat = fte->action.pkt_reformat; + + if (pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_SW) { + reformat_id = mlx5_fs_dr_action_get_pkt_reformat_id(pkt_reformat); + if (reformat_id < 0) { + mlx5_core_err(dev, + "Unsupported SW-owned 
pkt_reformat type (%d) in FW-owned table\n", + pkt_reformat->reformat_type); + err = reformat_id; + goto err_out; + } + } else { + reformat_id = fte->action.pkt_reformat->id; + } } - if (fte->action.modify_hdr) + + MLX5_SET(flow_context, in_flow_context, packet_reformat_id, (u32)reformat_id); + + if (fte->action.modify_hdr) { + if (fte->action.modify_hdr->owner == MLX5_FLOW_RESOURCE_OWNER_SW) { + mlx5_core_err(dev, "Can't use SW-owned modify_hdr in FW-owned table\n"); + err = -EOPNOTSUPP; + goto err_out; + } + MLX5_SET(flow_context, in_flow_context, modify_header_id, fte->action.modify_hdr->id); + } MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_type, fte->action.crypto.type); @@ -885,6 +906,8 @@ static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns, pkt_reformat->id = MLX5_GET(alloc_packet_reformat_context_out, out, packet_reformat_id); + pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_FW; + kfree(in); return err; } @@ -969,6 +992,7 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns, err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); modify_hdr->id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id); + modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_FW; kfree(in); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index f137a0611b77b41a7c733b60b1321af60b70c0f3..b043190e50a86d4a95c143215ed7cfc82bfc2440 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -54,8 +54,14 @@ struct mlx5_flow_definer { u32 id; }; +enum mlx5_flow_resource_owner { + MLX5_FLOW_RESOURCE_OWNER_FW, + MLX5_FLOW_RESOURCE_OWNER_SW, +}; + struct mlx5_modify_hdr { enum mlx5_flow_namespace_type ns_type; + enum mlx5_flow_resource_owner owner; union { struct mlx5_fs_dr_action action; u32 id; @@ -65,6 +71,7 @@ struct mlx5_modify_hdr { struct mlx5_pkt_reformat { enum mlx5_flow_namespace_type ns_type; int reformat_type; /* from mlx5_ifc */ + enum mlx5_flow_resource_owner owner; union { struct mlx5_fs_dr_action action; u32 id; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c index adefde3ea941010f7133184a1dd40c405c06841f..b7d779d08d8371082415d439b547e54cf6f6b8e5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c @@ -3,6 +3,7 @@ #include #include "lib/devcom.h" +#include "mlx5_core.h" static LIST_HEAD(devcom_list); @@ -13,7 +14,7 @@ static LIST_HEAD(devcom_list); struct mlx5_devcom_component { struct { - void *data; + void __rcu *data; } device[MLX5_DEVCOM_PORTS_SUPPORTED]; mlx5_devcom_event_handler_t handler; @@ -77,6 +78,7 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev) if (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_DEVCOM_PORTS_SUPPORTED) return NULL; + mlx5_dev_list_lock(); sguid0 = mlx5_query_nic_system_image_guid(dev); list_for_each_entry(iter, &devcom_list, list) { struct mlx5_core_dev *tmp_dev = NULL; @@ -102,8 +104,10 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev) if (!priv) { priv = mlx5_devcom_list_alloc(); - if (!priv) - return ERR_PTR(-ENOMEM); + if (!priv) { + devcom = ERR_PTR(-ENOMEM); + goto out; + } idx = 0; new_priv = true; @@ -112,13 +116,16 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev) priv->devs[idx] = dev; devcom = mlx5_devcom_alloc(priv, idx); if (!devcom) 
{ - kfree(priv); - return ERR_PTR(-ENOMEM); + if (new_priv) + kfree(priv); + devcom = ERR_PTR(-ENOMEM); + goto out; } if (new_priv) list_add(&priv->list, &devcom_list); - +out: + mlx5_dev_list_unlock(); return devcom; } @@ -131,6 +138,7 @@ void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom) if (IS_ERR_OR_NULL(devcom)) return; + mlx5_dev_list_lock(); priv = devcom->priv; priv->devs[devcom->idx] = NULL; @@ -141,10 +149,12 @@ void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom) break; if (i != MLX5_DEVCOM_PORTS_SUPPORTED) - return; + goto out; list_del(&priv->list); kfree(priv); +out: + mlx5_dev_list_unlock(); } void mlx5_devcom_register_component(struct mlx5_devcom *devcom, @@ -162,7 +172,7 @@ void mlx5_devcom_register_component(struct mlx5_devcom *devcom, comp = &devcom->priv->components[id]; down_write(&comp->sem); comp->handler = handler; - comp->device[devcom->idx].data = data; + rcu_assign_pointer(comp->device[devcom->idx].data, data); up_write(&comp->sem); } @@ -176,8 +186,9 @@ void mlx5_devcom_unregister_component(struct mlx5_devcom *devcom, comp = &devcom->priv->components[id]; down_write(&comp->sem); - comp->device[devcom->idx].data = NULL; + RCU_INIT_POINTER(comp->device[devcom->idx].data, NULL); up_write(&comp->sem); + synchronize_rcu(); } int mlx5_devcom_send_event(struct mlx5_devcom *devcom, @@ -193,12 +204,15 @@ int mlx5_devcom_send_event(struct mlx5_devcom *devcom, comp = &devcom->priv->components[id]; down_write(&comp->sem); - for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) - if (i != devcom->idx && comp->device[i].data) { - err = comp->handler(event, comp->device[i].data, - event_data); + for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) { + void *data = rcu_dereference_protected(comp->device[i].data, + lockdep_is_held(&comp->sem)); + + if (i != devcom->idx && data) { + err = comp->handler(event, data, event_data); break; } + } up_write(&comp->sem); return err; @@ -213,7 +227,7 @@ void mlx5_devcom_set_paired(struct mlx5_devcom *devcom, comp = &devcom->priv->components[id]; WARN_ON(!rwsem_is_locked(&comp->sem)); - comp->paired = paired; + WRITE_ONCE(comp->paired, paired); } bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom, @@ -222,7 +236,7 @@ bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom, if (IS_ERR_OR_NULL(devcom)) return false; - return devcom->priv->components[id].paired; + return READ_ONCE(devcom->priv->components[id].paired); } void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom, @@ -236,7 +250,7 @@ void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom, comp = &devcom->priv->components[id]; down_read(&comp->sem); - if (!comp->paired) { + if (!READ_ONCE(comp->paired)) { up_read(&comp->sem); return NULL; } @@ -245,7 +259,29 @@ void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom, if (i != devcom->idx) break; - return comp->device[i].data; + return rcu_dereference_protected(comp->device[i].data, lockdep_is_held(&comp->sem)); +} + +void *mlx5_devcom_get_peer_data_rcu(struct mlx5_devcom *devcom, enum mlx5_devcom_components id) +{ + struct mlx5_devcom_component *comp; + int i; + + if (IS_ERR_OR_NULL(devcom)) + return NULL; + + for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) + if (i != devcom->idx) + break; + + comp = &devcom->priv->components[id]; + /* This can change concurrently, however 'data' pointer will remain + * valid for the duration of RCU read section. 
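+ * Callers are expected to be inside an RCU read-side critical section + * (rcu_read_lock()/rcu_read_unlock()) for as long as the returned + * pointer is used.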
+ */ + if (!READ_ONCE(comp->paired)) + return NULL; + + return rcu_dereference(comp->device[i].data); } void mlx5_devcom_release_peer_data(struct mlx5_devcom *devcom, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h index 94313c18bb6470d593e8e15e161188a279b58401..9a496f4722dadd690388a98c5cba59f0cfa9371b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h @@ -41,6 +41,7 @@ bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom, void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom, enum mlx5_devcom_components id); +void *mlx5_devcom_get_peer_data_rcu(struct mlx5_devcom *devcom, enum mlx5_devcom_components id); void mlx5_devcom_release_peer_data(struct mlx5_devcom *devcom, enum mlx5_devcom_components id); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 995eb2d5ace0b2681718920d7ebdb09c94625156..d6ee016deae176aa2ac54401304a5e8e1dda4c8f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -923,7 +923,6 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev, } mlx5_pci_vsc_init(dev); - dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev); return 0; err_clr_master: @@ -1049,7 +1048,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev) dev->dm = mlx5_dm_create(dev); if (IS_ERR(dev->dm)) - mlx5_core_warn(dev, "Failed to init device memory%d\n", err); + mlx5_core_warn(dev, "Failed to init device memory %ld\n", PTR_ERR(dev->dm)); dev->tracer = mlx5_fw_tracer_create(dev); dev->hv_vhca = mlx5_hv_vhca_create(dev); @@ -1155,6 +1154,7 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot, u64 timeout goto err_cmd_cleanup; } + dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev); mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP); mlx5_start_health_poll(dev); @@ -1802,15 +1802,16 @@ static void remove_one(struct pci_dev *pdev) struct devlink *devlink = priv_to_devlink(dev); set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state); - /* mlx5_drain_fw_reset() is using devlink APIs. Hence, we must drain - * fw_reset before unregistering the devlink. + /* mlx5_drain_fw_reset() and mlx5_drain_health_wq() are using + * devlink notify APIs. + * Hence, we must drain them before unregistering the devlink. */ mlx5_drain_fw_reset(dev); + mlx5_drain_health_wq(dev); devlink_unregister(devlink); mlx5_sriov_disable(pdev); mlx5_thermal_uninit(dev); mlx5_crdump_disable(dev); - mlx5_drain_health_wq(dev); mlx5_uninit_one(dev); mlx5_pci_close(dev); mlx5_mdev_uninit(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 1d879374acaa4e2ffceabc6acf5bd4c5896ac31c..229520405d4ae9d74abc7e844e2bea82184c6bf0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -276,18 +276,6 @@ static inline bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev) return pci_num_vf(dev->pdev) ? true : false; } -static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev) -{ - /* LACP owner conditions: - * 1) Function is physical. - * 2) LAG is supported by FW. - * 3) LAG is managed by driver (currently the only option). 
- */ - return MLX5_CAP_GEN(dev, vport_group_manager) && - (MLX5_CAP_GEN(dev, num_lag_ports) > 1) && - MLX5_CAP_GEN(dev, lag_master); -} - int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev); static inline int mlx5_rescan_drivers(struct mlx5_core_dev *dev) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h index efd0c299c5c73f54a3e8356aff4ea30a57459dec..aa403a5ea34ed22d30f987c84260423c444b37ec 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h @@ -15,6 +15,7 @@ int mlx5_irq_table_init(struct mlx5_core_dev *dev); void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev); int mlx5_irq_table_create(struct mlx5_core_dev *dev); void mlx5_irq_table_destroy(struct mlx5_core_dev *dev); +void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev); int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table); int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table); struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c index 9d735c343a3b8c0df0a1a90ceacab0e9439d86b7..678f0be8137520bc91e3d6fcc322529e20b1062a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c @@ -32,6 +32,7 @@ #include #include +#include #include "mlx5_core.h" int mlx5_core_create_mkey(struct mlx5_core_dev *dev, u32 *mkey, u32 *in, @@ -122,3 +123,23 @@ int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num) return mlx5_cmd_exec_in(dev, destroy_psv, in); } EXPORT_SYMBOL(mlx5_core_destroy_psv); + +__be32 mlx5_core_get_terminate_scatter_list_mkey(struct mlx5_core_dev *dev) +{ + u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {}; + u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {}; + u32 mkey; + + if (!MLX5_CAP_GEN(dev, terminate_scatter_list_mkey)) + return MLX5_TERMINATE_SCATTER_LIST_LKEY; + + MLX5_SET(query_special_contexts_in, in, opcode, + MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS); + if (mlx5_cmd_exec_inout(dev, query_special_contexts, in, out)) + return MLX5_TERMINATE_SCATTER_LIST_LKEY; + + mkey = MLX5_GET(query_special_contexts_out, out, + terminate_scatter_list_mkey); + return cpu_to_be32(mkey); +} +EXPORT_SYMBOL(mlx5_core_get_terminate_scatter_list_mkey); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index 2245d3b2f393bcd60affd0963d3340016502570a..98412bd5a6961134a611ae75c239eff44b227db5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -32,6 +32,7 @@ struct mlx5_irq { struct mlx5_irq_pool *pool; int refcount; struct msi_map map; + u32 pool_index; }; struct mlx5_irq_table { @@ -125,14 +126,22 @@ out: return ret; } -static void irq_release(struct mlx5_irq *irq) +/* mlx5_system_free_irq - Free an IRQ + * @irq: IRQ to free + * + * Free the IRQ and other resources such as rmap from the system. + * BUT doesn't free or remove the reference from mlx5. + * This function is very important for the shutdown flow, where we need to + * clean up system resources but keep mlx5 objects alive, + * see mlx5_irq_table_free_irqs().
+ */ +static void mlx5_system_free_irq(struct mlx5_irq *irq) { struct mlx5_irq_pool *pool = irq->pool; #ifdef CONFIG_RFS_ACCEL struct cpu_rmap *rmap; #endif - xa_erase(&pool->irqs, irq->map.index); /* free_irq requires that affinity_hint and rmap will be cleared before * calling it. To satisfy this requirement, we call * irq_cpu_rmap_remove() to remove the notifier @@ -140,14 +149,22 @@ static void irq_release(struct mlx5_irq *irq) irq_update_affinity_hint(irq->map.virq, NULL); #ifdef CONFIG_RFS_ACCEL rmap = mlx5_eq_table_get_rmap(pool->dev); - if (rmap && irq->map.index) + if (rmap) irq_cpu_rmap_remove(rmap, irq->map.virq); #endif - free_cpumask_var(irq->mask); free_irq(irq->map.virq, &irq->nh); if (irq->map.index && pci_msix_can_alloc_dyn(pool->dev->pdev)) pci_msix_free_irq(pool->dev->pdev, irq->map); +} + +static void irq_release(struct mlx5_irq *irq) +{ + struct mlx5_irq_pool *pool = irq->pool; + + xa_erase(&pool->irqs, irq->pool_index); + mlx5_system_free_irq(irq); + free_cpumask_var(irq->mask); kfree(irq); } @@ -231,12 +248,13 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i, if (!irq) return ERR_PTR(-ENOMEM); if (!i || !pci_msix_can_alloc_dyn(dev->pdev)) { - /* The vector at index 0 was already allocated. - * Just get the irq number. If dynamic irq is not supported - * vectors have also been allocated. + /* The vector at index 0 is always statically allocated. If + * dynamic irq is not supported all vectors are statically + * allocated. In both cases just get the irq number and set + * the index. */ irq->map.virq = pci_irq_vector(dev->pdev, i); - irq->map.index = 0; + irq->map.index = i; } else { irq->map = pci_msix_alloc_irq_at(dev->pdev, MSI_ANY_INDEX, af_desc); if (!irq->map.virq) { @@ -276,11 +294,11 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i, } irq->pool = pool; irq->refcount = 1; - irq->map.index = i; - err = xa_err(xa_store(&pool->irqs, irq->map.index, irq, GFP_KERNEL)); + irq->pool_index = i; + err = xa_err(xa_store(&pool->irqs, irq->pool_index, irq, GFP_KERNEL)); if (err) { mlx5_core_err(dev, "Failed to alloc xa entry for irq(%u). 
err = %d\n", - irq->map.index, err); + irq->pool_index, err); goto err_xa; } return irq; @@ -563,17 +581,23 @@ void mlx5_irqs_release_vectors(struct mlx5_irq **irqs, int nirqs) int mlx5_irqs_request_vectors(struct mlx5_core_dev *dev, u16 *cpus, int nirqs, struct mlx5_irq **irqs, struct cpu_rmap **rmap) { + struct mlx5_irq_table *table = mlx5_irq_table_get(dev); + struct mlx5_irq_pool *pool = table->pcif_pool; struct irq_affinity_desc af_desc; struct mlx5_irq *irq; + int offset = 1; int i; - af_desc.is_managed = 1; + if (!pool->xa_num_irqs.max) + offset = 0; + + af_desc.is_managed = false; for (i = 0; i < nirqs; i++) { + cpumask_clear(&af_desc.mask); cpumask_set_cpu(cpus[i], &af_desc.mask); - irq = mlx5_irq_request(dev, i + 1, &af_desc, rmap); + irq = mlx5_irq_request(dev, i + offset, &af_desc, rmap); if (IS_ERR(irq)) break; - cpumask_clear(&af_desc.mask); irqs[i] = irq; } @@ -691,6 +715,25 @@ static void irq_pools_destroy(struct mlx5_irq_table *table) irq_pool_free(table->pcif_pool); } +static void mlx5_irq_pool_free_irqs(struct mlx5_irq_pool *pool) +{ + struct mlx5_irq *irq; + unsigned long index; + + xa_for_each(&pool->irqs, index, irq) + mlx5_system_free_irq(irq); + +} + +static void mlx5_irq_pools_free_irqs(struct mlx5_irq_table *table) +{ + if (table->sf_ctrl_pool) { + mlx5_irq_pool_free_irqs(table->sf_comp_pool); + mlx5_irq_pool_free_irqs(table->sf_ctrl_pool); + } + mlx5_irq_pool_free_irqs(table->pcif_pool); +} + /* irq_table API */ int mlx5_irq_table_init(struct mlx5_core_dev *dev) @@ -774,6 +817,17 @@ void mlx5_irq_table_destroy(struct mlx5_core_dev *dev) pci_free_irq_vectors(dev->pdev); } +void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev) +{ + struct mlx5_irq_table *table = dev->priv.irq_table; + + if (mlx5_core_is_sf(dev)) + return; + + mlx5_irq_pools_free_irqs(table); + pci_free_irq_vectors(dev->pdev); +} + int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table) { if (table->sf_comp_pool) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c index e2f26d0bc61541f98d637d0c45f9c710c4325230..0692363cf80e4d8f2d31e4133faab797a03abf10 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c @@ -63,6 +63,7 @@ static void mlx5_sf_dev_remove(struct auxiliary_device *adev) struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev); struct devlink *devlink = priv_to_devlink(sf_dev->mdev); + mlx5_drain_health_wq(sf_dev->mdev); devlink_unregister(devlink); mlx5_uninit_one(sf_dev->mdev); iounmap(sf_dev->mdev->iseg); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c index 0eb9a8d7f282fc93e99ab4598a52b950d877f6d0..0f783e7906cb5357685543ad0db50541eaa6166f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c @@ -1421,9 +1421,13 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn, } case DR_ACTION_TYP_TNL_L3_TO_L2: { - u8 hw_actions[DR_ACTION_CACHE_LINE_SIZE] = {}; + u8 *hw_actions; int ret; + hw_actions = kzalloc(DR_ACTION_CACHE_LINE_SIZE, GFP_KERNEL); + if (!hw_actions) + return -ENOMEM; + ret = mlx5dr_ste_set_action_decap_l3_list(dmn->ste_ctx, data, data_sz, hw_actions, @@ -1431,6 +1435,7 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn, &action->rewrite->num_of_actions); if (ret) { mlx5dr_dbg(dmn, "Failed creating decap l3 
action list\n"); + kfree(hw_actions); return ret; } @@ -1440,6 +1445,7 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn, ret = mlx5dr_ste_alloc_modify_hdr(action); if (ret) { mlx5dr_dbg(dmn, "Failed preparing reformat data\n"); + kfree(hw_actions); return ret; } return 0; @@ -2129,6 +2135,11 @@ mlx5dr_action_create_aso(struct mlx5dr_domain *dmn, u32 obj_id, return action; } +u32 mlx5dr_action_get_pkt_reformat_id(struct mlx5dr_action *action) +{ + return action->reformat->id; +} + int mlx5dr_action_destroy(struct mlx5dr_action *action) { if (WARN_ON_ONCE(refcount_read(&action->refcount) > 1)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c index 3835ba3f4ddac6a3b82aab154121d15b8ec4fb6a..1aa525e509f109277b03d53c11f51e18886fde95 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c @@ -117,6 +117,8 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev, caps->gvmi = MLX5_CAP_GEN(mdev, vhca_id); caps->flex_protocols = MLX5_CAP_GEN(mdev, flex_parser_protocols); caps->sw_format_ver = MLX5_CAP_GEN(mdev, steering_format_version); + caps->roce_caps.fl_rc_qp_when_roce_disabled = + MLX5_CAP_GEN(mdev, fl_rc_qp_when_roce_disabled); if (MLX5_CAP_GEN(mdev, roce)) { err = dr_cmd_query_nic_vport_roce_en(mdev, 0, &roce_en); @@ -124,7 +126,7 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev, return err; caps->roce_caps.roce_en = roce_en; - caps->roce_caps.fl_rc_qp_when_roce_disabled = + caps->roce_caps.fl_rc_qp_when_roce_disabled |= MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_disabled); caps->roce_caps.fl_rc_qp_when_roce_enabled = MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_enabled); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c index 13e06a6a6b22af75704204eae5243f0288b71049..d6947fe13d560d141e8e2efa9c7be49a485abcc0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c @@ -213,6 +213,8 @@ struct mlx5dr_ptrn_mgr *mlx5dr_ptrn_mgr_create(struct mlx5dr_domain *dmn) } INIT_LIST_HEAD(&mgr->ptrn_list); + mutex_init(&mgr->modify_hdr_mutex); + return mgr; free_mgr: @@ -237,5 +239,6 @@ void mlx5dr_ptrn_mgr_destroy(struct mlx5dr_ptrn_mgr *mgr) } mlx5dr_icm_pool_destroy(mgr->ptrn_icm_pool); + mutex_destroy(&mgr->modify_hdr_mutex); kfree(mgr); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c index 9413aaf51251b798cc5fc70c861599041efbd687..e94fbb015efad7e1988e738980d2e1378e4d4d50 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c @@ -15,7 +15,8 @@ static u32 dr_ste_crc32_calc(const void *input_data, size_t length) { u32 crc = crc32(0, input_data, length); - return (__force u32)htonl(crc); + return (__force u32)((crc >> 24) & 0xff) | ((crc << 8) & 0xff0000) | + ((crc >> 8) & 0xff00) | ((crc << 24) & 0xff000000); } bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c index 984653756779616de63528501cbdab338c025162..cc215beb743673fd6a6a72de7df7712e42b51dc9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c @@ -331,8 +331,16 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, } if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) { - bool is_decap = fte->action.pkt_reformat->reformat_type == - MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2; + bool is_decap; + + if (fte->action.pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) { + err = -EINVAL; + mlx5dr_err(domain, "FW-owned reformat can't be used in SW rule\n"); + goto free_actions; + } + + is_decap = fte->action.pkt_reformat->reformat_type == + MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2; if (is_decap) actions[num_actions++] = @@ -661,6 +669,7 @@ static int mlx5_cmd_dr_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns return -EINVAL; } + pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_SW; pkt_reformat->action.dr_action = action; return 0; @@ -691,6 +700,7 @@ static int mlx5_cmd_dr_modify_header_alloc(struct mlx5_flow_root_namespace *ns, return -EINVAL; } + modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_SW; modify_hdr->action.dr_action = action; return 0; @@ -816,6 +826,19 @@ static u32 mlx5_cmd_dr_get_capabilities(struct mlx5_flow_root_namespace *ns, return steering_caps; } +int mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat) +{ + switch (pkt_reformat->reformat_type) { + case MLX5_REFORMAT_TYPE_L2_TO_VXLAN: + case MLX5_REFORMAT_TYPE_L2_TO_NVGRE: + case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL: + case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL: + case MLX5_REFORMAT_TYPE_INSERT_HDR: + return mlx5dr_action_get_pkt_reformat_id(pkt_reformat->action.dr_action); + } + return -EOPNOTSUPP; +} + bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev) { return mlx5dr_is_supported(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h index d168622063d559d06b69656190c085411f5f8153..99a3b2eff6b8fdd5c946f71dea7944ea11040777 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h @@ -38,6 +38,8 @@ struct mlx5_fs_dr_table { bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev); +int mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat); + const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void); #else @@ -47,6 +49,11 @@ static inline const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void) return NULL; } +static inline u32 mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat) +{ + return 0; +} + static inline bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev) { return false; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h index 9afd268a257381015d347eae469f5407b1132bfd..d1c04f43d86df578e9b000d28708be385a1e7b7e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h @@ -150,6 +150,8 @@ mlx5dr_action_create_dest_match_range(struct mlx5dr_domain *dmn, int mlx5dr_action_destroy(struct mlx5dr_action *action); +u32 mlx5dr_action_get_pkt_reformat_id(struct mlx5dr_action *action); + int mlx5dr_definer_get(struct mlx5dr_domain *dmn, u16 format_id, u8 *dw_selectors, u8 *byte_selectors, u8 *match_mask, u32 *definer_id); diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c index 
afa3b92a6905f691dbd63649ccf4190f20e9bd48..0d5a41a2ae010914b50fac74b5c719c6aa3fa2cd 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c @@ -245,12 +245,6 @@ static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts) skb = priv->rx_skb[rx_pi_rem]; - skb_put(skb, datalen); - - skb->ip_summed = CHECKSUM_NONE; /* device did not checksum packet */ - - skb->protocol = eth_type_trans(skb, netdev); - /* Alloc another RX SKB for this same index */ rx_skb = mlxbf_gige_alloc_skb(priv, MLXBF_GIGE_DEFAULT_BUF_SZ, &rx_buf_dma, DMA_FROM_DEVICE); @@ -259,6 +253,13 @@ static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts) priv->rx_skb[rx_pi_rem] = rx_skb; dma_unmap_single(priv->dev, *rx_wqe_addr, MLXBF_GIGE_DEFAULT_BUF_SZ, DMA_FROM_DEVICE); + + skb_put(skb, datalen); + + skb->ip_summed = CHECKSUM_NONE; /* device did not checksum packet */ + + skb->protocol = eth_type_trans(skb, netdev); + *rx_wqe_addr = rx_buf_dma; } else if (rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_MAC_ERR) { priv->stats.rx_mac_errors++; diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c index 2b6e046e1d10bbc7fcec9f334e344eec90bb7d11..ee2698698d71a43bb71256a085f91daf603ac095 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c @@ -1039,6 +1039,16 @@ static int lan966x_reset_switch(struct lan966x *lan966x) reset_control_reset(switch_reset); + /* Don't reinitialize the switch core, if it is already initialized. In + * case it is initialized twice, some pointers inside the queue system + * in HW will get corrupted and then after a while the queue system gets + * full and no traffic is passing through the switch. The issue is seen + * when loading and unloading the driver and sending traffic through the + * switch. 
+ */ + if (lan_rd(lan966x, SYS_RESET_CFG) & SYS_RESET_CFG_CORE_ENA) + return 0; + lan_wr(SYS_RESET_CFG_CORE_ENA_SET(0), lan966x, SYS_RESET_CFG); lan_wr(SYS_RAM_INIT_RAM_INIT_SET(1), lan966x, SYS_RAM_INIT); ret = readx_poll_timeout(lan966x_ram_init, lan966x, diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index b3fcb767b9aba823c325e8b9132dd5a0d37ebfac..7f4e861e398e66e2af09643680bb4a1e1aee99d6 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -1282,8 +1282,6 @@ static void mana_poll_tx_cq(struct mana_cq *cq) if (comp_read < 1) return; - apc->eth_stats.tx_cqes = comp_read; - for (i = 0; i < comp_read; i++) { struct mana_tx_comp_oob *cqe_oob; @@ -1366,8 +1364,6 @@ static void mana_poll_tx_cq(struct mana_cq *cq) WARN_ON_ONCE(1); cq->work_done = pkt_transmitted; - - apc->eth_stats.tx_cqes -= pkt_transmitted; } static void mana_post_pkt_rxq(struct mana_rxq *rxq) @@ -1629,15 +1625,11 @@ static void mana_poll_rx_cq(struct mana_cq *cq) { struct gdma_comp *comp = cq->gdma_comp_buf; struct mana_rxq *rxq = cq->rxq; - struct mana_port_context *apc; int comp_read, i; - apc = netdev_priv(rxq->ndev); - comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER); WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER); - apc->eth_stats.rx_cqes = comp_read; rxq->xdp_flush = false; for (i = 0; i < comp_read; i++) { @@ -1649,8 +1641,6 @@ static void mana_poll_rx_cq(struct mana_cq *cq) return; mana_process_rx_cqe(rxq, cq, &comp[i]); - - apc->eth_stats.rx_cqes--; } if (rxq->xdp_flush) diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c index a64c81410dc14cfcd44d263f0e79cc3337805ca4..0dc78679f62096ebf79da9d9af12b0519acb1e95 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c +++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c @@ -13,11 +13,9 @@ static const struct { } mana_eth_stats[] = { {"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)}, {"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)}, - {"tx_cqes", offsetof(struct mana_ethtool_stats, tx_cqes)}, {"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)}, {"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats, tx_cqe_unknown_type)}, - {"rx_cqes", offsetof(struct mana_ethtool_stats, rx_cqes)}, {"rx_coalesced_err", offsetof(struct mana_ethtool_stats, rx_coalesced_err)}, {"rx_cqe_unknown_type", offsetof(struct mana_ethtool_stats, diff --git a/drivers/net/ethernet/netronome/nfp/nic/main.h b/drivers/net/ethernet/netronome/nfp/nic/main.h index 094374df42b8c41513f968e44455f3e286fdcd84..38b8b10b03cd3b5c94a52925751e3171a5601a7e 100644 --- a/drivers/net/ethernet/netronome/nfp/nic/main.h +++ b/drivers/net/ethernet/netronome/nfp/nic/main.h @@ -8,7 +8,7 @@ #ifdef CONFIG_DCB /* DCB feature definitions */ -#define NFP_NET_MAX_DSCP 4 +#define NFP_NET_MAX_DSCP 64 #define NFP_NET_MAX_TC IEEE_8021QAZ_MAX_TCS #define NFP_NET_MAX_PRIO 8 #define NFP_DCB_CFG_STRIDE 256 diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 0605d1ee490dd0599dc6d69b31a52955d71ef9c8..7a549b834e970a5ac40330513e3220466d78acef 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -6138,6 +6138,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) return 0; out_error: + nv_mgmt_release_sema(dev); if (phystate_orig) writel(phystate|NVREG_ADAPTCTL_RUNNING, 
base + NvRegAdapterControl); out_freering: diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 2edd6bf64a3cc29ea1934eb10c7bc1b23edb767a..7776d3bdd459a54fe3f93c0166f824c6878e50e6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -1903,7 +1903,7 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats) { u32 i; - if (!cdev) { + if (!cdev || cdev->recov_in_prog) { memset(stats, 0, sizeof(*stats)); return; } diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index f9931ecb7baa252c9e7b05c57461e29da9f60313..4d83ceebdc498dfd700531f822721edea96ed6a5 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -269,6 +269,10 @@ struct qede_dev { #define QEDE_ERR_WARN 3 struct qede_dump_info dump_info; + struct delayed_work periodic_task; + unsigned long stats_coal_ticks; + u32 stats_coal_usecs; + spinlock_t stats_lock; /* lock for vport stats access */ }; enum QEDE_STATE { diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 374a86b875a3e5db52182258c642dd97a4fc4d3c..95820cf1cd6cc7b3ba95463adb3a1a9b41f33b1e 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -429,6 +429,8 @@ static void qede_get_ethtool_stats(struct net_device *dev, } } + spin_lock(&edev->stats_lock); + for (i = 0; i < QEDE_NUM_STATS; i++) { if (qede_is_irrelevant_stat(edev, i)) continue; @@ -438,6 +440,8 @@ static void qede_get_ethtool_stats(struct net_device *dev, buf++; } + spin_unlock(&edev->stats_lock); + __qede_unlock(edev); } @@ -829,6 +833,7 @@ out: coal->rx_coalesce_usecs = rx_coal; coal->tx_coalesce_usecs = tx_coal; + coal->stats_block_coalesce_usecs = edev->stats_coal_usecs; return rc; } @@ -842,6 +847,19 @@ int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal, int i, rc = 0; u16 rxc, txc; + if (edev->stats_coal_usecs != coal->stats_block_coalesce_usecs) { + edev->stats_coal_usecs = coal->stats_block_coalesce_usecs; + if (edev->stats_coal_usecs) { + edev->stats_coal_ticks = usecs_to_jiffies(edev->stats_coal_usecs); + schedule_delayed_work(&edev->periodic_task, 0); + + DP_INFO(edev, "Configured stats coal ticks=%lu jiffies\n", + edev->stats_coal_ticks); + } else { + cancel_delayed_work_sync(&edev->periodic_task); + } + } + if (!netif_running(dev)) { DP_INFO(edev, "Interface is down\n"); return -EINVAL; @@ -2252,7 +2270,8 @@ out: } static const struct ethtool_ops qede_ethtool_ops = { - .supported_coalesce_params = ETHTOOL_COALESCE_USECS, + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_STATS_BLOCK_USECS, .get_link_ksettings = qede_get_link_ksettings, .set_link_ksettings = qede_set_link_ksettings, .get_drvinfo = qede_get_drvinfo, @@ -2303,7 +2322,8 @@ static const struct ethtool_ops qede_ethtool_ops = { }; static const struct ethtool_ops qede_vf_ethtool_ops = { - .supported_coalesce_params = ETHTOOL_COALESCE_USECS, + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_STATS_BLOCK_USECS, .get_link_ksettings = qede_get_link_ksettings, .get_drvinfo = qede_get_drvinfo, .get_msglevel = qede_get_msglevel, diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 4c6c685820e3f0dc66d6375f0a5242b59cbbd3d2..4b004a7281903fe72e351ee2a5a22bc1c52a731e 100644 --- 
a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -307,6 +307,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev) edev->ops->get_vport_stats(edev->cdev, &stats); + spin_lock(&edev->stats_lock); + p_common->no_buff_discards = stats.common.no_buff_discards; p_common->packet_too_big_discard = stats.common.packet_too_big_discard; p_common->ttl0_discard = stats.common.ttl0_discard; @@ -404,6 +406,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev) p_ah->tx_1519_to_max_byte_packets = stats.ah.tx_1519_to_max_byte_packets; } + + spin_unlock(&edev->stats_lock); } static void qede_get_stats64(struct net_device *dev, @@ -412,9 +416,10 @@ static void qede_get_stats64(struct net_device *dev, struct qede_dev *edev = netdev_priv(dev); struct qede_stats_common *p_common; - qede_fill_by_demand_stats(edev); p_common = &edev->stats.common; + spin_lock(&edev->stats_lock); + stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts + p_common->rx_bcast_pkts; stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts + @@ -434,6 +439,8 @@ static void qede_get_stats64(struct net_device *dev, stats->collisions = edev->stats.bb.tx_total_collisions; stats->rx_crc_errors = p_common->rx_crc_errors; stats->rx_frame_errors = p_common->rx_align_errors; + + spin_unlock(&edev->stats_lock); } #ifdef CONFIG_QED_SRIOV @@ -1063,6 +1070,23 @@ static void qede_unlock(struct qede_dev *edev) rtnl_unlock(); } +static void qede_periodic_task(struct work_struct *work) +{ + struct qede_dev *edev = container_of(work, struct qede_dev, + periodic_task.work); + + qede_fill_by_demand_stats(edev); + schedule_delayed_work(&edev->periodic_task, edev->stats_coal_ticks); +} + +static void qede_init_periodic_task(struct qede_dev *edev) +{ + INIT_DELAYED_WORK(&edev->periodic_task, qede_periodic_task); + spin_lock_init(&edev->stats_lock); + edev->stats_coal_usecs = USEC_PER_SEC; + edev->stats_coal_ticks = usecs_to_jiffies(USEC_PER_SEC); +} + static void qede_sp_task(struct work_struct *work) { struct qede_dev *edev = container_of(work, struct qede_dev, @@ -1082,6 +1106,7 @@ static void qede_sp_task(struct work_struct *work) */ if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) { + cancel_delayed_work_sync(&edev->periodic_task); #ifdef CONFIG_QED_SRIOV /* SRIOV must be disabled outside the lock to avoid a deadlock. * The recovery of the active VFs is currently not supported. 
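The stats-block coalescing added above is driven through the standard ethtool coalescing interface; for example, "ethtool -C eth0 stats-block-usecs 1000000" matches the default one-second interval set in qede_init_periodic_task(), and a value of 0 disables the periodic task ("eth0" here is only a placeholder interface name).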
@@ -1272,6 +1297,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, */ INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task); mutex_init(&edev->qede_lock); + qede_init_periodic_task(edev); rc = register_netdev(edev->ndev); if (rc) { @@ -1296,6 +1322,11 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, edev->rx_copybreak = QEDE_RX_HDR_SIZE; qede_log_probe(edev); + + /* retain user config (for example - after recovery) */ + if (edev->stats_coal_usecs) + schedule_delayed_work(&edev->periodic_task, 0); + return 0; err4: @@ -1364,6 +1395,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) unregister_netdev(ndev); cancel_delayed_work_sync(&edev->sp_task); + cancel_delayed_work_sync(&edev->periodic_task); edev->ops->common->set_power_state(cdev, PCI_D0); diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index c865a4be05eec2d4144792179a0517c997f66d95..4a1b94e5a8ea9d7f9c6c0518699c364a17b77b2b 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -582,8 +582,7 @@ qcaspi_spi_thread(void *data) while (!kthread_should_stop()) { set_current_state(TASK_INTERRUPTIBLE); if ((qca->intr_req == qca->intr_svc) && - (qca->txr.skb[qca->txr.head] == NULL) && - (qca->sync == QCASPI_SYNC_READY)) + !qca->txr.skb[qca->txr.head]) schedule(); set_current_state(TASK_RUNNING); diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index a7e376e7e689c016e1d7cad675b27b0821002883..4b19803a7dd011c3fa13f2c65b6f886e8a291b7d 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -616,10 +616,10 @@ struct rtl8169_private { struct work_struct work; } wk; - spinlock_t config25_lock; - spinlock_t mac_ocp_lock; + raw_spinlock_t config25_lock; + raw_spinlock_t mac_ocp_lock; - spinlock_t cfg9346_usage_lock; + raw_spinlock_t cfg9346_usage_lock; int cfg9346_usage_count; unsigned supports_gmii:1; @@ -671,20 +671,20 @@ static void rtl_lock_config_regs(struct rtl8169_private *tp) { unsigned long flags; - spin_lock_irqsave(&tp->cfg9346_usage_lock, flags); + raw_spin_lock_irqsave(&tp->cfg9346_usage_lock, flags); if (!--tp->cfg9346_usage_count) RTL_W8(tp, Cfg9346, Cfg9346_Lock); - spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags); + raw_spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags); } static void rtl_unlock_config_regs(struct rtl8169_private *tp) { unsigned long flags; - spin_lock_irqsave(&tp->cfg9346_usage_lock, flags); + raw_spin_lock_irqsave(&tp->cfg9346_usage_lock, flags); if (!tp->cfg9346_usage_count++) RTL_W8(tp, Cfg9346, Cfg9346_Unlock); - spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags); + raw_spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags); } static void rtl_pci_commit(struct rtl8169_private *tp) @@ -698,10 +698,10 @@ static void rtl_mod_config2(struct rtl8169_private *tp, u8 clear, u8 set) unsigned long flags; u8 val; - spin_lock_irqsave(&tp->config25_lock, flags); + raw_spin_lock_irqsave(&tp->config25_lock, flags); val = RTL_R8(tp, Config2); RTL_W8(tp, Config2, (val & ~clear) | set); - spin_unlock_irqrestore(&tp->config25_lock, flags); + raw_spin_unlock_irqrestore(&tp->config25_lock, flags); } static void rtl_mod_config5(struct rtl8169_private *tp, u8 clear, u8 set) @@ -709,10 +709,10 @@ static void rtl_mod_config5(struct rtl8169_private *tp, u8 clear, u8 set) unsigned long flags; u8 val; - spin_lock_irqsave(&tp->config25_lock, flags); + 
raw_spin_lock_irqsave(&tp->config25_lock, flags); val = RTL_R8(tp, Config5); RTL_W8(tp, Config5, (val & ~clear) | set); - spin_unlock_irqrestore(&tp->config25_lock, flags); + raw_spin_unlock_irqrestore(&tp->config25_lock, flags); } static bool rtl_is_8125(struct rtl8169_private *tp) @@ -899,9 +899,9 @@ static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) { unsigned long flags; - spin_lock_irqsave(&tp->mac_ocp_lock, flags); + raw_spin_lock_irqsave(&tp->mac_ocp_lock, flags); __r8168_mac_ocp_write(tp, reg, data); - spin_unlock_irqrestore(&tp->mac_ocp_lock, flags); + raw_spin_unlock_irqrestore(&tp->mac_ocp_lock, flags); } static u16 __r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg) @@ -919,9 +919,9 @@ static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg) unsigned long flags; u16 val; - spin_lock_irqsave(&tp->mac_ocp_lock, flags); + raw_spin_lock_irqsave(&tp->mac_ocp_lock, flags); val = __r8168_mac_ocp_read(tp, reg); - spin_unlock_irqrestore(&tp->mac_ocp_lock, flags); + raw_spin_unlock_irqrestore(&tp->mac_ocp_lock, flags); return val; } @@ -932,10 +932,10 @@ static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask, unsigned long flags; u16 data; - spin_lock_irqsave(&tp->mac_ocp_lock, flags); + raw_spin_lock_irqsave(&tp->mac_ocp_lock, flags); data = __r8168_mac_ocp_read(tp, reg); __r8168_mac_ocp_write(tp, reg, (data & ~mask) | set); - spin_unlock_irqrestore(&tp->mac_ocp_lock, flags); + raw_spin_unlock_irqrestore(&tp->mac_ocp_lock, flags); } /* Work around a hw issue with RTL8168g PHY, the quirk disables @@ -1420,14 +1420,14 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) r8168_mac_ocp_modify(tp, 0xc0b6, BIT(0), 0); } - spin_lock_irqsave(&tp->config25_lock, flags); + raw_spin_lock_irqsave(&tp->config25_lock, flags); for (i = 0; i < tmp; i++) { options = RTL_R8(tp, cfg[i].reg) & ~cfg[i].mask; if (wolopts & cfg[i].opt) options |= cfg[i].mask; RTL_W8(tp, cfg[i].reg, options); } - spin_unlock_irqrestore(&tp->config25_lock, flags); + raw_spin_unlock_irqrestore(&tp->config25_lock, flags); switch (tp->mac_version) { case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: @@ -5179,9 +5179,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->eee_adv = -1; tp->ocp_base = OCP_STD_PHY_BASE; - spin_lock_init(&tp->cfg9346_usage_lock); - spin_lock_init(&tp->config25_lock); - spin_lock_init(&tp->mac_ocp_lock); + raw_spin_lock_init(&tp->cfg9346_usage_lock); + raw_spin_lock_init(&tp->config25_lock); + raw_spin_lock_init(&tp->mac_ocp_lock); dev->tstats = devm_netdev_alloc_pcpu_stats(&pdev->dev, struct pcpu_sw_netstats); diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c index 29afaddb598db4b32acebdfa34667365ab503c85..fa6d6202b129d083a025cdec6cf1ff9f30aaddba 100644 --- a/drivers/net/ethernet/renesas/rswitch.c +++ b/drivers/net/ethernet/renesas/rswitch.c @@ -347,17 +347,6 @@ out: return -ENOMEM; } -static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv) -{ - struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; - - gq->ring_size = TS_RING_SIZE; - gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev, - sizeof(struct rswitch_ts_desc) * - (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL); - return !gq->ts_ring ? 
-ENOMEM : 0; -} - static void rswitch_desc_set_dptr(struct rswitch_desc *desc, dma_addr_t addr) { desc->dptrl = cpu_to_le32(lower_32_bits(addr)); @@ -533,6 +522,28 @@ static void rswitch_gwca_linkfix_free(struct rswitch_private *priv) gwca->linkfix_table = NULL; } +static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv) +{ + struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; + struct rswitch_ts_desc *desc; + + gq->ring_size = TS_RING_SIZE; + gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev, + sizeof(struct rswitch_ts_desc) * + (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL); + + if (!gq->ts_ring) + return -ENOMEM; + + rswitch_gwca_ts_queue_fill(priv, 0, TS_RING_SIZE); + desc = &gq->ts_ring[gq->ring_size]; + desc->desc.die_dt = DT_LINKFIX; + rswitch_desc_set_dptr(&desc->desc, gq->ring_dma); + INIT_LIST_HEAD(&priv->gwca.ts_info_list); + + return 0; +} + static struct rswitch_gwca_queue *rswitch_gwca_get(struct rswitch_private *priv) { struct rswitch_gwca_queue *gq; @@ -1485,7 +1496,7 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) { netif_stop_subqueue(ndev, 0); - return ret; + return NETDEV_TX_BUSY; } if (skb_put_padto(skb, ETH_ZLEN)) @@ -1780,9 +1791,6 @@ static int rswitch_init(struct rswitch_private *priv) if (err < 0) goto err_ts_queue_alloc; - rswitch_gwca_ts_queue_fill(priv, 0, TS_RING_SIZE); - INIT_LIST_HEAD(&priv->gwca.ts_info_list); - for (i = 0; i < RSWITCH_NUM_PORTS; i++) { err = rswitch_device_alloc(priv, i); if (err < 0) { diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index d30459dbfe8f838e7677d26038824484353f5eff..b63e47af636559734b45c3f886ae7a2f027ad0f4 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -2950,7 +2950,7 @@ static u32 efx_ef10_extract_event_ts(efx_qword_t *event) return tstamp; } -static void +static int efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) { struct efx_nic *efx = channel->efx; @@ -2958,13 +2958,14 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) unsigned int tx_ev_desc_ptr; unsigned int tx_ev_q_label; unsigned int tx_ev_type; + int work_done; u64 ts_part; if (unlikely(READ_ONCE(efx->reset_pending))) - return; + return 0; if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT))) - return; + return 0; /* Get the transmit queue */ tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL); @@ -2973,8 +2974,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) if (!tx_queue->timestamping) { /* Transmit completion */ tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX); - efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask); - return; + return efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask); } /* Transmit timestamps are only available for 8XXX series. They result @@ -3000,6 +3000,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) * fields in the event. 
*/ tx_ev_type = EFX_QWORD_FIELD(*event, ESF_EZ_TX_SOFT1); + work_done = 0; switch (tx_ev_type) { case TX_TIMESTAMP_EVENT_TX_EV_COMPLETION: @@ -3016,6 +3017,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) tx_queue->completed_timestamp_major = ts_part; efx_xmit_done_single(tx_queue); + work_done = 1; break; default: @@ -3026,6 +3028,8 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) EFX_QWORD_VAL(*event)); break; } + + return work_done; } static void @@ -3081,13 +3085,16 @@ static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel, } } +#define EFX_NAPI_MAX_TX 512 + static int efx_ef10_ev_process(struct efx_channel *channel, int quota) { struct efx_nic *efx = channel->efx; efx_qword_t event, *p_event; unsigned int read_ptr; - int ev_code; + int spent_tx = 0; int spent = 0; + int ev_code; if (quota <= 0) return spent; @@ -3126,7 +3133,11 @@ static int efx_ef10_ev_process(struct efx_channel *channel, int quota) } break; case ESE_DZ_EV_CODE_TX_EV: - efx_ef10_handle_tx_event(channel, &event); + spent_tx += efx_ef10_handle_tx_event(channel, &event); + if (spent_tx >= EFX_NAPI_MAX_TX) { + spent = quota; + goto out; + } break; case ESE_DZ_EV_CODE_DRIVER_EV: efx_ef10_handle_driver_event(channel, &event); diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c index d916877b5a9ad92414c36f317ea8aae41706cecd..be395cd8770bc71db641b710b90c3d00dc2f43fe 100644 --- a/drivers/net/ethernet/sfc/ef100_netdev.c +++ b/drivers/net/ethernet/sfc/ef100_netdev.c @@ -378,7 +378,9 @@ int ef100_probe_netdev(struct efx_probe_data *probe_data) efx->net_dev = net_dev; SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev); - net_dev->features |= efx->type->offload_features; + /* enable all supported features except rx-fcs and rx-all */ + net_dev->features |= efx->type->offload_features & + ~(NETIF_F_RXFCS | NETIF_F_RXALL); net_dev->hw_features |= efx->type->offload_features; net_dev->hw_enc_features |= efx->type->offload_features; net_dev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_SG | diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index 4dc643b0d2db475bebae922dfda0f92b4f24028b..7adde9639c8aba091396236db1aa888241042ba7 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -253,6 +253,8 @@ static void ef100_ev_read_ack(struct efx_channel *channel) efx_reg(channel->efx, ER_GZ_EVQ_INT_PRIME)); } +#define EFX_NAPI_MAX_TX 512 + static int ef100_ev_process(struct efx_channel *channel, int quota) { struct efx_nic *efx = channel->efx; @@ -260,6 +262,7 @@ static int ef100_ev_process(struct efx_channel *channel, int quota) bool evq_phase, old_evq_phase; unsigned int read_ptr; efx_qword_t *p_event; + int spent_tx = 0; int spent = 0; bool ev_phase; int ev_type; @@ -295,7 +298,9 @@ static int ef100_ev_process(struct efx_channel *channel, int quota) efx_mcdi_process_event(channel, p_event); break; case ESE_GZ_EF100_EV_TX_COMPLETION: - ef100_ev_tx(channel, p_event); + spent_tx += ef100_ev_tx(channel, p_event); + if (spent_tx >= EFX_NAPI_MAX_TX) + spent = quota; break; case ESE_GZ_EF100_EV_DRIVER: netif_info(efx, drv, efx->net_dev, diff --git a/drivers/net/ethernet/sfc/ef100_tx.c b/drivers/net/ethernet/sfc/ef100_tx.c index 29ffaf35559d6ecb63c3c010d33bfaeccf449b28..849e5555bd1280a862f2d6e52c236820c4375374 100644 --- a/drivers/net/ethernet/sfc/ef100_tx.c +++ b/drivers/net/ethernet/sfc/ef100_tx.c @@ -346,7 +346,7 @@ void ef100_tx_write(struct 
efx_tx_queue *tx_queue) ef100_tx_push_buffers(tx_queue); } -void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event) +int ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event) { unsigned int tx_done = EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_TXCMPL_NUM_DESC); @@ -357,7 +357,7 @@ void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event) unsigned int tx_index = (tx_queue->read_count + tx_done - 1) & tx_queue->ptr_mask; - efx_xmit_done(tx_queue, tx_index); + return efx_xmit_done(tx_queue, tx_index); } /* Add a socket buffer to a TX queue diff --git a/drivers/net/ethernet/sfc/ef100_tx.h b/drivers/net/ethernet/sfc/ef100_tx.h index e9e11540fcdeafdea63c11e68352c8d187707b12..d9a0819c5a72c0a3a68f9a234722f451a47ab65c 100644 --- a/drivers/net/ethernet/sfc/ef100_tx.h +++ b/drivers/net/ethernet/sfc/ef100_tx.h @@ -20,7 +20,7 @@ void ef100_tx_init(struct efx_tx_queue *tx_queue); void ef100_tx_write(struct efx_tx_queue *tx_queue); unsigned int ef100_tx_max_skb_descs(struct efx_nic *efx); -void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event); +int ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event); netdev_tx_t ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); int __ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb, diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index fcea3ea809d77b315959fcc241b30e283f603ad6..41b33a75333cd40224282a958a76e241a407b566 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -301,6 +301,7 @@ int efx_probe_interrupts(struct efx_nic *efx) efx->tx_channel_offset = 0; efx->n_xdp_channels = 0; efx->xdp_channel_offset = efx->n_channels; + efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED; rc = pci_enable_msi(efx->pci_dev); if (rc == 0) { efx_get_channel(efx, 0)->irq = efx->pci_dev->irq; @@ -322,6 +323,7 @@ int efx_probe_interrupts(struct efx_nic *efx) efx->tx_channel_offset = efx_separate_tx_channels ? 1 : 0; efx->n_xdp_channels = 0; efx->xdp_channel_offset = efx->n_channels; + efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED; efx->legacy_irq = efx->pci_dev->irq; } diff --git a/drivers/net/ethernet/sfc/efx_devlink.c b/drivers/net/ethernet/sfc/efx_devlink.c index 381b805659d39a11d5ceef2cf9d67cb2fba3bc8d..ef9971cbb695d2e82fb58268522295ce853ebffe 100644 --- a/drivers/net/ethernet/sfc/efx_devlink.c +++ b/drivers/net/ethernet/sfc/efx_devlink.c @@ -171,9 +171,14 @@ static int efx_devlink_info_nvram_partition(struct efx_nic *efx, rc = efx_mcdi_nvram_metadata(efx, partition_type, NULL, version, NULL, 0); + + /* If the partition does not exist, that is not an error. 
*/ + if (rc == -ENOENT) + return 0; + if (rc) { - netif_err(efx, drv, efx->net_dev, "mcdi nvram %s: failed\n", - version_name); + netif_err(efx, drv, efx->net_dev, "mcdi nvram %s: failed (rc=%d)\n", + version_name, rc); return rc; } @@ -187,36 +192,33 @@ static int efx_devlink_info_nvram_partition(struct efx_nic *efx, static int efx_devlink_info_stored_versions(struct efx_nic *efx, struct devlink_info_req *req) { - int rc; - - rc = efx_devlink_info_nvram_partition(efx, req, - NVRAM_PARTITION_TYPE_BUNDLE, - DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID); - if (rc) - return rc; - - rc = efx_devlink_info_nvram_partition(efx, req, - NVRAM_PARTITION_TYPE_MC_FIRMWARE, - DEVLINK_INFO_VERSION_GENERIC_FW_MGMT); - if (rc) - return rc; - - rc = efx_devlink_info_nvram_partition(efx, req, - NVRAM_PARTITION_TYPE_SUC_FIRMWARE, - EFX_DEVLINK_INFO_VERSION_FW_MGMT_SUC); - if (rc) - return rc; - - rc = efx_devlink_info_nvram_partition(efx, req, - NVRAM_PARTITION_TYPE_EXPANSION_ROM, - EFX_DEVLINK_INFO_VERSION_FW_EXPROM); - if (rc) - return rc; + int err; - rc = efx_devlink_info_nvram_partition(efx, req, - NVRAM_PARTITION_TYPE_EXPANSION_UEFI, - EFX_DEVLINK_INFO_VERSION_FW_UEFI); - return rc; + /* We do not care here about the specific error but just if an error + * happened. The specific error will be reported inside the call + * through system messages, and if any error happened in any call + * below, we report it through extack. + */ + err = efx_devlink_info_nvram_partition(efx, req, + NVRAM_PARTITION_TYPE_BUNDLE, + DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID); + + err |= efx_devlink_info_nvram_partition(efx, req, + NVRAM_PARTITION_TYPE_MC_FIRMWARE, + DEVLINK_INFO_VERSION_GENERIC_FW_MGMT); + + err |= efx_devlink_info_nvram_partition(efx, req, + NVRAM_PARTITION_TYPE_SUC_FIRMWARE, + EFX_DEVLINK_INFO_VERSION_FW_MGMT_SUC); + + err |= efx_devlink_info_nvram_partition(efx, req, + NVRAM_PARTITION_TYPE_EXPANSION_ROM, + EFX_DEVLINK_INFO_VERSION_FW_EXPROM); + + err |= efx_devlink_info_nvram_partition(efx, req, + NVRAM_PARTITION_TYPE_EXPANSION_UEFI, + EFX_DEVLINK_INFO_VERSION_FW_UEFI); + return err; } #define EFX_VER_FLAG(_f) \ @@ -587,27 +589,20 @@ static int efx_devlink_info_get(struct devlink *devlink, { struct efx_devlink *devlink_private = devlink_priv(devlink); struct efx_nic *efx = devlink_private->efx; - int rc; + int err; - /* Several different MCDI commands are used. We report first error - * through extack returning at that point. Specific error - * information via system messages. + /* Several different MCDI commands are used. We report if errors + * happened through extack. Specific error information via system + * messages inside the calls. */ - rc = efx_devlink_info_board_cfg(efx, req); - if (rc) { - NL_SET_ERR_MSG_MOD(extack, "Getting board info failed"); - return rc; - } - rc = efx_devlink_info_stored_versions(efx, req); - if (rc) { - NL_SET_ERR_MSG_MOD(extack, "Getting stored versions failed"); - return rc; - } - rc = efx_devlink_info_running_versions(efx, req); - if (rc) { - NL_SET_ERR_MSG_MOD(extack, "Getting running versions failed"); - return rc; - } + err = efx_devlink_info_board_cfg(efx, req); + + err |= efx_devlink_info_stored_versions(efx, req); + + err |= efx_devlink_info_running_versions(efx, req); + + if (err) + NL_SET_ERR_MSG_MOD(extack, "Errors when getting device info. 
Check system messages"); return 0; } diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c index 06ed74994e366201ec468e2fd05ff14f474b8e7f..1776f7f8a7a90ea4043086008847ab2a52c532d8 100644 --- a/drivers/net/ethernet/sfc/siena/efx_channels.c +++ b/drivers/net/ethernet/sfc/siena/efx_channels.c @@ -302,6 +302,7 @@ int efx_siena_probe_interrupts(struct efx_nic *efx) efx->tx_channel_offset = 0; efx->n_xdp_channels = 0; efx->xdp_channel_offset = efx->n_channels; + efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED; rc = pci_enable_msi(efx->pci_dev); if (rc == 0) { efx_get_channel(efx, 0)->irq = efx->pci_dev->irq; @@ -323,6 +324,7 @@ int efx_siena_probe_interrupts(struct efx_nic *efx) efx->tx_channel_offset = efx_siena_separate_tx_channels ? 1 : 0; efx->n_xdp_channels = 0; efx->xdp_channel_offset = efx->n_channels; + efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED; efx->legacy_irq = efx->pci_dev->irq; } diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c index 0327639a628a91adccd4229bca56c30fb75b5c78..c004443c1d58c3eebe042489fb18b1430688cd27 100644 --- a/drivers/net/ethernet/sfc/tc.c +++ b/drivers/net/ethernet/sfc/tc.c @@ -624,13 +624,12 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx, if (!found) { /* We don't care. */ netif_dbg(efx, drv, efx->net_dev, "Ignoring foreign filter that doesn't egdev us\n"); - rc = -EOPNOTSUPP; - goto release; + return -EOPNOTSUPP; } rc = efx_mae_match_check_caps(efx, &match.mask, NULL); if (rc) - goto release; + return rc; if (efx_tc_match_is_encap(&match.mask)) { enum efx_encap_type type; @@ -639,8 +638,7 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx, if (type == EFX_ENCAP_TYPE_NONE) { NL_SET_ERR_MSG_MOD(extack, "Egress encap match on unsupported tunnel device"); - rc = -EOPNOTSUPP; - goto release; + return -EOPNOTSUPP; } rc = efx_mae_check_encap_type_supported(efx, type); @@ -648,25 +646,24 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx, NL_SET_ERR_MSG_FMT_MOD(extack, "Firmware reports no support for %s encap match", efx_tc_encap_type_name(type)); - goto release; + return rc; } rc = efx_tc_flower_record_encap_match(efx, &match, type, extack); if (rc) - goto release; + return rc; } else { /* This is not a tunnel decap rule, ignore it */ netif_dbg(efx, drv, efx->net_dev, "Ignoring foreign filter without encap match\n"); - rc = -EOPNOTSUPP; - goto release; + return -EOPNOTSUPP; } rule = kzalloc(sizeof(*rule), GFP_USER); if (!rule) { rc = -ENOMEM; - goto release; + goto out_free; } INIT_LIST_HEAD(&rule->acts.list); rule->cookie = tc->cookie; @@ -678,7 +675,7 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx, "Ignoring already-offloaded rule (cookie %lx)\n", tc->cookie); rc = -EEXIST; - goto release; + goto out_free; } act = kzalloc(sizeof(*act), GFP_USER); @@ -843,6 +840,7 @@ release: efx_tc_match_action_ht_params); efx_tc_free_action_set_list(efx, &rule->acts, false); } +out_free: kfree(rule); if (match.encap) efx_tc_flower_release_encap_match(efx, match.encap); @@ -899,8 +897,7 @@ static int efx_tc_flower_replace(struct efx_nic *efx, return rc; if (efx_tc_match_is_encap(&match.mask)) { NL_SET_ERR_MSG_MOD(extack, "Ingress enc_key matches not supported"); - rc = -EOPNOTSUPP; - goto release; + return -EOPNOTSUPP; } if (tc->common.chain_index) { @@ -924,9 +921,9 @@ static int efx_tc_flower_replace(struct efx_nic *efx, if (old) { netif_dbg(efx, drv, efx->net_dev, "Already offloaded rule (cookie %lx)\n", tc->cookie); - rc = 
-EEXIST; NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded"); - goto release; + kfree(rule); + return -EEXIST; } /* Parse actions */ diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c index 67e789b96c437cb8f230b074c846d6b5884d62cc..755aa92bf82363f7877b2ac2d30b06ee53112708 100644 --- a/drivers/net/ethernet/sfc/tx_common.c +++ b/drivers/net/ethernet/sfc/tx_common.c @@ -249,7 +249,7 @@ void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue) } } -void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) +int efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) { unsigned int fill_level, pkts_compl = 0, bytes_compl = 0; unsigned int efv_pkts_compl = 0; @@ -279,6 +279,8 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) } efx_xmit_done_check_empty(tx_queue); + + return pkts_compl + efv_pkts_compl; } /* Remove buffers put into a tx_queue for the current packet. diff --git a/drivers/net/ethernet/sfc/tx_common.h b/drivers/net/ethernet/sfc/tx_common.h index d87aecbc7bf1ab35bca5e48bdf56073f4830008b..1e9f42938aac9207e7b06bc59d03471aef190a26 100644 --- a/drivers/net/ethernet/sfc/tx_common.h +++ b/drivers/net/ethernet/sfc/tx_common.h @@ -28,7 +28,7 @@ static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer) } void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue); -void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); +int efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); void efx_enqueue_unwind(struct efx_tx_queue *tx_queue, unsigned int insert_count); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index 16a8c361283b4f90a04472669671a80db29dfcf7..f07905f00f9888e64cc177e58edb16c220a8d1cc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -644,7 +644,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev) plat_dat->fix_mac_speed = ethqos_fix_mac_speed; plat_dat->dump_debug_regs = rgmii_dump; plat_dat->has_gmac4 = 1; - plat_dat->dwmac4_addrs = &data->dwmac4_addrs; + if (ethqos->has_emac3) + plat_dat->dwmac4_addrs = &data->dwmac4_addrs; plat_dat->pmt = 1; plat_dat->tso_en = of_property_read_bool(np, "snps,tso"); if (of_device_is_compatible(np, "qcom,qcs404-ethqos")) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 0fca81507a779acafc11faa52aafeb41326b1f8e..87510951f4e80b4f6e3aa52424439886216fe195 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3873,7 +3873,6 @@ irq_error: stmmac_hw_teardown(dev); init_error: - free_dma_desc_resources(priv, &priv->dma_conf); phylink_disconnect_phy(priv->phylink); init_phy_error: pm_runtime_put(priv->device); @@ -3891,6 +3890,9 @@ static int stmmac_open(struct net_device *dev) return PTR_ERR(dma_conf); ret = __stmmac_open(dev, dma_conf); + if (ret) + free_dma_desc_resources(priv, dma_conf); + kfree(dma_conf); return ret; } @@ -5633,12 +5635,15 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu) stmmac_release(dev); ret = __stmmac_open(dev, dma_conf); - kfree(dma_conf); if (ret) { + free_dma_desc_resources(priv, dma_conf); + kfree(dma_conf); netdev_err(priv->dev, "failed reopening the interface after MTU change\n"); return ret; } + kfree(dma_conf); + stmmac_set_rx_mode(dev); } @@ -7233,8 +7238,7 @@ int 
stmmac_dvr_probe(struct device *device, ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | - NETDEV_XDP_ACT_XSK_ZEROCOPY | - NETDEV_XDP_ACT_NDO_XMIT; + NETDEV_XDP_ACT_XSK_ZEROCOPY; ret = stmmac_tc_init(priv, priv); if (!ret) { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c index 9d4d8c3dad0a3f769650d571ace87a4e77d292a7..aa6f16d3df649e8742584e0aacf83204acc1030c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c @@ -117,6 +117,9 @@ int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog, return -EOPNOTSUPP; } + if (!prog) + xdp_features_clear_redirect_target(dev); + need_update = !!priv->xdp_prog != !!prog; if (if_running && need_update) stmmac_xdp_release(dev); @@ -131,5 +134,8 @@ int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog, if (if_running && need_update) stmmac_xdp_open(dev); + if (prog) + xdp_features_set_redirect_target(dev, false); + return 0; } diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 4ef05bad4613c9309a81b00ba896b365a698eec7..d61dfa250feb722b67352104b272dbeebd056f0d 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -5077,6 +5077,8 @@ err_out_iounmap: cas_shutdown(cp); mutex_unlock(&cp->pm_mutex); + vfree(cp->fw_data); + pci_iounmap(pdev, cp->regs); diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 11cbcd9e2c72549f799e2058ebb925476fbdf63f..bebcfd5e6b579ef115588ae106eba0f87b15637a 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -2068,7 +2068,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) /* Initialize the Serdes PHY for the port */ ret = am65_cpsw_init_serdes_phy(dev, port_np, port); if (ret) - return ret; + goto of_node_put; port->slave.mac_only = of_property_read_bool(port_np, "ti,mac-only"); diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c index f9972b8140f9f7f3ba599cc42b2f4db2a6c7434c..a03490ba2e5b38a798fbd6cee5a451d16990de60 100644 --- a/drivers/net/ieee802154/adf7242.c +++ b/drivers/net/ieee802154/adf7242.c @@ -1348,3 +1348,5 @@ module_spi_driver(adf7242_driver); MODULE_AUTHOR("Michael Hennerich "); MODULE_DESCRIPTION("ADF7242 IEEE802.15.4 Transceiver Driver"); MODULE_LICENSE("GPL"); + +MODULE_FIRMWARE(FIRMWARE); diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c index 8445c2189d11664ab4910cc8f03e622790ba7b06..31cba9aa76366467aeb7175bb8b02cf60e77f133 100644 --- a/drivers/net/ieee802154/mac802154_hwsim.c +++ b/drivers/net/ieee802154/mac802154_hwsim.c @@ -685,7 +685,7 @@ static int hwsim_del_edge_nl(struct sk_buff *msg, struct genl_info *info) static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info) { struct nlattr *edge_attrs[MAC802154_HWSIM_EDGE_ATTR_MAX + 1]; - struct hwsim_edge_info *einfo; + struct hwsim_edge_info *einfo, *einfo_old; struct hwsim_phy *phy_v0; struct hwsim_edge *e; u32 v0, v1; @@ -723,8 +723,10 @@ static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info) list_for_each_entry_rcu(e, &phy_v0->edges, list) { if (e->endpoint->idx == v1) { einfo->lqi = lqi; - rcu_assign_pointer(e->info, einfo); + einfo_old = rcu_replace_pointer(e->info, einfo, + 
lockdep_is_held(&hwsim_phys_lock)); rcu_read_unlock(); + kfree_rcu(einfo_old, rcu); mutex_unlock(&hwsim_phys_lock); return 0; } diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 2ee80ed140b72b1cf1702b19bf9bad69c36c7d47..afa1d56d9095c13fe65a09f76fbc99679d9d8624 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -119,7 +119,7 @@ enum ipa_status_field_id { }; /* Size in bytes of an IPA packet status structure */ -#define IPA_STATUS_SIZE sizeof(__le32[4]) +#define IPA_STATUS_SIZE sizeof(__le32[8]) /* IPA status structure decoder; looks up field values for a structure */ static u32 ipa_status_extract(struct ipa *ipa, const void *data, diff --git a/drivers/net/ipvlan/ipvlan_l3s.c b/drivers/net/ipvlan/ipvlan_l3s.c index 71712ea25403dd71322f36297bb54f782d789f2c..d5b05e8032199a7bd10380547df7d5e82e667f69 100644 --- a/drivers/net/ipvlan/ipvlan_l3s.c +++ b/drivers/net/ipvlan/ipvlan_l3s.c @@ -102,6 +102,10 @@ static unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb, skb->dev = addr->master->dev; skb->skb_iif = skb->dev->ifindex; +#if IS_ENABLED(CONFIG_IPV6) + if (addr->atype == IPVL_IPV6) + IP6CB(skb)->iif = skb->dev->ifindex; +#endif len = skb->len + ETH_HLEN; ipvlan_count_rx(addr->master, len, true, false); out: diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 3427993f94f748ccb60af1989f0cb37da2cdad7b..984dfa5d6c11cb3ae6900597edeb1612f27decc3 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -3997,17 +3997,15 @@ static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len) return -ENOMEM; secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats); - if (!secy->tx_sc.stats) { - free_percpu(macsec->stats); + if (!secy->tx_sc.stats) return -ENOMEM; - } secy->tx_sc.md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL); - if (!secy->tx_sc.md_dst) { - free_percpu(secy->tx_sc.stats); - free_percpu(macsec->stats); + if (!secy->tx_sc.md_dst) + /* macsec and secy percpu stats will be freed when unregistering + * net_device in macsec_free_netdev() + */ return -ENOMEM; - } if (sci == MACSEC_UNDEF_SCI) sci = dev_to_sci(dev, MACSEC_PORT_ES); diff --git a/drivers/net/mdio/mdio-i2c.c b/drivers/net/mdio/mdio-i2c.c index 1e0c206d0f2e68175e671b7d6ec65a869af6fc4f..da2001ea1f9937dd5c4e633dd3d781ef7d5f1dbb 100644 --- a/drivers/net/mdio/mdio-i2c.c +++ b/drivers/net/mdio/mdio-i2c.c @@ -291,7 +291,8 @@ static int i2c_rollball_mii_cmd(struct mii_bus *bus, int bus_addr, u8 cmd, return i2c_transfer_rollball(i2c, msgs, ARRAY_SIZE(msgs)); } -static int i2c_mii_read_rollball(struct mii_bus *bus, int phy_id, int reg) +static int i2c_mii_read_rollball(struct mii_bus *bus, int phy_id, int devad, + int reg) { u8 buf[4], res[6]; int bus_addr, ret; @@ -302,7 +303,7 @@ static int i2c_mii_read_rollball(struct mii_bus *bus, int phy_id, int reg) return 0xffff; buf[0] = ROLLBALL_DATA_ADDR; - buf[1] = (reg >> 16) & 0x1f; + buf[1] = devad; buf[2] = (reg >> 8) & 0xff; buf[3] = reg & 0xff; @@ -322,8 +323,8 @@ static int i2c_mii_read_rollball(struct mii_bus *bus, int phy_id, int reg) return val; } -static int i2c_mii_write_rollball(struct mii_bus *bus, int phy_id, int reg, - u16 val) +static int i2c_mii_write_rollball(struct mii_bus *bus, int phy_id, int devad, + int reg, u16 val) { int bus_addr, ret; u8 buf[6]; @@ -333,7 +334,7 @@ static int i2c_mii_write_rollball(struct mii_bus *bus, int phy_id, int reg, return 0; buf[0] = ROLLBALL_DATA_ADDR; - buf[1] = (reg >> 16) & 0x1f; + buf[1] = devad; buf[2] = (reg >> 8) & 
0xff; buf[3] = reg & 0xff; buf[4] = val >> 8; @@ -405,8 +406,8 @@ struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c, return ERR_PTR(ret); } - mii->read = i2c_mii_read_rollball; - mii->write = i2c_mii_write_rollball; + mii->read_c45 = i2c_mii_read_rollball; + mii->write_c45 = i2c_mii_write_rollball; break; default: mii->read = i2c_mii_read_default_c22; diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c index f19d48c94fe0e9ccd66c3a7c451268af7d713248..72f25e778840f9330ca862e611732aabd11c340c 100644 --- a/drivers/net/pcs/pcs-xpcs.c +++ b/drivers/net/pcs/pcs-xpcs.c @@ -873,7 +873,7 @@ int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface, switch (compat->an_mode) { case DW_AN_C73: - if (phylink_autoneg_inband(mode)) { + if (test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, advertising)) { ret = xpcs_config_aneg_c73(xpcs, compat); if (ret) return ret; diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index d75f526a20a49b6a65889c41adeb1395dcea002c..e397e7d642d925079f5346fbb9433a81fa96d74a 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -44,6 +44,7 @@ #define DP83867_STRAP_STS1 0x006E #define DP83867_STRAP_STS2 0x006f #define DP83867_RGMIIDCTL 0x0086 +#define DP83867_DSP_FFE_CFG 0x012c #define DP83867_RXFCFG 0x0134 #define DP83867_RXFPMD1 0x0136 #define DP83867_RXFPMD2 0x0137 @@ -935,14 +936,33 @@ static int dp83867_phy_reset(struct phy_device *phydev) { int err; - err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESTART); + err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESET); if (err < 0) return err; usleep_range(10, 20); - return phy_modify(phydev, MII_DP83867_PHYCTRL, + err = phy_modify(phydev, MII_DP83867_PHYCTRL, DP83867_PHYCR_FORCE_LINK_GOOD, 0); + if (err < 0) + return err; + + /* Configure the DSP Feedforward Equalizer Configuration register to + * improve short cable (< 1 meter) performance. This will not affect + * long cable performance. 
+ */ + err = phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_DSP_FFE_CFG, + 0x0e81); + if (err < 0) + return err; + + err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESTART); + if (err < 0) + return err; + + usleep_range(10, 20); + + return 0; } static void dp83867_link_change_notify(struct phy_device *phydev) diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 389f33a12534419ded1015b780fd4dbb1b32a55c..8b3618d3da4aa552596a3e22a8ded47204335be0 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -1287,7 +1287,7 @@ EXPORT_SYMBOL_GPL(mdiobus_modify_changed); * @mask: bit mask of bits to clear * @set: bit mask of bits to set */ -int mdiobus_c45_modify_changed(struct mii_bus *bus, int devad, int addr, +int mdiobus_c45_modify_changed(struct mii_bus *bus, int addr, int devad, u32 regnum, u16 mask, u16 set) { int err; diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h index a50235fdf7d99d2f5db21dc8e002a0830223ae08..defe5cc6d4fcef4754edfb0aa5a1d6e562e0b282 100644 --- a/drivers/net/phy/mscc/mscc.h +++ b/drivers/net/phy/mscc/mscc.h @@ -179,6 +179,7 @@ enum rgmii_clock_delay { #define VSC8502_RGMII_CNTL 20 #define VSC8502_RGMII_RX_DELAY_MASK 0x0070 #define VSC8502_RGMII_TX_DELAY_MASK 0x0007 +#define VSC8502_RGMII_RX_CLK_DISABLE 0x0800 #define MSCC_PHY_WOL_LOWER_MAC_ADDR 21 #define MSCC_PHY_WOL_MID_MAC_ADDR 22 @@ -276,6 +277,7 @@ enum rgmii_clock_delay { /* Microsemi PHY ID's * Code assumes lowest nibble is 0 */ +#define PHY_ID_VSC8501 0x00070530 #define PHY_ID_VSC8502 0x00070630 #define PHY_ID_VSC8504 0x000704c0 #define PHY_ID_VSC8514 0x00070670 diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c index 62bf99e45af16d86ff80428af812e34183888715..28df8a2e4230f3ffd6f559e8ac9521cedc7d0100 100644 --- a/drivers/net/phy/mscc/mscc_main.c +++ b/drivers/net/phy/mscc/mscc_main.c @@ -519,16 +519,27 @@ out_unlock: * * 2.0 ns (which causes the data to be sampled at exactly half way between * clock transitions at 1000 Mbps) if delays should be enabled */ -static int vsc85xx_rgmii_set_skews(struct phy_device *phydev, u32 rgmii_cntl, - u16 rgmii_rx_delay_mask, - u16 rgmii_tx_delay_mask) +static int vsc85xx_update_rgmii_cntl(struct phy_device *phydev, u32 rgmii_cntl, + u16 rgmii_rx_delay_mask, + u16 rgmii_tx_delay_mask) { u16 rgmii_rx_delay_pos = ffs(rgmii_rx_delay_mask) - 1; u16 rgmii_tx_delay_pos = ffs(rgmii_tx_delay_mask) - 1; u16 reg_val = 0; - int rc; + u16 mask = 0; + int rc = 0; - mutex_lock(&phydev->lock); + /* For traffic to pass, the VSC8502 family needs the RX_CLK disable bit + * to be unset for all PHY modes, so do that as part of the paged + * register modification. + * For some family members (like VSC8530/31/40/41) this bit is reserved + * and read-only, and the RX clock is enabled by default. 
+ */ + if (rgmii_cntl == VSC8502_RGMII_CNTL) + mask |= VSC8502_RGMII_RX_CLK_DISABLE; + + if (phy_interface_is_rgmii(phydev)) + mask |= rgmii_rx_delay_mask | rgmii_tx_delay_mask; if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID || phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) @@ -537,31 +548,20 @@ static int vsc85xx_rgmii_set_skews(struct phy_device *phydev, u32 rgmii_cntl, phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) reg_val |= RGMII_CLK_DELAY_2_0_NS << rgmii_tx_delay_pos; - rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_2, - rgmii_cntl, - rgmii_rx_delay_mask | rgmii_tx_delay_mask, - reg_val); - - mutex_unlock(&phydev->lock); + if (mask) + rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_2, + rgmii_cntl, mask, reg_val); return rc; } static int vsc85xx_default_config(struct phy_device *phydev) { - int rc; - phydev->mdix_ctrl = ETH_TP_MDI_AUTO; - if (phy_interface_mode_is_rgmii(phydev->interface)) { - rc = vsc85xx_rgmii_set_skews(phydev, VSC8502_RGMII_CNTL, - VSC8502_RGMII_RX_DELAY_MASK, - VSC8502_RGMII_TX_DELAY_MASK); - if (rc) - return rc; - } - - return 0; + return vsc85xx_update_rgmii_cntl(phydev, VSC8502_RGMII_CNTL, + VSC8502_RGMII_RX_DELAY_MASK, + VSC8502_RGMII_TX_DELAY_MASK); } static int vsc85xx_get_tunable(struct phy_device *phydev, @@ -1758,13 +1758,11 @@ static int vsc8584_config_init(struct phy_device *phydev) if (ret) return ret; - if (phy_interface_is_rgmii(phydev)) { - ret = vsc85xx_rgmii_set_skews(phydev, VSC8572_RGMII_CNTL, - VSC8572_RGMII_RX_DELAY_MASK, - VSC8572_RGMII_TX_DELAY_MASK); - if (ret) - return ret; - } + ret = vsc85xx_update_rgmii_cntl(phydev, VSC8572_RGMII_CNTL, + VSC8572_RGMII_RX_DELAY_MASK, + VSC8572_RGMII_TX_DELAY_MASK); + if (ret) + return ret; ret = genphy_soft_reset(phydev); if (ret) @@ -2316,6 +2314,30 @@ static int vsc85xx_probe(struct phy_device *phydev) /* Microsemi VSC85xx PHYs */ static struct phy_driver vsc85xx_driver[] = { +{ + .phy_id = PHY_ID_VSC8501, + .name = "Microsemi GE VSC8501 SyncE", + .phy_id_mask = 0xfffffff0, + /* PHY_BASIC_FEATURES */ + .soft_reset = &genphy_soft_reset, + .config_init = &vsc85xx_config_init, + .config_aneg = &vsc85xx_config_aneg, + .read_status = &vsc85xx_read_status, + .handle_interrupt = vsc85xx_handle_interrupt, + .config_intr = &vsc85xx_config_intr, + .suspend = &genphy_suspend, + .resume = &genphy_resume, + .probe = &vsc85xx_probe, + .set_wol = &vsc85xx_wol_set, + .get_wol = &vsc85xx_wol_get, + .get_tunable = &vsc85xx_get_tunable, + .set_tunable = &vsc85xx_set_tunable, + .read_page = &vsc85xx_phy_read_page, + .write_page = &vsc85xx_phy_write_page, + .get_sset_count = &vsc85xx_get_sset_count, + .get_strings = &vsc85xx_get_strings, + .get_stats = &vsc85xx_get_stats, +}, { .phy_id = PHY_ID_VSC8502, .name = "Microsemi GE VSC8502 SyncE", @@ -2656,6 +2678,8 @@ static struct phy_driver vsc85xx_driver[] = { module_phy_driver(vsc85xx_driver); static struct mdio_device_id __maybe_unused vsc85xx_tbl[] = { + { PHY_ID_VSC8501, 0xfffffff0, }, + { PHY_ID_VSC8502, 0xfffffff0, }, { PHY_ID_VSC8504, 0xfffffff0, }, { PHY_ID_VSC8514, 0xfffffff0, }, { PHY_ID_VSC8530, 0xfffffff0, }, diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c index 6301a9abfb95596647e81e9071ca7828e54d6bcf..ea1073adc5a16a6b531e9f1f2c51869dad8ead80 100644 --- a/drivers/net/phy/mxl-gpy.c +++ b/drivers/net/phy/mxl-gpy.c @@ -274,13 +274,6 @@ static int gpy_config_init(struct phy_device *phydev) return ret < 0 ? 
ret : 0; } -static bool gpy_has_broken_mdint(struct phy_device *phydev) -{ - /* At least these PHYs are known to have broken interrupt handling */ - return phydev->drv->phy_id == PHY_ID_GPY215B || - phydev->drv->phy_id == PHY_ID_GPY215C; -} - static int gpy_probe(struct phy_device *phydev) { struct device *dev = &phydev->mdio.dev; @@ -300,8 +293,7 @@ static int gpy_probe(struct phy_device *phydev) phydev->priv = priv; mutex_init(&priv->mbox_lock); - if (gpy_has_broken_mdint(phydev) && - !device_property_present(dev, "maxlinear,use-broken-interrupts")) + if (!device_property_present(dev, "maxlinear,use-broken-interrupts")) phydev->dev_flags |= PHY_F_NO_IRQ; fw_version = phy_read(phydev, PHY_FWV); @@ -659,11 +651,9 @@ static irqreturn_t gpy_handle_interrupt(struct phy_device *phydev) * frame. Therefore, polling is the best we can do and won't do any more * harm. * It was observed that this bug happens on link state and link speed - * changes on a GPY215B and GYP215C independent of the firmware version - * (which doesn't mean that this list is exhaustive). + * changes independent of the firmware version. */ - if (gpy_has_broken_mdint(phydev) && - (reg & (PHY_IMASK_LSTC | PHY_IMASK_LSPC))) { + if (reg & (PHY_IMASK_LSTC | PHY_IMASK_LSPC)) { reg = gpy_mbox_read(phydev, REG_GPIO0_OUT); if (reg < 0) { phy_error(phydev); diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 17d0d0555a79f1349ed6e3325d31e799d8ef053d..53598210be6cbded0a06ecf46a62882fdc4d2dde 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -3021,6 +3021,15 @@ static int phy_led_blink_set(struct led_classdev *led_cdev, return err; } +static void phy_leds_unregister(struct phy_device *phydev) +{ + struct phy_led *phyled; + + list_for_each_entry(phyled, &phydev->leds, list) { + led_classdev_unregister(&phyled->led_cdev); + } +} + static int of_phy_led(struct phy_device *phydev, struct device_node *led) { @@ -3054,7 +3063,7 @@ static int of_phy_led(struct phy_device *phydev, init_data.fwnode = of_fwnode_handle(led); init_data.devname_mandatory = true; - err = devm_led_classdev_register_ext(dev, cdev, &init_data); + err = led_classdev_register_ext(dev, cdev, &init_data); if (err) return err; @@ -3083,6 +3092,7 @@ static int of_phy_leds(struct phy_device *phydev) err = of_phy_led(phydev, led); if (err) { of_node_put(led); + phy_leds_unregister(phydev); return err; } } @@ -3305,6 +3315,9 @@ static int phy_remove(struct device *dev) cancel_delayed_work_sync(&phydev->state_queue); + if (IS_ENABLED(CONFIG_PHYLIB_LEDS)) + phy_leds_unregister(phydev); + phydev->state = PHY_DOWN; sfp_bus_del_upstream(phydev->sfp_bus); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index a4111f1be375738033042f3c86fcba8197c528bb..5efdeb59f4b2972131b79453692591da481b8baa 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -188,6 +188,7 @@ static int phylink_interface_max_speed(phy_interface_t interface) case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_QSGMII: + case PHY_INTERFACE_MODE_QUSGMII: case PHY_INTERFACE_MODE_SGMII: case PHY_INTERFACE_MODE_GMII: return SPEED_1000; @@ -204,7 +205,6 @@ static int phylink_interface_max_speed(phy_interface_t interface) case PHY_INTERFACE_MODE_10GBASER: case PHY_INTERFACE_MODE_10GKR: case PHY_INTERFACE_MODE_USXGMII: - case PHY_INTERFACE_MODE_QUSGMII: return SPEED_10000; case PHY_INTERFACE_MODE_25GBASER: @@ -2226,6 +2226,12 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, ASSERT_RTNL(); 
if (pl->phydev) { + struct ethtool_link_ksettings phy_kset = *kset; + + linkmode_and(phy_kset.link_modes.advertising, + phy_kset.link_modes.advertising, + pl->supported); + /* We can rely on phylib for this update; we also do not need * to update the pl->link_config settings: * - the configuration returned via ksettings_get() will come @@ -2244,11 +2250,10 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, * the presence of a PHY, this should not be changed as that * should be determined from the media side advertisement. */ - return phy_ethtool_ksettings_set(pl->phydev, kset); + return phy_ethtool_ksettings_set(pl->phydev, &phy_kset); } config = pl->link_config; - /* Mask out unsupported advertisements */ linkmode_and(config.advertising, kset->link_modes.advertising, pl->supported); @@ -3293,6 +3298,41 @@ void phylink_decode_usxgmii_word(struct phylink_link_state *state, } EXPORT_SYMBOL_GPL(phylink_decode_usxgmii_word); +/** + * phylink_decode_usgmii_word() - decode the USGMII word from a MAC PCS + * @state: a pointer to a struct phylink_link_state. + * @lpa: a 16 bit value which stores the USGMII auto-negotiation word + * + * Helper for MAC PCS supporting the USGMII protocol and the auto-negotiation + * code word. Decode the USGMII code word and populate the corresponding fields + * (speed, duplex) into the phylink_link_state structure. The structure for this + * word is the same as the USXGMII word, except it only supports speeds up to + * 1Gbps. + */ +static void phylink_decode_usgmii_word(struct phylink_link_state *state, + uint16_t lpa) +{ + switch (lpa & MDIO_USXGMII_SPD_MASK) { + case MDIO_USXGMII_10: + state->speed = SPEED_10; + break; + case MDIO_USXGMII_100: + state->speed = SPEED_100; + break; + case MDIO_USXGMII_1000: + state->speed = SPEED_1000; + break; + default: + state->link = false; + return; + } + + if (lpa & MDIO_USXGMII_FULL_DUPLEX) + state->duplex = DUPLEX_FULL; + else + state->duplex = DUPLEX_HALF; +} + /** * phylink_mii_c22_pcs_decode_state() - Decode MAC PCS state from MII registers * @state: a pointer to a &struct phylink_link_state. 
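As a standalone illustration of the decode helper added in the hunk above, the sketch below mirrors the switch structure of phylink_decode_usgmii_word() in plain userspace C. The DEMO_* constants and struct demo_link_state are placeholders invented for this example; they are not the real MDIO_USXGMII_* definitions from include/uapi/linux/mdio.h nor struct phylink_link_state, and the sketch is only meant to show why the USGMII word stops at 1 Gb/s while USXGMII continues to higher speeds.

/* Illustrative sketch only; constants and types are placeholders, not the
 * kernel's MDIO_USXGMII_* values or phylink structures.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_SPD_MASK     0x0e00  /* placeholder speed field mask */
#define DEMO_SPD_10       0x0000
#define DEMO_SPD_100      0x0200
#define DEMO_SPD_1000     0x0400
#define DEMO_FULL_DUPLEX  0x1000  /* placeholder duplex bit */

struct demo_link_state {
	int speed;        /* Mb/s, 0 means the word was not usable */
	int full_duplex;  /* 1 = full duplex, 0 = half duplex */
};

/* Same shape as the helper in the hunk above: any speed code above 1000
 * falls through to the default case and the link is treated as down,
 * which is the behavioural difference from the USXGMII decode.
 */
static void demo_decode_word(struct demo_link_state *st, uint16_t lpa)
{
	switch (lpa & DEMO_SPD_MASK) {
	case DEMO_SPD_10:
		st->speed = 10;
		break;
	case DEMO_SPD_100:
		st->speed = 100;
		break;
	case DEMO_SPD_1000:
		st->speed = 1000;
		break;
	default:
		st->speed = 0;
		return;
	}

	st->full_duplex = !!(lpa & DEMO_FULL_DUPLEX);
}

int main(void)
{
	struct demo_link_state st = { 0 };

	demo_decode_word(&st, DEMO_SPD_1000 | DEMO_FULL_DUPLEX);
	printf("speed=%d full_duplex=%d\n", st.speed, st.full_duplex);
	return 0;
}

In the patch itself the same idea is expressed by routing PHY_INTERFACE_MODE_QUSGMII to the new helper (next hunk) instead of the SGMII decoder, so unsupported speed codes mark the link down rather than being misreported.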
@@ -3330,9 +3370,11 @@ void phylink_mii_c22_pcs_decode_state(struct phylink_link_state *state, case PHY_INTERFACE_MODE_SGMII: case PHY_INTERFACE_MODE_QSGMII: - case PHY_INTERFACE_MODE_QUSGMII: phylink_decode_sgmii_word(state, lpa); break; + case PHY_INTERFACE_MODE_QUSGMII: + phylink_decode_usgmii_word(state, lpa); + break; default: state->link = false; diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index d10606f257c43345e1d293dd462cdf77fc37497b..555b0b1e9a7893ac828fa2e9488d1d3d7df3c064 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1629,6 +1629,7 @@ static int team_init(struct net_device *dev) team->dev = dev; team_set_no_mode(team); + team->notifier_ctx = false; team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats); if (!team->pcpu_stats) @@ -3022,7 +3023,11 @@ static int team_device_event(struct notifier_block *unused, team_del_slave(port->team->dev, dev); break; case NETDEV_FEAT_CHANGE: - team_compute_features(port->team); + if (!port->team->notifier_ctx) { + port->team->notifier_ctx = true; + team_compute_features(port->team); + port->team->notifier_ctx = false; + } break; case NETDEV_PRECHANGEMTU: /* Forbid to change mtu of underlaying device */ diff --git a/drivers/net/tun.c b/drivers/net/tun.c index d4d0a41a905a7e7fb332af8279136041b9229f23..d75456adc62ac82a4f9da10c50c48266c5a9a0c0 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1977,6 +1977,14 @@ napi_busy: int queue_len; spin_lock_bh(&queue->lock); + + if (unlikely(tfile->detached)) { + spin_unlock_bh(&queue->lock); + rcu_read_unlock(); + err = -EBUSY; + goto free_skb; + } + __skb_queue_tail(queue, skb); queue_len = skb_queue_len(queue); spin_unlock(&queue->lock); @@ -2512,6 +2520,13 @@ build: if (tfile->napi_enabled) { queue = &tfile->sk.sk_write_queue; spin_lock(&queue->lock); + + if (unlikely(tfile->detached)) { + spin_unlock(&queue->lock); + kfree_skb(skb); + return -EBUSY; + } + __skb_queue_tail(queue, skb); spin_unlock(&queue->lock); ret = 1; diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 6ce8f4f0c70e898c5ebb7198c46ac6a38f5b588f..db05622f1f703ec6fc147935c296640c55499a4b 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -181,9 +181,12 @@ static u32 cdc_ncm_check_tx_max(struct usbnet *dev, u32 new_tx) else min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth32); - max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_TX, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize)); - if (max == 0) + if (le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) == 0) max = CDC_NCM_NTB_MAX_SIZE_TX; /* dwNtbOutMaxSize not set */ + else + max = clamp_t(u32, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize), + USB_CDC_NCM_NTB_MIN_OUT_SIZE, + CDC_NCM_NTB_MAX_SIZE_TX); /* some devices set dwNtbOutMaxSize too low for the above default */ min = min(min, max); @@ -1244,6 +1247,9 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) * further. */ if (skb_out == NULL) { + /* If even the smallest allocation fails, abort. 
*/ + if (ctx->tx_curr_size == USB_CDC_NCM_NTB_MIN_OUT_SIZE) + goto alloc_failed; ctx->tx_low_mem_max_cnt = min(ctx->tx_low_mem_max_cnt + 1, (unsigned)CDC_NCM_LOW_MEM_MAX_CNT); ctx->tx_low_mem_val = ctx->tx_low_mem_max_cnt; @@ -1262,13 +1268,8 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) skb_out = alloc_skb(ctx->tx_curr_size, GFP_ATOMIC); /* No allocation possible so we will abort */ - if (skb_out == NULL) { - if (skb != NULL) { - dev_kfree_skb_any(skb); - dev->net->stats.tx_dropped++; - } - goto exit_no_skb; - } + if (!skb_out) + goto alloc_failed; ctx->tx_low_mem_val--; } if (ctx->is_ndp16) { @@ -1461,6 +1462,11 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) return skb_out; +alloc_failed: + if (skb) { + dev_kfree_skb_any(skb); + dev->net->stats.tx_dropped++; + } exit_no_skb: /* Start timer, if there is a remaining non-empty skb */ if (ctx->tx_curr_skb != NULL && n > 0) diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 571e37e67f9ce1706de16d3f4a4f9c883a6e8992..2e7c7b0cdc549e6b9e8799ea38083dca982c6db9 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1220,7 +1220,9 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x05c6, 0x9080, 8)}, {QMI_FIXED_INTF(0x05c6, 0x9083, 3)}, {QMI_FIXED_INTF(0x05c6, 0x9084, 4)}, + {QMI_QUIRK_SET_DTR(0x05c6, 0x9091, 2)}, /* Compal RXM-G1 */ {QMI_FIXED_INTF(0x05c6, 0x90b2, 3)}, /* ublox R410M */ + {QMI_QUIRK_SET_DTR(0x05c6, 0x90db, 2)}, /* Compal RXM-G1 */ {QMI_FIXED_INTF(0x05c6, 0x920d, 0)}, {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */ @@ -1325,7 +1327,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x2001, 0x7e3d, 4)}, /* D-Link DWM-222 A2 */ {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */ {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */ - {QMI_FIXED_INTF(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */ + {QMI_QUIRK_SET_DTR(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */ {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index a12ae26db0e223e57b691b0dc0728b97734d0659..486b5849033dc96cf2ff3ded546c7736f840f61d 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -205,6 +205,8 @@ struct control_buf { __virtio16 vid; __virtio64 offloads; struct virtio_net_ctrl_rss rss; + struct virtio_net_ctrl_coal_tx coal_tx; + struct virtio_net_ctrl_coal_rx coal_rx; }; struct virtnet_info { @@ -1868,6 +1870,38 @@ static int virtnet_poll(struct napi_struct *napi, int budget) return received; } +static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index) +{ + virtnet_napi_tx_disable(&vi->sq[qp_index].napi); + napi_disable(&vi->rq[qp_index].napi); + xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq); +} + +static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index) +{ + struct net_device *dev = vi->dev; + int err; + + err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index, + vi->rq[qp_index].napi.napi_id); + if (err < 0) + return err; + + err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq, + MEM_TYPE_PAGE_SHARED, NULL); + if (err < 0) + goto err_xdp_reg_mem_model; + + virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi); + virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, 
&vi->sq[qp_index].napi); + + return 0; + +err_xdp_reg_mem_model: + xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq); + return err; +} + static int virtnet_open(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); @@ -1881,22 +1915,20 @@ static int virtnet_open(struct net_device *dev) if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) schedule_delayed_work(&vi->refill, 0); - err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i, vi->rq[i].napi.napi_id); + err = virtnet_enable_queue_pair(vi, i); if (err < 0) - return err; - - err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq, - MEM_TYPE_PAGE_SHARED, NULL); - if (err < 0) { - xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq); - return err; - } - - virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); - virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi); + goto err_enable_qp; } return 0; + +err_enable_qp: + disable_delayed_refill(vi); + cancel_delayed_work_sync(&vi->refill); + + for (i--; i >= 0; i--) + virtnet_disable_queue_pair(vi, i); + return err; } static int virtnet_poll_tx(struct napi_struct *napi, int budget) @@ -2305,11 +2337,8 @@ static int virtnet_close(struct net_device *dev) /* Make sure refill_work doesn't re-enable napi! */ cancel_delayed_work_sync(&vi->refill); - for (i = 0; i < vi->max_queue_pairs; i++) { - virtnet_napi_tx_disable(&vi->sq[i].napi); - napi_disable(&vi->rq[i].napi); - xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq); - } + for (i = 0; i < vi->max_queue_pairs; i++) + virtnet_disable_queue_pair(vi, i); return 0; } @@ -2907,12 +2936,10 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi, struct ethtool_coalesce *ec) { struct scatterlist sgs_tx, sgs_rx; - struct virtio_net_ctrl_coal_tx coal_tx; - struct virtio_net_ctrl_coal_rx coal_rx; - coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs); - coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames); - sg_init_one(&sgs_tx, &coal_tx, sizeof(coal_tx)); + vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs); + vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames); + sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, VIRTIO_NET_CTRL_NOTF_COAL_TX_SET, @@ -2923,9 +2950,9 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi, vi->tx_usecs = ec->tx_coalesce_usecs; vi->tx_max_packets = ec->tx_max_coalesced_frames; - coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs); - coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames); - sg_init_one(&sgs_rx, &coal_rx, sizeof(coal_rx)); + vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs); + vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames); + sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, VIRTIO_NET_CTRL_NOTF_COAL_RX_SET, diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index d62a904d2e422d7e48cf67cba829df0568e99b01..56326f38fe8a30f45e88cdce7efd43e18041e52a 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -384,6 +384,9 @@ static int lapbeth_new_device(struct net_device *dev) ASSERT_RTNL(); + if (dev->type != ARPHRD_ETHER) + return -EINVAL; + ndev = alloc_netdev(sizeof(*lapbeth), "lapb%d", NET_NAME_UNKNOWN, lapbeth_setup); if (!ndev) diff --git a/drivers/net/wireless/broadcom/b43/b43.h b/drivers/net/wireless/broadcom/b43/b43.h index 9fc7c088a539e3728631ef41bf49004e60fa17f5..67b4bac048e585bd336196b8004fa6555d6abd0c 100644 --- 
a/drivers/net/wireless/broadcom/b43/b43.h +++ b/drivers/net/wireless/broadcom/b43/b43.h @@ -651,7 +651,7 @@ struct b43_iv { union { __be16 d16; __be32 d32; - } data __packed; + } __packed data; } __packed; diff --git a/drivers/net/wireless/broadcom/b43legacy/b43legacy.h b/drivers/net/wireless/broadcom/b43legacy/b43legacy.h index 6b0cec467938fc555f23f0d464086afec69181d9..f49365d14619f39d05535e0b82b4e7f2228fbfa5 100644 --- a/drivers/net/wireless/broadcom/b43legacy/b43legacy.h +++ b/drivers/net/wireless/broadcom/b43legacy/b43legacy.h @@ -379,7 +379,7 @@ struct b43legacy_iv { union { __be16 d16; __be32 d32; - } data __packed; + } __packed data; } __packed; #define B43legacy_PHYMODE(phytype) (1 << (phytype)) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index ff710b0b5071a19b60e8f5d3a7fe1e22cd7b75a9..00679a990e3dac9bc9b71c0941a737ec0338ccc4 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -1039,6 +1039,11 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func, struct brcmf_sdio_dev *sdiodev; struct brcmf_bus *bus_if; + if (!id) { + dev_err(&func->dev, "Error no sdio_device_id passed for %x:%x\n", func->vendor, func->device); + return -ENODEV; + } + brcmf_dbg(SDIO, "Enter\n"); brcmf_dbg(SDIO, "Class=%x\n", func->class); brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 59f3e9c5e13907bec4902210ddc7024829b2024d..80220685f5e4514782a1179d3195cbd34cd890b3 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -2394,6 +2394,9 @@ static void brcmf_pcie_debugfs_create(struct device *dev) } #endif +/* Forward declaration for pci_match_id() call */ +static const struct pci_device_id brcmf_pcie_devid_table[]; + static int brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) { @@ -2404,6 +2407,14 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) struct brcmf_core *core; struct brcmf_bus *bus; + if (!id) { + id = pci_match_id(brcmf_pcie_devid_table, pdev); + if (!id) { + pci_err(pdev, "Error could not find pci_device_id for %x:%x\n", pdev->vendor, pdev->device); + return -ENODEV; + } + } + brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device); ret = -ENOMEM; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index 246843aeb6964af25f5fc9b6a64d0b9e169dc4de..2178675ae1a44d0e04afdd4d26ad6aa0625da8cf 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c @@ -1331,6 +1331,9 @@ brcmf_usb_disconnect_cb(struct brcmf_usbdev_info *devinfo) brcmf_usb_detach(devinfo); } +/* Forward declaration for usb_match_id() call */ +static const struct usb_device_id brcmf_usb_devid_table[]; + static int brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { @@ -1342,6 +1345,14 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) u32 num_of_eps; u8 endpoint_num, ep; + if (!id) { + id = usb_match_id(intf, brcmf_usb_devid_table); + if (!id) { + dev_err(&intf->dev, "Error could not find matching usb_device_id\n"); + return -ENODEV; + } + } + brcmf_dbg(USB, "Enter 0x%04x:0x%04x\n", id->idVendor, 
id->idProduct); devinfo = kzalloc(sizeof(*devinfo), GFP_ATOMIC); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index 5f4a51310add082ecd5c931883ff4926fae0ed5d..cb9181f05501130b3cd6fb9cf348996397f6082c 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -38,7 +38,7 @@ static const struct dmi_system_id dmi_ppag_approved_list[] = { }, { .ident = "ASUS", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek COMPUTER INC."), + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), }, }, {} diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index d9faaae01abd22b149566dfbd6eda80672c59ea6..55219974b92bf73c6ab70aa3e53ca4408257753a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -1664,14 +1664,10 @@ static __le32 iwl_get_mon_reg(struct iwl_fw_runtime *fwrt, u32 alloc_id, } static void * -iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt, - struct iwl_dump_ini_region_data *reg_data, +iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt, u32 alloc_id, struct iwl_fw_ini_monitor_dump *data, const struct iwl_fw_mon_regs *addrs) { - struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; - u32 alloc_id = le32_to_cpu(reg->dram_alloc_id); - if (!iwl_trans_grab_nic_access(fwrt->trans)) { IWL_ERR(fwrt, "Failed to get monitor header\n"); return NULL; @@ -1702,8 +1698,10 @@ iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt, void *data, u32 data_len) { struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data; + struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; + u32 alloc_id = le32_to_cpu(reg->dram_alloc_id); - return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump, + return iwl_dump_ini_mon_fill_header(fwrt, alloc_id, mon_dump, &fwrt->trans->cfg->mon_dram_regs); } @@ -1713,8 +1711,10 @@ iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt, void *data, u32 data_len) { struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data; + struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; + u32 alloc_id = le32_to_cpu(reg->internal_buffer.alloc_id); - return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump, + return iwl_dump_ini_mon_fill_header(fwrt, alloc_id, mon_dump, &fwrt->trans->cfg->mon_smem_regs); } @@ -1725,7 +1725,10 @@ iwl_dump_ini_mon_dbgi_fill_header(struct iwl_fw_runtime *fwrt, { struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data; - return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump, + return iwl_dump_ini_mon_fill_header(fwrt, + /* no offset calculation later */ + IWL_FW_INI_ALLOCATION_ID_DBGC1, + mon_dump, &fwrt->trans->cfg->mon_dbgi_regs); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 37aa4676dc94852a1ae6add76d52d0ab5f8c8912..6d1007f24b4aa91ba33e4e60d1019eb309a35984 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -2732,17 +2732,13 @@ static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait, if (wowlan_info_ver < 2) { struct iwl_wowlan_info_notif_v1 *notif_v1 = (void *)pkt->data; - notif = kmemdup(notif_v1, - offsetofend(struct iwl_wowlan_info_notif, - received_beacons), - GFP_ATOMIC); - + notif = kmemdup(notif_v1, sizeof(*notif), GFP_ATOMIC); if (!notif) return false; notif->tid_tear_down = notif_v1->tid_tear_down; notif->station_id = 
notif_v1->station_id; - + memset_after(notif, 0, station_id); } else { notif = (void *)pkt->data; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c index 3963a0d4ed0427ba004e0bb5f846df5ab4eb8332..652a603c4500efed3be6162d4f26a520eeda3f93 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c @@ -526,6 +526,11 @@ iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif, rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->deflink.ap_sta_id]); + if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { + rcu_read_unlock(); + return PTR_ERR_OR_ZERO(sta); + } + if (sta->mfp && (peer->ftm.trigger_based || peer->ftm.non_trigger_based)) FTM_PUT_FLAG(PMF); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index b35c96cf7ad24d5f6f2a3da03568a7db3db4fe6c..205c09bc986342ff8d98866310a4056dd0366602 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -1091,7 +1091,7 @@ static const struct dmi_system_id dmi_tas_approved_list[] = { }, { .ident = "LENOVO", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Lenovo"), + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), }, }, { .ident = "DELL", @@ -1727,8 +1727,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) iwl_mvm_tas_init(mvm); iwl_mvm_leds_sync(mvm); - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_RFIM_SUPPORT)) { + if (iwl_rfi_supported(mvm)) { if (iwl_mvm_eval_dsm_rfi(mvm) == DSM_VALUE_RFI_ENABLE) iwl_rfi_send_config_cmd(mvm, NULL); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c index eb828de40a3c6d9491dca4839fea28ce66911b4e..3814915cb1a67df435b3b84a973af9fa9e29505f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c @@ -123,11 +123,13 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (mvmvif->link[i]->phy_ctxt) count++; - /* FIXME: IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM should be - * defined per HW - */ - if (count >= IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM) - return -EINVAL; + if (vif->type == NL80211_IFTYPE_AP) { + if (count > mvm->fw->ucode_capa.num_beacons) + return -EOPNOTSUPP; + /* this should be per HW or such */ + } else if (count >= IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM) { + return -EOPNOTSUPP; + } } /* Catch early if driver tries to activate or deactivate a link diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 0f01b62357c6fded37ad07cfcacbd7eec8cf8118..17f788a5ff6ba686e0d9743c3e8677dfecf9dc6d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2022 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -3607,7 +3607,8 @@ static bool iwl_mvm_vif_conf_from_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { - unsigned int i; + struct ieee80211_link_sta *link_sta; + unsigned int link_id; /* Beacon interval check - firmware will crash if the beacon * interval is less than 16. 
We can't avoid connecting at all, @@ -3616,14 +3617,11 @@ static bool iwl_mvm_vif_conf_from_sta(struct iwl_mvm *mvm, * wpa_s will blocklist the AP... */ - for_each_set_bit(i, (unsigned long *)&sta->valid_links, - IEEE80211_MLD_MAX_NUM_LINKS) { - struct ieee80211_link_sta *link_sta = - link_sta_dereference_protected(sta, i); + for_each_sta_active_link(vif, sta, link_sta, link_id) { struct ieee80211_bss_conf *link_conf = - link_conf_dereference_protected(vif, i); + link_conf_dereference_protected(vif, link_id); - if (!link_conf || !link_sta) + if (!link_conf) continue; if (link_conf->beacon_int < IWL_MVM_MIN_BEACON_INTERVAL_TU) { @@ -3645,24 +3643,23 @@ static void iwl_mvm_vif_set_he_support(struct ieee80211_hw *hw, bool is_sta) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - unsigned int i; + struct ieee80211_link_sta *link_sta; + unsigned int link_id; - for_each_set_bit(i, (unsigned long *)&sta->valid_links, - IEEE80211_MLD_MAX_NUM_LINKS) { - struct ieee80211_link_sta *link_sta = - link_sta_dereference_protected(sta, i); + for_each_sta_active_link(vif, sta, link_sta, link_id) { struct ieee80211_bss_conf *link_conf = - link_conf_dereference_protected(vif, i); + link_conf_dereference_protected(vif, link_id); - if (!link_conf || !link_sta || !mvmvif->link[i]) + if (!link_conf || !mvmvif->link[link_id]) continue; link_conf->he_support = link_sta->he_cap.has_he; if (is_sta) { - mvmvif->link[i]->he_ru_2mhz_block = false; + mvmvif->link[link_id]->he_ru_2mhz_block = false; if (link_sta->he_cap.has_he) - iwl_mvm_check_he_obss_narrow_bw_ru(hw, vif, i, + iwl_mvm_check_he_obss_narrow_bw_ru(hw, vif, + link_id, link_conf); } } @@ -3675,6 +3672,7 @@ iwl_mvm_sta_state_notexist_to_none(struct iwl_mvm *mvm, struct iwl_mvm_sta_state_ops *callbacks) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_link_sta *link_sta; unsigned int i; int ret; @@ -3699,15 +3697,9 @@ iwl_mvm_sta_state_notexist_to_none(struct iwl_mvm *mvm, NL80211_TDLS_SETUP); } - for (i = 0; i < ARRAY_SIZE(sta->link); i++) { - struct ieee80211_link_sta *link_sta; - - link_sta = link_sta_dereference_protected(sta, i); - if (!link_sta) - continue; - + for_each_sta_active_link(vif, sta, link_sta, i) link_sta->agg.max_rc_amsdu_len = 1; - } + ieee80211_sta_recalc_aggregates(sta); if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) @@ -3725,7 +3717,8 @@ iwl_mvm_sta_state_auth_to_assoc(struct ieee80211_hw *hw, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); - unsigned int i; + struct ieee80211_link_sta *link_sta; + unsigned int link_id; lockdep_assert_held(&mvm->mutex); @@ -3751,14 +3744,13 @@ iwl_mvm_sta_state_auth_to_assoc(struct ieee80211_hw *hw, if (!mvm->mld_api_is_used) goto out; - for_each_set_bit(i, (unsigned long *)&sta->valid_links, - IEEE80211_MLD_MAX_NUM_LINKS) { + for_each_sta_active_link(vif, sta, link_sta, link_id) { struct ieee80211_bss_conf *link_conf = - link_conf_dereference_protected(vif, i); + link_conf_dereference_protected(vif, link_id); if (WARN_ON(!link_conf)) return -EINVAL; - if (!mvmvif->link[i]) + if (!mvmvif->link[link_id]) continue; iwl_mvm_link_changed(mvm, vif, link_conf, @@ -3889,6 +3881,9 @@ int iwl_mvm_mac_sta_state_common(struct ieee80211_hw *hw, * from the AP now. 
*/ iwl_mvm_reset_cca_40mhz_workaround(mvm, vif); + + /* Also free dup data just in case any assertions below fail */ + kfree(mvm_sta->dup_data); } mutex_lock(&mvm->mutex); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c index fbc2d5ed100690a583f750e0db0a91603aa7881c..7fb66c5709596af2ffa7c1168e1bcf96879643a4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c @@ -906,11 +906,12 @@ iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw, n_active++; } - if (vif->type == NL80211_IFTYPE_AP && - n_active > mvm->fw->ucode_capa.num_beacons) - return -EOPNOTSUPP; - else if (n_active > 1) + if (vif->type == NL80211_IFTYPE_AP) { + if (n_active > mvm->fw->ucode_capa.num_beacons) + return -EOPNOTSUPP; + } else if (n_active > 1) { return -EOPNOTSUPP; + } } for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c index 0bfdf446275508a1c82b8a3335048af2a1591c3a..85a4ce8449ade824dc067961ac2938b4e4cfc661 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c @@ -667,15 +667,15 @@ int iwl_mvm_mld_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ret = iwl_mvm_mld_alloc_sta_links(mvm, vif, sta); if (ret) return ret; - } - spin_lock_init(&mvm_sta->lock); + spin_lock_init(&mvm_sta->lock); - if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) - ret = iwl_mvm_alloc_sta_after_restart(mvm, vif, sta); - else ret = iwl_mvm_sta_init(mvm, vif, sta, IWL_MVM_INVALID_STA, STATION_TYPE_PEER); + } else { + ret = iwl_mvm_alloc_sta_after_restart(mvm, vif, sta); + } + if (ret) goto err; @@ -728,7 +728,7 @@ int iwl_mvm_mld_update_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct ieee80211_link_sta *link_sta; unsigned int link_id; - int ret = 0; + int ret = -EINVAL; lockdep_assert_held(&mvm->mutex); @@ -791,8 +791,6 @@ int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, lockdep_assert_held(&mvm->mutex); - kfree(mvm_sta->dup_data); - /* flush its queues here since we are freeing mvm_sta */ for_each_sta_active_link(vif, sta, link_sta, link_id) { struct iwl_mvm_link_sta *mvm_link_sta = diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 6e7470d3a826d35ded09d840985893907205d671..9e5008e0e47f5c1824854c654a101cda4600caa3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -2347,6 +2347,7 @@ int iwl_mvm_mld_update_sta_keys(struct iwl_mvm *mvm, u32 old_sta_mask, u32 new_sta_mask); +bool iwl_rfi_supported(struct iwl_mvm *mvm); int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_table); struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index 6d18a1fd649b90e52006f7869663541e7320360a..fdf60afb0f3f26ec9e96f9132aacf45d61564ae4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -445,6 +445,11 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, struct iwl_mcc_update_resp *mcc_resp = (void *)pkt->data; n_channels = __le32_to_cpu(mcc_resp->n_channels); + if (iwl_rx_packet_payload_len(pkt) 
!= + struct_size(mcc_resp, channels, n_channels)) { + resp_cp = ERR_PTR(-EINVAL); + goto exit; + } resp_len = sizeof(struct iwl_mcc_update_resp) + n_channels * sizeof(__le32); resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL); @@ -456,6 +461,11 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, struct iwl_mcc_update_resp_v3 *mcc_resp_v3 = (void *)pkt->data; n_channels = __le32_to_cpu(mcc_resp_v3->n_channels); + if (iwl_rx_packet_payload_len(pkt) != + struct_size(mcc_resp_v3, channels, n_channels)) { + resp_cp = ERR_PTR(-EINVAL); + goto exit; + } resp_len = sizeof(struct iwl_mcc_update_resp) + n_channels * sizeof(__le32); resp_cp = kzalloc(resp_len, GFP_KERNEL); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c index bb77bc9aa8218af83b21fa8b353d118b12c3685b..2ecd32bed752ff55734d5e9f78f36efc213cb107 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2020 - 2021 Intel Corporation + * Copyright (C) 2020 - 2022 Intel Corporation */ #include "mvm.h" @@ -70,6 +70,16 @@ static const struct iwl_rfi_lut_entry iwl_rfi_table[IWL_RFI_LUT_SIZE] = { PHY_BAND_6, PHY_BAND_6,}}, }; +bool iwl_rfi_supported(struct iwl_mvm *mvm) +{ + /* The feature depends on a platform bugfix, so for now + * it's always disabled. + * When the platform support detection is implemented we should + * check FW TLV and platform support instead. + */ + return false; +} + int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_table) { int ret; @@ -81,7 +91,7 @@ int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_t .len[0] = sizeof(cmd), }; - if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RFIM_SUPPORT)) + if (!iwl_rfi_supported(mvm)) return -EOPNOTSUPP; lockdep_assert_held(&mvm->mutex); @@ -113,7 +123,7 @@ struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm) .flags = CMD_WANT_SKB, }; - if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RFIM_SUPPORT)) + if (!iwl_rfi_supported(mvm)) return ERR_PTR(-EOPNOTSUPP); mutex_lock(&mvm->mutex); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index a4c1e3bf4ff1de8db144095f157984ea11943999..9a20468345e49db727cfe087da652f73d71b7026 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -2691,6 +2691,8 @@ static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta, return; lq_sta = mvm_sta; + + spin_lock_bh(&lq_sta->pers.lock); iwl_mvm_hwrate_to_tx_rate_v1(lq_sta->last_rate_n_flags, info->band, &info->control.rates[0]); info->control.rates[0].count = 1; @@ -2705,6 +2707,7 @@ static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta, iwl_mvm_hwrate_to_tx_rate_v1(last_ucode_rate, info->band, &txrc->reported_rate); } + spin_unlock_bh(&lq_sta->pers.lock); } static void *rs_drv_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta, @@ -3261,11 +3264,11 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, /* If it's locked we are in middle of init flow * just wait for next tx status to update the lq_sta data */ - if (!spin_trylock(&mvmsta->deflink.lq_sta.rs_drv.pers.lock)) + if (!spin_trylock_bh(&mvmsta->deflink.lq_sta.rs_drv.pers.lock)) return; __iwl_mvm_rs_tx_status(mvm, sta, tid, info, ndp); - spin_unlock(&mvmsta->deflink.lq_sta.rs_drv.pers.lock); + 
spin_unlock_bh(&mvmsta->deflink.lq_sta.rs_drv.pers.lock); } #ifdef CONFIG_MAC80211_DEBUGFS @@ -4114,9 +4117,9 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, } else { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - spin_lock(&mvmsta->deflink.lq_sta.rs_drv.pers.lock); + spin_lock_bh(&mvmsta->deflink.lq_sta.rs_drv.pers.lock); rs_drv_rate_init(mvm, sta, band); - spin_unlock(&mvmsta->deflink.lq_sta.rs_drv.pers.lock); + spin_unlock_bh(&mvmsta->deflink.lq_sta.rs_drv.pers.lock); } } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index e1d02c260e69d504a35f6ddf3cf8d04b23751a9a..6226e4e54a51d2775a19174f7683d2c1db5ec729 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -691,6 +691,11 @@ void iwl_mvm_reorder_timer_expired(struct timer_list *t) rcu_read_lock(); sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[sta_id]); + if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { + rcu_read_unlock(); + goto out; + } + mvmsta = iwl_mvm_sta_from_mac80211(sta); /* SN is set to the last expired frame + 1 */ @@ -712,6 +717,8 @@ void iwl_mvm_reorder_timer_expired(struct timer_list *t) entries[index].e.reorder_time + 1 + RX_REORDER_BUF_TIMEOUT_MQ); } + +out: spin_unlock(&buf->lock); } @@ -2512,7 +2519,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); /* Unblock BCAST / MCAST station */ iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false); - cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork); + cancel_delayed_work(&mvm->cs_tx_unblock_dwork); } } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 5469d634e28998a12b2c9bd2fefde11dbddbdd0a..05a54a69c1357c59f121803204c8d7d448d4c0ef 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -281,7 +281,7 @@ static void iwl_mvm_rx_agg_session_expired(struct timer_list *t) * A-MPDU and hence the timer continues to run. Then, the * timer expires and sta is NULL. 
*/ - if (!sta) + if (IS_ERR_OR_NULL(sta)) goto unlock; mvm_sta = iwl_mvm_sta_from_mac80211(sta); @@ -2089,9 +2089,6 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); - if (iwl_mvm_has_new_rx_api(mvm)) - kfree(mvm_sta->dup_data); - ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); if (ret) return ret; @@ -3785,6 +3782,9 @@ static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm, u8 sta_id = mvmvif->deflink.ap_sta_id; sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); + if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) + return NULL; + return sta->addr; } @@ -3822,6 +3822,11 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, if (keyconf->cipher == WLAN_CIPHER_SUITE_TKIP) { addr = iwl_mvm_get_mac_addr(mvm, vif, sta); + if (!addr) { + IWL_ERR(mvm, "Failed to find mac address\n"); + return -EINVAL; + } + /* get phase 1 key from mac80211 */ ieee80211_get_key_rx_seq(keyconf, 0, &seq); ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 10d7178f10718ebe350494c40235368eacfeeb6c..00719e1304386454f350677322764e46483fab57 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -1875,7 +1875,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); - if (WARN_ON_ONCE(!sta || !sta->wme)) { + if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta) || !sta->wme)) { rcu_read_unlock(); return; } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index dba1123948386c2199123f18f006d957b77153ff..79115eb1c2852bb13f63172a30bca25d03c3b9bf 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -548,6 +548,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_DEV_INFO(0x54F0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), IWL_DEV_INFO(0x7A70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), IWL_DEV_INFO(0x7A70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), + IWL_DEV_INFO(0x7AF0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), + IWL_DEV_INFO(0x7AF0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name), IWL_DEV_INFO(0x7E40, 0x1691, iwl_cfg_ma_a0_gf4_a0, iwl_ax411_killer_1690s_name), diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index da1d17b73a2553c39556d64a60d1b014de8f233e..64002484ccadbc27ece0021b8fcaff664433a626 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -914,7 +914,10 @@ void mt7615_mac_sta_poll(struct mt7615_dev *dev) msta = list_first_entry(&sta_poll_list, struct mt7615_sta, poll_list); + + spin_lock_bh(&dev->sta_poll_lock); list_del_init(&msta->poll_list); + spin_unlock_bh(&dev->sta_poll_lock); addr = mt7615_mac_wtbl_addr(dev, msta->wcid.idx) + 19 * 4; diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h index a5ec0f6313850a54c7cb5170b7d9c051b8d554ca..fabf637bdf7f99f5d6b6d59d1c1dd6d7b7a1ec90 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h 
@@ -173,7 +173,7 @@ enum { #define MT_TXS5_MPDU_TX_CNT GENMASK(31, 23) #define MT_TXS6_MPDU_FAIL_CNT GENMASK(31, 23) - +#define MT_TXS7_MPDU_RETRY_BYTE GENMASK(22, 0) #define MT_TXS7_MPDU_RETRY_CNT GENMASK(31, 23) /* RXD DW0 */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c index ee0fbfcd07d6469d94ce9a3f62c6868ef1fb753b..d39a3cc5e381f8b01e9ca2c8d0559215969ebeba 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c @@ -608,7 +608,8 @@ bool mt76_connac2_mac_fill_txs(struct mt76_dev *dev, struct mt76_wcid *wcid, /* PPDU based reporting */ if (FIELD_GET(MT_TXS0_TXS_FORMAT, txs) > 1) { stats->tx_bytes += - le32_get_bits(txs_data[5], MT_TXS5_MPDU_TX_BYTE); + le32_get_bits(txs_data[5], MT_TXS5_MPDU_TX_BYTE) - + le32_get_bits(txs_data[7], MT_TXS7_MPDU_RETRY_BYTE); stats->tx_packets += le32_get_bits(txs_data[5], MT_TXS5_MPDU_TX_CNT); stats->tx_failed += diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c index 130eb7b4fd914c02f9b938b49e945365c36a1122..9b0f6053e0fa6280a58c48e029a91d8a0061d4e3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c @@ -1004,10 +1004,10 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi, { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_vif *vif = info->control.vif; - struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2; u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0; bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP; + struct mt7996_vif *mvif; u16 tx_count = 15; u32 val; bool beacon = !!(changed & (BSS_CHANGED_BEACON | @@ -1015,7 +1015,8 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi, bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP | BSS_CHANGED_FILS_DISCOVERY)); - if (vif) { + mvif = vif ? 
(struct mt7996_vif *)vif->drv_priv : NULL; + if (mvif) { omac_idx = mvif->mt76.omac_idx; wmm_idx = mvif->mt76.wmm_idx; band_idx = mvif->mt76.band_idx; @@ -1081,14 +1082,18 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi, struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; bool mcast = ieee80211_is_data(hdr->frame_control) && is_multicast_ether_addr(hdr->addr1); - u8 idx = mvif->basic_rates_idx; + u8 idx = MT7996_BASIC_RATES_TBL; - if (mcast && mvif->mcast_rates_idx) - idx = mvif->mcast_rates_idx; - else if (beacon && mvif->beacon_rates_idx) - idx = mvif->beacon_rates_idx; + if (mvif) { + if (mcast && mvif->mcast_rates_idx) + idx = mvif->mcast_rates_idx; + else if (beacon && mvif->beacon_rates_idx) + idx = mvif->beacon_rates_idx; + else + idx = mvif->basic_rates_idx; + } - txwi[6] |= FIELD_PREP(MT_TXD6_TX_RATE, idx); + txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TX_RATE, idx)); txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE); } } diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 8eafbf1cee718dc31045bdfb71efe94dfb0a6a55..808c1c895113dacf4190674fa6aae9e7ffe121e9 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -1803,6 +1803,7 @@ struct rtl8xxxu_priv { u32 rege9c; u32 regeb4; u32 regebc; + u32 regrcr; int next_mbox; int nr_out_eps; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index fd8c8c6d53d60b771da8ff0332c669167b203be4..831639d73657be9c338ec21d6cf4108331c40ac3 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -4171,6 +4171,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) RCR_ACCEPT_MGMT_FRAME | RCR_HTC_LOC_CTRL | RCR_APPEND_PHYSTAT | RCR_APPEND_ICV | RCR_APPEND_MIC; rtl8xxxu_write32(priv, REG_RCR, val32); + priv->regrcr = val32; if (fops->init_reg_rxfltmap) { /* Accept all data frames */ @@ -6501,7 +6502,7 @@ static void rtl8xxxu_configure_filter(struct ieee80211_hw *hw, unsigned int *total_flags, u64 multicast) { struct rtl8xxxu_priv *priv = hw->priv; - u32 rcr = rtl8xxxu_read32(priv, REG_RCR); + u32 rcr = priv->regrcr; dev_dbg(&priv->udev->dev, "%s: changed_flags %08x, total_flags %08x\n", __func__, changed_flags, *total_flags); @@ -6547,6 +6548,7 @@ static void rtl8xxxu_configure_filter(struct ieee80211_hw *hw, */ rtl8xxxu_write32(priv, REG_RCR, rcr); + priv->regrcr = rcr; *total_flags &= (FIF_ALLMULTI | FIF_FCSFAIL | FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL | FIF_OTHER_BSS | FIF_PSPOLL | diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c index 7aa6edad0d012738887d45febf34a54c8065d6e5..144618bb94c8654ad731dc872286dc068d0df801 100644 --- a/drivers/net/wireless/realtek/rtw88/mac80211.c +++ b/drivers/net/wireless/realtek/rtw88/mac80211.c @@ -88,15 +88,6 @@ static int rtw_ops_config(struct ieee80211_hw *hw, u32 changed) } } - if (changed & IEEE80211_CONF_CHANGE_PS) { - if (hw->conf.flags & IEEE80211_CONF_PS) { - rtwdev->ps_enabled = true; - } else { - rtwdev->ps_enabled = false; - rtw_leave_lps(rtwdev); - } - } - if (changed & IEEE80211_CONF_CHANGE_CHANNEL) rtw_set_channel(rtwdev); @@ -213,6 +204,7 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw, config |= PORT_SET_BCN_CTRL; rtw_vif_port_config(rtwdev, rtwvif, config); rtw_core_port_switch(rtwdev, vif); + rtw_recalc_lps(rtwdev, vif); 
mutex_unlock(&rtwdev->mutex); @@ -244,6 +236,7 @@ static void rtw_ops_remove_interface(struct ieee80211_hw *hw, config |= PORT_SET_BCN_CTRL; rtw_vif_port_config(rtwdev, rtwvif, config); clear_bit(rtwvif->port, rtwdev->hw_port); + rtw_recalc_lps(rtwdev, NULL); mutex_unlock(&rtwdev->mutex); } @@ -438,6 +431,9 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_ERP_SLOT) rtw_conf_tx(rtwdev, rtwvif); + if (changed & BSS_CHANGED_PS) + rtw_recalc_lps(rtwdev, NULL); + rtw_vif_port_config(rtwdev, rtwvif, config); mutex_unlock(&rtwdev->mutex); @@ -918,7 +914,7 @@ static void rtw_ops_sta_rc_update(struct ieee80211_hw *hw, struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; if (changed & IEEE80211_RC_BW_CHANGED) - rtw_update_sta_info(rtwdev, si, true); + ieee80211_queue_work(rtwdev->hw, &si->rc_work); } const struct ieee80211_ops rtw_ops = { diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index 5bf6b45815578feb766453db223e7a71c93119e7..9447a3aae3b5e16d4beb86c2129e283847c5fc70 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -271,8 +271,8 @@ static void rtw_watch_dog_work(struct work_struct *work) * more than two stations associated to the AP, then we can not enter * lps, because fw does not handle the overlapped beacon interval * - * mac80211 should iterate vifs and determine if driver can enter - * ps by passing IEEE80211_CONF_PS to us, all we need to do is to + * rtw_recalc_lps() iterate vifs and determine if driver can enter + * ps by vif->type and vif->cfg.ps, all we need to do here is to * get that vif and check if device is having traffic more than the * threshold. */ @@ -319,6 +319,17 @@ static u8 rtw_acquire_macid(struct rtw_dev *rtwdev) return mac_id; } +static void rtw_sta_rc_work(struct work_struct *work) +{ + struct rtw_sta_info *si = container_of(work, struct rtw_sta_info, + rc_work); + struct rtw_dev *rtwdev = si->rtwdev; + + mutex_lock(&rtwdev->mutex); + rtw_update_sta_info(rtwdev, si, true); + mutex_unlock(&rtwdev->mutex); +} + int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta, struct ieee80211_vif *vif) { @@ -329,12 +340,14 @@ int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta, if (si->mac_id >= RTW_MAX_MAC_ID_NUM) return -ENOSPC; + si->rtwdev = rtwdev; si->sta = sta; si->vif = vif; si->init_ra_lv = 1; ewma_rssi_init(&si->avg_rssi); for (i = 0; i < ARRAY_SIZE(sta->txq); i++) rtw_txq_init(rtwdev, sta->txq[i]); + INIT_WORK(&si->rc_work, rtw_sta_rc_work); rtw_update_sta_info(rtwdev, si, true); rtw_fw_media_status_report(rtwdev, si->mac_id, true); @@ -353,6 +366,8 @@ void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta, struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; int i; + cancel_work_sync(&si->rc_work); + rtw_release_macid(rtwdev, si->mac_id); if (fw_exist) rtw_fw_media_status_report(rtwdev, si->mac_id, false); diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h index a563285e90ede577f287d7329f1246cdb3569a14..9e841f6991a9a4d0d984fe5892282bd8d473ade4 100644 --- a/drivers/net/wireless/realtek/rtw88/main.h +++ b/drivers/net/wireless/realtek/rtw88/main.h @@ -743,6 +743,7 @@ struct rtw_txq { DECLARE_EWMA(rssi, 10, 16); struct rtw_sta_info { + struct rtw_dev *rtwdev; struct ieee80211_sta *sta; struct ieee80211_vif *vif; @@ -767,6 +768,8 @@ struct rtw_sta_info { bool use_cfg_mask; struct cfg80211_bitrate_mask *mask; + + 
struct work_struct rc_work; }; enum rtw_bfee_role { diff --git a/drivers/net/wireless/realtek/rtw88/ps.c b/drivers/net/wireless/realtek/rtw88/ps.c index 996365575f44fe7d16e0ce10a7d77586e8ca4367..53933fb38a33084dcbdb81f68ba8f524e73feb02 100644 --- a/drivers/net/wireless/realtek/rtw88/ps.c +++ b/drivers/net/wireless/realtek/rtw88/ps.c @@ -299,3 +299,46 @@ void rtw_leave_lps_deep(struct rtw_dev *rtwdev) __rtw_leave_lps_deep(rtwdev); } + +struct rtw_vif_recalc_lps_iter_data { + struct rtw_dev *rtwdev; + struct ieee80211_vif *found_vif; + int count; +}; + +static void __rtw_vif_recalc_lps(struct rtw_vif_recalc_lps_iter_data *data, + struct ieee80211_vif *vif) +{ + if (data->count < 0) + return; + + if (vif->type != NL80211_IFTYPE_STATION) { + data->count = -1; + return; + } + + data->count++; + data->found_vif = vif; +} + +static void rtw_vif_recalc_lps_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + __rtw_vif_recalc_lps(data, vif); +} + +void rtw_recalc_lps(struct rtw_dev *rtwdev, struct ieee80211_vif *new_vif) +{ + struct rtw_vif_recalc_lps_iter_data data = { .rtwdev = rtwdev }; + + if (new_vif) + __rtw_vif_recalc_lps(&data, new_vif); + rtw_iterate_vifs(rtwdev, rtw_vif_recalc_lps_iter, &data); + + if (data.count == 1 && data.found_vif->cfg.ps) { + rtwdev->ps_enabled = true; + } else { + rtwdev->ps_enabled = false; + rtw_leave_lps(rtwdev); + } +} diff --git a/drivers/net/wireless/realtek/rtw88/ps.h b/drivers/net/wireless/realtek/rtw88/ps.h index c194386f6db53a378489255e4dfe188e2733d983..5ae83d2526cfd2730daaeed07f1d0ca56527a068 100644 --- a/drivers/net/wireless/realtek/rtw88/ps.h +++ b/drivers/net/wireless/realtek/rtw88/ps.h @@ -23,4 +23,6 @@ void rtw_enter_lps(struct rtw_dev *rtwdev, u8 port_id); void rtw_leave_lps(struct rtw_dev *rtwdev); void rtw_leave_lps_deep(struct rtw_dev *rtwdev); enum rtw_lps_deep_mode rtw_get_lps_deep_mode(struct rtw_dev *rtwdev); +void rtw_recalc_lps(struct rtw_dev *rtwdev, struct ieee80211_vif *new_vif); + #endif diff --git a/drivers/net/wireless/realtek/rtw88/sdio.c b/drivers/net/wireless/realtek/rtw88/sdio.c index af0459a79899f65bd675d7078d00b44c8f617b4b..06fce7c3addaa395bc5c622621bf8bc7c0537dae 100644 --- a/drivers/net/wireless/realtek/rtw88/sdio.c +++ b/drivers/net/wireless/realtek/rtw88/sdio.c @@ -87,11 +87,6 @@ static void rtw_sdio_writew(struct rtw_dev *rtwdev, u16 val, u32 addr, u8 buf[2]; int i; - if (rtw_sdio_use_memcpy_io(rtwdev, addr, 2)) { - sdio_writew(rtwsdio->sdio_func, val, addr, err_ret); - return; - } - *(__le16 *)buf = cpu_to_le16(val); for (i = 0; i < 2; i++) { @@ -125,9 +120,6 @@ static u16 rtw_sdio_readw(struct rtw_dev *rtwdev, u32 addr, int *err_ret) u8 buf[2]; int i; - if (rtw_sdio_use_memcpy_io(rtwdev, addr, 2)) - return sdio_readw(rtwsdio->sdio_func, addr, err_ret); - for (i = 0; i < 2; i++) { buf[i] = sdio_readb(rtwsdio->sdio_func, addr + i, err_ret); if (*err_ret) diff --git a/drivers/net/wireless/realtek/rtw88/usb.h b/drivers/net/wireless/realtek/rtw88/usb.h index 30647f0dd61c66fb6433c7976752b916d8be3e53..ad1d7955c6a51e545b92c5e5bbd316f9120bf438 100644 --- a/drivers/net/wireless/realtek/rtw88/usb.h +++ b/drivers/net/wireless/realtek/rtw88/usb.h @@ -78,7 +78,7 @@ struct rtw_usb { u8 pipe_interrupt; u8 pipe_in; u8 out_ep[RTW_USB_EP_MAX]; - u8 qsel_to_ep[TX_DESC_QSEL_MAX]; + int qsel_to_ep[TX_DESC_QSEL_MAX]; u8 usb_txagg_num; struct workqueue_struct *txwq, *rxwq; diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c index 
7fc0a26a4d7317cde259f2521607d5f6378d70e0..bad864d56bd5cc7a16dc4c3eca81121db00efa89 100644 --- a/drivers/net/wireless/realtek/rtw89/core.c +++ b/drivers/net/wireless/realtek/rtw89/core.c @@ -2531,9 +2531,6 @@ static void rtw89_vif_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwv rtwvif->tdls_peer) return; - if (rtwdev->total_sta_assoc > 1) - return; - if (rtwvif->offchan) return; diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index b8019cfc11b20dd5889b6106ba905d5999f4aab3..512de491a064b1b741ad34802b423e7a2b59a968 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -1425,6 +1425,8 @@ const struct rtw89_mac_size_set rtw89_mac_size = { .wde_size4 = {RTW89_WDE_PG_64, 0, 4096,}, /* PCIE 64 */ .wde_size6 = {RTW89_WDE_PG_64, 512, 0,}, + /* 8852B PCIE SCC */ + .wde_size7 = {RTW89_WDE_PG_64, 510, 2,}, /* DLFW */ .wde_size9 = {RTW89_WDE_PG_64, 0, 1024,}, /* 8852C DLFW */ @@ -1449,6 +1451,8 @@ const struct rtw89_mac_size_set rtw89_mac_size = { .wde_qt4 = {0, 0, 0, 0,}, /* PCIE 64 */ .wde_qt6 = {448, 48, 0, 16,}, + /* 8852B PCIE SCC */ + .wde_qt7 = {446, 48, 0, 16,}, /* 8852C DLFW */ .wde_qt17 = {0, 0, 0, 0,}, /* 8852C PCIE SCC */ diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h index a8d9847ef0b49e1df2f7ffa19abc9729cf4db7b9..6ba633ccdd0377ff571a5c0bbf2017c8bb11807e 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.h +++ b/drivers/net/wireless/realtek/rtw89/mac.h @@ -792,6 +792,7 @@ struct rtw89_mac_size_set { const struct rtw89_dle_size wde_size0; const struct rtw89_dle_size wde_size4; const struct rtw89_dle_size wde_size6; + const struct rtw89_dle_size wde_size7; const struct rtw89_dle_size wde_size9; const struct rtw89_dle_size wde_size18; const struct rtw89_dle_size wde_size19; @@ -804,6 +805,7 @@ struct rtw89_mac_size_set { const struct rtw89_wde_quota wde_qt0; const struct rtw89_wde_quota wde_qt4; const struct rtw89_wde_quota wde_qt6; + const struct rtw89_wde_quota wde_qt7; const struct rtw89_wde_quota wde_qt17; const struct rtw89_wde_quota wde_qt18; const struct rtw89_ple_quota ple_qt4; diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c index ee4588b61b8f66bd6157e5ada41a34e96f47d9c0..c42e310690351a37fcd7053a30362918155b6cfe 100644 --- a/drivers/net/wireless/realtek/rtw89/mac80211.c +++ b/drivers/net/wireless/realtek/rtw89/mac80211.c @@ -89,15 +89,6 @@ static int rtw89_ops_config(struct ieee80211_hw *hw, u32 changed) !(hw->conf.flags & IEEE80211_CONF_IDLE)) rtw89_leave_ips(rtwdev); - if (changed & IEEE80211_CONF_CHANGE_PS) { - if (hw->conf.flags & IEEE80211_CONF_PS) { - rtwdev->lps_enabled = true; - } else { - rtw89_leave_lps(rtwdev); - rtwdev->lps_enabled = false; - } - } - if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { rtw89_config_entity_chandef(rtwdev, RTW89_SUB_ENTITY_0, &hw->conf.chandef); @@ -168,6 +159,8 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw, rtw89_core_txq_init(rtwdev, vif->txq); rtw89_btc_ntfy_role_info(rtwdev, rtwvif, NULL, BTC_ROLE_START); + + rtw89_recalc_lps(rtwdev); out: mutex_unlock(&rtwdev->mutex); @@ -192,6 +185,7 @@ static void rtw89_ops_remove_interface(struct ieee80211_hw *hw, rtw89_mac_remove_vif(rtwdev, rtwvif); rtw89_core_release_bit_map(rtwdev->hw_port, rtwvif->port); list_del_init(&rtwvif->list); + rtw89_recalc_lps(rtwdev); rtw89_enter_ips_by_hwflags(rtwdev); mutex_unlock(&rtwdev->mutex); @@ -451,6 +445,9 @@ static void 
rtw89_ops_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_CQM) rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, vif, true); + if (changed & BSS_CHANGED_PS) + rtw89_recalc_lps(rtwdev); + mutex_unlock(&rtwdev->mutex); } diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c index fa94335f699aa47eba2f76a686f36b7cce8507ec..84201ef19c176c9426ac2cbeb3bbbc7a760d83a5 100644 --- a/drivers/net/wireless/realtek/rtw89/ps.c +++ b/drivers/net/wireless/realtek/rtw89/ps.c @@ -252,3 +252,29 @@ void rtw89_process_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif) rtw89_p2p_disable_all_noa(rtwdev, vif); rtw89_p2p_update_noa(rtwdev, vif); } + +void rtw89_recalc_lps(struct rtw89_dev *rtwdev) +{ + struct ieee80211_vif *vif, *found_vif = NULL; + struct rtw89_vif *rtwvif; + int count = 0; + + rtw89_for_each_rtwvif(rtwdev, rtwvif) { + vif = rtwvif_to_vif(rtwvif); + + if (vif->type != NL80211_IFTYPE_STATION) { + count = 0; + break; + } + + count++; + found_vif = vif; + } + + if (count == 1 && found_vif->cfg.ps) { + rtwdev->lps_enabled = true; + } else { + rtw89_leave_lps(rtwdev); + rtwdev->lps_enabled = false; + } +} diff --git a/drivers/net/wireless/realtek/rtw89/ps.h b/drivers/net/wireless/realtek/rtw89/ps.h index 73c008db042643e953ff500f72f8afa2647dbeda..4c18f49204b285aaaf2ce3a7cf6ea57b0622163e 100644 --- a/drivers/net/wireless/realtek/rtw89/ps.h +++ b/drivers/net/wireless/realtek/rtw89/ps.h @@ -15,6 +15,7 @@ void rtw89_enter_ips(struct rtw89_dev *rtwdev); void rtw89_leave_ips(struct rtw89_dev *rtwdev); void rtw89_set_coex_ctrl_lps(struct rtw89_dev *rtwdev, bool btc_ctrl); void rtw89_process_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif); +void rtw89_recalc_lps(struct rtw89_dev *rtwdev); static inline void rtw89_leave_ips_by_hwflags(struct rtw89_dev *rtwdev) { diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c index eaa2ea0586bc621852d2b318d107a4eb95099ee4..6da1b603a9a95c096fc531b727ea080c84ebeaa2 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c @@ -18,25 +18,25 @@ RTW8852B_FW_BASENAME "-" __stringify(RTW8852B_FW_FORMAT_MAX) ".bin" static const struct rtw89_hfc_ch_cfg rtw8852b_hfc_chcfg_pcie[] = { - {5, 343, grp_0}, /* ACH 0 */ - {5, 343, grp_0}, /* ACH 1 */ - {5, 343, grp_0}, /* ACH 2 */ - {5, 343, grp_0}, /* ACH 3 */ + {5, 341, grp_0}, /* ACH 0 */ + {5, 341, grp_0}, /* ACH 1 */ + {4, 342, grp_0}, /* ACH 2 */ + {4, 342, grp_0}, /* ACH 3 */ {0, 0, grp_0}, /* ACH 4 */ {0, 0, grp_0}, /* ACH 5 */ {0, 0, grp_0}, /* ACH 6 */ {0, 0, grp_0}, /* ACH 7 */ - {4, 344, grp_0}, /* B0MGQ */ - {4, 344, grp_0}, /* B0HIQ */ + {4, 342, grp_0}, /* B0MGQ */ + {4, 342, grp_0}, /* B0HIQ */ {0, 0, grp_0}, /* B1MGQ */ {0, 0, grp_0}, /* B1HIQ */ {40, 0, 0} /* FWCMDQ */ }; static const struct rtw89_hfc_pub_cfg rtw8852b_hfc_pubcfg_pcie = { - 448, /* Group 0 */ + 446, /* Group 0 */ 0, /* Group 1 */ - 448, /* Public Max */ + 446, /* Public Max */ 0 /* WP threshold */ }; @@ -49,13 +49,13 @@ static const struct rtw89_hfc_param_ini rtw8852b_hfc_param_ini_pcie[] = { }; static const struct rtw89_dle_mem rtw8852b_dle_mem_pcie[] = { - [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_mac_size.wde_size6, - &rtw89_mac_size.ple_size6, &rtw89_mac_size.wde_qt6, - &rtw89_mac_size.wde_qt6, &rtw89_mac_size.ple_qt18, + [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_mac_size.wde_size7, + &rtw89_mac_size.ple_size6, &rtw89_mac_size.wde_qt7, + &rtw89_mac_size.wde_qt7, 
&rtw89_mac_size.ple_qt18, &rtw89_mac_size.ple_qt58}, - [RTW89_QTA_WOW] = {RTW89_QTA_WOW, &rtw89_mac_size.wde_size6, - &rtw89_mac_size.ple_size6, &rtw89_mac_size.wde_qt6, - &rtw89_mac_size.wde_qt6, &rtw89_mac_size.ple_qt18, + [RTW89_QTA_WOW] = {RTW89_QTA_WOW, &rtw89_mac_size.wde_size7, + &rtw89_mac_size.ple_size6, &rtw89_mac_size.wde_qt7, + &rtw89_mac_size.wde_qt7, &rtw89_mac_size.ple_qt18, &rtw89_mac_size.ple_qt_52b_wow}, [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_mac_size.wde_size9, &rtw89_mac_size.ple_size8, &rtw89_mac_size.wde_qt4, diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c index 9a8faaf4c6b642e9960ea593072f476f12ec3985..89c7a1420381d369930cf7e6907d08740638290f 100644 --- a/drivers/net/wireless/virtual/mac80211_hwsim.c +++ b/drivers/net/wireless/virtual/mac80211_hwsim.c @@ -5964,10 +5964,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) ret = -ENOMEM; goto out_free; } + param.pmsr_capa = pmsr_capa; + ret = parse_pmsr_capa(info->attrs[HWSIM_ATTR_PMSR_SUPPORT], pmsr_capa, info); if (ret) goto out_free; - param.pmsr_capa = pmsr_capa; } ret = mac80211_hwsim_new_radio(info, ¶m); diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.c b/drivers/net/wwan/iosm/iosm_ipc_imem.c index c066b0040a3fe5fb6668bd35d45694495dc1647a..829515a601b379d3acb57927b67076a35ebf16c9 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem.c +++ b/drivers/net/wwan/iosm/iosm_ipc_imem.c @@ -565,24 +565,32 @@ static void ipc_imem_run_state_worker(struct work_struct *instance) struct ipc_mux_config mux_cfg; struct iosm_imem *ipc_imem; u8 ctrl_chl_idx = 0; + int ret; ipc_imem = container_of(instance, struct iosm_imem, run_state_worker); if (ipc_imem->phase != IPC_P_RUN) { dev_err(ipc_imem->dev, "Modem link down. 
Exit run state worker."); - return; + goto err_out; } if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag)) ipc_devlink_deinit(ipc_imem->ipc_devlink); - if (!ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg)) - ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem); + ret = ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg); + if (ret < 0) + goto err_out; + + ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem); + if (!ipc_imem->mux) + goto err_out; + + ret = ipc_imem_wwan_channel_init(ipc_imem, mux_cfg.protocol); + if (ret < 0) + goto err_ipc_mux_deinit; - ipc_imem_wwan_channel_init(ipc_imem, mux_cfg.protocol); - if (ipc_imem->mux) - ipc_imem->mux->wwan = ipc_imem->wwan; + ipc_imem->mux->wwan = ipc_imem->wwan; while (ctrl_chl_idx < IPC_MEM_MAX_CHANNELS) { if (!ipc_chnl_cfg_get(&chnl_cfg_port, ctrl_chl_idx)) { @@ -622,6 +630,13 @@ static void ipc_imem_run_state_worker(struct work_struct *instance) /* Complete all memory stores after setting bit */ smp_mb__after_atomic(); + + return; + +err_ipc_mux_deinit: + ipc_mux_deinit(ipc_imem->mux); +err_out: + ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY_LINK_DOWN); } static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq) diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c index 66b90cc4c3460f4d046037c6e51181322720f56e..109cf89304888bc15e045536daca29979bc8a8f2 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c +++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c @@ -77,8 +77,8 @@ out: } /* Initialize wwan channel */ -void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem, - enum ipc_mux_protocol mux_type) +int ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem, + enum ipc_mux_protocol mux_type) { struct ipc_chnl_cfg chnl_cfg = { 0 }; @@ -87,7 +87,7 @@ void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem, /* If modem version is invalid (0xffffffff), do not initialize WWAN. */ if (ipc_imem->cp_version == -1) { dev_err(ipc_imem->dev, "invalid CP version"); - return; + return -EIO; } ipc_chnl_cfg_get(&chnl_cfg, ipc_imem->nr_of_channels); @@ -104,9 +104,13 @@ void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem, /* WWAN registration. */ ipc_imem->wwan = ipc_wwan_init(ipc_imem, ipc_imem->dev); - if (!ipc_imem->wwan) + if (!ipc_imem->wwan) { dev_err(ipc_imem->dev, "failed to register the ipc_wwan interfaces"); + return -ENOMEM; + } + + return 0; } /* Map SKB to DMA for transfer */ diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h index f8afb217d9e2fb8acad63e6b6de8b4f726542db9..026c5bd0f9992f7afeb03667149e4fbbd8dc83cc 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h +++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h @@ -91,9 +91,11 @@ int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem, int if_id, * MUX. * @ipc_imem: Pointer to iosm_imem struct. * @mux_type: Type of mux protocol. 
+ * + * Return: 0 on success and failure value on error */ -void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem, - enum ipc_mux_protocol mux_type); +int ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem, + enum ipc_mux_protocol mux_type); /** * ipc_imem_sys_devlink_open - Open a Flash/CD Channel link to CP diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c index d6b166fc5c0ef5ae86e04ee0f225b583aed0ac7a..bff46f7ca59f3ffa086e2155edcc1990ef214203 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c +++ b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c @@ -626,14 +626,12 @@ static void mux_dl_adb_decode(struct iosm_mux *ipc_mux, if (adth->signature != cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH)) goto adb_decode_err; - if (le16_to_cpu(adth->table_length) < (sizeof(struct mux_adth) - - sizeof(struct mux_adth_dg))) + if (le16_to_cpu(adth->table_length) < sizeof(struct mux_adth)) goto adb_decode_err; /* Calculate the number of datagrams. */ nr_of_dg = (le16_to_cpu(adth->table_length) - - sizeof(struct mux_adth) + - sizeof(struct mux_adth_dg)) / + sizeof(struct mux_adth)) / sizeof(struct mux_adth_dg); /* Is the datagram table empty ? */ @@ -649,7 +647,7 @@ static void mux_dl_adb_decode(struct iosm_mux *ipc_mux, } /* New aggregated datagram table. */ - dg = &adth->dg; + dg = adth->dg; if (mux_dl_process_dg(ipc_mux, adbh, dg, skb, if_id, nr_of_dg) < 0) goto adb_decode_err; @@ -849,7 +847,7 @@ static void ipc_mux_ul_encode_adth(struct iosm_mux *ipc_mux, adth->if_id = i; adth->table_length = cpu_to_le16(adth_dg_size); adth_dg_size -= offsetof(struct mux_adth, dg); - memcpy(&adth->dg, ul_adb->dg[i], adth_dg_size); + memcpy(adth->dg, ul_adb->dg[i], adth_dg_size); ul_adb->if_cnt++; } @@ -1426,14 +1424,13 @@ static int ipc_mux_get_payload_from_adb(struct iosm_mux *ipc_mux, if (adth->signature == cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH)) { nr_of_dg = (le16_to_cpu(adth->table_length) - - sizeof(struct mux_adth) + - sizeof(struct mux_adth_dg)) / + sizeof(struct mux_adth)) / sizeof(struct mux_adth_dg); if (nr_of_dg <= 0) return payload_size; - dg = &adth->dg; + dg = adth->dg; for (i = 0; i < nr_of_dg; i++, dg++) { if (le32_to_cpu(dg->datagram_index) < diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h index 5d4e3b89542cc42c43f7008cbe4e70feb6b65550..f8df88f816c4cc88bfc61af2093d0b11c5318a07 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h +++ b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h @@ -161,7 +161,7 @@ struct mux_adth { u8 opt_ipv4v6; __le32 next_table_index; __le32 reserved2; - struct mux_adth_dg dg; + struct mux_adth_dg dg[]; }; /** diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c index 226fc1703e90f2d56728cfbf73b64c9f5bf49849..91256e005b846f17f869d62ceb6ac645d698c5fc 100644 --- a/drivers/net/wwan/t7xx/t7xx_pci.c +++ b/drivers/net/wwan/t7xx/t7xx_pci.c @@ -45,6 +45,7 @@ #define T7XX_PCI_IREG_BASE 0 #define T7XX_PCI_EREG_BASE 2 +#define T7XX_INIT_TIMEOUT 20 #define PM_SLEEP_DIS_TIMEOUT_MS 20 #define PM_ACK_TIMEOUT_MS 1500 #define PM_AUTOSUSPEND_MS 20000 @@ -96,6 +97,7 @@ static int t7xx_pci_pm_init(struct t7xx_pci_dev *t7xx_dev) spin_lock_init(&t7xx_dev->md_pm_lock); init_completion(&t7xx_dev->sleep_lock_acquire); init_completion(&t7xx_dev->pm_sr_ack); + init_completion(&t7xx_dev->init_done); atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT); device_init_wakeup(&pdev->dev, true); @@ -124,6 +126,7 @@ void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev) 
pm_runtime_mark_last_busy(&t7xx_dev->pdev->dev); pm_runtime_allow(&t7xx_dev->pdev->dev); pm_runtime_put_noidle(&t7xx_dev->pdev->dev); + complete_all(&t7xx_dev->init_done); } static int t7xx_pci_pm_reinit(struct t7xx_pci_dev *t7xx_dev) @@ -529,6 +532,20 @@ static void t7xx_pci_shutdown(struct pci_dev *pdev) __t7xx_pci_pm_suspend(pdev); } +static int t7xx_pci_pm_prepare(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct t7xx_pci_dev *t7xx_dev; + + t7xx_dev = pci_get_drvdata(pdev); + if (!wait_for_completion_timeout(&t7xx_dev->init_done, T7XX_INIT_TIMEOUT * HZ)) { + dev_warn(dev, "Not ready for system sleep.\n"); + return -ETIMEDOUT; + } + + return 0; +} + static int t7xx_pci_pm_suspend(struct device *dev) { return __t7xx_pci_pm_suspend(to_pci_dev(dev)); @@ -555,6 +572,7 @@ static int t7xx_pci_pm_runtime_resume(struct device *dev) } static const struct dev_pm_ops t7xx_pci_pm_ops = { + .prepare = t7xx_pci_pm_prepare, .suspend = t7xx_pci_pm_suspend, .resume = t7xx_pci_pm_resume, .resume_noirq = t7xx_pci_pm_resume_noirq, diff --git a/drivers/net/wwan/t7xx/t7xx_pci.h b/drivers/net/wwan/t7xx/t7xx_pci.h index 112efa534eaceffb47a8b049d5126eae79c971e3..f08f1ab7446917b0b84f053fe31a0f9902d76f90 100644 --- a/drivers/net/wwan/t7xx/t7xx_pci.h +++ b/drivers/net/wwan/t7xx/t7xx_pci.h @@ -69,6 +69,7 @@ struct t7xx_pci_dev { struct t7xx_modem *md; struct t7xx_ccmni_ctrl *ccmni_ctlb; bool rgu_pci_irq_en; + struct completion init_done; /* Low Power Items */ struct list_head md_pm_entities; diff --git a/drivers/nfc/fdp/fdp.c b/drivers/nfc/fdp/fdp.c index f12f903a9dd134bf327744302205dc203ed92f7f..da3e2dce8e70a69ecefac5b6d379d70f941d0ac1 100644 --- a/drivers/nfc/fdp/fdp.c +++ b/drivers/nfc/fdp/fdp.c @@ -762,3 +762,6 @@ EXPORT_SYMBOL(fdp_nci_remove); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("NFC NCI driver for Intel Fields Peak NFC controller"); MODULE_AUTHOR("Robert Dolca "); + +MODULE_FIRMWARE(FDP_OTP_PATCH_NAME); +MODULE_FIRMWARE(FDP_RAM_PATCH_NAME); diff --git a/drivers/nfc/nfcsim.c b/drivers/nfc/nfcsim.c index 44eeb17ae48d911fbb75b92d0d5d8987d5e5bea6..a55381f80cd6e4d98eaece05340269ab6a7f45ed 100644 --- a/drivers/nfc/nfcsim.c +++ b/drivers/nfc/nfcsim.c @@ -336,10 +336,6 @@ static struct dentry *nfcsim_debugfs_root; static void nfcsim_debugfs_init(void) { nfcsim_debugfs_root = debugfs_create_dir("nfcsim", NULL); - - if (!nfcsim_debugfs_root) - pr_err("Could not create debugfs entry\n"); - } static void nfcsim_debugfs_remove(void) diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c index bc523ca0225486776eba989fab6ed758b4c569d0..5e4f8848dce08e912d5c4d85b84ae70cf55c14e5 100644 --- a/drivers/nvme/host/constants.c +++ b/drivers/nvme/host/constants.c @@ -21,7 +21,7 @@ static const char * const nvme_ops[] = { [nvme_cmd_resv_release] = "Reservation Release", [nvme_cmd_zone_mgmt_send] = "Zone Management Send", [nvme_cmd_zone_mgmt_recv] = "Zone Management Receive", - [nvme_cmd_zone_append] = "Zone Management Append", + [nvme_cmd_zone_append] = "Zone Append", }; static const char * const nvme_admin_ops[] = { diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index ccb6eb1282f82d8b801b38172261be1052119a6a..3ec38e2b91732691ae53ac50eb7fc8a45add5465 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -397,7 +397,16 @@ void nvme_complete_rq(struct request *req) trace_nvme_complete_rq(req); nvme_cleanup_cmd(req); - if (ctrl->kas) + /* + * Completions of long-running commands should not be able to + * defer sending of periodic keep alives, since the 
controller + * may have completed processing such commands a long time ago + * (arbitrarily close to command submission time). + * req->deadline - req->timeout is the command submission time + * in jiffies. + */ + if (ctrl->kas && + req->deadline - req->timeout >= ctrl->ka_last_check_time) ctrl->comp_seen = true; switch (nvme_decide_disposition(req)) { @@ -1115,7 +1124,7 @@ u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode) } EXPORT_SYMBOL_NS_GPL(nvme_passthru_start, NVME_TARGET_PASSTHRU); -void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects, +void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects, struct nvme_command *cmd, int status) { if (effects & NVME_CMD_EFFECTS_CSE_MASK) { @@ -1132,6 +1141,8 @@ void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects, nvme_queue_scan(ctrl); flush_work(&ctrl->scan_work); } + if (ns) + return; switch (cmd->common.opcode) { case nvme_admin_set_features: @@ -1161,9 +1172,25 @@ EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU); * The host should send Keep Alive commands at half of the Keep Alive Timeout * accounting for transport roundtrip times [..]. */ +static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl) +{ + unsigned long delay = ctrl->kato * HZ / 2; + + /* + * When using Traffic Based Keep Alive, we need to run + * nvme_keep_alive_work at twice the normal frequency, as one + * command completion can postpone sending a keep alive command + * by up to twice the delay between runs. + */ + if (ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) + delay /= 2; + return delay; +} + static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl) { - queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ / 2); + queue_delayed_work(nvme_wq, &ctrl->ka_work, + nvme_keep_alive_work_period(ctrl)); } static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq, @@ -1172,6 +1199,20 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq, struct nvme_ctrl *ctrl = rq->end_io_data; unsigned long flags; bool startka = false; + unsigned long rtt = jiffies - (rq->deadline - rq->timeout); + unsigned long delay = nvme_keep_alive_work_period(ctrl); + + /* + * Subtract off the keepalive RTT so nvme_keep_alive_work runs + * at the desired frequency. 
+ */ + if (rtt <= delay) { + delay -= rtt; + } else { + dev_warn(ctrl->device, "long keepalive RTT (%u ms)\n", + jiffies_to_msecs(rtt)); + delay = 0; + } blk_mq_free_request(rq); @@ -1182,6 +1223,7 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq, return RQ_END_IO_NONE; } + ctrl->ka_last_check_time = jiffies; ctrl->comp_seen = false; spin_lock_irqsave(&ctrl->lock, flags); if (ctrl->state == NVME_CTRL_LIVE || @@ -1189,7 +1231,7 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq, startka = true; spin_unlock_irqrestore(&ctrl->lock, flags); if (startka) - nvme_queue_keep_alive_work(ctrl); + queue_delayed_work(nvme_wq, &ctrl->ka_work, delay); return RQ_END_IO_NONE; } @@ -1200,6 +1242,8 @@ static void nvme_keep_alive_work(struct work_struct *work) bool comp_seen = ctrl->comp_seen; struct request *rq; + ctrl->ka_last_check_time = jiffies; + if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) { dev_dbg(ctrl->device, "reschedule traffic based keep-alive timer\n"); @@ -3585,6 +3629,9 @@ static ssize_t nvme_sysfs_delete(struct device *dev, { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + if (!test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags)) + return -EBUSY; + if (device_remove_file_self(dev, attr)) nvme_delete_ctrl_sync(ctrl); return count; @@ -5045,7 +5092,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl) * that were missed. We identify persistent discovery controllers by * checking that they started once before, hence are reconnecting back. */ - if (test_and_set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) && + if (test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) && nvme_discovery_ctrl(ctrl)) nvme_change_uevent(ctrl, "NVME_EVENT=rediscover"); @@ -5056,6 +5103,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl) } nvme_change_uevent(ctrl, "NVME_EVENT=connected"); + set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags); } EXPORT_SYMBOL_GPL(nvme_start_ctrl); diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c index 9e6e56c20ec993bfca3ee2eb18d2e371d7225230..316f3e4ca7cc60da613e5f246f9e5a861d60e14f 100644 --- a/drivers/nvme/host/hwmon.c +++ b/drivers/nvme/host/hwmon.c @@ -163,7 +163,9 @@ static umode_t nvme_hwmon_is_visible(const void *_data, case hwmon_temp_max: case hwmon_temp_min: if ((!channel && data->ctrl->wctemp) || - (channel && data->log->temp_sensor[channel - 1])) { + (channel && data->log->temp_sensor[channel - 1] && + !(data->ctrl->quirks & + NVME_QUIRK_NO_SECONDARY_TEMP_THRESH))) { if (data->ctrl->quirks & NVME_QUIRK_NO_TEMP_THRESH_CHANGE) return 0444; diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c index 81c5c9e38477470c1134506b78bdd09ceaa36be5..f15e7330b75ac5c310bf05f88cd1275469461097 100644 --- a/drivers/nvme/host/ioctl.c +++ b/drivers/nvme/host/ioctl.c @@ -254,7 +254,7 @@ static int nvme_submit_user_cmd(struct request_queue *q, blk_mq_free_request(req); if (effects) - nvme_passthru_end(ctrl, effects, cmd, ret); + nvme_passthru_end(ctrl, ns, effects, cmd, ret); return ret; } diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 9171452e2f6d4e2eed95d0c6ef9ec0cbf05bc7ef..2bc159a318ff0af19a06aa668aeb94bf8167ff1d 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -884,7 +884,6 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head) { if (!head->disk) return; - blk_mark_disk_dead(head->disk); /* make sure all pending bios are cleaned up */ kblockd_schedule_work(&head->requeue_work); flush_work(&head->requeue_work); diff --git a/drivers/nvme/host/nvme.h 
b/drivers/nvme/host/nvme.h index bf46f122e9e1e476c7127186c356c3857c37c30a..8657811f8b8875e21a53732252d7f0738c85d678 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -149,6 +149,11 @@ enum nvme_quirks { * Reports garbage in the namespace identifiers (eui64, nguid, uuid). */ NVME_QUIRK_BOGUS_NID = (1 << 18), + + /* + * No temperature thresholds for channels other than 0 (Composite). + */ + NVME_QUIRK_NO_SECONDARY_TEMP_THRESH = (1 << 19), }; /* @@ -323,6 +328,7 @@ struct nvme_ctrl { struct delayed_work ka_work; struct delayed_work failfast_work; struct nvme_command ka_cmd; + unsigned long ka_last_check_time; struct work_struct fw_act_work; unsigned long events; @@ -1072,7 +1078,7 @@ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode); u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode); int nvme_execute_rq(struct request *rq, bool at_head); -void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects, +void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects, struct nvme_command *cmd, int status); struct nvme_ctrl *nvme_ctrl_from_file(struct file *file); struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid); diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 7f25c0fe3a0bf5baf92dcd5f81613ecac75bb593..492f319ebdf37ad838dacbb5237f1a804b243840 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2956,7 +2956,7 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev, * over a single page. */ dev->ctrl.max_hw_sectors = min_t(u32, - NVME_MAX_KB_SZ << 1, dma_max_mapping_size(&pdev->dev) >> 9); + NVME_MAX_KB_SZ << 1, dma_opt_mapping_size(&pdev->dev) >> 9); dev->ctrl.max_segments = NVME_MAX_SEGS; /* @@ -3402,6 +3402,8 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, + { PCI_DEVICE(0x2646, 0x5013), /* Kingston KC3000, Kingston FURY Renegade */ + .driver_data = NVME_QUIRK_NO_SECONDARY_TEMP_THRESH, }, { PCI_DEVICE(0x2646, 0x5018), /* KINGSTON OM8SFP4xxxxP OS21012 NVMe SSD */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x2646, 0x5016), /* KINGSTON OM3PGP4xxxxP OS21011 NVMe SSD */ @@ -3422,6 +3424,8 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1e4B, 0x1202), /* MAXIO MAP1202 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1e4B, 0x1602), /* MAXIO MAP1602 */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1cc1, 0x5350), /* ADATA XPG GAMMIX S50 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1dbe, 0x5236), /* ADATA XPG GAMMIX S70 */ @@ -3441,6 +3445,10 @@ static const struct pci_device_id nvme_id_table[] = { NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE(0x10ec, 0x5763), /* TEAMGROUP T-FORCE CARDEA ZERO Z330 SSD */ .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1e4b, 0x1602), /* HS-SSD-FUTURE 2048G */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x10ec, 0x5765), /* TEAMGROUP MP33 2TB SSD */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061), .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065), diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c index 511c980d538df3af57fa9f1d998607393c417008..71a9c1cc57f59c265478b8c816c8e4f5a9b65f61 100644 --- 
a/drivers/nvme/target/passthru.c +++ b/drivers/nvme/target/passthru.c @@ -243,7 +243,7 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w) blk_mq_free_request(rq); if (effects) - nvme_passthru_end(ctrl, effects, req->cmd, status); + nvme_passthru_end(ctrl, ns, effects, req->cmd, status); } static enum rq_end_io_ret nvmet_passthru_req_done(struct request *rq, diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index 2e01960f1aeb3e3f7695849bbff6cc4ce67ad4f9..7feb643f13707d34b4012e365c6011f18cb37373 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c @@ -811,6 +811,7 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs) if (!fragment->target) { pr_err("symbols in overlay, but not in live tree\n"); ret = -EINVAL; + of_node_put(node); goto err_out; } diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index bc32662c6bb7fd2de7ffeae661dd53210d6288ac..2d93d0c4f10db8ae9c16223758efe3ec3024017b 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -489,7 +489,10 @@ struct hv_pcibus_device { struct fwnode_handle *fwnode; /* Protocol version negotiated with the host */ enum pci_protocol_version_t protocol_version; + + struct mutex state_lock; enum hv_pcibus_state state; + struct hv_device *hdev; resource_size_t low_mmio_space; resource_size_t high_mmio_space; @@ -545,19 +548,10 @@ struct hv_dr_state { struct hv_pcidev_description func[]; }; -enum hv_pcichild_state { - hv_pcichild_init = 0, - hv_pcichild_requirements, - hv_pcichild_resourced, - hv_pcichild_ejecting, - hv_pcichild_maximum -}; - struct hv_pci_dev { /* List protected by pci_rescan_remove_lock */ struct list_head list_entry; refcount_t refs; - enum hv_pcichild_state state; struct pci_slot *pci_slot; struct hv_pcidev_description desc; bool reported_missing; @@ -635,6 +629,11 @@ static void hv_arch_irq_unmask(struct irq_data *data) pbus = pdev->bus; hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata); int_desc = data->chip_data; + if (!int_desc) { + dev_warn(&hbus->hdev->device, "%s() can not unmask irq %u\n", + __func__, data->irq); + return; + } local_irq_save(flags); @@ -2004,12 +2003,6 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) hv_pci_onchannelcallback(hbus); spin_unlock_irqrestore(&channel->sched_lock, flags); - if (hpdev->state == hv_pcichild_ejecting) { - dev_err_once(&hbus->hdev->device, - "the device is being ejected\n"); - goto enable_tasklet; - } - udelay(100); } @@ -2615,6 +2608,8 @@ static void pci_devices_present_work(struct work_struct *work) if (!dr) return; + mutex_lock(&hbus->state_lock); + /* First, mark all existing children as reported missing. */ spin_lock_irqsave(&hbus->device_list_lock, flags); list_for_each_entry(hpdev, &hbus->children, list_entry) { @@ -2696,6 +2691,8 @@ static void pci_devices_present_work(struct work_struct *work) break; } + mutex_unlock(&hbus->state_lock); + kfree(dr); } @@ -2844,7 +2841,7 @@ static void hv_eject_device_work(struct work_struct *work) hpdev = container_of(work, struct hv_pci_dev, wrk); hbus = hpdev->hbus; - WARN_ON(hpdev->state != hv_pcichild_ejecting); + mutex_lock(&hbus->state_lock); /* * Ejection can come before or after the PCI bus has been set up, so @@ -2882,6 +2879,8 @@ static void hv_eject_device_work(struct work_struct *work) put_pcichild(hpdev); put_pcichild(hpdev); /* hpdev has been freed. Do not use it any more. 
*/ + + mutex_unlock(&hbus->state_lock); } /** @@ -2902,7 +2901,6 @@ static void hv_pci_eject_device(struct hv_pci_dev *hpdev) return; } - hpdev->state = hv_pcichild_ejecting; get_pcichild(hpdev); INIT_WORK(&hpdev->wrk, hv_eject_device_work); queue_work(hbus->wq, &hpdev->wrk); @@ -3331,8 +3329,10 @@ static int hv_pci_enter_d0(struct hv_device *hdev) struct pci_bus_d0_entry *d0_entry; struct hv_pci_compl comp_pkt; struct pci_packet *pkt; + bool retry = true; int ret; +enter_d0_retry: /* * Tell the host that the bus is ready to use, and moved into the * powered-on state. This includes telling the host which region @@ -3359,6 +3359,38 @@ static int hv_pci_enter_d0(struct hv_device *hdev) if (ret) goto exit; + /* + * In certain cases (kdump) the PCI device of interest was + * not cleanly shut down and its resources are still held on the + * host side, so the host could return an invalid device status. + * We need to explicitly request the host to release the resources + * and try to enter D0 again. + */ + if (comp_pkt.completion_status < 0 && retry) { + retry = false; + + dev_err(&hdev->device, "Retrying D0 Entry\n"); + + /* + * hv_pci_bus_exit() calls hv_send_resources_released() + * to free up the resources of its child devices. + * In the kdump kernel we need to set + * wslot_res_allocated to 255 so it scans all child + * devices to release resources allocated in the + * normal kernel before the panic happened. + */ + hbus->wslot_res_allocated = 255; + + ret = hv_pci_bus_exit(hdev, true); + + if (ret == 0) { + kfree(pkt); + goto enter_d0_retry; + } + dev_err(&hdev->device, + "Retrying D0 failed with ret %d\n", ret); + } + if (comp_pkt.completion_status < 0) { dev_err(&hdev->device, "PCI Pass-through VSP failed D0 Entry with status %x\n", @@ -3401,6 +3433,24 @@ static int hv_pci_query_relations(struct hv_device *hdev) if (!ret) ret = wait_for_response(hdev, &comp); + /* + * In the case of fast device addition/removal, it's possible that + * vmbus_sendpacket() or wait_for_response() returns -ENODEV but we + * already got a PCI_BUS_RELATIONS* message from the host and the + * channel callback already scheduled a work item to hbus->wq, which can be + * running pci_devices_present_work() -> survey_child_resources() -> + * complete(&hbus->survey_event), even after hv_pci_query_relations() + * exits and the stack variable 'comp' is no longer valid; as a result, + * a hang or a page fault may happen when the complete() calls + * raw_spin_lock_irqsave(). Flush hbus->wq before we exit from + * hv_pci_query_relations() to avoid the issues. Note: if 'ret' is + * -ENODEV, there can't be any more work items scheduled to hbus->wq + * after the flush_workqueue(): see vmbus_onoffer_rescind() -> + * vmbus_reset_channel_cb(), vmbus_rescind_cleanup() -> + * channel->rescind = true. 
+ */ + flush_workqueue(hbus->wq); + return ret; } @@ -3586,7 +3636,6 @@ static int hv_pci_probe(struct hv_device *hdev, struct hv_pcibus_device *hbus; u16 dom_req, dom; char *name; - bool enter_d0_retry = true; int ret; bridge = devm_pci_alloc_host_bridge(&hdev->device, 0); @@ -3598,6 +3647,7 @@ static int hv_pci_probe(struct hv_device *hdev, return -ENOMEM; hbus->bridge = bridge; + mutex_init(&hbus->state_lock); hbus->state = hv_pcibus_init; hbus->wslot_res_allocated = -1; @@ -3703,49 +3753,15 @@ static int hv_pci_probe(struct hv_device *hdev, if (ret) goto free_fwnode; -retry: ret = hv_pci_query_relations(hdev); if (ret) goto free_irq_domain; - ret = hv_pci_enter_d0(hdev); - /* - * In certain case (Kdump) the pci device of interest was - * not cleanly shut down and resource is still held on host - * side, the host could return invalid device status. - * We need to explicitly request host to release the resource - * and try to enter D0 again. - * Since the hv_pci_bus_exit() call releases structures - * of all its child devices, we need to start the retry from - * hv_pci_query_relations() call, requesting host to send - * the synchronous child device relations message before this - * information is needed in hv_send_resources_allocated() - * call later. - */ - if (ret == -EPROTO && enter_d0_retry) { - enter_d0_retry = false; - - dev_err(&hdev->device, "Retrying D0 Entry\n"); - - /* - * Hv_pci_bus_exit() calls hv_send_resources_released() - * to free up resources of its child devices. - * In the kdump kernel we need to set the - * wslot_res_allocated to 255 so it scans all child - * devices to release resources allocated in the - * normal kernel before panic happened. - */ - hbus->wslot_res_allocated = 255; - ret = hv_pci_bus_exit(hdev, true); - - if (ret == 0) - goto retry; + mutex_lock(&hbus->state_lock); - dev_err(&hdev->device, - "Retrying D0 failed with ret %d\n", ret); - } + ret = hv_pci_enter_d0(hdev); if (ret) - goto free_irq_domain; + goto release_state_lock; ret = hv_pci_allocate_bridge_windows(hbus); if (ret) @@ -3763,12 +3779,15 @@ retry: if (ret) goto free_windows; + mutex_unlock(&hbus->state_lock); return 0; free_windows: hv_pci_free_bridge_windows(hbus); exit_d0: (void) hv_pci_bus_exit(hdev, true); +release_state_lock: + mutex_unlock(&hbus->state_lock); free_irq_domain: irq_domain_remove(hbus->irq_domain); free_fwnode: @@ -4018,20 +4037,26 @@ static int hv_pci_resume(struct hv_device *hdev) if (ret) goto out; + mutex_lock(&hbus->state_lock); + ret = hv_pci_enter_d0(hdev); if (ret) - goto out; + goto release_state_lock; ret = hv_send_resources_allocated(hdev); if (ret) - goto out; + goto release_state_lock; prepopulate_bars(hbus); hv_pci_restore_msi_state(hbus); hbus->state = hv_pcibus_installed; + mutex_unlock(&hbus->state_lock); return 0; + +release_state_lock: + mutex_unlock(&hbus->state_lock); out: vmbus_close(hdev->channel); return ret; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index f4e2a88729fd144bfa125a7378153b4cd26fd34c..c525867760bf819c0af9ccf601844b2c37d0ca49 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -6003,8 +6003,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56c1, aspm_l1_acceptable_latency #ifdef CONFIG_PCIE_DPC /* - * Intel Tiger Lake and Alder Lake BIOS has a bug that clears the DPC - * RP PIO Log Size of the integrated Thunderbolt PCIe Root Ports. + * Intel Ice Lake, Tiger Lake and Alder Lake BIOS has a bug that clears + * the DPC RP PIO Log Size of the integrated Thunderbolt PCIe Root + * Ports. 
*/ static void dpc_log_size(struct pci_dev *dev) { @@ -6027,6 +6028,10 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x461f, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x462f, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x463f, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x466e, dpc_log_size); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8a1d, dpc_log_size); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8a1f, dpc_log_size); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8a21, dpc_log_size); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8a23, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a23, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a25, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a27, dpc_log_size); diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c index c98e4039386dbcbdb1de1f3c8848dbf038b0ceb9..93b7edb5f1e7c15fadc945510b3674dfc43374b0 100644 --- a/drivers/perf/arm_pmuv3.c +++ b/drivers/perf/arm_pmuv3.c @@ -677,9 +677,25 @@ static inline u32 armv8pmu_getreset_flags(void) return value; } +static void update_pmuserenr(u64 val) +{ + lockdep_assert_irqs_disabled(); + + /* + * The current PMUSERENR_EL0 value might be the value for the guest. + * If that's the case, have KVM keep tracking of the register value + * for the host EL0 so that KVM can restore it before returning to + * the host EL0. Otherwise, update the register now. + */ + if (kvm_set_pmuserenr(val)) + return; + + write_pmuserenr(val); +} + static void armv8pmu_disable_user_access(void) { - write_pmuserenr(0); + update_pmuserenr(0); } static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu) @@ -695,8 +711,7 @@ static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu) armv8pmu_write_evcntr(i, 0); } - write_pmuserenr(0); - write_pmuserenr(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR); + update_pmuserenr(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR); } static void armv8pmu_enable_event(struct perf_event *event) diff --git a/drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c b/drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c index c14089fa7db495b869556e785a9ff1651419d0a4..cabdddbbabfd7dc251a7027b6e5745dea398d273 100644 --- a/drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c +++ b/drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c @@ -70,7 +70,7 @@ static int phy_g12a_mipi_dphy_analog_power_on(struct phy *phy) HHI_MIPI_CNTL1_BANDGAP); regmap_write(priv->regmap, HHI_MIPI_CNTL2, - FIELD_PREP(HHI_MIPI_CNTL2_DIF_TX_CTL0, 0x459) | + FIELD_PREP(HHI_MIPI_CNTL2_DIF_TX_CTL0, 0x45a) | FIELD_PREP(HHI_MIPI_CNTL2_DIF_TX_CTL1, 0x2680)); reg = DSI_LANE_CLK; diff --git a/drivers/phy/mediatek/phy-mtk-hdmi-mt8195.c b/drivers/phy/mediatek/phy-mtk-hdmi-mt8195.c index caa953780beebb85a07fe2e582ad70fbfec43f55..8aa7251de4a96e48c4a0148950e0961d3ba6d9a0 100644 --- a/drivers/phy/mediatek/phy-mtk-hdmi-mt8195.c +++ b/drivers/phy/mediatek/phy-mtk-hdmi-mt8195.c @@ -237,11 +237,11 @@ static int mtk_hdmi_pll_calc(struct mtk_hdmi_phy *hdmi_phy, struct clk_hw *hw, */ if (tmds_clk < 54 * MEGA) txposdiv = 8; - else if (tmds_clk >= 54 * MEGA && tmds_clk < 148.35 * MEGA) + else if (tmds_clk >= 54 * MEGA && (tmds_clk * 100) < 14835 * MEGA) txposdiv = 4; - else if (tmds_clk >= 148.35 * MEGA && tmds_clk < 296.7 * MEGA) + else if ((tmds_clk * 100) >= 14835 * MEGA && (tmds_clk * 10) < 2967 * MEGA) txposdiv = 2; - else if (tmds_clk >= 296.7 * MEGA && tmds_clk <= 594 * MEGA) + else if ((tmds_clk * 10) >= 2967 * 
MEGA && tmds_clk <= 594 * MEGA) txposdiv = 1; else return -EINVAL; @@ -324,12 +324,12 @@ static int mtk_hdmi_pll_drv_setting(struct clk_hw *hw) clk_channel_bias = 0x34; /* 20mA */ impedance_en = 0xf; impedance = 0x36; /* 100ohm */ - } else if (pixel_clk >= 74.175 * MEGA && pixel_clk <= 300 * MEGA) { + } else if (((u64)pixel_clk * 1000) >= 74175 * MEGA && pixel_clk <= 300 * MEGA) { data_channel_bias = 0x34; /* 20mA */ clk_channel_bias = 0x2c; /* 16mA */ impedance_en = 0xf; impedance = 0x36; /* 100ohm */ - } else if (pixel_clk >= 27 * MEGA && pixel_clk < 74.175 * MEGA) { + } else if (pixel_clk >= 27 * MEGA && ((u64)pixel_clk * 1000) < 74175 * MEGA) { data_channel_bias = 0x14; /* 10mA */ clk_channel_bias = 0x14; /* 10mA */ impedance_en = 0x0; diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c index 6850e04c329b8e679814890d799e7f83e78176cc..87b17e5877ab8c6d9ed1ab082cda5ba974ab617b 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c @@ -2472,7 +2472,7 @@ static int qmp_combo_com_init(struct qmp_combo *qmp) ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs); if (ret) { dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret); - goto err_unlock; + goto err_decrement_count; } ret = reset_control_bulk_assert(cfg->num_resets, qmp->resets); @@ -2522,7 +2522,8 @@ err_assert_reset: reset_control_bulk_assert(cfg->num_resets, qmp->resets); err_disable_regulators: regulator_bulk_disable(cfg->num_vregs, qmp->vregs); -err_unlock: +err_decrement_count: + qmp->init_count--; mutex_unlock(&qmp->phy_mutex); return ret; diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c index 09824be088c96c0b6354ff0488068b17e573a34b..0c603bc06e0998a95a0853072a10fffba87958ff 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c @@ -379,7 +379,7 @@ static int qmp_pcie_msm8996_com_init(struct qmp_phy *qphy) ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs); if (ret) { dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret); - goto err_unlock; + goto err_decrement_count; } ret = reset_control_bulk_assert(cfg->num_resets, qmp->resets); @@ -409,7 +409,8 @@ err_assert_reset: reset_control_bulk_assert(cfg->num_resets, qmp->resets); err_disable_regulators: regulator_bulk_disable(cfg->num_vregs, qmp->vregs); -err_unlock: +err_decrement_count: + qmp->init_count--; mutex_unlock(&qmp->phy_mutex); return ret; diff --git a/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c b/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c index a590635962140f817e0ee3287d576bbd043ffa3c..6c237f3cc66db8235dd3ae100ce11a487a8bf06c 100644 --- a/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c +++ b/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c @@ -115,11 +115,11 @@ struct phy_override_seq { * * @cfg_ahb_clk: AHB2PHY interface clock * @ref_clk: phy reference clock - * @iface_clk: phy interface clock * @phy_reset: phy reset control * @vregs: regulator supplies bulk data * @phy_initialized: if PHY has been initialized correctly * @mode: contains the current mode the PHY is in + * @update_seq_cfg: tuning parameters for phy init */ struct qcom_snps_hsphy { struct phy *phy; diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg.c b/drivers/pinctrl/meson/pinctrl-meson-axg.c index 7bfecdfba17797620070ca9a8eeee2326837cd09..d249a035c2b9b37bea0f0e48fa5dbc75366cf8be 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-axg.c +++ 
b/drivers/pinctrl/meson/pinctrl-meson-axg.c @@ -400,6 +400,7 @@ static struct meson_pmx_group meson_axg_periphs_groups[] = { GPIO_GROUP(GPIOA_15), GPIO_GROUP(GPIOA_16), GPIO_GROUP(GPIOA_17), + GPIO_GROUP(GPIOA_18), GPIO_GROUP(GPIOA_19), GPIO_GROUP(GPIOA_20), diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c index c2c9b0d3244cbf2bc0cfad486169a0741d0d931e..be967d797c28e04c6ce726b434e0edb322737f88 100644 --- a/drivers/platform/mellanox/mlxbf-pmc.c +++ b/drivers/platform/mellanox/mlxbf-pmc.c @@ -1348,9 +1348,8 @@ static int mlxbf_pmc_map_counters(struct device *dev) for (i = 0; i < pmc->total_blocks; ++i) { if (strstr(pmc->block_name[i], "tile")) { - ret = sscanf(pmc->block_name[i], "tile%d", &tile_num); - if (ret < 0) - return ret; + if (sscanf(pmc->block_name[i], "tile%d", &tile_num) != 1) + return -EINVAL; if (tile_num >= pmc->tile_count) continue; diff --git a/drivers/platform/surface/aggregator/controller.c b/drivers/platform/surface/aggregator/controller.c index 535581c0471c509cc6836aedc66b32a345b0561d..7fc602e01487d42542338f4c767cac28b34fcd31 100644 --- a/drivers/platform/surface/aggregator/controller.c +++ b/drivers/platform/surface/aggregator/controller.c @@ -825,7 +825,7 @@ static int ssam_cplt_init(struct ssam_cplt *cplt, struct device *dev) cplt->dev = dev; - cplt->wq = create_workqueue(SSAM_CPLT_WQ_NAME); + cplt->wq = alloc_workqueue(SSAM_CPLT_WQ_NAME, WQ_UNBOUND | WQ_MEM_RECLAIM, 0); if (!cplt->wq) return -ENOMEM; diff --git a/drivers/platform/surface/surface_aggregator_tabletsw.c b/drivers/platform/surface/surface_aggregator_tabletsw.c index 8f52b62d1c19567b041a9558d92337fae2631474..c0a1a5869246edde0e5e65f1e1b75a019fe1ced0 100644 --- a/drivers/platform/surface/surface_aggregator_tabletsw.c +++ b/drivers/platform/surface/surface_aggregator_tabletsw.c @@ -210,6 +210,7 @@ enum ssam_kip_cover_state { SSAM_KIP_COVER_STATE_LAPTOP = 0x03, SSAM_KIP_COVER_STATE_FOLDED_CANVAS = 0x04, SSAM_KIP_COVER_STATE_FOLDED_BACK = 0x05, + SSAM_KIP_COVER_STATE_BOOK = 0x06, }; static const char *ssam_kip_cover_state_name(struct ssam_tablet_sw *sw, @@ -231,6 +232,9 @@ static const char *ssam_kip_cover_state_name(struct ssam_tablet_sw *sw, case SSAM_KIP_COVER_STATE_FOLDED_BACK: return "folded-back"; + case SSAM_KIP_COVER_STATE_BOOK: + return "book"; + default: dev_warn(&sw->sdev->dev, "unknown KIP cover state: %u\n", state->state); return ""; @@ -244,6 +248,7 @@ static bool ssam_kip_cover_state_is_tablet_mode(struct ssam_tablet_sw *sw, case SSAM_KIP_COVER_STATE_DISCONNECTED: case SSAM_KIP_COVER_STATE_FOLDED_CANVAS: case SSAM_KIP_COVER_STATE_FOLDED_BACK: + case SSAM_KIP_COVER_STATE_BOOK: return true; case SSAM_KIP_COVER_STATE_CLOSED: @@ -335,6 +340,7 @@ enum ssam_pos_state_cover { SSAM_POS_COVER_LAPTOP = 0x03, SSAM_POS_COVER_FOLDED_CANVAS = 0x04, SSAM_POS_COVER_FOLDED_BACK = 0x05, + SSAM_POS_COVER_BOOK = 0x06, }; enum ssam_pos_state_sls { @@ -367,6 +373,9 @@ static const char *ssam_pos_state_name_cover(struct ssam_tablet_sw *sw, u32 stat case SSAM_POS_COVER_FOLDED_BACK: return "folded-back"; + case SSAM_POS_COVER_BOOK: + return "book"; + default: dev_warn(&sw->sdev->dev, "unknown device posture for type-cover: %u\n", state); return ""; @@ -416,6 +425,7 @@ static bool ssam_pos_state_is_tablet_mode_cover(struct ssam_tablet_sw *sw, u32 s case SSAM_POS_COVER_DISCONNECTED: case SSAM_POS_COVER_FOLDED_CANVAS: case SSAM_POS_COVER_FOLDED_BACK: + case SSAM_POS_COVER_BOOK: return true; case SSAM_POS_COVER_CLOSED: diff --git a/drivers/platform/x86/amd/pmf/core.c 
b/drivers/platform/x86/amd/pmf/core.c index d5bb775dadcf20328294da3cea153075c6456138..7780705917b76f006cbdd3b678f43c165b9e63c7 100644 --- a/drivers/platform/x86/amd/pmf/core.c +++ b/drivers/platform/x86/amd/pmf/core.c @@ -245,24 +245,29 @@ static const struct pci_device_id pmf_pci_ids[] = { { } }; -int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev) +static void amd_pmf_set_dram_addr(struct amd_pmf_dev *dev) { u64 phys_addr; u32 hi, low; - INIT_DELAYED_WORK(&dev->work_buffer, amd_pmf_get_metrics); + phys_addr = virt_to_phys(dev->buf); + hi = phys_addr >> 32; + low = phys_addr & GENMASK(31, 0); + + amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, 0, hi, NULL); + amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, 0, low, NULL); +} +int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev) +{ /* Get Metrics Table Address */ dev->buf = kzalloc(sizeof(dev->m_table), GFP_KERNEL); if (!dev->buf) return -ENOMEM; - phys_addr = virt_to_phys(dev->buf); - hi = phys_addr >> 32; - low = phys_addr & GENMASK(31, 0); + INIT_DELAYED_WORK(&dev->work_buffer, amd_pmf_get_metrics); - amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, 0, hi, NULL); - amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, 0, low, NULL); + amd_pmf_set_dram_addr(dev); /* * Start collecting the metrics data after a small delay @@ -273,6 +278,18 @@ int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev) return 0; } +static int amd_pmf_resume_handler(struct device *dev) +{ + struct amd_pmf_dev *pdev = dev_get_drvdata(dev); + + if (pdev->buf) + amd_pmf_set_dram_addr(pdev); + + return 0; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(amd_pmf_pm, NULL, amd_pmf_resume_handler); + static void amd_pmf_init_features(struct amd_pmf_dev *dev) { int ret; @@ -280,6 +297,8 @@ static void amd_pmf_init_features(struct amd_pmf_dev *dev) /* Enable Static Slider */ if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) { amd_pmf_init_sps(dev); + dev->pwr_src_notifier.notifier_call = amd_pmf_pwr_src_notify_call; + power_supply_reg_notifier(&dev->pwr_src_notifier); dev_dbg(dev->dev, "SPS enabled and Platform Profiles registered\n"); } @@ -298,8 +317,10 @@ static void amd_pmf_init_features(struct amd_pmf_dev *dev) static void amd_pmf_deinit_features(struct amd_pmf_dev *dev) { - if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) + if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) { + power_supply_unreg_notifier(&dev->pwr_src_notifier); amd_pmf_deinit_sps(dev); + } if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) { amd_pmf_deinit_auto_mode(dev); @@ -382,9 +403,6 @@ static int amd_pmf_probe(struct platform_device *pdev) apmf_install_handler(dev); amd_pmf_dbgfs_register(dev); - dev->pwr_src_notifier.notifier_call = amd_pmf_pwr_src_notify_call; - power_supply_reg_notifier(&dev->pwr_src_notifier); - dev_info(dev->dev, "registered PMF device successfully\n"); return 0; @@ -394,7 +412,6 @@ static void amd_pmf_remove(struct platform_device *pdev) { struct amd_pmf_dev *dev = platform_get_drvdata(pdev); - power_supply_unreg_notifier(&dev->pwr_src_notifier); amd_pmf_deinit_features(dev); apmf_acpi_deinit(dev); amd_pmf_dbgfs_unregister(dev); @@ -413,6 +430,7 @@ static struct platform_driver amd_pmf_driver = { .name = "amd-pmf", .acpi_match_table = amd_pmf_acpi_ids, .dev_groups = amd_pmf_driver_groups, + .pm = pm_sleep_ptr(&amd_pmf_pm), }, .probe = amd_pmf_probe, .remove_new = amd_pmf_remove, diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index e2c9a68d12df9cc00ce3deda7964fb12bfd23a0f..fdf7da06af3067495fe7132e725a52542010ff8d 
100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -555,6 +555,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = { { KE_KEY, 0x71, { KEY_F13 } }, /* General-purpose button */ { KE_IGNORE, 0x79, }, /* Charger type dectection notification */ { KE_KEY, 0x7a, { KEY_ALS_TOGGLE } }, /* Ambient Light Sensor Toggle */ + { KE_IGNORE, 0x7B, }, /* Charger connect/disconnect notification */ { KE_KEY, 0x7c, { KEY_MICMUTE } }, { KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */ { KE_KEY, 0x7E, { KEY_BLUETOOTH } }, /* Bluetooth Disable */ @@ -584,6 +585,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = { { KE_KEY, 0xAE, { KEY_FN_F5 } }, /* Fn+F5 fan mode on 2020+ */ { KE_KEY, 0xB3, { KEY_PROG4 } }, /* AURA */ { KE_KEY, 0xB5, { KEY_CALC } }, + { KE_IGNORE, 0xC0, }, /* External display connect/disconnect notification */ { KE_KEY, 0xC4, { KEY_KBDILLUMUP } }, { KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } }, { KE_IGNORE, 0xC6, }, /* Ambient Light Sensor notification */ diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c index 61dffb4c8a1dcd07c42623b78b90d7eda8dba884..e6ae8265f3a37eab17d9698a78363fbadf562265 100644 --- a/drivers/platform/x86/intel/ifs/load.c +++ b/drivers/platform/x86/intel/ifs/load.c @@ -208,7 +208,7 @@ static int scan_chunks_sanity_check(struct device *dev) continue; reinit_completion(&ifs_done); local_work.dev = dev; - INIT_WORK(&local_work.w, copy_hashes_authenticate_chunks); + INIT_WORK_ONSTACK(&local_work.w, copy_hashes_authenticate_chunks); schedule_work_on(cpu, &local_work.w); wait_for_completion(&ifs_done); if (ifsd->loading_error) { diff --git a/drivers/platform/x86/intel/int3472/clk_and_regulator.c b/drivers/platform/x86/intel/int3472/clk_and_regulator.c index 1086c3d8349454f98ade335daf046bafc7d5116f..399f0623ca1b5ca817db2a30e5e4b425ef37cbed 100644 --- a/drivers/platform/x86/intel/int3472/clk_and_regulator.c +++ b/drivers/platform/x86/intel/int3472/clk_and_regulator.c @@ -101,9 +101,11 @@ int skl_int3472_register_clock(struct int3472_discrete_device *int3472, int3472->clock.ena_gpio = acpi_get_and_request_gpiod(path, agpio->pin_table[0], "int3472,clk-enable"); - if (IS_ERR(int3472->clock.ena_gpio)) - return dev_err_probe(int3472->dev, PTR_ERR(int3472->clock.ena_gpio), - "getting clk-enable GPIO\n"); + if (IS_ERR(int3472->clock.ena_gpio)) { + ret = PTR_ERR(int3472->clock.ena_gpio); + int3472->clock.ena_gpio = NULL; + return dev_err_probe(int3472->dev, ret, "getting clk-enable GPIO\n"); + } if (polarity == GPIO_ACTIVE_LOW) gpiod_toggle_active_low(int3472->clock.ena_gpio); @@ -199,8 +201,9 @@ int skl_int3472_register_regulator(struct int3472_discrete_device *int3472, int3472->regulator.gpio = acpi_get_and_request_gpiod(path, agpio->pin_table[0], "int3472,regulator"); if (IS_ERR(int3472->regulator.gpio)) { - dev_err(int3472->dev, "Failed to get regulator GPIO line\n"); - return PTR_ERR(int3472->regulator.gpio); + ret = PTR_ERR(int3472->regulator.gpio); + int3472->regulator.gpio = NULL; + return dev_err_probe(int3472->dev, ret, "getting regulator GPIO\n"); } /* Ensure the pin is in output mode and non-active state */ diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c index e0572a29212e850c28313bcca2822ca0097ba3c1..02fe360a59c7cf427c7293f5a0f3d89c78fe92ae 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c @@ -304,14 +304,13 @@ 
struct isst_if_pkg_info { static struct isst_if_cpu_info *isst_cpu_info; static struct isst_if_pkg_info *isst_pkg_info; -#define ISST_MAX_PCI_DOMAINS 8 - static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn) { struct pci_dev *matched_pci_dev = NULL; struct pci_dev *pci_dev = NULL; + struct pci_dev *_pci_dev = NULL; int no_matches = 0, pkg_id; - int i, bus_number; + int bus_number; if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 || cpu >= nr_cpu_ids || cpu >= num_possible_cpus()) @@ -323,12 +322,11 @@ static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn if (bus_number < 0) return NULL; - for (i = 0; i < ISST_MAX_PCI_DOMAINS; ++i) { - struct pci_dev *_pci_dev; + for_each_pci_dev(_pci_dev) { int node; - _pci_dev = pci_get_domain_bus_and_slot(i, bus_number, PCI_DEVFN(dev, fn)); - if (!_pci_dev) + if (_pci_dev->bus->number != bus_number || + _pci_dev->devfn != PCI_DEVFN(dev, fn)) continue; ++no_matches; diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c index 307ee6f71042e953f26a8f9c552aef63e10a6a13..6f83e99d2eb72e34b5666a76ecff1cf7ffd367ab 100644 --- a/drivers/power/supply/ab8500_btemp.c +++ b/drivers/power/supply/ab8500_btemp.c @@ -624,10 +624,8 @@ static int ab8500_btemp_get_ext_psy_data(struct device *dev, void *data) */ static void ab8500_btemp_external_power_changed(struct power_supply *psy) { - struct ab8500_btemp *di = power_supply_get_drvdata(psy); - - class_for_each_device(power_supply_class, NULL, - di->btemp_psy, ab8500_btemp_get_ext_psy_data); + class_for_each_device(power_supply_class, NULL, psy, + ab8500_btemp_get_ext_psy_data); } /* ab8500 btemp driver interrupts and their respective isr */ diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c index 41a7bff9ac3765639a463b24aa94da8ef105dc51..53560fbb6dcd34bb08db8543ce9eb7e338afdb40 100644 --- a/drivers/power/supply/ab8500_fg.c +++ b/drivers/power/supply/ab8500_fg.c @@ -2407,10 +2407,8 @@ out: */ static void ab8500_fg_external_power_changed(struct power_supply *psy) { - struct ab8500_fg *di = power_supply_get_drvdata(psy); - - class_for_each_device(power_supply_class, NULL, - di->fg_psy, ab8500_fg_get_ext_psy_data); + class_for_each_device(power_supply_class, NULL, psy, + ab8500_fg_get_ext_psy_data); } /** diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c index 05f41317846296b067f41d050abc66d26cf09ff0..3be6f3b10ea4286e218f876798a0c7f0b99e4aa0 100644 --- a/drivers/power/supply/axp288_fuel_gauge.c +++ b/drivers/power/supply/axp288_fuel_gauge.c @@ -507,7 +507,7 @@ static void fuel_gauge_external_power_changed(struct power_supply *psy) mutex_lock(&info->lock); info->valid = 0; /* Force updating of the cached registers */ mutex_unlock(&info->lock); - power_supply_changed(info->bat); + power_supply_changed(psy); } static struct power_supply_desc fuel_gauge_desc = { diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c index de67b985f0a913a355ea3c27e3897d7373b78db2..dc33f00fcc0682a3c69ead86186df4219a1cd879 100644 --- a/drivers/power/supply/bq24190_charger.c +++ b/drivers/power/supply/bq24190_charger.c @@ -1262,6 +1262,7 @@ static void bq24190_input_current_limit_work(struct work_struct *work) bq24190_charger_set_property(bdi->charger, POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, &val); + power_supply_changed(bdi->charger); } /* Sync the input-current-limit with our parent supply (if we have one) */ diff --git 
a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c index 22cde35eb144c612edc491307c06f5dddad9e3ab..f8636cf86505a7749eb7835eb422eda819137cf6 100644 --- a/drivers/power/supply/bq25890_charger.c +++ b/drivers/power/supply/bq25890_charger.c @@ -750,7 +750,7 @@ static void bq25890_charger_external_power_changed(struct power_supply *psy) if (bq->chip_version != BQ25892) return; - ret = power_supply_get_property_from_supplier(bq->charger, + ret = power_supply_get_property_from_supplier(psy, POWER_SUPPLY_PROP_USB_TYPE, &val); if (ret) @@ -775,6 +775,7 @@ static void bq25890_charger_external_power_changed(struct power_supply *psy) } bq25890_field_write(bq, F_IINLIM, input_current_limit); + power_supply_changed(psy); } static int bq25890_get_chip_state(struct bq25890_device *bq, @@ -1106,6 +1107,8 @@ static void bq25890_pump_express_work(struct work_struct *data) dev_info(bq->dev, "Hi-voltage charging requested, input voltage is %d mV\n", voltage); + power_supply_changed(bq->charger); + return; error_print: bq25890_field_write(bq, F_PUMPX_EN, 0); diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c index 5ff6f44fd47b22e709813fbfdb3eedd7a4320aa5..4296600e8912a3988c45286f58420ffabb547345 100644 --- a/drivers/power/supply/bq27xxx_battery.c +++ b/drivers/power/supply/bq27xxx_battery.c @@ -1083,10 +1083,8 @@ static int poll_interval_param_set(const char *val, const struct kernel_param *k return ret; mutex_lock(&bq27xxx_list_lock); - list_for_each_entry(di, &bq27xxx_battery_devices, list) { - cancel_delayed_work_sync(&di->work); - schedule_delayed_work(&di->work, 0); - } + list_for_each_entry(di, &bq27xxx_battery_devices, list) + mod_delayed_work(system_wq, &di->work, 0); mutex_unlock(&bq27xxx_list_lock); return ret; @@ -1761,60 +1759,6 @@ static int bq27xxx_battery_read_health(struct bq27xxx_device_info *di) return POWER_SUPPLY_HEALTH_GOOD; } -void bq27xxx_battery_update(struct bq27xxx_device_info *di) -{ - struct bq27xxx_reg_cache cache = {0, }; - bool has_singe_flag = di->opts & BQ27XXX_O_ZERO; - - cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag); - if ((cache.flags & 0xff) == 0xff) - cache.flags = -1; /* read error */ - if (cache.flags >= 0) { - cache.temperature = bq27xxx_battery_read_temperature(di); - if (di->regs[BQ27XXX_REG_TTE] != INVALID_REG_ADDR) - cache.time_to_empty = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTE); - if (di->regs[BQ27XXX_REG_TTECP] != INVALID_REG_ADDR) - cache.time_to_empty_avg = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTECP); - if (di->regs[BQ27XXX_REG_TTF] != INVALID_REG_ADDR) - cache.time_to_full = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTF); - - cache.charge_full = bq27xxx_battery_read_fcc(di); - cache.capacity = bq27xxx_battery_read_soc(di); - if (di->regs[BQ27XXX_REG_AE] != INVALID_REG_ADDR) - cache.energy = bq27xxx_battery_read_energy(di); - di->cache.flags = cache.flags; - cache.health = bq27xxx_battery_read_health(di); - if (di->regs[BQ27XXX_REG_CYCT] != INVALID_REG_ADDR) - cache.cycle_count = bq27xxx_battery_read_cyct(di); - - /* We only have to read charge design full once */ - if (di->charge_design_full <= 0) - di->charge_design_full = bq27xxx_battery_read_dcap(di); - } - - if ((di->cache.capacity != cache.capacity) || - (di->cache.flags != cache.flags)) - power_supply_changed(di->bat); - - if (memcmp(&di->cache, &cache, sizeof(cache)) != 0) - di->cache = cache; - - di->last_update = jiffies; -} -EXPORT_SYMBOL_GPL(bq27xxx_battery_update); - -static void 
bq27xxx_battery_poll(struct work_struct *work) -{ - struct bq27xxx_device_info *di = - container_of(work, struct bq27xxx_device_info, - work.work); - - bq27xxx_battery_update(di); - - if (poll_interval > 0) - schedule_delayed_work(&di->work, poll_interval * HZ); -} - static bool bq27xxx_battery_is_full(struct bq27xxx_device_info *di, int flags) { if (di->opts & BQ27XXX_O_ZERO) @@ -1833,7 +1777,8 @@ static bool bq27xxx_battery_is_full(struct bq27xxx_device_info *di, int flags) static int bq27xxx_battery_current_and_status( struct bq27xxx_device_info *di, union power_supply_propval *val_curr, - union power_supply_propval *val_status) + union power_supply_propval *val_status, + struct bq27xxx_reg_cache *cache) { bool single_flags = (di->opts & BQ27XXX_O_ZERO); int curr; @@ -1845,10 +1790,14 @@ static int bq27xxx_battery_current_and_status( return curr; } - flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, single_flags); - if (flags < 0) { - dev_err(di->dev, "error reading flags\n"); - return flags; + if (cache) { + flags = cache->flags; + } else { + flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, single_flags); + if (flags < 0) { + dev_err(di->dev, "error reading flags\n"); + return flags; + } } if (di->opts & BQ27XXX_O_ZERO) { @@ -1883,6 +1832,78 @@ static int bq27xxx_battery_current_and_status( return 0; } +static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di) +{ + union power_supply_propval status = di->last_status; + struct bq27xxx_reg_cache cache = {0, }; + bool has_singe_flag = di->opts & BQ27XXX_O_ZERO; + + cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag); + if ((cache.flags & 0xff) == 0xff) + cache.flags = -1; /* read error */ + if (cache.flags >= 0) { + cache.temperature = bq27xxx_battery_read_temperature(di); + if (di->regs[BQ27XXX_REG_TTE] != INVALID_REG_ADDR) + cache.time_to_empty = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTE); + if (di->regs[BQ27XXX_REG_TTECP] != INVALID_REG_ADDR) + cache.time_to_empty_avg = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTECP); + if (di->regs[BQ27XXX_REG_TTF] != INVALID_REG_ADDR) + cache.time_to_full = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTF); + + cache.charge_full = bq27xxx_battery_read_fcc(di); + cache.capacity = bq27xxx_battery_read_soc(di); + if (di->regs[BQ27XXX_REG_AE] != INVALID_REG_ADDR) + cache.energy = bq27xxx_battery_read_energy(di); + di->cache.flags = cache.flags; + cache.health = bq27xxx_battery_read_health(di); + if (di->regs[BQ27XXX_REG_CYCT] != INVALID_REG_ADDR) + cache.cycle_count = bq27xxx_battery_read_cyct(di); + + /* + * On gauges with signed current reporting the current must be + * checked to detect charging <-> discharging status changes. 
+ */ + if (!(di->opts & BQ27XXX_O_ZERO)) + bq27xxx_battery_current_and_status(di, NULL, &status, &cache); + + /* We only have to read charge design full once */ + if (di->charge_design_full <= 0) + di->charge_design_full = bq27xxx_battery_read_dcap(di); + } + + if ((di->cache.capacity != cache.capacity) || + (di->cache.flags != cache.flags) || + (di->last_status.intval != status.intval)) { + di->last_status.intval = status.intval; + power_supply_changed(di->bat); + } + + if (memcmp(&di->cache, &cache, sizeof(cache)) != 0) + di->cache = cache; + + di->last_update = jiffies; + + if (!di->removed && poll_interval > 0) + mod_delayed_work(system_wq, &di->work, poll_interval * HZ); +} + +void bq27xxx_battery_update(struct bq27xxx_device_info *di) +{ + mutex_lock(&di->lock); + bq27xxx_battery_update_unlocked(di); + mutex_unlock(&di->lock); +} +EXPORT_SYMBOL_GPL(bq27xxx_battery_update); + +static void bq27xxx_battery_poll(struct work_struct *work) +{ + struct bq27xxx_device_info *di = + container_of(work, struct bq27xxx_device_info, + work.work); + + bq27xxx_battery_update(di); +} + /* * Get the average power in µW * Return < 0 if something fails. @@ -1985,10 +2006,8 @@ static int bq27xxx_battery_get_property(struct power_supply *psy, struct bq27xxx_device_info *di = power_supply_get_drvdata(psy); mutex_lock(&di->lock); - if (time_is_before_jiffies(di->last_update + 5 * HZ)) { - cancel_delayed_work_sync(&di->work); - bq27xxx_battery_poll(&di->work.work); - } + if (time_is_before_jiffies(di->last_update + 5 * HZ)) + bq27xxx_battery_update_unlocked(di); mutex_unlock(&di->lock); if (psp != POWER_SUPPLY_PROP_PRESENT && di->cache.flags < 0) @@ -1996,7 +2015,7 @@ static int bq27xxx_battery_get_property(struct power_supply *psy, switch (psp) { case POWER_SUPPLY_PROP_STATUS: - ret = bq27xxx_battery_current_and_status(di, NULL, val); + ret = bq27xxx_battery_current_and_status(di, NULL, val, NULL); break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: ret = bq27xxx_battery_voltage(di, val); @@ -2005,7 +2024,7 @@ static int bq27xxx_battery_get_property(struct power_supply *psy, val->intval = di->cache.flags < 0 ? 0 : 1; break; case POWER_SUPPLY_PROP_CURRENT_NOW: - ret = bq27xxx_battery_current_and_status(di, val, NULL); + ret = bq27xxx_battery_current_and_status(di, val, NULL, NULL); break; case POWER_SUPPLY_PROP_CAPACITY: ret = bq27xxx_simple_value(di->cache.capacity, val); @@ -2078,8 +2097,8 @@ static void bq27xxx_external_power_changed(struct power_supply *psy) { struct bq27xxx_device_info *di = power_supply_get_drvdata(psy); - cancel_delayed_work_sync(&di->work); - schedule_delayed_work(&di->work, 0); + /* After charger plug in/out wait 0.5s for things to stabilize */ + mod_delayed_work(system_wq, &di->work, HZ / 2); } int bq27xxx_battery_setup(struct bq27xxx_device_info *di) @@ -2127,22 +2146,18 @@ EXPORT_SYMBOL_GPL(bq27xxx_battery_setup); void bq27xxx_battery_teardown(struct bq27xxx_device_info *di) { - /* - * power_supply_unregister call bq27xxx_battery_get_property which - * call bq27xxx_battery_poll. - * Make sure that bq27xxx_battery_poll will not call - * schedule_delayed_work again after unregister (which cause OOPS). 
- */ - poll_interval = 0; - - cancel_delayed_work_sync(&di->work); - - power_supply_unregister(di->bat); - mutex_lock(&bq27xxx_list_lock); list_del(&di->list); mutex_unlock(&bq27xxx_list_lock); + /* Set removed to avoid bq27xxx_battery_update() re-queuing the work */ + mutex_lock(&di->lock); + di->removed = true; + mutex_unlock(&di->lock); + + cancel_delayed_work_sync(&di->work); + + power_supply_unregister(di->bat); mutex_destroy(&di->lock); } EXPORT_SYMBOL_GPL(bq27xxx_battery_teardown); diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c index f8768997333bc59412d040cee7427efad086a7f2..6d3c748763390675f759388cc1ceef3d73afee02 100644 --- a/drivers/power/supply/bq27xxx_battery_i2c.c +++ b/drivers/power/supply/bq27xxx_battery_i2c.c @@ -179,7 +179,7 @@ static int bq27xxx_battery_i2c_probe(struct i2c_client *client) i2c_set_clientdata(client, di); if (client->irq) { - ret = devm_request_threaded_irq(&client->dev, client->irq, + ret = request_threaded_irq(client->irq, NULL, bq27xxx_battery_irq_handler_thread, IRQF_ONESHOT, di->name, di); @@ -209,6 +209,7 @@ static void bq27xxx_battery_i2c_remove(struct i2c_client *client) { struct bq27xxx_device_info *di = i2c_get_clientdata(client); + free_irq(client->irq, di); bq27xxx_battery_teardown(di); mutex_lock(&battery_mutex); diff --git a/drivers/power/supply/mt6360_charger.c b/drivers/power/supply/mt6360_charger.c index 92e48e3a485364dd154e8489398488494215874c..1305cba61edd4b957313d7d86fb05158975b3086 100644 --- a/drivers/power/supply/mt6360_charger.c +++ b/drivers/power/supply/mt6360_charger.c @@ -796,7 +796,9 @@ static int mt6360_charger_probe(struct platform_device *pdev) mci->vinovp = 6500000; mutex_init(&mci->chgdet_lock); platform_set_drvdata(pdev, mci); - devm_work_autocancel(&pdev->dev, &mci->chrdet_work, mt6360_chrdet_work); + ret = devm_work_autocancel(&pdev->dev, &mci->chrdet_work, mt6360_chrdet_work); + if (ret) + return dev_err_probe(&pdev->dev, ret, "Failed to set delayed work\n"); ret = device_property_read_u32(&pdev->dev, "richtek,vinovp-microvolt", &mci->vinovp); if (ret) diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c index ab986dbace16a309e8598d802095efab4ae4d3a9..3791aec69ddc675404ccc5974710a01ca49be9ad 100644 --- a/drivers/power/supply/power_supply_core.c +++ b/drivers/power/supply/power_supply_core.c @@ -348,6 +348,10 @@ static int __power_supply_is_system_supplied(struct device *dev, void *data) struct power_supply *psy = dev_get_drvdata(dev); unsigned int *count = data; + if (!psy->desc->get_property(psy, POWER_SUPPLY_PROP_SCOPE, &ret)) + if (ret.intval == POWER_SUPPLY_SCOPE_DEVICE) + return 0; + (*count)++; if (psy->desc->type != POWER_SUPPLY_TYPE_BATTERY) if (!psy->desc->get_property(psy, POWER_SUPPLY_PROP_ONLINE, @@ -366,8 +370,8 @@ int power_supply_is_system_supplied(void) __power_supply_is_system_supplied); /* - * If no power class device was found at all, most probably we are - * running on a desktop system, so assume we are on mains power. + * If no system scope power class device was found at all, most probably we + * are running on a desktop system, so assume we are on mains power. 
*/ if (count == 0) return 1; @@ -573,7 +577,7 @@ int power_supply_get_battery_info(struct power_supply *psy, struct power_supply_battery_info *info; struct device_node *battery_np = NULL; struct fwnode_reference_args args; - struct fwnode_handle *fwnode; + struct fwnode_handle *fwnode = NULL; const char *value; int err, len, index; const __be32 *list; @@ -585,7 +589,7 @@ int power_supply_get_battery_info(struct power_supply *psy, return -ENODEV; fwnode = fwnode_handle_get(of_fwnode_handle(battery_np)); - } else { + } else if (psy->dev.parent) { err = fwnode_property_get_reference_args( dev_fwnode(psy->dev.parent), "monitored-battery", NULL, 0, 0, &args); @@ -595,6 +599,9 @@ int power_supply_get_battery_info(struct power_supply *psy, fwnode = args.fwnode; } + if (!fwnode) + return -ENOENT; + err = fwnode_property_read_string(fwnode, "compatible", &value); if (err) goto out_put_node; diff --git a/drivers/power/supply/power_supply_leds.c b/drivers/power/supply/power_supply_leds.c index 702bf83f6e6d256e7012c9657b750ee3f51d2ca4..0674483279d77c2b7bd1e8e687839332074d1ca8 100644 --- a/drivers/power/supply/power_supply_leds.c +++ b/drivers/power/supply/power_supply_leds.c @@ -35,8 +35,9 @@ static void power_supply_update_bat_leds(struct power_supply *psy) led_trigger_event(psy->charging_full_trig, LED_FULL); led_trigger_event(psy->charging_trig, LED_OFF); led_trigger_event(psy->full_trig, LED_FULL); - led_trigger_event(psy->charging_blink_full_solid_trig, - LED_FULL); + /* Going from blink to LED on requires a LED_OFF event to stop blink */ + led_trigger_event(psy->charging_blink_full_solid_trig, LED_OFF); + led_trigger_event(psy->charging_blink_full_solid_trig, LED_FULL); break; case POWER_SUPPLY_STATUS_CHARGING: led_trigger_event(psy->charging_full_trig, LED_FULL); diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index ba3b125cd66e432631d6170b048bc089870aedd7..06e5b6b0e255ce8bf159c2f5f0e012916fe12551 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -286,7 +286,8 @@ static ssize_t power_supply_show_property(struct device *dev, if (ret < 0) { if (ret == -ENODATA) - dev_dbg(dev, "driver has no data for `%s' property\n", + dev_dbg_ratelimited(dev, + "driver has no data for `%s' property\n", attr->attr.name); else if (ret != -ENODEV && ret != -EAGAIN) dev_err_ratelimited(dev, diff --git a/drivers/power/supply/rt9467-charger.c b/drivers/power/supply/rt9467-charger.c index 73f744a3155d4ce55011d26310173b6eb69803cb..ea33693b69779ac0928d112ec8ec5d45dc5a90e3 100644 --- a/drivers/power/supply/rt9467-charger.c +++ b/drivers/power/supply/rt9467-charger.c @@ -1023,7 +1023,7 @@ static int rt9467_request_interrupt(struct rt9467_chg_data *data) for (i = 0; i < num_chg_irqs; i++) { virq = regmap_irq_get_virq(data->irq_chip_data, chg_irqs[i].hwirq); if (virq <= 0) - return dev_err_probe(dev, virq, "Failed to get (%s) irq\n", + return dev_err_probe(dev, -EINVAL, "Failed to get (%s) irq\n", chg_irqs[i].name); ret = devm_request_threaded_irq(dev, virq, NULL, chg_irqs[i].handler, diff --git a/drivers/power/supply/sbs-charger.c b/drivers/power/supply/sbs-charger.c index 75ebcbf0a7883750b009ef21a6bd10692f138b1a..a14e89ac0369cae0a0990775eb41fec84d537c4b 100644 --- a/drivers/power/supply/sbs-charger.c +++ b/drivers/power/supply/sbs-charger.c @@ -24,7 +24,7 @@ #define SBS_CHARGER_REG_STATUS 0x13 #define SBS_CHARGER_REG_ALARM_WARNING 0x16 -#define SBS_CHARGER_STATUS_CHARGE_INHIBITED BIT(1) +#define 
SBS_CHARGER_STATUS_CHARGE_INHIBITED BIT(0) #define SBS_CHARGER_STATUS_RES_COLD BIT(9) #define SBS_CHARGER_STATUS_RES_HOT BIT(10) #define SBS_CHARGER_STATUS_BATTERY_PRESENT BIT(14) diff --git a/drivers/power/supply/sc27xx_fuel_gauge.c b/drivers/power/supply/sc27xx_fuel_gauge.c index 632977f84b954e6355e87a1edcac7b24020f16c1..bd23c4d9fed43482e972ccc086311e7bfcec2d54 100644 --- a/drivers/power/supply/sc27xx_fuel_gauge.c +++ b/drivers/power/supply/sc27xx_fuel_gauge.c @@ -733,13 +733,6 @@ static int sc27xx_fgu_set_property(struct power_supply *psy, return ret; } -static void sc27xx_fgu_external_power_changed(struct power_supply *psy) -{ - struct sc27xx_fgu_data *data = power_supply_get_drvdata(psy); - - power_supply_changed(data->battery); -} - static int sc27xx_fgu_property_is_writeable(struct power_supply *psy, enum power_supply_property psp) { @@ -774,7 +767,7 @@ static const struct power_supply_desc sc27xx_fgu_desc = { .num_properties = ARRAY_SIZE(sc27xx_fgu_props), .get_property = sc27xx_fgu_get_property, .set_property = sc27xx_fgu_set_property, - .external_power_changed = sc27xx_fgu_external_power_changed, + .external_power_changed = power_supply_changed, .property_is_writeable = sc27xx_fgu_property_is_writeable, .no_thermal = true, }; diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index dc741ac156c32c084c7f469e44feccf297863ece..698ab7f5004bf6b74e2e57ac224f6138139f4666 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -5256,7 +5256,7 @@ static void rdev_init_debugfs(struct regulator_dev *rdev) } rdev->debugfs = debugfs_create_dir(rname, debugfs_root); - if (!rdev->debugfs) { + if (IS_ERR(rdev->debugfs)) { rdev_warn(rdev, "Failed to create debugfs directory\n"); return; } @@ -6178,7 +6178,7 @@ static int __init regulator_init(void) ret = class_register(®ulator_class); debugfs_root = debugfs_create_dir("regulator", NULL); - if (!debugfs_root) + if (IS_ERR(debugfs_root)) pr_warn("regulator: Failed to create debugfs directory\n"); #ifdef CONFIG_DEBUG_FS diff --git a/drivers/regulator/mt6359-regulator.c b/drivers/regulator/mt6359-regulator.c index 1849566784ab5e60b75cba257b965baa3bc388e7..3eb86ec21d08800bac3838f443f6615e75cb1562 100644 --- a/drivers/regulator/mt6359-regulator.c +++ b/drivers/regulator/mt6359-regulator.c @@ -951,9 +951,12 @@ static int mt6359_regulator_probe(struct platform_device *pdev) struct regulator_config config = {}; struct regulator_dev *rdev; struct mt6359_regulator_info *mt6359_info; - int i, hw_ver; + int i, hw_ver, ret; + + ret = regmap_read(mt6397->regmap, MT6359P_HWCID, &hw_ver); + if (ret) + return ret; - regmap_read(mt6397->regmap, MT6359P_HWCID, &hw_ver); if (hw_ver >= MT6359P_CHIP_VER) mt6359_info = mt6359p_regulators; else diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c index 87a746dcb5169759f5413dfd8fe150933e267c3e..e75dd92f86ca728435247b1523a1a3283e0e0efc 100644 --- a/drivers/regulator/pca9450-regulator.c +++ b/drivers/regulator/pca9450-regulator.c @@ -264,7 +264,7 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = { .vsel_reg = PCA9450_REG_BUCK2OUT_DVS0, .vsel_mask = BUCK2OUT_DVS0_MASK, .enable_reg = PCA9450_REG_BUCK2CTRL, - .enable_mask = BUCK1_ENMODE_MASK, + .enable_mask = BUCK2_ENMODE_MASK, .ramp_reg = PCA9450_REG_BUCK2CTRL, .ramp_mask = BUCK2_RAMP_MASK, .ramp_delay_table = pca9450_dvs_buck_ramp_table, @@ -502,7 +502,7 @@ static const struct pca9450_regulator_desc pca9450bc_regulators[] = { .vsel_reg = PCA9450_REG_BUCK2OUT_DVS0, .vsel_mask = 
BUCK2OUT_DVS0_MASK, .enable_reg = PCA9450_REG_BUCK2CTRL, - .enable_mask = BUCK1_ENMODE_MASK, + .enable_mask = BUCK2_ENMODE_MASK, .ramp_reg = PCA9450_REG_BUCK2CTRL, .ramp_mask = BUCK2_RAMP_MASK, .ramp_delay_table = pca9450_dvs_buck_ramp_table, diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c index b0a58c62b1e2c2f35dee8720fc5f0f85a5b5a5f1..f3b280af0773786999b36877f00f1eb52cf88d21 100644 --- a/drivers/regulator/qcom-rpmh-regulator.c +++ b/drivers/regulator/qcom-rpmh-regulator.c @@ -1057,21 +1057,21 @@ static const struct rpmh_vreg_init_data pm8450_vreg_data[] = { }; static const struct rpmh_vreg_init_data pm8550_vreg_data[] = { - RPMH_VREG("ldo1", "ldo%s1", &pmic5_pldo, "vdd-l1-l4-l10"), + RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo515, "vdd-l1-l4-l10"), RPMH_VREG("ldo2", "ldo%s2", &pmic5_pldo, "vdd-l2-l13-l14"), - RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo, "vdd-l3"), - RPMH_VREG("ldo4", "ldo%s4", &pmic5_nldo, "vdd-l1-l4-l10"), + RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo515, "vdd-l3"), + RPMH_VREG("ldo4", "ldo%s4", &pmic5_nldo515, "vdd-l1-l4-l10"), RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, "vdd-l5-l16"), - RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo_lv, "vdd-l6-l7"), - RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo_lv, "vdd-l6-l7"), - RPMH_VREG("ldo8", "ldo%s8", &pmic5_pldo_lv, "vdd-l8-l9"), + RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo, "vdd-l6-l7"), + RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo, "vdd-l6-l7"), + RPMH_VREG("ldo8", "ldo%s8", &pmic5_pldo, "vdd-l8-l9"), RPMH_VREG("ldo9", "ldo%s9", &pmic5_pldo, "vdd-l8-l9"), - RPMH_VREG("ldo10", "ldo%s10", &pmic5_nldo, "vdd-l1-l4-l10"), - RPMH_VREG("ldo11", "ldo%s11", &pmic5_nldo, "vdd-l11"), + RPMH_VREG("ldo10", "ldo%s10", &pmic5_nldo515, "vdd-l1-l4-l10"), + RPMH_VREG("ldo11", "ldo%s11", &pmic5_nldo515, "vdd-l11"), RPMH_VREG("ldo12", "ldo%s12", &pmic5_pldo, "vdd-l12"), RPMH_VREG("ldo13", "ldo%s13", &pmic5_pldo, "vdd-l2-l13-l14"), RPMH_VREG("ldo14", "ldo%s14", &pmic5_pldo, "vdd-l2-l13-l14"), - RPMH_VREG("ldo15", "ldo%s15", &pmic5_pldo, "vdd-l15"), + RPMH_VREG("ldo15", "ldo%s15", &pmic5_nldo515, "vdd-l15"), RPMH_VREG("ldo16", "ldo%s16", &pmic5_pldo, "vdd-l5-l16"), RPMH_VREG("ldo17", "ldo%s17", &pmic5_pldo, "vdd-l17"), RPMH_VREG("bob1", "bob%s1", &pmic5_bob, "vdd-bob1"), @@ -1086,9 +1086,9 @@ static const struct rpmh_vreg_init_data pm8550vs_vreg_data[] = { RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525_lv, "vdd-s4"), RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525_lv, "vdd-s5"), RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525_mv, "vdd-s6"), - RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo, "vdd-l1"), - RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo, "vdd-l2"), - RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo, "vdd-l3"), + RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo515, "vdd-l1"), + RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo515, "vdd-l2"), + RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo515, "vdd-l3"), {} }; @@ -1101,9 +1101,9 @@ static const struct rpmh_vreg_init_data pm8550ve_vreg_data[] = { RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525_lv, "vdd-s6"), RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps525_lv, "vdd-s7"), RPMH_VREG("smps8", "smp%s8", &pmic5_ftsmps525_lv, "vdd-s8"), - RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo, "vdd-l1"), - RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo, "vdd-l2"), - RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo, "vdd-l3"), + RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo515, "vdd-l1"), + RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo515, "vdd-l2"), + RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo515, "vdd-l3"), {} }; diff --git a/drivers/s390/block/dasd_eckd.c 
b/drivers/s390/block/dasd_eckd.c index ade1369fe5ed3056706b3f8361af2efe8dc6e327..113c509bf6d052bf70b3642adcf4bb6ad41ffeea 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -127,6 +127,8 @@ static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int, struct dasd_device *, struct dasd_device *, unsigned int, int, unsigned int, unsigned int, unsigned int, unsigned int); +static int dasd_eckd_query_pprc_status(struct dasd_device *, + struct dasd_pprc_data_sc4 *); /* initial attempt at a probe function. this can be simplified once * the other detection code is gone */ @@ -3733,6 +3735,26 @@ static int count_exts(unsigned int from, unsigned int to, int trks_per_ext) return count; } +static int dasd_in_copy_relation(struct dasd_device *device) +{ + struct dasd_pprc_data_sc4 *temp; + int rc; + + if (!dasd_eckd_pprc_enabled(device)) + return 0; + + temp = kzalloc(sizeof(*temp), GFP_KERNEL); + if (!temp) + return -ENOMEM; + + rc = dasd_eckd_query_pprc_status(device, temp); + if (!rc) + rc = temp->dev_info[0].state; + + kfree(temp); + return rc; +} + /* * Release allocated space for a given range or an entire volume. */ @@ -3749,6 +3771,7 @@ dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block, int cur_to_trk, cur_from_trk; struct dasd_ccw_req *cqr; u32 beg_cyl, end_cyl; + int copy_relation; struct ccw1 *ccw; int trks_per_ext; size_t ras_size; @@ -3760,6 +3783,10 @@ dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block, if (dasd_eckd_ras_sanity_checks(device, first_trk, last_trk)) return ERR_PTR(-EINVAL); + copy_relation = dasd_in_copy_relation(device); + if (copy_relation < 0) + return ERR_PTR(copy_relation); + rq = req ? blk_mq_rq_to_pdu(req) : NULL; features = &private->features; @@ -3788,9 +3815,11 @@ dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block, /* * This bit guarantees initialisation of tracks within an extent that is * not fully specified, but is only supported with a certain feature - * subset. + * subset and for devices not in a copy relation. 
*/ - ras_data->op_flags.guarantee_init = !!(features->feature[56] & 0x01); + if (features->feature[56] & 0x01 && !copy_relation) + ras_data->op_flags.guarantee_init = 1; + ras_data->lss = private->conf.ned->ID; ras_data->dev_addr = private->conf.ned->unit_addr; ras_data->nr_exts = nr_exts; diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c index 9327dcdd6e5e43271276226ebdda360130389ee8..8fca725b3daeceda4f80be4200d65464349aae9f 100644 --- a/drivers/s390/block/dasd_ioctl.c +++ b/drivers/s390/block/dasd_ioctl.c @@ -552,10 +552,10 @@ static int __dasd_ioctl_information(struct dasd_block *block, memcpy(dasd_info->type, base->discipline->name, 4); - spin_lock_irqsave(&block->queue_lock, flags); + spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); list_for_each(l, &base->ccw_queue) dasd_info->chanq_len++; - spin_unlock_irqrestore(&block->queue_lock, flags); + spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); return 0; } diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 8eb089b99cde92bad4670656b281b4cfcc8425b6..c0d620ffea618e87da0ca248cc2effbc3800cc62 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -1111,6 +1111,8 @@ static void io_subchannel_verify(struct subchannel *sch) cdev = sch_get_cdev(sch); if (cdev) dev_fsm_event(cdev, DEV_EVENT_VERIFY); + else + css_schedule_eval(sch->schid); } static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask) @@ -1374,6 +1376,7 @@ void ccw_device_set_notoper(struct ccw_device *cdev) enum io_sch_action { IO_SCH_UNREG, IO_SCH_ORPH_UNREG, + IO_SCH_UNREG_CDEV, IO_SCH_ATTACH, IO_SCH_UNREG_ATTACH, IO_SCH_ORPH_ATTACH, @@ -1406,7 +1409,7 @@ static enum io_sch_action sch_get_action(struct subchannel *sch) } if ((sch->schib.pmcw.pam & sch->opm) == 0) { if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) - return IO_SCH_UNREG; + return IO_SCH_UNREG_CDEV; return IO_SCH_DISC; } if (device_is_disconnected(cdev)) @@ -1468,6 +1471,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process) case IO_SCH_ORPH_ATTACH: ccw_device_set_disconnected(cdev); break; + case IO_SCH_UNREG_CDEV: case IO_SCH_UNREG_ATTACH: case IO_SCH_UNREG: if (!cdev) @@ -1501,6 +1505,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process) if (rc) goto out; break; + case IO_SCH_UNREG_CDEV: case IO_SCH_UNREG_ATTACH: spin_lock_irqsave(sch->lock, flags); sch_set_cdev(sch, NULL); diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index 5ea6249d81803ad2358592f2e4c40cb167692828..641f0dbb65a90b15577b27a941a8b65718b09159 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -95,7 +95,7 @@ static inline int do_sqbs(u64 token, unsigned char state, int queue, " lgr 1,%[token]\n" " .insn rsy,0xeb000000008a,%[qs],%[ccq],0(%[state])" : [ccq] "+&d" (_ccq), [qs] "+&d" (_queuestart) - : [state] "d" ((unsigned long)state), [token] "d" (token) + : [state] "a" ((unsigned long)state), [token] "d" (token) : "memory", "cc", "1"); *count = _ccq & 0xff; *start = _queuestart & 0xff; diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index 5a05d1cdfec20adf5bc43a6fad6fa2f219fc9805..a8def50c149bd433b1324cbd6d3798ea4130bc7e 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -1293,6 +1293,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, return PTR_ERR(kkey); rc = pkey_keyblob2pkey(kkey, ktp.keylen, &ktp.protkey); DEBUG_DBG("%s pkey_keyblob2pkey()=%d\n", __func__, rc); + 
memzero_explicit(kkey, ktp.keylen); kfree(kkey); if (rc) break; @@ -1426,6 +1427,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, kkey, ktp.keylen, &ktp.protkey); DEBUG_DBG("%s pkey_keyblob2pkey2()=%d\n", __func__, rc); kfree(apqns); + memzero_explicit(kkey, ktp.keylen); kfree(kkey); if (rc) break; @@ -1552,6 +1554,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, protkey, &protkeylen); DEBUG_DBG("%s pkey_keyblob2pkey3()=%d\n", __func__, rc); kfree(apqns); + memzero_explicit(kkey, ktp.keylen); kfree(kkey); if (rc) { kfree(protkey); diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c index 8acb9eba691b5ec9f804f8cf647f9752ee54a345..c2096e4bba319960de45c4768943d0aa1634f87a 100644 --- a/drivers/s390/net/ism_drv.c +++ b/drivers/s390/net/ism_drv.c @@ -771,14 +771,6 @@ static int __init ism_init(void) static void __exit ism_exit(void) { - struct ism_dev *ism; - - mutex_lock(&ism_dev_list.mutex); - list_for_each_entry(ism, &ism_dev_list.list, list) { - ism_dev_exit(ism); - } - mutex_unlock(&ism_dev_list.mutex); - pci_unregister_driver(&ism_driver); debug_unregister(ism_debug_info); } diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 5e115e8b2ba462bae6d821e195a6f9462e10c70f..7c6efde75da664b3819161d5aa96466b0d26aaa1 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -1678,6 +1678,7 @@ struct aac_dev u32 handle_pci_error; bool init_reset; u8 soft_reset_support; + u8 use_map_queue; }; #define aac_adapter_interrupt(dev) \ diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index deb32c9f4b3e6fca8041c3aa093671d04a58b53d..3f062e4013ab6c3b9cb54e46f608bee974b5e9a1 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -223,8 +223,12 @@ int aac_fib_setup(struct aac_dev * dev) struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd) { struct fib *fibptr; + u32 blk_tag; + int i; - fibptr = &dev->fibs[scsi_cmd_to_rq(scmd)->tag]; + blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); + i = blk_mq_unique_tag_to_tag(blk_tag); + fibptr = &dev->fibs[i]; /* * Null out fields that depend on being zero at the start of * each I/O diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 68f4dbcfff49250a632a7c1e2e10f74e303a0e19..c4a36c0be527cd86b04713dbfcab4c1206e30618 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -19,6 +19,7 @@ #include #include +#include #include #include #include @@ -504,6 +505,15 @@ common_config: return 0; } +static void aac_map_queues(struct Scsi_Host *shost) +{ + struct aac_dev *aac = (struct aac_dev *)shost->hostdata; + + blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], + aac->pdev, 0); + aac->use_map_queue = true; +} + /** * aac_change_queue_depth - alter queue depths * @sdev: SCSI device we are considering @@ -1488,6 +1498,7 @@ static const struct scsi_host_template aac_driver_template = { .bios_param = aac_biosparm, .shost_groups = aac_host_groups, .slave_configure = aac_slave_configure, + .map_queues = aac_map_queues, .change_queue_depth = aac_change_queue_depth, .sdev_groups = aac_dev_groups, .eh_abort_handler = aac_eh_abort, @@ -1775,6 +1786,8 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) shost->max_lun = AAC_MAX_LUN; pci_set_drvdata(pdev, shost); + shost->nr_hw_queues = aac->max_msix; + shost->host_tagset = 1; error = scsi_add_host(shost, &pdev->dev); if (error) @@ -1906,6 +1919,7 @@ 
static void aac_remove_one(struct pci_dev *pdev) struct aac_dev *aac = (struct aac_dev *)shost->hostdata; aac_cancel_rescan_worker(aac); + aac->use_map_queue = false; scsi_remove_host(shost); __aac_shutdown(aac); diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index 11ef58204e96f179227c166433eb4932ed7dbafa..61949f3741886ba1b439292b473bf27d50d1b227 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c @@ -493,6 +493,10 @@ static int aac_src_deliver_message(struct fib *fib) #endif u16 vector_no; + struct scsi_cmnd *scmd; + u32 blk_tag; + struct Scsi_Host *shost = dev->scsi_host_ptr; + struct blk_mq_queue_map *qmap; atomic_inc(&q->numpending); @@ -505,8 +509,25 @@ static int aac_src_deliver_message(struct fib *fib) if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) && dev->sa_firmware) vector_no = aac_get_vector(dev); - else - vector_no = fib->vector_no; + else { + if (!fib->vector_no || !fib->callback_data) { + if (shost && dev->use_map_queue) { + qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; + vector_no = qmap->mq_map[raw_smp_processor_id()]; + } + /* + * We hardcode the vector_no for + * reserved commands as a valid shost is + * absent during the init + */ + else + vector_no = 0; + } else { + scmd = (struct scsi_cmnd *)fib->callback_data; + blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); + vector_no = blk_mq_unique_tag_to_hwq(blk_tag); + } + } if (native_hba) { if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) { diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 9a322a3a215020609513faf15f0b822558beaba3..595dca92e8db5ad76dffddac8181fd561408372b 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -889,7 +889,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *piocbq) { uint32_t evt_req_id = 0; - uint32_t cmd; + u16 cmd; struct lpfc_dmabuf *dmabuf = NULL; struct lpfc_bsg_event *evt; struct event_data *evt_dat = NULL; @@ -915,7 +915,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, ct_req = (struct lpfc_sli_ct_request *)bdeBuf1->virt; evt_req_id = ct_req->FsType; - cmd = ct_req->CommandResponse.bits.CmdRsp; + cmd = be16_to_cpu(ct_req->CommandResponse.bits.CmdRsp); spin_lock_irqsave(&phba->ct_ev_lock, flags); list_for_each_entry(evt, &phba->ct_ev_waiters, node) { @@ -3186,8 +3186,8 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job) ctreq->RevisionId.bits.InId = 0; ctreq->FsType = SLI_CT_ELX_LOOPBACK; ctreq->FsSubType = 0; - ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA; - ctreq->CommandResponse.bits.Size = size; + ctreq->CommandResponse.bits.CmdRsp = cpu_to_be16(ELX_LOOPBACK_DATA); + ctreq->CommandResponse.bits.Size = cpu_to_be16(size); segment_offset = ELX_LOOPBACK_HEADER_SZ; } else segment_offset = 0; diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index df5e5b7fdcfe73accb080e412f43372c44974cbe..84aa3571be6d4de1f8e7cf7e2e0a011a6c4b6fd1 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -3796,6 +3796,7 @@ struct qla_qpair { uint64_t retry_term_jiff; struct qla_tgt_counters tgt_counters; uint16_t cpuid; + bool cpu_mapped; struct qla_fw_resources fwres ____cacheline_aligned; struct qla_buf_pool buf_pool; u32 cmd_cnt; diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index ec0423ec66817c1a54ea00d508aeaccc02def258..1a955c3ff3d6ce2732e3fd03e65203a5162dbc9a 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ 
b/drivers/scsi/qla2xxx/qla_init.c @@ -9426,6 +9426,9 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, qpair->rsp->req = qpair->req; qpair->rsp->qpair = qpair; + if (!qpair->cpu_mapped) + qla_cpu_update(qpair, raw_smp_processor_id()); + if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { if (ha->fw_attributes & BIT_4) qpair->difdix_supported = 1; diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index cce6e425c1214a9aabaf9fe2677ba1ef0419cdb9..7b42558a8839ad51e0b92ede49c89656a16b03f7 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -539,11 +539,14 @@ qla_mapq_init_qp_cpu_map(struct qla_hw_data *ha, if (!ha->qp_cpu_map) return; mask = pci_irq_get_affinity(ha->pdev, msix->vector_base0); + if (!mask) + return; qpair->cpuid = cpumask_first(mask); for_each_cpu(cpu, mask) { ha->qp_cpu_map[cpu] = qpair; } msix->cpuid = qpair->cpuid; + qpair->cpu_mapped = true; } static inline void diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 71feda2cdb63046a713c5018ae64948abed1084f..245e3a5d81fd33b3040df3000712d5d961ec9d6e 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -3770,6 +3770,9 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) { rsp->qpair->rcv_intr = 1; + + if (!rsp->qpair->cpu_mapped) + qla_cpu_update(rsp->qpair, raw_smp_processor_id()); } #define __update_rsp_in(_is_shadow_hba, _rsp, _rsp_in) \ diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index b7c569a42aa477551597ff0629fc6c04888277fb..0226c9279cef60b0daf3cebfba77c467200975c9 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1463,6 +1463,8 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd) struct Scsi_Host *host = cmd->device->host; int rtn = 0; + atomic_inc(&cmd->device->iorequest_cnt); + /* check if the device is still usable */ if (unlikely(cmd->device->sdev_state == SDEV_DEL)) { /* in SDEV_DEL we error all commands. 
DID_NO_CONNECT @@ -1483,6 +1485,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd) */ SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd, "queuecommand : device blocked\n")); + atomic_dec(&cmd->device->iorequest_cnt); return SCSI_MLQUEUE_DEVICE_BUSY; } @@ -1515,6 +1518,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd) trace_scsi_dispatch_cmd_start(cmd); rtn = host->hostt->queuecommand(host, cmd); if (rtn) { + atomic_dec(&cmd->device->iorequest_cnt); trace_scsi_dispatch_cmd_error(cmd, rtn); if (rtn != SCSI_MLQUEUE_DEVICE_BUSY && rtn != SCSI_MLQUEUE_TARGET_BUSY) @@ -1761,7 +1765,6 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, goto out_dec_host_busy; } - atomic_inc(&cmd->device->iorequest_cnt); return BLK_STS_OK; out_dec_host_busy: diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c index 5b230e149c3dfe346588121e03ed81efd64cde95..8ffb75be99bc8fab964fc6df5663f9392c844258 100644 --- a/drivers/scsi/stex.c +++ b/drivers/scsi/stex.c @@ -109,7 +109,9 @@ enum { TASK_ATTRIBUTE_HEADOFQUEUE = 0x1, TASK_ATTRIBUTE_ORDERED = 0x2, TASK_ATTRIBUTE_ACA = 0x4, +}; +enum { SS_STS_NORMAL = 0x80000000, SS_STS_DONE = 0x40000000, SS_STS_HANDSHAKE = 0x20000000, @@ -121,7 +123,9 @@ enum { SS_I2H_REQUEST_RESET = 0x2000, SS_MU_OPERATIONAL = 0x80000000, +}; +enum { STEX_CDB_LENGTH = 16, STATUS_VAR_LEN = 128, diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index d9ce379c4d2e836476a04a724622d80a0a85a36a..659196a2f63ad33d3f16e9cb0e26449e78ef46a9 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1567,6 +1567,8 @@ static int storvsc_device_configure(struct scsi_device *sdevice) { blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ)); + /* storvsc devices don't support MAINTENANCE_IN SCSI cmd */ + sdevice->no_report_opcodes = 1; sdevice->no_write_same = 1; /* @@ -1780,7 +1782,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) length = scsi_bufflen(scmnd); payload = (struct vmbus_packet_mpb_array *)&cmd_request->mpb; - payload_sz = sizeof(cmd_request->mpb); + payload_sz = 0; if (scsi_sg_count(scmnd)) { unsigned long offset_in_hvpg = offset_in_hvpage(sgl->offset); @@ -1789,10 +1791,10 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) unsigned long hvpfn, hvpfns_to_add; int j, i = 0, sg_count; - if (hvpg_count > MAX_PAGE_BUFFER_COUNT) { + payload_sz = (hvpg_count * sizeof(u64) + + sizeof(struct vmbus_packet_mpb_array)); - payload_sz = (hvpg_count * sizeof(u64) + - sizeof(struct vmbus_packet_mpb_array)); + if (hvpg_count > MAX_PAGE_BUFFER_COUNT) { payload = kzalloc(payload_sz, GFP_ATOMIC); if (!payload) return SCSI_MLQUEUE_DEVICE_BUSY; diff --git a/drivers/soc/fsl/qe/Kconfig b/drivers/soc/fsl/qe/Kconfig index 7268c2fbcbc1d80b7b72b45ae4668584016f92c8..e0d096607fefbaf4f9ee93921046ec675a9af05f 100644 --- a/drivers/soc/fsl/qe/Kconfig +++ b/drivers/soc/fsl/qe/Kconfig @@ -36,7 +36,7 @@ config UCC config CPM_TSA tristate "CPM TSA support" depends on OF && HAS_IOMEM - depends on CPM1 || COMPILE_TEST + depends on CPM1 || (CPM && COMPILE_TEST) help Freescale CPM Time Slot Assigner (TSA) controller. 
@@ -47,7 +47,7 @@ config CPM_TSA config CPM_QMC tristate "CPM QMC support" depends on OF && HAS_IOMEM - depends on CPM1 || (FSL_SOC && COMPILE_TEST) + depends on CPM1 || (FSL_SOC && CPM && COMPILE_TEST) depends on CPM_TSA help Freescale CPM QUICC Multichannel Controller diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index 0f43a88b4894e4675b7e456a08d906890c538829..89b775512befa1cc2d12462a09dd567cf5d6a726 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -32,4 +32,5 @@ obj-$(CONFIG_QCOM_RPMHPD) += rpmhpd.o obj-$(CONFIG_QCOM_RPMPD) += rpmpd.o obj-$(CONFIG_QCOM_KRYO_L2_ACCESSORS) += kryo-l2-accessors.o obj-$(CONFIG_QCOM_ICC_BWMON) += icc-bwmon.o -obj-$(CONFIG_QCOM_INLINE_CRYPTO_ENGINE) += ice.o +qcom_ice-objs += ice.o +obj-$(CONFIG_QCOM_INLINE_CRYPTO_ENGINE) += qcom_ice.o diff --git a/drivers/soc/qcom/icc-bwmon.c b/drivers/soc/qcom/icc-bwmon.c index fd58c5b698978765d3213b8689d7db4dab39d415..f65bfeca7ed6b9802e7a2eb038f6fe72710b98af 100644 --- a/drivers/soc/qcom/icc-bwmon.c +++ b/drivers/soc/qcom/icc-bwmon.c @@ -773,12 +773,12 @@ static int bwmon_probe(struct platform_device *pdev) bwmon->max_bw_kbps = UINT_MAX; opp = dev_pm_opp_find_bw_floor(dev, &bwmon->max_bw_kbps, 0); if (IS_ERR(opp)) - return dev_err_probe(dev, ret, "failed to find max peak bandwidth\n"); + return dev_err_probe(dev, PTR_ERR(opp), "failed to find max peak bandwidth\n"); bwmon->min_bw_kbps = 0; opp = dev_pm_opp_find_bw_ceil(dev, &bwmon->min_bw_kbps, 0); if (IS_ERR(opp)) - return dev_err_probe(dev, ret, "failed to find min peak bandwidth\n"); + return dev_err_probe(dev, PTR_ERR(opp), "failed to find min peak bandwidth\n"); bwmon->dev = dev; diff --git a/drivers/soc/qcom/ramp_controller.c b/drivers/soc/qcom/ramp_controller.c index dc74d2a19de2bd9ef89e758764f34523bab000fb..5e3ba0be090354fb05d84c450879c8ddb9a2625a 100644 --- a/drivers/soc/qcom/ramp_controller.c +++ b/drivers/soc/qcom/ramp_controller.c @@ -296,7 +296,7 @@ static int qcom_ramp_controller_probe(struct platform_device *pdev) return -ENOMEM; qrc->desc = device_get_match_data(&pdev->dev); - if (!qrc) + if (!qrc->desc) return -EINVAL; qrc->regmap = devm_regmap_init_mmio(&pdev->dev, base, &qrc_regmap_config); diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c index ce48a9f3b4c87f284eed596f5d0c578a3dea74f2..f83811f51175352777ccc95886df4d39d4ffd8cb 100644 --- a/drivers/soc/qcom/rmtfs_mem.c +++ b/drivers/soc/qcom/rmtfs_mem.c @@ -233,6 +233,7 @@ static int qcom_rmtfs_mem_probe(struct platform_device *pdev) num_vmids = 0; } else if (num_vmids < 0) { dev_err(&pdev->dev, "failed to count qcom,vmid elements: %d\n", num_vmids); + ret = num_vmids; goto remove_cdev; } else if (num_vmids > NUM_MAX_VMIDS) { dev_warn(&pdev->dev, diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c index f93544f6d7961a01e18858f9b8cfee42f3da5645..0dd4363ebac8f77ffc36b5798697a87475977d41 100644 --- a/drivers/soc/qcom/rpmh-rsc.c +++ b/drivers/soc/qcom/rpmh-rsc.c @@ -1073,7 +1073,7 @@ static int rpmh_rsc_probe(struct platform_device *pdev) drv->ver.minor = rsc_id & (MINOR_VER_MASK << MINOR_VER_SHIFT); drv->ver.minor >>= MINOR_VER_SHIFT; - if (drv->ver.major == 3 && drv->ver.minor >= 0) + if (drv->ver.major == 3) drv->regs = rpmh_rsc_reg_offset_ver_3_0; else drv->regs = rpmh_rsc_reg_offset_ver_2_7; diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c index f20e2a49a6695757385020e25a6f96bd53f773ad..63c35a32065ba4463324a67bcefa9588f1179c28 100644 --- a/drivers/soc/qcom/rpmhpd.c +++ b/drivers/soc/qcom/rpmhpd.c @@ 
-342,6 +342,21 @@ static const struct rpmhpd_desc sm8150_desc = { .num_pds = ARRAY_SIZE(sm8150_rpmhpds), }; +static struct rpmhpd *sa8155p_rpmhpds[] = { + [SA8155P_CX] = &cx_w_mx_parent, + [SA8155P_CX_AO] = &cx_ao_w_mx_parent, + [SA8155P_EBI] = &ebi, + [SA8155P_GFX] = &gfx, + [SA8155P_MSS] = &mss, + [SA8155P_MX] = &mx, + [SA8155P_MX_AO] = &mx_ao, +}; + +static const struct rpmhpd_desc sa8155p_desc = { + .rpmhpds = sa8155p_rpmhpds, + .num_pds = ARRAY_SIZE(sa8155p_rpmhpds), +}; + /* SM8250 RPMH powerdomains */ static struct rpmhpd *sm8250_rpmhpds[] = { [SM8250_CX] = &cx_w_mx_parent, @@ -519,6 +534,7 @@ static const struct rpmhpd_desc sc8280xp_desc = { static const struct of_device_id rpmhpd_match_table[] = { { .compatible = "qcom,qdu1000-rpmhpd", .data = &qdu1000_desc }, + { .compatible = "qcom,sa8155p-rpmhpd", .data = &sa8155p_desc }, { .compatible = "qcom,sa8540p-rpmhpd", .data = &sa8540p_desc }, { .compatible = "qcom,sa8775p-rpmhpd", .data = &sa8775p_desc }, { .compatible = "qcom,sc7180-rpmhpd", .data = &sc7180_desc }, diff --git a/drivers/soundwire/dmi-quirks.c b/drivers/soundwire/dmi-quirks.c index 58ea013fa918a105edf1d002bc44e6fce815f1e6..2a1096dab63d3c06c1d4dc9ab44a9a409273b048 100644 --- a/drivers/soundwire/dmi-quirks.c +++ b/drivers/soundwire/dmi-quirks.c @@ -99,6 +99,13 @@ static const struct dmi_system_id adr_remap_quirk_table[] = { }, .driver_data = (void *)intel_tgl_bios, }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_BOARD_NAME, "8709"), + }, + .driver_data = (void *)intel_tgl_bios, + }, { /* quirk used for NUC15 'Bishop County' LAPBC510 and LAPBC710 skews */ .matches = { diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c index c296e0bf897b756e0f00428c8b558d40e50ae5ef..280455f047a3608ae4689d8126482c25903f8ec5 100644 --- a/drivers/soundwire/qcom.c +++ b/drivers/soundwire/qcom.c @@ -1099,8 +1099,10 @@ static int qcom_swrm_startup(struct snd_pcm_substream *substream, } sruntime = sdw_alloc_stream(dai->name); - if (!sruntime) - return -ENOMEM; + if (!sruntime) { + ret = -ENOMEM; + goto err_alloc; + } ctrl->sruntime[dai->id] = sruntime; @@ -1110,12 +1112,19 @@ static int qcom_swrm_startup(struct snd_pcm_substream *substream, if (ret < 0 && ret != -ENOTSUPP) { dev_err(dai->dev, "Failed to set sdw stream on %s\n", codec_dai->name); - sdw_release_stream(sruntime); - return ret; + goto err_set_stream; } } return 0; + +err_set_stream: + sdw_release_stream(sruntime); +err_alloc: + pm_runtime_mark_last_busy(ctrl->dev); + pm_runtime_put_autosuspend(ctrl->dev); + + return ret; } static void qcom_swrm_shutdown(struct snd_pcm_substream *substream, diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c index c2191c07442b02f0a1e4f2725eb35b2222bdb0b7..379228f2218696ec42801fcbb8a080dc6b87f850 100644 --- a/drivers/soundwire/stream.c +++ b/drivers/soundwire/stream.c @@ -2021,8 +2021,10 @@ int sdw_stream_add_slave(struct sdw_slave *slave, skip_alloc_master_rt: s_rt = sdw_slave_rt_find(slave, stream); - if (s_rt) + if (s_rt) { + alloc_slave_rt = false; goto skip_alloc_slave_rt; + } s_rt = sdw_slave_rt_alloc(slave, m_rt); if (!s_rt) { diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index 6ddb2dfc0f005fedf1fbd71b62812c8beb29d62e..32449bef4415ad889931a442dba44df016b20d85 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -1756,8 +1756,11 @@ static int cqspi_probe(struct platform_device *pdev) cqspi->slow_sram = true; if (of_device_is_compatible(pdev->dev.of_node, - 
"xlnx,versal-ospi-1.0")) - dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + "xlnx,versal-ospi-1.0")) { + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + if (ret) + goto probe_reset_failed; + } } ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0, diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c index ac85d5562212705a9fd8c0bb5f13c73f024fbf67..26e6633693196ef20ae95b93114627a9d97a2e7b 100644 --- a/drivers/spi/spi-cadence.c +++ b/drivers/spi/spi-cadence.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -301,49 +302,43 @@ static int cdns_spi_setup_transfer(struct spi_device *spi, } /** - * cdns_spi_fill_tx_fifo - Fills the TX FIFO with as many bytes as possible + * cdns_spi_process_fifo - Fills the TX FIFO, and drain the RX FIFO * @xspi: Pointer to the cdns_spi structure + * @ntx: Number of bytes to pack into the TX FIFO + * @nrx: Number of bytes to drain from the RX FIFO */ -static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi) +static void cdns_spi_process_fifo(struct cdns_spi *xspi, int ntx, int nrx) { - unsigned long trans_cnt = 0; + ntx = clamp(ntx, 0, xspi->tx_bytes); + nrx = clamp(nrx, 0, xspi->rx_bytes); - while ((trans_cnt < xspi->tx_fifo_depth) && - (xspi->tx_bytes > 0)) { + xspi->tx_bytes -= ntx; + xspi->rx_bytes -= nrx; + while (ntx || nrx) { /* When xspi in busy condition, bytes may send failed, * then spi control did't work thoroughly, add one byte delay */ - if (cdns_spi_read(xspi, CDNS_SPI_ISR) & - CDNS_SPI_IXR_TXFULL) + if (cdns_spi_read(xspi, CDNS_SPI_ISR) & CDNS_SPI_IXR_TXFULL) udelay(10); - if (xspi->txbuf) - cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++); - else - cdns_spi_write(xspi, CDNS_SPI_TXD, 0); + if (ntx) { + if (xspi->txbuf) + cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++); + else + cdns_spi_write(xspi, CDNS_SPI_TXD, 0); - xspi->tx_bytes--; - trans_cnt++; - } -} + ntx--; + } -/** - * cdns_spi_read_rx_fifo - Reads the RX FIFO with as many bytes as possible - * @xspi: Pointer to the cdns_spi structure - * @count: Read byte count - */ -static void cdns_spi_read_rx_fifo(struct cdns_spi *xspi, unsigned long count) -{ - u8 data; - - /* Read out the data from the RX FIFO */ - while (count > 0) { - data = cdns_spi_read(xspi, CDNS_SPI_RXD); - if (xspi->rxbuf) - *xspi->rxbuf++ = data; - xspi->rx_bytes--; - count--; + if (nrx) { + u8 data = cdns_spi_read(xspi, CDNS_SPI_RXD); + + if (xspi->rxbuf) + *xspi->rxbuf++ = data; + + nrx--; + } } } @@ -381,33 +376,22 @@ static irqreturn_t cdns_spi_irq(int irq, void *dev_id) spi_finalize_current_transfer(ctlr); status = IRQ_HANDLED; } else if (intr_status & CDNS_SPI_IXR_TXOW) { - int trans_cnt = cdns_spi_read(xspi, CDNS_SPI_THLD); + int threshold = cdns_spi_read(xspi, CDNS_SPI_THLD); + int trans_cnt = xspi->rx_bytes - xspi->tx_bytes; + + if (threshold > 1) + trans_cnt -= threshold; + /* Set threshold to one if number of pending are * less than half fifo */ if (xspi->tx_bytes < xspi->tx_fifo_depth >> 1) cdns_spi_write(xspi, CDNS_SPI_THLD, 1); - while (trans_cnt) { - cdns_spi_read_rx_fifo(xspi, 1); - - if (xspi->tx_bytes) { - if (xspi->txbuf) - cdns_spi_write(xspi, CDNS_SPI_TXD, - *xspi->txbuf++); - else - cdns_spi_write(xspi, CDNS_SPI_TXD, 0); - xspi->tx_bytes--; - } - trans_cnt--; - } - if (!xspi->tx_bytes) { - /* Fixed delay due to controller limitation with - * RX_NEMPTY incorrect status - * Xilinx AR:65885 contains more details - */ - udelay(10); - cdns_spi_read_rx_fifo(xspi, xspi->rx_bytes); + if (xspi->tx_bytes) { + cdns_spi_process_fifo(xspi, trans_cnt, 
trans_cnt); + } else { + cdns_spi_process_fifo(xspi, 0, trans_cnt); cdns_spi_write(xspi, CDNS_SPI_IDR, CDNS_SPI_IXR_DEFAULT); spi_finalize_current_transfer(ctlr); @@ -450,16 +434,17 @@ static int cdns_transfer_one(struct spi_controller *ctlr, xspi->tx_bytes = transfer->len; xspi->rx_bytes = transfer->len; - if (!spi_controller_is_slave(ctlr)) + if (!spi_controller_is_slave(ctlr)) { cdns_spi_setup_transfer(spi, transfer); + } else { + /* Set TX empty threshold to half of FIFO depth + * only if TX bytes are more than half FIFO depth. + */ + if (xspi->tx_bytes > xspi->tx_fifo_depth) + cdns_spi_write(xspi, CDNS_SPI_THLD, xspi->tx_fifo_depth >> 1); + } - /* Set TX empty threshold to half of FIFO depth - * only if TX bytes are more than half FIFO depth. - */ - if (xspi->tx_bytes > (xspi->tx_fifo_depth >> 1)) - cdns_spi_write(xspi, CDNS_SPI_THLD, xspi->tx_fifo_depth >> 1); - - cdns_spi_fill_tx_fifo(xspi); + cdns_spi_process_fifo(xspi, xspi->tx_fifo_depth, 0); spi_transfer_delay_exec(transfer); cdns_spi_write(xspi, CDNS_SPI_IER, CDNS_SPI_IXR_DEFAULT); diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c index 5e6faa98aa8523d0229454afebcdb6beb50af109..15f5e9cb54ad45717c61e21ab8005b79d53307b0 100644 --- a/drivers/spi/spi-dw-mmio.c +++ b/drivers/spi/spi-dw-mmio.c @@ -264,17 +264,17 @@ static void dw_spi_elba_set_cs(struct spi_device *spi, bool enable) struct regmap *syscon = dwsmmio->priv; u8 cs; - cs = spi->chip_select; + cs = spi_get_chipselect(spi, 0); if (cs < 2) - dw_spi_elba_override_cs(syscon, spi->chip_select, enable); + dw_spi_elba_override_cs(syscon, spi_get_chipselect(spi, 0), enable); /* * The DW SPI controller needs a native CS bit selected to start * the serial engine. */ - spi->chip_select = 0; + spi_set_chipselect(spi, 0, 0); dw_spi_set_cs(spi, enable); - spi->chip_select = cs; + spi_set_chipselect(spi, 0, cs); } static int dw_spi_elba_init(struct platform_device *pdev, diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 4339485d202c05c7fac4f9897ea7d36872851bb8..674cfe05f4118abf53995ce8d7283ea675d99587 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -1002,7 +1002,9 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, static int dspi_setup(struct spi_device *spi) { struct fsl_dspi *dspi = spi_controller_get_devdata(spi->controller); + u32 period_ns = DIV_ROUND_UP(NSEC_PER_SEC, spi->max_speed_hz); unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0; + u32 quarter_period_ns = DIV_ROUND_UP(period_ns, 4); u32 cs_sck_delay = 0, sck_cs_delay = 0; struct fsl_dspi_platform_data *pdata; unsigned char pasc = 0, asc = 0; @@ -1031,6 +1033,19 @@ static int dspi_setup(struct spi_device *spi) sck_cs_delay = pdata->sck_cs_delay; } + /* Since tCSC and tASC apply to continuous transfers too, avoid SCK + * glitches of half a cycle by never allowing tCSC + tASC to go below + * half a SCK period. 
+ */ + if (cs_sck_delay < quarter_period_ns) + cs_sck_delay = quarter_period_ns; + if (sck_cs_delay < quarter_period_ns) + sck_cs_delay = quarter_period_ns; + + dev_dbg(&spi->dev, + "DSPI controller timing params: CS-to-SCK delay %u ns, SCK-to-CS delay %u ns\n", + cs_sck_delay, sck_cs_delay); + clkrate = clk_get_rate(dspi->clk); hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate); diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c index f2341ab99556649fa308931afc7ede4e8bc7cd52..4b70038ceb6b28e16d1cd2936547074834764f13 100644 --- a/drivers/spi/spi-fsl-lpspi.c +++ b/drivers/spi/spi-fsl-lpspi.c @@ -910,9 +910,14 @@ static int fsl_lpspi_probe(struct platform_device *pdev) ret = fsl_lpspi_dma_init(&pdev->dev, fsl_lpspi, controller); if (ret == -EPROBE_DEFER) goto out_pm_get; - if (ret < 0) dev_err(&pdev->dev, "dma setup error %d, use pio\n", ret); + else + /* + * disable LPSPI module IRQ when enable DMA mode successfully, + * to prevent the unexpected LPSPI module IRQ events. + */ + disable_irq(irq); ret = devm_spi_register_controller(&pdev->dev, controller); if (ret < 0) { diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c index ba7be505ec4ef5594ea9e9d4ee6e24c247f10140..b293428760bc6479025b8821b57628a6992a8d95 100644 --- a/drivers/spi/spi-geni-qcom.c +++ b/drivers/spi/spi-geni-qcom.c @@ -294,6 +294,8 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag) mas->cs_flag = set_flag; /* set xfer_mode to FIFO to complete cs_done in isr */ mas->cur_xfer_mode = GENI_SE_FIFO; + geni_se_select_mode(se, mas->cur_xfer_mode); + reinit_completion(&mas->cs_done); if (set_flag) geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0); @@ -644,6 +646,8 @@ static int spi_geni_init(struct spi_geni_master *mas) geni_se_select_mode(se, GENI_GPI_DMA); dev_dbg(mas->dev, "Using GPI DMA mode for SPI\n"); break; + } else if (ret == -EPROBE_DEFER) { + goto out_pm; } /* * in case of failure to get gpi dma channel, we can still do the diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c index 21c321f437667495c5a3ed94b3d73bf483672ffa..d7432e2219d850685d3ad83a5f8a60b3f7266dde 100644 --- a/drivers/spi/spi-mt65xx.c +++ b/drivers/spi/spi-mt65xx.c @@ -1275,6 +1275,9 @@ static int mtk_spi_remove(struct platform_device *pdev) struct mtk_spi *mdata = spi_master_get_devdata(master); int ret; + if (mdata->use_spimem && !completion_done(&mdata->spimem_done)) + complete(&mdata->spimem_done); + ret = pm_runtime_resume_and_get(&pdev->dev); if (ret < 0) return ret; diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c index 944ef6b42bce6f2c22d4627f0c88716e7c2ac894..00e5e88e72c49d9c143b1ec942c38ebf2f482cae 100644 --- a/drivers/spi/spi-qup.c +++ b/drivers/spi/spi-qup.c @@ -1028,23 +1028,8 @@ static int spi_qup_probe(struct platform_device *pdev) return -ENXIO; } - ret = clk_prepare_enable(cclk); - if (ret) { - dev_err(dev, "cannot enable core clock\n"); - return ret; - } - - ret = clk_prepare_enable(iclk); - if (ret) { - clk_disable_unprepare(cclk); - dev_err(dev, "cannot enable iface clock\n"); - return ret; - } - master = spi_alloc_master(dev, sizeof(struct spi_qup)); if (!master) { - clk_disable_unprepare(cclk); - clk_disable_unprepare(iclk); dev_err(dev, "cannot allocate master\n"); return -ENOMEM; } @@ -1092,6 +1077,19 @@ static int spi_qup_probe(struct platform_device *pdev) spin_lock_init(&controller->lock); init_completion(&controller->done); + ret = clk_prepare_enable(cclk); + if (ret) { + dev_err(dev, "cannot enable core clock\n"); + goto error_dma; + } + + ret = 
clk_prepare_enable(iclk); + if (ret) { + clk_disable_unprepare(cclk); + dev_err(dev, "cannot enable iface clock\n"); + goto error_dma; + } + iomode = readl_relaxed(base + QUP_IO_M_MODES); size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode); @@ -1121,7 +1119,7 @@ static int spi_qup_probe(struct platform_device *pdev) ret = spi_qup_set_state(controller, QUP_STATE_RESET); if (ret) { dev_err(dev, "cannot set RESET state\n"); - goto error_dma; + goto error_clk; } writel_relaxed(0, base + QUP_OPERATIONAL); @@ -1145,7 +1143,7 @@ static int spi_qup_probe(struct platform_device *pdev) ret = devm_request_irq(dev, irq, spi_qup_qup_irq, IRQF_TRIGGER_HIGH, pdev->name, controller); if (ret) - goto error_dma; + goto error_clk; pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC); pm_runtime_use_autosuspend(dev); @@ -1160,11 +1158,12 @@ static int spi_qup_probe(struct platform_device *pdev) disable_pm: pm_runtime_disable(&pdev->dev); +error_clk: + clk_disable_unprepare(cclk); + clk_disable_unprepare(iclk); error_dma: spi_qup_release_dma(master); error: - clk_disable_unprepare(cclk); - clk_disable_unprepare(iclk); spi_master_put(master); return ret; } diff --git a/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c b/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c index 63de214916f59ad7ceb27a2ac7761ae4aa6862a6..c079368019e87f5d7d5695ad9c2cff47e922042a 100644 --- a/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c +++ b/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c @@ -373,7 +373,7 @@ static int ov2680_get_fmt(struct v4l2_subdev *sd, static int ov2680_detect(struct i2c_client *client) { struct i2c_adapter *adapter = client->adapter; - u32 high, low; + u32 high = 0, low = 0; int ret; u16 id; u8 revision; @@ -383,7 +383,7 @@ static int ov2680_detect(struct i2c_client *client) ret = ov_read_reg8(client, OV2680_SC_CMMN_CHIP_ID_H, &high); if (ret) { - dev_err(&client->dev, "sensor_id_high = 0x%x\n", high); + dev_err(&client->dev, "sensor_id_high read failed (%d)\n", ret); return -ENODEV; } ret = ov_read_reg8(client, OV2680_SC_CMMN_CHIP_ID_L, &low); diff --git a/drivers/staging/media/imx/imx8mq-mipi-csi2.c b/drivers/staging/media/imx/imx8mq-mipi-csi2.c index 32700cb8bc4d551055b8d6f36618acbe07088042..ca2efcc21efe3063df15e811d2cb9a317bd3b8c1 100644 --- a/drivers/staging/media/imx/imx8mq-mipi-csi2.c +++ b/drivers/staging/media/imx/imx8mq-mipi-csi2.c @@ -354,7 +354,7 @@ static int imx8mq_mipi_csi_start_stream(struct csi_state *state, struct v4l2_subdev_state *sd_state) { int ret; - u32 hs_settle; + u32 hs_settle = 0; ret = imx8mq_mipi_csi_sw_reset(state); if (ret) diff --git a/drivers/staging/octeon/TODO b/drivers/staging/octeon/TODO index 67a0a1f6b922edcdedb2d25f0912c273d80d6097..044e48e3d65f662daf4bfa53adb3254c26ad2de2 100644 --- a/drivers/staging/octeon/TODO +++ b/drivers/staging/octeon/TODO @@ -6,4 +6,3 @@ TODO: - make driver self-contained instead of being split between staging and arch/mips/cavium-octeon. 
-Contact: Aaro Koskinen diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 834cce50f9b00ae5588abaf1c2c657203176ee39..b516c2893420bc142412061aa5db056d6732b685 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -364,8 +364,6 @@ struct iscsi_np *iscsit_add_np( init_completion(&np->np_restart_comp); INIT_LIST_HEAD(&np->np_list); - timer_setup(&np->np_login_timer, iscsi_handle_login_thread_timeout, 0); - ret = iscsi_target_setup_login_socket(np, sockaddr); if (ret != 0) { kfree(np); diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 274bdd7845ca936a038dc5e602dc34e3750cbcbd..90b870f234f03cd9033418387482a34bf9d4ede1 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -811,59 +811,6 @@ void iscsi_post_login_handler( iscsit_dec_conn_usage_count(conn); } -void iscsi_handle_login_thread_timeout(struct timer_list *t) -{ - struct iscsi_np *np = from_timer(np, t, np_login_timer); - - spin_lock_bh(&np->np_thread_lock); - pr_err("iSCSI Login timeout on Network Portal %pISpc\n", - &np->np_sockaddr); - - if (np->np_login_timer_flags & ISCSI_TF_STOP) { - spin_unlock_bh(&np->np_thread_lock); - return; - } - - if (np->np_thread) - send_sig(SIGINT, np->np_thread, 1); - - np->np_login_timer_flags &= ~ISCSI_TF_RUNNING; - spin_unlock_bh(&np->np_thread_lock); -} - -static void iscsi_start_login_thread_timer(struct iscsi_np *np) -{ - /* - * This used the TA_LOGIN_TIMEOUT constant because at this - * point we do not have access to ISCSI_TPG_ATTRIB(tpg)->login_timeout - */ - spin_lock_bh(&np->np_thread_lock); - np->np_login_timer_flags &= ~ISCSI_TF_STOP; - np->np_login_timer_flags |= ISCSI_TF_RUNNING; - mod_timer(&np->np_login_timer, jiffies + TA_LOGIN_TIMEOUT * HZ); - - pr_debug("Added timeout timer to iSCSI login request for" - " %u seconds.\n", TA_LOGIN_TIMEOUT); - spin_unlock_bh(&np->np_thread_lock); -} - -static void iscsi_stop_login_thread_timer(struct iscsi_np *np) -{ - spin_lock_bh(&np->np_thread_lock); - if (!(np->np_login_timer_flags & ISCSI_TF_RUNNING)) { - spin_unlock_bh(&np->np_thread_lock); - return; - } - np->np_login_timer_flags |= ISCSI_TF_STOP; - spin_unlock_bh(&np->np_thread_lock); - - del_timer_sync(&np->np_login_timer); - - spin_lock_bh(&np->np_thread_lock); - np->np_login_timer_flags &= ~ISCSI_TF_RUNNING; - spin_unlock_bh(&np->np_thread_lock); -} - int iscsit_setup_np( struct iscsi_np *np, struct sockaddr_storage *sockaddr) @@ -1123,10 +1070,13 @@ static struct iscsit_conn *iscsit_alloc_conn(struct iscsi_np *np) spin_lock_init(&conn->nopin_timer_lock); spin_lock_init(&conn->response_queue_lock); spin_lock_init(&conn->state_lock); + spin_lock_init(&conn->login_worker_lock); + spin_lock_init(&conn->login_timer_lock); timer_setup(&conn->nopin_response_timer, iscsit_handle_nopin_response_timeout, 0); timer_setup(&conn->nopin_timer, iscsit_handle_nopin_timeout, 0); + timer_setup(&conn->login_timer, iscsit_login_timeout, 0); if (iscsit_conn_set_transport(conn, np->np_transport) < 0) goto free_conn; @@ -1304,7 +1254,7 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) goto new_sess_out; } - iscsi_start_login_thread_timer(np); + iscsit_start_login_timer(conn, current); pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n"); conn->conn_state = TARG_CONN_STATE_XPT_UP; @@ -1417,8 +1367,6 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) if (ret < 0) goto new_sess_out; - 
iscsi_stop_login_thread_timer(np); - if (ret == 1) { tpg_np = conn->tpg_np; @@ -1434,7 +1382,7 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) new_sess_out: new_sess = true; old_sess_out: - iscsi_stop_login_thread_timer(np); + iscsit_stop_login_timer(conn); tpg_np = conn->tpg_np; iscsi_target_login_sess_out(conn, zero_tsih, new_sess); new_sess = false; @@ -1448,7 +1396,6 @@ old_sess_out: return 1; exit: - iscsi_stop_login_thread_timer(np); spin_lock_bh(&np->np_thread_lock); np->np_thread_state = ISCSI_NP_THREAD_EXIT; spin_unlock_bh(&np->np_thread_lock); diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 24040c118e49d26145eceaaa3b6ce34331366a8f..fa3fb5f4e6bc44db94eb42740dec672c099dd889 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -535,25 +535,6 @@ static void iscsi_target_login_drop(struct iscsit_conn *conn, struct iscsi_login iscsi_target_login_sess_out(conn, zero_tsih, true); } -struct conn_timeout { - struct timer_list timer; - struct iscsit_conn *conn; -}; - -static void iscsi_target_login_timeout(struct timer_list *t) -{ - struct conn_timeout *timeout = from_timer(timeout, t, timer); - struct iscsit_conn *conn = timeout->conn; - - pr_debug("Entering iscsi_target_login_timeout >>>>>>>>>>>>>>>>>>>\n"); - - if (conn->login_kworker) { - pr_debug("Sending SIGINT to conn->login_kworker %s/%d\n", - conn->login_kworker->comm, conn->login_kworker->pid); - send_sig(SIGINT, conn->login_kworker, 1); - } -} - static void iscsi_target_do_login_rx(struct work_struct *work) { struct iscsit_conn *conn = container_of(work, @@ -562,12 +543,15 @@ static void iscsi_target_do_login_rx(struct work_struct *work) struct iscsi_np *np = login->np; struct iscsi_portal_group *tpg = conn->tpg; struct iscsi_tpg_np *tpg_np = conn->tpg_np; - struct conn_timeout timeout; int rc, zero_tsih = login->zero_tsih; bool state; pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n", conn, current->comm, current->pid); + + spin_lock(&conn->login_worker_lock); + set_bit(LOGIN_FLAGS_WORKER_RUNNING, &conn->login_flags); + spin_unlock(&conn->login_worker_lock); /* * If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready() * before initial PDU processing in iscsi_target_start_negotiation() @@ -597,19 +581,16 @@ static void iscsi_target_do_login_rx(struct work_struct *work) goto err; } - conn->login_kworker = current; allow_signal(SIGINT); - - timeout.conn = conn; - timer_setup_on_stack(&timeout.timer, iscsi_target_login_timeout, 0); - mod_timer(&timeout.timer, jiffies + TA_LOGIN_TIMEOUT * HZ); - pr_debug("Starting login timer for %s/%d\n", current->comm, current->pid); + rc = iscsit_set_login_timer_kworker(conn, current); + if (rc < 0) { + /* The login timer has already expired */ + pr_debug("iscsi_target_do_login_rx, login failed\n"); + goto err; + } rc = conn->conn_transport->iscsit_get_login_rx(conn, login); - del_timer_sync(&timeout.timer); - destroy_timer_on_stack(&timeout.timer); flush_signals(current); - conn->login_kworker = NULL; if (rc < 0) goto err; @@ -646,7 +627,17 @@ static void iscsi_target_do_login_rx(struct work_struct *work) if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_WRITE_ACTIVE)) goto err; + + /* + * Set the login timer thread pointer to NULL to prevent the + * login process from getting stuck if the initiator + * stops sending data. 
+ */ + rc = iscsit_set_login_timer_kworker(conn, NULL); + if (rc < 0) + goto err; } else if (rc == 1) { + iscsit_stop_login_timer(conn); cancel_delayed_work(&conn->login_work); iscsi_target_nego_release(conn); iscsi_post_login_handler(np, conn, zero_tsih); @@ -656,6 +647,7 @@ static void iscsi_target_do_login_rx(struct work_struct *work) err: iscsi_target_restore_sock_callbacks(conn); + iscsit_stop_login_timer(conn); cancel_delayed_work(&conn->login_work); iscsi_target_login_drop(conn, login); iscsit_deaccess_np(np, tpg, tpg_np); @@ -1130,6 +1122,7 @@ int iscsi_target_locate_portal( iscsi_target_set_sock_callbacks(conn); login->np = np; + conn->tpg = NULL; login_req = (struct iscsi_login_req *) login->req; payload_length = ntoh24(login_req->dlength); @@ -1197,7 +1190,6 @@ int iscsi_target_locate_portal( */ sessiontype = strncmp(s_buf, DISCOVERY, 9); if (!sessiontype) { - conn->tpg = iscsit_global->discovery_tpg; if (!login->leading_connection) goto get_target; @@ -1214,9 +1206,11 @@ int iscsi_target_locate_portal( * Serialize access across the discovery struct iscsi_portal_group to * process login attempt. */ + conn->tpg = iscsit_global->discovery_tpg; if (iscsit_access_np(np, conn->tpg) < 0) { iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE); + conn->tpg = NULL; ret = -1; goto out; } @@ -1368,14 +1362,30 @@ int iscsi_target_start_negotiation( * and perform connection cleanup now. */ ret = iscsi_target_do_login(conn, login); - if (!ret && iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU)) - ret = -1; + if (!ret) { + spin_lock(&conn->login_worker_lock); + + if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU)) + ret = -1; + else if (!test_bit(LOGIN_FLAGS_WORKER_RUNNING, &conn->login_flags)) { + if (iscsit_set_login_timer_kworker(conn, NULL) < 0) { + /* + * The timeout has expired already. + * Schedule login_work to perform the cleanup. 
+ */ + schedule_delayed_work(&conn->login_work, 0); + } + } + + spin_unlock(&conn->login_worker_lock); + } if (ret < 0) { iscsi_target_restore_sock_callbacks(conn); iscsi_remove_failed_auth_entry(conn); } if (ret != 0) { + iscsit_stop_login_timer(conn); cancel_delayed_work_sync(&conn->login_work); iscsi_target_nego_release(conn); } diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 26dc8ed3045b6ad9aee85d543bd2ed144e77d2ed..b14835fcb03344780b764bbeeee21bbf53838b86 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -1040,6 +1040,57 @@ void iscsit_stop_nopin_timer(struct iscsit_conn *conn) spin_unlock_bh(&conn->nopin_timer_lock); } +void iscsit_login_timeout(struct timer_list *t) +{ + struct iscsit_conn *conn = from_timer(conn, t, login_timer); + struct iscsi_login *login = conn->login; + + pr_debug("Entering iscsi_target_login_timeout >>>>>>>>>>>>>>>>>>>\n"); + + spin_lock_bh(&conn->login_timer_lock); + login->login_failed = 1; + + if (conn->login_kworker) { + pr_debug("Sending SIGINT to conn->login_kworker %s/%d\n", + conn->login_kworker->comm, conn->login_kworker->pid); + send_sig(SIGINT, conn->login_kworker, 1); + } else { + schedule_delayed_work(&conn->login_work, 0); + } + spin_unlock_bh(&conn->login_timer_lock); +} + +void iscsit_start_login_timer(struct iscsit_conn *conn, struct task_struct *kthr) +{ + pr_debug("Login timer started\n"); + + conn->login_kworker = kthr; + mod_timer(&conn->login_timer, jiffies + TA_LOGIN_TIMEOUT * HZ); +} + +int iscsit_set_login_timer_kworker(struct iscsit_conn *conn, struct task_struct *kthr) +{ + struct iscsi_login *login = conn->login; + int ret = 0; + + spin_lock_bh(&conn->login_timer_lock); + if (login->login_failed) { + /* The timer has already expired */ + ret = -1; + } else { + conn->login_kworker = kthr; + } + spin_unlock_bh(&conn->login_timer_lock); + + return ret; +} + +void iscsit_stop_login_timer(struct iscsit_conn *conn) +{ + pr_debug("Login timer stopped\n"); + timer_delete_sync(&conn->login_timer); +} + int iscsit_send_tx_data( struct iscsit_cmd *cmd, struct iscsit_conn *conn, diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h index 33ea799a08508f16c59ca931e8182d9afeec9dac..24b8e577575a6ba567b0ca33cc21d3e8e10dd678 100644 --- a/drivers/target/iscsi/iscsi_target_util.h +++ b/drivers/target/iscsi/iscsi_target_util.h @@ -56,6 +56,10 @@ extern void iscsit_handle_nopin_timeout(struct timer_list *t); extern void __iscsit_start_nopin_timer(struct iscsit_conn *); extern void iscsit_start_nopin_timer(struct iscsit_conn *); extern void iscsit_stop_nopin_timer(struct iscsit_conn *); +extern void iscsit_login_timeout(struct timer_list *t); +extern void iscsit_start_login_timer(struct iscsit_conn *, struct task_struct *kthr); +extern void iscsit_stop_login_timer(struct iscsit_conn *); +extern int iscsit_set_login_timer_kworker(struct iscsit_conn *, struct task_struct *kthr); extern int iscsit_send_tx_data(struct iscsit_cmd *, struct iscsit_conn *, int); extern int iscsit_fe_sendpage_sg(struct iscsit_cmd *, struct iscsit_conn *); extern int iscsit_tx_login_rsp(struct iscsit_conn *, u8, u8); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 86adff2a86eddb5baa9e436f58074be4d2478c30..687adc9e086ca942ba85dd591ab3b7bf76cc9192 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -504,6 +504,8 @@ 
target_setup_session(struct se_portal_group *tpg, free_sess: transport_free_session(sess); + return ERR_PTR(rc); + free_cnt: target_free_cmd_counter(cmd_cnt); return ERR_PTR(rc); diff --git a/drivers/tee/amdtee/amdtee_if.h b/drivers/tee/amdtee/amdtee_if.h index ff48c3e4737503934acd9912f719ac656a9ac11b..e2014e21530acb86d50eb5995e2124f52acb5e7c 100644 --- a/drivers/tee/amdtee/amdtee_if.h +++ b/drivers/tee/amdtee/amdtee_if.h @@ -118,16 +118,18 @@ struct tee_cmd_unmap_shared_mem { /** * struct tee_cmd_load_ta - load Trusted Application (TA) binary into TEE - * @low_addr: [in] bits [31:0] of the physical address of the TA binary - * @hi_addr: [in] bits [63:32] of the physical address of the TA binary - * @size: [in] size of TA binary in bytes - * @ta_handle: [out] return handle of the loaded TA + * @low_addr: [in] bits [31:0] of the physical address of the TA binary + * @hi_addr: [in] bits [63:32] of the physical address of the TA binary + * @size: [in] size of TA binary in bytes + * @ta_handle: [out] return handle of the loaded TA + * @return_origin: [out] origin of return code after TEE processing */ struct tee_cmd_load_ta { u32 low_addr; u32 hi_addr; u32 size; u32 ta_handle; + u32 return_origin; }; /** diff --git a/drivers/tee/amdtee/call.c b/drivers/tee/amdtee/call.c index e8cd9aaa346750fd7e47a2a861813ecb512b51b5..e9b63dcb3194cf4cef47b16a698a12f944200ef1 100644 --- a/drivers/tee/amdtee/call.c +++ b/drivers/tee/amdtee/call.c @@ -423,19 +423,23 @@ int handle_load_ta(void *data, u32 size, struct tee_ioctl_open_session_arg *arg) if (ret) { arg->ret_origin = TEEC_ORIGIN_COMMS; arg->ret = TEEC_ERROR_COMMUNICATION; - } else if (arg->ret == TEEC_SUCCESS) { - ret = get_ta_refcount(load_cmd.ta_handle); - if (!ret) { - arg->ret_origin = TEEC_ORIGIN_COMMS; - arg->ret = TEEC_ERROR_OUT_OF_MEMORY; - - /* Unload the TA on error */ - unload_cmd.ta_handle = load_cmd.ta_handle; - psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA, - (void *)&unload_cmd, - sizeof(unload_cmd), &ret); - } else { - set_session_id(load_cmd.ta_handle, 0, &arg->session); + } else { + arg->ret_origin = load_cmd.return_origin; + + if (arg->ret == TEEC_SUCCESS) { + ret = get_ta_refcount(load_cmd.ta_handle); + if (!ret) { + arg->ret_origin = TEEC_ORIGIN_COMMS; + arg->ret = TEEC_ERROR_OUT_OF_MEMORY; + + /* Unload the TA on error */ + unload_cmd.ta_handle = load_cmd.ta_handle; + psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA, + (void *)&unload_cmd, + sizeof(unload_cmd), &ret); + } else { + set_session_id(load_cmd.ta_handle, 0, &arg->session); + } } } mutex_unlock(&ta_refcount_mutex); diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c index 49702cb08f4fb3a56f28f3bd9f643268b68cba1f..3861ae06d902f072451bd46d99206ab8bb8e53f5 100644 --- a/drivers/tee/optee/smc_abi.c +++ b/drivers/tee/optee/smc_abi.c @@ -1004,8 +1004,10 @@ static u32 get_async_notif_value(optee_invoke_fn *invoke_fn, bool *value_valid, invoke_fn(OPTEE_SMC_GET_ASYNC_NOTIF_VALUE, 0, 0, 0, 0, 0, 0, 0, &res); - if (res.a0) + if (res.a0) { + *value_valid = false; return 0; + } *value_valid = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID); *value_pending = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING); return res.a1; diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c index 810231b59dcdcc7b1e2943a603b099ac40a3e33b..5e1164226ada21b93d2ef2eb954d17a5d214b4bc 100644 --- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c @@ -131,7 +131,7 @@ 
static ssize_t available_uuids_show(struct device *dev, for (i = 0; i < INT3400_THERMAL_MAXIMUM_UUID; i++) { if (priv->uuid_bitmap & (1 << i)) - length += sysfs_emit_at(buf, length, int3400_thermal_uuids[i]); + length += sysfs_emit_at(buf, length, "%s\n", int3400_thermal_uuids[i]); } return length; @@ -149,7 +149,7 @@ static ssize_t current_uuid_show(struct device *dev, for (i = 0; i <= INT3400_THERMAL_CRITICAL; i++) { if (priv->os_uuid_mask & BIT(i)) - length += sysfs_emit_at(buf, length, int3400_thermal_uuids[i]); + length += sysfs_emit_at(buf, length, "%s\n", int3400_thermal_uuids[i]); } if (length) diff --git a/drivers/thermal/intel/intel_soc_dts_iosf.c b/drivers/thermal/intel/intel_soc_dts_iosf.c index f99dc7e4ae893949e1fb8009982f2a922aeeb0e5..db97499f4f0ab01d829167b84e5303cd8034b56d 100644 --- a/drivers/thermal/intel/intel_soc_dts_iosf.c +++ b/drivers/thermal/intel/intel_soc_dts_iosf.c @@ -398,7 +398,7 @@ struct intel_soc_dts_sensors *intel_soc_dts_iosf_init( spin_lock_init(&sensors->intr_notify_lock); mutex_init(&sensors->dts_update_lock); sensors->intr_type = intr_type; - sensors->tj_max = tj_max; + sensors->tj_max = tj_max * 1000; if (intr_type == INTEL_SOC_DTS_INTERRUPT_NONE) notification = false; else diff --git a/drivers/thunderbolt/dma_test.c b/drivers/thunderbolt/dma_test.c index 3bedecb236e0d15f67e9c09586eddb008276c87a..14bb6dec6c4b023c38ff671da20965c18a82d2b3 100644 --- a/drivers/thunderbolt/dma_test.c +++ b/drivers/thunderbolt/dma_test.c @@ -192,9 +192,9 @@ static int dma_test_start_rings(struct dma_test *dt) } ret = tb_xdomain_enable_paths(dt->xd, dt->tx_hopid, - dt->tx_ring ? dt->tx_ring->hop : 0, + dt->tx_ring ? dt->tx_ring->hop : -1, dt->rx_hopid, - dt->rx_ring ? dt->rx_ring->hop : 0); + dt->rx_ring ? dt->rx_ring->hop : -1); if (ret) { dma_test_free_rings(dt); return ret; @@ -218,9 +218,9 @@ static void dma_test_stop_rings(struct dma_test *dt) tb_ring_stop(dt->tx_ring); ret = tb_xdomain_disable_paths(dt->xd, dt->tx_hopid, - dt->tx_ring ? dt->tx_ring->hop : 0, + dt->tx_ring ? dt->tx_ring->hop : -1, dt->rx_hopid, - dt->rx_ring ? dt->rx_ring->hop : 0); + dt->rx_ring ? 
dt->rx_ring->hop : -1); if (ret) dev_warn(&dt->svc->dev, "failed to disable DMA paths\n"); diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c index d76e923fbc6a01fcc8ce93bb998d4231511ffa39..e58beac442958c922a55bc876d9c39c8912cd4c7 100644 --- a/drivers/thunderbolt/nhi.c +++ b/drivers/thunderbolt/nhi.c @@ -54,6 +54,26 @@ static int ring_interrupt_index(const struct tb_ring *ring) return bit; } +static void nhi_mask_interrupt(struct tb_nhi *nhi, int mask, int ring) +{ + if (nhi->quirks & QUIRK_AUTO_CLEAR_INT) { + u32 val; + + val = ioread32(nhi->iobase + REG_RING_INTERRUPT_BASE + ring); + iowrite32(val & ~mask, nhi->iobase + REG_RING_INTERRUPT_BASE + ring); + } else { + iowrite32(mask, nhi->iobase + REG_RING_INTERRUPT_MASK_CLEAR_BASE + ring); + } +} + +static void nhi_clear_interrupt(struct tb_nhi *nhi, int ring) +{ + if (nhi->quirks & QUIRK_AUTO_CLEAR_INT) + ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + ring); + else + iowrite32(~0, nhi->iobase + REG_RING_INT_CLEAR + ring); +} + /* * ring_interrupt_active() - activate/deactivate interrupts for a single ring * @@ -61,8 +81,8 @@ static int ring_interrupt_index(const struct tb_ring *ring) */ static void ring_interrupt_active(struct tb_ring *ring, bool active) { - int reg = REG_RING_INTERRUPT_BASE + - ring_interrupt_index(ring) / 32 * 4; + int index = ring_interrupt_index(ring) / 32 * 4; + int reg = REG_RING_INTERRUPT_BASE + index; int interrupt_bit = ring_interrupt_index(ring) & 31; int mask = 1 << interrupt_bit; u32 old, new; @@ -123,7 +143,11 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active) "interrupt for %s %d is already %s\n", RING_TYPE(ring), ring->hop, active ? "enabled" : "disabled"); - iowrite32(new, ring->nhi->iobase + reg); + + if (active) + iowrite32(new, ring->nhi->iobase + reg); + else + nhi_mask_interrupt(ring->nhi, mask, index); } /* @@ -136,11 +160,11 @@ static void nhi_disable_interrupts(struct tb_nhi *nhi) int i = 0; /* disable interrupts */ for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++) - iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i); + nhi_mask_interrupt(nhi, ~0, 4 * i); /* clear interrupt status bits */ for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++) - ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i); + nhi_clear_interrupt(nhi, 4 * i); } /* ring helper methods */ diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h index faef165a919ccda93a00fea018151210f68aec1b..6ba2958154770f9d2f2e11251b754c818baf360e 100644 --- a/drivers/thunderbolt/nhi_regs.h +++ b/drivers/thunderbolt/nhi_regs.h @@ -93,6 +93,8 @@ struct ring_desc { #define REG_RING_INTERRUPT_BASE 0x38200 #define RING_INTERRUPT_REG_COUNT(nhi) ((31 + 2 * nhi->hop_count) / 32) +#define REG_RING_INTERRUPT_MASK_CLEAR_BASE 0x38208 + #define REG_INT_THROTTLING_RATE 0x38c00 /* Interrupt Vector Allocation */ diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index 7bfbc9ca9ba4fe3bf59d4b635ef74137e2b76739..c1af712ca728827ac3b11ea9d101989fcbab4d95 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -737,6 +737,7 @@ static void tb_scan_port(struct tb_port *port) { struct tb_cm *tcm = tb_priv(port->sw->tb); struct tb_port *upstream_port; + bool discovery = false; struct tb_switch *sw; int ret; @@ -804,8 +805,10 @@ static void tb_scan_port(struct tb_port *port) * tunnels and know which switches were authorized already by * the boot firmware. 
*/ - if (!tcm->hotplug_active) + if (!tcm->hotplug_active) { dev_set_uevent_suppress(&sw->dev, true); + discovery = true; + } /* * At the moment Thunderbolt 2 and beyond (devices with LC) we @@ -835,10 +838,14 @@ static void tb_scan_port(struct tb_port *port) * CL0s and CL1 are enabled and supported together. * Silently ignore CLx enabling in case CLx is not supported. */ - ret = tb_switch_enable_clx(sw, TB_CL1); - if (ret && ret != -EOPNOTSUPP) - tb_sw_warn(sw, "failed to enable %s on upstream port\n", - tb_switch_clx_name(TB_CL1)); + if (discovery) { + tb_sw_dbg(sw, "discovery, not touching CL states\n"); + } else { + ret = tb_switch_enable_clx(sw, TB_CL1); + if (ret && ret != -EOPNOTSUPP) + tb_sw_warn(sw, "failed to enable %s on upstream port\n", + tb_switch_clx_name(TB_CL1)); + } if (tb_switch_is_clx_enabled(sw, TB_CL1)) /* diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c index 9099ae73e78f39417e3d67b1706722f610450f6e..4f222673d651d945dff7ea01565d1500b84fa4ed 100644 --- a/drivers/thunderbolt/tunnel.c +++ b/drivers/thunderbolt/tunnel.c @@ -526,7 +526,7 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel) * Perform connection manager handshake between IN and OUT ports * before capabilities exchange can take place. */ - ret = tb_dp_cm_handshake(in, out, 1500); + ret = tb_dp_cm_handshake(in, out, 3000); if (ret) return ret; diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c index f801b1f5b46c0cd98df6ed19e9c76d7342765585..af0e1c07018795d406bab65eea73181f79920c57 100644 --- a/drivers/tty/serial/8250/8250_bcm7271.c +++ b/drivers/tty/serial/8250/8250_bcm7271.c @@ -1012,7 +1012,7 @@ static int brcmuart_probe(struct platform_device *pdev) of_property_read_u32(np, "clock-frequency", &clk_rate); /* See if a Baud clock has been specified */ - baud_mux_clk = of_clk_get_by_name(np, "sw_baud"); + baud_mux_clk = devm_clk_get(dev, "sw_baud"); if (IS_ERR(baud_mux_clk)) { if (PTR_ERR(baud_mux_clk) == -EPROBE_DEFER) { ret = -EPROBE_DEFER; @@ -1032,7 +1032,7 @@ static int brcmuart_probe(struct platform_device *pdev) if (clk_rate == 0) { dev_err(dev, "clock-frequency or clk not defined\n"); ret = -EINVAL; - goto release_dma; + goto err_clk_disable; } dev_dbg(dev, "DMA is %senabled\n", priv->dma_enabled ? 
"" : "not "); @@ -1119,6 +1119,8 @@ err1: serial8250_unregister_port(priv->line); err: brcmuart_free_bufs(dev, priv); +err_clk_disable: + clk_disable_unprepare(baud_mux_clk); release_dma: if (priv->dma_enabled) brcmuart_arbitration(priv, 0); @@ -1133,6 +1135,7 @@ static int brcmuart_remove(struct platform_device *pdev) hrtimer_cancel(&priv->hrt); serial8250_unregister_port(priv->line); brcmuart_free_bufs(&pdev->dev, priv); + clk_disable_unprepare(priv->baud_mux_clk); if (priv->dma_enabled) brcmuart_arbitration(priv, 0); return 0; diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c index 64770c62bbec5436d332821ffaf57074276b2422..b406cba10b0eb4b6bb97aacb18d938000efb1903 100644 --- a/drivers/tty/serial/8250/8250_exar.c +++ b/drivers/tty/serial/8250/8250_exar.c @@ -40,9 +40,13 @@ #define PCI_DEVICE_ID_COMMTECH_4224PCIE 0x0020 #define PCI_DEVICE_ID_COMMTECH_4228PCIE 0x0021 #define PCI_DEVICE_ID_COMMTECH_4222PCIE 0x0022 + #define PCI_DEVICE_ID_EXAR_XR17V4358 0x4358 #define PCI_DEVICE_ID_EXAR_XR17V8358 0x8358 +#define PCI_SUBDEVICE_ID_USR_2980 0x0128 +#define PCI_SUBDEVICE_ID_USR_2981 0x0129 + #define PCI_DEVICE_ID_SEALEVEL_710xC 0x1001 #define PCI_DEVICE_ID_SEALEVEL_720xC 0x1002 #define PCI_DEVICE_ID_SEALEVEL_740xC 0x1004 @@ -829,6 +833,15 @@ static const struct exar8250_board pbn_exar_XR17V8358 = { (kernel_ulong_t)&bd \ } +#define USR_DEVICE(devid, sdevid, bd) { \ + PCI_DEVICE_SUB( \ + PCI_VENDOR_ID_USR, \ + PCI_DEVICE_ID_EXAR_##devid, \ + PCI_VENDOR_ID_EXAR, \ + PCI_SUBDEVICE_ID_USR_##sdevid), 0, 0, \ + (kernel_ulong_t)&bd \ + } + static const struct pci_device_id exar_pci_tbl[] = { EXAR_DEVICE(ACCESSIO, COM_2S, pbn_exar_XR17C15x), EXAR_DEVICE(ACCESSIO, COM_4S, pbn_exar_XR17C15x), @@ -853,6 +866,10 @@ static const struct pci_device_id exar_pci_tbl[] = { IBM_DEVICE(XR17C152, SATURN_SERIAL_ONE_PORT, pbn_exar_ibm_saturn), + /* USRobotics USR298x-OEM PCI Modems */ + USR_DEVICE(XR17C152, 2980, pbn_exar_XR17C15x), + USR_DEVICE(XR17C152, 2981, pbn_exar_XR17C15x), + /* Exar Corp. 
XR17C15[248] Dual/Quad/Octal UART */ EXAR_DEVICE(EXAR, XR17C152, pbn_exar_XR17C15x), EXAR_DEVICE(EXAR, XR17C154, pbn_exar_XR17C15x), diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index c55be6fda0cab1fb9cedc6014fb8b90e5094a492..e80c4f6551a1c26c701729d2eb6d22623016c3d4 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -1920,6 +1920,8 @@ pci_moxa_setup(struct serial_private *priv, #define PCI_SUBDEVICE_ID_SIIG_DUAL_30 0x2530 #define PCI_VENDOR_ID_ADVANTECH 0x13fe #define PCI_DEVICE_ID_INTEL_CE4100_UART 0x2e66 +#define PCI_DEVICE_ID_ADVANTECH_PCI1600 0x1600 +#define PCI_DEVICE_ID_ADVANTECH_PCI1600_1611 0x1611 #define PCI_DEVICE_ID_ADVANTECH_PCI3620 0x3620 #define PCI_DEVICE_ID_ADVANTECH_PCI3618 0x3618 #define PCI_DEVICE_ID_ADVANTECH_PCIf618 0xf618 @@ -4085,6 +4087,9 @@ static SIMPLE_DEV_PM_OPS(pciserial_pm_ops, pciserial_suspend_one, pciserial_resume_one); static const struct pci_device_id serial_pci_tbl[] = { + { PCI_VENDOR_ID_ADVANTECH, PCI_DEVICE_ID_ADVANTECH_PCI1600, + PCI_DEVICE_ID_ADVANTECH_PCI1600_1611, PCI_ANY_ID, 0, 0, + pbn_b0_4_921600 }, /* Advantech use PCI_DEVICE_ID_ADVANTECH_PCI3620 (0x3620) as 'PCI_SUBVENDOR_ID' */ { PCI_VENDOR_ID_ADVANTECH, PCI_DEVICE_ID_ADVANTECH_PCI3620, PCI_DEVICE_ID_ADVANTECH_PCI3620, 0x0001, 0, 0, diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index fe8d79c4ae95e5c0cb944c6906f9b286832f46e6..c153ba3a018a255bd4304099d951a017f3482502 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -669,6 +669,7 @@ EXPORT_SYMBOL_GPL(serial8250_em485_supported); /** * serial8250_em485_config() - generic ->rs485_config() callback * @port: uart port + * @termios: termios structure * @rs485: rs485 settings * * Generic callback usable by 8250 uart drivers to activate rs485 settings diff --git a/drivers/tty/serial/8250/8250_tegra.c b/drivers/tty/serial/8250/8250_tegra.c index 2509e7f74ccf2bcd7a366c90dee0724a9b38ef06..89956bbf34d9e4d4ad6b921db7a13b039629741a 100644 --- a/drivers/tty/serial/8250/8250_tegra.c +++ b/drivers/tty/serial/8250/8250_tegra.c @@ -113,13 +113,15 @@ static int tegra_uart_probe(struct platform_device *pdev) ret = serial8250_register_8250_port(&port8250); if (ret < 0) - goto err_clkdisable; + goto err_ctrl_assert; platform_set_drvdata(pdev, uart); uart->line = ret; return 0; +err_ctrl_assert: + reset_control_assert(uart->rst); err_clkdisable: clk_disable_unprepare(uart->clk); diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 398e5aac2e77a326686310d4b3c2355502765ce2..3e3fb377d90d4c9f15ad55055025d4d6d8883c25 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -762,7 +762,7 @@ config SERIAL_PMACZILOG_CONSOLE config SERIAL_CPM tristate "CPM SCC/SMC serial port support" - depends on CPM2 || CPM1 || (PPC32 && COMPILE_TEST) + depends on CPM2 || CPM1 select SERIAL_CORE help This driver supports the SCC and SMC serial ports on Motorola diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c index 59e25f2b6632215cbb06a60a4c17e57402abc53e..4b2512eef577b431748cf539609cb4e72678633c 100644 --- a/drivers/tty/serial/arc_uart.c +++ b/drivers/tty/serial/arc_uart.c @@ -606,10 +606,11 @@ static int arc_serial_probe(struct platform_device *pdev) } uart->baud = val; - port->membase = of_iomap(np, 0); - if (!port->membase) + port->membase = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(port->membase)) { /* No point of dev_err since UART itself is 
hosed here */ - return -ENXIO; + return PTR_ERR(port->membase); + } port->irq = irq_of_parse_and_map(np, 0); diff --git a/drivers/tty/serial/cpm_uart/cpm_uart.h b/drivers/tty/serial/cpm_uart/cpm_uart.h index 0577618e78c041af601b2f238f86b67de4ab2f1a..46c03ed71c31bb346e67e48cd998500f903b53fa 100644 --- a/drivers/tty/serial/cpm_uart/cpm_uart.h +++ b/drivers/tty/serial/cpm_uart/cpm_uart.h @@ -19,8 +19,6 @@ struct gpio_desc; #include "cpm_uart_cpm2.h" #elif defined(CONFIG_CPM1) #include "cpm_uart_cpm1.h" -#elif defined(CONFIG_COMPILE_TEST) -#include "cpm_uart_cpm2.h" #endif #define SERIAL_CPM_MAJOR 204 diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index c91916e13648302545d8e60727d8615fa94ca436..7fd30fcc10c6238c2f3961ddc215a39a8cd61082 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -310,7 +310,7 @@ static const struct lpuart_soc_data ls1021a_data = { static const struct lpuart_soc_data ls1028a_data = { .devtype = LS1028A_LPUART, .iotype = UPIO_MEM32, - .rx_watermark = 1, + .rx_watermark = 0, }; static struct lpuart_soc_data imx7ulp_data = { @@ -1495,34 +1495,36 @@ static void lpuart_break_ctl(struct uart_port *port, int break_state) static void lpuart32_break_ctl(struct uart_port *port, int break_state) { - unsigned long temp, modem; - struct tty_struct *tty; - unsigned int cflag = 0; - - tty = tty_port_tty_get(&port->state->port); - if (tty) { - cflag = tty->termios.c_cflag; - tty_kref_put(tty); - } + unsigned long temp; - temp = lpuart32_read(port, UARTCTRL) & ~UARTCTRL_SBK; - modem = lpuart32_read(port, UARTMODIR); + temp = lpuart32_read(port, UARTCTRL); + /* + * LPUART IP now has two known bugs, one is CTS has higher priority than the + * break signal, which causes the break signal sending through UARTCTRL_SBK + * may impacted by the CTS input if the HW flow control is enabled. It + * exists on all platforms we support in this driver. + * Another bug is i.MX8QM LPUART may have an additional break character + * being sent after SBK was cleared. + * To avoid above two bugs, we use Transmit Data Inversion function to send + * the break signal instead of UARTCTRL_SBK. + */ if (break_state != 0) { - temp |= UARTCTRL_SBK; /* - * LPUART CTS has higher priority than SBK, need to disable CTS before - * asserting SBK to avoid any interference if flow control is enabled. + * Disable the transmitter to prevent any data from being sent out + * during break, then invert the TX line to send break. */ - if (cflag & CRTSCTS && modem & UARTMODIR_TXCTSE) - lpuart32_write(port, modem & ~UARTMODIR_TXCTSE, UARTMODIR); + temp &= ~UARTCTRL_TE; + lpuart32_write(port, temp, UARTCTRL); + temp |= UARTCTRL_TXINV; + lpuart32_write(port, temp, UARTCTRL); } else { - /* Re-enable the CTS when break off. */ - if (cflag & CRTSCTS && !(modem & UARTMODIR_TXCTSE)) - lpuart32_write(port, modem | UARTMODIR_TXCTSE, UARTMODIR); + /* Disable the TXINV to turn off break and re-enable transmitter. 
*/ + temp &= ~UARTCTRL_TXINV; + lpuart32_write(port, temp, UARTCTRL); + temp |= UARTCTRL_TE; + lpuart32_write(port, temp, UARTCTRL); } - - lpuart32_write(port, temp, UARTCTRL); } static void lpuart_setup_watermark(struct lpuart_port *sport) diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c index a58e9277dfad8752b10af8ca5bd8ad7383984531..f1387f1024dbb336b2304966d990b563ba42f0db 100644 --- a/drivers/tty/serial/lantiq.c +++ b/drivers/tty/serial/lantiq.c @@ -250,6 +250,7 @@ lqasc_err_int(int irq, void *_port) struct ltq_uart_port *ltq_port = to_ltq_uart_port(port); spin_lock_irqsave(<q_port->lock, flags); + __raw_writel(ASC_IRNCR_EIR, port->membase + LTQ_ASC_IRNCR); /* clear any pending interrupts */ asc_update_bits(0, ASCWHBSTATE_CLRPE | ASCWHBSTATE_CLRFE | ASCWHBSTATE_CLRROE, port->membase + LTQ_ASC_WHBSTATE); diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index 08dc3e2a729c76af3cb1ccb2658831c83873b45a..8582479f0211a082cfe1209dcee1aacc3cdc1459 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -1664,19 +1664,18 @@ static int qcom_geni_serial_probe(struct platform_device *pdev) uport->private_data = &port->private_data; platform_set_drvdata(pdev, port); - ret = uart_add_one_port(drv, uport); - if (ret) - return ret; - irq_set_status_flags(uport->irq, IRQ_NOAUTOEN); ret = devm_request_irq(uport->dev, uport->irq, qcom_geni_serial_isr, IRQF_TRIGGER_HIGH, port->name, uport); if (ret) { dev_err(uport->dev, "Failed to get IRQ ret %d\n", ret); - uart_remove_one_port(drv, uport); return ret; } + ret = uart_add_one_port(drv, uport); + if (ret) + return ret; + /* * Set pm_runtime status as ACTIVE so that wakeup_irq gets * enabled/disabled from dev_pm_arm_wake_irq during system diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c index 498ba9c0ee9374f971854009ce39f64dfef345b9..829c4be66f3b036e1941a7e6fab286f90d9e8725 100644 --- a/drivers/tty/vt/vc_screen.c +++ b/drivers/tty/vt/vc_screen.c @@ -656,10 +656,17 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) } } - /* The vcs_size might have changed while we slept to grab - * the user buffer, so recheck. + /* The vc might have been freed or vcs_size might have changed + * while we slept to grab the user buffer, so recheck. * Return data written up to now on failure. 
*/ + vc = vcs_vc(inode, &viewed); + if (!vc) { + if (written) + break; + ret = -ENXIO; + goto unlock_out; + } size = vcs_size(vc, attr, false); if (size < 0) { if (written) diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c index 202ff71e1b582132f474c91379a63b158074a928..51b3c6ae781df4a4ab93f5ac5a61f71e622c27e2 100644 --- a/drivers/ufs/core/ufs-mcq.c +++ b/drivers/ufs/core/ufs-mcq.c @@ -150,7 +150,8 @@ static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba) u32 hba_maxq, rem, tot_queues; struct Scsi_Host *host = hba->host; - hba_maxq = FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities); + /* maxq is 0 based value */ + hba_maxq = FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities) + 1; tot_queues = UFS_MCQ_NUM_DEV_CMD_QUEUES + read_queues + poll_queues + rw_queues; @@ -265,7 +266,7 @@ static int ufshcd_mcq_get_tag(struct ufs_hba *hba, addr = (le64_to_cpu(cqe->command_desc_base_addr) & CQE_UCD_BA) - hba->ucdl_dma_addr; - return div_u64(addr, sizeof(struct utp_transfer_cmd_desc)); + return div_u64(addr, ufshcd_get_ucd_size(hba)); } static void ufshcd_mcq_process_cqe(struct ufs_hba *hba, diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 45fd374fe56c13e6b834ddee59ceb092c0c94701..e7e79f515e14182c627069ea231fd8c2a0f0baaf 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -2849,10 +2849,10 @@ static void ufshcd_map_queues(struct Scsi_Host *shost) static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i) { struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr + - i * sizeof_utp_transfer_cmd_desc(hba); + i * ufshcd_get_ucd_size(hba); struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr; dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr + - i * sizeof_utp_transfer_cmd_desc(hba); + i * ufshcd_get_ucd_size(hba); u16 response_offset = offsetof(struct utp_transfer_cmd_desc, response_upiu); u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table); @@ -3761,7 +3761,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba) size_t utmrdl_size, utrdl_size, ucdl_size; /* Allocate memory for UTP command descriptors */ - ucdl_size = sizeof_utp_transfer_cmd_desc(hba) * hba->nutrs; + ucdl_size = ufshcd_get_ucd_size(hba) * hba->nutrs; hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev, ucdl_size, &hba->ucdl_dma_addr, @@ -3861,7 +3861,7 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba) prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table); - cmd_desc_size = sizeof_utp_transfer_cmd_desc(hba); + cmd_desc_size = ufshcd_get_ucd_size(hba); cmd_desc_dma_addr = hba->ucdl_dma_addr; for (i = 0; i < hba->nutrs; i++) { @@ -8452,7 +8452,7 @@ static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs) { size_t ucdl_size, utrdl_size; - ucdl_size = sizeof(struct utp_transfer_cmd_desc) * nutrs; + ucdl_size = ufshcd_get_ucd_size(hba) * nutrs; dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr, hba->ucdl_dma_addr); diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c index ccfaebca6faa7512ac92ad0beeea239f467bbb52..1dcadef933e3a6fb3e163ec5061a8f391960baa5 100644 --- a/drivers/usb/cdns3/cdns3-gadget.c +++ b/drivers/usb/cdns3/cdns3-gadget.c @@ -2097,6 +2097,19 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable) else priv_ep->trb_burst_size = 16; + /* + * In versions preceding DEV_VER_V2, for example, iMX8QM, there exit the bugs + * in the DMA. 
These bugs occur when the trb_burst_size exceeds 16 and the + * address is not aligned to 128 Bytes (which is a product of the 64-bit AXI + * and AXI maximum burst length of 16 or 0xF+1, dma_axi_ctrl0[3:0]). This + * results in data corruption when it crosses the 4K border. The corruption + * specifically occurs from the position (4K - (address & 0x7F)) to 4K. + * + * So force trb_burst_size to 16 at such platform. + */ + if (priv_dev->dev_ver < DEV_VER_V2) + priv_ep->trb_burst_size = 16; + mult = min_t(u8, mult, EP_CFG_MULT_MAX); buffering = min_t(u8, buffering, EP_CFG_BUFFERING_MAX); maxburst = min_t(u8, maxburst, EP_CFG_MAXBURST_MAX); diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index 4bb6d304eb4b21fe3049f44ca000826af2c40ff0..311007b1d9046566b8a155c21ef8188cca13a819 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c @@ -1928,6 +1928,8 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data, if (request.req.wLength > USBTMC_BUFSIZE) return -EMSGSIZE; + if (request.req.wLength == 0) /* Length-0 requests are never IN */ + request.req.bRequestType &= ~USB_DIR_IN; is_in = request.req.bRequestType & USB_DIR_IN; diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c index fbb087b728dc987fbcda7d56fbba97b0d6a948a5..268ccbec88f951ab44ad5f410dc19a3a22e3c7a7 100644 --- a/drivers/usb/core/buffer.c +++ b/drivers/usb/core/buffer.c @@ -172,3 +172,44 @@ void hcd_buffer_free( } dma_free_coherent(hcd->self.sysdev, size, addr, dma); } + +void *hcd_buffer_alloc_pages(struct usb_hcd *hcd, + size_t size, gfp_t mem_flags, dma_addr_t *dma) +{ + if (size == 0) + return NULL; + + if (hcd->localmem_pool) + return gen_pool_dma_alloc_align(hcd->localmem_pool, + size, dma, PAGE_SIZE); + + /* some USB hosts just use PIO */ + if (!hcd_uses_dma(hcd)) { + *dma = DMA_MAPPING_ERROR; + return (void *)__get_free_pages(mem_flags, + get_order(size)); + } + + return dma_alloc_coherent(hcd->self.sysdev, + size, dma, mem_flags); +} + +void hcd_buffer_free_pages(struct usb_hcd *hcd, + size_t size, void *addr, dma_addr_t dma) +{ + if (!addr) + return; + + if (hcd->localmem_pool) { + gen_pool_free(hcd->localmem_pool, + (unsigned long)addr, size); + return; + } + + if (!hcd_uses_dma(hcd)) { + free_pages((unsigned long)addr, get_order(size)); + return; + } + + dma_free_coherent(hcd->self.sysdev, size, addr, dma); +} diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index e501a03d6c700b930b0a152bcb9c87dfe6f5ae7e..fcf68818e999286296e51626e72b25461ab3ea7f 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -186,6 +186,7 @@ static int connected(struct usb_dev_state *ps) static void dec_usb_memory_use_count(struct usb_memory *usbm, int *count) { struct usb_dev_state *ps = usbm->ps; + struct usb_hcd *hcd = bus_to_hcd(ps->dev->bus); unsigned long flags; spin_lock_irqsave(&ps->lock, flags); @@ -194,8 +195,8 @@ static void dec_usb_memory_use_count(struct usb_memory *usbm, int *count) list_del(&usbm->memlist); spin_unlock_irqrestore(&ps->lock, flags); - usb_free_coherent(ps->dev, usbm->size, usbm->mem, - usbm->dma_handle); + hcd_buffer_free_pages(hcd, usbm->size, + usbm->mem, usbm->dma_handle); usbfs_decrease_memory_usage( usbm->size + sizeof(struct usb_memory)); kfree(usbm); @@ -234,7 +235,7 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma) size_t size = vma->vm_end - vma->vm_start; void *mem; unsigned long flags; - dma_addr_t dma_handle; + dma_addr_t dma_handle = DMA_MAPPING_ERROR; int ret; ret = 
usbfs_increase_memory_usage(size + sizeof(struct usb_memory)); @@ -247,8 +248,8 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma) goto error_decrease_mem; } - mem = usb_alloc_coherent(ps->dev, size, GFP_USER | __GFP_NOWARN, - &dma_handle); + mem = hcd_buffer_alloc_pages(hcd, + size, GFP_USER | __GFP_NOWARN, &dma_handle); if (!mem) { ret = -ENOMEM; goto error_free_usbm; @@ -264,7 +265,14 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma) usbm->vma_use_count = 1; INIT_LIST_HEAD(&usbm->memlist); - if (hcd->localmem_pool || !hcd_uses_dma(hcd)) { + /* + * In DMA-unavailable cases, hcd_buffer_alloc_pages allocates + * normal pages and assigns DMA_MAPPING_ERROR to dma_handle. Check + * whether we are in such cases, and then use remap_pfn_range (or + * dma_mmap_coherent) to map normal (or DMA) pages into the user + * space, respectively. + */ + if (dma_handle == DMA_MAPPING_ERROR) { if (remap_pfn_range(vma, vma->vm_start, virt_to_phys(usbm->mem) >> PAGE_SHIFT, size, vma->vm_page_prot) < 0) { diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 0beaab932e7db2122e05bf52ef84c0c4b538d434..d68958e151a78d70e7f823899ec9152ec6bf568b 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -1137,7 +1137,7 @@ static int dwc3_core_init(struct dwc3 *dwc) dwc3_set_incr_burst_type(dwc); - dwc3_phy_power_on(dwc); + ret = dwc3_phy_power_on(dwc); if (ret) goto err_exit_phy; @@ -1929,6 +1929,11 @@ static int dwc3_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_put_noidle(&pdev->dev); + /* + * HACK: Clear the driver data, which is currently accessed by parent + * glue drivers, before allowing the parent to suspend. + */ + platform_set_drvdata(pdev, NULL); pm_runtime_set_suspended(&pdev->dev); dwc3_free_event_buffers(dwc); diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index d56457c02996b669c60a096533a506c324ad4a98..1f043c31a0969ffc427f62b56974162c0cf0651b 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -1116,6 +1116,7 @@ struct dwc3_scratchpad_array { * @dis_metastability_quirk: set to disable metastability quirk. * @dis_split_quirk: set to disable split boundary. * @wakeup_configured: set if the device is configured for remote wakeup. + * @suspended: set to track suspend event due to U3/L2. * @imod_interval: set the interrupt moderation interval in 250ns * increments or 0 to disable. * @max_cfg_eps: current max number of IN eps used across all USB configs. 
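The new dwc3 "suspended" flag documented above exists so that the gadget is told about a U3/L2 suspend only once, and is considered active again on any wakeup, reset, or disconnect event. A minimal standalone sketch of that debounce pattern follows; the names are illustrative only and this is not the driver code itself:

#include <stdbool.h>

/* Hypothetical controller state; stands in for the real struct dwc3. */
struct link_tracker {
	bool suspended;
};

/* Report a suspend to the gadget only on the first entry into U3/L2. */
static void link_suspend_event(struct link_tracker *t, bool entered_u3)
{
	if (!t->suspended && entered_u3) {
		t->suspended = true;
		/* gadget_notify_suspend(t); -- placeholder for the real callback */
	}
}

/* Wakeup, reset and disconnect all mean the link is active again. */
static void link_activity_event(struct link_tracker *t)
{
	t->suspended = false;
	/* gadget_notify_resume(t); -- placeholder for the real callback */
}

Without such a flag, repeated U3 link-state events would re-deliver suspend callbacks the gadget has already handled.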
@@ -1332,6 +1333,7 @@ struct dwc3 { unsigned dis_split_quirk:1; unsigned async_callbacks:1; unsigned wakeup_configured:1; + unsigned suspended:1; u16 imod_interval; diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c index e4a2560b9dc0c670d9a5c775a73d17c340b5a992..ebf03468fac4a9f89cefa093c6ba346f0b1035dd 100644 --- a/drivers/usb/dwc3/debugfs.c +++ b/drivers/usb/dwc3/debugfs.c @@ -332,6 +332,11 @@ static int dwc3_lsp_show(struct seq_file *s, void *unused) unsigned int current_mode; unsigned long flags; u32 reg; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); reg = dwc3_readl(dwc->regs, DWC3_GSTS); @@ -350,6 +355,8 @@ static int dwc3_lsp_show(struct seq_file *s, void *unused) } spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -395,6 +402,11 @@ static int dwc3_mode_show(struct seq_file *s, void *unused) struct dwc3 *dwc = s->private; unsigned long flags; u32 reg; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); reg = dwc3_readl(dwc->regs, DWC3_GCTL); @@ -414,6 +426,8 @@ static int dwc3_mode_show(struct seq_file *s, void *unused) seq_printf(s, "UNKNOWN %08x\n", DWC3_GCTL_PRTCAP(reg)); } + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -463,6 +477,11 @@ static int dwc3_testmode_show(struct seq_file *s, void *unused) struct dwc3 *dwc = s->private; unsigned long flags; u32 reg; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); reg = dwc3_readl(dwc->regs, DWC3_DCTL); @@ -493,6 +512,8 @@ static int dwc3_testmode_show(struct seq_file *s, void *unused) seq_printf(s, "UNKNOWN %d\n", reg); } + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -509,6 +530,7 @@ static ssize_t dwc3_testmode_write(struct file *file, unsigned long flags; u32 testmode = 0; char buf[32]; + int ret; if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; @@ -526,10 +548,16 @@ static ssize_t dwc3_testmode_write(struct file *file, else testmode = 0; + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; + spin_lock_irqsave(&dwc->lock, flags); dwc3_gadget_set_test_mode(dwc, testmode); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return count; } @@ -548,12 +576,18 @@ static int dwc3_link_state_show(struct seq_file *s, void *unused) enum dwc3_link_state state; u32 reg; u8 speed; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); reg = dwc3_readl(dwc->regs, DWC3_GSTS); if (DWC3_GSTS_CURMOD(reg) != DWC3_GSTS_CURMOD_DEVICE) { seq_puts(s, "Not available\n"); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); return 0; } @@ -566,6 +600,8 @@ static int dwc3_link_state_show(struct seq_file *s, void *unused) dwc3_gadget_hs_link_string(state)); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -584,6 +620,7 @@ static ssize_t dwc3_link_state_write(struct file *file, char buf[32]; u32 reg; u8 speed; + int ret; if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; @@ -603,10 +640,15 @@ static ssize_t dwc3_link_state_write(struct file *file, else return -EINVAL; + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; + spin_lock_irqsave(&dwc->lock, flags); reg = dwc3_readl(dwc->regs, 
DWC3_GSTS); if (DWC3_GSTS_CURMOD(reg) != DWC3_GSTS_CURMOD_DEVICE) { spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); return -EINVAL; } @@ -616,12 +658,15 @@ static ssize_t dwc3_link_state_write(struct file *file, if (speed < DWC3_DSTS_SUPERSPEED && state != DWC3_LINK_STATE_RECOV) { spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); return -EINVAL; } dwc3_gadget_set_link_state(dwc, state); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return count; } @@ -645,6 +690,11 @@ static int dwc3_tx_fifo_size_show(struct seq_file *s, void *unused) unsigned long flags; u32 mdwidth; u32 val; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); val = dwc3_core_fifo_space(dep, DWC3_TXFIFO); @@ -657,6 +707,8 @@ static int dwc3_tx_fifo_size_show(struct seq_file *s, void *unused) seq_printf(s, "%u\n", val); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -667,6 +719,11 @@ static int dwc3_rx_fifo_size_show(struct seq_file *s, void *unused) unsigned long flags; u32 mdwidth; u32 val; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); val = dwc3_core_fifo_space(dep, DWC3_RXFIFO); @@ -679,6 +736,8 @@ static int dwc3_rx_fifo_size_show(struct seq_file *s, void *unused) seq_printf(s, "%u\n", val); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -688,12 +747,19 @@ static int dwc3_tx_request_queue_show(struct seq_file *s, void *unused) struct dwc3 *dwc = dep->dwc; unsigned long flags; u32 val; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); val = dwc3_core_fifo_space(dep, DWC3_TXREQQ); seq_printf(s, "%u\n", val); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -703,12 +769,19 @@ static int dwc3_rx_request_queue_show(struct seq_file *s, void *unused) struct dwc3 *dwc = dep->dwc; unsigned long flags; u32 val; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); val = dwc3_core_fifo_space(dep, DWC3_RXREQQ); seq_printf(s, "%u\n", val); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -718,12 +791,19 @@ static int dwc3_rx_info_queue_show(struct seq_file *s, void *unused) struct dwc3 *dwc = dep->dwc; unsigned long flags; u32 val; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); val = dwc3_core_fifo_space(dep, DWC3_RXINFOQ); seq_printf(s, "%u\n", val); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -733,12 +813,19 @@ static int dwc3_descriptor_fetch_queue_show(struct seq_file *s, void *unused) struct dwc3 *dwc = dep->dwc; unsigned long flags; u32 val; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); val = dwc3_core_fifo_space(dep, DWC3_DESCFETCHQ); seq_printf(s, "%u\n", val); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -748,12 +835,19 @@ static int dwc3_event_queue_show(struct seq_file *s, void *unused) struct dwc3 *dwc = dep->dwc; unsigned long flags; u32 val; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + 
return ret; spin_lock_irqsave(&dwc->lock, flags); val = dwc3_core_fifo_space(dep, DWC3_EVENTQ); seq_printf(s, "%u\n", val); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -798,6 +892,11 @@ static int dwc3_trb_ring_show(struct seq_file *s, void *unused) struct dwc3 *dwc = dep->dwc; unsigned long flags; int i; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); if (dep->number <= 1) { @@ -827,6 +926,8 @@ static int dwc3_trb_ring_show(struct seq_file *s, void *unused) out: spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -839,6 +940,11 @@ static int dwc3_ep_info_register_show(struct seq_file *s, void *unused) u32 lower_32_bits; u32 upper_32_bits; u32 reg; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); reg = DWC3_GDBGLSPMUX_EPSELECT(dep->number); @@ -851,6 +957,8 @@ static int dwc3_ep_info_register_show(struct seq_file *s, void *unused) seq_printf(s, "0x%016llx\n", ep_info); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -910,6 +1018,7 @@ void dwc3_debugfs_init(struct dwc3 *dwc) dwc->regset->regs = dwc3_regs; dwc->regset->nregs = ARRAY_SIZE(dwc3_regs); dwc->regset->base = dwc->regs - DWC3_GLOBALS_REGS_START; + dwc->regset->dev = dwc->dev; root = debugfs_create_dir(dev_name(dwc->dev), usb_debug_root); dwc->debug_root = root; diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c index 959fc925ca7c52316b4df8f23157fd4cedc02257..79b22abf97276fee67f594837d028262e7ff5729 100644 --- a/drivers/usb/dwc3/dwc3-qcom.c +++ b/drivers/usb/dwc3/dwc3-qcom.c @@ -308,7 +308,16 @@ static void dwc3_qcom_interconnect_exit(struct dwc3_qcom *qcom) /* Only usable in contexts where the role can not change. */ static bool dwc3_qcom_is_host(struct dwc3_qcom *qcom) { - struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3); + struct dwc3 *dwc; + + /* + * FIXME: Fix this layering violation. + */ + dwc = platform_get_drvdata(qcom->dwc3); + + /* Core driver may not have probed yet. */ + if (!dwc) + return false; return dwc->xhci; } diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index c0ca4d12f95d96b189128456d1b7c33455c02723..b78599dd705c28a9bf38943a649d710ec5226709 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -198,6 +198,7 @@ static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep, list_del(&req->list); req->remaining = 0; req->needs_extra_trb = false; + req->num_trbs = 0; if (req->request.status == -EINPROGRESS) req->request.status = status; @@ -2440,6 +2441,7 @@ static int dwc3_gadget_func_wakeup(struct usb_gadget *g, int intf_id) return -EINVAL; } dwc3_resume_gadget(dwc); + dwc->suspended = false; dwc->link_state = DWC3_LINK_STATE_U0; } @@ -2699,6 +2701,21 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc) return ret; } +static int dwc3_gadget_soft_connect(struct dwc3 *dwc) +{ + /* + * In the Synopsys DWC_usb31 1.90a programming guide section + * 4.1.9, it specifies that for a reconnect after a + * device-initiated disconnect requires a core soft reset + * (DCTL.CSftRst) before enabling the run/stop bit. 
+ */ + dwc3_core_soft_reset(dwc); + + dwc3_event_buffers_setup(dwc); + __dwc3_gadget_start(dwc); + return dwc3_gadget_run_stop(dwc, true); +} + static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) { struct dwc3 *dwc = gadget_to_dwc(g); @@ -2737,21 +2754,10 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) synchronize_irq(dwc->irq_gadget); - if (!is_on) { + if (!is_on) ret = dwc3_gadget_soft_disconnect(dwc); - } else { - /* - * In the Synopsys DWC_usb31 1.90a programming guide section - * 4.1.9, it specifies that for a reconnect after a - * device-initiated disconnect requires a core soft reset - * (DCTL.CSftRst) before enabling the run/stop bit. - */ - dwc3_core_soft_reset(dwc); - - dwc3_event_buffers_setup(dwc); - __dwc3_gadget_start(dwc); - ret = dwc3_gadget_run_stop(dwc, true); - } + else + ret = dwc3_gadget_soft_connect(dwc); pm_runtime_put(dwc->dev); @@ -3938,6 +3944,8 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc) { int reg; + dwc->suspended = false; + dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RX_DET); reg = dwc3_readl(dwc->regs, DWC3_DCTL); @@ -3962,6 +3970,8 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc) { u32 reg; + dwc->suspended = false; + /* * Ideally, dwc3_reset_gadget() would trigger the function * drivers to stop any active transfers through ep disable. @@ -4180,6 +4190,8 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc, unsigned int evtinfo) { + dwc->suspended = false; + /* * TODO take core out of low power mode when that's * implemented. @@ -4277,6 +4289,7 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc, if (dwc->gadget->wakeup_armed) { dwc3_gadget_enable_linksts_evts(dwc, false); dwc3_resume_gadget(dwc); + dwc->suspended = false; } break; case DWC3_LINK_STATE_U1: @@ -4303,8 +4316,10 @@ static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc, { enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK; - if (dwc->link_state != next && next == DWC3_LINK_STATE_U3) + if (!dwc->suspended && next == DWC3_LINK_STATE_U3) { + dwc->suspended = true; dwc3_suspend_gadget(dwc); + } dwc->link_state = next; } @@ -4655,42 +4670,39 @@ void dwc3_gadget_exit(struct dwc3 *dwc) int dwc3_gadget_suspend(struct dwc3 *dwc) { unsigned long flags; + int ret; if (!dwc->gadget_driver) return 0; - dwc3_gadget_run_stop(dwc, false); + ret = dwc3_gadget_soft_disconnect(dwc); + if (ret) + goto err; spin_lock_irqsave(&dwc->lock, flags); dwc3_disconnect_gadget(dwc); - __dwc3_gadget_stop(dwc); spin_unlock_irqrestore(&dwc->lock, flags); return 0; + +err: + /* + * Attempt to reset the controller's state. Likely no + * communication can be established until the host + * performs a port reset. 
+ */ + if (dwc->softconnect) + dwc3_gadget_soft_connect(dwc); + + return ret; } int dwc3_gadget_resume(struct dwc3 *dwc) { - int ret; - if (!dwc->gadget_driver || !dwc->softconnect) return 0; - ret = __dwc3_gadget_start(dwc); - if (ret < 0) - goto err0; - - ret = dwc3_gadget_run_stop(dwc, true); - if (ret < 0) - goto err1; - - return 0; - -err1: - __dwc3_gadget_stop(dwc); - -err0: - return ret; + return dwc3_gadget_soft_connect(dwc); } void dwc3_gadget_process_pending_events(struct dwc3 *dwc) diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index a13c946e0663c2c41736bc04f0dcfae44895a93b..f41a385a5c421adbdeee11f1f010fb3a885a0b30 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -3535,6 +3535,7 @@ static void ffs_func_unbind(struct usb_configuration *c, /* Drain any pending AIO completions */ drain_workqueue(ffs->io_completion_wq); + ffs_event_add(ffs, FUNCTIONFS_UNBIND); if (!--opts->refcnt) functionfs_unbind(ffs); @@ -3559,7 +3560,6 @@ static void ffs_func_unbind(struct usb_configuration *c, func->function.ssp_descriptors = NULL; func->interfaces_nums = NULL; - ffs_event_add(ffs, FUNCTIONFS_UNBIND); } static struct usb_function *ffs_alloc(struct usb_function_instance *fi) diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c index 6956ad8ba8dd86e5c1a3fd6e10bc25adc8a734f2..a366abb456236c7033b6fbd1c7cfb9b4dcc1a7cf 100644 --- a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include "u_ether.h" @@ -965,6 +966,8 @@ int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len) dev = netdev_priv(net); snprintf(host_addr, len, "%pm", dev->host_mac); + string_upper(host_addr, host_addr); + return strlen(host_addr); } EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc); diff --git a/drivers/usb/gadget/udc/amd5536udc_pci.c b/drivers/usb/gadget/udc/amd5536udc_pci.c index c80f9bd51b7501eef578fde52dae2d40dc712ca6..a36913ae31f9ef56f5814e61bc01d6da1769f8a4 100644 --- a/drivers/usb/gadget/udc/amd5536udc_pci.c +++ b/drivers/usb/gadget/udc/amd5536udc_pci.c @@ -170,6 +170,9 @@ static int udc_pci_probe( retval = -ENODEV; goto err_probe; } + + udc = dev; + return 0; err_probe: diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index 4641153e9706b161889ec3836d5807ec71552309..83fd1de14784f29ca78be8ea9fb9eb4a7cc6ab06 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -37,10 +37,14 @@ static const struct bus_type gadget_bus_type; * @vbus: for udcs who care about vbus status, this value is real vbus status; * for udcs who do not care about vbus status, this value is always true * @started: the UDC's started state. True if the UDC had started. - * @connect_lock: protects udc->vbus, udc->started, gadget->connect, gadget->deactivate related - * functions. usb_gadget_connect_locked, usb_gadget_disconnect_locked, - * usb_udc_connect_control_locked, usb_gadget_udc_start_locked, usb_gadget_udc_stop_locked are - * called with this lock held. + * @allow_connect: Indicates whether UDC is allowed to be pulled up. + * Set/cleared by gadget_(un)bind_driver() after gadget driver is bound or + * unbound. + * @connect_lock: protects udc->started, gadget->connect, + * gadget->allow_connect and gadget->deactivate. 
The routines + * usb_gadget_connect_locked(), usb_gadget_disconnect_locked(), + * usb_udc_connect_control_locked(), usb_gadget_udc_start_locked() and + * usb_gadget_udc_stop_locked() are called with this lock held. * * This represents the internal data structure which is used by the UDC-class * to hold information about udc driver and gadget together. @@ -52,6 +56,8 @@ struct usb_udc { struct list_head list; bool vbus; bool started; + bool allow_connect; + struct work_struct vbus_work; struct mutex connect_lock; }; @@ -692,7 +698,6 @@ out: } EXPORT_SYMBOL_GPL(usb_gadget_vbus_disconnect); -/* Internal version of usb_gadget_connect needs to be called with connect_lock held. */ static int usb_gadget_connect_locked(struct usb_gadget *gadget) __must_hold(&gadget->udc->connect_lock) { @@ -703,15 +708,12 @@ static int usb_gadget_connect_locked(struct usb_gadget *gadget) goto out; } - if (gadget->connected) - goto out; - - if (gadget->deactivated || !gadget->udc->started) { + if (gadget->deactivated || !gadget->udc->allow_connect || !gadget->udc->started) { /* - * If gadget is deactivated we only save new state. - * Gadget will be connected automatically after activation. - * - * udc first needs to be started before gadget can be pulled up. + * If the gadget isn't usable (because it is deactivated, + * unbound, or not yet started), we only save the new state. + * The gadget will be connected automatically when it is + * activated/bound/started. */ gadget->connected = true; goto out; @@ -749,7 +751,6 @@ int usb_gadget_connect(struct usb_gadget *gadget) } EXPORT_SYMBOL_GPL(usb_gadget_connect); -/* Internal version of usb_gadget_disconnect needs to be called with connect_lock held. */ static int usb_gadget_disconnect_locked(struct usb_gadget *gadget) __must_hold(&gadget->udc->connect_lock) { @@ -767,8 +768,6 @@ static int usb_gadget_disconnect_locked(struct usb_gadget *gadget) /* * If gadget is deactivated we only save new state. * Gadget will stay disconnected after activation. - * - * udc should have been started before gadget being pulled down. */ gadget->connected = false; goto out; @@ -829,10 +828,10 @@ int usb_gadget_deactivate(struct usb_gadget *gadget) { int ret = 0; + mutex_lock(&gadget->udc->connect_lock); if (gadget->deactivated) - goto out; + goto unlock; - mutex_lock(&gadget->udc->connect_lock); if (gadget->connected) { ret = usb_gadget_disconnect_locked(gadget); if (ret) @@ -848,7 +847,6 @@ int usb_gadget_deactivate(struct usb_gadget *gadget) unlock: mutex_unlock(&gadget->udc->connect_lock); -out: trace_usb_gadget_deactivate(gadget, ret); return ret; @@ -868,10 +866,10 @@ int usb_gadget_activate(struct usb_gadget *gadget) { int ret = 0; + mutex_lock(&gadget->udc->connect_lock); if (!gadget->deactivated) - goto out; + goto unlock; - mutex_lock(&gadget->udc->connect_lock); gadget->deactivated = false; /* @@ -882,7 +880,8 @@ int usb_gadget_activate(struct usb_gadget *gadget) ret = usb_gadget_connect_locked(gadget); mutex_unlock(&gadget->udc->connect_lock); -out: +unlock: + mutex_unlock(&gadget->udc->connect_lock); trace_usb_gadget_activate(gadget, ret); return ret; @@ -1124,12 +1123,21 @@ EXPORT_SYMBOL_GPL(usb_gadget_set_state); /* Acquire connect_lock before calling this function. 
*/ static void usb_udc_connect_control_locked(struct usb_udc *udc) __must_hold(&udc->connect_lock) { - if (udc->vbus && udc->started) + if (udc->vbus) usb_gadget_connect_locked(udc->gadget); else usb_gadget_disconnect_locked(udc->gadget); } +static void vbus_event_work(struct work_struct *work) +{ + struct usb_udc *udc = container_of(work, struct usb_udc, vbus_work); + + mutex_lock(&udc->connect_lock); + usb_udc_connect_control_locked(udc); + mutex_unlock(&udc->connect_lock); +} + /** * usb_udc_vbus_handler - updates the udc core vbus status, and try to * connect or disconnect gadget @@ -1138,17 +1146,23 @@ static void usb_udc_connect_control_locked(struct usb_udc *udc) __must_hold(&udc * * The udc driver calls it when it wants to connect or disconnect gadget * according to vbus status. + * + * This function can be invoked from interrupt context by irq handlers of + * the gadget drivers, however, usb_udc_connect_control() has to run in + * non-atomic context due to the following: + * a. Some of the gadget driver implementations expect the ->pullup + * callback to be invoked in non-atomic context. + * b. usb_gadget_disconnect() acquires udc_lock which is a mutex. + * Hence offload invocation of usb_udc_connect_control() to workqueue. */ void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status) { struct usb_udc *udc = gadget->udc; - mutex_lock(&udc->connect_lock); if (udc) { udc->vbus = status; - usb_udc_connect_control_locked(udc); + schedule_work(&udc->vbus_work); } - mutex_unlock(&udc->connect_lock); } EXPORT_SYMBOL_GPL(usb_udc_vbus_handler); @@ -1381,6 +1395,7 @@ int usb_add_gadget(struct usb_gadget *gadget) mutex_lock(&udc_lock); list_add_tail(&udc->list, &udc_list); mutex_unlock(&udc_lock); + INIT_WORK(&udc->vbus_work, vbus_event_work); ret = device_add(&udc->dev); if (ret) @@ -1512,6 +1527,7 @@ void usb_del_gadget(struct usb_gadget *gadget) flush_work(&gadget->work); device_del(&gadget->dev); ida_free(&gadget_id_numbers, gadget->id_number); + cancel_work_sync(&udc->vbus_work); device_unregister(&udc->dev); } EXPORT_SYMBOL_GPL(usb_del_gadget); @@ -1583,6 +1599,7 @@ static int gadget_bind_driver(struct device *dev) goto err_start; } usb_gadget_enable_async_callbacks(udc); + udc->allow_connect = true; usb_udc_connect_control_locked(udc); mutex_unlock(&udc->connect_lock); @@ -1615,6 +1632,8 @@ static void gadget_unbind_driver(struct device *dev) kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE); + udc->allow_connect = false; + cancel_work_sync(&udc->vbus_work); mutex_lock(&udc->connect_lock); usb_gadget_disconnect_locked(gadget); usb_gadget_disable_async_callbacks(udc); diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index aac8bc185afa906eb0fa9891a59675478ec9b2e9..eb008e873361fcaaf5f7c5e8f3571e041bfe8ac0 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c @@ -2877,9 +2877,9 @@ static int renesas_usb3_probe(struct platform_device *pdev) struct rzv2m_usb3drd *ddata = dev_get_drvdata(pdev->dev.parent); usb3->drd_reg = ddata->reg; - ret = devm_request_irq(ddata->dev, ddata->drd_irq, + ret = devm_request_irq(&pdev->dev, ddata->drd_irq, renesas_usb3_otg_irq, 0, - dev_name(ddata->dev), usb3); + dev_name(&pdev->dev), usb3); if (ret < 0) return ret; } diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c index 3592f757fe05ddfbce622b7bf91b93cc76b30ce2..7bd2fddde770ae4a895dc2988bc1fda0e8d56a55 100644 --- a/drivers/usb/host/uhci-pci.c +++ b/drivers/usb/host/uhci-pci.c @@ -119,11 +119,13 @@ static 
int uhci_pci_init(struct usb_hcd *hcd) uhci->rh_numports = uhci_count_ports(hcd); - /* Intel controllers report the OverCurrent bit active on. - * VIA controllers report it active off, so we'll adjust the - * bit value. (It's not standardized in the UHCI spec.) + /* + * Intel controllers report the OverCurrent bit active on. VIA + * and ZHAOXIN controllers report it active off, so we'll adjust + * the bit value. (It's not standardized in the UHCI spec.) */ - if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_VIA) + if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_VIA || + to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_ZHAOXIN) uhci->oc_low = 1; /* HP's server management chip requires a longer port reset delay. */ diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index ddb79f23fb3b7ce0a54b93f388e46db6508aecff..79b3691f373f34e8f4d325b1e9ae57949c23a54d 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "xhci.h" #include "xhci-trace.h" @@ -387,7 +388,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == PCI_DEVICE_ID_AMD_RENOIR_XHCI) - xhci->quirks |= XHCI_BROKEN_D3COLD; + xhci->quirks |= XHCI_BROKEN_D3COLD_S2I; if (pdev->vendor == PCI_VENDOR_ID_INTEL) { xhci->quirks |= XHCI_LPM_SUPPORT; @@ -801,9 +802,16 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) * Systems with the TI redriver that loses port status change events * need to have the registers polled during D3, so avoid D3cold. */ - if (xhci->quirks & (XHCI_COMP_MODE_QUIRK | XHCI_BROKEN_D3COLD)) + if (xhci->quirks & XHCI_COMP_MODE_QUIRK) pci_d3cold_disable(pdev); +#ifdef CONFIG_SUSPEND + /* d3cold is broken, but only when s2idle is used */ + if (pm_suspend_target_state == PM_SUSPEND_TO_IDLE && + xhci->quirks & (XHCI_BROKEN_D3COLD_S2I)) + pci_d3cold_disable(pdev); +#endif + if (xhci->quirks & XHCI_PME_STUCK_QUIRK) xhci_pme_quirk(hcd); diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 1ad12d5a48572b15e3d877df43dd80cf480ccc00..2bc82b3a2f984c33e5d32b23336dce57acdf5a24 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -276,6 +276,26 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, trace_xhci_inc_enq(ring); } +static int xhci_num_trbs_to(struct xhci_segment *start_seg, union xhci_trb *start, + struct xhci_segment *end_seg, union xhci_trb *end, + unsigned int num_segs) +{ + union xhci_trb *last_on_seg; + int num = 0; + int i = 0; + + do { + if (start_seg == end_seg && end >= start) + return num + (end - start); + last_on_seg = &start_seg->trbs[TRBS_PER_SEGMENT - 1]; + num += last_on_seg - start; + start_seg = start_seg->next; + start = start_seg->trbs; + } while (i++ <= num_segs); + + return -EINVAL; +} + /* * Check to see if there's room to enqueue num_trbs on the ring and make sure * enqueue pointer will not advance into dequeue segment. See rules above. 
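The xhci_num_trbs_to() helper added above walks the ring segment by segment and accumulates how many TRBs lie between a start and an end position, returning -EINVAL if the end is never reached within num_segs segments. A rough standalone sketch of the same walk, assuming fixed-size segments linked in a ring (names and sizes are illustrative, not the xHCI structures), is:

#include <stddef.h>

#define ENTRIES_PER_SEG 256 /* stand-in for TRBS_PER_SEGMENT */

struct ring_seg {
	int entries[ENTRIES_PER_SEG];
	struct ring_seg *next;
};

/*
 * Count entries from @start (inside @start_seg) up to @end (inside
 * @end_seg), visiting at most @num_segs segments.  Returns -1 when the
 * end position is never found, mirroring the error path above.
 */
static int count_entries_to(struct ring_seg *start_seg, int *start,
			    struct ring_seg *end_seg, int *end,
			    unsigned int num_segs)
{
	int num = 0;
	unsigned int i = 0;

	do {
		if (start_seg == end_seg && end >= start)
			return num + (int)(end - start);
		/* count up to the last entry of this segment, then hop to the next */
		num += (int)(&start_seg->entries[ENTRIES_PER_SEG - 1] - start);
		start_seg = start_seg->next;
		start = start_seg->entries;
	} while (i++ <= num_segs);

	return -1;
}

finish_td() then advances num_trbs_free by the count actually measured between the dequeue pointer and the TD's last TRB, rather than relying on td->num_trbs - 1 as before.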
@@ -2140,6 +2160,7 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, u32 trb_comp_code) { struct xhci_ep_ctx *ep_ctx; + int trbs_freed; ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index); @@ -2209,9 +2230,15 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, } /* Update ring dequeue pointer */ + trbs_freed = xhci_num_trbs_to(ep_ring->deq_seg, ep_ring->dequeue, + td->last_trb_seg, td->last_trb, + ep_ring->num_segs); + if (trbs_freed < 0) + xhci_dbg(xhci, "Failed to count freed trbs at TD finish\n"); + else + ep_ring->num_trbs_free += trbs_freed; ep_ring->dequeue = td->last_trb; ep_ring->deq_seg = td->last_trb_seg; - ep_ring->num_trbs_free += td->num_trbs - 1; inc_deq(xhci, ep_ring); return xhci_td_cleanup(xhci, td, ep_ring, td->status); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 08d721921b7bb22606d82cea5b4736208beb0a9c..6b690ec91ff3ae1044905657dd1f2f0f0f749d80 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1901,7 +1901,7 @@ struct xhci_hcd { #define XHCI_DISABLE_SPARSE BIT_ULL(38) #define XHCI_SG_TRB_CACHE_SIZE_QUIRK BIT_ULL(39) #define XHCI_NO_SOFT_RETRY BIT_ULL(40) -#define XHCI_BROKEN_D3COLD BIT_ULL(41) +#define XHCI_BROKEN_D3COLD_S2I BIT_ULL(41) #define XHCI_EP_CTX_BROKEN_DCS BIT_ULL(42) #define XHCI_SUSPEND_RESUME_CLKS BIT_ULL(43) #define XHCI_RESET_TO_DEFAULT BIT_ULL(44) diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 644a55447fd7f2fe88c957d8003f2e482914edda..fd42e3a0bd1879a2cb9de7fc11d42666b2166beb 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -248,6 +248,8 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_VENDOR_ID 0x2c7c /* These Quectel products use Quectel's vendor ID */ #define QUECTEL_PRODUCT_EC21 0x0121 +#define QUECTEL_PRODUCT_EM061K_LTA 0x0123 +#define QUECTEL_PRODUCT_EM061K_LMS 0x0124 #define QUECTEL_PRODUCT_EC25 0x0125 #define QUECTEL_PRODUCT_EG91 0x0191 #define QUECTEL_PRODUCT_EG95 0x0195 @@ -266,6 +268,8 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_PRODUCT_RM520N 0x0801 #define QUECTEL_PRODUCT_EC200U 0x0901 #define QUECTEL_PRODUCT_EC200S_CN 0x6002 +#define QUECTEL_PRODUCT_EM061K_LWW 0x6008 +#define QUECTEL_PRODUCT_EM061K_LCN 0x6009 #define QUECTEL_PRODUCT_EC200T 0x6026 #define QUECTEL_PRODUCT_RM500K 0x7001 @@ -1189,6 +1193,18 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0x00, 0x40) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x30) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0x00, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LMS, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LMS, 0xff, 0x00, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LMS, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LTA, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LTA, 0xff, 0x00, 0x40) 
}, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LTA, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LWW, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LWW, 0xff, 0x00, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LWW, 0xff, 0xff, 0x40) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff), .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) }, diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index 8931df5a85fd9e49de0e8b8f399771405168ed06..c54e9805da536a0ec139ad789017b79131c88561 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c @@ -406,22 +406,25 @@ static DEF_SCSI_QCMD(queuecommand) ***********************************************************************/ /* Command timeout and abort */ -static int command_abort(struct scsi_cmnd *srb) +static int command_abort_matching(struct us_data *us, struct scsi_cmnd *srb_match) { - struct us_data *us = host_to_us(srb->device->host); - - usb_stor_dbg(us, "%s called\n", __func__); - /* * us->srb together with the TIMED_OUT, RESETTING, and ABORTING * bits are protected by the host lock. */ scsi_lock(us_to_host(us)); - /* Is this command still active? */ - if (us->srb != srb) { + /* is there any active pending command to abort ? */ + if (!us->srb) { scsi_unlock(us_to_host(us)); usb_stor_dbg(us, "-- nothing to abort\n"); + return SUCCESS; + } + + /* Does the command match the passed srb if any ? */ + if (srb_match && us->srb != srb_match) { + scsi_unlock(us_to_host(us)); + usb_stor_dbg(us, "-- pending command mismatch\n"); return FAILED; } @@ -444,6 +447,14 @@ static int command_abort(struct scsi_cmnd *srb) return SUCCESS; } +static int command_abort(struct scsi_cmnd *srb) +{ + struct us_data *us = host_to_us(srb->device->host); + + usb_stor_dbg(us, "%s called\n", __func__); + return command_abort_matching(us, srb); +} + /* * This invokes the transport reset mechanism to reset the state of the * device @@ -455,6 +466,9 @@ static int device_reset(struct scsi_cmnd *srb) usb_stor_dbg(us, "%s called\n", __func__); + /* abort any pending command before reset */ + command_abort_matching(us, NULL); + /* lock the device pointers and do the reset */ mutex_lock(&(us->dev_mutex)); result = us->transport_reset(us); diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c index 8f3e884222adef512a424e16623ad3cad8565143..66de880b28d01a60d592a490d820aa78f87a9aa0 100644 --- a/drivers/usb/typec/altmodes/displayport.c +++ b/drivers/usb/typec/altmodes/displayport.c @@ -516,6 +516,10 @@ static ssize_t pin_assignment_show(struct device *dev, mutex_unlock(&dp->lock); + /* get_current_pin_assignments can return 0 when no matching pin assignments are found */ + if (len == 0) + len++; + buf[len - 1] = '\n'; return len; } diff --git a/drivers/usb/typec/pd.c b/drivers/usb/typec/pd.c index 0bcde1ff4d39b9a212f061448035e53819924fcb..8cc66e4467c48f3f6175f612092c661b484a44e0 100644 --- a/drivers/usb/typec/pd.c +++ b/drivers/usb/typec/pd.c @@ -95,7 +95,7 @@ peak_current_show(struct device *dev, struct device_attribute *attr, char *buf) static ssize_t fast_role_swap_current_show(struct device *dev, struct device_attribute *attr, char *buf) { - return sysfs_emit(buf, "%u\n", 
to_pdo(dev)->pdo >> PDO_FIXED_FRS_CURR_SHIFT) & 3; + return sysfs_emit(buf, "%u\n", (to_pdo(dev)->pdo >> PDO_FIXED_FRS_CURR_SHIFT) & 3); } static DEVICE_ATTR_RO(fast_role_swap_current); diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c index 8b075ca82ef6de80b4b6f31fa316b675141705c3..603dbd44deba9aaab55ad671526a6bf14befd339 100644 --- a/drivers/usb/typec/tipd/core.c +++ b/drivers/usb/typec/tipd/core.c @@ -886,6 +886,9 @@ static void tps6598x_remove(struct i2c_client *client) { struct tps6598x *tps = i2c_get_clientdata(client); + if (!client->irq) + cancel_delayed_work_sync(&tps->wq_poll); + tps6598x_disconnect(tps, 0); typec_unregister_port(tps->port); usb_role_switch_put(tps->role_sw); @@ -917,7 +920,7 @@ static int __maybe_unused tps6598x_resume(struct device *dev) enable_irq(client->irq); } - if (client->irq) + if (!client->irq) queue_delayed_work(system_power_efficient_wq, &tps->wq_poll, msecs_to_jiffies(POLL_INTERVAL)); diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index 2b472ec01dc42e84c7c355c6eae4073428b9a2ac..b664ecbb798be6012d988e15d7622c6071f20fe8 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -132,10 +132,8 @@ static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd) if (ret) return ret; - if (cci & UCSI_CCI_BUSY) { - ucsi->ops->async_write(ucsi, UCSI_CANCEL, NULL, 0); - return -EBUSY; - } + if (cmd != UCSI_CANCEL && cci & UCSI_CCI_BUSY) + return ucsi_exec_command(ucsi, UCSI_CANCEL); if (!(cci & UCSI_CCI_COMMAND_COMPLETE)) return -EIO; @@ -149,6 +147,11 @@ static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd) return ucsi_read_error(ucsi); } + if (cmd == UCSI_CANCEL && cci & UCSI_CCI_CANCEL_COMPLETE) { + ret = ucsi_acknowledge_command(ucsi); + return ret ? 
ret : -EBUSY; + } + return UCSI_CCI_LENGTH(cci); } diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index e29e32b306adc683e9e9e7414244d709f71f2b12..279ac6a558d29a7124204c9229fe13f7ee4eafd3 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -3349,10 +3349,10 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device * mlx5_vdpa_remove_debugfs(ndev->debugfs); ndev->debugfs = NULL; unregister_link_notifier(ndev); + _vdpa_unregister_device(dev); wq = mvdev->wq; mvdev->wq = NULL; destroy_workqueue(wq); - _vdpa_unregister_device(dev); mgtdev->ndev = NULL; } diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c index de97e38c3b82fea80204d4be6ccfbd1b08cf4063..5f5c21674fdce5050fa1bc0c672b3b3606871da9 100644 --- a/drivers/vdpa/vdpa_user/vduse_dev.c +++ b/drivers/vdpa/vdpa_user/vduse_dev.c @@ -1685,6 +1685,9 @@ static bool vduse_validate_config(struct vduse_dev_config *config) if (config->vq_num > 0xffff) return false; + if (!config->name[0]) + return false; + if (!device_is_allowed(config->device_id)) return false; diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 3d4dd9420c30701453be9323a0e3e46bfbf58755..0d2f805468e190c37b5e79527f864d13a42d311e 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -860,6 +860,11 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data, if (ret) goto pin_unwind; + if (!pfn_valid(phys_pfn)) { + ret = -EINVAL; + goto pin_unwind; + } + ret = vfio_add_to_pfn_list(dma, iova, phys_pfn); if (ret) { if (put_pfn(phys_pfn, dma->prot) && do_accounting) diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 07181cd8d52e638126cf6583eeba3bbfe17ffdb0..ae2273196b0c902c472b3f5cf45249cb6cae37bf 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -935,13 +935,18 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock) err = sock->ops->sendmsg(sock, &msg, len); if (unlikely(err < 0)) { + bool retry = err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS; + if (zcopy_used) { if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS) vhost_net_ubuf_put(ubufs); - nvq->upend_idx = ((unsigned)nvq->upend_idx - 1) - % UIO_MAXIOV; + if (retry) + nvq->upend_idx = ((unsigned)nvq->upend_idx - 1) + % UIO_MAXIOV; + else + vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN; } - if (err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS) { + if (retry) { vhost_discard_vq_desc(vq, 1); vhost_net_enable_vq(net, vq); break; diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 8c1aefc865f0854ca7c07873a2c757c1688d30f0..bf77924d5b602389fd79b9faadd8409cb1422943 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -407,7 +407,10 @@ static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep) { struct vdpa_device *vdpa = v->vdpa; const struct vdpa_config_ops *ops = vdpa->config; + struct vhost_dev *d = &v->vdev; + u64 actual_features; u64 features; + int i; /* * It's not allowed to change the features after they have @@ -422,6 +425,16 @@ static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep) if (vdpa_set_features(vdpa, features)) return -EINVAL; + /* let the vqs know what has been configured */ + actual_features = ops->get_driver_features(vdpa); + for (i = 0; i < d->nvqs; ++i) { + struct vhost_virtqueue *vq = d->vqs[i]; + + mutex_lock(&vq->mutex); + vq->acked_features = actual_features; + mutex_unlock(&vq->mutex); + } + return 0; 
} @@ -594,7 +607,14 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, if (r) return r; - vq->last_avail_idx = vq_state.split.avail_index; + if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) { + vq->last_avail_idx = vq_state.packed.last_avail_idx | + (vq_state.packed.last_avail_counter << 15); + vq->last_used_idx = vq_state.packed.last_used_idx | + (vq_state.packed.last_used_counter << 15); + } else { + vq->last_avail_idx = vq_state.split.avail_index; + } break; } @@ -612,9 +632,15 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, break; case VHOST_SET_VRING_BASE: - vq_state.split.avail_index = vq->last_avail_idx; - if (ops->set_vq_state(vdpa, idx, &vq_state)) - r = -EINVAL; + if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) { + vq_state.packed.last_avail_idx = vq->last_avail_idx & 0x7fff; + vq_state.packed.last_avail_counter = !!(vq->last_avail_idx & 0x8000); + vq_state.packed.last_used_idx = vq->last_used_idx & 0x7fff; + vq_state.packed.last_used_counter = !!(vq->last_used_idx & 0x8000); + } else { + vq_state.split.avail_index = vq->last_avail_idx; + } + r = ops->set_vq_state(vdpa, idx, &vq_state); break; case VHOST_SET_VRING_CALL: diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index a92af08e786450640b2bb6fa848810dec402fcbb..60c9ebd629dd159d0ceb8a0c14f96091be193753 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -235,7 +235,7 @@ void vhost_dev_flush(struct vhost_dev *dev) { struct vhost_flush_struct flush; - if (dev->worker) { + if (dev->worker.vtsk) { init_completion(&flush.wait_event); vhost_work_init(&flush.work, vhost_flush_work); @@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vhost_dev_flush); void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work) { - if (!dev->worker) + if (!dev->worker.vtsk) return; if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) { @@ -255,8 +255,8 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work) * sure it was not in the list. * test_and_set_bit() implies a memory barrier. 
*/ - llist_add(&work->node, &dev->worker->work_list); - wake_up_process(dev->worker->vtsk->task); + llist_add(&work->node, &dev->worker.work_list); + vhost_task_wake(dev->worker.vtsk); } } EXPORT_SYMBOL_GPL(vhost_work_queue); @@ -264,7 +264,7 @@ EXPORT_SYMBOL_GPL(vhost_work_queue); /* A lockless hint for busy polling code to exit the loop */ bool vhost_has_work(struct vhost_dev *dev) { - return dev->worker && !llist_empty(&dev->worker->work_list); + return !llist_empty(&dev->worker.work_list); } EXPORT_SYMBOL_GPL(vhost_has_work); @@ -333,31 +333,21 @@ static void vhost_vq_reset(struct vhost_dev *dev, __vhost_vq_meta_reset(vq); } -static int vhost_worker(void *data) +static bool vhost_worker(void *data) { struct vhost_worker *worker = data; struct vhost_work *work, *work_next; struct llist_node *node; - for (;;) { - /* mb paired w/ kthread_stop */ - set_current_state(TASK_INTERRUPTIBLE); - - if (vhost_task_should_stop(worker->vtsk)) { - __set_current_state(TASK_RUNNING); - break; - } - - node = llist_del_all(&worker->work_list); - if (!node) - schedule(); + node = llist_del_all(&worker->work_list); + if (node) { + __set_current_state(TASK_RUNNING); node = llist_reverse_order(node); /* make sure flag is seen after deletion */ smp_wmb(); llist_for_each_entry_safe(work, work_next, node, node) { clear_bit(VHOST_WORK_QUEUED, &work->flags); - __set_current_state(TASK_RUNNING); kcov_remote_start_common(worker->kcov_handle); work->fn(work); kcov_remote_stop(); @@ -365,7 +355,7 @@ static int vhost_worker(void *data) } } - return 0; + return !!node; } static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq) @@ -468,7 +458,8 @@ void vhost_dev_init(struct vhost_dev *dev, dev->umem = NULL; dev->iotlb = NULL; dev->mm = NULL; - dev->worker = NULL; + memset(&dev->worker, 0, sizeof(dev->worker)); + init_llist_head(&dev->worker.work_list); dev->iov_limit = iov_limit; dev->weight = weight; dev->byte_weight = byte_weight; @@ -542,47 +533,30 @@ static void vhost_detach_mm(struct vhost_dev *dev) static void vhost_worker_free(struct vhost_dev *dev) { - struct vhost_worker *worker = dev->worker; - - if (!worker) + if (!dev->worker.vtsk) return; - dev->worker = NULL; - WARN_ON(!llist_empty(&worker->work_list)); - vhost_task_stop(worker->vtsk); - kfree(worker); + WARN_ON(!llist_empty(&dev->worker.work_list)); + vhost_task_stop(dev->worker.vtsk); + dev->worker.kcov_handle = 0; + dev->worker.vtsk = NULL; } static int vhost_worker_create(struct vhost_dev *dev) { - struct vhost_worker *worker; struct vhost_task *vtsk; char name[TASK_COMM_LEN]; - int ret; - - worker = kzalloc(sizeof(*worker), GFP_KERNEL_ACCOUNT); - if (!worker) - return -ENOMEM; - dev->worker = worker; - worker->kcov_handle = kcov_common_handle(); - init_llist_head(&worker->work_list); snprintf(name, sizeof(name), "vhost-%d", current->pid); - vtsk = vhost_task_create(vhost_worker, worker, name); - if (!vtsk) { - ret = -ENOMEM; - goto free_worker; - } + vtsk = vhost_task_create(vhost_worker, &dev->worker, name); + if (!vtsk) + return -ENOMEM; - worker->vtsk = vtsk; + dev->worker.kcov_handle = kcov_common_handle(); + dev->worker.vtsk = vtsk; vhost_task_start(vtsk); return 0; - -free_worker: - kfree(worker); - dev->worker = NULL; - return ret; } /* Caller should have device mutex */ @@ -1626,17 +1600,25 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg r = -EFAULT; break; } - if (s.num > 0xffff) { - r = -EINVAL; - break; + if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) { + vq->last_avail_idx = s.num & 0xffff; + 
vq->last_used_idx = (s.num >> 16) & 0xffff; + } else { + if (s.num > 0xffff) { + r = -EINVAL; + break; + } + vq->last_avail_idx = s.num; } - vq->last_avail_idx = s.num; /* Forget the cached index value. */ vq->avail_idx = vq->last_avail_idx; break; case VHOST_GET_VRING_BASE: s.index = idx; - s.num = vq->last_avail_idx; + if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) + s.num = (u32)vq->last_avail_idx | ((u32)vq->last_used_idx << 16); + else + s.num = vq->last_avail_idx; if (copy_to_user(argp, &s, sizeof s)) r = -EFAULT; break; @@ -2575,12 +2557,11 @@ EXPORT_SYMBOL_GPL(vhost_disable_notify); /* Create a new message. */ struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type) { - struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL); + /* Make sure all padding within the structure is initialized. */ + struct vhost_msg_node *node = kzalloc(sizeof(*node), GFP_KERNEL); if (!node) return NULL; - /* Make sure all padding within the structure is initialized. */ - memset(&node->msg, 0, sizeof node->msg); node->vq = vq; node->msg.type = type; return node; diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index 0308638cdeeebef46ca0ad98994eb8c6edd803f4..fc900be504b38e48d7c74f96e78d825d4a2a909e 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -92,13 +92,17 @@ struct vhost_virtqueue { /* The routine to call when the Guest pings us, or timeout. */ vhost_work_fn_t handle_kick; - /* Last available index we saw. */ + /* Last available index we saw. + * Values are limited to 0x7fff, and the high bit is used as + * a wrap counter when using VIRTIO_F_RING_PACKED. */ u16 last_avail_idx; /* Caches available index value from user. */ u16 avail_idx; - /* Last index we used. */ + /* Last index we used. + * Values are limited to 0x7fff, and the high bit is used as + * a wrap counter when using VIRTIO_F_RING_PACKED. */ u16 last_used_idx; /* Used flags */ @@ -154,7 +158,7 @@ struct vhost_dev { struct vhost_virtqueue **vqs; int nvqs; struct eventfd_ctx *log_ctx; - struct vhost_worker *worker; + struct vhost_worker worker; struct vhost_iotlb *umem; struct vhost_iotlb *iotlb; spinlock_t iotlb_lock; diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 96e91570cdd3294041c79b25edb62df08331538c..0fdf5f46802c7f508e3025ce292bd1c3a620913c 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -124,7 +124,7 @@ config FB_PROVIDE_GET_FB_UNMAPPED_AREA depends on FB help Allow generic frame-buffer to provide get_fb_unmapped_area - function. + function to provide shareable character device support on nommu. 
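On !MMU configurations, mmap() of the framebuffer character device can only work if the driver reports where the buffer really lives, which is what the get_fb_unmapped_area hook referenced by this option provides. As a loose illustration of the idea (hypothetical names, not the fbdev implementation), such a hook essentially validates the requested window and returns the buffer's kernel address at that offset:

/*
 * Illustrative sketch only.  On nommu kernels user mappings cannot be
 * relocated, so a get_unmapped_area-style hook hands back the address
 * of the existing buffer; mmap() then shares that memory directly.
 */
static unsigned long example_get_unmapped_area(unsigned long buf_start,
					       unsigned long buf_len,
					       unsigned long pgoff,
					       unsigned long len)
{
	unsigned long off = pgoff << 12;	/* assumes 4 KiB pages */

	if (off > buf_len || len > buf_len - off)
		return (unsigned long)-22;	/* -EINVAL */

	return buf_start + off;
}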
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 96e91570cdd3294041c79b25edb62df08331538c..0fdf5f46802c7f508e3025ce292bd1c3a620913c 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -124,7 +124,7 @@ config FB_PROVIDE_GET_FB_UNMAPPED_AREA
 	depends on FB
 	help
 	  Allow generic frame-buffer to provide get_fb_unmapped_area
-	  function.
+	  function to provide shareable character device support on nommu.
 
 menuconfig FB_FOREIGN_ENDIAN
 	bool "Framebuffer foreign endianness support"
diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
index 024d0ee4f04f93eae748e420a05c920f8db880c5..08d15e4084130ed2d020194137b62cb058dcf8d2 100644
--- a/drivers/video/fbdev/arcfb.c
+++ b/drivers/video/fbdev/arcfb.c
@@ -590,7 +590,7 @@ err_fb_alloc:
 	return retval;
 }
 
-static int arcfb_remove(struct platform_device *dev)
+static void arcfb_remove(struct platform_device *dev)
 {
 	struct fb_info *info = platform_get_drvdata(dev);
 
@@ -601,12 +601,11 @@ static int arcfb_remove(struct platform_device *dev)
 		vfree((void __force *)info->screen_base);
 		framebuffer_release(info);
 	}
-	return 0;
 }
 
 static struct platform_driver arcfb_driver = {
 	.probe	= arcfb_probe,
-	.remove = arcfb_remove,
+	.remove_new = arcfb_remove,
 	.driver = {
 		.name = "arcfb",
 	},
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index b02e4e645035c44716740805bc3ffdb666d12c0d..cba2b113b28b0533418e017e8658138c97d21fd7 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -3498,11 +3498,6 @@ static int atyfb_setup_generic(struct pci_dev *pdev, struct fb_info *info,
 	if (ret)
 		goto atyfb_setup_generic_fail;
 #endif
-	if (!(aty_ld_le32(CRTC_GEN_CNTL, par) & CRTC_EXT_DISP_EN))
-		par->clk_wr_offset = (inb(R_GENMO) & 0x0CU) >> 2;
-	else
-		par->clk_wr_offset = aty_ld_8(CLOCK_CNTL, par) & 0x03U;
-
 	/* according to ATI, we should use clock 3 for acelerated mode */
 	par->clk_wr_offset = 3;
 
diff --git a/drivers/video/fbdev/au1100fb.c b/drivers/video/fbdev/au1100fb.c
index 519313b8bb004fa245def79b0e74525a4e2edd42..648d6cac86e8fb1386c459d05852f6f603c00fd7 100644
--- a/drivers/video/fbdev/au1100fb.c
+++ b/drivers/video/fbdev/au1100fb.c
@@ -520,13 +520,10 @@ failed:
 	return -ENODEV;
 }
 
-int au1100fb_drv_remove(struct platform_device *dev)
+void au1100fb_drv_remove(struct platform_device *dev)
 {
 	struct au1100fb_device *fbdev = NULL;
 
-	if (!dev)
-		return -ENODEV;
-
 	fbdev = platform_get_drvdata(dev);
 
 #if !defined(CONFIG_FRAMEBUFFER_CONSOLE) && defined(CONFIG_LOGO)
@@ -543,8 +540,6 @@ int au1100fb_drv_remove(struct platform_device *dev)
 		clk_disable_unprepare(fbdev->lcdclk);
 		clk_put(fbdev->lcdclk);
 	}
-
-	return 0;
 }
 
 #ifdef CONFIG_PM
@@ -593,9 +588,9 @@ static struct platform_driver au1100fb_driver = {
 		.name		= "au1100-lcd",
 	},
 	.probe		= au1100fb_drv_probe,
-	.remove		= au1100fb_drv_remove,
+	.remove_new	= au1100fb_drv_remove,
 	.suspend	= au1100fb_drv_suspend,
-	.resume         = au1100fb_drv_resume,
+	.resume		= au1100fb_drv_resume,
 };
 
 module_platform_driver(au1100fb_driver);
diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
index b6b22fa4a8a01360095c3549dc63800f6dd0ddf9..aed88ce45bf091d6990c5242369f959c5b2bb2a0 100644
--- a/drivers/video/fbdev/au1200fb.c
+++ b/drivers/video/fbdev/au1200fb.c
@@ -1765,7 +1765,7 @@ failed:
 	return ret;
 }
 
-static int au1200fb_drv_remove(struct platform_device *dev)
+static void au1200fb_drv_remove(struct platform_device *dev)
 {
 	struct au1200fb_platdata *pd = platform_get_drvdata(dev);
 	struct fb_info *fbi;
@@ -1788,8 +1788,6 @@ static int au1200fb_drv_remove(struct platform_device *dev)
 	}
 
 	free_irq(platform_get_irq(dev, 0), (void *)dev);
-
-	return 0;
 }
 
 #ifdef CONFIG_PM
@@ -1840,7 +1838,7 @@ static struct platform_driver au1200fb_driver = {
 		.pm	= AU1200FB_PMOPS,
 	},
 	.probe		= au1200fb_drv_probe,
-	.remove		= au1200fb_drv_remove,
+	.remove_new	= au1200fb_drv_remove,
 };
 
 module_platform_driver(au1200fb_driver);
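The arcfb, au1100fb and au1200fb hunks above, and the fbdev hunks that follow, apply the same conversion: the platform remove callback returns void and is registered through .remove_new, since an error code returned from .remove could not abort the unbind anyway. A condensed sketch of the resulting pattern, with hypothetical example_fb_* names rather than code from this series:

#include <linux/fb.h>
#include <linux/platform_device.h>

static int example_fb_probe(struct platform_device *pdev)
{
	/* allocation and framebuffer registration elided in this sketch */
	return 0;
}

static void example_fb_remove(struct platform_device *pdev)
{
	struct fb_info *info = platform_get_drvdata(pdev);

	if (info) {
		unregister_framebuffer(info);
		framebuffer_release(info);
	}
	/* no return value: cleanup is not allowed to fail at this point */
}

static struct platform_driver example_fb_driver = {
	.probe		= example_fb_probe,
	.remove_new	= example_fb_remove,
	.driver		= {
		.name	= "example-fb",
	},
};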
diff --git a/drivers/video/fbdev/broadsheetfb.c b/drivers/video/fbdev/broadsheetfb.c
index 55e62dd96f9bef0ee3f6ff7915517feac9a6e6cf..b518cacbf7cd9edb2d8d51d290047d267a76e528 100644
--- a/drivers/video/fbdev/broadsheetfb.c
+++ b/drivers/video/fbdev/broadsheetfb.c
@@ -1193,7 +1193,7 @@ err:
 
 }
 
-static int broadsheetfb_remove(struct platform_device *dev)
+static void broadsheetfb_remove(struct platform_device *dev)
 {
 	struct fb_info *info = platform_get_drvdata(dev);
 
@@ -1209,12 +1209,11 @@ static int broadsheetfb_remove(struct platform_device *dev)
 		module_put(par->board->owner);
 		framebuffer_release(info);
 	}
-	return 0;
 }
 
 static struct platform_driver broadsheetfb_driver = {
 	.probe	= broadsheetfb_probe,
-	.remove = broadsheetfb_remove,
+	.remove_new = broadsheetfb_remove,
 	.driver = {
 		.name	= "broadsheetfb",
 	},
diff --git a/drivers/video/fbdev/bw2.c b/drivers/video/fbdev/bw2.c
index 9cbadcd18b256c4ff0e525f7d09867d377d1028f..025d663dc6fdcae85bc10e8e1813cd5305ca97e5 100644
--- a/drivers/video/fbdev/bw2.c
+++ b/drivers/video/fbdev/bw2.c
@@ -352,7 +352,7 @@ out_err:
 	return err;
 }
 
-static int bw2_remove(struct platform_device *op)
+static void bw2_remove(struct platform_device *op)
 {
 	struct fb_info *info = dev_get_drvdata(&op->dev);
 	struct bw2_par *par = info->par;
@@ -363,8 +363,6 @@ static int bw2_remove(struct platform_device *op)
 	of_iounmap(&op->resource[0], info->screen_base, info->fix.smem_len);
 
 	framebuffer_release(info);
-
-	return 0;
 }
 
 static const struct of_device_id bw2_match[] = {
@@ -381,7 +379,7 @@ static struct platform_driver bw2_driver = {
 		.of_match_table = bw2_match,
 	},
 	.probe		= bw2_probe,
-	.remove		= bw2_remove,
+	.remove_new	= bw2_remove,
 };
 
 static int __init bw2_init(void)
diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c
index f98e8f298bc19a0ae63a8d493ae2e316d8e2e38a..8587c9da067003f74f50d3234a759bf3f218bf12 100644
--- a/drivers/video/fbdev/core/bitblit.c
+++ b/drivers/video/fbdev/core/bitblit.c
@@ -247,6 +247,9 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode,
 
 	cursor.set = 0;
 
+	if (!vc->vc_font.data)
+		return;
+
 	c = scr_readw((u16 *) vc->vc_pos);
 	attribute = get_attribute(info, c);
 	src = vc->vc_font.data + ((c & charmask) * (w * vc->vc_font.height));
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index e808dc86001cc98ca449cdecb63fe891d8579671..28739f1cb5e7a1da84e65b66850984ba4dc4c6be 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1468,7 +1468,7 @@ __releases(&info->lock)
 }
 
 #if defined(CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA) && !defined(CONFIG_MMU)
-unsigned long get_fb_unmapped_area(struct file *filp,
+static unsigned long get_fb_unmapped_area(struct file *filp,
 				   unsigned long addr, unsigned long len,
 				   unsigned long pgoff, unsigned long flags)
 {
diff --git a/drivers/video/fbdev/i810/i810_dvt.c b/drivers/video/fbdev/i810/i810_dvt.c
index b4b3670667ab8ce42dce3d58cc2d65be02d72eaa..2082b5c92e8f3b9f72deff934b6fc4b8a958e8fc 100644
--- a/drivers/video/fbdev/i810/i810_dvt.c
+++ b/drivers/video/fbdev/i810/i810_dvt.c
@@ -14,6 +14,7 @@
 
 #include "i810_regs.h"
 #include "i810.h"
+#include "i810_main.h"
 
 struct mode_registers std_modes[] = {
 	/* 640x480 @ 60Hz */
@@ -276,7 +277,7 @@ void i810fb_fill_var_timings(struct fb_var_screeninfo *var)
 	var->upper_margin = total - (yres + var->lower_margin + var->vsync_len);
 }
 
-u32 i810_get_watermark(struct fb_var_screeninfo *var,
+u32 i810_get_watermark(const struct fb_var_screeninfo *var,
 		       struct i810fb_par *par)
 {
 	struct mode_registers *params = &par->regs;
diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
index 975dd682fae4bb59187b0589a4487eee1acb8cd9..ee7d01ad14068687b968a42f1558ed730664e47b 100644
--- a/drivers/video/fbdev/imsttfb.c
+++ b/drivers/video/fbdev/imsttfb.c
@@ -1452,9 +1452,13 @@ static int init_imstt(struct fb_info *info)
 		      FBINFO_HWACCEL_FILLRECT |
 		      FBINFO_HWACCEL_YPAN;
 
-	fb_alloc_cmap(&info->cmap, 0, 0);
+	if (fb_alloc_cmap(&info->cmap, 0, 0)) {
+		framebuffer_release(info);
+		return -ENODEV;
+	}
 
 	if (register_framebuffer(info) < 0) {
+		fb_dealloc_cmap(&info->cmap);
 		framebuffer_release(info);
 		return -ENODEV;
 	}
@@ -1531,8 +1535,10 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto error;
 	info->pseudo_palette = par->palette;
 	ret = init_imstt(info);
-	if (!ret)
-		pci_set_drvdata(pdev, info);
+	if (ret)
+		goto error;
+
+	pci_set_drvdata(pdev, info);
 	return ret;
 
 error:
diff --git a/drivers/video/fbdev/matrox/matroxfb_maven.c b/drivers/video/fbdev/matrox/matroxfb_maven.c
index 727a10a59811b38d701f12159ce5201ee8d18bea..b15a8ad92ba704e234e7e16874897dffb2427cf7 100644
--- a/drivers/video/fbdev/matrox/matroxfb_maven.c
+++ b/drivers/video/fbdev/matrox/matroxfb_maven.c
@@ -1291,7 +1291,7 @@ static struct i2c_driver maven_driver={
 	.driver = {
 		.name	= "maven",
 	},
-	.probe_new	= maven_probe,
+	.probe		= maven_probe,
 	.remove		= maven_remove,
 	.id_table	= maven_id,
 };
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
index 1eaa35c278359836ebd4860af036b42fce2d010b..477789cff8e08d5e620a0754feb1f1dba6008370 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
@@ -491,7 +491,8 @@ static int tpo_td043_probe(struct spi_device *spi)
 
 	ddata->vcc_reg = devm_regulator_get(&spi->dev, "vcc");
 	if (IS_ERR(ddata->vcc_reg)) {
-		r = dev_err_probe(&spi->dev, r, "failed to get LCD VCC regulator\n");
+		r = dev_err_probe(&spi->dev, PTR_ERR(ddata->vcc_reg),
+				  "failed to get LCD VCC regulator\n");
 		goto err_regulator;
 	}
 
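The panel-tpo-td043mtea1 hunk above fixes dev_err_probe() being called with a stale value of r instead of the error from the failed regulator lookup. As a reminder of the intended pattern (hypothetical example_* helper, not code from this series): dev_err_probe() takes the error derived from the failing call, stays quiet for -EPROBE_DEFER, and returns that same error so it can be propagated directly.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int example_get_vcc(struct device *dev, struct regulator **out)
{
	struct regulator *vcc = devm_regulator_get(dev, "vcc");

	if (IS_ERR(vcc))
		return dev_err_probe(dev, PTR_ERR(vcc),
				     "failed to get LCD VCC regulator\n");

	*out = vcc;
	return 0;
}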
instead\n", fb->id); bpp = 1; /* default to 1 bpp */ break; #else - printk(KERN_WARNING + printk(KERN_WARNING "stifb: Unsupported graphics card (id=0x%08x) " "- skipping.\n", fb->id); diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c index 216d49c9d47e5c57f11e93c5c01a56b371cb39b1..dabc30a09f969876654280ddc31db0355c41de3b 100644 --- a/drivers/video/fbdev/udlfb.c +++ b/drivers/video/fbdev/udlfb.c @@ -27,6 +27,8 @@ #include