diff --git a/.mailmap b/.mailmap index 00581c1f09838af86edba1ff7abd9bedd216a428..d9d5c80252f9cf2d2f140d97fb86a87c1ecf4b22 100644 --- a/.mailmap +++ b/.mailmap @@ -99,6 +99,7 @@ Jacob Shin Jaegeuk Kim Jaegeuk Kim Jaegeuk Kim +Jakub Kicinski James Bottomley James Bottomley James E Wilson @@ -152,6 +153,7 @@ Linus Lüssing Linus Lüssing Li Yang Li Yang +Lukasz Luba Maciej W. Rozycki Marc Zyngier Marcin Nowakowski @@ -265,6 +267,7 @@ Vinod Koul Viresh Kumar Viresh Kumar Viresh Kumar +Vivien Didelot Vlad Dogaru Vladimir Davydov Vladimir Davydov diff --git a/Documentation/ABI/stable/sysfs-driver-mlxreg-io b/Documentation/ABI/stable/sysfs-driver-mlxreg-io index 8ca498447aeb9f4bfbc8b6e71cdf2086893ea5f3..05601a90a9b6f2251c81c881256221b9bf1f42cb 100644 --- a/Documentation/ABI/stable/sysfs-driver-mlxreg-io +++ b/Documentation/ABI/stable/sysfs-driver-mlxreg-io @@ -29,13 +29,13 @@ Description: This file shows the system fans direction: The files are read only. -What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/jtag_enable +What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/cpld3_version Date: November 2018 KernelVersion: 5.0 Contact: Vadim Pasternak Description: These files show with which CPLD versions have been burned - on LED board. + on LED or Gearbox board. The files are read only. @@ -121,6 +121,15 @@ Description: These files show the system reset cause, as following: ComEx The files are read only. +What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/cpld4_version +Date: November 2018 +KernelVersion: 5.0 +Contact: Vadim Pasternak +Description: These files show with which CPLD versions have been burned + on LED board. + + The files are read only. + Date: June 2019 KernelVersion: 5.3 Contact: Vadim Pasternak diff --git a/Documentation/ABI/testing/sysfs-platform-mellanox-bootctl b/Documentation/ABI/testing/sysfs-platform-mellanox-bootctl index c65a8057486921ae53d505c7a03f0a76333c5bed..401d202f478b230014ce7a6ee247cbea76dc45da 100644 --- a/Documentation/ABI/testing/sysfs-platform-mellanox-bootctl +++ b/Documentation/ABI/testing/sysfs-platform-mellanox-bootctl @@ -1,4 +1,4 @@ -What: /sys/bus/platform/devices/MLNXBF04:00/driver/lifecycle_state +What: /sys/bus/platform/devices/MLNXBF04:00/lifecycle_state Date: Oct 2019 KernelVersion: 5.5 Contact: "Liming Sun " @@ -10,7 +10,7 @@ Description: GA Non-Secured - Non-Secure chip and not able to change state RMA - Return Merchandise Authorization -What: /sys/bus/platform/devices/MLNXBF04:00/driver/post_reset_wdog +What: /sys/bus/platform/devices/MLNXBF04:00/post_reset_wdog Date: Oct 2019 KernelVersion: 5.5 Contact: "Liming Sun " @@ -19,7 +19,7 @@ Description: to reboot the chip and recover it to the old state if the new boot partition fails. 
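The corrected paths above hang the attributes directly off the device node rather than off the bound driver's ``driver/`` subdirectory. A minimal userspace sketch against the corrected ``lifecycle_state`` path (illustrative only; it assumes a BlueField system that actually exposes the MLNXBF04:00 device):

.. code-block:: c

    #include <stdio.h>

    int main(void)
    {
        /* The attribute now lives on the device itself, not under driver/. */
        const char *path =
            "/sys/bus/platform/devices/MLNXBF04:00/lifecycle_state";
        char buf[64];
        FILE *f = fopen(path, "r");

        if (!f) {
            perror(path);
            return 1;
        }
        if (fgets(buf, sizeof(buf), f))
            printf("lifecycle state: %s", buf);
        fclose(f);
        return 0;
    }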
-What: /sys/bus/platform/devices/MLNXBF04:00/driver/reset_action +What: /sys/bus/platform/devices/MLNXBF04:00/reset_action Date: Oct 2019 KernelVersion: 5.5 Contact: "Liming Sun " @@ -30,7 +30,7 @@ Description: emmc - boot from the onchip eMMC emmc_legacy - boot from the onchip eMMC in legacy (slow) mode -What: /sys/bus/platform/devices/MLNXBF04:00/driver/second_reset_action +What: /sys/bus/platform/devices/MLNXBF04:00/second_reset_action Date: Oct 2019 KernelVersion: 5.5 Contact: "Liming Sun " @@ -44,7 +44,7 @@ Description: swap_emmc - swap the primary / secondary boot partition none - cancel the action -What: /sys/bus/platform/devices/MLNXBF04:00/driver/secure_boot_fuse_state +What: /sys/bus/platform/devices/MLNXBF04:00/secure_boot_fuse_state Date: Oct 2019 KernelVersion: 5.5 Contact: "Liming Sun " diff --git a/Documentation/admin-guide/devices.txt b/Documentation/admin-guide/devices.txt index 1c5d2281efc9744df8427f34b99cb6b3883877c4..2a97aaec8b122cd7322744a43413d742a246afa2 100644 --- a/Documentation/admin-guide/devices.txt +++ b/Documentation/admin-guide/devices.txt @@ -319,7 +319,7 @@ 182 = /dev/perfctr Performance-monitoring counters 183 = /dev/hwrng Generic random number generator 184 = /dev/cpu/microcode CPU microcode update interface - 186 = /dev/atomicps Atomic shapshot of process state data + 186 = /dev/atomicps Atomic snapshot of process state data 187 = /dev/irnet IrNET device 188 = /dev/smbusbios SMBus BIOS 189 = /dev/ussp_ctl User space serial port control diff --git a/Documentation/admin-guide/ext4.rst b/Documentation/admin-guide/ext4.rst index 059ddcbe769d94bfc9a53762939512b02b0d7396..9bc93f0ce0c907fa2cb2b54a86bbb2c7ee417570 100644 --- a/Documentation/admin-guide/ext4.rst +++ b/Documentation/admin-guide/ext4.rst @@ -181,14 +181,17 @@ When mounting an ext4 filesystem, the following option are accepted: system after its metadata has been committed to the journal. commit=nrsec (*) - Ext4 can be told to sync all its data and metadata every 'nrsec' - seconds. The default value is 5 seconds. This means that if you lose - your power, you will lose as much as the latest 5 seconds of work (your - filesystem will not be damaged though, thanks to the journaling). This - default value (or any low value) will hurt performance, but it's good - for data-safety. Setting it to 0 will have the same effect as leaving - it at the default (5 seconds). Setting it to very large values will - improve performance. + This setting limits the maximum age of the running transaction to + 'nrsec' seconds. The default value is 5 seconds. This means that if + you lose your power, you will lose as much as the latest 5 seconds of + metadata changes (your filesystem will not be damaged though, thanks + to the journaling). This default value (or any low value) will hurt + performance, but it's good for data-safety. Setting it to 0 will have + the same effect as leaving it at the default (5 seconds). Setting it + to very large values will improve performance. Note that due to + delayed allocation even older data can be lost on power failure since + writeback of that data begins only after the time set in + /proc/sys/vm/dirty_expire_centisecs. barrier=<0|1(*)>, barrier(*), nobarrier This enables/disables the use of write barriers in the jbd code.
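A sketch of the distinction the revised ext4 text draws: the journal commit interval caps the age of metadata, while delayed-allocation data writeback is still governed by the vm sysctl. Hypothetical and illustrative only; it assumes root privileges and an ext4 filesystem already mounted at /mnt:

.. code-block:: c

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
        char buf[32];
        FILE *f;

        /* Equivalent of: mount -o remount,commit=1 /mnt */
        if (mount(NULL, "/mnt", NULL, MS_REMOUNT, "commit=1")) {
            perror("remount");
            return 1;
        }

        /* Delayed-allocation data is only expired for writeback after
         * this many centiseconds, regardless of the commit= setting. */
        f = fopen("/proc/sys/vm/dirty_expire_centisecs", "r");
        if (f && fgets(buf, sizeof(buf), f))
            printf("dirty_expire_centisecs: %s", buf);
        if (f)
            fclose(f);
        return 0;
    }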
diff --git a/Documentation/admin-guide/xfs.rst b/Documentation/admin-guide/xfs.rst index fb5b39f73059d15fdcba0ce922f85006193f098c..ad911be5b5e93e8d931ce1442b901c148fc013bd 100644 --- a/Documentation/admin-guide/xfs.rst +++ b/Documentation/admin-guide/xfs.rst @@ -253,7 +253,7 @@ The following sysctls are available for the XFS filesystem: pool. fs.xfs.speculative_prealloc_lifetime - (Units: seconds Min: 1 Default: 300 Max: 86400) + (Units: seconds Min: 1 Default: 300 Max: 86400) The interval at which the background scanning for inodes with unused speculative preallocation runs. The scan removes unused preallocation from clean inodes and releases diff --git a/Documentation/dev-tools/kcov.rst b/Documentation/dev-tools/kcov.rst index 36890b026e7777b206b8da3ca107f9ad81cb89b2..1c4e1825d769590a98aa9371546b8d716bd67ad5 100644 --- a/Documentation/dev-tools/kcov.rst +++ b/Documentation/dev-tools/kcov.rst @@ -251,11 +251,11 @@ selectively from different subsystems. .. code-block:: c struct kcov_remote_arg { - unsigned trace_mode; - unsigned area_size; - unsigned num_handles; - uint64_t common_handle; - uint64_t handles[0]; + __u32 trace_mode; + __u32 area_size; + __u32 num_handles; + __aligned_u64 common_handle; + __aligned_u64 handles[0]; }; #define KCOV_INIT_TRACE _IOR('c', 1, unsigned long) diff --git a/Documentation/dev-tools/kselftest.rst b/Documentation/dev-tools/kselftest.rst index ecdfdc9d4b0320f9ea4fb51974bfd5e35dfa36e0..61ae13c44f915db2b8339541798eb0266e10e3d4 100644 --- a/Documentation/dev-tools/kselftest.rst +++ b/Documentation/dev-tools/kselftest.rst @@ -203,12 +203,12 @@ Test Module Kselftest tests the kernel from userspace. Sometimes things need testing from within the kernel, one method of doing this is to create a test module. We can tie the module into the kselftest framework by -using a shell script test runner. ``kselftest_module.sh`` is designed +using a shell script test runner. ``kselftest/module.sh`` is designed to facilitate this process. There is also a header file provided to assist writing kernel modules that are for use with kselftest: - ``tools/testing/kselftest/kselftest_module.h`` -- ``tools/testing/kselftest/kselftest_module.sh`` +- ``tools/testing/kselftest/kselftest/module.sh`` How to use ---------- @@ -247,7 +247,7 @@ A bare bones test module might look like this: #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include "../tools/testing/selftests/kselftest_module.h" + #include "../tools/testing/selftests/kselftest/module.h" KSTM_MODULE_GLOBALS(); @@ -276,7 +276,7 @@ Example test script #!/bin/bash # SPDX-License-Identifier: GPL-2.0+ - $(dirname $0)/../kselftest_module.sh "foo" test_foo + $(dirname $0)/../kselftest/module.sh "foo" test_foo Test Harness diff --git a/Documentation/dev-tools/kunit/index.rst b/Documentation/dev-tools/kunit/index.rst index 26ffb46bdf99d0382d29bde9173c75a61b74deee..c60d760a0eed1f732cca2fd3c51075368a0ce24f 100644 --- a/Documentation/dev-tools/kunit/index.rst +++ b/Documentation/dev-tools/kunit/index.rst @@ -9,6 +9,7 @@ KUnit - Unit Testing for the Linux Kernel start usage + kunit-tool api/index faq diff --git a/Documentation/dev-tools/kunit/kunit-tool.rst b/Documentation/dev-tools/kunit/kunit-tool.rst new file mode 100644 index 0000000000000000000000000000000000000000..50d46394e97e39c2c2f4a1023bf7c02c364c88d4 --- /dev/null +++ b/Documentation/dev-tools/kunit/kunit-tool.rst @@ -0,0 +1,57 @@ +.. SPDX-License-Identifier: GPL-2.0 + +================= +kunit_tool How-To +================= + +What is kunit_tool? 
+=================== + +kunit_tool is a script (``tools/testing/kunit/kunit.py``) that aids in building +the Linux kernel as UML (`User Mode Linux +<http://user-mode-linux.sourceforge.net/>`_), running KUnit tests, parsing +the test results and displaying them in a user-friendly manner. + +What is a kunitconfig? +====================== + +It's just a defconfig that kunit_tool looks for in the base directory. +kunit_tool uses it to generate a .config as you might expect. In addition, it +verifies that the generated .config contains the CONFIG options in the +kunitconfig; the reason it does this is so that it is easy to be sure that a +CONFIG that enables a test actually ends up in the .config. + +How do I use kunit_tool? +======================== + +If a kunitconfig is present at the root directory, all you have to do is: + +.. code-block:: bash + + ./tools/testing/kunit/kunit.py run + +However, you most likely want to use it with the following options: + +.. code-block:: bash + + ./tools/testing/kunit/kunit.py run --timeout=30 --jobs=`nproc --all` + +- ``--timeout`` sets a maximum amount of time to allow tests to run. +- ``--jobs`` sets the number of threads to use to build the kernel. + +If you just want to use the defconfig that ships with the kernel, you can +append the ``--defconfig`` flag as well: + +.. code-block:: bash + + ./tools/testing/kunit/kunit.py run --timeout=30 --jobs=`nproc --all` --defconfig + +.. note:: + This command is particularly helpful for getting started because it + just works. No kunitconfig needs to be present. + +For a list of all the flags supported by kunit_tool, you can run: + +.. code-block:: bash + + ./tools/testing/kunit/kunit.py run --help diff --git a/Documentation/dev-tools/kunit/start.rst b/Documentation/dev-tools/kunit/start.rst index aeeddfafeea20befae5a3c28b25f207f226d78fa..4e1d24db6b139f60f0576fae9e30f1b7ad7e5c6a 100644 --- a/Documentation/dev-tools/kunit/start.rst +++ b/Documentation/dev-tools/kunit/start.rst @@ -19,21 +19,21 @@ The wrapper can be run with: .. code-block:: bash - ./tools/testing/kunit/kunit.py run + ./tools/testing/kunit/kunit.py run --defconfig -Creating a kunitconfig -====================== -The Python script is a thin wrapper around Kbuild as such, it needs to be -configured with a ``kunitconfig`` file. This file essentially contains the +For more information on this wrapper (also called kunit_tool) check out the +:doc:`kunit-tool` page. + +Creating a .kunitconfig +======================= +The Python script is a thin wrapper around Kbuild. As such, it needs to be +configured with a ``.kunitconfig`` file. This file essentially contains the regular Kernel config, with the specific test targets as well. .. code-block:: bash - git clone -b master https://kunit.googlesource.com/kunitconfig $PATH_TO_KUNITCONFIG_REPO cd $PATH_TO_LINUX_REPO - ln -s $PATH_TO_KUNIT_CONFIG_REPO/kunitconfig kunitconfig - -You may want to add kunitconfig to your local gitignore. + cp arch/um/configs/kunit_defconfig .kunitconfig Verifying KUnit Works --------------------- @@ -59,8 +59,8 @@ If everything worked correctly, you should see the following: followed by a list of tests that are run. All of them should be passing. .. note:: - Because it is building a lot of sources for the first time, the ``Building - kunit kernel`` step may take a while. + Because it is building a lot of sources for the first time, the + ``Building KUnit kernel`` step may take a while.
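For orientation, this is the shape of a test that ``kunit.py run`` builds into the UML kernel and executes; the "Writing your first test" walkthrough that follows constructs one of these step by step. A minimal sketch (the suite and case names here are made up):

.. code-block:: c

    #include <kunit/test.h>

    static void example_add_test(struct kunit *test)
    {
        /* Fails this case (not the kernel) if the values differ. */
        KUNIT_EXPECT_EQ(test, 3, 1 + 2);
    }

    static struct kunit_case example_test_cases[] = {
        KUNIT_CASE(example_add_test),
        {}
    };

    static struct kunit_suite example_test_suite = {
        .name = "example",
        .test_cases = example_test_cases,
    };
    kunit_test_suite(example_test_suite);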
Writing your first test ======================= @@ -148,7 +148,7 @@ and the following to ``drivers/misc/Makefile``: obj-$(CONFIG_MISC_EXAMPLE_TEST) += example-test.o -Now add it to your ``kunitconfig``: +Now add it to your ``.kunitconfig``: .. code-block:: none @@ -159,7 +159,7 @@ Now you can run the test: .. code-block:: bash - ./tools/testing/kunit/kunit.py + ./tools/testing/kunit/kunit.py run You should see the following failure: diff --git a/Documentation/dev-tools/kunit/usage.rst b/Documentation/dev-tools/kunit/usage.rst index c6e69634e274b40b617b23d539734d344b6c88ae..b9a065ab681eeae52d21463daa12d03c633603bd 100644 --- a/Documentation/dev-tools/kunit/usage.rst +++ b/Documentation/dev-tools/kunit/usage.rst @@ -16,7 +16,7 @@ Organization of this document ============================= This document is organized into two main sections: Testing and Isolating -Behavior. The first covers what a unit test is and how to use KUnit to write +Behavior. The first covers what unit tests are and how to use KUnit to write them. The second covers how to use KUnit to isolate code and make it possible to unit test code that was otherwise un-unit-testable. @@ -174,13 +174,13 @@ Test Suites ~~~~~~~~~~~ Now obviously one unit test isn't very helpful; the power comes from having -many test cases covering all of your behaviors. Consequently it is common to -have many *similar* tests; in order to reduce duplication in these closely -related tests most unit testing frameworks provide the concept of a *test -suite*, in KUnit we call it a *test suite*; all it is is just a collection of -test cases for a unit of code with a set up function that gets invoked before -every test cases and then a tear down function that gets invoked after every -test case completes. +many test cases covering all of a unit's behaviors. Consequently it is common +to have many *similar* tests; in order to reduce duplication in these closely +related tests most unit testing frameworks - including KUnit - provide the +concept of a *test suite*. A *test suite* is just a collection of test cases +for a unit of code with a set up function that gets invoked before every test +case and then a tear down function that gets invoked after every test case +completes. Example: @@ -211,7 +211,7 @@ KUnit test framework. .. note:: A test case will only be run if it is associated with a test suite. -For a more information on these types of things see the :doc:`api/test`. +For more information on these types of things see the :doc:`api/test`. Isolating Behavior ================== @@ -338,7 +338,7 @@ We can easily test this code by *faking out* the underlying EEPROM: return count; } - ssize_t fake_eeprom_write(struct eeprom *this, size_t offset, const char *buffer, size_t count) + ssize_t fake_eeprom_write(struct eeprom *parent, size_t offset, const char *buffer, size_t count) { struct fake_eeprom *this = container_of(parent, struct fake_eeprom, parent); @@ -454,7 +454,7 @@ KUnit on non-UML architectures By default KUnit uses UML as a way to provide dependencies for code under test. Under most circumstances KUnit's usage of UML should be treated as an implementation detail of how KUnit works under the hood. Nevertheless, there -are instances where being able to run architecture specific code, or test +are instances where being able to run architecture specific code or test against real hardware is desirable. For these reasons KUnit supports running on other architectures. 
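The parameter rename in the ``fake_eeprom_write()`` hunk above matters because ``container_of()`` must be applied to the embedded parent pointer. A freestanding sketch of that pattern (``container_of()`` is re-created locally so the snippet compiles outside the kernel tree):

.. code-block:: c

    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct eeprom {
        long (*write)(struct eeprom *parent, size_t offset,
                      const char *buffer, size_t count);
    };

    struct fake_eeprom {
        struct eeprom parent;   /* the container_of() anchor */
        char contents[16];
    };

    static long fake_eeprom_write(struct eeprom *parent, size_t offset,
                                  const char *buffer, size_t count)
    {
        /* Recover the fake from the abstract parent handle. */
        struct fake_eeprom *this =
            container_of(parent, struct fake_eeprom, parent);
        size_t i;

        for (i = 0; i < count && offset + i < sizeof(this->contents); i++)
            this->contents[offset + i] = buffer[i];
        return i;
    }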
@@ -557,7 +557,7 @@ run your tests on your hardware setup just by compiling for your architecture. .. important:: Always prefer tests that run on UML to tests that only run under a particular architecture, and always prefer tests that run under QEMU or another easy - (and monitarily free) to obtain software environment to a specific piece of + (and monetarily free) to obtain software environment to a specific piece of hardware. Nevertheless, there are still valid reasons to write an architecture or hardware diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-backend.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-backend.yaml new file mode 100644 index 0000000000000000000000000000000000000000..86057d5410650e400eac24cee01dfdbaf91fdb57 --- /dev/null +++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-backend.yaml @@ -0,0 +1,291 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/allwinner,sun4i-a10-display-backend.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Allwinner A10 Display Engine Backend Device Tree Bindings + +maintainers: + - Chen-Yu Tsai + - Maxime Ripard + +description: | + The display engine backend exposes layers and sprites to the system. + +properties: + compatible: + enum: + - allwinner,sun4i-a10-display-backend + - allwinner,sun5i-a13-display-backend + - allwinner,sun6i-a31-display-backend + - allwinner,sun7i-a20-display-backend + - allwinner,sun8i-a23-display-backend + - allwinner,sun8i-a33-display-backend + - allwinner,sun9i-a80-display-backend + + reg: + minItems: 1 + maxItems: 2 + items: + - description: Display Backend registers + - description: SAT registers + + reg-names: + minItems: 1 + maxItems: 2 + items: + - const: be + - const: sat + + interrupts: + maxItems: 1 + + clocks: + minItems: 3 + maxItems: 4 + items: + - description: The backend interface clock + - description: The backend module clock + - description: The backend DRAM clock + - description: The SAT clock + + clock-names: + minItems: 3 + maxItems: 4 + items: + - const: ahb + - const: mod + - const: ram + - const: sat + + resets: + minItems: 1 + maxItems: 2 + items: + - description: The Backend reset line + - description: The SAT reset line + + reset-names: + minItems: 1 + maxItems: 2 + items: + - const: be + - const: sat + + # FIXME: This should be made required eventually once every SoC will + # have the MBUS declared. + interconnects: + maxItems: 1 + + # FIXME: This should be made required eventually once every SoC will + # have the MBUS declared. + interconnect-names: + const: dma-mem + + ports: + type: object + description: | + A ports node with endpoint definitions as defined in + Documentation/devicetree/bindings/media/video-interfaces.txt. + + properties: + "#address-cells": + const: 1 + + "#size-cells": + const: 0 + + port@0: + type: object + description: | + Input endpoints of the controller. + + port@1: + type: object + description: | + Output endpoints of the controller. 
+ + required: + - "#address-cells" + - "#size-cells" + - port@0 + - port@1 + + additionalProperties: false + +required: + - compatible + - reg + - interrupts + - clocks + - clock-names + - resets + - ports + +additionalProperties: false + +if: + properties: + compatible: + contains: + const: allwinner,sun8i-a33-display-backend + +then: + properties: + reg: + minItems: 2 + + reg-names: + minItems: 2 + + clocks: + minItems: 4 + + clock-names: + minItems: 4 + + resets: + minItems: 2 + + reset-names: + minItems: 2 + + required: + - reg-names + - reset-names + +else: + properties: + reg: + maxItems: 1 + + reg-names: + maxItems: 1 + + clocks: + maxItems: 3 + + clock-names: + maxItems: 3 + + resets: + maxItems: 1 + + reset-names: + maxItems: 1 + +examples: + - | + /* + * This comes from the clock/sun4i-a10-ccu.h and + * reset/sun4i-a10-ccu.h headers, but we can't include them since + * it would trigger a bunch of warnings for redefinitions of + * symbols with the other example. + */ + + #define CLK_AHB_DE_BE0 42 + #define CLK_DRAM_DE_BE0 140 + #define CLK_DE_BE0 144 + #define RST_DE_BE0 5 + + display-backend@1e60000 { + compatible = "allwinner,sun4i-a10-display-backend"; + reg = <0x01e60000 0x10000>; + interrupts = <47>; + clocks = <&ccu CLK_AHB_DE_BE0>, <&ccu CLK_DE_BE0>, + <&ccu CLK_DRAM_DE_BE0>; + clock-names = "ahb", "mod", + "ram"; + resets = <&ccu RST_DE_BE0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + endpoint@0 { + reg = <0>; + remote-endpoint = <&fe0_out_be0>; + }; + + endpoint@1 { + reg = <1>; + remote-endpoint = <&fe1_out_be0>; + }; + }; + + port@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + endpoint@0 { + reg = <0>; + remote-endpoint = <&tcon0_in_be0>; + }; + + endpoint@1 { + reg = <1>; + remote-endpoint = <&tcon1_in_be0>; + }; + }; + }; + }; + + - | + #include + + /* + * This comes from the clock/sun8i-a23-a33-ccu.h and + * reset/sun8i-a23-a33-ccu.h headers, but we can't include them + * since it would trigger a bunch of warnings for redefinitions of + * symbols with the other example. + */ + + #define CLK_BUS_DE_BE 40 + #define CLK_BUS_SAT 46 + #define CLK_DRAM_DE_BE 84 + #define CLK_DE_BE 85 + #define RST_BUS_DE_BE 21 + #define RST_BUS_SAT 27 + + display-backend@1e60000 { + compatible = "allwinner,sun8i-a33-display-backend"; + reg = <0x01e60000 0x10000>, <0x01e80000 0x1000>; + reg-names = "be", "sat"; + interrupts = ; + clocks = <&ccu CLK_BUS_DE_BE>, <&ccu CLK_DE_BE>, + <&ccu CLK_DRAM_DE_BE>, <&ccu CLK_BUS_SAT>; + clock-names = "ahb", "mod", + "ram", "sat"; + resets = <&ccu RST_BUS_DE_BE>, <&ccu RST_BUS_SAT>; + reset-names = "be", "sat"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + + endpoint { + remote-endpoint = <&fe0_out_be0>; + }; + }; + + port@1 { + reg = <1>; + + endpoint { + remote-endpoint = <&drc0_in_be0>; + }; + }; + }; + }; + +... 
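Consumer-side view of the names this schema fixes: a driver claims the clocks and resets with exactly the strings defined above. A hypothetical probe fragment (the function name is made up; this is not the in-tree sun4i backend driver):

.. code-block:: c

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>
    #include <linux/reset.h>

    static int backend_probe(struct platform_device *pdev)
    {
        struct clk *ahb, *mod, *ram;
        struct reset_control *rst;

        ahb = devm_clk_get(&pdev->dev, "ahb");  /* interface clock */
        mod = devm_clk_get(&pdev->dev, "mod");  /* module clock */
        ram = devm_clk_get(&pdev->dev, "ram");  /* DRAM clock */
        if (IS_ERR(ahb) || IS_ERR(mod) || IS_ERR(ram))
            return -ENODEV;

        /*
         * Most variants have a single unnamed reset; only the A33
         * adds the second "sat" clock/reset pair and, with it, the
         * reg-names and reset-names requirements in the if/then block.
         */
        rst = devm_reset_control_get(&pdev->dev, NULL);
        return PTR_ERR_OR_ZERO(rst);
    }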
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-engine.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-engine.yaml new file mode 100644 index 0000000000000000000000000000000000000000..944ff2f1cf937ec1907d87abc96e1b8853dd051e --- /dev/null +++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-engine.yaml @@ -0,0 +1,114 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/allwinner,sun4i-a10-display-engine.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Allwinner A10 Display Engine Pipeline Device Tree Bindings + +maintainers: + - Chen-Yu Tsai + - Maxime Ripard + +description: | + The display engine pipeline (and its entry point, since it can be + either directly the backend or the frontend) is represented as an + extra node. + + The Allwinner A10 Display pipeline is composed of several components + that are going to be documented below: + + For all connections between components up to the TCONs in the + display pipeline, when there are multiple components of the same + type at the same depth, the local endpoint ID must be the same as + the remote component's index. For example, if the remote endpoint is + Frontend 1, then the local endpoint ID must be 1. + + Frontend 0 [0] ------- [0] Backend 0 [0] ------- [0] TCON 0 + [1] -- -- [1] [1] -- -- [1] + \ / \ / + X X + / \ / \ + [0] -- -- [0] [0] -- -- [0] + Frontend 1 [1] ------- [1] Backend 1 [1] ------- [1] TCON 1 + + For a two pipeline system such as the one depicted above, the lines + represent the connections between the components, while the numbers + within the square brackets correspond to the ID of the local endpoint. + + The same rule also applies to DE 2.0 mixer-TCON connections: + + Mixer 0 [0] ----------- [0] TCON 0 + [1] ---- ---- [1] + \ / + X + / \ + [0] ---- ---- [0] + Mixer 1 [1] ----------- [1] TCON 1 + +properties: + compatible: + enum: + - allwinner,sun4i-a10-display-engine + - allwinner,sun5i-a10s-display-engine + - allwinner,sun5i-a13-display-engine + - allwinner,sun6i-a31-display-engine + - allwinner,sun6i-a31s-display-engine + - allwinner,sun7i-a20-display-engine + - allwinner,sun8i-a23-display-engine + - allwinner,sun8i-a33-display-engine + - allwinner,sun8i-a83t-display-engine + - allwinner,sun8i-h3-display-engine + - allwinner,sun8i-r40-display-engine + - allwinner,sun8i-v3s-display-engine + - allwinner,sun9i-a80-display-engine + - allwinner,sun50i-a64-display-engine + - allwinner,sun50i-h6-display-engine + + allwinner,pipelines: + allOf: + - $ref: /schemas/types.yaml#/definitions/phandle-array + - minItems: 1 + maxItems: 2 + description: | + Available display engine frontends (DE 1.0) or mixers (DE + 2.0/3.0). + +required: + - compatible + - allwinner,pipelines + +additionalProperties: false + +if: + properties: + compatible: + contains: + enum: + - allwinner,sun4i-a10-display-engine + - allwinner,sun6i-a31-display-engine + - allwinner,sun6i-a31s-display-engine + - allwinner,sun7i-a20-display-engine + - allwinner,sun8i-a83t-display-engine + - allwinner,sun8i-r40-display-engine + - allwinner,sun9i-a80-display-engine + - allwinner,sun50i-a64-display-engine + +then: + properties: + allwinner,pipelines: + minItems: 2 + +else: + properties: + allwinner,pipelines: + maxItems: 1 + +examples: + - | + de: display-engine { + compatible = "allwinner,sun4i-a10-display-engine"; + allwinner,pipelines = <&fe0>, <&fe1>; + }; + +...
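The ``allwinner,pipelines`` property is a plain phandle array, so a consumer resolves each referenced frontend or mixer node by index. A hypothetical sketch (the helper name is made up):

.. code-block:: c

    #include <linux/of.h>

    static int count_pipelines(struct device_node *np)
    {
        struct device_node *pipeline;
        int i = 0;

        /* One phandle per DE 1.0 frontend or DE 2.0/3.0 mixer. */
        while ((pipeline = of_parse_phandle(np, "allwinner,pipelines", i))) {
            of_node_put(pipeline);
            i++;
        }
        return i;   /* 1 or 2, per the schema's min/maxItems */
    }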
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-frontend.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-frontend.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3eb1c2bbf4e7f976c195f13dba51a507ae8ec641 --- /dev/null +++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-frontend.yaml @@ -0,0 +1,138 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/allwinner,sun4i-a10-display-frontend.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Allwinner A10 Display Engine Frontend Device Tree Bindings + +maintainers: + - Chen-Yu Tsai + - Maxime Ripard + +description: | + The display engine frontend does formats conversion, scaling, + deinterlacing and color space conversion. + +properties: + compatible: + enum: + - allwinner,sun4i-a10-display-frontend + - allwinner,sun5i-a13-display-frontend + - allwinner,sun6i-a31-display-frontend + - allwinner,sun7i-a20-display-frontend + - allwinner,sun8i-a23-display-frontend + - allwinner,sun8i-a33-display-frontend + - allwinner,sun9i-a80-display-frontend + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + clocks: + items: + - description: The frontend interface clock + - description: The frontend module clock + - description: The frontend DRAM clock + + clock-names: + items: + - const: ahb + - const: mod + - const: ram + + # FIXME: This should be made required eventually once every SoC will + # have the MBUS declared. + interconnects: + maxItems: 1 + + # FIXME: This should be made required eventually once every SoC will + # have the MBUS declared. + interconnect-names: + const: dma-mem + + resets: + maxItems: 1 + + ports: + type: object + description: | + A ports node with endpoint definitions as defined in + Documentation/devicetree/bindings/media/video-interfaces.txt. + + properties: + "#address-cells": + const: 1 + + "#size-cells": + const: 0 + + port@0: + type: object + description: | + Input endpoints of the controller. + + port@1: + type: object + description: | + Output endpoints of the controller. + + required: + - "#address-cells" + - "#size-cells" + - port@1 + + additionalProperties: false + +required: + - compatible + - reg + - interrupts + - clocks + - clock-names + - resets + - ports + +additionalProperties: false + +examples: + - | + #include + #include + + fe0: display-frontend@1e00000 { + compatible = "allwinner,sun4i-a10-display-frontend"; + reg = <0x01e00000 0x20000>; + interrupts = <47>; + clocks = <&ccu CLK_AHB_DE_FE0>, <&ccu CLK_DE_FE0>, + <&ccu CLK_DRAM_DE_FE0>; + clock-names = "ahb", "mod", + "ram"; + resets = <&ccu RST_DE_FE0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + fe0_out: port@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + fe0_out_be0: endpoint@0 { + reg = <0>; + remote-endpoint = <&be0_in_fe0>; + }; + + fe0_out_be1: endpoint@1 { + reg = <1>; + remote-endpoint = <&be1_in_fe0>; + }; + }; + }; + }; + + +... 
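Since ``port@1`` is the only port this schema requires, a consumer can follow it through the OF graph to whichever backend input endpoint it is wired to. A hypothetical sketch (the helper name is made up):

.. code-block:: c

    #include <linux/of_graph.h>

    static struct device_node *frontend_get_backend(struct device_node *fe)
    {
        /*
         * Walk output port 1, endpoint 0 (fe0_out_be0 in the example
         * above) to the remote device node, i.e. the backend.
         */
        return of_graph_get_remote_node(fe, 1, 0);
    }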
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-hdmi.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-hdmi.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5d4915aed1e2d38a10681ea2b4ba9f76d58841b6 --- /dev/null +++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-hdmi.yaml @@ -0,0 +1,183 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/allwinner,sun4i-a10-hdmi.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Allwinner A10 HDMI Controller Device Tree Bindings + +description: | + The HDMI Encoder supports the HDMI video and audio outputs, and does + CEC. It is one end of the pipeline. + +maintainers: + - Chen-Yu Tsai + - Maxime Ripard + +properties: + compatible: + oneOf: + - const: allwinner,sun4i-a10-hdmi + - const: allwinner,sun5i-a10s-hdmi + - const: allwinner,sun6i-a31-hdmi + - items: + - const: allwinner,sun7i-a20-hdmi + - const: allwinner,sun5i-a10s-hdmi + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + clocks: + oneOf: + - items: + - description: The HDMI interface clock + - description: The HDMI module clock + - description: The first video PLL + - description: The second video PLL + + - items: + - description: The HDMI interface clock + - description: The HDMI module clock + - description: The HDMI DDC clock + - description: The first video PLL + - description: The second video PLL + + clock-names: + oneOf: + - items: + - const: ahb + - const: mod + - const: pll-0 + - const: pll-1 + + - items: + - const: ahb + - const: mod + - const: ddc + - const: pll-0 + - const: pll-1 + + resets: + maxItems: 1 + + dmas: + items: + - description: DDC Transmission DMA Channel + - description: DDC Reception DMA Channel + - description: Audio Transmission DMA Channel + + dma-names: + items: + - const: ddc-tx + - const: ddc-rx + - const: audio-tx + + ports: + type: object + description: | + A ports node with endpoint definitions as defined in + Documentation/devicetree/bindings/media/video-interfaces.txt. + + properties: + "#address-cells": + const: 1 + + "#size-cells": + const: 0 + + port@0: + type: object + description: | + Input endpoints of the controller. + + port@1: + type: object + description: | + Output endpoints of the controller. Usually an HDMI + connector. 
+ + required: + - "#address-cells" + - "#size-cells" + - port@0 + - port@1 + + additionalProperties: false + +required: + - compatible + - reg + - interrupts + - clocks + - clock-names + - dmas + - dma-names + +if: + properties: + compatible: + contains: + const: allwinner,sun6i-a31-hdmi + +then: + properties: + clocks: + minItems: 5 + + clock-names: + minItems: 5 + + required: + - resets + +additionalProperties: false + +examples: + - | + #include + #include + #include + + hdmi: hdmi@1c16000 { + compatible = "allwinner,sun4i-a10-hdmi"; + reg = <0x01c16000 0x1000>; + interrupts = <58>; + clocks = <&ccu CLK_AHB_HDMI0>, <&ccu CLK_HDMI>, + <&ccu CLK_PLL_VIDEO0_2X>, + <&ccu CLK_PLL_VIDEO1_2X>; + clock-names = "ahb", "mod", "pll-0", "pll-1"; + dmas = <&dma SUN4I_DMA_NORMAL 16>, + <&dma SUN4I_DMA_NORMAL 16>, + <&dma SUN4I_DMA_DEDICATED 24>; + dma-names = "ddc-tx", "ddc-rx", "audio-tx"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + hdmi_in: port@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + hdmi_in_tcon0: endpoint@0 { + reg = <0>; + remote-endpoint = <&tcon0_out_hdmi>; + }; + + hdmi_in_tcon1: endpoint@1 { + reg = <1>; + remote-endpoint = <&tcon1_out_hdmi>; + }; + }; + + hdmi_out: port@1 { + reg = <1>; + }; + }; + }; + +... diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml new file mode 100644 index 0000000000000000000000000000000000000000..86ad617d2327d459bd6c11c95705248f35b9df0d --- /dev/null +++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml @@ -0,0 +1,676 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/allwinner,sun4i-a10-tcon.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Allwinner A10 Timings Controller (TCON) Device Tree Bindings + +maintainers: + - Chen-Yu Tsai + - Maxime Ripard + +description: | + The TCON acts as a timing controller for RGB, LVDS and TV + interfaces. + +properties: + "#clock-cells": + const: 0 + + compatible: + oneOf: + - const: allwinner,sun4i-a10-tcon + - const: allwinner,sun5i-a13-tcon + - const: allwinner,sun6i-a31-tcon + - const: allwinner,sun6i-a31s-tcon + - const: allwinner,sun7i-a20-tcon + - const: allwinner,sun8i-a23-tcon + - const: allwinner,sun8i-a33-tcon + - const: allwinner,sun8i-a83t-tcon-lcd + - const: allwinner,sun8i-a83t-tcon-tv + - const: allwinner,sun8i-r40-tcon-tv + - const: allwinner,sun8i-v3s-tcon + - const: allwinner,sun9i-a80-tcon-lcd + - const: allwinner,sun9i-a80-tcon-tv + + - items: + - enum: + - allwinner,sun50i-a64-tcon-lcd + - const: allwinner,sun8i-a83t-tcon-lcd + + - items: + - enum: + - allwinner,sun8i-h3-tcon-tv + - allwinner,sun50i-a64-tcon-tv + - allwinner,sun50i-h6-tcon-tv + - const: allwinner,sun8i-a83t-tcon-tv + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + clocks: + minItems: 1 + maxItems: 4 + + clock-names: + minItems: 1 + maxItems: 4 + + clock-output-names: + allOf: + - $ref: /schemas/types.yaml#/definitions/string-array + - maxItems: 1 + description: + Name of the LCD pixel clock created. 
+ + dmas: + maxItems: 1 + + resets: + anyOf: + - items: + - description: TCON Reset Line + + - items: + - description: TCON Reset Line + - description: TCON LVDS Reset Line + + - items: + - description: TCON Reset Line + - description: TCON eDP Reset Line + + - items: + - description: TCON Reset Line + - description: TCON eDP Reset Line + - description: TCON LVDS Reset Line + + reset-names: + oneOf: + - const: lcd + + - items: + - const: lcd + - const: lvds + + - items: + - const: lcd + - const: edp + + - items: + - const: lcd + - const: edp + - const: lvds + + ports: + type: object + description: | + A ports node with endpoint definitions as defined in + Documentation/devicetree/bindings/media/video-interfaces.txt. + + properties: + "#address-cells": + const: 1 + + "#size-cells": + const: 0 + + port@0: + type: object + description: | + Input endpoints of the controller. + + port@1: + type: object + description: | + Output endpoints of the controller. + + patternProperties: + "^endpoint(@[0-9])$": + type: object + + properties: + allwinner,tcon-channel: + $ref: /schemas/types.yaml#/definitions/uint32 + description: | + TCON can have 1 or 2 channels, usually with the + first channel being used for the panels interfaces + (RGB, LVDS, etc.), and the second being used for the + outputs that require another controller (TV Encoder, + HDMI, etc.). + + If that property is present, specifies the TCON + channel the endpoint is associated to. If that + property is not present, the endpoint number will be + used as the channel number. + + unevaluatedProperties: true + + required: + - "#address-cells" + - "#size-cells" + - port@0 + - port@1 + + additionalProperties: false + +required: + - compatible + - reg + - interrupts + - clocks + - clock-names + - resets + - ports + +additionalProperties: false + +allOf: + - if: + properties: + compatible: + contains: + enum: + - allwinner,sun4i-a10-tcon + - allwinner,sun5i-a13-tcon + - allwinner,sun7i-a20-tcon + + then: + properties: + clocks: + minItems: 3 + + clock-names: + items: + - const: ahb + - const: tcon-ch0 + - const: tcon-ch1 + + - if: + properties: + compatible: + contains: + enum: + - allwinner,sun6i-a31-tcon + - allwinner,sun6i-a31s-tcon + + then: + properties: + clocks: + minItems: 4 + + clock-names: + items: + - const: ahb + - const: tcon-ch0 + - const: tcon-ch1 + - const: lvds-alt + + - if: + properties: + compatible: + contains: + enum: + - allwinner,sun8i-a23-tcon + - allwinner,sun8i-a33-tcon + + then: + properties: + clocks: + minItems: 3 + + clock-names: + items: + - const: ahb + - const: tcon-ch0 + - const: lvds-alt + + - if: + properties: + compatible: + contains: + enum: + - allwinner,sun8i-a83t-tcon-lcd + - allwinner,sun8i-v3s-tcon + - allwinner,sun9i-a80-tcon-lcd + + then: + properties: + clocks: + minItems: 2 + + clock-names: + items: + - const: ahb + - const: tcon-ch0 + + - if: + properties: + compatible: + contains: + enum: + - allwinner,sun8i-a83t-tcon-tv + - allwinner,sun8i-r40-tcon-tv + - allwinner,sun9i-a80-tcon-tv + + then: + properties: + clocks: + minItems: 2 + + clock-names: + items: + - const: ahb + - const: tcon-ch1 + + - if: + properties: + compatible: + contains: + enum: + - allwinner,sun5i-a13-tcon + - allwinner,sun6i-a31-tcon + - allwinner,sun6i-a31s-tcon + - allwinner,sun7i-a20-tcon + - allwinner,sun8i-a23-tcon + - allwinner,sun8i-a33-tcon + - allwinner,sun8i-v3s-tcon + - allwinner,sun9i-a80-tcon-lcd + - allwinner,sun4i-a10-tcon + - allwinner,sun8i-a83t-tcon-lcd + + then: + required: + - "#clock-cells" + - 
clock-output-names + + - if: + properties: + compatible: + contains: + enum: + - allwinner,sun6i-a31-tcon + - allwinner,sun6i-a31s-tcon + - allwinner,sun8i-a23-tcon + - allwinner,sun8i-a33-tcon + - allwinner,sun8i-a83t-tcon-lcd + + then: + properties: + resets: + minItems: 2 + + reset-names: + items: + - const: lcd + - const: lvds + + - if: + properties: + compatible: + contains: + enum: + - allwinner,sun9i-a80-tcon-lcd + + then: + properties: + resets: + minItems: 3 + + reset-names: + items: + - const: lcd + - const: edp + - const: lvds + + - if: + properties: + compatible: + contains: + enum: + - allwinner,sun9i-a80-tcon-tv + + then: + properties: + resets: + minItems: 2 + + reset-names: + items: + - const: lcd + - const: edp + + - if: + properties: + compatible: + contains: + enum: + - allwinner,sun4i-a10-tcon + - allwinner,sun5i-a13-tcon + - allwinner,sun6i-a31-tcon + - allwinner,sun6i-a31s-tcon + - allwinner,sun7i-a20-tcon + - allwinner,sun8i-a23-tcon + - allwinner,sun8i-a33-tcon + + then: + required: + - dmas + +examples: + - | + #include + + /* + * This comes from the clock/sun4i-a10-ccu.h and + * reset/sun4i-a10-ccu.h headers, but we can't include them since + * it would trigger a bunch of warnings for redefinitions of + * symbols with the other example. + */ + + #define CLK_AHB_LCD0 56 + #define CLK_TCON0_CH0 149 + #define CLK_TCON0_CH1 155 + #define RST_TCON0 11 + + lcd-controller@1c0c000 { + compatible = "allwinner,sun4i-a10-tcon"; + reg = <0x01c0c000 0x1000>; + interrupts = <44>; + resets = <&ccu RST_TCON0>; + reset-names = "lcd"; + clocks = <&ccu CLK_AHB_LCD0>, + <&ccu CLK_TCON0_CH0>, + <&ccu CLK_TCON0_CH1>; + clock-names = "ahb", + "tcon-ch0", + "tcon-ch1"; + clock-output-names = "tcon0-pixel-clock"; + #clock-cells = <0>; + dmas = <&dma SUN4I_DMA_DEDICATED 14>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + endpoint@0 { + reg = <0>; + remote-endpoint = <&be0_out_tcon0>; + }; + + endpoint@1 { + reg = <1>; + remote-endpoint = <&be1_out_tcon0>; + }; + }; + + port@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + endpoint@1 { + reg = <1>; + remote-endpoint = <&hdmi_in_tcon0>; + allwinner,tcon-channel = <1>; + }; + }; + }; + }; + + #undef CLK_AHB_LCD0 + #undef CLK_TCON0_CH0 + #undef CLK_TCON0_CH1 + #undef RST_TCON0 + + - | + #include + + /* + * This comes from the clock/sun6i-a31-ccu.h and + * reset/sun6i-a31-ccu.h headers, but we can't include them since + * it would trigger a bunch of warnings for redefinitions of + * symbols with the other example. 
+ */ + + #define CLK_PLL_MIPI 15 + #define CLK_AHB1_LCD0 47 + #define CLK_LCD0_CH0 127 + #define CLK_LCD0_CH1 129 + #define RST_AHB1_LCD0 27 + #define RST_AHB1_LVDS 41 + + lcd-controller@1c0c000 { + compatible = "allwinner,sun6i-a31-tcon"; + reg = <0x01c0c000 0x1000>; + interrupts = ; + dmas = <&dma 11>; + resets = <&ccu RST_AHB1_LCD0>, <&ccu RST_AHB1_LVDS>; + reset-names = "lcd", "lvds"; + clocks = <&ccu CLK_AHB1_LCD0>, + <&ccu CLK_LCD0_CH0>, + <&ccu CLK_LCD0_CH1>, + <&ccu CLK_PLL_MIPI>; + clock-names = "ahb", + "tcon-ch0", + "tcon-ch1", + "lvds-alt"; + clock-output-names = "tcon0-pixel-clock"; + #clock-cells = <0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + endpoint@0 { + reg = <0>; + remote-endpoint = <&drc0_out_tcon0>; + }; + + endpoint@1 { + reg = <1>; + remote-endpoint = <&drc1_out_tcon0>; + }; + }; + + port@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + endpoint@1 { + reg = <1>; + remote-endpoint = <&hdmi_in_tcon0>; + allwinner,tcon-channel = <1>; + }; + }; + }; + }; + + #undef CLK_PLL_MIPI + #undef CLK_AHB1_LCD0 + #undef CLK_LCD0_CH0 + #undef CLK_LCD0_CH1 + #undef RST_AHB1_LCD0 + #undef RST_AHB1_LVDS + + - | + #include + + /* + * This comes from the clock/sun9i-a80-ccu.h and + * reset/sun9i-a80-ccu.h headers, but we can't include them since + * it would trigger a bunch of warnings for redefinitions of + * symbols with the other example. + */ + + #define CLK_BUS_LCD0 102 + #define CLK_LCD0 58 + #define RST_BUS_LCD0 22 + #define RST_BUS_EDP 24 + #define RST_BUS_LVDS 25 + + lcd-controller@3c00000 { + compatible = "allwinner,sun9i-a80-tcon-lcd"; + reg = <0x03c00000 0x10000>; + interrupts = ; + clocks = <&ccu CLK_BUS_LCD0>, <&ccu CLK_LCD0>; + clock-names = "ahb", "tcon-ch0"; + resets = <&ccu RST_BUS_LCD0>, <&ccu RST_BUS_EDP>, <&ccu RST_BUS_LVDS>; + reset-names = "lcd", "edp", "lvds"; + clock-output-names = "tcon0-pixel-clock"; + #clock-cells = <0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + + endpoint { + remote-endpoint = <&drc0_out_tcon0>; + }; + }; + + port@1 { + reg = <1>; + }; + }; + }; + + #undef CLK_BUS_LCD0 + #undef CLK_LCD0 + #undef RST_BUS_LCD0 + #undef RST_BUS_EDP + #undef RST_BUS_LVDS + + - | + #include + + /* + * This comes from the clock/sun8i-a83t-ccu.h and + * reset/sun8i-a83t-ccu.h headers, but we can't include them since + * it would trigger a bunch of warnings for redefinitions of + * symbols with the other example.
+ */ + + #define CLK_BUS_TCON0 36 + #define CLK_TCON0 85 + #define RST_BUS_TCON0 22 + #define RST_BUS_LVDS 31 + + lcd-controller@1c0c000 { + compatible = "allwinner,sun8i-a83t-tcon-lcd"; + reg = <0x01c0c000 0x1000>; + interrupts = ; + clocks = <&ccu CLK_BUS_TCON0>, <&ccu CLK_TCON0>; + clock-names = "ahb", "tcon-ch0"; + clock-output-names = "tcon-pixel-clock"; + #clock-cells = <0>; + resets = <&ccu RST_BUS_TCON0>, <&ccu RST_BUS_LVDS>; + reset-names = "lcd", "lvds"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + endpoint@0 { + reg = <0>; + remote-endpoint = <&mixer0_out_tcon0>; + }; + + endpoint@1 { + reg = <1>; + remote-endpoint = <&mixer1_out_tcon0>; + }; + }; + + port@1 { + reg = <1>; + }; + }; + }; + + #undef CLK_BUS_TCON0 + #undef CLK_TCON0 + #undef RST_BUS_TCON0 + #undef RST_BUS_LVDS + + - | + #include + + /* + * This comes from the clock/sun8i-r40-ccu.h and + * reset/sun8i-r40-ccu.h headers, but we can't include them since + * it would trigger a bunch of warnings for redefinitions of + * symbols with the other example. + */ + + #define CLK_BUS_TCON_TV0 73 + #define RST_BUS_TCON_TV0 49 + + tcon_tv0: lcd-controller@1c73000 { + compatible = "allwinner,sun8i-r40-tcon-tv"; + reg = <0x01c73000 0x1000>; + interrupts = ; + clocks = <&ccu CLK_BUS_TCON_TV0>, <&tcon_top 0>; + clock-names = "ahb", "tcon-ch1"; + resets = <&ccu RST_BUS_TCON_TV0>; + reset-names = "lcd"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + endpoint@0 { + reg = <0>; + remote-endpoint = <&tcon_top_mixer0_out_tcon_tv0>; + }; + + endpoint@1 { + reg = <1>; + remote-endpoint = <&tcon_top_mixer1_out_tcon_tv0>; + }; + }; + + tcon_tv0_out: port@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + endpoint@1 { + reg = <1>; + remote-endpoint = <&tcon_top_hdmi_in_tcon_tv0>; + }; + }; + }; + }; + + #undef CLK_BUS_TCON_TV0 + #undef RST_BUS_TCON_TV0 + +... diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tv-encoder.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tv-encoder.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5d5d396651190b72ede21c5d5acd9a40605f2940 --- /dev/null +++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tv-encoder.yaml @@ -0,0 +1,62 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/allwinner,sun4i-a10-tv-encoder.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Allwinner A10 TV Encoder Device Tree Bindings + +maintainers: + - Chen-Yu Tsai + - Maxime Ripard + +properties: + compatible: + const: allwinner,sun4i-a10-tv-encoder + + reg: + maxItems: 1 + + clocks: + maxItems: 1 + + resets: + maxItems: 1 + + port: + type: object + description: + A port node with endpoint definitions as defined in + Documentation/devicetree/bindings/media/video-interfaces.txt. The + first port should be the input endpoint, usually coming from the + associated TCON. + +required: + - compatible + - reg + - clocks + - resets + - port + +additionalProperties: false + +examples: + - | + tve0: tv-encoder@1c0a000 { + compatible = "allwinner,sun4i-a10-tv-encoder"; + reg = <0x01c0a000 0x1000>; + clocks = <&ahb_gates 34>; + resets = <&tcon_ch0_clk 0>; + + port { + #address-cells = <1>; + #size-cells = <0>; + + tve0_in_tcon0: endpoint@0 { + reg = <0>; + remote-endpoint = <&tcon0_out_tve0>; + }; + }; + }; + +... 
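The TCON schema above states a channel-selection rule for endpoints: honour ``allwinner,tcon-channel`` when present, otherwise fall back to the endpoint number. That reduces to two property reads; a hypothetical sketch (the helper name is made up):

.. code-block:: c

    #include <linux/of.h>

    static u32 tcon_endpoint_channel(struct device_node *ep)
    {
        u32 channel;

        /* An explicit override wins... */
        if (!of_property_read_u32(ep, "allwinner,tcon-channel", &channel))
            return channel;
        /* ...otherwise the endpoint number doubles as the channel. */
        if (!of_property_read_u32(ep, "reg", &channel))
            return channel;
        return 0;   /* single-endpoint ports may omit reg */
    }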
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-drc.yaml b/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-drc.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0c1ce55940e1641cfb34530823bbcef307b2376f --- /dev/null +++ b/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-drc.yaml @@ -0,0 +1,138 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/allwinner,sun6i-a31-drc.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Allwinner A31 Dynamic Range Controller Device Tree Bindings + +maintainers: + - Chen-Yu Tsai + - Maxime Ripard + +description: | + The DRC (Dynamic Range Controller) allows to dynamically adjust + pixel brightness/contrast based on histogram measurements for LCD + content adaptive backlight control. + +properties: + compatible: + enum: + - allwinner,sun6i-a31-drc + - allwinner,sun6i-a31s-drc + - allwinner,sun8i-a23-drc + - allwinner,sun8i-a33-drc + - allwinner,sun9i-a80-drc + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + clocks: + items: + - description: The DRC interface clock + - description: The DRC module clock + - description: The DRC DRAM clock + + clock-names: + items: + - const: ahb + - const: mod + - const: ram + + resets: + maxItems: 1 + + ports: + type: object + description: | + A ports node with endpoint definitions as defined in + Documentation/devicetree/bindings/media/video-interfaces.txt. + + properties: + "#address-cells": + const: 1 + + "#size-cells": + const: 0 + + port@0: + type: object + description: | + Input endpoints of the controller. + + port@1: + type: object + description: | + Output endpoints of the controller. + + required: + - "#address-cells" + - "#size-cells" + - port@0 + - port@1 + + additionalProperties: false + +required: + - compatible + - reg + - interrupts + - clocks + - clock-names + - resets + - ports + +additionalProperties: false + +examples: + - | + #include + + #include + #include + + drc0: drc@1e70000 { + compatible = "allwinner,sun6i-a31-drc"; + reg = <0x01e70000 0x10000>; + interrupts = ; + clocks = <&ccu CLK_AHB1_DRC0>, <&ccu CLK_IEP_DRC0>, + <&ccu CLK_DRAM_DRC0>; + clock-names = "ahb", "mod", + "ram"; + resets = <&ccu RST_AHB1_DRC0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + drc0_in: port@0 { + reg = <0>; + + drc0_in_be0: endpoint { + remote-endpoint = <&be0_out_drc0>; + }; + }; + + drc0_out: port@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + drc0_out_tcon0: endpoint@0 { + reg = <0>; + remote-endpoint = <&tcon0_in_drc0>; + }; + + drc0_out_tcon1: endpoint@1 { + reg = <1>; + remote-endpoint = <&tcon1_in_drc0>; + }; + }; + }; + }; + + +... 
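The DRC's three fixed clock names lend themselves to a bulk request. A hypothetical probe fragment (the function name is made up, and a real driver would keep the array in its device state rather than static):

.. code-block:: c

    #include <linux/clk.h>
    #include <linux/kernel.h>
    #include <linux/platform_device.h>

    static int drc_get_clocks(struct platform_device *pdev)
    {
        /* Static only for the sketch; devm fills in the clk pointers. */
        static struct clk_bulk_data clks[] = {
            { .id = "ahb" },    /* interface clock */
            { .id = "mod" },    /* module clock */
            { .id = "ram" },    /* DRAM clock */
        };

        return devm_clk_bulk_get(&pdev->dev, ARRAY_SIZE(clks), clks);
    }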
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-de2-mixer.yaml b/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-de2-mixer.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1dee641e3ea168b724a56f4e6f5ca3a423874acb --- /dev/null +++ b/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-de2-mixer.yaml @@ -0,0 +1,118 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/allwinner,sun8i-a83t-de2-mixer.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Allwinner Display Engine 2.0 Mixer Device Tree Bindings + +maintainers: + - Chen-Yu Tsai + - Maxime Ripard + +properties: + compatible: + enum: + - allwinner,sun8i-a83t-de2-mixer-0 + - allwinner,sun8i-a83t-de2-mixer-1 + - allwinner,sun8i-h3-de2-mixer-0 + - allwinner,sun8i-r40-de2-mixer-0 + - allwinner,sun8i-r40-de2-mixer-1 + - allwinner,sun8i-v3s-de2-mixer + - allwinner,sun50i-a64-de2-mixer-0 + - allwinner,sun50i-a64-de2-mixer-1 + - allwinner,sun50i-h6-de3-mixer-0 + + reg: + maxItems: 1 + + clocks: + items: + - description: The mixer interface clock + - description: The mixer module clock + + clock-names: + items: + - const: bus + - const: mod + + resets: + maxItems: 1 + + ports: + type: object + description: | + A ports node with endpoint definitions as defined in + Documentation/devicetree/bindings/media/video-interfaces.txt. + + properties: + "#address-cells": + const: 1 + + "#size-cells": + const: 0 + + port@0: + type: object + description: | + Input endpoints of the controller. + + port@1: + type: object + description: | + Output endpoints of the controller. + + required: + - "#address-cells" + - "#size-cells" + - port@1 + + additionalProperties: false + +required: + - compatible + - reg + - clocks + - clock-names + - resets + - ports + +additionalProperties: false + +examples: + - | + #include + #include + + mixer0: mixer@1100000 { + compatible = "allwinner,sun8i-a83t-de2-mixer-0"; + reg = <0x01100000 0x100000>; + clocks = <&display_clocks CLK_BUS_MIXER0>, + <&display_clocks CLK_MIXER0>; + clock-names = "bus", + "mod"; + resets = <&display_clocks RST_MIXER0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + mixer0_out: port@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + mixer0_out_tcon0: endpoint@0 { + reg = <0>; + remote-endpoint = <&tcon0_in_mixer0>; + }; + + mixer0_out_tcon1: endpoint@1 { + reg = <1>; + remote-endpoint = <&tcon1_in_mixer0>; + }; + }; + }; + }; + +... diff --git a/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-dw-hdmi.yaml b/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-dw-hdmi.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4d6795690ac38315f8ee4267f3e182b9a0259380 --- /dev/null +++ b/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-dw-hdmi.yaml @@ -0,0 +1,273 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/allwinner,sun8i-a83t-dw-hdmi.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Allwinner A83t DWC HDMI TX Encoder Device Tree Bindings + +description: | + The HDMI transmitter is a Synopsys DesignWare HDMI 1.4 TX controller + IP with Allwinner\'s own PHY IP. It supports audio and video outputs + and CEC. + + These DT bindings follow the Synopsys DWC HDMI TX bindings defined + in Documentation/devicetree/bindings/display/bridge/dw_hdmi.txt with + the following device-specific properties. 
+ +maintainers: + - Chen-Yu Tsai + - Maxime Ripard + +properties: + "#phy-cells": + const: 0 + + compatible: + oneOf: + - const: allwinner,sun8i-a83t-dw-hdmi + - const: allwinner,sun50i-h6-dw-hdmi + + - items: + - enum: + - allwinner,sun8i-h3-dw-hdmi + - allwinner,sun8i-r40-dw-hdmi + - allwinner,sun50i-a64-dw-hdmi + - const: allwinner,sun8i-a83t-dw-hdmi + + reg: + maxItems: 1 + + reg-io-width: + const: 1 + + interrupts: + maxItems: 1 + + clocks: + minItems: 3 + maxItems: 6 + items: + - description: Bus Clock + - description: Register Clock + - description: TMDS Clock + - description: HDMI CEC Clock + - description: HDCP Clock + - description: HDCP Bus Clock + + clock-names: + minItems: 3 + maxItems: 6 + items: + - const: iahb + - const: isfr + - const: tmds + - const: cec + - const: hdcp + - const: hdcp-bus + + resets: + minItems: 1 + maxItems: 2 + items: + - description: HDMI Controller Reset + - description: HDCP Reset + + reset-names: + minItems: 1 + maxItems: 2 + items: + - const: ctrl + - const: hdcp + + phys: + maxItems: 1 + description: + Phandle to the DWC HDMI PHY. + + phy-names: + const: phy + + hvcc-supply: + description: + The VCC power supply of the controller + + ports: + type: object + description: | + A ports node with endpoint definitions as defined in + Documentation/devicetree/bindings/media/video-interfaces.txt. + + properties: + "#address-cells": + const: 1 + + "#size-cells": + const: 0 + + port@0: + type: object + description: | + Input endpoints of the controller. Usually the associated + TCON. + + port@1: + type: object + description: | + Output endpoints of the controller. Usually an HDMI + connector. + + required: + - "#address-cells" + - "#size-cells" + - port@0 + - port@1 + + additionalProperties: false + +required: + - compatible + - reg + - reg-io-width + - interrupts + - clocks + - clock-names + - resets + - reset-names + - phys + - phy-names + - ports + +if: + properties: + compatible: + contains: + enum: + - allwinner,sun50i-h6-dw-hdmi + +then: + properties: + clocks: + minItems: 6 + + clock-names: + minItems: 6 + + resets: + minItems: 2 + + reset-names: + minItems: 2 + + +additionalProperties: false + +examples: + - | + #include + + /* + * This comes from the clock/sun8i-a83t-ccu.h and + * reset/sun8i-a83t-ccu.h headers, but we can't include them since + * it would trigger a bunch of warnings for redefinitions of + * symbols with the other example. + */ + #define CLK_BUS_HDMI 39 + #define CLK_HDMI 93 + #define CLK_HDMI_SLOW 94 + #define RST_BUS_HDMI1 26 + + hdmi@1ee0000 { + compatible = "allwinner,sun8i-a83t-dw-hdmi"; + reg = <0x01ee0000 0x10000>; + reg-io-width = <1>; + interrupts = ; + clocks = <&ccu CLK_BUS_HDMI>, <&ccu CLK_HDMI_SLOW>, + <&ccu CLK_HDMI>; + clock-names = "iahb", "isfr", "tmds"; + resets = <&ccu RST_BUS_HDMI1>; + reset-names = "ctrl"; + phys = <&hdmi_phy>; + phy-names = "phy"; + pinctrl-names = "default"; + pinctrl-0 = <&hdmi_pins>; + status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + + endpoint { + remote-endpoint = <&tcon1_out_hdmi>; + }; + }; + + port@1 { + reg = <1>; + }; + }; + }; + + /* Cleanup after ourselves */ + #undef CLK_BUS_HDMI + #undef CLK_HDMI + #undef CLK_HDMI_SLOW + + - | + #include + + /* + * This comes from the clock/sun50i-h6-ccu.h and + * reset/sun50i-h6-ccu.h headers, but we can't include them since + * it would trigger a bunch of warnings for redefinitions of + * symbols with the other example. 
+ */ + #define CLK_BUS_HDMI 126 + #define CLK_BUS_HDCP 137 + #define CLK_HDMI 123 + #define CLK_HDMI_SLOW 124 + #define CLK_HDMI_CEC 125 + #define CLK_HDCP 136 + #define RST_BUS_HDMI_SUB 57 + #define RST_BUS_HDCP 62 + + hdmi@6000000 { + compatible = "allwinner,sun50i-h6-dw-hdmi"; + reg = <0x06000000 0x10000>; + reg-io-width = <1>; + interrupts = ; + clocks = <&ccu CLK_BUS_HDMI>, <&ccu CLK_HDMI_SLOW>, + <&ccu CLK_HDMI>, <&ccu CLK_HDMI_CEC>, + <&ccu CLK_HDCP>, <&ccu CLK_BUS_HDCP>; + clock-names = "iahb", "isfr", "tmds", "cec", "hdcp", + "hdcp-bus"; + resets = <&ccu RST_BUS_HDMI_SUB>, <&ccu RST_BUS_HDCP>; + reset-names = "ctrl", "hdcp"; + phys = <&hdmi_phy>; + phy-names = "phy"; + pinctrl-names = "default"; + pinctrl-0 = <&hdmi_pins>; + status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + + endpoint { + remote-endpoint = <&tcon_top_hdmi_out_hdmi>; + }; + }; + + port@1 { + reg = <1>; + }; + }; + }; + +... diff --git a/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-hdmi-phy.yaml b/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-hdmi-phy.yaml new file mode 100644 index 0000000000000000000000000000000000000000..501cec16168cee3b96cc9d939563ff3a1494e22d --- /dev/null +++ b/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-hdmi-phy.yaml @@ -0,0 +1,117 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/allwinner,sun8i-a83t-hdmi-phy.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Allwinner A83t HDMI PHY Device Tree Bindings + +maintainers: + - Chen-Yu Tsai + - Maxime Ripard + +properties: + "#phy-cells": + const: 0 + + compatible: + enum: + - allwinner,sun8i-a83t-hdmi-phy + - allwinner,sun8i-h3-hdmi-phy + - allwinner,sun8i-r40-hdmi-phy + - allwinner,sun50i-a64-hdmi-phy + - allwinner,sun50i-h6-hdmi-phy + + reg: + maxItems: 1 + + clocks: + minItems: 2 + maxItems: 4 + items: + - description: Bus Clock + - description: Module Clock + - description: Parent of the PHY clock + - description: Second possible parent of the PHY clock + + clock-names: + minItems: 2 + maxItems: 4 + items: + - const: bus + - const: mod + - const: pll-0 + - const: pll-1 + + resets: + maxItems: 1 + + reset-names: + const: phy + +required: + - compatible + - reg + - clocks + - clock-names + - resets + - reset-names + +if: + properties: + compatible: + contains: + enum: + - allwinner,sun8i-r40-hdmi-phy + +then: + properties: + clocks: + minItems: 4 + + clock-names: + minItems: 4 + +else: + if: + properties: + compatible: + contains: + enum: + - allwinner,sun8i-h3-hdmi-phy + - allwinner,sun50i-a64-hdmi-phy + + then: + properties: + clocks: + minItems: 3 + + clock-names: + minItems: 3 + + else: + properties: + clocks: + maxItems: 2 + + clock-names: + maxItems: 2 + +additionalProperties: false + +examples: + - | + #include + #include + + hdmi_phy: hdmi-phy@1ef0000 { + compatible = "allwinner,sun8i-a83t-hdmi-phy"; + reg = <0x01ef0000 0x10000>; + clocks = <&ccu CLK_BUS_HDMI>, <&ccu CLK_HDMI_SLOW>; + clock-names = "bus", "mod"; + resets = <&ccu RST_BUS_HDMI0>; + reset-names = "phy"; + #phy-cells = <0>; + }; + +... 
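With ``#phy-cells = <0>``, the controller side references this node as ``phys = <&hdmi_phy>`` / ``phy-names = "phy"``, as in the dw-hdmi examples above. A generic-PHY consumer sketch of that contract (hypothetical function; the in-tree Allwinner glue actually drives this PHY through its own helpers, so this only illustrates the binding's referencing convention):

.. code-block:: c

    #include <linux/err.h>
    #include <linux/phy/phy.h>
    #include <linux/platform_device.h>

    static int hdmi_attach_phy(struct platform_device *pdev)
    {
        /* Resolve phys = <&hdmi_phy> via phy-names = "phy". */
        struct phy *phy = devm_phy_get(&pdev->dev, "phy");

        if (IS_ERR(phy))
            return PTR_ERR(phy);

        return phy_init(phy);
    }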
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun8i-r40-tcon-top.yaml b/Documentation/devicetree/bindings/display/allwinner,sun8i-r40-tcon-top.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b98ca609824b92a2e3bfef82251be8b1aadd4221 --- /dev/null +++ b/Documentation/devicetree/bindings/display/allwinner,sun8i-r40-tcon-top.yaml @@ -0,0 +1,382 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/allwinner,sun8i-r40-tcon-top.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Allwinner R40 TCON TOP Device Tree Bindings + +maintainers: + - Chen-Yu Tsai + - Maxime Ripard + +description: | + TCON TOPs main purpose is to configure whole display pipeline. It + determines relationships between mixers and TCONs, selects source + TCON for HDMI, muxes LCD and TV encoder GPIO output, selects TV + encoder clock source and contains additional TV TCON and DSI gates. + + It allows display pipeline to be configured in very different ways: + + / LCD0/LVDS0 + / [0] TCON-LCD0 + | \ MIPI DSI + mixer0 | + \ / [1] TCON-LCD1 - LCD1/LVDS1 + TCON-TOP + / \ [2] TCON-TV0 [0] - TVE0/RGB + mixer1 | \ + | TCON-TOP - HDMI + | / + \ [3] TCON-TV1 [1] - TVE1/RGB + + Note that both TCON TOP references same physical unit. Both mixers + can be connected to any TCON. Not all TCON TOP variants support all + features. + +properties: + "#clock-cells": + const: 1 + + compatible: + enum: + - allwinner,sun8i-r40-tcon-top + - allwinner,sun50i-h6-tcon-top + + reg: + maxItems: 1 + + clocks: + minItems: 2 + maxItems: 6 + items: + - description: The TCON TOP interface clock + - description: The TCON TOP TV0 clock + - description: The TCON TOP TVE0 clock + - description: The TCON TOP TV1 clock + - description: The TCON TOP TVE1 clock + - description: The TCON TOP MIPI DSI clock + + clock-names: + minItems: 2 + maxItems: 6 + items: + - const: bus + - const: tcon-tv0 + - const: tve0 + - const: tcon-tv1 + - const: tve1 + - const: dsi + + clock-output-names: + minItems: 1 + maxItems: 3 + description: > + The first item is the name of the clock created for the TV0 + channel, the second item is the name of the TCON TV1 channel + clock and the third one is the name of the DSI channel clock. + + resets: + maxItems: 1 + + ports: + type: object + description: | + A ports node with endpoint definitions as defined in + Documentation/devicetree/bindings/media/video-interfaces.txt. + All ports should have only one endpoint connected to + remote endpoint. + + properties: + "#address-cells": + const: 1 + + "#size-cells": + const: 0 + + port@0: + type: object + description: | + Input endpoint for Mixer 0 mux. + + port@1: + type: object + description: | + Output endpoint for Mixer 0 mux + + properties: + "#address-cells": + const: 1 + + "#size-cells": + const: 0 + + reg: true + + patternProperties: + "^endpoint@[0-9]$": + type: object + + properties: + reg: + description: | + ID of the target TCON + + required: + - reg + + required: + - "#address-cells" + - "#size-cells" + + additionalProperties: false + + port@2: + type: object + description: | + Input endpoint for Mixer 1 mux. 
+ + port@3: + type: object + description: | + Output endpoint for Mixer 1 mux + + properties: + "#address-cells": + const: 1 + + "#size-cells": + const: 0 + + reg: true + + patternProperties: + "^endpoint@[0-9]$": + type: object + + properties: + reg: + description: | + ID of the target TCON + + required: + - reg + + required: + - "#address-cells" + - "#size-cells" + + additionalProperties: false + + port@4: + type: object + description: | + Input endpoint for HDMI mux. + + properties: + "#address-cells": + const: 1 + + "#size-cells": + const: 0 + + reg: true + + patternProperties: + "^endpoint@[0-9]$": + type: object + + properties: + reg: + description: | + ID of the target TCON + + required: + - reg + + required: + - "#address-cells" + - "#size-cells" + + additionalProperties: false + + port@5: + type: object + description: | + Output endpoint for HDMI mux + + required: + - "#address-cells" + - "#size-cells" + - port@0 + - port@1 + - port@4 + - port@5 + + additionalProperties: false + +required: + - "#clock-cells" + - compatible + - reg + - clocks + - clock-names + - clock-output-names + - resets + - ports + +additionalProperties: false + +if: + properties: + compatible: + contains: + const: allwinner,sun50i-h6-tcon-top + +then: + properties: + clocks: + maxItems: 2 + + clock-output-names: + maxItems: 1 + +else: + properties: + clocks: + minItems: 6 + + clock-output-names: + minItems: 3 + + ports: + required: + - port@2 + - port@3 + +examples: + - | + #include + + #include + #include + + tcon_top: tcon-top@1c70000 { + compatible = "allwinner,sun8i-r40-tcon-top"; + reg = <0x01c70000 0x1000>; + clocks = <&ccu CLK_BUS_TCON_TOP>, + <&ccu CLK_TCON_TV0>, + <&ccu CLK_TVE0>, + <&ccu CLK_TCON_TV1>, + <&ccu CLK_TVE1>, + <&ccu CLK_DSI_DPHY>; + clock-names = "bus", + "tcon-tv0", + "tve0", + "tcon-tv1", + "tve1", + "dsi"; + clock-output-names = "tcon-top-tv0", + "tcon-top-tv1", + "tcon-top-dsi"; + resets = <&ccu RST_BUS_TCON_TOP>; + #clock-cells = <1>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + tcon_top_mixer0_in: port@0 { + reg = <0>; + + tcon_top_mixer0_in_mixer0: endpoint { + remote-endpoint = <&mixer0_out_tcon_top>; + }; + }; + + tcon_top_mixer0_out: port@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + tcon_top_mixer0_out_tcon_lcd0: endpoint@0 { + reg = <0>; + }; + + tcon_top_mixer0_out_tcon_lcd1: endpoint@1 { + reg = <1>; + }; + + tcon_top_mixer0_out_tcon_tv0: endpoint@2 { + reg = <2>; + remote-endpoint = <&tcon_tv0_in_tcon_top_mixer0>; + }; + + tcon_top_mixer0_out_tcon_tv1: endpoint@3 { + reg = <3>; + remote-endpoint = <&tcon_tv1_in_tcon_top_mixer0>; + }; + }; + + tcon_top_mixer1_in: port@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + + tcon_top_mixer1_in_mixer1: endpoint@1 { + reg = <1>; + remote-endpoint = <&mixer1_out_tcon_top>; + }; + }; + + tcon_top_mixer1_out: port@3 { + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + + tcon_top_mixer1_out_tcon_lcd0: endpoint@0 { + reg = <0>; + }; + + tcon_top_mixer1_out_tcon_lcd1: endpoint@1 { + reg = <1>; + }; + + tcon_top_mixer1_out_tcon_tv0: endpoint@2 { + reg = <2>; + remote-endpoint = <&tcon_tv0_in_tcon_top_mixer1>; + }; + + tcon_top_mixer1_out_tcon_tv1: endpoint@3 { + reg = <3>; + remote-endpoint = <&tcon_tv1_in_tcon_top_mixer1>; + }; + }; + + tcon_top_hdmi_in: port@4 { + #address-cells = <1>; + #size-cells = <0>; + reg = <4>; + + tcon_top_hdmi_in_tcon_tv0: endpoint@0 { + reg = <0>; + remote-endpoint = <&tcon_tv0_out_tcon_top>; + }; + + tcon_top_hdmi_in_tcon_tv1: endpoint@1 { + reg = 
<1>; + remote-endpoint = <&tcon_tv1_out_tcon_top>; + }; + }; + + tcon_top_hdmi_out: port@5 { + reg = <5>; + + tcon_top_hdmi_out_hdmi: endpoint { + remote-endpoint = <&hdmi_in_tcon_top>; + }; + }; + }; + }; + +... diff --git a/Documentation/devicetree/bindings/display/allwinner,sun9i-a80-deu.yaml b/Documentation/devicetree/bindings/display/allwinner,sun9i-a80-deu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..96de41d32b3eb741cbbcb5267f34f1ad052d76c9 --- /dev/null +++ b/Documentation/devicetree/bindings/display/allwinner,sun9i-a80-deu.yaml @@ -0,0 +1,133 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/allwinner,sun9i-a80-deu.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Allwinner A80 Detail Enhancement Unit Device Tree Bindings + +maintainers: + - Chen-Yu Tsai + - Maxime Ripard + +description: | + The DEU (Detail Enhancement Unit), found in the Allwinner A80 SoC, + can sharpen the display content in both luma and chroma channels. + +properties: + compatible: + const: allwinner,sun9i-a80-deu + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + clocks: + items: + - description: The DEU interface clock + - description: The DEU module clock + - description: The DEU DRAM clock + + clock-names: + items: + - const: ahb + - const: mod + - const: ram + + resets: + maxItems: 1 + + ports: + type: object + description: | + A ports node with endpoint definitions as defined in + Documentation/devicetree/bindings/media/video-interfaces.txt. + + properties: + "#address-cells": + const: 1 + + "#size-cells": + const: 0 + + port@0: + type: object + description: | + Input endpoints of the controller. + + port@1: + type: object + description: | + Output endpoints of the controller. + + required: + - "#address-cells" + - "#size-cells" + - port@0 + - port@1 + + additionalProperties: false + +required: + - compatible + - reg + - interrupts + - clocks + - clock-names + - resets + - ports + +additionalProperties: false + +examples: + - | + #include + + #include + #include + + deu0: deu@3300000 { + compatible = "allwinner,sun9i-a80-deu"; + reg = <0x03300000 0x40000>; + interrupts = ; + clocks = <&de_clocks CLK_BUS_DEU0>, + <&de_clocks CLK_IEP_DEU0>, + <&de_clocks CLK_DRAM_DEU0>; + clock-names = "ahb", + "mod", + "ram"; + resets = <&de_clocks RST_DEU0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + deu0_in: port@0 { + reg = <0>; + + deu0_in_fe0: endpoint { + remote-endpoint = <&fe0_out_deu0>; + }; + }; + + deu0_out: port@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + deu0_out_be0: endpoint@0 { + reg = <0>; + remote-endpoint = <&be0_in_deu0>; + }; + + deu0_out_be1: endpoint@1 { + reg = <1>; + remote-endpoint = <&be1_in_deu0>; + }; + }; + }; + }; + +... diff --git a/Documentation/devicetree/bindings/display/msm/dpu.txt b/Documentation/devicetree/bindings/display/msm/dpu.txt index a61dd40f3792b3bdf50924f978bfb372fdb7cbc4..551ae26f60da74ebb40f141ce85b1582a22a0c41 100644 --- a/Documentation/devicetree/bindings/display/msm/dpu.txt +++ b/Documentation/devicetree/bindings/display/msm/dpu.txt @@ -8,7 +8,7 @@ The DPU display controller is found in SDM845 SoC. MDSS: Required properties: -- compatible: "qcom,sdm845-mdss" +- compatible: "qcom,sdm845-mdss", "qcom,sc7180-mdss" - reg: physical base address and length of contoller's registers. - reg-names: register region names. 
The following region is required:
  * "mdss"
@@ -41,7 +41,7 @@ Optional properties:
 MDP:
 Required properties:
-- compatible: "qcom,sdm845-dpu"
+- compatible: "qcom,sdm845-dpu", "qcom,sc7180-dpu"
 - reg: physical base address and length of controller's registers.
 - reg-names : register region names. The following region is required:
   * "mdp"
diff --git a/Documentation/devicetree/bindings/display/msm/gpu.txt b/Documentation/devicetree/bindings/display/msm/gpu.txt
index 2b8fd26c43b07764d7d42569f6cfb828d4b60da8..7edc298a15f297320c70ebda2e8be516775bd74b 100644
--- a/Documentation/devicetree/bindings/display/msm/gpu.txt
+++ b/Documentation/devicetree/bindings/display/msm/gpu.txt
@@ -23,13 +23,18 @@ Required properties:
 - iommus: optional phandle to an adreno iommu instance
 - operating-points-v2: optional phandle to the OPP operating points
 - interconnects: optional phandle to an interconnect provider.  See
-  ../interconnect/interconnect.txt for details.
+  ../interconnect/interconnect.txt for details. Some A3xx and all A4xx platforms
+  will have two paths; all others will have one path.
+- interconnect-names: The names of the interconnect paths that correspond to the
+  interconnects property. Values must be gfx-mem and ocmem.
 - qcom,gmu: For GMU attached devices a phandle to the GMU device that will
   control the power for the GPU. Applicable targets:
     - qcom,adreno-630.2
 - zap-shader: For a5xx and a6xx devices this node contains a memory-region that
   points to reserved memory to store the zap shader that can be used to help
   bring the GPU out of secure mode.
+- firmware-name: optional property of the 'zap-shader' node, listing the
+  relative path of the device specific zap firmware.

 Example 3xx/4xx/a5xx:

@@ -76,11 +81,13 @@ Example a6xx (with GMU):
 		operating-points-v2 = <&gpu_opp_table>;

 		interconnects = <&rsc_hlos MASTER_GFX3D &rsc_hlos SLAVE_EBI1>;
+		interconnect-names = "gfx-mem";

 		qcom,gmu = <&gmu>;

 		zap-shader {
 			memory-region = <&zap_shader_region>;
+			firmware-name = "qcom/LENOVO/81JL/qcdxkmsuc850.mbn";
 		};
 	};
 };
diff --git a/Documentation/devicetree/bindings/display/panel/ampire,am-480272h3tmqw-t01h.yaml b/Documentation/devicetree/bindings/display/panel/ampire,am-480272h3tmqw-t01h.yaml
deleted file mode 100644
index c6e33e7f36d083149b6db07340b26b4275ef707e..0000000000000000000000000000000000000000
--- a/Documentation/devicetree/bindings/display/panel/ampire,am-480272h3tmqw-t01h.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-%YAML 1.2
----
-$id: http://devicetree.org/schemas/display/panel/ampire,am-480272h3tmqw-t01h.yaml#
-$schema: http://devicetree.org/meta-schemas/core.yaml#
-
-title: Ampire AM-480272H3TMQW-T01H 4.3" WQVGA TFT LCD panel
-
-maintainers:
-  - Yannick Fertre
-  - Thierry Reding
-
-allOf:
-  - $ref: panel-common.yaml#
-
-properties:
-  compatible:
-    const: ampire,am-480272h3tmqw-t01h
-
-  power-supply: true
-  enable-gpios: true
-  backlight: true
-  port: true
-
-required:
-  - compatible
-
-additionalProperties: false
-
-examples:
-  - |
-    panel_rgb: panel {
-      compatible = "ampire,am-480272h3tmqw-t01h";
-      enable-gpios = <&gpioa 8 1>;
-      port {
-        panel_in_rgb: endpoint {
-          remote-endpoint = <&controller_out_rgb>;
-        };
-      };
-    };
-
-...
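
The dedicated panel binding removed here (and the Ampire, GiantPlus and Sharp ones below) are absorbed into the consolidated panel-simple binding added later in this series. As a sketch of the migration, the example just deleted would be rewritten as follows; the vcc_lcd_reg regulator label is an assumption, added because the consolidated schema makes power-supply required:

    panel {
        compatible = "ampire,am-480272h3tmqw-t01h";
        power-supply = <&vcc_lcd_reg>; /* assumed regulator label */
        enable-gpios = <&gpioa 8 1>;

        port {
            panel_in_rgb: endpoint {
                remote-endpoint = <&controller_out_rgb>;
            };
        };
    };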
diff --git a/Documentation/devicetree/bindings/display/panel/ampire,am800480r3tmqwa1h.txt b/Documentation/devicetree/bindings/display/panel/ampire,am800480r3tmqwa1h.txt
deleted file mode 100644
index 83e2cae1cc1b48eb7830e923322fa38f043537d5..0000000000000000000000000000000000000000
--- a/Documentation/devicetree/bindings/display/panel/ampire,am800480r3tmqwa1h.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Ampire AM-800480R3TMQW-A1H 7.0" WVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "ampire,am800480r3tmqwa1h"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/giantplus,gpm940b0.txt b/Documentation/devicetree/bindings/display/panel/giantplus,gpm940b0.txt
deleted file mode 100644
index 3dab52f92c26f1b3d47bdf00c51514d7e65e079e..0000000000000000000000000000000000000000
--- a/Documentation/devicetree/bindings/display/panel/giantplus,gpm940b0.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-GiantPlus 3.0" (320x240 pixels) 24-bit TFT LCD panel
-
-Required properties:
-- compatible: should be "giantplus,gpm940b0"
-- power-supply: as specified in the base binding
-
-Optional properties:
-- backlight: as specified in the base binding
-- enable-gpios: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8fe60ee2531c688e3a21b6258ce35f65355c2560
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/panel-simple.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Simple panels with one power supply
+
+maintainers:
+  - Thierry Reding
+  - Sam Ravnborg
+
+description: |
+  This binding file is a collection of the simple (dumb) panels that
+  require only a single power-supply.
+  Optionally, a backlight and an enable GPIO may be present.
+  The panel may use an OF graph binding for the association to the display,
+  or it may be a direct child node of the display.
+
+  If the panel is more advanced, a dedicated binding file is required.
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+
+  compatible:
+    enum:
+      # compatible must be listed in alphabetical order, ordered by compatible.
+      # The description in the comment is mandatory for each compatible.
+
+      # Ampire AM-480272H3TMQW-T01H 4.3" WQVGA TFT LCD panel
+      - ampire,am-480272h3tmqw-t01h
+      # Ampire AM-800480R3TMQW-A1H 7.0" WVGA TFT LCD panel
+      - ampire,am800480r3tmqwa1h
+      # AUO B116XAK01 eDP TFT LCD panel
+      - auo,b116xa01
+      # BOE NV140FHM-N49 14.0" FHD a-Si FT panel
+      - boe,nv140fhmn49
+      # GiantPlus GPM940B0 3.0" QVGA TFT LCD panel
+      - giantplus,gpm940b0
+      # Satoz SAT050AT40H12R2 5.0" WVGA TFT LCD panel
+      - satoz,sat050at40h12r2
+      # Sharp LS020B1DD01D 2.0" HQVGA TFT LCD panel
+      - sharp,ls020b1dd01d
+
+  backlight: true
+  enable-gpios: true
+  port: true
+  power-supply: true
+
+additionalProperties: false
+
+required:
+  - compatible
+  - power-supply
+
+examples:
+  - |
+    panel_rgb: panel-rgb {
+      compatible = "ampire,am-480272h3tmqw-t01h";
+      power-supply = <&vcc_lcd_reg>;
+
+      port {
+        panel_in_rgb: endpoint {
+          remote-endpoint = <&ltdc_out_rgb>;
+        };
+      };
+    };
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,ls020b1dd01d.txt b/Documentation/devicetree/bindings/display/panel/sharp,ls020b1dd01d.txt
deleted file mode 100644
index e45edbc565a35a5474fd0324721cedcaea52e004..0000000000000000000000000000000000000000
--- a/Documentation/devicetree/bindings/display/panel/sharp,ls020b1dd01d.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-Sharp 2.0" (240x160 pixels) 16-bit TFT LCD panel
-
-Required properties:
-- compatible: should be "sharp,ls020b1dd01d"
-- power-supply: as specified in the base binding
-
-Optional properties:
-- backlight: as specified in the base binding
-- enable-gpios: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
deleted file mode 100644
index 31ab72cba3d47e5ba47701342404c8cfa53809c4..0000000000000000000000000000000000000000
--- a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
+++ /dev/null
@@ -1,637 +0,0 @@
-Allwinner A10 Display Pipeline
-==============================
-
-The Allwinner A10 Display pipeline is composed of several components
-that are going to be documented below:
-
-For all connections between components up to the TCONs in the display
-pipeline, when there are multiple components of the same type at the
-same depth, the local endpoint ID must be the same as the remote
-component's index. For example, if the remote endpoint is Frontend 1,
-then the local endpoint ID must be 1.
-
-    Frontend 0  [0] ------- [0]  Backend 0  [0] ------- [0]  TCON 0
-                [1] --   -- [1]             [1] --   -- [1]
-                      \ /                         \ /
-                       X                           X
-                      / \                         / \
-                [0] --   -- [0]             [0] --   -- [0]
-    Frontend 1  [1] ------- [1]  Backend 1  [1] ------- [1]  TCON 1
-
-For a two pipeline system such as the one depicted above, the lines
-represent the connections between the components, while the numbers
-within the square brackets corresponds to the ID of the local endpoint.
-
-The same rule also applies to DE 2.0 mixer-TCON connections:
-
-    Mixer 0  [0] ----------- [0]  TCON 0
-             [1] ----   ---- [1]
-                     \ /
-                      X
-                     / \
-             [0] ----   ---- [0]
-    Mixer 1  [1] ----------- [1]  TCON 1
-
-HDMI Encoder
-------------
-
-The HDMI Encoder supports the HDMI video and audio outputs, and does
-CEC. It is one end of the pipeline.
- -Required properties: - - compatible: value must be one of: - * allwinner,sun4i-a10-hdmi - * allwinner,sun5i-a10s-hdmi - * allwinner,sun6i-a31-hdmi - - reg: base address and size of memory-mapped region - - interrupts: interrupt associated to this IP - - clocks: phandles to the clocks feeding the HDMI encoder - * ahb: the HDMI interface clock - * mod: the HDMI module clock - * ddc: the HDMI ddc clock (A31 only) - * pll-0: the first video PLL - * pll-1: the second video PLL - - clock-names: the clock names mentioned above - - resets: phandle to the reset control for the HDMI encoder (A31 only) - - dmas: phandles to the DMA channels used by the HDMI encoder - * ddc-tx: The channel for DDC transmission - * ddc-rx: The channel for DDC reception - * audio-tx: The channel used for audio transmission - - dma-names: the channel names mentioned above - - - ports: A ports node with endpoint definitions as defined in - Documentation/devicetree/bindings/media/video-interfaces.txt. The - first port should be the input endpoint. The second should be the - output, usually to an HDMI connector. - -DWC HDMI TX Encoder -------------------- - -The HDMI transmitter is a Synopsys DesignWare HDMI 1.4 TX controller IP -with Allwinner's own PHY IP. It supports audio and video outputs and CEC. - -These DT bindings follow the Synopsys DWC HDMI TX bindings defined in -Documentation/devicetree/bindings/display/bridge/dw_hdmi.txt with the -following device-specific properties. - -Required properties: - - - compatible: value must be one of: - * "allwinner,sun8i-a83t-dw-hdmi" - * "allwinner,sun50i-a64-dw-hdmi", "allwinner,sun8i-a83t-dw-hdmi" - * "allwinner,sun50i-h6-dw-hdmi" - - reg: base address and size of memory-mapped region - - reg-io-width: See dw_hdmi.txt. Shall be 1. - - interrupts: HDMI interrupt number - - clocks: phandles to the clocks feeding the HDMI encoder - * iahb: the HDMI bus clock - * isfr: the HDMI register clock - * tmds: TMDS clock - * cec: HDMI CEC clock (H6 only) - * hdcp: HDCP clock (H6 only) - * hdcp-bus: HDCP bus clock (H6 only) - - clock-names: the clock names mentioned above - - resets: - * ctrl: HDMI controller reset - * hdcp: HDCP reset (H6 only) - - reset-names: reset names mentioned above - - phys: phandle to the DWC HDMI PHY - - phy-names: must be "phy" - - - ports: A ports node with endpoint definitions as defined in - Documentation/devicetree/bindings/media/video-interfaces.txt. The - first port should be the input endpoint. The second should be the - output, usually to an HDMI connector. - -Optional properties: - - hvcc-supply: the VCC power supply of the controller - -DWC HDMI PHY ------------- - -Required properties: - - compatible: value must be one of: - * allwinner,sun8i-a83t-hdmi-phy - * allwinner,sun8i-h3-hdmi-phy - * allwinner,sun8i-r40-hdmi-phy - * allwinner,sun50i-a64-hdmi-phy - * allwinner,sun50i-h6-hdmi-phy - - reg: base address and size of memory-mapped region - - clocks: phandles to the clocks feeding the HDMI PHY - * bus: the HDMI PHY interface clock - * mod: the HDMI PHY module clock - - clock-names: the clock names mentioned above - - resets: phandle to the reset controller driving the PHY - - reset-names: must be "phy" - -H3, A64 and R40 HDMI PHY require additional clocks: - - pll-0: parent of phy clock - - pll-1: second possible phy clock parent (A64/R40 only) - -TV Encoder ----------- - -The TV Encoder supports the composite and VGA output. It is one end of -the pipeline. - -Required properties: - - compatible: value should be "allwinner,sun4i-a10-tv-encoder". 
- - reg: base address and size of memory-mapped region - - clocks: the clocks driving the TV encoder - - resets: phandle to the reset controller driving the encoder - -- ports: A ports node with endpoint definitions as defined in - Documentation/devicetree/bindings/media/video-interfaces.txt. The - first port should be the input endpoint. - -TCON ----- - -The TCON acts as a timing controller for RGB, LVDS and TV interfaces. - -Required properties: - - compatible: value must be either: - * allwinner,sun4i-a10-tcon - * allwinner,sun5i-a13-tcon - * allwinner,sun6i-a31-tcon - * allwinner,sun6i-a31s-tcon - * allwinner,sun7i-a20-tcon - * allwinner,sun8i-a23-tcon - * allwinner,sun8i-a33-tcon - * allwinner,sun8i-a83t-tcon-lcd - * allwinner,sun8i-a83t-tcon-tv - * allwinner,sun8i-r40-tcon-tv - * allwinner,sun8i-v3s-tcon - * allwinner,sun9i-a80-tcon-lcd - * allwinner,sun9i-a80-tcon-tv - * "allwinner,sun50i-a64-tcon-lcd", "allwinner,sun8i-a83t-tcon-lcd" - * "allwinner,sun50i-a64-tcon-tv", "allwinner,sun8i-a83t-tcon-tv" - * allwinner,sun50i-h6-tcon-tv, allwinner,sun8i-r40-tcon-tv - - reg: base address and size of memory-mapped region - - interrupts: interrupt associated to this IP - - clocks: phandles to the clocks feeding the TCON. - - 'ahb': the interface clocks - - 'tcon-ch0': The clock driving the TCON channel 0, if supported - - resets: phandles to the reset controllers driving the encoder - - "lcd": the reset line for the TCON - - "edp": the reset line for the eDP block (A80 only) - - - clock-names: the clock names mentioned above - - reset-names: the reset names mentioned above - - clock-output-names: Name of the pixel clock created, if TCON supports - channel 0. - -- ports: A ports node with endpoint definitions as defined in - Documentation/devicetree/bindings/media/video-interfaces.txt. The - first port should be the input endpoint, the second one the output - - The output may have multiple endpoints. TCON can have 1 or 2 channels, - usually with the first channel being used for the panels interfaces - (RGB, LVDS, etc.), and the second being used for the outputs that - require another controller (TV Encoder, HDMI, etc.). The endpoints - will take an extra property, allwinner,tcon-channel, to specify the - channel the endpoint is associated to. If that property is not - present, the endpoint number will be used as the channel number. - -For TCONs with channel 0, there is one more clock required: - - 'tcon-ch0': The clock driving the TCON channel 0 -For TCONs with channel 1, there is one more clock required: - - 'tcon-ch1': The clock driving the TCON channel 1 - -When TCON support LVDS (all TCONs except TV TCONs on A83T, R40 and those found -in A13, H3, H5 and V3s SoCs), you need one more reset line: - - 'lvds': The reset line driving the LVDS logic - -And on the A23, A31, A31s and A33, you need one more clock line: - - 'lvds-alt': An alternative clock source, separate from the TCON channel 0 - clock, that can be used to drive the LVDS clock - -TCON TOP --------- - -TCON TOPs main purpose is to configure whole display pipeline. It determines -relationships between mixers and TCONs, selects source TCON for HDMI, muxes -LCD and TV encoder GPIO output, selects TV encoder clock source and contains -additional TV TCON and DSI gates. 
- -It allows display pipeline to be configured in very different ways: - - / LCD0/LVDS0 - / [0] TCON-LCD0 - | \ MIPI DSI - mixer0 | - \ / [1] TCON-LCD1 - LCD1/LVDS1 - TCON-TOP - / \ [2] TCON-TV0 [0] - TVE0/RGB - mixer1 | \ - | TCON-TOP - HDMI - | / - \ [3] TCON-TV1 [1] - TVE1/RGB - -Note that both TCON TOP references same physical unit. Both mixers can be -connected to any TCON. Not all TCON TOP variants support all features. - -Required properties: - - compatible: value must be one of: - * allwinner,sun8i-r40-tcon-top - * allwinner,sun50i-h6-tcon-top - - reg: base address and size of the memory-mapped region. - - clocks: phandle to the clocks feeding the TCON TOP - * bus: TCON TOP interface clock - * tcon-tv0: TCON TV0 clock - * tve0: TVE0 clock (R40 only) - * tcon-tv1: TCON TV1 clock (R40 only) - * tve1: TVE0 clock (R40 only) - * dsi: MIPI DSI clock (R40 only) - - clock-names: clock name mentioned above - - resets: phandle to the reset line driving the TCON TOP - - #clock-cells : must contain 1 - - clock-output-names: Names of clocks created for TCON TV0 channel clock, - TCON TV1 channel clock (R40 only) and DSI channel clock (R40 only), in - that order. - -- ports: A ports node with endpoint definitions as defined in - Documentation/devicetree/bindings/media/video-interfaces.txt. 6 ports should - be defined: - * port 0 is input for mixer0 mux - * port 1 is output for mixer0 mux - * port 2 is input for mixer1 mux - * port 3 is output for mixer1 mux - * port 4 is input for HDMI mux - * port 5 is output for HDMI mux - All output endpoints for mixer muxes and input endpoints for HDMI mux should - have reg property with the id of the target TCON, as shown in above graph - (0-3 for mixer muxes and 0-1 for HDMI mux). All ports should have only one - endpoint connected to remote endpoint. - -DRC ---- - -The DRC (Dynamic Range Controller), found in the latest Allwinner SoCs -(A31, A23, A33, A80), allows to dynamically adjust pixel -brightness/contrast based on histogram measurements for LCD content -adaptive backlight control. - - -Required properties: - - compatible: value must be one of: - * allwinner,sun6i-a31-drc - * allwinner,sun6i-a31s-drc - * allwinner,sun8i-a23-drc - * allwinner,sun8i-a33-drc - * allwinner,sun9i-a80-drc - - reg: base address and size of the memory-mapped region. - - interrupts: interrupt associated to this IP - - clocks: phandles to the clocks feeding the DRC - * ahb: the DRC interface clock - * mod: the DRC module clock - * ram: the DRC DRAM clock - - clock-names: the clock names mentioned above - - resets: phandles to the reset line driving the DRC - -- ports: A ports node with endpoint definitions as defined in - Documentation/devicetree/bindings/media/video-interfaces.txt. The - first port should be the input endpoints, the second one the outputs - -Display Engine Backend ----------------------- - -The display engine backend exposes layers and sprites to the -system. - -Required properties: - - compatible: value must be one of: - * allwinner,sun4i-a10-display-backend - * allwinner,sun5i-a13-display-backend - * allwinner,sun6i-a31-display-backend - * allwinner,sun7i-a20-display-backend - * allwinner,sun8i-a23-display-backend - * allwinner,sun8i-a33-display-backend - * allwinner,sun9i-a80-display-backend - - reg: base address and size of the memory-mapped region. 
- - interrupts: interrupt associated to this IP - - clocks: phandles to the clocks feeding the frontend and backend - * ahb: the backend interface clock - * mod: the backend module clock - * ram: the backend DRAM clock - - clock-names: the clock names mentioned above - - resets: phandles to the reset controllers driving the backend - -- ports: A ports node with endpoint definitions as defined in - Documentation/devicetree/bindings/media/video-interfaces.txt. The - first port should be the input endpoints, the second one the output - -On the A33, some additional properties are required: - - reg needs to have an additional region corresponding to the SAT - - reg-names need to be set, with "be" and "sat" - - clocks and clock-names need to have a phandle to the SAT bus - clocks, whose name will be "sat" - - resets and reset-names need to have a phandle to the SAT bus - resets, whose name will be "sat" - -DEU ---- - -The DEU (Detail Enhancement Unit), found in the Allwinner A80 SoC, -can sharpen the display content in both luma and chroma channels. - -Required properties: - - compatible: value must be one of: - * allwinner,sun9i-a80-deu - - reg: base address and size of the memory-mapped region. - - interrupts: interrupt associated to this IP - - clocks: phandles to the clocks feeding the DEU - * ahb: the DEU interface clock - * mod: the DEU module clock - * ram: the DEU DRAM clock - - clock-names: the clock names mentioned above - - resets: phandles to the reset line driving the DEU - -- ports: A ports node with endpoint definitions as defined in - Documentation/devicetree/bindings/media/video-interfaces.txt. The - first port should be the input endpoints, the second one the outputs - -Display Engine Frontend ------------------------ - -The display engine frontend does formats conversion, scaling, -deinterlacing and color space conversion. - -Required properties: - - compatible: value must be one of: - * allwinner,sun4i-a10-display-frontend - * allwinner,sun5i-a13-display-frontend - * allwinner,sun6i-a31-display-frontend - * allwinner,sun7i-a20-display-frontend - * allwinner,sun8i-a23-display-frontend - * allwinner,sun8i-a33-display-frontend - * allwinner,sun9i-a80-display-frontend - - reg: base address and size of the memory-mapped region. - - interrupts: interrupt associated to this IP - - clocks: phandles to the clocks feeding the frontend and backend - * ahb: the backend interface clock - * mod: the backend module clock - * ram: the backend DRAM clock - - clock-names: the clock names mentioned above - - resets: phandles to the reset controllers driving the backend - -- ports: A ports node with endpoint definitions as defined in - Documentation/devicetree/bindings/media/video-interfaces.txt. The - first port should be the input endpoints, the second one the outputs - -Display Engine 2.0 Mixer ------------------------- - -The DE2 mixer have many functionalities, currently only layer blending is -supported. - -Required properties: - - compatible: value must be one of: - * allwinner,sun8i-a83t-de2-mixer-0 - * allwinner,sun8i-a83t-de2-mixer-1 - * allwinner,sun8i-h3-de2-mixer-0 - * allwinner,sun8i-r40-de2-mixer-0 - * allwinner,sun8i-r40-de2-mixer-1 - * allwinner,sun8i-v3s-de2-mixer - * allwinner,sun50i-a64-de2-mixer-0 - * allwinner,sun50i-a64-de2-mixer-1 - * allwinner,sun50i-h6-de3-mixer-0 - - reg: base address and size of the memory-mapped region. 
- - clocks: phandles to the clocks feeding the mixer - * bus: the mixer interface clock - * mod: the mixer module clock - - clock-names: the clock names mentioned above - - resets: phandles to the reset controllers driving the mixer - -- ports: A ports node with endpoint definitions as defined in - Documentation/devicetree/bindings/media/video-interfaces.txt. The - first port should be the input endpoints, the second one the output - - -Display Engine Pipeline ------------------------ - -The display engine pipeline (and its entry point, since it can be -either directly the backend or the frontend) is represented as an -extra node. - -Required properties: - - compatible: value must be one of: - * allwinner,sun4i-a10-display-engine - * allwinner,sun5i-a10s-display-engine - * allwinner,sun5i-a13-display-engine - * allwinner,sun6i-a31-display-engine - * allwinner,sun6i-a31s-display-engine - * allwinner,sun7i-a20-display-engine - * allwinner,sun8i-a23-display-engine - * allwinner,sun8i-a33-display-engine - * allwinner,sun8i-a83t-display-engine - * allwinner,sun8i-h3-display-engine - * allwinner,sun8i-r40-display-engine - * allwinner,sun8i-v3s-display-engine - * allwinner,sun9i-a80-display-engine - * allwinner,sun50i-a64-display-engine - * allwinner,sun50i-h6-display-engine - - - allwinner,pipelines: list of phandle to the display engine - frontends (DE 1.0) or mixers (DE 2.0/3.0) available. - -Example: - -panel: panel { - compatible = "olimex,lcd-olinuxino-43-ts"; - #address-cells = <1>; - #size-cells = <0>; - - port { - #address-cells = <1>; - #size-cells = <0>; - - panel_input: endpoint { - remote-endpoint = <&tcon0_out_panel>; - }; - }; -}; - -connector { - compatible = "hdmi-connector"; - type = "a"; - - port { - hdmi_con_in: endpoint { - remote-endpoint = <&hdmi_out_con>; - }; - }; -}; - -hdmi: hdmi@1c16000 { - compatible = "allwinner,sun5i-a10s-hdmi"; - reg = <0x01c16000 0x1000>; - interrupts = <58>; - clocks = <&ccu CLK_AHB_HDMI>, <&ccu CLK_HDMI>, - <&ccu CLK_PLL_VIDEO0_2X>, - <&ccu CLK_PLL_VIDEO1_2X>; - clock-names = "ahb", "mod", "pll-0", "pll-1"; - dmas = <&dma SUN4I_DMA_NORMAL 16>, - <&dma SUN4I_DMA_NORMAL 16>, - <&dma SUN4I_DMA_DEDICATED 24>; - dma-names = "ddc-tx", "ddc-rx", "audio-tx"; - - ports { - #address-cells = <1>; - #size-cells = <0>; - - port@0 { - #address-cells = <1>; - #size-cells = <0>; - reg = <0>; - - hdmi_in_tcon0: endpoint { - remote-endpoint = <&tcon0_out_hdmi>; - }; - }; - - port@1 { - #address-cells = <1>; - #size-cells = <0>; - reg = <1>; - - hdmi_out_con: endpoint { - remote-endpoint = <&hdmi_con_in>; - }; - }; - }; -}; - -tve0: tv-encoder@1c0a000 { - compatible = "allwinner,sun4i-a10-tv-encoder"; - reg = <0x01c0a000 0x1000>; - clocks = <&ahb_gates 34>; - resets = <&tcon_ch0_clk 0>; - - port { - #address-cells = <1>; - #size-cells = <0>; - - tve0_in_tcon0: endpoint@0 { - reg = <0>; - remote-endpoint = <&tcon0_out_tve0>; - }; - }; -}; - -tcon0: lcd-controller@1c0c000 { - compatible = "allwinner,sun5i-a13-tcon"; - reg = <0x01c0c000 0x1000>; - interrupts = <44>; - resets = <&tcon_ch0_clk 1>; - reset-names = "lcd"; - clocks = <&ahb_gates 36>, - <&tcon_ch0_clk>, - <&tcon_ch1_clk>; - clock-names = "ahb", - "tcon-ch0", - "tcon-ch1"; - clock-output-names = "tcon-pixel-clock"; - - ports { - #address-cells = <1>; - #size-cells = <0>; - - tcon0_in: port@0 { - #address-cells = <1>; - #size-cells = <0>; - reg = <0>; - - tcon0_in_be0: endpoint@0 { - reg = <0>; - remote-endpoint = <&be0_out_tcon0>; - }; - }; - - tcon0_out: port@1 { - #address-cells = <1>; - #size-cells = 
<0>; - reg = <1>; - - tcon0_out_panel: endpoint@0 { - reg = <0>; - remote-endpoint = <&panel_input>; - }; - - tcon0_out_tve0: endpoint@1 { - reg = <1>; - remote-endpoint = <&tve0_in_tcon0>; - }; - }; - }; -}; - -fe0: display-frontend@1e00000 { - compatible = "allwinner,sun5i-a13-display-frontend"; - reg = <0x01e00000 0x20000>; - interrupts = <47>; - clocks = <&ahb_gates 46>, <&de_fe_clk>, - <&dram_gates 25>; - clock-names = "ahb", "mod", - "ram"; - resets = <&de_fe_clk>; - - ports { - #address-cells = <1>; - #size-cells = <0>; - - fe0_out: port@1 { - #address-cells = <1>; - #size-cells = <0>; - reg = <1>; - - fe0_out_be0: endpoint { - remote-endpoint = <&be0_in_fe0>; - }; - }; - }; -}; - -be0: display-backend@1e60000 { - compatible = "allwinner,sun5i-a13-display-backend"; - reg = <0x01e60000 0x10000>; - interrupts = <47>; - clocks = <&ahb_gates 44>, <&de_be_clk>, - <&dram_gates 26>; - clock-names = "ahb", "mod", - "ram"; - resets = <&de_be_clk>; - - ports { - #address-cells = <1>; - #size-cells = <0>; - - be0_in: port@0 { - #address-cells = <1>; - #size-cells = <0>; - reg = <0>; - - be0_in_fe0: endpoint@0 { - reg = <0>; - remote-endpoint = <&fe0_out_be0>; - }; - }; - - be0_out: port@1 { - #address-cells = <1>; - #size-cells = <0>; - reg = <1>; - - be0_out_tcon0: endpoint@0 { - reg = <0>; - remote-endpoint = <&tcon0_in_be0>; - }; - }; - }; -}; - -display-engine { - compatible = "allwinner,sun5i-a13-display-engine"; - allwinner,pipelines = <&fe0>; -}; diff --git a/Documentation/devicetree/bindings/i2c/i2c-at91.txt b/Documentation/devicetree/bindings/i2c/i2c-at91.txt index 2210f4359c4573a1d74af7dad7dbdc2ed6741d59..8347b1e7c08071023a149cbc5e75e83cded92a8d 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-at91.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-at91.txt @@ -18,8 +18,10 @@ Optional properties: - dma-names: should contain "tx" and "rx". - atmel,fifo-size: maximum number of data the RX and TX FIFOs can store for FIFO capable I2C controllers. -- i2c-sda-hold-time-ns: TWD hold time, only available for "atmel,sama5d4-i2c" - and "atmel,sama5d2-i2c". +- i2c-sda-hold-time-ns: TWD hold time, only available for: + "atmel,sama5d4-i2c", + "atmel,sama5d2-i2c", + "microchip,sam9x60-i2c". - Child nodes conforming to i2c bus binding Examples : diff --git a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt index 27e1b4cebfbd42ad0923bf6bd90521a50a7a925b..6bdcc3f84bd3c4ec25b23bd04309d4b4b322a9dd 100644 --- a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt +++ b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt @@ -10,7 +10,6 @@ Required properties: - #size-cells: 0 - spi-max-frequency: Maximum frequency of the SPI bus the chip can operate at should be less than or equal to 18 MHz. - - device-wake-gpios: Wake up GPIO to wake up the TCAN device. - interrupt-parent: the phandle to the interrupt controller which provides the interrupt. - interrupts: interrupt specification for data-ready. @@ -23,6 +22,7 @@ Optional properties: reset. - device-state-gpios: Input GPIO that indicates if the device is in a sleep state or if the device is active. + - device-wake-gpios: Wake up GPIO to wake up the TCAN device. 
Example: tcan4x5x: tcan4x5x@0 { @@ -36,5 +36,5 @@ tcan4x5x: tcan4x5x@0 { interrupts = <14 GPIO_ACTIVE_LOW>; device-state-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>; device-wake-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>; - reset-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>; + reset-gpios = <&gpio1 27 GPIO_ACTIVE_HIGH>; }; diff --git a/Documentation/devicetree/bindings/net/snps,dwmac.yaml b/Documentation/devicetree/bindings/net/snps,dwmac.yaml index 4845e29411e460151458a97a39256cff9deff8e7..e08cd4c4d5682f7e7e7b53df24c6b36891d28226 100644 --- a/Documentation/devicetree/bindings/net/snps,dwmac.yaml +++ b/Documentation/devicetree/bindings/net/snps,dwmac.yaml @@ -347,6 +347,7 @@ allOf: - st,spear600-gmac then: + properties: snps,tso: $ref: /schemas/types.yaml#definitions/flag description: diff --git a/Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt b/Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt index 6e5341b4f8919ecda07304b5a2406f33ffa2c00f..ee59409640f244ebf5d985c18f46d91d3fbd9f6a 100644 --- a/Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt +++ b/Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt @@ -22,6 +22,6 @@ Example: }; ðernet_switch { - resets = <&reset>; + resets = <&reset 26>; reset-names = "switch"; }; diff --git a/Documentation/devicetree/bindings/spi/spi-controller.yaml b/Documentation/devicetree/bindings/spi/spi-controller.yaml index 732339275848c12a52c438656f97e3e74a44b9ff..1e0ca6ccf64bbd0a2e257d8dbcda2d829fcb742f 100644 --- a/Documentation/devicetree/bindings/spi/spi-controller.yaml +++ b/Documentation/devicetree/bindings/spi/spi-controller.yaml @@ -111,7 +111,7 @@ patternProperties: spi-rx-bus-width: allOf: - $ref: /schemas/types.yaml#/definitions/uint32 - - enum: [ 1, 2, 4 ] + - enum: [ 1, 2, 4, 8 ] - default: 1 description: Bus width to the SPI bus used for MISO. @@ -123,7 +123,7 @@ patternProperties: spi-tx-bus-width: allOf: - $ref: /schemas/types.yaml#/definitions/uint32 - - enum: [ 1, 2, 4 ] + - enum: [ 1, 2, 4, 8 ] - default: 1 description: Bus width to the SPI bus used for MOSI. diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml index 4e6248ec5ed9a8a5a8954fcecbdf15aa8de1e64c..835579edc9718eb8e7a45dfa2d2099d145252703 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.yaml +++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml @@ -825,6 +825,8 @@ patternProperties: description: Sancloud Ltd "^sandisk,.*": description: Sandisk Corporation + "^satoz,.*": + description: Satoz International Co., Ltd "^sbs,.*": description: Smart Battery System "^schindler,.*": diff --git a/Documentation/features/debug/gcov-profile-all/arch-support.txt b/Documentation/features/debug/gcov-profile-all/arch-support.txt index 059d58a549c7a6e71da1eae8408f5b6892e69c02..6fb2b0671994efd5d526762e58ef6f2b242a1276 100644 --- a/Documentation/features/debug/gcov-profile-all/arch-support.txt +++ b/Documentation/features/debug/gcov-profile-all/arch-support.txt @@ -23,7 +23,7 @@ | openrisc: | TODO | | parisc: | TODO | | powerpc: | ok | - | riscv: | TODO | + | riscv: | ok | | s390: | ok | | sh: | ok | | sparc: | TODO | diff --git a/Documentation/kbuild/kconfig-language.rst b/Documentation/kbuild/kconfig-language.rst index 74bef19f69f0a8078f2f988358ff57046b46210b..231e6a64957fcf12915c41b6a8d48d6978fa2690 100644 --- a/Documentation/kbuild/kconfig-language.rst +++ b/Documentation/kbuild/kconfig-language.rst @@ -196,14 +196,11 @@ applicable everywhere (see syntax). 
  or equal to the first symbol and smaller than or equal to the second
  symbol.
 
-- help text: "help" or "---help---"
+- help text: "help"
 
   This defines a help text. The end of the help text is determined by
   the indentation level, this means it ends at the first line which has
   a smaller indentation than the first line of the help text.
 
-  "---help---" and "help" do not differ in behaviour, "---help---" is
-  used to help visually separate configuration logic from help within
-  the file as an aid to developers.
-
 - misc options: "option" [=]
diff --git a/Documentation/kbuild/makefiles.rst b/Documentation/kbuild/makefiles.rst
index b9b50553bfc56b5b5b50ea9513700a4421f7e232..d7e6534a8505eeb4f9dcc24b01ef94d674e8bf3e 100644
--- a/Documentation/kbuild/makefiles.rst
+++ b/Documentation/kbuild/makefiles.rst
@@ -297,9 +297,19 @@ more details, with real examples.
 	If CONFIG_EXT2_FS is set to either 'y' (built-in) or 'm' (modular)
 	the corresponding obj- variable will be set, and kbuild will descend
 	down in the ext2 directory.
-	Kbuild only uses this information to decide that it needs to visit
-	the directory, it is the Makefile in the subdirectory that
-	specifies what is modular and what is built-in.
+
+	Kbuild uses this information not only to decide that it needs to visit
+	the directory, but also to decide whether or not to link objects from
+	the directory into vmlinux.
+
+	When Kbuild descends into the directory with 'y', all built-in objects
+	from that directory are combined into built-in.a, which is eventually
+	linked into vmlinux.
+
+	When Kbuild descends into the directory with 'm', in contrast, nothing
+	from that directory will be linked into vmlinux. If the Makefile in
+	that directory specifies obj-y, those objects will be left orphaned.
+	It is very likely a bug in the Makefile or in the Kconfig dependencies.
 
 	It is good practice to use a `CONFIG_` variable when assigning directory
 	names. This allows kbuild to totally skip the directory if the
diff --git a/Documentation/media/v4l-drivers/meye.rst b/Documentation/media/v4l-drivers/meye.rst
index a572996cdbf60ab602e3d4ddaaf4658b746eff6a..dc57a6a91b43f121d3258f7ce7924f7cf4fc676a 100644
--- a/Documentation/media/v4l-drivers/meye.rst
+++ b/Documentation/media/v4l-drivers/meye.rst
@@ -95,7 +95,7 @@ so all video4linux tools (like xawtv) should work with this driver.
 
 Besides the video4linux interface, the driver has a private interface
 for accessing the Motion Eye extended parameters (camera sharpness,
-agc, video framerate), the shapshot and the MJPEG capture facilities.
+agc, video framerate), the snapshot and the MJPEG capture facilities.
 This interface consists of several ioctls (prototypes and structures
 can be found in include/linux/meye.h):
diff --git a/Documentation/networking/dsa/sja1105.rst b/Documentation/networking/dsa/sja1105.rst
index eef20d0bcf7c15183b7db65ba847dd47f2c45b60..64553d8d91cb9a6e65f1ac1d73b4fbe554d4abde 100644
--- a/Documentation/networking/dsa/sja1105.rst
+++ b/Documentation/networking/dsa/sja1105.rst
@@ -230,12 +230,6 @@ simultaneously on two ports. The driver checks the consistency of the
 schedules against this restriction and errors out when appropriate.
 Schedule analysis is needed to avoid this, which is outside the scope of the
 document.
 
-At the moment, the time-aware scheduler can only be triggered based on a
-standalone clock and not based on PTP time. This means the base-time argument
-from tc-taprio is ignored and the schedule starts right away.
It also means it -is more difficult to phase-align the scheduler with the other devices in the -network. - Device Tree bindings and board design ===================================== diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index fd26788e8c96c66716babef9683733af4deefb1a..48ccb1b31160287f5402b93a3105ff6f6aff0257 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -603,7 +603,7 @@ tcp_synack_retries - INTEGER with the current initial RTO of 1second. With this the final timeout for a passive TCP connection will happen after 63seconds. -tcp_syncookies - BOOLEAN +tcp_syncookies - INTEGER Only valid when the kernel was compiled with CONFIG_SYN_COOKIES Send out syncookies when the syn backlog queue of a socket overflows. This is to prevent against the common 'SYN flood attack' diff --git a/Documentation/networking/j1939.rst b/Documentation/networking/j1939.rst index dc60b13fcd09653a95eda517e9ccdc38e50c26cf..f5be243d250a40e75d8bdef11b2489b56d7af691 100644 --- a/Documentation/networking/j1939.rst +++ b/Documentation/networking/j1939.rst @@ -339,7 +339,7 @@ To claim an address following code example can be used: .pgn = J1939_PGN_ADDRESS_CLAIMED, .pgn_mask = J1939_PGN_PDU1_MAX, }, { - .pgn = J1939_PGN_ADDRESS_REQUEST, + .pgn = J1939_PGN_REQUEST, .pgn_mask = J1939_PGN_PDU1_MAX, }, { .pgn = J1939_PGN_ADDRESS_COMMANDED, diff --git a/Documentation/networking/netdev-FAQ.rst b/Documentation/networking/netdev-FAQ.rst index 642fa963be3cf8f325c29947072e6c6851213d4b..d5c9320901c3273b99bcc1b4cda78c5472d9f6fd 100644 --- a/Documentation/networking/netdev-FAQ.rst +++ b/Documentation/networking/netdev-FAQ.rst @@ -34,8 +34,8 @@ the names, the ``net`` tree is for fixes to existing code already in the mainline tree from Linus, and ``net-next`` is where the new code goes for the future release. You can find the trees here: -- https://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git -- https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git +- https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git +- https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git Q: How often do changes from these trees make it to the mainline Linus tree? ---------------------------------------------------------------------------- diff --git a/Documentation/process/embargoed-hardware-issues.rst b/Documentation/process/embargoed-hardware-issues.rst index 799580acc8dec621b04aabec3211f4e2a78727cc..5d54946cfc750a9a86a6238cc2939dfe8ba3ca6c 100644 --- a/Documentation/process/embargoed-hardware-issues.rst +++ b/Documentation/process/embargoed-hardware-issues.rst @@ -255,7 +255,7 @@ an involved disclosed party. The current ambassadors list: Red Hat Josh Poimboeuf SUSE Jiri Kosina - Amazon + Amazon Peter Bowen Google Kees Cook ============= ======================================================== diff --git a/Documentation/process/index.rst b/Documentation/process/index.rst index 21aa7d5358e621ab991ff9116278ce35c64b41ad..6399d92f0b21d6db95a6c4893dde1f0d243108c8 100644 --- a/Documentation/process/index.rst +++ b/Documentation/process/index.rst @@ -60,6 +60,7 @@ lack of a better place. volatile-considered-harmful botching-up-ioctls clang-format + ../riscv/patch-acceptance .. 
only:: subproject and html
diff --git a/Documentation/riscv/index.rst b/Documentation/riscv/index.rst
index 215fd3c1f2d57f5397b469ee6c42b17be7025732..fa33bffd8992771153162189cc05753d844097a8 100644
--- a/Documentation/riscv/index.rst
+++ b/Documentation/riscv/index.rst
@@ -7,6 +7,7 @@ RISC-V architecture
 
     boot-image-header
     pmu
+    patch-acceptance
 
 .. only:: subproject and html
diff --git a/Documentation/riscv/patch-acceptance.rst b/Documentation/riscv/patch-acceptance.rst
new file mode 100644
index 0000000000000000000000000000000000000000..dfe0ac5624fb240ce697a5067666cf46b2dc7209
--- /dev/null
+++ b/Documentation/riscv/patch-acceptance.rst
@@ -0,0 +1,35 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+arch/riscv maintenance guidelines for developers
+================================================
+
+Overview
+--------
+The RISC-V instruction set architecture is developed in the open:
+in-progress drafts are available for all to review and to experiment
+with implementations. New module or extension drafts can change
+during the development process - sometimes in ways that are
+incompatible with previous drafts. This flexibility can present a
+challenge for RISC-V Linux maintenance. Linux maintainers disapprove
+of churn, and the Linux development process prefers well-reviewed and
+tested code over experimental code. We wish to extend these same
+principles to the RISC-V-related code that will be accepted for
+inclusion in the kernel.
+
+Submit Checklist Addendum
+-------------------------
+We'll only accept patches for new modules or extensions if the
+specifications for those modules or extensions are listed as being
+"Frozen" or "Ratified" by the RISC-V Foundation. (Developers may, of
+course, maintain their own Linux kernel trees that contain code for
+any draft extensions that they wish.)
+
+Additionally, the RISC-V specification allows implementors to create
+their own custom extensions. These custom extensions aren't required
+to go through any review or ratification process by the RISC-V
+Foundation. To avoid the maintenance complexity and potential
+performance impact of adding kernel code for implementor-specific
+RISC-V extensions, we'll only accept patches for extensions that
+have been officially frozen or ratified by the RISC-V Foundation.
+(Implementors may, of course, maintain their own Linux kernel trees
+containing code for any custom extensions that they wish.)
diff --git a/MAINTAINERS b/MAINTAINERS index bfebe689ea7eb93aeb533ee8e1d6376c7d5ce9e2..f1e9be21171522f9b71d711b2b18f3797d46c6cc 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -720,7 +720,7 @@ F: Documentation/devicetree/bindings/i2c/i2c-altera.txt F: drivers/i2c/busses/i2c-altera.c ALTERA MAILBOX DRIVER -M: Ley Foon Tan +M: Ley Foon Tan L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers) S: Maintained F: drivers/mailbox/mailbox-altera.c @@ -771,6 +771,8 @@ F: drivers/thermal/thermal_mmio.c AMAZON ETHERNET DRIVERS M: Netanel Belgazal +M: Arthur Kiyanovski +R: Guy Tzalik R: Saeed Bishara R: Zorik Machulsky L: netdev@vger.kernel.org @@ -1405,7 +1407,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc.git ARM/ACTIONS SEMI ARCHITECTURE M: Andreas Färber -R: Manivannan Sadhasivam +M: Manivannan Sadhasivam L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained N: owl @@ -2272,6 +2274,7 @@ F: drivers/*/*s3c64xx* F: drivers/*/*s5pv210* F: drivers/memory/samsung/ F: drivers/soc/samsung/ +F: drivers/tty/serial/samsung* F: include/linux/soc/samsung/ F: Documentation/arm/samsung/ F: Documentation/devicetree/bindings/arm/samsung/ @@ -3147,7 +3150,7 @@ S: Maintained F: arch/mips/net/ BPF JIT for NFP NICs -M: Jakub Kicinski +M: Jakub Kicinski L: netdev@vger.kernel.org L: bpf@vger.kernel.org S: Supported @@ -5018,7 +5021,7 @@ F: include/linux/dma-mapping.h F: include/linux/dma-noncoherent.h DMC FREQUENCY DRIVER FOR SAMSUNG EXYNOS5422 -M: Lukasz Luba +M: Lukasz Luba L: linux-pm@vger.kernel.org L: linux-samsung-soc@vger.kernel.org S: Maintained @@ -5357,6 +5360,12 @@ S: Maintained F: drivers/gpu/drm/tiny/st7735r.c F: Documentation/devicetree/bindings/display/sitronix,st7735r.txt +DRM DRIVER FOR SONY ACX424AKP PANELS +M: Linus Walleij +T: git git://anongit.freedesktop.org/drm/drm-misc +S: Maintained +F: drivers/gpu/drm/panel/panel-sony-acx424akp.c + DRM DRIVER FOR ST-ERICSSON MCDE M: Linus Walleij T: git git://anongit.freedesktop.org/drm/drm-misc @@ -6049,6 +6058,7 @@ M: Yash Shah L: linux-edac@vger.kernel.org S: Supported F: drivers/edac/sifive_edac.c +F: drivers/soc/sifive_l2_cache.c EDAC-SKYLAKE M: Tony Luck @@ -7055,6 +7065,7 @@ L: linux-acpi@vger.kernel.org S: Maintained F: Documentation/firmware-guide/acpi/gpio-properties.rst F: drivers/gpio/gpiolib-acpi.c +F: drivers/gpio/gpiolib-acpi.h GPIO IR Transmitter M: Sean Young @@ -9062,7 +9073,6 @@ F: include/linux/umh.h KERNEL VIRTUAL MACHINE (KVM) M: Paolo Bonzini -M: Radim Krčmář L: kvm@vger.kernel.org W: http://www.linux-kvm.org T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git @@ -9097,9 +9107,9 @@ F: virt/kvm/arm/ F: include/kvm/arm_* KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips) -M: James Hogan L: linux-mips@vger.kernel.org -S: Supported +L: kvm@vger.kernel.org +S: Orphan F: arch/mips/include/uapi/asm/kvm* F: arch/mips/include/asm/kvm* F: arch/mips/kvm/ @@ -9134,7 +9144,6 @@ F: tools/testing/selftests/kvm/*/s390x/ KERNEL VIRTUAL MACHINE FOR X86 (KVM/x86) M: Paolo Bonzini -M: Radim Krčmář R: Sean Christopherson R: Vitaly Kuznetsov R: Wanpeng Li @@ -10132,6 +10141,7 @@ S: Maintained F: drivers/media/radio/radio-maxiradio* MCAN MMIO DEVICE DRIVER +M: Dan Murphy M: Sriram Dash L: linux-can@vger.kernel.org S: Maintained @@ -11450,7 +11460,7 @@ F: include/uapi/linux/netrom.h F: net/netrom/ NETRONOME ETHERNET DRIVERS -M: Jakub Kicinski +M: Jakub Kicinski L: oss-drivers@netronome.com S: Maintained F: drivers/net/ethernet/netronome/ @@ -11479,8 +11489,8 @@ M: "David S. 
Miller" L: netdev@vger.kernel.org W: http://www.linuxfoundation.org/en/Net Q: http://patchwork.ozlabs.org/project/netdev/list/ -T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git -T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git S: Odd Fixes F: Documentation/devicetree/bindings/net/ F: drivers/net/ @@ -11521,8 +11531,8 @@ M: "David S. Miller" L: netdev@vger.kernel.org W: http://www.linuxfoundation.org/en/Net Q: http://patchwork.ozlabs.org/project/netdev/list/ -T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git -T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git B: mailto:netdev@vger.kernel.org S: Maintained F: net/ @@ -11567,7 +11577,7 @@ M: "David S. Miller" M: Alexey Kuznetsov M: Hideaki YOSHIFUJI L: netdev@vger.kernel.org -T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git S: Maintained F: net/ipv4/ F: net/ipv6/ @@ -11610,7 +11620,7 @@ M: Boris Pismenny M: Aviad Yehezkel M: John Fastabend M: Daniel Borkmann -M: Jakub Kicinski +M: Jakub Kicinski L: netdev@vger.kernel.org S: Maintained F: net/tls/* @@ -11622,7 +11632,7 @@ L: linux-wireless@vger.kernel.org Q: http://patchwork.kernel.org/project/linux-wireless/list/ NETDEVSIM -M: Jakub Kicinski +M: Jakub Kicinski S: Maintained F: drivers/net/netdevsim/* @@ -11699,7 +11709,7 @@ F: Documentation/scsi/NinjaSCSI.txt F: drivers/scsi/nsp32* NIOS2 ARCHITECTURE -M: Ley Foon Tan +M: Ley Foon Tan L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers) T: git git://git.kernel.org/pub/scm/linux/kernel/git/lftan/nios2.git S: Maintained @@ -12583,7 +12593,7 @@ F: Documentation/devicetree/bindings/pci/aardvark-pci.txt F: drivers/pci/controller/pci-aardvark.c PCI DRIVER FOR ALTERA PCIE IP -M: Ley Foon Tan +M: Ley Foon Tan L: rfi@lists.rocketboards.org (moderated for non-subscribers) L: linux-pci@vger.kernel.org S: Supported @@ -12762,7 +12772,7 @@ S: Supported F: Documentation/PCI/pci-error-recovery.rst PCI MSI DRIVER FOR ALTERA MSI IP -M: Ley Foon Tan +M: Ley Foon Tan L: rfi@lists.rocketboards.org (moderated for non-subscribers) L: linux-pci@vger.kernel.org S: Supported @@ -13698,7 +13708,6 @@ F: drivers/net/ethernet/qualcomm/emac/ QUALCOMM ETHQOS ETHERNET DRIVER M: Vinod Koul -M: Niklas Cassel L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -13732,6 +13741,15 @@ L: linux-arm-msm@vger.kernel.org S: Maintained F: drivers/iommu/qcom_iommu.c +QUALCOMM RMNET DRIVER +M: Subash Abhinov Kasiviswanathan +M: Sean Tranchetti +L: netdev@vger.kernel.org +S: Maintained +F: drivers/net/ethernet/qualcomm/rmnet/ +F: Documentation/networking/device_drivers/qualcomm/rmnet.txt +F: include/linux/if_rmnet.h + QUALCOMM TSENS THERMAL DRIVER M: Amit Kucheria L: linux-pm@vger.kernel.org @@ -14131,6 +14149,7 @@ M: Paul Walmsley M: Palmer Dabbelt M: Albert Ou L: linux-riscv@lists.infradead.org +P: Documentation/riscv/patch-acceptance.rst T: git git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux.git S: Supported F: arch/riscv/ @@ -14558,8 +14577,6 @@ F: include/linux/platform_data/spi-s3c64xx.h SAMSUNG SXGBE DRIVERS M: Byungho An -M: Girish K S -M: Vipul Pandya S: 
Supported L: netdev@vger.kernel.org F: drivers/net/ethernet/samsung/sxgbe/ @@ -16555,6 +16572,13 @@ L: alsa-devel@alsa-project.org (moderated for non-subscribers) S: Odd Fixes F: sound/soc/codecs/tas571x* +TI TCAN4X5X DEVICE DRIVER +M: Dan Murphy +L: linux-can@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/net/can/tcan4x5x.txt +F: drivers/net/can/m_can/tcan4x5x.c + TI TRF7970A NFC DRIVER M: Mark Greer L: linux-wireless@vger.kernel.org @@ -18047,7 +18071,7 @@ XDP (eXpress Data Path) M: Alexei Starovoitov M: Daniel Borkmann M: David S. Miller -M: Jakub Kicinski +M: Jakub Kicinski M: Jesper Dangaard Brouer M: John Fastabend L: netdev@vger.kernel.org diff --git a/Makefile b/Makefile index f900c23b82914f9ff8717550a4687716f4241d8a..c50ef91f61363588e3750453baa95ed1a8a1731e 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 5 PATCHLEVEL = 5 SUBLEVEL = 0 -EXTRAVERSION = -rc2 +EXTRAVERSION = -rc7 NAME = Kleptomaniac Octopus # *DOCUMENTATION* @@ -414,6 +414,7 @@ STRIP = $(CROSS_COMPILE)strip OBJCOPY = $(CROSS_COMPILE)objcopy OBJDUMP = $(CROSS_COMPILE)objdump OBJSIZE = $(CROSS_COMPILE)size +READELF = $(CROSS_COMPILE)readelf PAHOLE = pahole LEX = flex YACC = bison @@ -472,7 +473,7 @@ GCC_PLUGINS_CFLAGS := CLANG_FLAGS := export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC -export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE PAHOLE LEX YACC AWK INSTALLKERNEL +export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE READELF PAHOLE LEX YACC AWK INSTALLKERNEL export PERL PYTHON PYTHON2 PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h index 41b16f21beecac28099ed7ee154db6947b6b0d14..0b8b63d0bec153285e1e4888a07a4fe9db8f40bf 100644 --- a/arch/arc/include/asm/entry-arcv2.h +++ b/arch/arc/include/asm/entry-arcv2.h @@ -162,7 +162,7 @@ #endif #ifdef CONFIG_ARC_HAS_ACCL_REGS - ST2 r58, r59, PT_sp + 12 + ST2 r58, r59, PT_r58 #endif .endm @@ -172,8 +172,8 @@ LD2 gp, fp, PT_r26 ; gp (r26), fp (r27) - ld r12, [sp, PT_sp + 4] - ld r30, [sp, PT_sp + 8] + ld r12, [sp, PT_r12] + ld r30, [sp, PT_r30] ; Restore SP (into AUX_USER_SP) only if returning to U mode ; - for K mode, it will be implicitly restored as stack is unwound @@ -190,7 +190,7 @@ #endif #ifdef CONFIG_ARC_HAS_ACCL_REGS - LD2 r58, r59, PT_sp + 12 + LD2 r58, r59, PT_r58 #endif .endm diff --git a/arch/arc/include/asm/hugepage.h b/arch/arc/include/asm/hugepage.h index 9a74ce71a7674f99349b51ec95cc44915bcc1aff..30ac40fed2c5c5affb5632e3979b84975d41698c 100644 --- a/arch/arc/include/asm/hugepage.h +++ b/arch/arc/include/asm/hugepage.h @@ -8,7 +8,6 @@ #define _ASM_ARC_HUGEPAGE_H #include -#define __ARCH_USE_5LEVEL_HACK #include static inline pte_t pmd_pte(pmd_t pmd) diff --git a/arch/arc/kernel/asm-offsets.c b/arch/arc/kernel/asm-offsets.c index 1f621e416521f8d1651453da9bee876ab864d21b..c783bcd35eb88c88d8ac39f2af097918bcf26be9 100644 --- a/arch/arc/kernel/asm-offsets.c +++ b/arch/arc/kernel/asm-offsets.c @@ -66,7 +66,15 @@ int main(void) DEFINE(SZ_CALLEE_REGS, sizeof(struct callee_regs)); DEFINE(SZ_PT_REGS, sizeof(struct pt_regs)); - DEFINE(PT_user_r25, offsetof(struct pt_regs, user_r25)); + +#ifdef CONFIG_ISA_ARCV2 + OFFSET(PT_r12, pt_regs, r12); + OFFSET(PT_r30, pt_regs, r30); +#endif +#ifdef CONFIG_ARC_HAS_ACCL_REGS + OFFSET(PT_r58, pt_regs, r58); + OFFSET(PT_r59, pt_regs, r59); +#endif return 0; } diff --git a/arch/arc/plat-eznps/Kconfig 
b/arch/arc/plat-eznps/Kconfig index a376a50d3fea8f8d1a478f702beb8843630ae176..a931d0a256d01ab0e78d4b8402c77d2567792bff 100644 --- a/arch/arc/plat-eznps/Kconfig +++ b/arch/arc/plat-eznps/Kconfig @@ -7,7 +7,7 @@ menuconfig ARC_PLAT_EZNPS bool "\"EZchip\" ARC dev platform" select CPU_BIG_ENDIAN - select CLKSRC_NPS + select CLKSRC_NPS if !PHYS_ADDR_T_64BIT select EZNPS_GIC select EZCHIP_NPS_MANAGEMENT_ENET if ETHERNET help diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index ba75e3661a4159afa45ac15516db9e16d2fdae29..96dab76da3b3962b8b22899516f47dd3274dcbca 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -72,6 +72,7 @@ config ARM select HAVE_ARM_SMCCC if CPU_V7 select HAVE_EBPF_JIT if !CPU_ENDIAN_BE32 select HAVE_CONTEXT_TRACKING + select HAVE_COPY_THREAD_TLS select HAVE_C_RECORDMCOUNT select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_CONTIGUOUS if MMU diff --git a/arch/arm/boot/dts/am335x-sancloud-bbe.dts b/arch/arm/boot/dts/am335x-sancloud-bbe.dts index 8678e6e35493fc7b6548942ed643feec16a51407..e5fdb7abb0d54a47eceb026009d1ef1173de7e7f 100644 --- a/arch/arm/boot/dts/am335x-sancloud-bbe.dts +++ b/arch/arm/boot/dts/am335x-sancloud-bbe.dts @@ -108,7 +108,7 @@ &cpsw_emac0 { phy-handle = <&ethphy0>; - phy-mode = "rgmii-txid"; + phy-mode = "rgmii-id"; }; &i2c0 { diff --git a/arch/arm/boot/dts/am437x-gp-evm.dts b/arch/arm/boot/dts/am437x-gp-evm.dts index cae4500194fecb5e9a7d5c370300fea5ff550148..811c8cae315b520f445964fc07dc00e16266b5f9 100644 --- a/arch/arm/boot/dts/am437x-gp-evm.dts +++ b/arch/arm/boot/dts/am437x-gp-evm.dts @@ -86,7 +86,7 @@ }; lcd0: display { - compatible = "osddisplays,osd057T0559-34ts", "panel-dpi"; + compatible = "osddisplays,osd070t1718-19ts", "panel-dpi"; label = "lcd"; backlight = <&lcd_bl>; diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts index 95314121d11153ba8b9330728bab031f40585fda..078cb473fa7dcd08f27f6a1e094c6855e825fb26 100644 --- a/arch/arm/boot/dts/am43x-epos-evm.dts +++ b/arch/arm/boot/dts/am43x-epos-evm.dts @@ -42,7 +42,7 @@ }; lcd0: display { - compatible = "osddisplays,osd057T0559-34ts", "panel-dpi"; + compatible = "osddisplays,osd070t1718-19ts", "panel-dpi"; label = "lcd"; backlight = <&lcd_bl>; diff --git a/arch/arm/boot/dts/am571x-idk.dts b/arch/arm/boot/dts/am571x-idk.dts index 820ce3b60bb6c50a5b7a44b457e428b4383536d4..669559c9c95b3a83c93e412d7f526cbc6131077c 100644 --- a/arch/arm/boot/dts/am571x-idk.dts +++ b/arch/arm/boot/dts/am571x-idk.dts @@ -167,11 +167,7 @@ &pcie1_rc { status = "okay"; - gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>; -}; - -&pcie1_ep { - gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>; + gpios = <&gpio5 18 GPIO_ACTIVE_HIGH>; }; &mmc1 { diff --git a/arch/arm/boot/dts/am572x-idk-common.dtsi b/arch/arm/boot/dts/am572x-idk-common.dtsi index a064f13b38802d1483796a7e5c5e93c8e03bb640..ddf123620e962ed71e155136ce1ccf48cf4cf19d 100644 --- a/arch/arm/boot/dts/am572x-idk-common.dtsi +++ b/arch/arm/boot/dts/am572x-idk-common.dtsi @@ -147,10 +147,6 @@ gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>; }; -&pcie1_ep { - gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>; -}; - &mailbox5 { status = "okay"; mbox_ipu1_ipc3x: mbox_ipu1_ipc3x { diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi index bc76f1705c0f667dab3ad9af455b5f7685823e08..a813a0cf3ff39a97af53723a5973fe721a1c0117 100644 --- a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi +++ b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi @@ -29,6 +29,27 @@ reg = <0x0 0x80000000 0x0 0x80000000>; }; + main_12v0: fixedregulator-main_12v0 { /* main
supply */ + compatible = "regulator-fixed"; + regulator-name = "main_12v0"; + regulator-min-microvolt = <12000000>; + regulator-max-microvolt = <12000000>; + regulator-always-on; + regulator-boot-on; + }; + + evm_5v0: fixedregulator-evm_5v0 { + /* Output of TPS54531D */ + compatible = "regulator-fixed"; + regulator-name = "evm_5v0"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&main_12v0>; + regulator-always-on; + regulator-boot-on; + }; + vdd_3v3: fixedregulator-vdd_3v3 { compatible = "regulator-fixed"; regulator-name = "vdd_3v3"; @@ -547,10 +568,6 @@ gpios = <&gpio2 8 GPIO_ACTIVE_LOW>; }; -&pcie1_ep { - gpios = <&gpio2 8 GPIO_ACTIVE_LOW>; -}; - &mcasp3 { #sound-dai-cells = <0>; assigned-clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 24>; diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts index c1c9cd30f9803cd20cf7bdcb4f15b9479a4e0b39..13f7aefe045e6d59581bcf4d37ed4e328917c507 100644 --- a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts +++ b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts @@ -258,9 +258,9 @@ }; }; - pca0: pca9552@60 { + pca0: pca9552@61 { compatible = "nxp,pca9552"; - reg = <0x60>; + reg = <0x61>; #address-cells = <1>; #size-cells = <0>; @@ -519,371 +519,6 @@ status = "okay"; }; -&i2c13 { - status = "okay"; -}; - -&i2c14 { - status = "okay"; -}; - -&i2c15 { - status = "okay"; -}; - -&i2c0 { - status = "okay"; -}; - -&i2c1 { - status = "okay"; -}; - -&i2c2 { - status = "okay"; -}; - -&i2c3 { - status = "okay"; - - power-supply@68 { - compatible = "ibm,cffps2"; - reg = <0x68>; - }; - - power-supply@69 { - compatible = "ibm,cffps2"; - reg = <0x69>; - }; - - power-supply@6a { - compatible = "ibm,cffps2"; - reg = <0x6a>; - }; - - power-supply@6b { - compatible = "ibm,cffps2"; - reg = <0x6b>; - }; -}; - -&i2c4 { - status = "okay"; - - tmp275@48 { - compatible = "ti,tmp275"; - reg = <0x48>; - }; - - tmp275@49 { - compatible = "ti,tmp275"; - reg = <0x49>; - }; - - tmp275@4a { - compatible = "ti,tmp275"; - reg = <0x4a>; - }; -}; - -&i2c5 { - status = "okay"; - - tmp275@48 { - compatible = "ti,tmp275"; - reg = <0x48>; - }; - - tmp275@49 { - compatible = "ti,tmp275"; - reg = <0x49>; - }; -}; - -&i2c6 { - status = "okay"; - - tmp275@48 { - compatible = "ti,tmp275"; - reg = <0x48>; - }; - - tmp275@4a { - compatible = "ti,tmp275"; - reg = <0x4a>; - }; - - tmp275@4b { - compatible = "ti,tmp275"; - reg = <0x4b>; - }; -}; - -&i2c7 { - status = "okay"; - - si7021-a20@20 { - compatible = "silabs,si7020"; - reg = <0x20>; - }; - - tmp275@48 { - compatible = "ti,tmp275"; - reg = <0x48>; - }; - - max31785@52 { - compatible = "maxim,max31785a"; - reg = <0x52>; - #address-cells = <1>; - #size-cells = <0>; - - fan@0 { - compatible = "pmbus-fan"; - reg = <0>; - tach-pulses = <2>; - }; - - fan@1 { - compatible = "pmbus-fan"; - reg = <1>; - tach-pulses = <2>; - }; - - fan@2 { - compatible = "pmbus-fan"; - reg = <2>; - tach-pulses = <2>; - }; - - fan@3 { - compatible = "pmbus-fan"; - reg = <3>; - tach-pulses = <2>; - }; - }; - - pca0: pca9552@60 { - compatible = "nxp,pca9552"; - reg = <0x60>; - #address-cells = <1>; - #size-cells = <0>; - - gpio-controller; - #gpio-cells = <2>; - - gpio@0 { - reg = <0>; - }; - - gpio@1 { - reg = <1>; - }; - - gpio@2 { - reg = <2>; - }; - - gpio@3 { - reg = <3>; - }; - - gpio@4 { - reg = <4>; - }; - - gpio@5 { - reg = <5>; - }; - - gpio@6 { - reg = <6>; - }; - - gpio@7 { - reg = <7>; - }; - - gpio@8 { - reg = <8>; - }; - - gpio@9 { - reg = <9>; - }; - - gpio@10 { - 
reg = <10>; - }; - - gpio@11 { - reg = <11>; - }; - - gpio@12 { - reg = <12>; - }; - - gpio@13 { - reg = <13>; - }; - - gpio@14 { - reg = <14>; - }; - - gpio@15 { - reg = <15>; - }; - }; - - dps: dps310@76 { - compatible = "infineon,dps310"; - reg = <0x76>; - #io-channel-cells = <0>; - }; -}; - -&i2c8 { - status = "okay"; - - ucd90320@b { - compatible = "ti,ucd90160"; - reg = <0x0b>; - }; - - ucd90320@c { - compatible = "ti,ucd90160"; - reg = <0x0c>; - }; - - ucd90320@11 { - compatible = "ti,ucd90160"; - reg = <0x11>; - }; - - rtc@32 { - compatible = "epson,rx8900"; - reg = <0x32>; - }; - - tmp275@48 { - compatible = "ti,tmp275"; - reg = <0x48>; - }; - - tmp275@4a { - compatible = "ti,tmp275"; - reg = <0x4a>; - }; -}; - -&i2c9 { - status = "okay"; - - ir35221@42 { - compatible = "infineon,ir35221"; - reg = <0x42>; - }; - - ir35221@43 { - compatible = "infineon,ir35221"; - reg = <0x43>; - }; - - ir35221@44 { - compatible = "infineon,ir35221"; - reg = <0x44>; - }; - - tmp423a@4c { - compatible = "ti,tmp423"; - reg = <0x4c>; - }; - - tmp423b@4d { - compatible = "ti,tmp423"; - reg = <0x4d>; - }; - - ir35221@72 { - compatible = "infineon,ir35221"; - reg = <0x72>; - }; - - ir35221@73 { - compatible = "infineon,ir35221"; - reg = <0x73>; - }; - - ir35221@74 { - compatible = "infineon,ir35221"; - reg = <0x74>; - }; -}; - -&i2c10 { - status = "okay"; - - ir35221@42 { - compatible = "infineon,ir35221"; - reg = <0x42>; - }; - - ir35221@43 { - compatible = "infineon,ir35221"; - reg = <0x43>; - }; - - ir35221@44 { - compatible = "infineon,ir35221"; - reg = <0x44>; - }; - - tmp423a@4c { - compatible = "ti,tmp423"; - reg = <0x4c>; - }; - - tmp423b@4d { - compatible = "ti,tmp423"; - reg = <0x4d>; - }; - - ir35221@72 { - compatible = "infineon,ir35221"; - reg = <0x72>; - }; - - ir35221@73 { - compatible = "infineon,ir35221"; - reg = <0x73>; - }; - - ir35221@74 { - compatible = "infineon,ir35221"; - reg = <0x74>; - }; -}; - -&i2c11 { - status = "okay"; - - tmp275@48 { - compatible = "ti,tmp275"; - reg = <0x48>; - }; - - tmp275@49 { - compatible = "ti,tmp275"; - reg = <0x49>; - }; -}; - -&i2c12 { - status = "okay"; -}; - &i2c13 { status = "okay"; diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts b/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts index f02de4ab058cd516cb956a3d8ebfcb9ef87028b0..ff49ec76fa7cd132f025baa177f537e9a6652232 100644 --- a/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts +++ b/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts @@ -122,37 +122,6 @@ }; }; -&fmc { - status = "okay"; - flash@0 { - status = "okay"; - m25p,fast-read; - label = "bmc"; - spi-max-frequency = <50000000>; -#include "openbmc-flash-layout-128.dtsi" - }; - - flash@1 { - status = "okay"; - m25p,fast-read; - label = "alt-bmc"; - spi-max-frequency = <50000000>; - }; -}; - -&spi1 { - status = "okay"; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_spi1_default>; - - flash@0 { - status = "okay"; - m25p,fast-read; - label = "pnor"; - spi-max-frequency = <100000000>; - }; -}; - &mac2 { status = "okay"; pinctrl-names = "default"; @@ -165,6 +134,11 @@ &emmc { status = "okay"; +}; + +&fsim0 { + status = "okay"; + #address-cells = <2>; #size-cells = <0>; @@ -820,373 +794,6 @@ status = "okay"; }; -&i2c0 { - status = "okay"; -}; - -&i2c1 { - status = "okay"; -}; - -&i2c2 { - status = "okay"; -}; - -&i2c3 { - status = "okay"; - - bmp: bmp280@77 { - compatible = "bosch,bmp280"; - reg = <0x77>; - #io-channel-cells = <1>; - }; - - max31785@52 { - compatible = "maxim,max31785a"; - reg = <0x52>; - #address-cells = <1>; - #size-cells = <0>; 
- - fan@0 { - compatible = "pmbus-fan"; - reg = <0>; - tach-pulses = <2>; - maxim,fan-rotor-input = "tach"; - maxim,fan-pwm-freq = <25000>; - maxim,fan-dual-tach; - maxim,fan-no-watchdog; - maxim,fan-no-fault-ramp; - maxim,fan-ramp = <2>; - maxim,fan-fault-pin-mon; - }; - - fan@1 { - compatible = "pmbus-fan"; - reg = <1>; - tach-pulses = <2>; - maxim,fan-rotor-input = "tach"; - maxim,fan-pwm-freq = <25000>; - maxim,fan-dual-tach; - maxim,fan-no-watchdog; - maxim,fan-no-fault-ramp; - maxim,fan-ramp = <2>; - maxim,fan-fault-pin-mon; - }; - - fan@2 { - compatible = "pmbus-fan"; - reg = <2>; - tach-pulses = <2>; - maxim,fan-rotor-input = "tach"; - maxim,fan-pwm-freq = <25000>; - maxim,fan-dual-tach; - maxim,fan-no-watchdog; - maxim,fan-no-fault-ramp; - maxim,fan-ramp = <2>; - maxim,fan-fault-pin-mon; - }; - - fan@3 { - compatible = "pmbus-fan"; - reg = <3>; - tach-pulses = <2>; - maxim,fan-rotor-input = "tach"; - maxim,fan-pwm-freq = <25000>; - maxim,fan-dual-tach; - maxim,fan-no-watchdog; - maxim,fan-no-fault-ramp; - maxim,fan-ramp = <2>; - maxim,fan-fault-pin-mon; - }; - }; - - dps: dps310@76 { - compatible = "infineon,dps310"; - reg = <0x76>; - #io-channel-cells = <0>; - }; - - pca0: pca9552@60 { - compatible = "nxp,pca9552"; - reg = <0x60>; - #address-cells = <1>; - #size-cells = <0>; - - gpio-controller; - #gpio-cells = <2>; - - gpio@0 { - reg = <0>; - type = ; - }; - - gpio@1 { - reg = <1>; - type = ; - }; - - gpio@2 { - reg = <2>; - type = ; - }; - - gpio@3 { - reg = <3>; - type = ; - }; - - gpio@4 { - reg = <4>; - type = ; - }; - - gpio@5 { - reg = <5>; - type = ; - }; - - gpio@6 { - reg = <6>; - type = ; - }; - - gpio@7 { - reg = <7>; - type = ; - }; - - gpio@8 { - reg = <8>; - type = ; - }; - - gpio@9 { - reg = <9>; - type = ; - }; - - gpio@10 { - reg = <10>; - type = ; - }; - - gpio@11 { - reg = <11>; - type = ; - }; - - gpio@12 { - reg = <12>; - type = ; - }; - - gpio@13 { - reg = <13>; - type = ; - }; - - gpio@14 { - reg = <14>; - type = ; - }; - - gpio@15 { - reg = <15>; - type = ; - }; - }; - - power-supply@68 { - compatible = "ibm,cffps1"; - reg = <0x68>; - }; - - power-supply@69 { - compatible = "ibm,cffps1"; - reg = <0x69>; - }; -}; - -&i2c4 { - status = "okay"; - - tmp423a@4c { - compatible = "ti,tmp423"; - reg = <0x4c>; - }; - - ir35221@70 { - compatible = "infineon,ir35221"; - reg = <0x70>; - }; - - ir35221@71 { - compatible = "infineon,ir35221"; - reg = <0x71>; - }; -}; - -&i2c5 { - status = "okay"; - - tmp423a@4c { - compatible = "ti,tmp423"; - reg = <0x4c>; - }; - - ir35221@70 { - compatible = "infineon,ir35221"; - reg = <0x70>; - }; - - ir35221@71 { - compatible = "infineon,ir35221"; - reg = <0x71>; - }; -}; - -&i2c7 { - status = "okay"; -}; - -&i2c9 { - status = "okay"; - - tmp275@4a { - compatible = "ti,tmp275"; - reg = <0x4a>; - }; -}; - -&i2c10 { - status = "okay"; -}; - -&i2c11 { - status = "okay"; - - pca9552: pca9552@60 { - compatible = "nxp,pca9552"; - reg = <0x60>; - #address-cells = <1>; - #size-cells = <0>; - gpio-controller; - #gpio-cells = <2>; - - gpio-line-names = "PS_SMBUS_RESET_N", "APSS_RESET_N", - "GPU0_TH_OVERT_N_BUFF", "GPU1_TH_OVERT_N_BUFF", - "GPU2_TH_OVERT_N_BUFF", "GPU3_TH_OVERT_N_BUFF", - "GPU4_TH_OVERT_N_BUFF", "GPU5_TH_OVERT_N_BUFF", - "GPU0_PWR_GOOD_BUFF", "GPU1_PWR_GOOD_BUFF", - "GPU2_PWR_GOOD_BUFF", "GPU3_PWR_GOOD_BUFF", - "GPU4_PWR_GOOD_BUFF", "GPU5_PWR_GOOD_BUFF", - "12V_BREAKER_FLT_N", "THROTTLE_UNLATCHED_N"; - - gpio@0 { - reg = <0>; - type = ; - }; - - gpio@1 { - reg = <1>; - type = ; - }; - - gpio@2 { - reg = <2>; - type = ; - }; - 
- gpio@3 { - reg = <3>; - type = ; - }; - - gpio@4 { - reg = <4>; - type = ; - }; - - gpio@5 { - reg = <5>; - type = ; - }; - - gpio@6 { - reg = <6>; - type = ; - }; - - gpio@7 { - reg = <7>; - type = ; - }; - - gpio@8 { - reg = <8>; - type = ; - }; - - gpio@9 { - reg = <9>; - type = ; - }; - - gpio@10 { - reg = <10>; - type = ; - }; - - gpio@11 { - reg = <11>; - type = ; - }; - - gpio@12 { - reg = <12>; - type = ; - }; - - gpio@13 { - reg = <13>; - type = ; - }; - - gpio@14 { - reg = <14>; - type = ; - }; - - gpio@15 { - reg = <15>; - type = ; - }; - }; - - rtc@32 { - compatible = "epson,rx8900"; - reg = <0x32>; - }; - - eeprom@51 { - compatible = "atmel,24c64"; - reg = <0x51>; - }; - - ucd90160@64 { - compatible = "ti,ucd90160"; - reg = <0x64>; - }; -}; - -&i2c12 { - status = "okay"; -}; - -&i2c13 { - status = "okay"; -}; - &pinctrl { /* Hog these as no driver is probed for the entire LPC block */ pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/aspeed-g6.dtsi b/arch/arm/boot/dts/aspeed-g6.dtsi index 5f6142d99eeb68c427333b3a52a40e17bdc90526..b72afbaadaf81fce5bc8ab687af34ae14222baf0 100644 --- a/arch/arm/boot/dts/aspeed-g6.dtsi +++ b/arch/arm/boot/dts/aspeed-g6.dtsi @@ -163,26 +163,6 @@ spi-max-frequency = <50000000>; status = "disabled"; }; - - fsim0: fsi@1e79b000 { - compatible = "aspeed,ast2600-fsi-master", "fsi-master"; - reg = <0x1e79b000 0x94>; - interrupts = ; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_fsi1_default>; - clocks = <&syscon ASPEED_CLK_GATE_FSICLK>; - status = "disabled"; - }; - - fsim1: fsi@1e79b100 { - compatible = "aspeed,ast2600-fsi-master", "fsi-master"; - reg = <0x1e79b100 0x94>; - interrupts = ; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_fsi2_default>; - clocks = <&syscon ASPEED_CLK_GATE_FSICLK>; - status = "disabled"; - }; }; mdio0: mdio@1e650000 { @@ -595,6 +575,25 @@ ranges = <0 0x1e78a000 0x1000>; }; + fsim0: fsi@1e79b000 { + compatible = "aspeed,ast2600-fsi-master", "fsi-master"; + reg = <0x1e79b000 0x94>; + interrupts = ; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_fsi1_default>; + clocks = <&syscon ASPEED_CLK_GATE_FSICLK>; + status = "disabled"; + }; + + fsim1: fsi@1e79b100 { + compatible = "aspeed,ast2600-fsi-master", "fsi-master"; + reg = <0x1e79b100 0x94>; + interrupts = ; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_fsi2_default>; + clocks = <&syscon ASPEED_CLK_GATE_FSICLK>; + status = "disabled"; + }; }; }; }; diff --git a/arch/arm/boot/dts/bcm-cygnus.dtsi b/arch/arm/boot/dts/bcm-cygnus.dtsi index 2dac3efc7640595c352037485ebd89bfde7cff76..1bc45cfd545385c640fdf2c17b6c9da2e08af2c8 100644 --- a/arch/arm/boot/dts/bcm-cygnus.dtsi +++ b/arch/arm/boot/dts/bcm-cygnus.dtsi @@ -174,8 +174,8 @@ mdio: mdio@18002000 { compatible = "brcm,iproc-mdio"; reg = <0x18002000 0x8>; - #size-cells = <1>; - #address-cells = <0>; + #size-cells = <0>; + #address-cells = <1>; status = "disabled"; gphy0: ethernet-phy@0 { diff --git a/arch/arm/boot/dts/bcm2711.dtsi b/arch/arm/boot/dts/bcm2711.dtsi index 961bed832755b0389b93053c7cbf2e5b4dadacbc..e2f6ffb00aa94340fc2db94fb7e75b0e7d998bca 100644 --- a/arch/arm/boot/dts/bcm2711.dtsi +++ b/arch/arm/boot/dts/bcm2711.dtsi @@ -43,7 +43,7 @@ <0x7c000000 0x0 0xfc000000 0x02000000>, <0x40000000 0x0 0xff800000 0x00800000>; /* Emulate a contiguous 30-bit address range for DMA */ - dma-ranges = <0xc0000000 0x0 0x00000000 0x3c000000>; + dma-ranges = <0xc0000000 0x0 0x00000000 0x40000000>; /* * This node is the provider for the enable-method for diff --git a/arch/arm/boot/dts/bcm283x.dtsi 
b/arch/arm/boot/dts/bcm283x.dtsi index 3caaa57eb6c81eb449b6ee8dbdc04101b5b32d00..839491628e87b392cce339a83b8ec261ab70cbf3 100644 --- a/arch/arm/boot/dts/bcm283x.dtsi +++ b/arch/arm/boot/dts/bcm283x.dtsi @@ -37,7 +37,7 @@ trips { cpu-crit { - temperature = <80000>; + temperature = <90000>; hysteresis = <0>; type = "critical"; }; }; diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi index 372dc1eb88a0e82883e2049cec701034f8374b2d..2d9b4dd058307e7f1c5c44bae5e4f2e75d5ea6c3 100644 --- a/arch/arm/boot/dts/bcm5301x.dtsi +++ b/arch/arm/boot/dts/bcm5301x.dtsi @@ -353,8 +353,8 @@ mdio: mdio@18003000 { compatible = "brcm,iproc-mdio"; reg = <0x18003000 0x8>; - #size-cells = <1>; - #address-cells = <0>; + #size-cells = <0>; + #address-cells = <1>; }; mdio-bus-mux@18003000 { diff --git a/arch/arm/boot/dts/e60k02.dtsi b/arch/arm/boot/dts/e60k02.dtsi index 6472b056a001f9054c525635ab48611bb114240b..5a2c5320437dd246835d8a217c8cfad65694b1d0 100644 --- a/arch/arm/boot/dts/e60k02.dtsi +++ b/arch/arm/boot/dts/e60k02.dtsi @@ -265,11 +265,6 @@ regulator-name = "LDORTC1"; regulator-boot-on; }; - - ldortc2_reg: LDORTC2 { - regulator-name = "LDORTC2"; - regulator-boot-on; - }; }; }; }; diff --git a/arch/arm/boot/dts/imx6dl-icore-mipi.dts b/arch/arm/boot/dts/imx6dl-icore-mipi.dts index e43bccb78ab2ba44130603fb102254a3081ca858..d8f3821a0ffdc33069769942c660866efd824759 100644 --- a/arch/arm/boot/dts/imx6dl-icore-mipi.dts +++ b/arch/arm/boot/dts/imx6dl-icore-mipi.dts @@ -8,7 +8,7 @@ /dts-v1/; #include "imx6dl.dtsi" -#include "imx6qdl-icore.dtsi" +#include "imx6qdl-icore-1.5.dtsi" / { model = "Engicam i.CoreM6 DualLite/Solo MIPI Starter Kit"; diff --git a/arch/arm/boot/dts/imx6q-dhcom-pdk2.dts b/arch/arm/boot/dts/imx6q-dhcom-pdk2.dts index 5219553df1e73529f61cf790c476e25bbd000564..bb74fc62d9135863d6f58e1e1a0b53b1a9bb4dd7 100644 --- a/arch/arm/boot/dts/imx6q-dhcom-pdk2.dts +++ b/arch/arm/boot/dts/imx6q-dhcom-pdk2.dts @@ -63,7 +63,7 @@ #sound-dai-cells = <0>; clocks = <&clk_ext_audio_codec>; VDDA-supply = <&reg_3p3v>; - VDDIO-supply = <&reg_3p3v>; + VDDIO-supply = <&sw2_reg>; }; }; diff --git a/arch/arm/boot/dts/imx6q-dhcom-som.dtsi b/arch/arm/boot/dts/imx6q-dhcom-som.dtsi index 845cfad99bf9c41567cd935467e180409aef595d..87f0aa897086e100d9ea368f5fec9dfb269cec62 100644 --- a/arch/arm/boot/dts/imx6q-dhcom-som.dtsi +++ b/arch/arm/boot/dts/imx6q-dhcom-som.dtsi @@ -204,7 +204,7 @@ }; rtc@56 { - compatible = "rv3029c2"; + compatible = "microcrystal,rv3029"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_rtc_hw300>; reg = <0x56>; diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi index 71ca76a5e4a513730c1c0c1c0fe5b832c469a24e..fe59dde41b649cb7ab0a386b2faab9738a69dad6 100644 --- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi +++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi @@ -749,10 +749,6 @@ vin-supply = <&vgen5_reg>; }; -&reg_vdd3p0 { - vin-supply = <&sw2_reg>; -}; - &reg_vdd2p5 { vin-supply = <&vgen5_reg>; }; diff --git a/arch/arm/boot/dts/imx6sl-evk.dts b/arch/arm/boot/dts/imx6sl-evk.dts index 4829aa682aeb01cbc3e9a0c7485e87ca59bebdbf..bc86cfaaa9c270f80cf260b0599dfd931af516fe 100644 --- a/arch/arm/boot/dts/imx6sl-evk.dts +++ b/arch/arm/boot/dts/imx6sl-evk.dts @@ -584,10 +584,6 @@ vin-supply = <&sw2_reg>; }; -&reg_vdd3p0 { - vin-supply = <&sw2_reg>; -}; - &reg_vdd2p5 { vin-supply = <&sw2_reg>; }; diff --git a/arch/arm/boot/dts/imx6sll-evk.dts b/arch/arm/boot/dts/imx6sll-evk.dts index 3e1d32fdf4b856f4742ae7685fa3d7e48da4d440..5ace9e6acf85ca9839d5c15db00d77662288146e 100644 ---
a/arch/arm/boot/dts/imx6sll-evk.dts +++ b/arch/arm/boot/dts/imx6sll-evk.dts @@ -265,10 +265,6 @@ status = "okay"; }; -&reg_3p0 { - vin-supply = <&sw2_reg>; -}; - &snvs_poweroff { status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6sx-sdb-reva.dts b/arch/arm/boot/dts/imx6sx-sdb-reva.dts index f1830ed387a5512ed99ea71b82cbc09876d2f8aa..91a7548fdb8db4d4339f844214778fd80c0702d7 100644 --- a/arch/arm/boot/dts/imx6sx-sdb-reva.dts +++ b/arch/arm/boot/dts/imx6sx-sdb-reva.dts @@ -159,10 +159,6 @@ vin-supply = <&vgen6_reg>; }; -&reg_vdd3p0 { - vin-supply = <&sw2_reg>; -}; - &reg_vdd2p5 { vin-supply = <&vgen6_reg>; }; diff --git a/arch/arm/boot/dts/imx6sx-sdb.dts b/arch/arm/boot/dts/imx6sx-sdb.dts index a8ee7087af5a5e769d505a048f7cc438c14a7a47..5a63ca6157229ccc48d175f8045ef9446b203720 100644 --- a/arch/arm/boot/dts/imx6sx-sdb.dts +++ b/arch/arm/boot/dts/imx6sx-sdb.dts @@ -141,10 +141,6 @@ vin-supply = <&vgen6_reg>; }; -&reg_vdd3p0 { - vin-supply = <&sw2_reg>; -}; - &reg_vdd2p5 { vin-supply = <&vgen6_reg>; }; diff --git a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi index 1506eb12b21e08a8ba316e38ac667690f19a19f2..212144511b661b28cbb8b6f34aeac7261eeec316 100644 --- a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi +++ b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi @@ -30,14 +30,26 @@ enable-active-high; }; - reg_sensors: regulator-sensors { + reg_peri_3v3: regulator-peri-3v3 { compatible = "regulator-fixed"; pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_sensors_reg>; - regulator-name = "sensors-supply"; + pinctrl-0 = <&pinctrl_peri_3v3>; + regulator-name = "VPERI_3V3"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; gpio = <&gpio5 2 GPIO_ACTIVE_LOW>; + /* + * If you want to make this dynamic please + * check schematics and test all affected peripherals: + * + * - sensors + * - ethernet phy + * - can + * - bluetooth + * - wm8960 audio codec + * - ov5640 camera + */ + regulator-always-on; }; reg_can_3v3: regulator-can-3v3 { @@ -140,6 +152,7 @@ pinctrl-0 = <&pinctrl_enet1>; phy-mode = "rmii"; phy-handle = <&ethphy0>; + phy-supply = <&reg_peri_3v3>; status = "okay"; }; @@ -148,6 +161,7 @@ pinctrl-0 = <&pinctrl_enet2>; phy-mode = "rmii"; phy-handle = <&ethphy1>; + phy-supply = <&reg_peri_3v3>; status = "okay"; mdio { @@ -193,8 +207,8 @@ magnetometer@e { compatible = "fsl,mag3110"; reg = <0x0e>; - vdd-supply = <&reg_sensors>; - vddio-supply = <&reg_sensors>; + vdd-supply = <&reg_peri_3v3>; + vddio-supply = <&reg_peri_3v3>; }; }; @@ -227,7 +241,7 @@ flash0: n25q256a@0 { #address-cells = <1>; #size-cells = <1>; - compatible = "micron,n25q256a"; + compatible = "micron,n25q256a", "jedec,spi-nor"; spi-max-frequency = <29000000>; spi-rx-bus-width = <4>; spi-tx-bus-width = <4>; @@ -462,7 +476,7 @@ >; }; - pinctrl_sensors_reg: sensorsreggrp { + pinctrl_peri_3v3: peri3v3grp { fsl,pins = < MX6UL_PAD_SNVS_TAMPER2__GPIO5_IO02 0x1b0b0 >; diff --git a/arch/arm/boot/dts/imx7s-colibri.dtsi b/arch/arm/boot/dts/imx7s-colibri.dtsi index 1fb1ec5d3d70717f08026f47e42d9bfee0c81ca8..6d16e32aed899122efcb4279581844714e05f0ca 100644 --- a/arch/arm/boot/dts/imx7s-colibri.dtsi +++ b/arch/arm/boot/dts/imx7s-colibri.dtsi @@ -49,3 +49,7 @@ reg = <0x80000000 0x10000000>; }; }; + +&gpmi { + status = "okay"; +}; diff --git a/arch/arm/boot/dts/imx7ulp.dtsi b/arch/arm/boot/dts/imx7ulp.dtsi index d37a1927c88ea3fae91f4d1e67407f5d39036f9f..ab91c98f21241127736dada1205d2b57ea7eef59 100644 --- a/arch/arm/boot/dts/imx7ulp.dtsi +++ b/arch/arm/boot/dts/imx7ulp.dtsi @@ -37,10 +37,10 @@ #address-cells = <1>; #size-cells = <0>; -
cpu0: cpu@0 { + cpu0: cpu@f00 { compatible = "arm,cortex-a7"; device_type = "cpu"; - reg = <0>; + reg = <0xf00>; }; }; diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi index 5a7e3e5caebe2fc4e7f0880ab9f37d8ff026f9b3..3c534cd50ee3b2527c0e287d5216a1ffc95807c7 100644 --- a/arch/arm/boot/dts/meson8.dtsi +++ b/arch/arm/boot/dts/meson8.dtsi @@ -253,7 +253,7 @@ &aobus { pmu: pmu@e0 { compatible = "amlogic,meson8-pmu", "syscon"; - reg = <0xe0 0x8>; + reg = <0xe0 0x18>; }; pinctrl_aobus: pinctrl@84 { diff --git a/arch/arm/boot/dts/mmp3.dtsi b/arch/arm/boot/dts/mmp3.dtsi index d9762de0ed34be80c943e1bb3e237253ca6fc83a..6f480827b94d36fe47dd3c0c09dd9c8c9e1363ec 100644 --- a/arch/arm/boot/dts/mmp3.dtsi +++ b/arch/arm/boot/dts/mmp3.dtsi @@ -356,7 +356,7 @@ twsi1: i2c@d4011000 { compatible = "mrvl,mmp-twsi"; - reg = <0xd4011000 0x1000>; + reg = <0xd4011000 0x70>; interrupts = ; clocks = <&soc_clocks MMP2_CLK_TWSI0>; resets = <&soc_clocks MMP2_CLK_TWSI0>; @@ -368,7 +368,7 @@ twsi2: i2c@d4031000 { compatible = "mrvl,mmp-twsi"; - reg = <0xd4031000 0x1000>; + reg = <0xd4031000 0x70>; interrupt-parent = <&twsi_mux>; interrupts = <0>; clocks = <&soc_clocks MMP2_CLK_TWSI1>; @@ -380,7 +380,7 @@ twsi3: i2c@d4032000 { compatible = "mrvl,mmp-twsi"; - reg = <0xd4032000 0x1000>; + reg = <0xd4032000 0x70>; interrupt-parent = <&twsi_mux>; interrupts = <1>; clocks = <&soc_clocks MMP2_CLK_TWSI2>; @@ -392,7 +392,7 @@ twsi4: i2c@d4033000 { compatible = "mrvl,mmp-twsi"; - reg = <0xd4033000 0x1000>; + reg = <0xd4033000 0x70>; interrupt-parent = <&twsi_mux>; interrupts = <2>; clocks = <&soc_clocks MMP2_CLK_TWSI3>; @@ -405,7 +405,7 @@ twsi5: i2c@d4033800 { compatible = "mrvl,mmp-twsi"; - reg = <0xd4033800 0x1000>; + reg = <0xd4033800 0x70>; interrupt-parent = <&twsi_mux>; interrupts = <3>; clocks = <&soc_clocks MMP2_CLK_TWSI4>; @@ -417,7 +417,7 @@ twsi6: i2c@d4034000 { compatible = "mrvl,mmp-twsi"; - reg = <0xd4034000 0x1000>; + reg = <0xd4034000 0x70>; interrupt-parent = <&twsi_mux>; interrupts = <4>; clocks = <&soc_clocks MMP2_CLK_TWSI5>; diff --git a/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts b/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts index fb928503ad45da35e12b24189e3ece6ae66d1cca..d9be511f054f0ecf4e5be68bd68bfe7eb25d0d09 100644 --- a/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts +++ b/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts @@ -101,7 +101,7 @@ initial-mode = <1>; /* initialize in HUB mode */ disabled-ports = <1>; intn-gpios = <&pio 7 5 GPIO_ACTIVE_HIGH>; /* PH5 */ - reset-gpios = <&pio 4 16 GPIO_ACTIVE_HIGH>; /* PE16 */ + reset-gpios = <&pio 4 16 GPIO_ACTIVE_LOW>; /* PE16 */ connect-gpios = <&pio 4 17 GPIO_ACTIVE_HIGH>; /* PE17 */ refclk-frequency = <19200000>; }; diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig index e7e4bb5ad8d5c3d73295ad724f9a7a6e8e74a217..fde84f123fbb5502ab07952e09e46c4e7c9b9cb2 100644 --- a/arch/arm/configs/exynos_defconfig +++ b/arch/arm/configs/exynos_defconfig @@ -350,6 +350,7 @@ CONFIG_PRINTK_TIME=y CONFIG_DYNAMIC_DEBUG=y CONFIG_DEBUG_INFO=y CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y CONFIG_SOFTLOCKUP_DETECTOR=y # CONFIG_DETECT_HUNG_TASK is not set diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig index 26d6dee67aa6de729fdb20921b795649d9f04219..3608e55eaecdf46aa52fec6ff02134773d07fad5 100644 --- a/arch/arm/configs/imx_v6_v7_defconfig +++ b/arch/arm/configs/imx_v6_v7_defconfig @@ -462,6 +462,7 @@ CONFIG_FONT_8x8=y CONFIG_FONT_8x16=y CONFIG_PRINTK_TIME=y 
CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_FS=y # CONFIG_SCHED_DEBUG is not set CONFIG_PROVE_LOCKING=y # CONFIG_DEBUG_BUGVERBOSE is not set diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index 8c37cc8ab6f2bb938f7fca8bd239054c286883ec..c32c338f770426bcb6c97e7a36ca2bd34b241b00 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -92,6 +92,7 @@ CONFIG_IP_PNP_BOOTP=y CONFIG_IP_PNP_RARP=y CONFIG_NETFILTER=y CONFIG_PHONET=m +CONFIG_NET_SWITCHDEV=y CONFIG_CAN=m CONFIG_CAN_C_CAN=m CONFIG_CAN_C_CAN_PLATFORM=m @@ -181,6 +182,7 @@ CONFIG_SMSC911X=y # CONFIG_NET_VENDOR_STMICRO is not set CONFIG_TI_DAVINCI_EMAC=y CONFIG_TI_CPSW=y +CONFIG_TI_CPSW_SWITCHDEV=y CONFIG_TI_CPTS=y # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set @@ -554,6 +556,6 @@ CONFIG_DEBUG_INFO=y CONFIG_DEBUG_INFO_SPLIT=y CONFIG_DEBUG_INFO_DWARF4=y CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_FS=y CONFIG_SCHEDSTATS=y # CONFIG_DEBUG_BUGVERBOSE is not set -CONFIG_TI_CPSW_SWITCHDEV=y diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig index bda57cafa2bcb9cc5ec572e35cf083195bbbded8..de3830443613e94956c9d463fcff15780702b145 100644 --- a/arch/arm/configs/shmobile_defconfig +++ b/arch/arm/configs/shmobile_defconfig @@ -212,4 +212,5 @@ CONFIG_DMA_CMA=y CONFIG_CMA_SIZE_MBYTES=64 CONFIG_PRINTK_TIME=y # CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index cea1c27c29cba2d81aefe19b5b94f94d55b87c85..46e478fb5ea203a7a42650ce4233d801db0c4277 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -226,8 +226,8 @@ void release_thread(struct task_struct *dead_task) asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); int -copy_thread(unsigned long clone_flags, unsigned long stack_start, - unsigned long stk_sz, struct task_struct *p) +copy_thread_tls(unsigned long clone_flags, unsigned long stack_start, + unsigned long stk_sz, struct task_struct *p, unsigned long tls) { struct thread_info *thread = task_thread_info(p); struct pt_regs *childregs = task_pt_regs(p); @@ -261,7 +261,7 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start, clear_ptrace_hw_breakpoint(p); if (clone_flags & CLONE_SETTLS) - thread->tp_value[0] = childregs->ARM_r3; + thread->tp_value[0] = tls; thread->tp_value[1] = get_tpuser(); thread_notify(THREAD_NOTIFY_COPY, thread); diff --git a/arch/arm/mach-bcm/bcm2711.c b/arch/arm/mach-bcm/bcm2711.c index dbe296798647fd92030a0ad2c5e57e14e2d0aa22..fa0300d8c79daae2da19e3290976994bc6536e1e 100644 --- a/arch/arm/mach-bcm/bcm2711.c +++ b/arch/arm/mach-bcm/bcm2711.c @@ -13,6 +13,7 @@ static const char * const bcm2711_compat[] = { #ifdef CONFIG_ARCH_MULTI_V7 "brcm,bcm2711", #endif + NULL }; DT_MACHINE_START(BCM2711, "BCM2711") diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig index dd427bd2768c3cf6f49152a1443cf154a51ad439..02b180ad724540c022d861aab5c6929f37af8060 100644 --- a/arch/arm/mach-davinci/Kconfig +++ b/arch/arm/mach-davinci/Kconfig @@ -9,6 +9,7 @@ menuconfig ARCH_DAVINCI select PM_GENERIC_DOMAINS if PM select PM_GENERIC_DOMAINS_OF if PM && OF select REGMAP_MMIO + select RESET_CONTROLLER select HAVE_IDE select PINCTRL_SINGLE diff --git a/arch/arm/mach-imx/cpu.c b/arch/arm/mach-imx/cpu.c index d8118031c51f710a668cbb7f66e4cb4025d4d38e..871f98342d50e67e9962ae863436bf839c8cb4e7 100644 --- a/arch/arm/mach-imx/cpu.c +++ b/arch/arm/mach-imx/cpu.c @@ -84,7 +84,7 @@ 
struct device * __init imx_soc_device_init(void) const char *ocotp_compat = NULL; struct soc_device *soc_dev; struct device_node *root; - struct regmap *ocotp; + struct regmap *ocotp = NULL; const char *soc_id; u64 soc_uid = 0; u32 val; @@ -148,11 +148,11 @@ struct device * __init imx_soc_device_init(void) soc_id = "i.MX6UL"; break; case MXC_CPU_IMX6ULL: - ocotp_compat = "fsl,imx6ul-ocotp"; + ocotp_compat = "fsl,imx6ull-ocotp"; soc_id = "i.MX6ULL"; break; case MXC_CPU_IMX6ULZ: - ocotp_compat = "fsl,imx6ul-ocotp"; + ocotp_compat = "fsl,imx6ull-ocotp"; soc_id = "i.MX6ULZ"; break; case MXC_CPU_IMX6SLL: @@ -175,7 +175,9 @@ struct device * __init imx_soc_device_init(void) ocotp = syscon_regmap_lookup_by_compatible(ocotp_compat); if (IS_ERR(ocotp)) pr_err("%s: failed to find %s regmap!\n", __func__, ocotp_compat); + } + if (!IS_ERR_OR_NULL(ocotp)) { regmap_read(ocotp, OCOTP_UID_H, &val); soc_uid = val; regmap_read(ocotp, OCOTP_UID_L, &val); diff --git a/arch/arm/mach-mmp/pxa168.h b/arch/arm/mach-mmp/pxa168.h index 0331c58b07a26a9c8415496c8da068f8b899d43a..dff651b9f252d4451dce883ec5fcd06d2129fda3 100644 --- a/arch/arm/mach-mmp/pxa168.h +++ b/arch/arm/mach-mmp/pxa168.h @@ -17,9 +17,9 @@ extern void pxa168_clear_keypad_wakeup(void); #include #include #include +#include #include "devices.h" -#include "cputype.h" extern struct pxa_device_desc pxa168_device_uart1; extern struct pxa_device_desc pxa168_device_uart2; diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c index 110dcb3314d13f04c4e783a5762dbcd401c6f77e..c65cfc1ad99b4798a69c0b233e984b0eaf01af3f 100644 --- a/arch/arm/mach-mmp/time.c +++ b/arch/arm/mach-mmp/time.c @@ -207,7 +207,7 @@ static int __init mmp_dt_init_timer(struct device_node *np) ret = clk_prepare_enable(clk); if (ret) return ret; - rate = clk_get_rate(clk) / 2; + rate = clk_get_rate(clk); } else if (cpu_is_pj4()) { rate = 6500000; } else { diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index ad08d470a2cac3f9f767844e0857a371ecb7bdf9..dca7d06c0b9386192010ca469f7a751cad515d36 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -95,6 +95,7 @@ config ARCH_OMAP2PLUS bool select ARCH_HAS_BANDGAP select ARCH_HAS_HOLES_MEMORYMODEL + select ARCH_HAS_RESET_CONTROLLER select ARCH_OMAP select CLKSRC_MMIO select GENERIC_IRQ_CHIP @@ -105,11 +106,11 @@ config ARCH_OMAP2PLUS select OMAP_DM_TIMER select OMAP_GPMC select PINCTRL + select RESET_CONTROLLER select SOC_BUS select TI_SYSC select OMAP_IRQCHIP select CLKSRC_TI_32K - select ARCH_HAS_RESET_CONTROLLER help Systems based on OMAP2, OMAP3, OMAP4 or OMAP5 diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c index ca52271de5a880d7e7e51bb05b21efc04ae706aa..e95c224ffc4d876d38f7b2ab5d07c2a1ed61ec59 100644 --- a/arch/arm/mach-omap2/pdata-quirks.c +++ b/arch/arm/mach-omap2/pdata-quirks.c @@ -306,10 +306,14 @@ static void __init dra7x_evm_mmc_quirk(void) static struct clockdomain *ti_sysc_find_one_clockdomain(struct clk *clk) { + struct clk_hw *hw = __clk_get_hw(clk); struct clockdomain *clkdm = NULL; struct clk_hw_omap *hwclk; - hwclk = to_clk_hw_omap(__clk_get_hw(clk)); + hwclk = to_clk_hw_omap(hw); + if (!omap2_clk_is_hw_omap(hw)) + return NULL; + if (hwclk && hwclk->clkdm_name) clkdm = clkdm_lookup(hwclk->clkdm_name); diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c index 354e0e7025aec5c1da2145ae4037f7e805868970..1da11bdb1dfbd6f1ce906b83ba2d84ecd396f316 100644 --- a/arch/arm/mach-vexpress/spc.c +++ b/arch/arm/mach-vexpress/spc.c @@ 
-551,8 +551,9 @@ static struct clk *ve_spc_clk_register(struct device *cpu_dev) static int __init ve_spc_clk_init(void) { - int cpu; + int cpu, cluster; struct clk *clk; + bool init_opp_table[MAX_CLUSTERS] = { false }; if (!info) return 0; /* Continue only if SPC is initialised */ @@ -578,8 +579,17 @@ static int __init ve_spc_clk_init(void) continue; } + cluster = topology_physical_package_id(cpu_dev->id); + if (init_opp_table[cluster]) + continue; + if (ve_init_opp_table(cpu_dev)) pr_warn("failed to initialise cpu%d opp table\n", cpu); + else if (dev_pm_opp_set_sharing_cpus(cpu_dev, + topology_core_cpumask(cpu_dev->id))) + pr_warn("failed to mark OPPs shared for cpu%d\n", cpu); + else + init_opp_table[cluster] = true; } platform_device_register_simple("vexpress-spc-cpufreq", -1, NULL, 0); diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index b1b4476ddb834ba64a8ba93e55e88d3a7282fed1..e688dfad0b726284e49aec814f84a68d15b6176e 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -138,6 +138,7 @@ config ARM64 select HAVE_CMPXCHG_DOUBLE select HAVE_CMPXCHG_LOCAL select HAVE_CONTEXT_TRACKING + select HAVE_COPY_THREAD_TLS select HAVE_DEBUG_BUGVERBOSE select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_CONTIGUOUS diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino-emmc.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino-emmc.dts index 96ab0227e82d187bc3b5eb50e3064dcabbe3ef3f..121e6cc4849b45139f5a396cec851352863fe841 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino-emmc.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino-emmc.dts @@ -15,7 +15,7 @@ pinctrl-names = "default"; pinctrl-0 = <&mmc2_pins>; vmmc-supply = <&reg_dcdc1>; - vqmmc-supply = <&reg_dcdc1>; + vqmmc-supply = <&reg_eldo1>; bus-width = <8>; non-removable; cap-mmc-hw-reset; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts index 01a9a52edae4afa88c43f821531345543742a64e..393c1948a4959beb2c07b87456f0a558fffc1c5f 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts @@ -140,7 +140,7 @@ &mmc1 { pinctrl-names = "default"; pinctrl-0 = <&mmc1_pins>; - vmmc-supply = <&reg_aldo2>; + vmmc-supply = <&reg_dcdc1>; vqmmc-supply = <&reg_dldo4>; mmc-pwrseq = <&wifi_pwrseq>; bus-width = <4>; diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi index 144a2c19ac02659a4b288d0df130ae26303d92cf..d1fc9c2055f4904c19e7661be8c1cb36082423ee 100644 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi @@ -61,10 +61,10 @@ pmu { compatible = "arm,armv8-pmuv3"; - interrupts = <0 120 8>, - <0 121 8>, - <0 122 8>, - <0 123 8>; + interrupts = <0 170 4>, + <0 171 4>, + <0 172 4>, + <0 173 4>; interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts index 5bd07469766bf53ed4f86b8096b5160bd68fc2c6..a8bb3fa9fec98e994ce2876b95e81e86b8369c02 100644 --- a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts +++ b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts @@ -46,25 +46,47 @@ }; gpio-keys { - compatible = "gpio-keys-polled"; - poll-interval = <100>; + compatible = "gpio-keys"; key1 { label = "A"; linux,code = ; gpios = <&gpio GPIOH_6 GPIO_ACTIVE_LOW>; + interrupt-parent = <&gpio_intc>; + interrupts = <34 IRQ_TYPE_EDGE_BOTH>; }; key2 { label = "B"; linux,code = ; gpios = <&gpio
GPIOH_7 GPIO_ACTIVE_LOW>; + interrupt-parent = <&gpio_intc>; + interrupts = <35 IRQ_TYPE_EDGE_BOTH>; }; key3 { label = "C"; linux,code = ; gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_LOW>; + interrupt-parent = <&gpio_intc>; + interrupts = <2 IRQ_TYPE_EDGE_BOTH>; + }; + + mic_mute { + label = "MicMute"; + linux,code = ; + linux,input-type = ; + gpios = <&gpio_ao GPIOE_2 GPIO_ACTIVE_LOW>; + interrupt-parent = <&gpio_intc>; + interrupts = <99 IRQ_TYPE_EDGE_BOTH>; + }; + + power_key { + label = "PowerKey"; + linux,code = ; + gpios = <&gpio_ao GPIOAO_3 GPIO_ACTIVE_LOW>; + interrupt-parent = <&gpio_intc>; + interrupts = <3 IRQ_TYPE_EDGE_BOTH>; }; }; @@ -569,6 +591,8 @@ bluetooth { compatible = "brcm,bcm43438-bt"; + interrupt-parent = <&gpio_intc>; + interrupts = <95 IRQ_TYPE_LEVEL_HIGH>; shutdown-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>; max-speed = <2000000>; clocks = <&wifi32k>; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi index 8e8a77eb596ae47afca5c62f1842b3a68c02d433..a6f9b7784e8fc8c98bcf90b152a136cc97691c7c 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi @@ -88,7 +88,7 @@ reboot { compatible ="syscon-reboot"; - regmap = <&dcfg>; + regmap = <&rst>; offset = <0xb0>; mask = <0x02>; }; @@ -175,7 +175,13 @@ dcfg: syscon@1e00000 { compatible = "fsl,ls1028a-dcfg", "syscon"; reg = <0x0 0x1e00000 0x0 0x10000>; - big-endian; + little-endian; + }; + + rst: syscon@1e60000 { + compatible = "syscon"; + reg = <0x0 0x1e60000 0x0 0x10000>; + little-endian; }; scfg: syscon@1fc0000 { @@ -584,7 +590,7 @@ 0x00010004 0x0000003d 0x00010005 0x00000045 0x00010006 0x0000004d - 0x00010007 0x00000045 + 0x00010007 0x00000055 0x00010008 0x0000005e 0x00010009 0x00000066 0x0001000a 0x0000006e diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi index 6edbdfe2d0d7c44bec5ff1aeec193b667e2f1050..3d95b66a2d71c0fa97aba2e37d8855c44d295706 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi @@ -740,7 +740,7 @@ reg = <0x30bd0000 0x10000>; interrupts = ; clocks = <&clk IMX8MM_CLK_SDMA1_ROOT>, - <&clk IMX8MM_CLK_SDMA1_ROOT>; + <&clk IMX8MM_CLK_AHB>; clock-names = "ipg", "ahb"; #dma-cells = <3>; fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin"; diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts b/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts index 2a759dff9f87168f00a8c68f920cb846cf9d8ba6..596bc65f475c21a291caa3f143a1ac1e27b91d2f 100644 --- a/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts +++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts @@ -421,7 +421,7 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_imu>; interrupt-parent = <&gpio3>; - interrupts = <19 IRQ_TYPE_LEVEL_LOW>; + interrupts = <19 IRQ_TYPE_LEVEL_HIGH>; vdd-supply = <&reg_3v3_p>; vddio-supply = <&reg_3v3_p>; }; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi index 94090c6fb946a6bee451056b935a00d80fbe0ec7..d43e1299c8ef618d65232a7dfcd7fdc9150ebf5b 100644 --- a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi +++ b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi @@ -60,10 +60,10 @@ pmu { compatible = "arm,armv8-pmuv3"; - interrupts = <0 120 8>, - <0 121 8>, - <0 122 8>, - <0 123 8>; + interrupts = <0 170 4>, + <0 171 4>, + <0 172 4>, + <0 173 4>; interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, diff --git a/arch/arm64/boot/dts/rockchip/rk3328-a1.dts
b/arch/arm64/boot/dts/rockchip/rk3328-a1.dts index 76b49f57310152829a1705ae2b7be8e32ec016df..16f1656d5203bd95d9c0095bae10ebf0d17b46fe 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-a1.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-a1.dts @@ -49,7 +49,8 @@ ir-receiver { compatible = "gpio-ir-receiver"; - gpios = <&gpio2 RK_PA2 GPIO_ACTIVE_HIGH>; + gpios = <&gpio2 RK_PA2 GPIO_ACTIVE_LOW>; + linux,rc-map-name = "rc-beelink-gs1"; }; }; diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index 8dc6c5cdabe62e2f63065abd525ff516fe685387..baf52baaa2a5d0e39c557ada3a18a6b9b89088a4 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -85,13 +85,12 @@ #define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE) #define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN) #define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN) -#define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN) #define __P000 PAGE_NONE #define __P001 PAGE_READONLY #define __P010 PAGE_READONLY #define __P011 PAGE_READONLY -#define __P100 PAGE_EXECONLY +#define __P100 PAGE_READONLY_EXEC #define __P101 PAGE_READONLY_EXEC #define __P110 PAGE_READONLY_EXEC #define __P111 PAGE_READONLY_EXEC @@ -100,7 +99,7 @@ #define __S001 PAGE_READONLY #define __S010 PAGE_SHARED #define __S011 PAGE_SHARED -#define __S100 PAGE_EXECONLY +#define __S100 PAGE_READONLY_EXEC #define __S101 PAGE_READONLY_EXEC #define __S110 PAGE_SHARED_EXEC #define __S111 PAGE_SHARED_EXEC diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 5d15b4735a0eba76569cae8598aa49468c702bad..cd5de0e40bfa06cf6e6fa5e094a80965b6796599 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -96,12 +96,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte)) #define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID)) -/* - * Execute-only user mappings do not have the PTE_USER bit set. All valid - * kernel mappings have the PTE_UXN bit set. - */ #define pte_valid_not_user(pte) \ - ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN)) + ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID) #define pte_valid_young(pte) \ ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF)) #define pte_valid_user(pte) \ @@ -117,8 +113,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; /* * p??_access_permitted() is true for valid user mappings (subject to the - * write permission check) other than user execute-only which do not have the - * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set. + * write permission check). PROT_NONE mappings do not have the PTE_VALID bit + * set. 
*/ #define pte_access_permitted(pte, write) \ (pte_valid_user(pte) && (!(write) || pte_write(pte))) diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index 2629a68b87244facd78a6ad5e9d4b18ca6ac18ad..5af82587909ef9931d4201f2a2850feb6d25bdcf 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h @@ -42,7 +42,6 @@ #endif #define __ARCH_WANT_SYS_CLONE -#define __ARCH_WANT_SYS_CLONE3 #ifndef __COMPAT_SYSCALL_NR #include diff --git a/arch/arm64/include/uapi/asm/unistd.h b/arch/arm64/include/uapi/asm/unistd.h index 4703d218663a2ad81e7c8d4fd0749bed8199ef4f..f83a70e07df85ca5029a1e91cde93b8e0dd9fb7e 100644 --- a/arch/arm64/include/uapi/asm/unistd.h +++ b/arch/arm64/include/uapi/asm/unistd.h @@ -19,5 +19,6 @@ #define __ARCH_WANT_NEW_STAT #define __ARCH_WANT_SET_GET_RLIMIT #define __ARCH_WANT_TIME32_SYSCALLS +#define __ARCH_WANT_SYS_CLONE3 #include diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 6a09ca7644ea16faa70073625d05f19da0605bdd..85f4bec22f6d4ead1f60b0a1b380ce479867e207 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -547,6 +547,7 @@ static const struct midr_range spectre_v2_safe_list[] = { MIDR_ALL_VERSIONS(MIDR_CORTEX_A53), MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53), + MIDR_ALL_VERSIONS(MIDR_HISI_TSV110), { /* sentinel */ } }; diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 71f788cd2b18772629c4627be979f75753253d08..d54586d5b031f874e7fa70814cd618ac3c16ceca 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -360,8 +360,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) asmlinkage void ret_from_fork(void) asm("ret_from_fork"); -int copy_thread(unsigned long clone_flags, unsigned long stack_start, - unsigned long stk_sz, struct task_struct *p) +int copy_thread_tls(unsigned long clone_flags, unsigned long stack_start, + unsigned long stk_sz, struct task_struct *p, unsigned long tls) { struct pt_regs *childregs = task_pt_regs(p); @@ -394,11 +394,11 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, } /* - * If a TLS pointer was passed to clone (4th argument), use it - * for the new thread. + * If a TLS pointer was passed to clone, use it for the new + * thread. 
*/ if (clone_flags & CLONE_SETTLS) - p->thread.uw.tp_value = childregs->regs[3]; + p->thread.uw.tp_value = tls; } else { memset(childregs, 0, sizeof(struct pt_regs)); childregs->pstate = PSR_MODE_EL1h; diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 46822afc57e00461843b56f72cd403a939f17e69..9f2165937f7d82752855ae59bb0dd32f82740134 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -2098,9 +2098,9 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu, WARN_ON(1); } - kvm_err("Unsupported guest CP%d access at: %08lx [%08lx]\n", - cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu)); - print_sys_reg_instr(params); + print_sys_reg_msg(params, + "Unsupported guest CP%d access at: %08lx [%08lx]\n", + cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu)); kvm_inject_undefined(vcpu); } @@ -2233,6 +2233,12 @@ int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run) NULL, 0); } +static bool is_imp_def_sys_reg(struct sys_reg_params *params) +{ + // See ARM DDI 0487E.a, section D12.3.2 + return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011; +} + static int emulate_sys_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *params) { @@ -2248,10 +2254,12 @@ static int emulate_sys_reg(struct kvm_vcpu *vcpu, if (likely(r)) { perform_access(vcpu, params, r); + } else if (is_imp_def_sys_reg(params)) { + kvm_inject_undefined(vcpu); } else { - kvm_err("Unsupported guest sys_reg access at: %lx [%08lx]\n", - *vcpu_pc(vcpu), *vcpu_cpsr(vcpu)); - print_sys_reg_instr(params); + print_sys_reg_msg(params, + "Unsupported guest sys_reg access at: %lx [%08lx]\n", + *vcpu_pc(vcpu), *vcpu_cpsr(vcpu)); kvm_inject_undefined(vcpu); } return 1; @@ -2360,8 +2368,11 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu, if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG) return NULL; + if (!index_to_params(id, &params)) + return NULL; + table = get_target_table(vcpu->arch.target, true, &num); - r = find_reg_by_id(id, &params, table, num); + r = find_reg(&params, table, num); if (!r) r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h index 9bca0312d7982c4d1a8620ef907ea683abd6c598..5a6fc30f59894db482b667c20dd892baba0e6c7f 100644 --- a/arch/arm64/kvm/sys_regs.h +++ b/arch/arm64/kvm/sys_regs.h @@ -62,11 +62,24 @@ struct sys_reg_desc { #define REG_HIDDEN_USER (1 << 0) /* hidden from userspace ioctls */ #define REG_HIDDEN_GUEST (1 << 1) /* hidden from guest */ -static inline void print_sys_reg_instr(const struct sys_reg_params *p) +static __printf(2, 3) +inline void print_sys_reg_msg(const struct sys_reg_params *p, + char *fmt, ...) { + va_list va; + + va_start(va, fmt); /* Look, we even formatted it for you to paste into the table! */ - kvm_pr_unimpl(" { Op0(%2u), Op1(%2u), CRn(%2u), CRm(%2u), Op2(%2u), func_%s },\n", + kvm_pr_unimpl("%pV { Op0(%2u), Op1(%2u), CRn(%2u), CRm(%2u), Op2(%2u), func_%s },\n", + &(struct va_format){ fmt, &va }, p->Op0, p->Op1, p->CRn, p->CRm, p->Op2, p->is_write ?
"write" : "read"); + va_end(va); +} + +static inline void print_sys_reg_instr(const struct sys_reg_params *p) +{ + /* GCC warns on an empty format string */ + print_sys_reg_msg(p, "%s", ""); } static inline bool ignore_write(struct kvm_vcpu *vcpu, diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 077b02a2d4d3333e9af010f6c92a011b60f0bd1a..85566d32958f5b8fcc5bdc35ebc2d9d2b2d0469a 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -445,7 +445,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, const struct fault_info *inf; struct mm_struct *mm = current->mm; vm_fault_t fault, major = 0; - unsigned long vm_flags = VM_READ | VM_WRITE; + unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC; unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; if (kprobe_page_fault(regs, esr)) diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 5a3b15a14a7f116bc6b61a9279ed5ce84723702e..40797cbfba2d67412eee7864325e9f89adeb08d2 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -1070,7 +1070,6 @@ void arch_remove_memory(int nid, u64 start, u64 size, { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; - struct zone *zone; /* * FIXME: Cleanup page tables (also in arch_add_memory() in case @@ -1079,7 +1078,6 @@ void arch_remove_memory(int nid, u64 start, u64 size, * unplug. ARCH_ENABLE_MEMORY_HOTREMOVE must not be * unlocked yet. */ - zone = page_zone(pfn_to_page(start_pfn)); - __remove_pages(zone, start_pfn, nr_pages, altmap); + __remove_pages(start_pfn, nr_pages, altmap); } #endif diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h index 12cd9231c4b8fb33d7687a68fd593271a632c946..0231d69c8bf2bf35285a21a3117c5a8f2ab26f1a 100644 --- a/arch/hexagon/include/asm/atomic.h +++ b/arch/hexagon/include/asm/atomic.h @@ -91,7 +91,7 @@ static inline void atomic_##op(int i, atomic_t *v) \ "1: %0 = memw_locked(%1);\n" \ " %0 = "#op "(%0,%2);\n" \ " memw_locked(%1,P3)=%0;\n" \ - " if !P3 jump 1b;\n" \ + " if (!P3) jump 1b;\n" \ : "=&r" (output) \ : "r" (&v->counter), "r" (i) \ : "memory", "p3" \ @@ -107,7 +107,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ "1: %0 = memw_locked(%1);\n" \ " %0 = "#op "(%0,%2);\n" \ " memw_locked(%1,P3)=%0;\n" \ - " if !P3 jump 1b;\n" \ + " if (!P3) jump 1b;\n" \ : "=&r" (output) \ : "r" (&v->counter), "r" (i) \ : "memory", "p3" \ @@ -124,7 +124,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \ "1: %0 = memw_locked(%2);\n" \ " %1 = "#op "(%0,%3);\n" \ " memw_locked(%2,P3)=%1;\n" \ - " if !P3 jump 1b;\n" \ + " if (!P3) jump 1b;\n" \ : "=&r" (output), "=&r" (val) \ : "r" (&v->counter), "r" (i) \ : "memory", "p3" \ @@ -173,7 +173,7 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) " }" " memw_locked(%2, p3) = %1;" " {" - " if !p3 jump 1b;" + " if (!p3) jump 1b;" " }" "2:" : "=&r" (__oldval), "=&r" (tmp) diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h index 47384b094b9445598bb2f859f26bba81e444a396..71429f756af0f45a846598529055b34b1d70f269 100644 --- a/arch/hexagon/include/asm/bitops.h +++ b/arch/hexagon/include/asm/bitops.h @@ -38,7 +38,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr) "1: R12 = memw_locked(R10);\n" " { P0 = tstbit(R12,R11); R12 = clrbit(R12,R11); }\n" " memw_locked(R10,P1) = R12;\n" - " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n" + " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n" : "=&r" (oldval) : "r" (addr), "r" (nr) : 
"r10", "r11", "r12", "p0", "p1", "memory" @@ -62,7 +62,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr) "1: R12 = memw_locked(R10);\n" " { P0 = tstbit(R12,R11); R12 = setbit(R12,R11); }\n" " memw_locked(R10,P1) = R12;\n" - " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n" + " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n" : "=&r" (oldval) : "r" (addr), "r" (nr) : "r10", "r11", "r12", "p0", "p1", "memory" @@ -88,7 +88,7 @@ static inline int test_and_change_bit(int nr, volatile void *addr) "1: R12 = memw_locked(R10);\n" " { P0 = tstbit(R12,R11); R12 = togglebit(R12,R11); }\n" " memw_locked(R10,P1) = R12;\n" - " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n" + " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n" : "=&r" (oldval) : "r" (addr), "r" (nr) : "r10", "r11", "r12", "p0", "p1", "memory" @@ -223,7 +223,7 @@ static inline int ffs(int x) int r; asm("{ P0 = cmp.eq(%1,#0); %0 = ct0(%1);}\n" - "{ if P0 %0 = #0; if !P0 %0 = add(%0,#1);}\n" + "{ if (P0) %0 = #0; if (!P0) %0 = add(%0,#1);}\n" : "=&r" (r) : "r" (x) : "p0"); diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h index 6091322c3af9639f2eaa144c5842f41e5a08e123..92b8a02e588ac256341a82053df70340089fd34b 100644 --- a/arch/hexagon/include/asm/cmpxchg.h +++ b/arch/hexagon/include/asm/cmpxchg.h @@ -30,7 +30,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, __asm__ __volatile__ ( "1: %0 = memw_locked(%1);\n" /* load into retval */ " memw_locked(%1,P0) = %2;\n" /* store into memory */ - " if !P0 jump 1b;\n" + " if (!P0) jump 1b;\n" : "=&r" (retval) : "r" (ptr), "r" (x) : "memory", "p0" diff --git a/arch/hexagon/include/asm/futex.h b/arch/hexagon/include/asm/futex.h index cb635216a732c98c2d05e965762696543d7ec3e4..0191f7c7193e6bd2a8700d2813627b60a701a419 100644 --- a/arch/hexagon/include/asm/futex.h +++ b/arch/hexagon/include/asm/futex.h @@ -16,7 +16,7 @@ /* For example: %1 = %4 */ \ insn \ "2: memw_locked(%3,p2) = %1;\n" \ - " if !p2 jump 1b;\n" \ + " if (!p2) jump 1b;\n" \ " %1 = #0;\n" \ "3:\n" \ ".section .fixup,\"ax\"\n" \ @@ -84,10 +84,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, "1: %1 = memw_locked(%3)\n" " {\n" " p2 = cmp.eq(%1,%4)\n" - " if !p2.new jump:NT 3f\n" + " if (!p2.new) jump:NT 3f\n" " }\n" "2: memw_locked(%3,p2) = %5\n" - " if !p2 jump 1b\n" + " if (!p2) jump 1b\n" "3:\n" ".section .fixup,\"ax\"\n" "4: %0 = #%6\n" diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h index 539e3efcf39c6ed595fea142a39384340544e917..b0dbc347317240ca26a73cc422d4ced3154b1e08 100644 --- a/arch/hexagon/include/asm/io.h +++ b/arch/hexagon/include/asm/io.h @@ -173,6 +173,7 @@ static inline void writel(u32 data, volatile void __iomem *addr) void __iomem *ioremap(unsigned long phys_addr, unsigned long size); #define ioremap_nocache ioremap +#define ioremap_uc(X, Y) ioremap((X), (Y)) #define __raw_writel writel diff --git a/arch/hexagon/include/asm/spinlock.h b/arch/hexagon/include/asm/spinlock.h index bfe07d842ff35c4ac70b9e9d96e2b3ffe9789a8d..ef103b73bec8388df8258e4ac132cbc1db4ebfbc 100644 --- a/arch/hexagon/include/asm/spinlock.h +++ b/arch/hexagon/include/asm/spinlock.h @@ -30,9 +30,9 @@ static inline void arch_read_lock(arch_rwlock_t *lock) __asm__ __volatile__( "1: R6 = memw_locked(%0);\n" " { P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n" - " { if !P3 jump 1b; }\n" + " { if (!P3) jump 1b; }\n" " memw_locked(%0,P3) = R6;\n" - " { if !P3 jump 1b; }\n" + " { if (!P3) jump 1b; }\n" : : "r" (&lock->lock) : "memory", "r6", "p3" @@ -46,7 +46,7 @@ 
static inline void arch_read_unlock(arch_rwlock_t *lock) "1: R6 = memw_locked(%0);\n" " R6 = add(R6,#-1);\n" " memw_locked(%0,P3) = R6\n" - " if !P3 jump 1b;\n" + " if (!P3) jump 1b;\n" : : "r" (&lock->lock) : "memory", "r6", "p3" @@ -61,7 +61,7 @@ static inline int arch_read_trylock(arch_rwlock_t *lock) __asm__ __volatile__( " R6 = memw_locked(%1);\n" " { %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n" - " { if !P3 jump 1f; }\n" + " { if (!P3) jump 1f; }\n" " memw_locked(%1,P3) = R6;\n" " { %0 = P3 }\n" "1:\n" @@ -78,9 +78,9 @@ static inline void arch_write_lock(arch_rwlock_t *lock) __asm__ __volatile__( "1: R6 = memw_locked(%0)\n" " { P3 = cmp.eq(R6,#0); R6 = #-1;}\n" - " { if !P3 jump 1b; }\n" + " { if (!P3) jump 1b; }\n" " memw_locked(%0,P3) = R6;\n" - " { if !P3 jump 1b; }\n" + " { if (!P3) jump 1b; }\n" : : "r" (&lock->lock) : "memory", "r6", "p3" @@ -94,7 +94,7 @@ static inline int arch_write_trylock(arch_rwlock_t *lock) __asm__ __volatile__( " R6 = memw_locked(%1)\n" " { %0 = #0; P3 = cmp.eq(R6,#0); R6 = #-1;}\n" - " { if !P3 jump 1f; }\n" + " { if (!P3) jump 1f; }\n" " memw_locked(%1,P3) = R6;\n" " %0 = P3;\n" "1:\n" @@ -117,9 +117,9 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) __asm__ __volatile__( "1: R6 = memw_locked(%0);\n" " P3 = cmp.eq(R6,#0);\n" - " { if !P3 jump 1b; R6 = #1; }\n" + " { if (!P3) jump 1b; R6 = #1; }\n" " memw_locked(%0,P3) = R6;\n" - " { if !P3 jump 1b; }\n" + " { if (!P3) jump 1b; }\n" : : "r" (&lock->lock) : "memory", "r6", "p3" @@ -139,7 +139,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) __asm__ __volatile__( " R6 = memw_locked(%1);\n" " P3 = cmp.eq(R6,#0);\n" - " { if !P3 jump 1f; R6 = #1; %0 = #0; }\n" + " { if (!P3) jump 1f; R6 = #1; %0 = #0; }\n" " memw_locked(%1,P3) = R6;\n" " %0 = P3;\n" "1:\n" diff --git a/arch/hexagon/kernel/stacktrace.c b/arch/hexagon/kernel/stacktrace.c index 35f29423fda808f6edd8202da932ce89ae728eae..5ed02f699479ad68c516444dceae7223452c1e0c 100644 --- a/arch/hexagon/kernel/stacktrace.c +++ b/arch/hexagon/kernel/stacktrace.c @@ -11,8 +11,6 @@ #include #include -register unsigned long current_frame_pointer asm("r30"); - struct stackframe { unsigned long fp; unsigned long rets; @@ -30,7 +28,7 @@ void save_stack_trace(struct stack_trace *trace) low = (unsigned long)task_stack_page(current); high = low + THREAD_SIZE; - fp = current_frame_pointer; + fp = (unsigned long)__builtin_frame_address(0); while (fp >= low && fp <= (high - sizeof(*frame))) { frame = (struct stackframe *)fp; diff --git a/arch/hexagon/kernel/vm_entry.S b/arch/hexagon/kernel/vm_entry.S index 12242c27e2df59257cd60fadbd24480e81d365d8..4023fdbea4902e090e0798cf2c2f1006b1d2b259 100644 --- a/arch/hexagon/kernel/vm_entry.S +++ b/arch/hexagon/kernel/vm_entry.S @@ -369,7 +369,7 @@ ret_from_fork: R26.L = #LO(do_work_pending); R0 = #VM_INT_DISABLE; } - if P0 jump check_work_pending + if (P0) jump check_work_pending { R0 = R25; callr R24 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 58fd67068bac5036e96514039688fc3844a635aa..b01d68a2d5d97360d0b612e465fb70635aa80ef6 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -689,9 +689,7 @@ void arch_remove_memory(int nid, u64 start, u64 size, { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; - struct zone *zone; - zone = page_zone(pfn_to_page(start_pfn)); - __remove_pages(zone, start_pfn, nr_pages, altmap); + __remove_pages(start_pfn, nr_pages, altmap); } #endif diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 
add388236f4ef2cdf2558b0d282605fadce6912a..ed8e28b0fb3e0375ed8967a3d1e214485db696bb 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -47,7 +47,7 @@ config MIPS select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES select HAVE_ASM_MODVERSIONS - select HAVE_EBPF_JIT if (!CPU_MICROMIPS) + select HAVE_EBPF_JIT if 64BIT && !CPU_MICROMIPS && TARGET_ISA_REV >= 2 select HAVE_CONTEXT_TRACKING select HAVE_COPY_THREAD_TLS select HAVE_C_RECORDMCOUNT diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile index 172801ed35b89994f6a52e492a7c486cb517d821..d859f079b771a158208b0cef571a22a22c3f67ac 100644 --- a/arch/mips/boot/compressed/Makefile +++ b/arch/mips/boot/compressed/Makefile @@ -29,6 +29,9 @@ KBUILD_AFLAGS := $(KBUILD_AFLAGS) -D__ASSEMBLY__ \ -DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) \ -DKERNEL_ENTRY=$(VMLINUX_ENTRY_ADDRESS) +# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in. +KCOV_INSTRUMENT := n + # decompressor objects (linked with vmlinuz) vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/string.o diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h index c46c59b0f1b40ce0b4b9abee2176056b1ba95253..49f0061a60514358358da251adc2a76634eb7969 100644 --- a/arch/mips/include/asm/cpu-type.h +++ b/arch/mips/include/asm/cpu-type.h @@ -15,7 +15,8 @@ static inline int __pure __get_cpu_type(const int cpu_type) { switch (cpu_type) { -#if defined(CONFIG_SYS_HAS_CPU_LOONGSON2EF) +#if defined(CONFIG_SYS_HAS_CPU_LOONGSON2E) || \ + defined(CONFIG_SYS_HAS_CPU_LOONGSON2F) case CPU_LOONGSON2EF: #endif diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index 4993db40482c80fa17cacec1ef54f4e356b6d26d..ee26f9a4575dfc2b09568855015200043ca0ffbe 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h @@ -49,8 +49,26 @@ struct thread_info { .addr_limit = KERNEL_DS, \ } -/* How to get the thread information struct from C. */ +/* + * A pointer to the struct thread_info for the currently executing thread is + * held in register $28/$gp. + * + * We declare __current_thread_info as a global register variable rather than a + * local register variable within current_thread_info() because clang doesn't + * support explicit local register variables. + * + * When building the VDSO we take care not to declare the global register + * variable because this causes GCC to not preserve the value of $28/$gp in + * functions that change its value (which is common in the PIC VDSO when + * accessing the GOT). Since the VDSO shouldn't be accessing + * __current_thread_info anyway we declare it extern in order to cause a link + * failure if it's referenced. + */ +#ifdef __VDSO__ +extern struct thread_info *__current_thread_info; +#else register struct thread_info *__current_thread_info __asm__("$28"); +#endif static inline struct thread_info *current_thread_info(void) { diff --git a/arch/mips/include/asm/vdso/gettimeofday.h b/arch/mips/include/asm/vdso/gettimeofday.h index b08825531e9f9a3ce89e926ffc3e096fbfe14fc0..0ae9b4cbc1537ad3604c4b4cbca6179e05671ed7 100644 --- a/arch/mips/include/asm/vdso/gettimeofday.h +++ b/arch/mips/include/asm/vdso/gettimeofday.h @@ -26,8 +26,6 @@ #define __VDSO_USE_SYSCALL ULLONG_MAX -#ifdef CONFIG_MIPS_CLOCK_VSYSCALL - static __always_inline long gettimeofday_fallback( struct __kernel_old_timeval *_tv, struct timezone *_tz) @@ -48,17 +46,6 @@ static __always_inline long gettimeofday_fallback( return error ? 
-ret : ret; } -#else - -static __always_inline long gettimeofday_fallback( - struct __kernel_old_timeval *_tv, - struct timezone *_tz) -{ - return -1; -} - -#endif - static __always_inline long clock_gettime_fallback( clockid_t _clkid, struct __kernel_timespec *_ts) diff --git a/arch/mips/kernel/cacheinfo.c b/arch/mips/kernel/cacheinfo.c index f777e44653d5767b953483f22158d24e84e6461f..47312c5294102264700c12b9955d39b9cd60ab79 100644 --- a/arch/mips/kernel/cacheinfo.c +++ b/arch/mips/kernel/cacheinfo.c @@ -50,6 +50,25 @@ static int __init_cache_level(unsigned int cpu) return 0; } +static void fill_cpumask_siblings(int cpu, cpumask_t *cpu_map) +{ + int cpu1; + + for_each_possible_cpu(cpu1) + if (cpus_are_siblings(cpu, cpu1)) + cpumask_set_cpu(cpu1, cpu_map); +} + +static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map) +{ + int cpu1; + int cluster = cpu_cluster(&cpu_data[cpu]); + + for_each_possible_cpu(cpu1) + if (cpu_cluster(&cpu_data[cpu1]) == cluster) + cpumask_set_cpu(cpu1, cpu_map); +} + static int __populate_cache_leaves(unsigned int cpu) { struct cpuinfo_mips *c = &current_cpu_data; @@ -57,14 +76,20 @@ static int __populate_cache_leaves(unsigned int cpu) struct cacheinfo *this_leaf = this_cpu_ci->info_list; if (c->icache.waysize) { + /* L1 caches are per core */ + fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map); populate_cache(dcache, this_leaf, 1, CACHE_TYPE_DATA); + fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map); populate_cache(icache, this_leaf, 1, CACHE_TYPE_INST); } else { populate_cache(dcache, this_leaf, 1, CACHE_TYPE_UNIFIED); } - if (c->scache.waysize) + if (c->scache.waysize) { + /* L2 cache is per cluster */ + fill_cpumask_cluster(cpu, &this_leaf->shared_cpu_map); populate_cache(scache, this_leaf, 2, CACHE_TYPE_UNIFIED); + } if (c->tcache.waysize) populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED); diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c index 46b76751f3a5fface8c7c2578487a3c043f9c258..561154cbcc401eb8e5eee381af4873f638583fc1 100644 --- a/arch/mips/net/ebpf_jit.c +++ b/arch/mips/net/ebpf_jit.c @@ -604,6 +604,7 @@ static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value) static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx) { int off, b_off; + int tcc_reg; ctx->flags |= EBPF_SEEN_TC; /* * @@ -616,14 +617,14 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx) b_off = b_imm(this_idx + 1, ctx); emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off); /* - * if (--TCC < 0) + * if (TCC-- < 0) * goto out; */ /* Delay slot */ - emit_instr(ctx, daddiu, MIPS_R_T5, - (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4, -1); + tcc_reg = (ctx->flags & EBPF_TCC_IN_V1) ?
MIPS_R_V1 : MIPS_R_S4; + emit_instr(ctx, daddiu, MIPS_R_T5, tcc_reg, -1); b_off = b_imm(this_idx + 1, ctx); - emit_instr(ctx, bltz, MIPS_R_T5, b_off); + emit_instr(ctx, bltz, tcc_reg, b_off); /* * prog = array->ptrs[index]; * if (prog == NULL) @@ -1803,7 +1804,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) unsigned int image_size; u8 *image_ptr; - if (!prog->jit_requested || MIPS_ISA_REV < 2) + if (!prog->jit_requested) return prog; tmp = bpf_jit_blind_constants(prog); diff --git a/arch/mips/vdso/vgettimeofday.c b/arch/mips/vdso/vgettimeofday.c index 6ebdc37c89fc6a11a32314136c99a7f1a95bf7f3..6b83b6376a4b58d340f1c464cbbf402c588caf73 100644 --- a/arch/mips/vdso/vgettimeofday.c +++ b/arch/mips/vdso/vgettimeofday.c @@ -17,12 +17,22 @@ int __vdso_clock_gettime(clockid_t clock, return __cvdso_clock_gettime32(clock, ts); } +#ifdef CONFIG_MIPS_CLOCK_VSYSCALL + +/* + * This is behind the ifdef so that we don't provide the symbol when there's no + * possibility of there being a usable clocksource, because there's nothing we + * can do without it. When libc fails the symbol lookup it should fall back on + * the standard syscall path. + */ int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) { return __cvdso_gettimeofday(tv, tz); } +#endif /* CONFIG_MIPS_CLOCK_VSYSCALL */ + int __vdso_clock_getres(clockid_t clock_id, struct old_timespec32 *res) { @@ -43,12 +53,22 @@ int __vdso_clock_gettime(clockid_t clock, return __cvdso_clock_gettime(clock, ts); } +#ifdef CONFIG_MIPS_CLOCK_VSYSCALL + +/* + * This is behind the ifdef so that we don't provide the symbol when there's no + * possibility of there being a usable clocksource, because there's nothing we + * can do without it. When libc fails the symbol lookup it should fall back on + * the standard syscall path. 
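As an illustration of that fallback contract, here is a minimal userspace sketch; the gettimeofday_or_fallback() wrapper is a hypothetical name, and a real libc resolves the symbol out of the vDSO image at load time rather than through a weak reference:

	#include <sys/syscall.h>
	#include <sys/time.h>
	#include <unistd.h>

	/* Weak reference: resolves to NULL when the vDSO omits the symbol. */
	extern int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
		__attribute__((weak));

	static int gettimeofday_or_fallback(struct timeval *tv, struct timezone *tz)
	{
		if (__vdso_gettimeofday)			/* symbol present: vDSO fast path */
			return __vdso_gettimeofday(tv, tz);
		return syscall(SYS_gettimeofday, tv, tz);	/* symbol absent: real syscall */
	}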
+ */ int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) { return __cvdso_gettimeofday(tv, tz); } +#endif /* CONFIG_MIPS_CLOCK_VSYSCALL */ + int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res) { diff --git a/arch/nds32/include/asm/cacheflush.h b/arch/nds32/include/asm/cacheflush.h index d9ac7e6408ef3196253f3c6838d111a51a1f0973..caddded56e77f3603e93bbb0496b52a2dcb3a60a 100644 --- a/arch/nds32/include/asm/cacheflush.h +++ b/arch/nds32/include/asm/cacheflush.h @@ -9,7 +9,11 @@ #define PG_dcache_dirty PG_arch_1 void flush_icache_range(unsigned long start, unsigned long end); +#define flush_icache_range flush_icache_range + void flush_icache_page(struct vm_area_struct *vma, struct page *page); +#define flush_icache_page flush_icache_page + #ifdef CONFIG_CPU_CACHE_ALIASING void flush_cache_mm(struct mm_struct *mm); void flush_cache_dup_mm(struct mm_struct *mm); @@ -40,12 +44,11 @@ void invalidate_kernel_vmap_range(void *addr, int size); #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&(mapping)->i_pages) #else -#include -#undef flush_icache_range -#undef flush_icache_page -#undef flush_icache_user_range void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, unsigned long addr, int len); +#define flush_icache_user_range flush_icache_user_range + +#include #endif #endif /* __NDS32_CACHEFLUSH_H__ */ diff --git a/arch/nds32/include/asm/pgtable.h b/arch/nds32/include/asm/pgtable.h index 0214e415053902c34d65ecf03d4dd581c49fb72b..6abc58ac406dcd542b3bc5497d80452cba52ff0b 100644 --- a/arch/nds32/include/asm/pgtable.h +++ b/arch/nds32/include/asm/pgtable.h @@ -195,7 +195,7 @@ extern void paging_init(void); #define pte_unmap(pte) do { } while (0) #define pte_unmap_nested(pte) do { } while (0) -#define pmd_off_k(address) pmd_offset(pgd_offset_k(address), address) +#define pmd_off_k(address) pmd_offset(pud_offset(p4d_offset(pgd_offset_k(address), (address)), (address)), (address)) #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) /* diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index b16237c95ea33bd1fb75f50b91fbd6ae053b1687..0c29d6cb2c8dfd1667e705710475b003fcad1a85 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -62,6 +62,7 @@ config PARISC select HAVE_FTRACE_MCOUNT_RECORD if HAVE_DYNAMIC_FTRACE select HAVE_KPROBES_ON_FTRACE select HAVE_DYNAMIC_FTRACE_WITH_REGS + select HAVE_COPY_THREAD_TLS help The PA-RISC microprocessor is designed by Hewlett-Packard and used diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h index f627c37dad9c92a7beeb277ead459726afcf5dad..ab5c215cf46c3d81a5ef840fe46ffe740174c7ec 100644 --- a/arch/parisc/include/asm/cmpxchg.h +++ b/arch/parisc/include/asm/cmpxchg.h @@ -44,8 +44,14 @@ __xchg(unsigned long x, __volatile__ void *ptr, int size) ** if (((unsigned long)p & 0xf) == 0) ** return __ldcw(p); */ -#define xchg(ptr, x) \ - ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))) +#define xchg(ptr, x) \ +({ \ + __typeof__(*(ptr)) __ret; \ + __typeof__(*(ptr)) _x_ = (x); \ + __ret = (__typeof__(*(ptr))) \ + __xchg((unsigned long)_x_, (ptr), sizeof(*(ptr))); \ + __ret; \ +}) /* bug catcher for when unsupported size is used - won't link */ extern void __cmpxchg_called_with_bad_pointer(void); diff --git a/arch/parisc/include/asm/kexec.h b/arch/parisc/include/asm/kexec.h index a99ea747d7edefb681d7eca88917efaca7c245aa..87e17400699551f07af20cf152392bf76c26d9df 100644 --- a/arch/parisc/include/asm/kexec.h +++ 
b/arch/parisc/include/asm/kexec.h @@ -2,8 +2,6 @@ #ifndef _ASM_PARISC_KEXEC_H #define _ASM_PARISC_KEXEC_H -#ifdef CONFIG_KEXEC - /* Maximum physical address we can use pages from */ #define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) /* Maximum address we can reach in physical address mode */ @@ -32,6 +30,4 @@ static inline void crash_setup_regs(struct pt_regs *newregs, #endif /* __ASSEMBLY__ */ -#endif /* CONFIG_KEXEC */ - #endif /* _ASM_PARISC_KEXEC_H */ diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile index 2663c8f8be115a0929b2e6f933f4b3e05ca9e98d..068d90950d9378e0a50e13d531c6c16cd85751d7 100644 --- a/arch/parisc/kernel/Makefile +++ b/arch/parisc/kernel/Makefile @@ -37,5 +37,5 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o obj-$(CONFIG_KGDB) += kgdb.o obj-$(CONFIG_KPROBES) += kprobes.o -obj-$(CONFIG_KEXEC) += kexec.o relocate_kernel.o +obj-$(CONFIG_KEXEC_CORE) += kexec.o relocate_kernel.o obj-$(CONFIG_KEXEC_FILE) += kexec_file.o diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c index 3b330e58a4f033e19ca59a711a0c8e3ddd6842c1..a5f3e50fe97619eff506b92fceb75699bb530548 100644 --- a/arch/parisc/kernel/drivers.c +++ b/arch/parisc/kernel/drivers.c @@ -810,7 +810,7 @@ EXPORT_SYMBOL(device_to_hwpath); static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high, struct device *parent); -static void walk_lower_bus(struct parisc_device *dev) +static void __init walk_lower_bus(struct parisc_device *dev) { unsigned long io_io_low, io_io_high; @@ -889,8 +889,8 @@ static void print_parisc_device(struct parisc_device *dev) static int count; print_pa_hwpath(dev, hw_path); - pr_info("%d. %s at 0x%px [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }", - ++count, dev->name, (void*) dev->hpa.start, hw_path, dev->id.hw_type, + pr_info("%d. 
%s at %pap [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }", + ++count, dev->name, &(dev->hpa.start), hw_path, dev->id.hw_type, dev->id.hversion_rev, dev->id.hversion, dev->id.sversion); if (dev->num_addrs) { diff --git a/arch/parisc/kernel/pdt.c b/arch/parisc/kernel/pdt.c index 36434d4da381a2691fea6918b2eda3203bc44dc9..749c4579db0d5542ac274a8198f9f3d825be4d3b 100644 --- a/arch/parisc/kernel/pdt.c +++ b/arch/parisc/kernel/pdt.c @@ -327,8 +327,7 @@ static int pdt_mainloop(void *unused) ((pde & PDT_ADDR_SINGLE_ERR) == 0)) memory_failure(pde >> PAGE_SHIFT, 0); else - soft_offline_page( - pfn_to_page(pde >> PAGE_SHIFT), 0); + soft_offline_page(pde >> PAGE_SHIFT, 0); #else pr_crit("PDT: memory error at 0x%lx ignored.\n" "Rebuild kernel with CONFIG_MEMORY_FAILURE=y " diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index ecc5c277120871efe35a6cebc6158397790d1c0e..230a6422b99f369c52a5d90d113a9a9ffd94b77c 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -208,8 +208,8 @@ arch_initcall(parisc_idle_init); * Copy architecture-specific thread state */ int -copy_thread(unsigned long clone_flags, unsigned long usp, - unsigned long kthread_arg, struct task_struct *p) +copy_thread_tls(unsigned long clone_flags, unsigned long usp, + unsigned long kthread_arg, struct task_struct *p, unsigned long tls) { struct pt_regs *cregs = &(p->thread.regs); void *stack = task_stack_page(p); @@ -254,9 +254,9 @@ copy_thread(unsigned long clone_flags, unsigned long usp, cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN + FRAME_SIZE; cregs->kpc = (unsigned long) &child_return; - /* Setup thread TLS area from the 4th parameter in clone */ + /* Setup thread TLS area */ if (clone_flags & CLONE_SETTLS) - cregs->cr27 = cregs->gr[23]; + cregs->cr27 = tls; } return 0; diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index ddca8287d43bac22e772854d7100975312146866..354cf060b67fc1d4964d0e24cb811b4a48c5cc32 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -401,7 +401,7 @@ static void __init map_pages(unsigned long start_vaddr, pmd = (pmd_t *) __pa(pmd); } - pgd_populate(NULL, pg_dir, __va(pmd)); + pud_populate(NULL, (pud_t *)pg_dir, __va(pmd)); #endif pg_dir++; diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index e9a960e28f3c8468af557533ce3f9b71154d7cac..860228e917dce1d4e8279a4b1a1f97834e606d2c 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -15,6 +15,7 @@ * * (the type definitions are in asm/spinlock_types.h) */ +#include #include #ifdef CONFIG_PPC64 #include @@ -36,10 +37,12 @@ #endif #ifdef CONFIG_PPC_PSERIES +DECLARE_STATIC_KEY_FALSE(shared_processor); + #define vcpu_is_preempted vcpu_is_preempted static inline bool vcpu_is_preempted(int cpu) { - if (!firmware_has_feature(FW_FEATURE_SPLPAR)) + if (!static_branch_unlikely(&shared_processor)) return false; return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1); } @@ -110,13 +113,8 @@ static inline void splpar_rw_yield(arch_rwlock_t *lock) {}; static inline bool is_shared_processor(void) { -/* - * LPPACA is only available on Pseries so guard anything LPPACA related to - * allow other platforms (which include this common header) to compile. 
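The replacement below swaps that LPPACA guard for a static key, which pSeries_setup_arch() enables once at boot (see the setup.c hunk further on). For reference, the static-key pattern it relies on, as a minimal sketch using the generic jump-label API; the demo_* names are illustrative only:

	#include <linux/jump_label.h>

	DEFINE_STATIC_KEY_FALSE(demo_key);		/* defaults to false */

	static void demo_setup(bool shared)		/* boot path: decided once */
	{
		if (shared)
			static_branch_enable(&demo_key);
	}

	static bool demo_hot_path(void)
	{
		/* patched branch: a nop until the key is enabled, no flag load */
		return static_branch_unlikely(&demo_key);
	}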
- */ -#ifdef CONFIG_PPC_PSERIES - return (IS_ENABLED(CONFIG_PPC_SPLPAR) && - lppaca_shared_proc(local_paca->lppaca_ptr)); +#ifdef CONFIG_PPC_SPLPAR + return static_branch_unlikely(&shared_processor); #else return false; #endif diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index 15002b51ff18df83d5d5a5c37c2c65462fffcdc3..c92fe7fe9692ceedcb98d2a88afdecea689bbaca 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -401,7 +401,7 @@ copy_to_user_mcsafe(void __user *to, const void *from, unsigned long n) return n; } -extern unsigned long __clear_user(void __user *addr, unsigned long size); +unsigned long __arch_clear_user(void __user *addr, unsigned long size); static inline unsigned long clear_user(void __user *addr, unsigned long size) { @@ -409,12 +409,17 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size) might_fault(); if (likely(access_ok(addr, size))) { allow_write_to_user(addr, size); - ret = __clear_user(addr, size); + ret = __arch_clear_user(addr, size); prevent_write_to_user(addr, size); } return ret; } +static inline unsigned long __clear_user(void __user *addr, unsigned long size) +{ + return clear_user(addr, size); +} + extern long strncpy_from_user(char *dst, const char __user *src, long count); extern __must_check long strnlen_user(const char __user *str, long n); diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 5645bc9cbc09a28c1fbc324743e4d568c3033dc6..add67498c126bffdc8050f804bed16fa97efd8e4 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -619,8 +619,6 @@ void __do_irq(struct pt_regs *regs) trace_irq_entry(regs); - check_stack_overflow(); - /* * Query the platform PIC for the interrupt & ack it. * @@ -652,6 +650,8 @@ void do_IRQ(struct pt_regs *regs) irqsp = hardirq_ctx[raw_smp_processor_id()]; sirqsp = softirq_ctx[raw_smp_processor_id()]; + check_stack_overflow(); + /* Already there ? 
*/ if (unlikely(cursp == irqsp || cursp == sirqsp)) { __do_irq(regs); diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index dc53578193ee00ef0cd39b43465af4569a51f5b2..6ff3f896d90816efa520657cb747be3a0ea0572d 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -4983,7 +4983,8 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm) if (nesting_enabled(kvm)) kvmhv_release_all_nested(kvm); kvm->arch.process_table = 0; - uv_svm_terminate(kvm->arch.lpid); + if (kvm->arch.secure_guest) + uv_svm_terminate(kvm->arch.lpid); kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0); } diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 0496e66aaa565fbe69f8fb238f503b0eccb6a27f..c6fbbd29bd8717a44bcac3cc5706269867d9d272 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1117,7 +1117,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) ld r7, VCPU_GPR(R7)(r4) bne ret_to_ultra - lwz r0, VCPU_CR(r4) + ld r0, VCPU_CR(r4) mtcr r0 ld r0, VCPU_GPR(R0)(r4) @@ -1137,7 +1137,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) * R3 = UV_RETURN */ ret_to_ultra: - lwz r0, VCPU_CR(r4) + ld r0, VCPU_CR(r4) mtcr r0 ld r0, VCPU_GPR(R3)(r4) diff --git a/arch/powerpc/lib/string_32.S b/arch/powerpc/lib/string_32.S index f69a6aab7bfbb5cb65fd082eaedb939d46c12149..1ddb26394e8ac5a1739b6f5aaf12512e5c3ed9f6 100644 --- a/arch/powerpc/lib/string_32.S +++ b/arch/powerpc/lib/string_32.S @@ -17,7 +17,7 @@ CACHELINE_BYTES = L1_CACHE_BYTES LG_CACHELINE_BYTES = L1_CACHE_SHIFT CACHELINE_MASK = (L1_CACHE_BYTES-1) -_GLOBAL(__clear_user) +_GLOBAL(__arch_clear_user) /* * Use dcbz on the complete cache lines in the destination * to set them to zero. This requires that the destination @@ -87,4 +87,4 @@ _GLOBAL(__clear_user) EX_TABLE(8b, 91b) EX_TABLE(9b, 91b) -EXPORT_SYMBOL(__clear_user) +EXPORT_SYMBOL(__arch_clear_user) diff --git a/arch/powerpc/lib/string_64.S b/arch/powerpc/lib/string_64.S index 507b18b1660e6e2221242b24d9b8e90055d42842..169872bc08928aa5fc566355ac0e5165985a1d00 100644 --- a/arch/powerpc/lib/string_64.S +++ b/arch/powerpc/lib/string_64.S @@ -17,7 +17,7 @@ PPC64_CACHES: .section ".text" /** - * __clear_user: - Zero a block of memory in user space, with less checking. + * __arch_clear_user: - Zero a block of memory in user space, with less checking. * @to: Destination address, in user space. * @n: Number of bytes to zero. * @@ -58,7 +58,7 @@ err3; stb r0,0(r3) mr r3,r4 blr -_GLOBAL_TOC(__clear_user) +_GLOBAL_TOC(__arch_clear_user) cmpdi r4,32 neg r6,r3 li r0,0 @@ -181,4 +181,4 @@ err1; dcbz 0,r3 cmpdi r4,32 blt .Lshort_clear b .Lmedium_clear -EXPORT_SYMBOL(__clear_user) +EXPORT_SYMBOL(__arch_clear_user) diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 9488b63dfc872d2bb7fc3350e1adf22251581f58..f5535eae637fbcf52c91d7e6a7dea4e5c5a7e0ab 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -151,10 +151,9 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size, { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; - struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap); int ret; - __remove_pages(page_zone(page), start_pfn, nr_pages, altmap); + __remove_pages(start_pfn, nr_pages, altmap); /* Remove htab bolted mappings for this section of memory */ start = (unsigned long)__va(start); @@ -289,6 +288,14 @@ void __init mem_init(void) BUILD_BUG_ON(MMU_PAGE_COUNT > 16); #ifdef CONFIG_SWIOTLB + /* + * Some platforms (e.g. 
85xx) limit DMA-able memory way below + * 4G. We force memblock to bottom-up mode to ensure that the + * memory allocated in swiotlb_init() is DMA-able. + * As it's the last memblock allocation, no need to reset it + * back to top-down. + */ + memblock_set_bottom_up(true); swiotlb_init(0); #endif diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c index 090af2d2d3e4c512f4e04356d01bfe968869cc35..96eb8e43f39b5484db1f22192ff20740276b7a2b 100644 --- a/arch/powerpc/mm/nohash/8xx.c +++ b/arch/powerpc/mm/nohash/8xx.c @@ -103,7 +103,7 @@ static void mmu_patch_addis(s32 *site, long simm) patch_instruction_site(site, instr); } -void __init mmu_mapin_ram_chunk(unsigned long offset, unsigned long top, pgprot_t prot) +static void mmu_mapin_ram_chunk(unsigned long offset, unsigned long top, pgprot_t prot) { unsigned long s = offset; unsigned long v = PAGE_OFFSET + s; diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 42bbcd47cc85ffb1f6900a3ec045db5a2561138d..dffe1a45b6ed4df131f8e18c6cbe851bd1b3fded 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -50,7 +50,7 @@ static void slice_print_mask(const char *label, const struct slice_mask *mask) { #endif -static inline bool slice_addr_is_low(unsigned long addr) +static inline notrace bool slice_addr_is_low(unsigned long addr) { u64 tmp = (u64)addr; @@ -659,7 +659,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp, mm_ctx_user_psize(&current->mm->context), 1); } -unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr) +unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr) { unsigned char *psizes; int index, mask_index; diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c index 91571841df8a70d0ea2520dd9c3977e6f85ccc24..9dba7e88088530726862179d53ebc5f8ff986ce6 100644 --- a/arch/powerpc/platforms/pseries/cmm.c +++ b/arch/powerpc/platforms/pseries/cmm.c @@ -539,6 +539,16 @@ static int cmm_migratepage(struct balloon_dev_info *b_dev_info, /* balloon page list reference */ get_page(newpage); + /* + * When we migrate a page to a different zone, we have to fixup the + * count of both involved zones as we adjusted the managed page count + * when inflating.
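To spell the accounting out (a sketch of the invariant, not literal kernel state): inflation charges the zone a page comes from via adjust_managed_page_count(), so at any time roughly managed(zone) == present(zone) - reserved(zone) - ballooned(zone). Migrating a ballooned page from zone A to zone B moves one unit of "ballooned" between zones, so the code below repays A with adjust_managed_page_count(page, 1) and charges B with adjust_managed_page_count(newpage, -1), keeping the relation true for both zones.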
+ */ + if (page_zone(page) != page_zone(newpage)) { + adjust_managed_page_count(page, 1); + adjust_managed_page_count(newpage, -1); + } + spin_lock_irqsave(&b_dev_info->pages_lock, flags); balloon_page_insert(b_dev_info, newpage); balloon_page_delete(page); diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 0a40201f315ffd14dd39c9a7b71274c1d3db25a3..0c8421dd01ab5bc911e24f340dbe80407089c204 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -74,6 +74,9 @@ #include "pseries.h" #include "../../../../drivers/pci/pci.h" +DEFINE_STATIC_KEY_FALSE(shared_processor); +EXPORT_SYMBOL_GPL(shared_processor); + int CMO_PrPSP = -1; int CMO_SecPSP = -1; unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K); @@ -758,6 +761,10 @@ static void __init pSeries_setup_arch(void) if (firmware_has_feature(FW_FEATURE_LPAR)) { vpa_init(boot_cpuid); + + if (lppaca_shared_proc(get_lppaca())) + static_branch_enable(&shared_processor); + ppc_md.power_save = pseries_lpar_idle; ppc_md.enable_pmcs = pseries_lpar_enable_pmcs; #ifdef CONFIG_PCI_IOV diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 759ffb00267cf1235cf6b40320994734d571ef7a..fa7dc03459e7f1e7168e9dbef0a3ecf14dae175e 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -64,6 +64,8 @@ config RISCV select SPARSEMEM_STATIC if 32BIT select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU select HAVE_ARCH_MMAP_RND_BITS if MMU + select ARCH_HAS_GCOV_PROFILE_ALL + select HAVE_COPY_THREAD_TLS config ARCH_MMAP_RND_BITS_MIN default 18 if 64BIT @@ -154,7 +156,7 @@ config GENERIC_HWEIGHT def_bool y config FIX_EARLYCON_MEM - def_bool CONFIG_MMU + def_bool MMU config PGTABLE_LEVELS int diff --git a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi index 70a1891e7cd07f472ede9f221e854671cbb23a3a..a2e3d54e830cc2c2de4ab0382254adf8674a11a5 100644 --- a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi +++ b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi @@ -54,6 +54,7 @@ reg = <1>; riscv,isa = "rv64imafdc"; tlb-split; + next-level-cache = <&l2cache>; cpu1_intc: interrupt-controller { #interrupt-cells = <1>; compatible = "riscv,cpu-intc"; @@ -77,6 +78,7 @@ reg = <2>; riscv,isa = "rv64imafdc"; tlb-split; + next-level-cache = <&l2cache>; cpu2_intc: interrupt-controller { #interrupt-cells = <1>; compatible = "riscv,cpu-intc"; @@ -100,6 +102,7 @@ reg = <3>; riscv,isa = "rv64imafdc"; tlb-split; + next-level-cache = <&l2cache>; cpu3_intc: interrupt-controller { #interrupt-cells = <1>; compatible = "riscv,cpu-intc"; @@ -123,6 +126,7 @@ reg = <4>; riscv,isa = "rv64imafdc"; tlb-split; + next-level-cache = <&l2cache>; cpu4_intc: interrupt-controller { #interrupt-cells = <1>; compatible = "riscv,cpu-intc"; @@ -253,6 +257,17 @@ #pwm-cells = <3>; status = "disabled"; }; + l2cache: cache-controller@2010000 { + compatible = "sifive,fu540-c000-ccache", "cache"; + cache-block-size = <64>; + cache-level = <2>; + cache-sets = <1024>; + cache-size = <2097152>; + cache-unified; + interrupt-parent = <&plic0>; + interrupts = <1 2 3>; + reg = <0x0 0x2010000 0x0 0x1000>; + }; }; }; diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h index dd62b691c443d821163e7460c4376b482c30396f..27e005fca5849e059bc920d937250bdb251ff35c 100644 --- a/arch/riscv/include/asm/asm-prototypes.h +++ b/arch/riscv/include/asm/asm-prototypes.h @@ -5,4 +5,8 @@ #include #include +long long __lshrti3(long long a, int b); +long long __ashrti3(long 
long a, int b); +long long __ashlti3(long long a, int b); + #endif /* _ASM_RISCV_PROTOTYPES_H */ diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h index 0a62d2d684552da8c665d17632bc590e61b1dbc0..435b65532e2945703cc17a9e5c4f7613a0d5b6b8 100644 --- a/arch/riscv/include/asm/csr.h +++ b/arch/riscv/include/asm/csr.h @@ -116,9 +116,9 @@ # define SR_PIE SR_MPIE # define SR_PP SR_MPP -# define IRQ_SOFT IRQ_M_SOFT -# define IRQ_TIMER IRQ_M_TIMER -# define IRQ_EXT IRQ_M_EXT +# define RV_IRQ_SOFT IRQ_M_SOFT +# define RV_IRQ_TIMER IRQ_M_TIMER +# define RV_IRQ_EXT IRQ_M_EXT #else /* CONFIG_RISCV_M_MODE */ # define CSR_STATUS CSR_SSTATUS # define CSR_IE CSR_SIE @@ -133,15 +133,15 @@ # define SR_PIE SR_SPIE # define SR_PP SR_SPP -# define IRQ_SOFT IRQ_S_SOFT -# define IRQ_TIMER IRQ_S_TIMER -# define IRQ_EXT IRQ_S_EXT +# define RV_IRQ_SOFT IRQ_S_SOFT +# define RV_IRQ_TIMER IRQ_S_TIMER +# define RV_IRQ_EXT IRQ_S_EXT #endif /* CONFIG_RISCV_M_MODE */ /* IE/IP (Supervisor/Machine Interrupt Enable/Pending) flags */ -#define IE_SIE (_AC(0x1, UL) << IRQ_SOFT) -#define IE_TIE (_AC(0x1, UL) << IRQ_TIMER) -#define IE_EIE (_AC(0x1, UL) << IRQ_EXT) +#define IE_SIE (_AC(0x1, UL) << RV_IRQ_SOFT) +#define IE_TIE (_AC(0x1, UL) << RV_IRQ_TIMER) +#define IE_EIE (_AC(0x1, UL) << RV_IRQ_EXT) #ifndef __ASSEMBLY__ diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 7ff0ed4f292e48fbc267d77fdbad6e69f0a6e92e..36ae0176135277e633a247ccfb2055111614e816 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -90,6 +90,27 @@ extern pgd_t swapper_pg_dir[]; #define __S110 PAGE_SHARED_EXEC #define __S111 PAGE_SHARED_EXEC +#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1) +#define VMALLOC_END (PAGE_OFFSET - 1) +#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE) + +/* + * Roughly size the vmemmap space to be large enough to fit enough + * struct pages to map half the virtual address space. Then + * position vmemmap directly below the VMALLOC region. + */ +#define VMEMMAP_SHIFT \ + (CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT) +#define VMEMMAP_SIZE BIT(VMEMMAP_SHIFT) +#define VMEMMAP_END (VMALLOC_START - 1) +#define VMEMMAP_START (VMALLOC_START - VMEMMAP_SIZE) + +/* + * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel + * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled. + */ +#define vmemmap ((struct page *)VMEMMAP_START) + static inline int pmd_present(pmd_t pmd) { return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE)); @@ -400,23 +421,6 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma, #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) -#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1) -#define VMALLOC_END (PAGE_OFFSET - 1) -#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE) - -/* - * Roughly size the vmemmap space to be large enough to fit enough - * struct pages to map half the virtual address space. Then - * position vmemmap directly below the VMALLOC region. 
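Worked numbers make the sizing concrete (assuming the common rv64/sv39 configuration: CONFIG_VA_BITS = 39, PAGE_SHIFT = 12, and a 64-byte struct page, i.e. STRUCT_PAGE_MAX_SHIFT = 6): VMEMMAP_SHIFT = 39 - 12 - 1 + 6 = 32, so VMEMMAP_SIZE = 2^32 bytes = 4 GiB. Cross-check: half the virtual address space is 2^38 bytes, which is 2^26 4 KiB pages, and 2^26 struct pages at 64 bytes each is again 2^32 bytes.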
- */ -#define VMEMMAP_SHIFT \ - (CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT) -#define VMEMMAP_SIZE BIT(VMEMMAP_SHIFT) -#define VMEMMAP_END (VMALLOC_START - 1) -#define VMEMMAP_START (VMALLOC_START - VMEMMAP_SIZE) - -#define vmemmap ((struct page *)VMEMMAP_START) - #define PCI_IO_SIZE SZ_16M #define PCI_IO_END VMEMMAP_START #define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE) diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index a1349ca6466961d8eef510a9d4de9391c0f7c5d1..e163b7b64c86cdac7111a53cc641e41a85fc00bb 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S @@ -246,6 +246,7 @@ check_syscall_nr: */ li t1, -1 beq a7, t1, ret_from_syscall_rejected + blt a7, t1, 1f /* Call syscall */ la s0, sys_call_table slli t0, a7, RISCV_LGPTR diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c index b94d8db5ddccbcac050da196c59c926a738894dc..c40fdcdeb950a59f48f1be56ff8776253883bf90 100644 --- a/arch/riscv/kernel/ftrace.c +++ b/arch/riscv/kernel/ftrace.c @@ -142,7 +142,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, */ old = *parent; - if (function_graph_enter(old, self_addr, frame_pointer, parent)) + if (!function_graph_enter(old, self_addr, frame_pointer, parent)) *parent = return_hooker; } diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index 84a6f0a4b120b4c8dbc0d171d9472fd17c862797..a4242be66966b2bdbfaee559b93b905e8a0e0d45 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S @@ -80,7 +80,9 @@ _start_kernel: #ifdef CONFIG_SMP li t0, CONFIG_NR_CPUS - bgeu a0, t0, .Lsecondary_park + blt a0, t0, .Lgood_cores + tail .Lsecondary_park +.Lgood_cores: #endif /* Pick one hart to run the main boot sequence */ @@ -209,11 +211,6 @@ relocate: tail smp_callin #endif -.align 2 -.Lsecondary_park: - /* We lack SMP support or have too many harts, so park this hart */ - wfi - j .Lsecondary_park END(_start) #ifdef CONFIG_RISCV_M_MODE @@ -246,12 +243,12 @@ ENTRY(reset_regs) li t4, 0 li t5, 0 li t6, 0 - csrw sscratch, 0 + csrw CSR_SCRATCH, 0 #ifdef CONFIG_FPU csrr t0, CSR_MISA andi t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D) - bnez t0, .Lreset_regs_done + beqz t0, .Lreset_regs_done li t1, SR_FS csrs CSR_STATUS, t1 @@ -295,6 +292,13 @@ ENTRY(reset_regs) END(reset_regs) #endif /* CONFIG_RISCV_M_MODE */ +.section ".text", "ax",@progbits +.align 2 +.Lsecondary_park: + /* We lack SMP support or have too many harts, so park this hart */ + wfi + j .Lsecondary_park + __PAGE_ALIGNED_BSS /* Empty zero page */ .balign PAGE_SIZE diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c index 3f07a91d5afb4571bb30eb706bd2bb9c49dacc7e..345c4f2eba13f41a58cdf2f2193f08371863a68f 100644 --- a/arch/riscv/kernel/irq.c +++ b/arch/riscv/kernel/irq.c @@ -23,11 +23,11 @@ asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs) irq_enter(); switch (regs->cause & ~CAUSE_IRQ_FLAG) { - case IRQ_TIMER: + case RV_IRQ_TIMER: riscv_timer_interrupt(); break; #ifdef CONFIG_SMP - case IRQ_SOFT: + case RV_IRQ_SOFT: /* * We only use software interrupts to pass IPIs, so if a non-SMP * system gets one, then we don't know what to do. 
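For context on the dispatch being renamed here: scause flags an interrupt in its top bit and carries the cause number in the remaining bits, which is why the handler masks CAUSE_IRQ_FLAG off before switching. A distilled, self-contained sketch, with supervisor-mode cause numbers from the RISC-V privileged spec and the DEMO_* names purely illustrative:

	#define DEMO_CAUSE_IRQ_FLAG	(1UL << 63)	/* rv64: bit XLEN-1 */
	#define DEMO_IRQ_SOFT		1		/* supervisor software */
	#define DEMO_IRQ_TIMER		5		/* supervisor timer */
	#define DEMO_IRQ_EXT		9		/* supervisor external */

	static void demo_dispatch(unsigned long scause)
	{
		switch (scause & ~DEMO_CAUSE_IRQ_FLAG) {
		case DEMO_IRQ_TIMER:	/* timer tick */			break;
		case DEMO_IRQ_SOFT:	/* IPI, meaningful on SMP only */	break;
		case DEMO_IRQ_EXT:	/* hand off to the irqchip */		break;
		default:		/* unexpected cause */			break;
		}
	}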
@@ -35,7 +35,7 @@ asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs) riscv_software_interrupt(); break; #endif - case IRQ_EXT: + case RV_IRQ_EXT: handle_arch_irq(regs); break; default: diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c index 95a3031e5c7c9dcfdebaa06ba20f8da31424f905..817cf7b0974ced30d75d536b192b73b4e3c25bdf 100644 --- a/arch/riscv/kernel/process.c +++ b/arch/riscv/kernel/process.c @@ -99,8 +99,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) return 0; } -int copy_thread(unsigned long clone_flags, unsigned long usp, - unsigned long arg, struct task_struct *p) +int copy_thread_tls(unsigned long clone_flags, unsigned long usp, + unsigned long arg, struct task_struct *p, unsigned long tls) { struct pt_regs *childregs = task_pt_regs(p); @@ -121,7 +121,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, if (usp) /* User fork */ childregs->sp = usp; if (clone_flags & CLONE_SETTLS) - childregs->tp = childregs->a5; + childregs->tp = tls; childregs->a0 = 0; /* Return value of fork() */ p->thread.ra = (unsigned long)ret_from_fork; } diff --git a/arch/riscv/kernel/riscv_ksyms.c b/arch/riscv/kernel/riscv_ksyms.c index 4800cf703186d389a6fd3446e3339375f332ee74..2a02b7eebee00b270f4d6b7365de0374c2beaba3 100644 --- a/arch/riscv/kernel/riscv_ksyms.c +++ b/arch/riscv/kernel/riscv_ksyms.c @@ -9,8 +9,5 @@ /* * Assembly functions that may be used (directly or indirectly) by modules */ -EXPORT_SYMBOL(__clear_user); -EXPORT_SYMBOL(__asm_copy_to_user); -EXPORT_SYMBOL(__asm_copy_from_user); EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memcpy); diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile index 49a5852fd07dd5a023c1899e80bffe542f6dedeb..33b16f4212f7a5fceb7feeff0a462e295bb4c0c0 100644 --- a/arch/riscv/kernel/vdso/Makefile +++ b/arch/riscv/kernel/vdso/Makefile @@ -58,7 +58,8 @@ quiet_cmd_vdsold = VDSOLD $@ cmd_vdsold = $(CC) $(KBUILD_CFLAGS) $(call cc-option, -no-pie) -nostdlib -nostartfiles $(SYSCFLAGS_$(@F)) \ -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp && \ $(CROSS_COMPILE)objcopy \ - $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ + $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \ + rm $@.tmp # install commands for the unstripped file quiet_cmd_vdso_install = INSTALL $@ diff --git a/arch/riscv/lib/tishift.S b/arch/riscv/lib/tishift.S index 15f9d54c7db63859de60f86ced505165aa7a84cb..ef90075c4b0a9c153c02cdfc0d0fbdc98e151134 100644 --- a/arch/riscv/lib/tishift.S +++ b/arch/riscv/lib/tishift.S @@ -4,34 +4,73 @@ */ #include +#include -ENTRY(__lshrti3) +SYM_FUNC_START(__lshrti3) beqz a2, .L1 li a5,64 sub a5,a5,a2 - addi sp,sp,-16 sext.w a4,a5 blez a5, .L2 sext.w a2,a2 - sll a4,a1,a4 srl a0,a0,a2 - srl a1,a1,a2 + sll a4,a1,a4 + srl a2,a1,a2 or a0,a0,a4 - sd a1,8(sp) - sd a0,0(sp) - ld a0,0(sp) - ld a1,8(sp) - addi sp,sp,16 - ret + mv a1,a2 .L1: ret .L2: - negw a4,a4 - srl a1,a1,a4 - sd a1,0(sp) - sd zero,8(sp) - ld a0,0(sp) - ld a1,8(sp) - addi sp,sp,16 + negw a0,a4 + li a2,0 + srl a0,a1,a0 + mv a1,a2 + ret +SYM_FUNC_END(__lshrti3) +EXPORT_SYMBOL(__lshrti3) + +SYM_FUNC_START(__ashrti3) + beqz a2, .L3 + li a5,64 + sub a5,a5,a2 + sext.w a4,a5 + blez a5, .L4 + sext.w a2,a2 + srl a0,a0,a2 + sll a4,a1,a4 + sra a2,a1,a2 + or a0,a0,a4 + mv a1,a2 +.L3: + ret +.L4: + negw a0,a4 + srai a2,a1,0x3f + sra a0,a1,a0 + mv a1,a2 + ret +SYM_FUNC_END(__ashrti3) +EXPORT_SYMBOL(__ashrti3) + +SYM_FUNC_START(__ashlti3) + beqz a2, .L5 + li a5,64 + sub a5,a5,a2 + sext.w a4,a5 + blez a5, .L6 + sext.w a2,a2 + sll a1,a1,a2 
+ srl a4,a0,a4 + sll a2,a0,a2 + or a1,a1,a4 + mv a0,a2 +.L5: + ret +.L6: + negw a1,a4 + li a2,0 + sll a1,a0,a1 + mv a0,a2 ret -ENDPROC(__lshrti3) +SYM_FUNC_END(__ashlti3) +EXPORT_SYMBOL(__ashlti3) diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S index fecd65657a6fc0c3c1e4e18d281f0d2fb82c20f8..f29d2ba2c0a6ce02b80bba6032ddf9e1f897d484 100644 --- a/arch/riscv/lib/uaccess.S +++ b/arch/riscv/lib/uaccess.S @@ -1,4 +1,5 @@ #include +#include #include #include @@ -66,6 +67,8 @@ ENTRY(__asm_copy_from_user) j 3b ENDPROC(__asm_copy_to_user) ENDPROC(__asm_copy_from_user) +EXPORT_SYMBOL(__asm_copy_to_user) +EXPORT_SYMBOL(__asm_copy_from_user) ENTRY(__clear_user) @@ -108,6 +111,7 @@ ENTRY(__clear_user) bltu a0, a3, 5b j 3b ENDPROC(__clear_user) +EXPORT_SYMBOL(__clear_user) .section .fixup,"ax" .balign 4 diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile index 3c8b332584579d97ddb49fec97d3dd159cb0adab..a1bd95c8047a1159cd8f4f497300497e496b0d6d 100644 --- a/arch/riscv/mm/Makefile +++ b/arch/riscv/mm/Makefile @@ -10,7 +10,6 @@ obj-y += extable.o obj-$(CONFIG_MMU) += fault.o obj-y += cacheflush.o obj-y += context.o -obj-y += sifive_l2_cache.o ifeq ($(CONFIG_MMU),y) obj-$(CONFIG_SMP) += tlbflush.o diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c index 8f190068664059d3ed2bfc5e40b27e93c1d7bb88..8930ab7278e6d51a7e14e05a765ca21a6e6e2798 100644 --- a/arch/riscv/mm/cacheflush.c +++ b/arch/riscv/mm/cacheflush.c @@ -22,6 +22,7 @@ void flush_icache_all(void) else on_each_cpu(ipi_remote_fence_i, NULL, 1); } +EXPORT_SYMBOL(flush_icache_all); /* * Performs an icache flush for the given MM context. RISC-V has no direct diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 69f6678db7f370027e00f679f2a8262362fdc185..965a8cf4829ca33bfde4754bf47db0b8b17c6611 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -99,13 +99,13 @@ static void __init setup_initrd(void) pr_info("initrd not found or empty"); goto disable; } - if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) { + if (__pa_symbol(initrd_end) > PFN_PHYS(max_low_pfn)) { pr_err("initrd extends beyond end of memory"); goto disable; } size = initrd_end - initrd_start; - memblock_reserve(__pa(initrd_start), size); + memblock_reserve(__pa_symbol(initrd_start), size); initrd_below_start_ok = 1; pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n", @@ -124,8 +124,8 @@ void __init setup_bootmem(void) { struct memblock_region *reg; phys_addr_t mem_size = 0; - phys_addr_t vmlinux_end = __pa(&_end); - phys_addr_t vmlinux_start = __pa(&_start); + phys_addr_t vmlinux_end = __pa_symbol(&_end); + phys_addr_t vmlinux_start = __pa_symbol(&_start); /* Find the memory region containing the kernel */ for_each_memblock(memory, reg) { @@ -445,7 +445,7 @@ static void __init setup_vm_final(void) /* Setup swapper PGD for fixmap */ create_pgd_mapping(swapper_pg_dir, FIXADDR_START, - __pa(fixmap_pgd_next), + __pa_symbol(fixmap_pgd_next), PGDIR_SIZE, PAGE_TABLE); /* Map all memory banks */ @@ -474,7 +474,7 @@ static void __init setup_vm_final(void) clear_fixmap(FIX_PMD); /* Move to swapper page table */ - csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | SATP_MODE); + csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE); local_flush_tlb_all(); } #else diff --git a/arch/riscv/net/bpf_jit_comp.c b/arch/riscv/net/bpf_jit_comp.c index 5451ef3845f2954583cd55fac13275e49eb9b31a..7fbf56aab6610dded8a262f8f0ef74ece105e41a 100644 --- a/arch/riscv/net/bpf_jit_comp.c +++ b/arch/riscv/net/bpf_jit_comp.c @@ -631,14 +631,14 @@ static int 
emit_bpf_tail_call(int insn, struct rv_jit_context *ctx) return -1; emit(rv_bgeu(RV_REG_A2, RV_REG_T1, off >> 1), ctx); - /* if (--TCC < 0) + /* if (TCC-- < 0) * goto out; */ emit(rv_addi(RV_REG_T1, tcc, -1), ctx); off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2; if (is_13b_check(off, insn)) return -1; - emit(rv_blt(RV_REG_T1, RV_REG_ZERO, off >> 1), ctx); + emit(rv_blt(tcc, RV_REG_ZERO, off >> 1), ctx); /* prog = array->ptrs[index]; * if (!prog) diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h index 6da8885251d65d9499fbf5b3cc4491f9ee1170cd..670f14a228e55bb42c4cc516b660f91431a4cb0a 100644 --- a/arch/s390/include/asm/timex.h +++ b/arch/s390/include/asm/timex.h @@ -194,9 +194,9 @@ static inline unsigned long long get_tod_clock_monotonic(void) { unsigned long long tod; - preempt_disable(); + preempt_disable_notrace(); tod = get_tod_clock() - *(unsigned long long *) &tod_clock_base[1]; - preempt_enable(); + preempt_enable_notrace(); return tod; } diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index 9e1660a6b9db6e49be93e4b145bb639dee52d773..c3597d2e2ae0e2ff57f2d1d3b80f89152968da17 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S @@ -35,6 +35,7 @@ EXPORT_SYMBOL(_mcount) ENTRY(ftrace_caller) .globl ftrace_regs_caller .set ftrace_regs_caller,ftrace_caller + stg %r14,(__SF_GPRS+8*8)(%r15) # save traced function caller lgr %r1,%r15 #if !(defined(CC_USING_HOTPATCH) || defined(CC_USING_NOP_MCOUNT)) aghi %r0,MCOUNT_RETURN_FIXUP diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 9cbf490fd162ec6b8df30a8cc0a9292cdbe90acb..d5fbd754f41a6fab29ee0ccfe9228859d2504de0 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -1052,7 +1052,7 @@ static void __init log_component_list(void) if (!early_ipl_comp_list_addr) return; - if (ipl_block.hdr.flags & IPL_PL_FLAG_IPLSR) + if (ipl_block.hdr.flags & IPL_PL_FLAG_SIPL) pr_info("Linux is running with Secure-IPL enabled\n"); else pr_info("Linux is running with Secure-IPL disabled\n"); diff --git a/arch/s390/kernel/unwind_bc.c b/arch/s390/kernel/unwind_bc.c index da2d4d4c5b0e0252186df8d985cf3dafa7fb6994..707fd99f6734d86ca1f037887f6be085b7366a3c 100644 --- a/arch/s390/kernel/unwind_bc.c +++ b/arch/s390/kernel/unwind_bc.c @@ -36,10 +36,17 @@ static bool update_stack_info(struct unwind_state *state, unsigned long sp) return true; } -static inline bool is_task_pt_regs(struct unwind_state *state, - struct pt_regs *regs) +static inline bool is_final_pt_regs(struct unwind_state *state, + struct pt_regs *regs) { - return task_pt_regs(state->task) == regs; + /* user mode or kernel thread pt_regs at the bottom of task stack */ + if (task_pt_regs(state->task) == regs) + return true; + + /* user mode pt_regs at the bottom of irq stack */ + return state->stack_info.type == STACK_TYPE_IRQ && + state->stack_info.end - sizeof(struct pt_regs) == (unsigned long)regs && + READ_ONCE_NOCHECK(regs->psw.mask) & PSW_MASK_PSTATE; } bool unwind_next_frame(struct unwind_state *state) @@ -80,7 +87,7 @@ bool unwind_next_frame(struct unwind_state *state) if (!on_stack(info, sp, sizeof(struct pt_regs))) goto out_err; regs = (struct pt_regs *) sp; - if (is_task_pt_regs(state, regs)) + if (is_final_pt_regs(state, regs)) goto out_stop; ip = READ_ONCE_NOCHECK(regs->psw.addr); sp = READ_ONCE_NOCHECK(regs->gprs[15]); diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index f0ce2222056592fdd0941b43918f610c2ee958cb..ac44bd76db4be13443b3da63e4ef0377516d2ddb 100644 --- a/arch/s390/mm/init.c +++ 
b/arch/s390/mm/init.c @@ -292,10 +292,8 @@ void arch_remove_memory(int nid, u64 start, u64 size, { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; - struct zone *zone; - zone = page_zone(pfn_to_page(start_pfn)); - __remove_pages(zone, start_pfn, nr_pages, altmap); + __remove_pages(start_pfn, nr_pages, altmap); vmem_remove_mapping(start, size); } #endif /* CONFIG_MEMORY_HOTPLUG */ diff --git a/arch/s390/purgatory/.gitignore b/arch/s390/purgatory/.gitignore index 04a03433c72013b0bbf5a93bb72b477d0cd2d709..c82157f46b18693cf6ed6272934ed6cb5a205490 100644 --- a/arch/s390/purgatory/.gitignore +++ b/arch/s390/purgatory/.gitignore @@ -1,3 +1,4 @@ purgatory +purgatory.chk purgatory.lds purgatory.ro diff --git a/arch/s390/purgatory/Makefile b/arch/s390/purgatory/Makefile index bc0d7a0d039453d28464432c2d74994b827b4c1d..c57f8c40e992685812170f3db796256b7e620934 100644 --- a/arch/s390/purgatory/Makefile +++ b/arch/s390/purgatory/Makefile @@ -4,7 +4,7 @@ OBJECT_FILES_NON_STANDARD := y purgatory-y := head.o purgatory.o string.o sha256.o mem.o -targets += $(purgatory-y) purgatory.lds purgatory purgatory.ro +targets += $(purgatory-y) purgatory.lds purgatory purgatory.chk purgatory.ro PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y)) $(obj)/sha256.o: $(srctree)/lib/crypto/sha256.c FORCE @@ -15,8 +15,10 @@ CFLAGS_sha256.o := -D__DISABLE_EXPORTS $(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S FORCE $(call if_changed_rule,as_o_S) -$(obj)/string.o: $(srctree)/arch/s390/lib/string.c FORCE - $(call if_changed_rule,cc_o_c) +KCOV_INSTRUMENT := n +GCOV_PROFILE := n +UBSAN_SANITIZE := n +KASAN_SANITIZE := n KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare @@ -26,15 +28,22 @@ KBUILD_CFLAGS += $(CLANG_FLAGS) KBUILD_CFLAGS += $(call cc-option,-fno-PIE) KBUILD_AFLAGS := $(filter-out -DCC_USING_EXPOLINE,$(KBUILD_AFLAGS)) -LDFLAGS_purgatory := -r --no-undefined -nostdlib -z nodefaultlib -T +# Since we link purgatory with -r unresolved symbols are not checked, so we +# also link a purgatory.chk binary without -r to check for unresolved symbols. 
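Why the extra link pass catches anything at all: a relocatable link (ld -r) defers symbol resolution entirely, so a reference that nothing defines passes silently, and only a final link is required to resolve every symbol. Schematically, with hypothetical object files:

	$ ld -r -o partial.o a.o b.o       # succeeds even if b.o references an undefined foo()
	$ ld -e start -o check partial.o   # final link: any undefined reference is reported

The purgatory.chk target introduced below is exactly that throwaway final link, built only so undefined symbols surface as errors at kernel build time.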
+PURGATORY_LDFLAGS := -nostdlib -z nodefaultlib +LDFLAGS_purgatory := -r $(PURGATORY_LDFLAGS) -T +LDFLAGS_purgatory.chk := -e purgatory_start $(PURGATORY_LDFLAGS) $(obj)/purgatory: $(obj)/purgatory.lds $(PURGATORY_OBJS) FORCE $(call if_changed,ld) +$(obj)/purgatory.chk: $(obj)/purgatory FORCE + $(call if_changed,ld) + OBJCOPYFLAGS_purgatory.ro := -O elf64-s390 OBJCOPYFLAGS_purgatory.ro += --remove-section='*debug*' OBJCOPYFLAGS_purgatory.ro += --remove-section='.comment' OBJCOPYFLAGS_purgatory.ro += --remove-section='.note.*' -$(obj)/purgatory.ro: $(obj)/purgatory FORCE +$(obj)/purgatory.ro: $(obj)/purgatory $(obj)/purgatory.chk FORCE $(call if_changed,objcopy) $(obj)/kexec-purgatory.o: $(obj)/kexec-purgatory.S $(obj)/purgatory.ro FORCE diff --git a/arch/s390/purgatory/string.c b/arch/s390/purgatory/string.c new file mode 100644 index 0000000000000000000000000000000000000000..c98c22a72db717834085c4ef00f66475144c780e --- /dev/null +++ b/arch/s390/purgatory/string.c @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: GPL-2.0 +#define __HAVE_ARCH_MEMCMP /* arch function */ +#include "../lib/string.c" diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index dfdbaa50946ebbdfb2c1853b738eaf72ae0d734e..d1b1ff2be17aa728a84ba8e8701313664e9a76df 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -434,9 +434,7 @@ void arch_remove_memory(int nid, u64 start, u64 size, { unsigned long start_pfn = PFN_DOWN(start); unsigned long nr_pages = size >> PAGE_SHIFT; - struct zone *zone; - zone = page_zone(pfn_to_page(start_pfn)); - __remove_pages(zone, start_pfn, nr_pages, altmap); + __remove_pages(start_pfn, nr_pages, altmap); } #endif /* CONFIG_MEMORY_HOTPLUG */ diff --git a/arch/um/Kconfig b/arch/um/Kconfig index 2a6d04fcb3e91c7e191cec43b7d1e2d6d832819b..6f0edd0c0220f40dac6fd3c89309458e9f0981e6 100644 --- a/arch/um/Kconfig +++ b/arch/um/Kconfig @@ -14,6 +14,7 @@ config UML select HAVE_FUTEX_CMPXCHG if FUTEX select HAVE_DEBUG_KMEMLEAK select HAVE_DEBUG_BUGVERBOSE + select HAVE_COPY_THREAD_TLS select GENERIC_IRQ_SHOW select GENERIC_CPU_DEVICES select GENERIC_CLOCKEVENTS diff --git a/arch/um/include/asm/ptrace-generic.h b/arch/um/include/asm/ptrace-generic.h index 81c647ef9c6c8df98ae917178318a61647d9391e..adf91ef553ae91f215dcf1c457fcf258e8065423 100644 --- a/arch/um/include/asm/ptrace-generic.h +++ b/arch/um/include/asm/ptrace-generic.h @@ -36,7 +36,7 @@ extern long subarch_ptrace(struct task_struct *child, long request, extern unsigned long getreg(struct task_struct *child, int regno); extern int putreg(struct task_struct *child, int regno, unsigned long value); -extern int arch_copy_tls(struct task_struct *new); +extern int arch_set_tls(struct task_struct *new, unsigned long tls); extern void clear_flushed_tls(struct task_struct *task); extern int syscall_trace_enter(struct pt_regs *regs); extern void syscall_trace_leave(struct pt_regs *regs); diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index 263a8f06913341d1b0a9d013d89f12971e928391..17045e7211bfd461d9a055fcee9b0cb1f3ac6fed 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c @@ -153,8 +153,8 @@ void fork_handler(void) { userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs); } -int copy_thread(unsigned long clone_flags, unsigned long sp, - unsigned long arg, struct task_struct * p) +int copy_thread_tls(unsigned long clone_flags, unsigned long sp, - unsigned long arg, struct task_struct * p, unsigned long tls) { void (*handler)(void); int kthread = current->flags & PF_KTHREAD; @@ -188,7 +188,7 @@ int
copy_thread(unsigned long clone_flags, unsigned long sp, * Set a new TLS for the child thread? */ if (clone_flags & CLONE_SETTLS) - ret = arch_copy_tls(p); + ret = arch_set_tls(p, tls); } return ret; diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index aa976adb7094ee37158a901a3372b079df13bcb8..1dac210f7d446b906b3c75969331a46a84db2249 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -103,7 +103,7 @@ vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o quiet_cmd_check_data_rel = DATAREL $@ define cmd_check_data_rel for obj in $(filter %.o,$^); do \ - ${CROSS_COMPILE}readelf -S $$obj | grep -qF .rel.local && { \ + $(READELF) -S $$obj | grep -qF .rel.local && { \ echo "error: $$obj has data relocations!" >&2; \ exit 1; \ } || true; \ diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 58a512e33d8d628c60e33cfe21322d7823dbdc3e..ee60b81944a778b3d258d382361d87db8923561f 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -244,6 +244,11 @@ SYM_FUNC_START(efi32_stub_entry) leal efi32_config(%ebp), %eax movl %eax, efi_config(%ebp) + /* Disable paging */ + movl %cr0, %eax + btrl $X86_CR0_PG_BIT, %eax + movl %eax, %cr0 + jmp startup_32 SYM_FUNC_END(efi32_stub_entry) #endif diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 9a89d98c55bd8e87a245024bb0fa635ab0f56dd3..f118af9f0718bec9b0456d7e47d2c78b6489cd02 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -376,7 +376,7 @@ int x86_add_exclusive(unsigned int what) * LBR and BTS are still mutually exclusive. */ if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt) - return 0; + goto out; if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) { mutex_lock(&pmc_reserve_mutex); @@ -388,6 +388,7 @@ int x86_add_exclusive(unsigned int what) mutex_unlock(&pmc_reserve_mutex); } +out: atomic_inc(&active_events); return 0; @@ -398,11 +399,15 @@ fail_unlock: void x86_del_exclusive(unsigned int what) { + atomic_dec(&active_events); + + /* + * See the comment in x86_add_exclusive(). + */ if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt) return; atomic_dec(&x86_pmu.lbr_exclusive[what]); - atomic_dec(&active_events); } int x86_setup_perfctr(struct perf_event *event) @@ -1642,9 +1647,12 @@ static struct attribute_group x86_pmu_format_group __ro_after_init = { ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, char *page) { - struct perf_pmu_events_attr *pmu_attr = \ + struct perf_pmu_events_attr *pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); - u64 config = x86_pmu.event_map(pmu_attr->id); + u64 config = 0; + + if (pmu_attr->id < x86_pmu.max_events) + config = x86_pmu.event_map(pmu_attr->id); /* string trumps id */ if (pmu_attr->event_str) @@ -1713,6 +1721,9 @@ is_visible(struct kobject *kobj, struct attribute *attr, int idx) { struct perf_pmu_events_attr *pmu_attr; + if (idx >= x86_pmu.max_events) + return 0; + pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr); /* str trumps id */ return pmu_attr->event_str || x86_pmu.event_map(idx) ? 
attr->mode : 0; diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c index 38de4a7f6752b23a58ed8e68934017def83969ed..6a3b599ee0fe7df0fe8010218100e8f9db08acf5 100644 --- a/arch/x86/events/intel/bts.c +++ b/arch/x86/events/intel/bts.c @@ -63,9 +63,17 @@ struct bts_buffer { static struct pmu bts_pmu; +static int buf_nr_pages(struct page *page) +{ + if (!PagePrivate(page)) + return 1; + + return 1 << page_private(page); +} + static size_t buf_size(struct page *page) { - return 1 << (PAGE_SHIFT + page_private(page)); + return buf_nr_pages(page) * PAGE_SIZE; } static void * @@ -83,9 +91,7 @@ bts_buffer_setup_aux(struct perf_event *event, void **pages, /* count all the high order buffers */ for (pg = 0, nbuf = 0; pg < nr_pages;) { page = virt_to_page(pages[pg]); - if (WARN_ON_ONCE(!PagePrivate(page) && nr_pages > 1)) - return NULL; - pg += 1 << page_private(page); + pg += buf_nr_pages(page); nbuf++; } @@ -109,7 +115,7 @@ bts_buffer_setup_aux(struct perf_event *event, void **pages, unsigned int __nr_pages; page = virt_to_page(pages[pg]); - __nr_pages = PagePrivate(page) ? 1 << page_private(page) : 1; + __nr_pages = buf_nr_pages(page); buf->buf[nbuf].page = page; buf->buf[nbuf].offset = offset; buf->buf[nbuf].displacement = (pad ? BTS_RECORD_SIZE - pad : 0); diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c index dbaa1b088a30e6106b680a666540016180c694ad..c37cb12d0ef68db47b1054113ce53f7e70a6486d 100644 --- a/arch/x86/events/intel/uncore_snb.c +++ b/arch/x86/events/intel/uncore_snb.c @@ -15,6 +15,7 @@ #define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910 #define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f #define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f +#define PCI_DEVICE_ID_INTEL_SKL_E3_IMC 0x1918 #define PCI_DEVICE_ID_INTEL_KBL_Y_IMC 0x590c #define PCI_DEVICE_ID_INTEL_KBL_U_IMC 0x5904 #define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC 0x5914 @@ -657,6 +658,10 @@ static const struct pci_device_id skl_uncore_pci_ids[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC), .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_E3_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, { /* IMC */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC), .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), @@ -826,6 +831,7 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = { IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */ IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */ IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */ + IMC_DEV(SKL_E3_IMC, &skl_uncore_pci_driver), /* Xeon E3 V5 Gen Core processor */ IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver), /* 7th Gen Core Y */ IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U */ IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U Quad Core */ diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index b10a5ec79e48aac1af8d9edb1396e22449dd8d14..ad20220af303ad8a28acb865123c4a41267c87e9 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -369,11 +369,6 @@ #define SNR_M2M_PCI_PMON_BOX_CTL 0x438 #define SNR_M2M_PCI_PMON_UMASK_EXT 0xff -/* SNR PCIE3 */ -#define SNR_PCIE3_PCI_PMON_CTL0 0x508 -#define SNR_PCIE3_PCI_PMON_CTR0 0x4e8 -#define SNR_PCIE3_PCI_PMON_BOX_CTL 0x4e4 - /* SNR IMC */ #define SNR_IMC_MMIO_PMON_FIXED_CTL 0x54 #define 
SNR_IMC_MMIO_PMON_FIXED_CTR 0x38 @@ -4328,27 +4323,12 @@ static struct intel_uncore_type snr_uncore_m2m = { .format_group = &snr_m2m_uncore_format_group, }; -static struct intel_uncore_type snr_uncore_pcie3 = { - .name = "pcie3", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .perf_ctr = SNR_PCIE3_PCI_PMON_CTR0, - .event_ctl = SNR_PCIE3_PCI_PMON_CTL0, - .event_mask = SNBEP_PMON_RAW_EVENT_MASK, - .box_ctl = SNR_PCIE3_PCI_PMON_BOX_CTL, - .ops = &ivbep_uncore_pci_ops, - .format_group = &ivbep_uncore_format_group, -}; - enum { SNR_PCI_UNCORE_M2M, - SNR_PCI_UNCORE_PCIE3, }; static struct intel_uncore_type *snr_pci_uncores[] = { [SNR_PCI_UNCORE_M2M] = &snr_uncore_m2m, - [SNR_PCI_UNCORE_PCIE3] = &snr_uncore_pcie3, NULL, }; @@ -4357,10 +4337,6 @@ static const struct pci_device_id snr_uncore_pci_ids[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a), .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0), }, - { /* PCIe3 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a), - .driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0), - }, { /* end: all zeroes */ } }; @@ -4536,6 +4512,7 @@ static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = { INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"), INTEL_UNCORE_EVENT_DESC(write.scale, "3.814697266e-6"), INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"), + { /* end: all zeroes */ }, }; static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = { diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 90f75e5158769c8690f745184c1271ddc6dffd20..62c30279be777b3df46efb4908c2d35ed2e2fefc 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -615,9 +615,9 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c) return; clear_all: - clear_cpu_cap(c, X86_FEATURE_SME); + setup_clear_cpu_cap(X86_FEATURE_SME); clear_sev: - clear_cpu_cap(c, X86_FEATURE_SEV); + setup_clear_cpu_cap(X86_FEATURE_SEV); } } diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c index 5167bd2bb6b14dae63313d1def4d93ed1eaac38d..d6cf5c18a7e04a3b09a07179ff1fce8a4790ec8f 100644 --- a/arch/x86/kernel/cpu/mce/amd.c +++ b/arch/x86/kernel/cpu/mce/amd.c @@ -266,10 +266,10 @@ static void smca_configure(unsigned int bank, unsigned int cpu) smca_set_misc_banks_map(bank, cpu); /* Return early if this bank was already initialized. 
*/ - if (smca_banks[bank].hwid) + if (smca_banks[bank].hwid && smca_banks[bank].hwid->hwid_mcatype != 0) return; - if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) { + if (rdmsr_safe(MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) { pr_warn("Failed to read MCA_IPID for bank %d\n", bank); return; } diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index 5f42f25bac8f8c9643def8bae0b93d513c9fbb8b..2e2a421c8528b11d71c57aa98089f1513e3f8964 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -819,8 +819,8 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, if (quirk_no_way_out) quirk_no_way_out(i, m, regs); + m->bank = i; if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) { - m->bank = i; mce_read_aux(m, i); *msg = tmp; return 1; diff --git a/arch/x86/kernel/cpu/mce/therm_throt.c b/arch/x86/kernel/cpu/mce/therm_throt.c index b38010b541d6edff1f6b6cf885b6d76ba89847d5..6c3e1c92f1835ac30415649981ba7f411472c17d 100644 --- a/arch/x86/kernel/cpu/mce/therm_throt.c +++ b/arch/x86/kernel/cpu/mce/therm_throt.c @@ -467,6 +467,7 @@ static int thermal_throttle_online(unsigned int cpu) { struct thermal_state *state = &per_cpu(thermal_state, cpu); struct device *dev = get_cpu_device(cpu); + u32 l; state->package_throttle.level = PACKAGE_LEVEL; state->core_throttle.level = CORE_LEVEL; @@ -474,6 +475,10 @@ static int thermal_throttle_online(unsigned int cpu) INIT_DELAYED_WORK(&state->package_throttle.therm_work, throttle_active_work); INIT_DELAYED_WORK(&state->core_throttle.therm_work, throttle_active_work); + /* Unmask the thermal vector after the above workqueues are initialized. */ + l = apic_read(APIC_LVTTHMR); + apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); + return thermal_throttle_add_dev(dev, cpu); } @@ -722,10 +727,6 @@ void intel_init_thermal(struct cpuinfo_x86 *c) rdmsr(MSR_IA32_MISC_ENABLE, l, h); wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h); - /* Unmask the thermal vector: */ - l = apic_read(APIC_LVTTHMR); - apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); - pr_info_once("CPU0: Thermal monitoring enabled (%s)\n", tm2 ? 
"TM2" : "TM1"); diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 03eb90d00af0fb693a2298f0e956d05f7116ce04..89049b343c7a8f27b0af635253c911d15f393f2b 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -618,7 +618,7 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r) if (static_branch_unlikely(&rdt_mon_enable_key)) rmdir_mondata_subdir_allrdtgrp(r, d->id); list_del(&d->list); - if (is_mbm_enabled()) + if (r->mon_capable && is_mbm_enabled()) cancel_delayed_work(&d->mbm_over); if (is_llc_occupancy_enabled() && has_busy_rmid(r, d)) { /* diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 2e3b06d6bbc6df9b311bd6e9b942ea0ea4b1fc67..dac7209a0708438ebc2293d9c996b1595ea482a2 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -1741,9 +1741,6 @@ static int set_cache_qos_cfg(int level, bool enable) struct rdt_domain *d; int cpu; - if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) - return -ENOMEM; - if (level == RDT_RESOURCE_L3) update = l3_qos_cfg_update; else if (level == RDT_RESOURCE_L2) @@ -1751,6 +1748,9 @@ static int set_cache_qos_cfg(int level, bool enable) else return -EINVAL; + if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) + return -ENOMEM; + r_l = &rdt_resources_all[level]; list_for_each_entry(d, &r_l->domains, list) { /* Pick one CPU from each domain instance to update MSR */ diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 4cba91ec80492ab42dda142a6555dd6dcf3dfbc0..2f9ec14be3b11adfb7526b0513196e333078ab7f 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -710,8 +710,12 @@ static struct chipset early_qrk[] __initdata = { */ { PCI_VENDOR_ID_INTEL, 0x0f00, PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet}, + { PCI_VENDOR_ID_INTEL, 0x3e20, + PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet}, { PCI_VENDOR_ID_INTEL, 0x3ec4, PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet}, + { PCI_VENDOR_ID_INTEL, 0x8a12, + PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet}, { PCI_VENDOR_ID_BROADCOM, 0x4331, PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset}, {} diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index cfafa320a8cf75e8437153fc36310a1f5bc3d4b3..cf55629ff0ff642ea85fb5783f2fd4df678b9d7c 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -402,7 +402,8 @@ static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry, int index) entry->edx |= F(SPEC_CTRL); if (boot_cpu_has(X86_FEATURE_STIBP)) entry->edx |= F(INTEL_STIBP); - if (boot_cpu_has(X86_FEATURE_SSBD)) + if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || + boot_cpu_has(X86_FEATURE_AMD_SSBD)) entry->edx |= F(SPEC_CTRL_SSBD); /* * We emulate ARCH_CAPABILITIES in software even @@ -759,7 +760,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function, entry->ebx |= F(AMD_IBRS); if (boot_cpu_has(X86_FEATURE_STIBP)) entry->ebx |= F(AMD_STIBP); - if (boot_cpu_has(X86_FEATURE_SSBD)) + if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || + boot_cpu_has(X86_FEATURE_AMD_SSBD)) entry->ebx |= F(AMD_SSBD); if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) entry->ebx |= F(AMD_SSB_NO); diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 930edeb41ec33417755d38293f85ad1717cdee90..0a74407ef92ea939aaa8425613ae8bcda2419b70 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -865,10 +865,8 @@ void 
arch_remove_memory(int nid, u64 start, u64 size, { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; - struct zone *zone; - zone = page_zone(pfn_to_page(start_pfn)); - __remove_pages(zone, start_pfn, nr_pages, altmap); + __remove_pages(start_pfn, nr_pages, altmap); } #endif diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index dcb9bc961b39c3e2928eb4d15f681c8dad29591e..bcfede46fe02b54d0b73d74de7112e351a2b4272 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1212,10 +1212,8 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size, { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; - struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap); - struct zone *zone = page_zone(page); - __remove_pages(zone, start_pfn, nr_pages, altmap); + __remove_pages(start_pfn, nr_pages, altmap); kernel_physical_mapping_remove(start, start + size); } #endif /* CONFIG_MEMORY_HOTPLUG */ diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c index 7675cf754d9090f067be7c2d1eb24a74d81d7ea3..f8f0220b6a665a1c26b9a2e15f12cb6c4fb37f2f 100644 --- a/arch/x86/platform/efi/quirks.c +++ b/arch/x86/platform/efi/quirks.c @@ -260,10 +260,6 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size) return; } - /* No need to reserve regions that will never be freed. */ - if (md.attribute & EFI_MEMORY_RUNTIME) - return; - size += addr % EFI_PAGE_SIZE; size = round_up(size, EFI_PAGE_SIZE); addr = round_down(addr, EFI_PAGE_SIZE); @@ -293,6 +289,8 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size) early_memunmap(new, new_size); efi_memmap_install(new_phys, num_entries); + e820__range_update(addr, size, E820_TYPE_RAM, E820_TYPE_RESERVED); + e820__update_table(e820_table); } /* diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c index 5bd949da7a4a5deb659f19e849dcd6036a2843ac..ac8eee093f9cd0fa112fe89e945202eb60203a84 100644 --- a/arch/x86/um/tls_32.c +++ b/arch/x86/um/tls_32.c @@ -215,14 +215,12 @@ static int set_tls_entry(struct task_struct* task, struct user_desc *info, return 0; } -int arch_copy_tls(struct task_struct *new) +int arch_set_tls(struct task_struct *new, unsigned long tls) { struct user_desc info; int idx, ret = -EFAULT; - if (copy_from_user(&info, - (void __user *) UPT_SI(&new->thread.regs.regs), - sizeof(info))) + if (copy_from_user(&info, (void __user *) tls, sizeof(info))) goto out; ret = -EINVAL; diff --git a/arch/x86/um/tls_64.c b/arch/x86/um/tls_64.c index 3a621e0d39253aa30a7d5eadb523cd4a2e1315cb..ebd3855d9b132379b7065222316a948e06b2b72e 100644 --- a/arch/x86/um/tls_64.c +++ b/arch/x86/um/tls_64.c @@ -6,14 +6,13 @@ void clear_flushed_tls(struct task_struct *task) { } -int arch_copy_tls(struct task_struct *t) +int arch_set_tls(struct task_struct *t, unsigned long tls) { /* * If CLONE_SETTLS is set, we need to save the thread id - * (which is argument 5, child_tid, of clone) so it can be set - * during context switches. + * so it can be set during context switches. 
*/ - t->thread.arch.fs = t->thread.regs.regs.gp[R8 / sizeof(long)]; + t->thread.arch.fs = tls; return 0; } diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 4a3fa295d8fed4a9427ea8c1eecf9832040d766a..296c5324dacef5cbdf3c003b151c8e34d529ad54 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -24,6 +24,7 @@ config XTENSA select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL select HAVE_ARCH_TRACEHOOK + select HAVE_COPY_THREAD_TLS select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_CONTIGUOUS select HAVE_EXIT_THREAD diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c index 9e1c49134c07eca6a172e4b0f43e5ffeeef61f50..3edecc41ef8c36ffefb6a6b6ac1be62c1d9a296d 100644 --- a/arch/xtensa/kernel/process.c +++ b/arch/xtensa/kernel/process.c @@ -202,8 +202,9 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) * involved. Much simpler to just not copy those live frames across. */ -int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn, - unsigned long thread_fn_arg, struct task_struct *p) +int copy_thread_tls(unsigned long clone_flags, unsigned long usp_thread_fn, + unsigned long thread_fn_arg, struct task_struct *p, + unsigned long tls) { struct pt_regs *childregs = task_pt_regs(p); @@ -266,9 +267,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn, childregs->syscall = regs->syscall; - /* The thread pointer is passed in the '4th argument' (= a5) */ if (clone_flags & CLONE_SETTLS) - childregs->threadptr = childregs->areg[5]; + childregs->threadptr = tls; } else { p->thread.ra = MAKE_RA_FOR_CALL( (unsigned long)ret_from_kernel_thread, 1); diff --git a/block/bio.c b/block/bio.c index a5d75f6bf4c7eedc96454bade72dbc9fe88af74d..94d697217887aa1b9e8b214c7606a6f1e7bf8b42 100644 --- a/block/bio.c +++ b/block/bio.c @@ -538,6 +538,55 @@ void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start) } EXPORT_SYMBOL(zero_fill_bio_iter); +/** + * bio_truncate - truncate the bio to a new size of @new_size + * @bio: the bio to be truncated + * @new_size: new size for truncating the bio + * + * Description: + * Truncate the bio to the new size @new_size. If bio_op(bio) is + * REQ_OP_READ, zero the truncated part. This function should only + * be used for handling corner cases, such as bio EOD (end-of-device). + */ +void bio_truncate(struct bio *bio, unsigned new_size) +{ + struct bio_vec bv; + struct bvec_iter iter; + unsigned int done = 0; + bool truncated = false; + + if (new_size >= bio->bi_iter.bi_size) + return; + + if (bio_op(bio) != REQ_OP_READ) + goto exit; + + bio_for_each_segment(bv, bio, iter) { + if (done + bv.bv_len > new_size) { + unsigned offset; + + if (!truncated) + offset = new_size - done; + else + offset = 0; + zero_user(bv.bv_page, offset, bv.bv_len - offset); + truncated = true; + } + done += bv.bv_len; + } + + exit: + /* + * Don't touch bvec table here and make it really immutable, since + * fs bio user has to retrieve all pages via bio_for_each_segment_all + * in its .bi_end_io() callback. + * + * It is enough to truncate bio by updating .bi_size since we can make + * correct bvec with the updated .bi_size for drivers.
+ */ + bio->bi_iter.bi_size = new_size; +} + /** * bio_put - release a reference to a bio * @bio: bio to release reference to diff --git a/block/blk-core.c b/block/blk-core.c index e0a094fddee5f3e16b660c8cab189987663cb7f7..089e890ab208fd01efbda68aea4fdb47aaab1353 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -885,11 +885,14 @@ generic_make_request_checks(struct bio *bio) } /* - * For a REQ_NOWAIT based request, return -EOPNOTSUPP - * if queue is not a request based queue. + * Non-mq queues do not honor REQ_NOWAIT, so complete a bio + * with BLK_STS_AGAIN status in order to catch -EAGAIN and + * to give a chance to the caller to repeat request gracefully. */ - if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q)) - goto not_supported; + if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q)) { + status = BLK_STS_AGAIN; + goto end_io; + } if (should_fail_bio(bio)) goto end_io; diff --git a/block/blk-flush.c b/block/blk-flush.c index 1777346baf06f23d6c4411b77bcd12e573eea3b6..3f977c517960e61dca951551d437b52abcacd588 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -69,6 +69,7 @@ #include <linux/blkdev.h> #include <linux/gfp.h> #include <linux/blk-mq.h> +#include <linux/lockdep.h> #include "blk.h" #include "blk-mq.h" @@ -505,6 +506,9 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q, INIT_LIST_HEAD(&fq->flush_queue[1]); INIT_LIST_HEAD(&fq->flush_data_in_flight); + lockdep_register_key(&fq->key); + lockdep_set_class(&fq->mq_flush_lock, &fq->key); + return fq; fail_rq: @@ -519,6 +523,7 @@ void blk_free_flush_queue(struct blk_flush_queue *fq) if (!fq) return; + lockdep_unregister_key(&fq->key); kfree(fq->flush_rq); kfree(fq); } diff --git a/block/blk-iocost.c b/block/blk-iocost.c index e01267f9918390f38424337140778c2c983bb7a4..27ca68621137ad5418e647c859b39b12a588f9c2 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -1212,7 +1212,7 @@ static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer) return HRTIMER_NORESTART; } -static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost) +static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost) { struct ioc *ioc = iocg->ioc; struct blkcg_gq *blkg = iocg_to_blkg(iocg); @@ -1229,11 +1229,11 @@ static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost) /* clear or maintain depending on the overage */ if (time_before_eq64(vtime, now->vnow)) { blkcg_clear_delay(blkg); - return; + return false; } if (!atomic_read(&blkg->use_delay) && time_before_eq64(vtime, now->vnow + vmargin)) - return; + return false; /* use delay */ if (cost) { @@ -1250,10 +1250,11 @@ static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost) oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->delay_timer)); if (hrtimer_is_queued(&iocg->delay_timer) && abs(oexpires - expires) <= margin_ns / 4) - return; + return true; hrtimer_start_range_ns(&iocg->delay_timer, ns_to_ktime(expires), margin_ns / 4, HRTIMER_MODE_ABS); + return true; } static enum hrtimer_restart iocg_delay_timer_fn(struct hrtimer *timer) @@ -1739,7 +1740,9 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio) */ if (bio_issue_as_root_blkg(bio) || fatal_signal_pending(current)) { atomic64_add(abs_cost, &iocg->abs_vdebt); - iocg_kick_delay(iocg, &now, cost); + if (iocg_kick_delay(iocg, &now, cost)) + blkcg_schedule_throttle(rqos->q, + (bio->bi_opf & REQ_SWAP) == REQ_SWAP); return; } diff --git a/block/blk-map.c b/block/blk-map.c index 3a62e471d81bd1b70aae0052d4a72101504fbf46..b0790268ed9d9ad804b8b3d396ae16cdc8e229aa 100644 ---
a/block/blk-map.c +++ b/block/blk-map.c @@ -151,7 +151,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, return 0; unmap_rq: - __blk_rq_unmap_user(bio); + blk_rq_unmap_user(bio); fail: rq->bio = NULL; return ret; diff --git a/block/blk-merge.c b/block/blk-merge.c index d783bdc4559b4de54b19364579326b9f618ebe15..1534ed736363fd807998525d3ddd828e566004f8 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -157,17 +157,20 @@ static inline unsigned get_max_io_size(struct request_queue *q, return sectors & (lbs - 1); } -static unsigned get_max_segment_size(const struct request_queue *q, - unsigned offset) +static inline unsigned get_max_segment_size(const struct request_queue *q, + struct page *start_page, + unsigned long offset) { unsigned long mask = queue_segment_boundary(q); - /* default segment boundary mask means no boundary limit */ - if (mask == BLK_SEG_BOUNDARY_MASK) - return queue_max_segment_size(q); + offset = mask & (page_to_phys(start_page) + offset); - return min_t(unsigned long, mask - (mask & offset) + 1, - queue_max_segment_size(q)); + /* + * overflow may be triggered in case of zero page physical address + * on 32bit arch, use queue's max segment size when that happens. + */ + return min_not_zero(mask - offset + 1, + (unsigned long)queue_max_segment_size(q)); } /** @@ -201,7 +204,8 @@ static bool bvec_split_segs(const struct request_queue *q, unsigned seg_size = 0; while (len && *nsegs < max_segs) { - seg_size = get_max_segment_size(q, bv->bv_offset + total_len); + seg_size = get_max_segment_size(q, bv->bv_page, + bv->bv_offset + total_len); seg_size = min(seg_size, len); (*nsegs)++; @@ -419,7 +423,8 @@ static unsigned blk_bvec_map_sg(struct request_queue *q, while (nbytes > 0) { unsigned offset = bvec->bv_offset + total; - unsigned len = min(get_max_segment_size(q, offset), nbytes); + unsigned len = min(get_max_segment_size(q, bvec->bv_page, + offset), nbytes); struct page *page = bvec->bv_page; /* diff --git a/block/blk-settings.c b/block/blk-settings.c index 5f6dcc7a47bd92745341feee781cb2f2d3fa58b2..c8eda2e7b91e492453a9a454691865f4b8768a5d 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -328,7 +328,7 @@ EXPORT_SYMBOL(blk_queue_max_segment_size); * storage device can address. The default of 512 covers most * hardware. 
**/ -void blk_queue_logical_block_size(struct request_queue *q, unsigned short size) +void blk_queue_logical_block_size(struct request_queue *q, unsigned int size) { q->limits.logical_block_size = size; diff --git a/block/blk.h b/block/blk.h index 6842f28c033e79fd3571c3514bdc4893d43a3871..0b8884353f6bf43694b23a83158781c79df0f046 100644 --- a/block/blk.h +++ b/block/blk.h @@ -30,6 +30,7 @@ struct blk_flush_queue { * at the same time */ struct request *orig_rq; + struct lock_class_key key; spinlock_t mq_flush_lock; }; diff --git a/block/bsg-lib.c b/block/bsg-lib.c index 347dda16c2f46a48d87cc234a57a4ced250a4646..6cbb7926534cd7985e22ce280c4b29c66a900826 100644 --- a/block/bsg-lib.c +++ b/block/bsg-lib.c @@ -266,7 +266,7 @@ static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req = bd->rq; struct bsg_set *bset = container_of(q->tag_set, struct bsg_set, tag_set); - int sts = BLK_STS_IOERR; + blk_status_t sts = BLK_STS_IOERR; int ret; blk_mq_start_request(req); diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c index 6ca015f92766e9052d444f9e81ba3c307846166c..3ed7a0f144a994b28aa2bc269cadece8c6a821d8 100644 --- a/block/compat_ioctl.c +++ b/block/compat_ioctl.c @@ -6,6 +6,7 @@ #include <linux/compat.h> #include <linux/elevator.h> #include <linux/hdreg.h> +#include <linux/pr.h> #include <linux/slab.h> #include <linux/syscalls.h> #include <linux/types.h> @@ -354,6 +355,13 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) * but we call blkdev_ioctl, which gets the lock for us */ case BLKRRPART: + case BLKREPORTZONE: + case BLKRESETZONE: + case BLKOPENZONE: + case BLKCLOSEZONE: + case BLKFINISHZONE: + case BLKGETZONESZ: + case BLKGETNRZONES: return blkdev_ioctl(bdev, mode, cmd, (unsigned long)compat_ptr(arg)); case BLKBSZSET_32: @@ -401,6 +409,14 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) case BLKTRACETEARDOWN: /* compatible */ ret = blk_trace_ioctl(bdev, cmd, compat_ptr(arg)); return ret; + case IOC_PR_REGISTER: + case IOC_PR_RESERVE: + case IOC_PR_RELEASE: + case IOC_PR_PREEMPT: + case IOC_PR_PREEMPT_ABORT: + case IOC_PR_CLEAR: + return blkdev_ioctl(bdev, mode, cmd, + (unsigned long)compat_ptr(arg)); default: if (disk->fops->compat_ioctl) ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg); diff --git a/crypto/asymmetric_keys/asym_tpm.c b/crypto/asymmetric_keys/asym_tpm.c index d16d893bd1959ca7d69eab2b011faa13c8e410c9..378b18b9bc342a5aa9afb4efb8cf180f0a1edd76 100644 --- a/crypto/asymmetric_keys/asym_tpm.c +++ b/crypto/asymmetric_keys/asym_tpm.c @@ -470,6 +470,7 @@ static int tpm_key_encrypt(struct tpm_key *tk, if (ret < 0) goto error_free_tfm; + ret = -ENOMEM; req = akcipher_request_alloc(tfm, GFP_KERNEL); if (!req) goto error_free_tfm; diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index 364b9df9d631ff672a8fca0b74ecda0fb5b4ec11..d7f43d4ea925a0dcaa1fbce77dbef3b81fc4bc62 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -184,6 +184,7 @@ static int software_key_eds_op(struct kernel_pkey_params *params, if (IS_ERR(tfm)) return PTR_ERR(tfm); + ret = -ENOMEM; req = akcipher_request_alloc(tfm, GFP_KERNEL); if (!req) goto error_free_tfm; diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c index f41744b9b38a6d5b47e11f5b98777a6b195be3e2..66a570d0da83743caf73b26d0a4c9212ef5e4f26 100644 --- a/drivers/ata/ahci_brcm.c +++ b/drivers/ata/ahci_brcm.c @@ -76,8 +76,7 @@ enum brcm_ahci_version { }; enum brcm_ahci_quirks { - BRCM_AHCI_QUIRK_NO_NCQ = BIT(0), - BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE = BIT(1), + BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE = BIT(0),
}; struct brcm_ahci_priv { @@ -213,19 +212,12 @@ static void brcm_sata_phys_disable(struct brcm_ahci_priv *priv) brcm_sata_phy_disable(priv, i); } -static u32 brcm_ahci_get_portmask(struct platform_device *pdev, +static u32 brcm_ahci_get_portmask(struct ahci_host_priv *hpriv, struct brcm_ahci_priv *priv) { - void __iomem *ahci; - struct resource *res; u32 impl; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ahci"); - ahci = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(ahci)) - return 0; - - impl = readl(ahci + HOST_PORTS_IMPL); + impl = readl(hpriv->mmio + HOST_PORTS_IMPL); if (fls(impl) > SATA_TOP_MAX_PHYS) dev_warn(priv->dev, "warning: more ports than PHYs (%#x)\n", @@ -233,9 +225,6 @@ static u32 brcm_ahci_get_portmask(struct platform_device *pdev, else if (!impl) dev_info(priv->dev, "no ports found\n"); - devm_iounmap(&pdev->dev, ahci); - devm_release_mem_region(&pdev->dev, res->start, resource_size(res)); - return impl; } @@ -285,6 +274,13 @@ static unsigned int brcm_ahci_read_id(struct ata_device *dev, /* Perform the SATA PHY reset sequence */ brcm_sata_phy_disable(priv, ap->port_no); + /* Reset the SATA clock */ + ahci_platform_disable_clks(hpriv); + msleep(10); + + ahci_platform_enable_clks(hpriv); + msleep(10); + /* Bring the PHY back on */ brcm_sata_phy_enable(priv, ap->port_no); @@ -347,11 +343,10 @@ static int brcm_ahci_suspend(struct device *dev) struct ata_host *host = dev_get_drvdata(dev); struct ahci_host_priv *hpriv = host->private_data; struct brcm_ahci_priv *priv = hpriv->plat_data; - int ret; - ret = ahci_platform_suspend(dev); brcm_sata_phys_disable(priv); - return ret; + + return ahci_platform_suspend(dev); } static int brcm_ahci_resume(struct device *dev) @@ -359,11 +354,44 @@ static int brcm_ahci_resume(struct device *dev) struct ata_host *host = dev_get_drvdata(dev); struct ahci_host_priv *hpriv = host->private_data; struct brcm_ahci_priv *priv = hpriv->plat_data; + int ret; + + /* Make sure clocks are turned on before re-configuration */ + ret = ahci_platform_enable_clks(hpriv); + if (ret) + return ret; brcm_sata_init(priv); brcm_sata_phys_enable(priv); brcm_sata_alpm_init(hpriv); - return ahci_platform_resume(dev); + + /* Since we had to enable clocks earlier on, we cannot use + * ahci_platform_resume() as-is since a second call to + * ahci_platform_enable_resources() would bump up the resources + * (regulators, clocks, PHYs) count artificially so we copy the part + * after ahci_platform_enable_resources(). 
+ */ + ret = ahci_platform_enable_phys(hpriv); + if (ret) + goto out_disable_phys; + + ret = ahci_platform_resume_host(dev); + if (ret) + goto out_disable_platform_phys; + + /* We resumed so update PM runtime state */ + pm_runtime_disable(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + + return 0; + +out_disable_platform_phys: + ahci_platform_disable_phys(hpriv); +out_disable_phys: + brcm_sata_phys_disable(priv); + ahci_platform_disable_clks(hpriv); + return ret; } #endif @@ -410,44 +438,71 @@ static int brcm_ahci_probe(struct platform_device *pdev) if (!IS_ERR_OR_NULL(priv->rcdev)) reset_control_deassert(priv->rcdev); - if ((priv->version == BRCM_SATA_BCM7425) || - (priv->version == BRCM_SATA_NSP)) { - priv->quirks |= BRCM_AHCI_QUIRK_NO_NCQ; + hpriv = ahci_platform_get_resources(pdev, 0); + if (IS_ERR(hpriv)) { + ret = PTR_ERR(hpriv); + goto out_reset; + } + + hpriv->plat_data = priv; + hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP | AHCI_HFLAG_NO_WRITE_TO_RO; + + switch (priv->version) { + case BRCM_SATA_BCM7425: + hpriv->flags |= AHCI_HFLAG_DELAY_ENGINE; + /* fall through */ + case BRCM_SATA_NSP: + hpriv->flags |= AHCI_HFLAG_NO_NCQ; priv->quirks |= BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE; + break; + default: + break; } + ret = ahci_platform_enable_clks(hpriv); + if (ret) + goto out_reset; + + /* Must be first so as to configure endianness including that + * of the standard AHCI register space. + */ brcm_sata_init(priv); - priv->port_mask = brcm_ahci_get_portmask(pdev, priv); - if (!priv->port_mask) - return -ENODEV; + /* Initializes priv->port_mask which is used below */ + priv->port_mask = brcm_ahci_get_portmask(hpriv, priv); + if (!priv->port_mask) { + ret = -ENODEV; + goto out_disable_clks; + } + /* Must be done before ahci_platform_enable_phys() */ brcm_sata_phys_enable(priv); - hpriv = ahci_platform_get_resources(pdev, 0); - if (IS_ERR(hpriv)) - return PTR_ERR(hpriv); - hpriv->plat_data = priv; - hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP; - brcm_sata_alpm_init(hpriv); - ret = ahci_platform_enable_resources(hpriv); + ret = ahci_platform_enable_phys(hpriv); if (ret) - return ret; - - if (priv->quirks & BRCM_AHCI_QUIRK_NO_NCQ) - hpriv->flags |= AHCI_HFLAG_NO_NCQ; - hpriv->flags |= AHCI_HFLAG_NO_WRITE_TO_RO; + goto out_disable_phys; ret = ahci_platform_init_host(pdev, hpriv, &ahci_brcm_port_info, &ahci_platform_sht); if (ret) - return ret; + goto out_disable_platform_phys; dev_info(dev, "Broadcom AHCI SATA3 registered\n"); return 0; + +out_disable_platform_phys: + ahci_platform_disable_phys(hpriv); +out_disable_phys: + brcm_sata_phys_disable(priv); +out_disable_clks: + ahci_platform_disable_clks(hpriv); +out_reset: + if (!IS_ERR_OR_NULL(priv->rcdev)) + reset_control_assert(priv->rcdev); + return ret; } static int brcm_ahci_remove(struct platform_device *pdev) @@ -457,12 +512,12 @@ static int brcm_ahci_remove(struct platform_device *pdev) struct brcm_ahci_priv *priv = hpriv->plat_data; int ret; + brcm_sata_phys_disable(priv); + ret = ata_platform_remove_one(pdev); if (ret) return ret; - brcm_sata_phys_disable(priv); - return 0; } diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c index 8befce036af84810b0a3c815829b87f6fd904d8c..129556fcf6be760ce48834873ea3ea057c4bd8a5 100644 --- a/drivers/ata/libahci_platform.c +++ b/drivers/ata/libahci_platform.c @@ -43,7 +43,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_ops); * RETURNS: * 0 on success otherwise a negative error code */ -static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv) +int 
ahci_platform_enable_phys(struct ahci_host_priv *hpriv) { int rc, i; @@ -74,6 +74,7 @@ disable_phys: } return rc; } +EXPORT_SYMBOL_GPL(ahci_platform_enable_phys); /** * ahci_platform_disable_phys - Disable PHYs @@ -81,7 +82,7 @@ disable_phys: * * This function disables all PHYs found in hpriv->phys. */ -static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv) +void ahci_platform_disable_phys(struct ahci_host_priv *hpriv) { int i; @@ -90,6 +91,7 @@ static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv) phy_exit(hpriv->phys[i]); } } +EXPORT_SYMBOL_GPL(ahci_platform_disable_phys); /** * ahci_platform_enable_clks - Enable platform clocks diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index e9017c570bc51667c127847eb13561f09333794d..6f4ab5c5b52dde6f072a9eea90427d7fd8a7a791 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -5328,6 +5328,30 @@ void ata_qc_complete(struct ata_queued_cmd *qc) } } +/** + * ata_qc_get_active - get bitmask of active qcs + * @ap: port in question + * + * LOCKING: + * spin_lock_irqsave(host lock) + * + * RETURNS: + * Bitmask of active qcs + */ +u64 ata_qc_get_active(struct ata_port *ap) +{ + u64 qc_active = ap->qc_active; + + /* ATA_TAG_INTERNAL is sent to hw as tag 0 */ + if (qc_active & (1ULL << ATA_TAG_INTERNAL)) { + qc_active |= (1 << 0); + qc_active &= ~(1ULL << ATA_TAG_INTERNAL); + } + + return qc_active; +} +EXPORT_SYMBOL_GPL(ata_qc_get_active); + /** * ata_qc_complete_multiple - Complete multiple qcs successfully * @ap: port in question diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index 9239615d8a0472967fa534f0dcd8523da944ea1a..d55ee244d6931fcae2089dce4e3a07409685276d 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c @@ -1280,7 +1280,7 @@ static void sata_fsl_host_intr(struct ata_port *ap) i, ioread32(hcr_base + CC), ioread32(hcr_base + CA)); } - ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); + ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask); return; } else if ((ap->qc_active & (1ULL << ATA_TAG_INTERNAL))) { diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 277f11909fc1ab37338f3390781db00814104a6a..d7228f8e9297c001e730c7f62c8901ebad20715f 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -2829,7 +2829,7 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp } if (work_done) { - ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); + ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask); /* Update the software queue position index in hardware */ writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index f3e62f5528bdb17735f28d847f8cc485bc2e5164..eb9dc14e5147aaebbc210c1481ce146a6d9c57a4 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c @@ -984,7 +984,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) check_commands = 0; check_commands &= ~(1 << pos); } - ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); + ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask); } } diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index b23d1e4bad33b20653b213047d815f388dfd0d47..9d0d65efcd94e97da049e48dd7e9f5aedfccc8f3 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -374,7 +374,7 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb, here = (eni_vcc->descr+skip) & (eni_vcc->words-1); dma[j++] = (here << MID_DMA_COUNT_SHIFT) | (vcc->vci << MID_DMA_VCI_SHIFT) | 
MID_DT_JK; - j++; + dma[j++] = 0; } here = (eni_vcc->descr+size+skip) & (eni_vcc->words-1); if (!eff) size += skip; @@ -447,7 +447,7 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb, if (size != eff) { dma[j++] = (here << MID_DMA_COUNT_SHIFT) | (vcc->vci << MID_DMA_VCI_SHIFT) | MID_DT_JK; - j++; + dma[j++] = 0; } if (!j || j > 2*RX_DMA_BUF) { printk(KERN_CRIT DEV_LABEL "!j or j too big!!!\n"); diff --git a/drivers/base/firmware_loader/builtin/Makefile b/drivers/base/firmware_loader/builtin/Makefile index 4a66888e7253d34d00674824d7fab6593dd794fd..5fa7ce3745a0d63e7c7736de89386e06ee38fe0c 100644 --- a/drivers/base/firmware_loader/builtin/Makefile +++ b/drivers/base/firmware_loader/builtin/Makefile @@ -17,7 +17,7 @@ PROGBITS = $(if $(CONFIG_ARM),%,@)progbits filechk_fwbin = \ echo "/* Generated by $(src)/Makefile */" ;\ echo " .section .rodata" ;\ - echo " .p2align $(ASM_ALIGN)" ;\ + echo " .p2align 4" ;\ echo "_fw_$(FWSTR)_bin:" ;\ echo " .incbin \"$(fwdir)/$(FWNAME)\"" ;\ echo "_fw_end:" ;\ diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 57532465fb83a7dede4856ddaaf6466f330391d3..b4607dd9618521020bf10ee42c5d3a17b8992d59 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -1296,10 +1296,10 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b mutex_unlock(&nbd->config_lock); ret = wait_event_interruptible(config->recv_wq, atomic_read(&config->recv_threads) == 0); - if (ret) { + if (ret) sock_shutdown(nbd); - flush_workqueue(nbd->recv_workq); - } + flush_workqueue(nbd->recv_workq); + mutex_lock(&nbd->config_lock); nbd_bdev_reset(bdev); /* user requested, ignore socket errors */ diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c index d4d88b5818225b6e3fe441b9190c4e7d82a0e04a..ed34785dd64bd7b913d8003993e4b741fd58eb41 100644 --- a/drivers/block/null_blk_zoned.c +++ b/drivers/block/null_blk_zoned.c @@ -129,11 +129,13 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector, return BLK_STS_IOERR; case BLK_ZONE_COND_EMPTY: case BLK_ZONE_COND_IMP_OPEN: + case BLK_ZONE_COND_EXP_OPEN: + case BLK_ZONE_COND_CLOSED: /* Writes must be at the write pointer position */ if (sector != zone->wp) return BLK_STS_IOERR; - if (zone->cond == BLK_ZONE_COND_EMPTY) + if (zone->cond != BLK_ZONE_COND_EXP_OPEN) zone->cond = BLK_ZONE_COND_IMP_OPEN; zone->wp += nr_sectors; @@ -186,7 +188,10 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op, if (zone->cond == BLK_ZONE_COND_FULL) return BLK_STS_IOERR; - zone->cond = BLK_ZONE_COND_CLOSED; + if (zone->wp == zone->start) + zone->cond = BLK_ZONE_COND_EMPTY; + else + zone->cond = BLK_ZONE_COND_CLOSED; break; case REQ_OP_ZONE_FINISH: if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index ee67bf929fac8d57e5750c60dacf3ca58a73891d..861fc65a1b751a5a19ae8a5be8c1ede985df26ad 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -2707,7 +2707,7 @@ static const struct block_device_operations pktcdvd_ops = { .release = pkt_close, .ioctl = pkt_ioctl, #ifdef CONFIG_COMPAT - .ioctl = pkt_compat_ioctl, + .compat_ioctl = pkt_compat_ioctl, #endif .check_events = pkt_check_events, }; diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index d6a6adfd5159d5c6f2587fa10fce6f9d9a5e1a0b..4c5d99f8781361867344f3f49ddc9f825bcc285a 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -190,6 +190,9 @@ static int 
xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref, { int err; struct xen_blkif *blkif = ring->blkif; + const struct blkif_common_sring *sring_common; + RING_IDX rsp_prod, req_prod; + unsigned int size; /* Already connected through? */ if (ring->irq) @@ -200,46 +203,62 @@ static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref, if (err < 0) return err; + sring_common = (struct blkif_common_sring *)ring->blk_ring; + rsp_prod = READ_ONCE(sring_common->rsp_prod); + req_prod = READ_ONCE(sring_common->req_prod); + switch (blkif->blk_protocol) { case BLKIF_PROTOCOL_NATIVE: { - struct blkif_sring *sring; - sring = (struct blkif_sring *)ring->blk_ring; - BACK_RING_INIT(&ring->blk_rings.native, sring, - XEN_PAGE_SIZE * nr_grefs); + struct blkif_sring *sring_native = + (struct blkif_sring *)ring->blk_ring; + + BACK_RING_ATTACH(&ring->blk_rings.native, sring_native, + rsp_prod, XEN_PAGE_SIZE * nr_grefs); + size = __RING_SIZE(sring_native, XEN_PAGE_SIZE * nr_grefs); break; } case BLKIF_PROTOCOL_X86_32: { - struct blkif_x86_32_sring *sring_x86_32; - sring_x86_32 = (struct blkif_x86_32_sring *)ring->blk_ring; - BACK_RING_INIT(&ring->blk_rings.x86_32, sring_x86_32, - XEN_PAGE_SIZE * nr_grefs); + struct blkif_x86_32_sring *sring_x86_32 = + (struct blkif_x86_32_sring *)ring->blk_ring; + + BACK_RING_ATTACH(&ring->blk_rings.x86_32, sring_x86_32, + rsp_prod, XEN_PAGE_SIZE * nr_grefs); + size = __RING_SIZE(sring_x86_32, XEN_PAGE_SIZE * nr_grefs); break; } case BLKIF_PROTOCOL_X86_64: { - struct blkif_x86_64_sring *sring_x86_64; - sring_x86_64 = (struct blkif_x86_64_sring *)ring->blk_ring; - BACK_RING_INIT(&ring->blk_rings.x86_64, sring_x86_64, - XEN_PAGE_SIZE * nr_grefs); + struct blkif_x86_64_sring *sring_x86_64 = + (struct blkif_x86_64_sring *)ring->blk_ring; + + BACK_RING_ATTACH(&ring->blk_rings.x86_64, sring_x86_64, + rsp_prod, XEN_PAGE_SIZE * nr_grefs); + size = __RING_SIZE(sring_x86_64, XEN_PAGE_SIZE * nr_grefs); break; } default: BUG(); } + err = -EIO; + if (req_prod - rsp_prod > size) + goto fail; + err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn, xen_blkif_be_int, 0, "blkif-backend", ring); - if (err < 0) { - xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring); - ring->blk_rings.common.sring = NULL; - return err; - } + if (err < 0) + goto fail; ring->irq = err; return 0; + +fail: + xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring); + ring->blk_rings.common.sring = NULL; + return err; } static int xen_blkif_disconnect(struct xen_blkif *blkif) @@ -1131,7 +1150,8 @@ static struct xenbus_driver xen_blkbk_driver = { .ids = xen_blkbk_ids, .probe = xen_blkbk_probe, .remove = xen_blkbk_remove, - .otherend_changed = frontend_changed + .otherend_changed = frontend_changed, + .allow_rebind = true, }; int xen_blkif_xenbus_init(void) diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index a74d03913822df88989b9b0198da99dea490eef1..c02be06c529950ee89a08bc985cab6531f432342 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -1113,8 +1113,8 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity, if (!VDEV_IS_EXTENDED(info->vdevice)) { err = xen_translate_vdev(info->vdevice, &minor, &offset); if (err) - return err; - nr_parts = PARTS_PER_DISK; + return err; + nr_parts = PARTS_PER_DISK; } else { minor = BLKIF_MINOR_EXT(info->vdevice); nr_parts = PARTS_PER_EXT_DISK; diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 56887c6877a7b519c2bba19f9c15c273d6035eb2..ccb44fe790a71ebbc5ccfa07216cbffaad29d6bd 
100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -343,6 +343,12 @@ static int sysc_get_clocks(struct sysc *ddata) return -EINVAL; } + /* Always add a slot for main clocks fck and ick even if unused */ + if (!nr_fck) + ddata->nr_clocks++; + if (!nr_ick) + ddata->nr_clocks++; + ddata->clocks = devm_kcalloc(ddata->dev, ddata->nr_clocks, sizeof(*ddata->clocks), GFP_KERNEL); @@ -421,7 +427,7 @@ static int sysc_enable_opt_clocks(struct sysc *ddata) struct clk *clock; int i, error; - if (!ddata->clocks) + if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1) return 0; for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) { @@ -455,7 +461,7 @@ static void sysc_disable_opt_clocks(struct sysc *ddata) struct clk *clock; int i; - if (!ddata->clocks) + if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1) return; for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) { @@ -981,7 +987,8 @@ static int sysc_disable_module(struct device *dev) return ret; } - if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_MSTANDBY) + if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_MSTANDBY) || + ddata->cfg.quirks & (SYSC_QUIRK_FORCE_MSTANDBY)) best_mode = SYSC_IDLE_FORCE; reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift); @@ -1583,6 +1590,10 @@ static int sysc_reset(struct sysc *ddata) sysc_val |= sysc_mask; sysc_write(ddata, sysc_offset, sysc_val); + if (ddata->cfg.srst_udelay) + usleep_range(ddata->cfg.srst_udelay, + ddata->cfg.srst_udelay * 2); + if (ddata->clk_enable_quirk) ddata->clk_enable_quirk(ddata); diff --git a/drivers/char/agp/isoch.c b/drivers/char/agp/isoch.c index 31c374b1b91b038827254824b48bb91b8b577d68..7ecf20a6d19cb62564f9b17878717483c009755a 100644 --- a/drivers/char/agp/isoch.c +++ b/drivers/char/agp/isoch.c @@ -84,7 +84,6 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge, unsigned int cdev = 0; u32 mnistat, tnistat, tstatus, mcmd; u16 tnicmd, mnicmd; - u8 mcapndx; u32 tot_bw = 0, tot_n = 0, tot_rq = 0, y_max, rq_isoch, rq_async; u32 step, rem, rem_isoch, rem_async; int ret = 0; @@ -138,8 +137,6 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge, cur = list_entry(pos, struct agp_3_5_dev, list); dev = cur->dev; - mcapndx = cur->capndx; - pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &mnistat); master[cdev].maxbw = (mnistat >> 16) & 0xff; @@ -251,8 +248,6 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge, cur = master[cdev].dev; dev = cur->dev; - mcapndx = cur->capndx; - master[cdev].rq += (cdev == ndevs - 1) ? (rem_async + rem_isoch) : step; @@ -319,7 +314,7 @@ int agp_3_5_enable(struct agp_bridge_data *bridge) { struct pci_dev *td = bridge->dev, *dev = NULL; u8 mcapndx; - u32 isoch, arqsz; + u32 isoch; u32 tstatus, mstatus, ncapid; u32 mmajor; u16 mpstat; @@ -334,8 +329,6 @@ int agp_3_5_enable(struct agp_bridge_data *bridge) if (isoch == 0) /* isoch xfers not available, bail out. */ return -ENODEV; - arqsz = (tstatus >> 13) & 0x7; - /* * Allocate a head for our AGP 3.5 device list * (multiple AGP v3 devices are allowed behind a single bridge). 
diff --git a/drivers/char/random.c b/drivers/char/random.c index 909e0c3d82ea443bc5a34c3e085fa7835f2df1b5..cda12933a17dadd1f25c676e4414750175ad4ce8 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -2175,6 +2175,7 @@ const struct file_operations urandom_fops = { .read = urandom_read, .write = random_write, .unlocked_ioctl = random_ioctl, + .compat_ioctl = compat_ptr_ioctl, .fasync = random_fasync, .llseek = noop_llseek, }; diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c index 2ec47a69a2a6c1d061d1f94dca7d7f7be4421d57..87f4493402021b6af277881cbdb928cddbdf10db 100644 --- a/drivers/char/tpm/tpm-dev-common.c +++ b/drivers/char/tpm/tpm-dev-common.c @@ -61,6 +61,12 @@ static void tpm_dev_async_work(struct work_struct *work) mutex_lock(&priv->buffer_mutex); priv->command_enqueued = false; + ret = tpm_try_get_ops(priv->chip); + if (ret) { + priv->response_length = ret; + goto out; + } + ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer, sizeof(priv->data_buffer)); tpm_put_ops(priv->chip); @@ -68,6 +74,7 @@ static void tpm_dev_async_work(struct work_struct *work) priv->response_length = ret; mod_timer(&priv->user_read_timer, jiffies + (120 * HZ)); } +out: mutex_unlock(&priv->buffer_mutex); wake_up_interruptible(&priv->async_wait); } @@ -123,7 +130,7 @@ ssize_t tpm_common_read(struct file *file, char __user *buf, priv->response_read = true; ret_size = min_t(ssize_t, size, priv->response_length); - if (!ret_size) { + if (ret_size <= 0) { priv->response_length = 0; goto out; } @@ -204,6 +211,7 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf, if (file->f_flags & O_NONBLOCK) { priv->command_enqueued = true; queue_work(tpm_dev_wq, &priv->async_work); + tpm_put_ops(priv->chip); mutex_unlock(&priv->buffer_mutex); return size; } diff --git a/drivers/char/tpm/tpm-dev.h b/drivers/char/tpm/tpm-dev.h index 1089fc0bb290cb59098321fbf6605872167f7a15..f3742bcc73e377ce8b1bbc35fdbe1c4a2978d7af 100644 --- a/drivers/char/tpm/tpm-dev.h +++ b/drivers/char/tpm/tpm-dev.h @@ -14,7 +14,7 @@ struct file_priv { struct work_struct timeout_work; struct work_struct async_work; wait_queue_head_t async_wait; - size_t response_length; + ssize_t response_length; bool response_read; bool command_enqueued; diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index b9e1547be6b51e1ea60491cfbdb9705465ec6626..5620747da0cfd7e75d0af923fe749212cf14938b 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -218,7 +218,6 @@ int tpm2_pcr_read(struct tpm_chip *chip, u32 pcr_idx, int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, struct tpm_digest *digests); int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max); -void tpm2_flush_context(struct tpm_chip *chip, u32 handle); ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value, const char *desc); diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index fdb457704aa798437c1d56ad7c1a3de9a06c36ba..13696deceae8e7fb73862ea99d731a58fe647f67 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c @@ -362,6 +362,7 @@ void tpm2_flush_context(struct tpm_chip *chip, u32 handle) tpm_transmit_cmd(chip, &buf, 0, "flushing context"); tpm_buf_destroy(&buf); } +EXPORT_SYMBOL_GPL(tpm2_flush_context); struct tpm2_get_cap_out { u8 more_data; diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c index 6640a14dbe48cf137f2ed5212e699888730cfebd..22bf553ccf9df35e61c411707a78373f4ea8aa38 100644 --- 
a/drivers/char/tpm/tpm_ftpm_tee.c +++ b/drivers/char/tpm/tpm_ftpm_tee.c @@ -32,7 +32,7 @@ static const uuid_t ftpm_ta_uuid = 0x82, 0xCB, 0x34, 0x3F, 0xB7, 0xF3, 0x78, 0x96); /** - * ftpm_tee_tpm_op_recv - retrieve fTPM response. + * ftpm_tee_tpm_op_recv() - retrieve fTPM response. * @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h. * @buf: the buffer to store data. * @count: the number of bytes to read. @@ -61,7 +61,7 @@ static int ftpm_tee_tpm_op_recv(struct tpm_chip *chip, u8 *buf, size_t count) } /** - * ftpm_tee_tpm_op_send - send TPM commands through the TEE shared memory. + * ftpm_tee_tpm_op_send() - send TPM commands through the TEE shared memory. * @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h * @buf: the buffer to send. * @len: the number of bytes to send. @@ -208,7 +208,7 @@ static int ftpm_tee_match(struct tee_ioctl_version_data *ver, const void *data) } /** - * ftpm_tee_probe - initialize the fTPM + * ftpm_tee_probe() - initialize the fTPM * @pdev: the platform_device description. * * Return: @@ -298,7 +298,7 @@ out_tee_session: } /** - * ftpm_tee_remove - remove the TPM device + * ftpm_tee_remove() - remove the TPM device * @pdev: the platform_device description. * * Return: @@ -328,6 +328,19 @@ static int ftpm_tee_remove(struct platform_device *pdev) return 0; } +/** + * ftpm_tee_shutdown() - shutdown the TPM device + * @pdev: the platform_device description. + */ +static void ftpm_tee_shutdown(struct platform_device *pdev) +{ + struct ftpm_tee_private *pvt_data = dev_get_drvdata(&pdev->dev); + + tee_shm_free(pvt_data->shm); + tee_client_close_session(pvt_data->ctx, pvt_data->session); + tee_client_close_context(pvt_data->ctx); +} + static const struct of_device_id of_ftpm_tee_ids[] = { { .compatible = "microsoft,ftpm" }, { } @@ -341,6 +354,7 @@ static struct platform_driver ftpm_tee_driver = { }, .probe = ftpm_tee_probe, .remove = ftpm_tee_remove, + .shutdown = ftpm_tee_shutdown, }; module_platform_driver(ftpm_tee_driver); diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index 8af2cee1a762cd1ad1092cabee4f0b757f2f4ff9..27c6ca031e23ec8a26401dc281271404e8e3a008 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -1059,8 +1059,6 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, goto out_err; } - tpm_chip_start(chip); - chip->flags |= TPM_CHIP_FLAG_IRQ; if (irq) { tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED, irq); @@ -1070,7 +1068,6 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, } else { tpm_tis_probe_irq(chip, intmask); } - tpm_chip_stop(chip); } rc = tpm_chip_register(chip); diff --git a/drivers/clk/at91/at91sam9260.c b/drivers/clk/at91/at91sam9260.c index 0aabe49aed09449b153de865adc739dc8e22eacf..a9d4234758d7d42258a6686866bb20ba7aa0f847 100644 --- a/drivers/clk/at91/at91sam9260.c +++ b/drivers/clk/at91/at91sam9260.c @@ -348,7 +348,7 @@ static void __init at91sam926x_pmc_setup(struct device_node *np, return; mainxtal_name = of_clk_get_parent_name(np, i); - regmap = syscon_node_to_regmap(np); + regmap = device_node_to_regmap(np); if (IS_ERR(regmap)) return; diff --git a/drivers/clk/at91/at91sam9rl.c b/drivers/clk/at91/at91sam9rl.c index 0ac34cdaa106932fb797c534dae04273d26b8aad..77fe83a73bf480815e3aca23678231fee960235d 100644 --- a/drivers/clk/at91/at91sam9rl.c +++ b/drivers/clk/at91/at91sam9rl.c @@ -83,7 +83,7 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np) return; mainxtal_name = 
of_clk_get_parent_name(np, i); - regmap = syscon_node_to_regmap(np); + regmap = device_node_to_regmap(np); if (IS_ERR(regmap)) return; diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c index 0855f3a80cc79ea8ea6933192964e02c0a2ef079..086cf0b4955c2ec5591e740785abe658452853fb 100644 --- a/drivers/clk/at91/at91sam9x5.c +++ b/drivers/clk/at91/at91sam9x5.c @@ -146,7 +146,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np, return; mainxtal_name = of_clk_get_parent_name(np, i); - regmap = syscon_node_to_regmap(np); + regmap = device_node_to_regmap(np); if (IS_ERR(regmap)) return; diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c index 0b03cfae3a9ddcae4c7906edc77c82ac40d30c6c..b71515acdec1f0eaebdf865fb184cdbed40d27b8 100644 --- a/drivers/clk/at91/pmc.c +++ b/drivers/clk/at91/pmc.c @@ -275,7 +275,7 @@ static int __init pmc_register_ops(void) np = of_find_matching_node(NULL, sama5d2_pmc_dt_ids); - pmcreg = syscon_node_to_regmap(np); + pmcreg = device_node_to_regmap(np); if (IS_ERR(pmcreg)) return PTR_ERR(pmcreg); diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c index 0de1108737db9326de1507dc8bb7b6e48b5df1c9..ff7e3f727082e261a5990d332b16bcec6750ee3d 100644 --- a/drivers/clk/at91/sama5d2.c +++ b/drivers/clk/at91/sama5d2.c @@ -162,7 +162,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np) return; mainxtal_name = of_clk_get_parent_name(np, i); - regmap = syscon_node_to_regmap(np); + regmap = device_node_to_regmap(np); if (IS_ERR(regmap)) return; diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c index 25b156d4e645f6daf4edea37f16cede5ac3ce4b9..a6dee4a3b6e48eade40a187be4b195cf0c7b17fe 100644 --- a/drivers/clk/at91/sama5d4.c +++ b/drivers/clk/at91/sama5d4.c @@ -136,7 +136,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np) return; mainxtal_name = of_clk_get_parent_name(np, i); - regmap = syscon_node_to_regmap(np); + regmap = device_node_to_regmap(np); if (IS_ERR(regmap)) return; diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index b68e200829f202d78f49af69cd20c291f0d92022..772258de2d1f3c270e0c5747fb848be5e7c506e4 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -3249,6 +3249,34 @@ static inline void clk_debug_unregister(struct clk_core *core) } #endif +static void clk_core_reparent_orphans_nolock(void) +{ + struct clk_core *orphan; + struct hlist_node *tmp2; + + /* + * walk the list of orphan clocks and reparent any that newly find a + * parent. + */ + hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) { + struct clk_core *parent = __clk_init_parent(orphan); + + /* + * We need to use __clk_set_parent_before() and _after() to + * properly migrate any prepare/enable count of the orphan + * clock. This is important for CLK_IS_CRITICAL clocks, which + * are enabled during init but might not have a parent yet.
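The refactor above pulls the orphan walk out of __clk_core_init() into clk_core_reparent_orphans_nolock() so that a locked wrapper can reuse it when an OF provider registers later in the series. A minimal sketch of the locked/unlocked helper split, using hypothetical names rather than the clk core's real lock and list:

#include <linux/lockdep.h>
#include <linux/mutex.h>

/* assumes a subsystem-wide lock guarding the orphan list */
static DEFINE_MUTEX(example_lock);

static void example_reparent_orphans_nolock(void)
{
	lockdep_assert_held(&example_lock);
	/* walk the orphan list and reattach entries that found a parent */
}

/* wrapper for callers that do not already hold the lock */
static void example_reparent_orphans(void)
{
	mutex_lock(&example_lock);
	example_reparent_orphans_nolock();
	mutex_unlock(&example_lock);
}

In the hunk itself, __clk_core_init() already runs under the prepare lock and calls the _nolock variant directly, while the new clk_core_reparent_orphans() takes clk_prepare_lock() first.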
+ */ + if (parent) { + /* update the clk tree topology */ + __clk_set_parent_before(orphan, parent); + __clk_set_parent_after(orphan, parent, NULL); + __clk_recalc_accuracies(orphan); + __clk_recalc_rates(orphan, 0); + } + } +} + /** * __clk_core_init - initialize the data structures in a struct clk_core * @core: clk_core being initialized @@ -3259,8 +3287,6 @@ static inline void clk_debug_unregister(struct clk_core *core) static int __clk_core_init(struct clk_core *core) { int ret; - struct clk_core *orphan; - struct hlist_node *tmp2; unsigned long rate; if (!core) @@ -3400,34 +3426,21 @@ static int __clk_core_init(struct clk_core *core) if (core->flags & CLK_IS_CRITICAL) { unsigned long flags; - clk_core_prepare(core); + ret = clk_core_prepare(core); + if (ret) + goto out; flags = clk_enable_lock(); - clk_core_enable(core); + ret = clk_core_enable(core); clk_enable_unlock(flags); + if (ret) { + clk_core_unprepare(core); + goto out; + } } - /* - * walk the list of orphan clocks and reparent any that newly finds a - * parent. - */ - hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) { - struct clk_core *parent = __clk_init_parent(orphan); + clk_core_reparent_orphans_nolock(); - /* - * We need to use __clk_set_parent_before() and _after() to - * to properly migrate any prepare/enable count of the orphan - * clock. This is important for CLK_IS_CRITICAL clocks, which - * are enabled during init but might not have a parent yet. - */ - if (parent) { - /* update the clk tree topology */ - __clk_set_parent_before(orphan, parent); - __clk_set_parent_after(orphan, parent, NULL); - __clk_recalc_accuracies(orphan); - __clk_recalc_rates(orphan, 0); - } - } kref_init(&core->ref); out: @@ -4179,6 +4192,13 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb) EXPORT_SYMBOL_GPL(clk_notifier_unregister); #ifdef CONFIG_OF +static void clk_core_reparent_orphans(void) +{ + clk_prepare_lock(); + clk_core_reparent_orphans_nolock(); + clk_prepare_unlock(); +} + /** * struct of_clk_provider - Clock provider registration structure * @link: Entry in global list of clock providers @@ -4274,6 +4294,8 @@ int of_clk_add_provider(struct device_node *np, mutex_unlock(&of_clk_mutex); pr_debug("Added clock from %pOF\n", np); + clk_core_reparent_orphans(); + ret = of_clk_set_defaults(np, true); if (ret < 0) of_clk_del_provider(np); @@ -4309,6 +4331,8 @@ int of_clk_add_hw_provider(struct device_node *np, mutex_unlock(&of_clk_mutex); pr_debug("Added clk_hw provider from %pOF\n", np); + clk_core_reparent_orphans(); + ret = of_clk_set_defaults(np, true); if (ret < 0) of_clk_del_provider(np); diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c index 388bdb94f841da0a153cb024346832d282e853b8..d3486ee79ab54ebf186e94deb7b37d759a553f76 100644 --- a/drivers/clk/imx/clk-composite-8m.c +++ b/drivers/clk/imx/clk-composite-8m.c @@ -142,6 +142,7 @@ struct clk *imx8m_clk_composite_flags(const char *name, mux->reg = reg; mux->shift = PCG_PCS_SHIFT; mux->mask = PCG_PCS_MASK; + mux->lock = &imx_ccm_lock; div = kzalloc(sizeof(*div), GFP_KERNEL); if (!div) @@ -161,6 +162,7 @@ struct clk *imx8m_clk_composite_flags(const char *name, gate_hw = &gate->hw; gate->reg = reg; gate->bit_idx = PCG_CGC_SHIFT; + gate->lock = &imx_ccm_lock; hw = clk_hw_register_composite(NULL, name, parent_names, num_parents, mux_hw, &clk_mux_ops, div_hw, diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c index 
3fdf3d494f0afc305c0212fb57914f1abbbf498c..281191b55b3af5b59a43384bb2d1949d7393e98d 100644 --- a/drivers/clk/imx/clk-imx7ulp.c +++ b/drivers/clk/imx/clk-imx7ulp.c @@ -40,6 +40,7 @@ static const struct clk_div_table ulp_div_table[] = { { .val = 5, .div = 16, }, { .val = 6, .div = 32, }, { .val = 7, .div = 64, }, + { /* sentinel */ }, }; static const int pcc2_uart_clk_ids[] __initconst = { diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c index 5c458199060a6e9557bb36b02fb488bb0b76a1bc..3636c8035c7d95e12012a2e9ff0bb40dc659c50b 100644 --- a/drivers/clk/imx/clk-pll14xx.c +++ b/drivers/clk/imx/clk-pll14xx.c @@ -159,7 +159,7 @@ static int clk_pll14xx_wait_lock(struct clk_pll14xx *pll) { u32 val; - return readl_poll_timeout(pll->base, val, val & LOCK_TIMEOUT_US, 0, + return readl_poll_timeout(pll->base, val, val & LOCK_STATUS, 0, LOCK_TIMEOUT_US); } diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c index a60a1be937ad6566a50270fef46b458ba8edf954..b4a95cbbda989fae6dbc2aca8bd90f4ea945ee77 100644 --- a/drivers/clk/mmp/clk-of-mmp2.c +++ b/drivers/clk/mmp/clk-of-mmp2.c @@ -134,7 +134,7 @@ static DEFINE_SPINLOCK(ssp3_lock); static const char *ssp_parent_names[] = {"vctcxo_4", "vctcxo_2", "vctcxo", "pll1_16"}; static DEFINE_SPINLOCK(timer_lock); -static const char *timer_parent_names[] = {"clk32", "vctcxo_2", "vctcxo_4", "vctcxo"}; +static const char *timer_parent_names[] = {"clk32", "vctcxo_4", "vctcxo_2", "vctcxo"}; static DEFINE_SPINLOCK(reset_lock); diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c index 38424e63bcae2e5f03494e58ba0d24f4d17b8765..7f59fb8da0337c64bb04cabd00c2de8efe2c05b1 100644 --- a/drivers/clk/qcom/gcc-sc7180.c +++ b/drivers/clk/qcom/gcc-sc7180.c @@ -2186,7 +2186,8 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = { .pd = { .name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc", }, - .pwrsts = PWRSTS_OFF_ON | VOTABLE, + .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = { @@ -2194,7 +2195,8 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = { .pd = { .name = "hlos1_vote_mmnoc_mmu_tbu_sf_gdsc", }, - .pwrsts = PWRSTS_OFF_ON | VOTABLE, + .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc *gcc_sc7180_gdscs[] = { diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c index f7b370f3acef6f15de4887e95aa22f448e88d863..f6ce888098be9b3fbeab9852e84359614ac45d54 100644 --- a/drivers/clk/qcom/gcc-sdm845.c +++ b/drivers/clk/qcom/gcc-sdm845.c @@ -3255,6 +3255,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc = { .name = "hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc = { @@ -3263,6 +3264,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc = { .name = "hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc hlos1_vote_aggre_noc_mmu_tbu1_gdsc = { @@ -3271,6 +3273,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_tbu1_gdsc = { .name = "hlos1_vote_aggre_noc_mmu_tbu1_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc hlos1_vote_aggre_noc_mmu_tbu2_gdsc = { @@ -3279,6 +3282,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_tbu2_gdsc = { .name = "hlos1_vote_aggre_noc_mmu_tbu2_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = { @@ -3287,6 +3291,7 @@ static struct gdsc 
hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = { .name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = { @@ -3295,6 +3300,7 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = { .name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = { @@ -3303,6 +3309,7 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = { .name = "hlos1_vote_mmnoc_mmu_tbu_sf_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct clk_regmap *gcc_sdm845_clocks[] = { diff --git a/drivers/clk/qcom/gpucc-msm8998.c b/drivers/clk/qcom/gpucc-msm8998.c index e5e2492b20c5e840cb999142c70bb4360edc2035..9b3923af02a142dcf7c821114adee5522a3c542f 100644 --- a/drivers/clk/qcom/gpucc-msm8998.c +++ b/drivers/clk/qcom/gpucc-msm8998.c @@ -242,10 +242,12 @@ static struct clk_branch gfx3d_isense_clk = { static struct gdsc gpu_cx_gdsc = { .gdscr = 0x1004, + .gds_hw_ctrl = 0x1008, .pd = { .name = "gpu_cx", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc gpu_gx_gdsc = { diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c index 3a991ca1ee3605af87831a603914db9ee3fe5deb..c9e5a1fb66539eed0f078d01849aa947752c2d5b 100644 --- a/drivers/clk/samsung/clk-exynos5420.c +++ b/drivers/clk/samsung/clk-exynos5420.c @@ -12,6 +12,7 @@ #include #include #include +#include #include "clk.h" #include "clk-cpu.h" @@ -1646,6 +1647,13 @@ static void __init exynos5x_clk_init(struct device_node *np, exynos5x_subcmus); } + /* + * Keep top part of G3D clock path enabled permanently to ensure + * that the internal busses get their clock regardless of the + * main G3D clock enablement status. 
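The comment above belongs to a hunk that takes a permanent reference on the mux feeding the G3D bus clocks. A sketch of that pinning idiom, assuming a hypothetical clock name; the reference is deliberately never dropped:

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/init.h>

static void __init example_pin_bus_clock(void)
{
	/* __clk_lookup() returns NULL if no such clock is registered */
	struct clk *clk = __clk_lookup("example_bus_mux");

	/* prepare+enable once and never release, keeping the path clocked */
	if (clk)
		clk_prepare_enable(clk);
}

The cost is that the pinned clock can no longer be gated, which is exactly the point for a bus that must stay clocked regardless of its consumers.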
+ */ + clk_prepare_enable(__clk_lookup("mout_sw_aclk_g3d")); + samsung_clk_of_add_provider(np, ctx); } diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c index 45a1ed3fe6742fd7ed9edc8f797a0f80e63a220b..50f8d1bc7046552804a1bad0576be558e9b89cb6 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c @@ -23,9 +23,9 @@ */ static const char * const ar100_r_apb2_parents[] = { "osc24M", "osc32k", - "pll-periph0", "iosc" }; + "iosc", "pll-periph0" }; static const struct ccu_mux_var_prediv ar100_r_apb2_predivs[] = { - { .index = 2, .shift = 0, .width = 5 }, + { .index = 3, .shift = 0, .width = 5 }, }; static struct ccu_div ar100_clk = { @@ -51,17 +51,7 @@ static struct ccu_div ar100_clk = { static CLK_FIXED_FACTOR_HW(r_ahb_clk, "r-ahb", &ar100_clk.common.hw, 1, 1, 0); -static struct ccu_div r_apb1_clk = { - .div = _SUNXI_CCU_DIV(0, 2), - - .common = { - .reg = 0x00c, - .hw.init = CLK_HW_INIT("r-apb1", - "r-ahb", - &ccu_div_ops, - 0), - }, -}; +static SUNXI_CCU_M(r_apb1_clk, "r-apb1", "r-ahb", 0x00c, 0, 2, 0); static struct ccu_div r_apb2_clk = { .div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO), diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r.c b/drivers/clk/sunxi-ng/ccu-sun8i-r.c index 4646fdc61053b0c65d857a8424c71e5e517a969d..4c8c491b87c2777d636cdd15b603e0c45c72cc71 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-r.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-r.c @@ -51,19 +51,7 @@ static struct ccu_div ar100_clk = { static CLK_FIXED_FACTOR_HW(ahb0_clk, "ahb0", &ar100_clk.common.hw, 1, 1, 0); -static struct ccu_div apb0_clk = { - .div = _SUNXI_CCU_DIV_FLAGS(0, 2, CLK_DIVIDER_POWER_OF_TWO), - - .common = { - .reg = 0x0c, - .hw.init = CLK_HW_INIT_HW("apb0", - &ahb0_clk.hw, - &ccu_div_ops, - 0), - }, -}; - -static SUNXI_CCU_M(a83t_apb0_clk, "apb0", "ahb0", 0x0c, 0, 2, 0); +static SUNXI_CCU_M(apb0_clk, "apb0", "ahb0", 0x0c, 0, 2, 0); /* * Define the parent as an array that can be reused to save space @@ -127,7 +115,7 @@ static struct ccu_mp a83t_ir_clk = { static struct ccu_common *sun8i_a83t_r_ccu_clks[] = { &ar100_clk.common, - &a83t_apb0_clk.common, + &apb0_clk.common, &apb0_pio_clk.common, &apb0_ir_clk.common, &apb0_timer_clk.common, @@ -167,7 +155,7 @@ static struct clk_hw_onecell_data sun8i_a83t_r_hw_clks = { .hws = { [CLK_AR100] = &ar100_clk.common.hw, [CLK_AHB0] = &ahb0_clk.hw, - [CLK_APB0] = &a83t_apb0_clk.common.hw, + [CLK_APB0] = &apb0_clk.common.hw, [CLK_APB0_PIO] = &apb0_pio_clk.common.hw, [CLK_APB0_IR] = &apb0_ir_clk.common.hw, [CLK_APB0_TIMER] = &apb0_timer_clk.common.hw, @@ -282,9 +270,6 @@ static void __init sunxi_r_ccu_init(struct device_node *node, static void __init sun8i_a83t_r_ccu_setup(struct device_node *node) { - /* Fix apb0 bus gate parents here */ - apb0_gate_parent[0] = &a83t_apb0_clk.common.hw; - sunxi_r_ccu_init(node, &sun8i_a83t_r_ccu_desc); } CLK_OF_DECLARE(sun8i_a83t_r_ccu, "allwinner,sun8i-a83t-r-ccu", diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c index 897490800102f3612b79d9c6e37efed0e74ef08e..23bfe1d12f217efdde824e3288d0727d29064e73 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c @@ -761,7 +761,8 @@ static struct ccu_mp outa_clk = { .reg = 0x1f0, .features = CCU_FEATURE_FIXED_PREDIV, .hw.init = CLK_HW_INIT_PARENTS("outa", out_parents, - &ccu_mp_ops, 0), + &ccu_mp_ops, + CLK_SET_RATE_PARENT), } }; @@ -779,7 +780,8 @@ static struct ccu_mp outb_clk = { .reg = 0x1f4, .features = 
CCU_FEATURE_FIXED_PREDIV, .hw.init = CLK_HW_INIT_PARENTS("outb", out_parents, - &ccu_mp_ops, 0), + &ccu_mp_ops, + CLK_SET_RATE_PARENT), } }; diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c index 5c779eec454b6edc734ced3a9fd7d5b1a020f4de..0e36ca3bf3d528d1788c195f84bdeea59ef5897b 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c @@ -618,7 +618,7 @@ static struct clk_hw_onecell_data sun8i_v3s_hw_clks = { [CLK_MBUS] = &mbus_clk.common.hw, [CLK_MIPI_CSI] = &mipi_csi_clk.common.hw, }, - .num = CLK_NUMBER, + .num = CLK_PLL_DDR1 + 1, }; static struct clk_hw_onecell_data sun8i_v3_hw_clks = { @@ -700,7 +700,7 @@ static struct clk_hw_onecell_data sun8i_v3_hw_clks = { [CLK_MBUS] = &mbus_clk.common.hw, [CLK_MIPI_CSI] = &mipi_csi_clk.common.hw, }, - .num = CLK_NUMBER, + .num = CLK_I2S0 + 1, }; static struct ccu_reset_map sun8i_v3s_ccu_resets[] = { diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h index b0160d305a6775a3721a5438b4223e8a77251367..108eeeedcbf7602dfbe55b40624ccae9786b27d0 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h +++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h @@ -51,6 +51,4 @@ #define CLK_PLL_DDR1 74 -#define CLK_NUMBER (CLK_I2S0 + 1) - #endif /* _CCU_SUN8I_H3_H_ */ diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c index e6bd6d1ea0128525f76fb4b07f12e7a31054f133..f6cdce441cf7ac4e45eb9bb26576187c47d546e1 100644 --- a/drivers/clk/tegra/clk.c +++ b/drivers/clk/tegra/clk.c @@ -231,8 +231,10 @@ struct clk ** __init tegra_clk_init(void __iomem *regs, int num, int banks) periph_banks = banks; clks = kcalloc(num, sizeof(struct clk *), GFP_KERNEL); - if (!clks) + if (!clks) { kfree(periph_clk_enb_refcnt); + return NULL; + } clk_num = num; diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c index f65e16c4f3c4b8585ea56cc97633730a6882a01f..8d4c08b034bdde50e77b60bd15cacba7860698b2 100644 --- a/drivers/clk/ti/clk-dra7-atl.c +++ b/drivers/clk/ti/clk-dra7-atl.c @@ -233,7 +233,6 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev) cinfo->iobase = of_iomap(node, 0); cinfo->dev = &pdev->dev; pm_runtime_enable(cinfo->dev); - pm_runtime_irq_safe(cinfo->dev); pm_runtime_get_sync(cinfo->dev); atl_write(cinfo, DRA7_ATL_PCLKMUX_REG(0), DRA7_ATL_PCLKMUX); diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c index 4e54856ce2a5f3c79d36d7d650b5c538128f66d8..c4f15c4068c02b0cc37831543a33a2c87fe9aa51 100644 --- a/drivers/clocksource/timer-riscv.c +++ b/drivers/clocksource/timer-riscv.c @@ -56,7 +56,7 @@ static unsigned long long riscv_clocksource_rdtime(struct clocksource *cs) return get_cycles64(); } -static u64 riscv_sched_clock(void) +static u64 notrace riscv_sched_clock(void) { return get_cycles64(); } diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index f1d170dcf4d310cfa7c936ab1638b93b67f109ca..aba591d57c67468bf323f0a9d70c53a830b0240f 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -121,6 +121,8 @@ static const struct of_device_id blacklist[] __initconst = { { .compatible = "mediatek,mt8176", }, { .compatible = "mediatek,mt8183", }, + { .compatible = "nvidia,tegra20", }, + { .compatible = "nvidia,tegra30", }, { .compatible = "nvidia,tegra124", }, { .compatible = "nvidia,tegra210", }, diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c index 
506e3f2bf53aca6c62242c0f38ab3ebe3350ddf8..83c85d3d67e355495695b278b37586869e466f0c 100644 --- a/drivers/cpufreq/vexpress-spc-cpufreq.c +++ b/drivers/cpufreq/vexpress-spc-cpufreq.c @@ -434,7 +434,7 @@ static int ve_spc_cpufreq_init(struct cpufreq_policy *policy) if (cur_cluster < MAX_CLUSTERS) { int cpu; - cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu)); + dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus); for_each_cpu(cpu, policy->cpus) per_cpu(physical_cluster, cpu) = cur_cluster; diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c index de7e706efd460e86dea44c7b21fc5d078d6bf8f4..6deaaf5f05b5765598115132492edc4f654df9da 100644 --- a/drivers/cpuidle/governors/teo.c +++ b/drivers/cpuidle/governors/teo.c @@ -198,7 +198,7 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev) * pattern detection. */ cpu_data->intervals[cpu_data->interval_idx++] = measured_ns; - if (cpu_data->interval_idx > INTERVALS) + if (cpu_data->interval_idx >= INTERVALS) cpu_data->interval_idx = 0; } diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index 26754d0570ba9cffcf829b873ec2ecab69c2abe6..b846d73d9a855ceafb74129377c581460760821c 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -40,7 +40,7 @@ struct sec_req { int req_id; /* Status of the SEC request */ - int fake_busy; + atomic_t fake_busy; }; /** @@ -132,8 +132,8 @@ struct sec_debug_file { }; struct sec_dfx { - u64 send_cnt; - u64 recv_cnt; + atomic64_t send_cnt; + atomic64_t recv_cnt; }; struct sec_debug { diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 62b04e19067c3cfb5fb1856ab15885f9e7197237..0a5391fff485c43447498f837fb05f4f6803f4a9 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -120,7 +120,7 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp) return; } - __sync_add_and_fetch(&req->ctx->sec->debug.dfx.recv_cnt, 1); + atomic64_inc(&req->ctx->sec->debug.dfx.recv_cnt); req->ctx->req_op->buf_unmap(req->ctx, req); @@ -135,13 +135,13 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req) mutex_lock(&qp_ctx->req_lock); ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe); mutex_unlock(&qp_ctx->req_lock); - __sync_add_and_fetch(&ctx->sec->debug.dfx.send_cnt, 1); + atomic64_inc(&ctx->sec->debug.dfx.send_cnt); if (ret == -EBUSY) return -ENOBUFS; if (!ret) { - if (req->fake_busy) + if (atomic_read(&req->fake_busy)) ret = -EBUSY; else ret = -EINPROGRESS; @@ -641,7 +641,7 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req) if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt) sec_update_iv(req); - if (__sync_bool_compare_and_swap(&req->fake_busy, 1, 0)) + if (atomic_cmpxchg(&req->fake_busy, 1, 0) != 1) sk_req->base.complete(&sk_req->base, -EINPROGRESS); sk_req->base.complete(&sk_req->base, req->err_type); @@ -672,9 +672,9 @@ static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req) } if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs)) - req->fake_busy = 1; + atomic_set(&req->fake_busy, 1); else - req->fake_busy = 0; + atomic_set(&req->fake_busy, 0); ret = ctx->req_op->get_res(ctx, req); if (ret) { diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 74f0654028c968c7582ddb133ea30a74e13b2b74..ab742dfbab997562b9a6a9b55a2b1fe700797258 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c 
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -608,6 +608,14 @@ static const struct file_operations sec_dbg_fops = { .write = sec_debug_write, }; +static int debugfs_atomic64_t_get(void *data, u64 *val) +{ + *val = atomic64_read((atomic64_t *)data); + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic64_t_ro, debugfs_atomic64_t_get, NULL, + "%lld\n"); + static int sec_core_debug_init(struct sec_dev *sec) { struct hisi_qm *qm = &sec->qm; @@ -628,9 +636,11 @@ static int sec_core_debug_init(struct sec_dev *sec) debugfs_create_regset32("regs", 0444, tmp_d, regset); - debugfs_create_u64("send_cnt", 0444, tmp_d, &dfx->send_cnt); + debugfs_create_file("send_cnt", 0444, tmp_d, &dfx->send_cnt, + &fops_atomic64_t_ro); - debugfs_create_u64("recv_cnt", 0444, tmp_d, &dfx->recv_cnt); + debugfs_create_file("recv_cnt", 0444, tmp_d, &dfx->recv_cnt, + &fops_atomic64_t_ro); return 0; } diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig index defe1d4387105e4a4ff368b57d8a0fbcadb9fd61..35535833b6f78e314eea7dea48f31093b6790845 100644 --- a/drivers/devfreq/Kconfig +++ b/drivers/devfreq/Kconfig @@ -83,7 +83,6 @@ config ARM_EXYNOS_BUS_DEVFREQ select DEVFREQ_GOV_PASSIVE select DEVFREQ_EVENT_EXYNOS_PPMU select PM_DEVFREQ_EVENT - select PM_OPP help This adds the common DEVFREQ driver for Exynos Memory bus. Exynos Memory bus has one more group of memory bus (e.g, MIF and INT block). @@ -98,7 +97,7 @@ config ARM_TEGRA_DEVFREQ ARCH_TEGRA_132_SOC || ARCH_TEGRA_124_SOC || \ ARCH_TEGRA_210_SOC || \ COMPILE_TEST - select PM_OPP + depends on COMMON_CLK help This adds the DEVFREQ driver for the Tegra family of SoCs. It reads ACTMON counters of memory controllers and adjusts the @@ -109,7 +108,6 @@ config ARM_TEGRA20_DEVFREQ depends on (TEGRA_MC && TEGRA20_EMC) || COMPILE_TEST depends on COMMON_CLK select DEVFREQ_GOV_SIMPLE_ONDEMAND - select PM_OPP help This adds the DEVFREQ driver for the Tegra20 family of SoCs. It reads Memory Controller counters and adjusts the operating @@ -121,7 +119,6 @@ config ARM_RK3399_DMC_DEVFREQ select DEVFREQ_EVENT_ROCKCHIP_DFI select DEVFREQ_GOV_SIMPLE_ONDEMAND select PM_DEVFREQ_EVENT - select PM_OPP help This adds the DEVFREQ driver for the RK3399 DMC(Dynamic Memory Controller). 
It sets the frequency for the memory controller and reads the usage counts diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index fa626acdc9b961919208a316d719ad9d2f25118b..44af435628f805ec856769386625be62d437e1fb 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c @@ -999,7 +999,8 @@ static const struct jz4780_dma_soc_data jz4740_dma_soc_data = { static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = { .nb_channels = 6, .transfer_ord_max = 5, - .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC, + .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC | + JZ_SOC_DATA_BREAK_LINKS, }; static const struct jz4780_dma_soc_data jz4770_dma_soc_data = { diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 1a422a8b43cfb29806026b0be2996dc1dee4aaa5..18c011e57592ebc838a30d118b7465d8c8a09a46 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -377,10 +377,11 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags) descs->virt = dma_alloc_coherent(to_dev(ioat_chan), SZ_2M, &descs->hw, flags); - if (!descs->virt && (i > 0)) { + if (!descs->virt) { int idx; for (idx = 0; idx < i; idx++) { + descs = &ioat_chan->descs[idx]; dma_free_coherent(to_dev(ioat_chan), SZ_2M, descs->virt, descs->hw); descs->virt = NULL; diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index adecea51814f0a2cb6a26f8593bbf86e9c931e3d..c5c1aa0dcaeddc9c52a5a7f6004e2c20c58dd515 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c @@ -229,9 +229,11 @@ static irqreturn_t k3_dma_int_handler(int irq, void *dev_id) c = p->vchan; if (c && (tc1 & BIT(i))) { spin_lock_irqsave(&c->vc.lock, flags); - vchan_cookie_complete(&p->ds_run->vd); - p->ds_done = p->ds_run; - p->ds_run = NULL; + if (p->ds_run != NULL) { + vchan_cookie_complete(&p->ds_run->vd); + p->ds_done = p->ds_run; + p->ds_run = NULL; + } spin_unlock_irqrestore(&c->vc.lock, flags); } if (c && (tc2 & BIT(i))) { @@ -271,6 +273,10 @@ static int k3_dma_start_txd(struct k3_dma_chan *c) if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d)) return -EAGAIN; + /* Avoid losing track of ds_run if a transaction is in flight */ + if (c->phy->ds_run) + return -EAGAIN; + if (vd) { struct k3_dma_desc_sw *ds = container_of(vd, struct k3_dma_desc_sw, vd); diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c index ec4adf4260a098488fff4471e82c2f1a89b90c54..256fc662c500f389fc898f2e5bc4da7a708695ae 100644 --- a/drivers/dma/virt-dma.c +++ b/drivers/dma/virt-dma.c @@ -104,9 +104,8 @@ static void vchan_complete(unsigned long arg) dmaengine_desc_get_callback(&vd->tx, &cb); list_del(&vd->node); - vchan_vdesc_fini(vd); - dmaengine_desc_callback_invoke(&cb, &vd->tx_result); + vchan_vdesc_fini(vd); } } diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 417dad6355268934383074c1759154535d114a33..5c8272329a65d3d4b3fe68a7f37d2f26ecde2ca5 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -462,7 +462,7 @@ config EDAC_ALTERA_SDMMC config EDAC_SIFIVE bool "Sifive platform EDAC driver" - depends on EDAC=y && RISCV + depends on EDAC=y && SIFIVE_L2 help Support for error detection and correction on the SiFive SoCs. 
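The ioat fix above is a classic unwind bug: the error path freed descriptors through the pointer left over from the iteration that had just failed, instead of re-fetching each earlier, successfully allocated entry. A condensed sketch of the corrected shape, with hypothetical types:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/sizes.h>

struct example_desc {
	void *virt;
	dma_addr_t hw;
};

struct example_chan {
	struct device *dev;
	struct example_desc descs[64];
};

static int example_alloc_ring(struct example_chan *chan, int n)
{
	int i, idx;

	for (i = 0; i < n; i++) {
		struct example_desc *d = &chan->descs[i];

		d->virt = dma_alloc_coherent(chan->dev, SZ_2M, &d->hw,
					     GFP_KERNEL);
		if (!d->virt)
			goto unwind;
	}
	return 0;

unwind:
	/* re-fetch each entry that did succeed; do not reuse the pointer
	 * left over from the failed iteration */
	for (idx = 0; idx < i; idx++) {
		struct example_desc *d = &chan->descs[idx];

		dma_free_coherent(chan->dev, SZ_2M, d->virt, d->hw);
		d->virt = NULL;
	}
	return -ENOMEM;
}

The neighboring virt-dma fix is the same family of lifetime bug: a descriptor may only be freed after its completion callback has been invoked.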
diff --git a/drivers/edac/sifive_edac.c b/drivers/edac/sifive_edac.c index 413cdb4a591db1758d799277646bcce6601c31f1..c0cc72a3b2be93e3dd3fad4f13b7cf532e37c7ac 100644 --- a/drivers/edac/sifive_edac.c +++ b/drivers/edac/sifive_edac.c @@ -10,7 +10,7 @@ #include #include #include "edac_module.h" -#include <asm/sifive_l2_cache.h> +#include <soc/sifive/sifive_l2_cache.h> #define DRVNAME "sifive_edac" diff --git a/drivers/firmware/broadcom/tee_bnxt_fw.c b/drivers/firmware/broadcom/tee_bnxt_fw.c index 5b7ef89eb70143875ad4dd6a1d07da0bfc67c973..ed10da5313e8652b3ff72bc16283a9315eb9370e 100644 --- a/drivers/firmware/broadcom/tee_bnxt_fw.c +++ b/drivers/firmware/broadcom/tee_bnxt_fw.c @@ -215,7 +215,6 @@ static int tee_bnxt_fw_probe(struct device *dev) fw_shm_pool = tee_shm_alloc(pvt_data.ctx, MAX_SHM_MEM_SZ, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF); if (IS_ERR(fw_shm_pool)) { - tee_client_close_context(pvt_data.ctx); dev_err(pvt_data.dev, "tee_shm_alloc failed\n"); err = PTR_ERR(fw_shm_pool); goto out_sess; } diff --git a/drivers/firmware/efi/earlycon.c b/drivers/firmware/efi/earlycon.c index c9a0efca17b01b9d3959cd055f019683eeceae5d..5d4f84781aa036bf83d7b842442d24e438111649 100644 --- a/drivers/firmware/efi/earlycon.c +++ b/drivers/firmware/efi/earlycon.c @@ -13,18 +13,58 @@ #include +static const struct console *earlycon_console __initdata; static const struct font_desc *font; static u32 efi_x, efi_y; static u64 fb_base; -static pgprot_t fb_prot; +static bool fb_wb; +static void *efi_fb; + +/* + * EFI earlycon needs to use early_memremap() to map the framebuffer. + * But early_memremap() is not usable for 'earlycon=efifb keep_bootcon'; + * memremap() should be used instead. memremap() becomes available after + * paging_init(), which runs before the initcall callbacks. Thus this + * early initcall, efi_earlycon_remap_fb(), maps the whole EFI framebuffer. + */ +static int __init efi_earlycon_remap_fb(void) +{ + /* bail if there is no bootconsole or it has been disabled already */ + if (!earlycon_console || !(earlycon_console->flags & CON_ENABLED)) + return 0; + + efi_fb = memremap(fb_base, screen_info.lfb_size, + fb_wb ? MEMREMAP_WB : MEMREMAP_WC); + + return efi_fb ? 0 : -ENOMEM; +} +early_initcall(efi_earlycon_remap_fb); + +static int __init efi_earlycon_unmap_fb(void) +{ + /* unmap the bootconsole fb unless keep_bootcon has left it enabled */ + if (efi_fb && !(earlycon_console->flags & CON_ENABLED)) + memunmap(efi_fb); + return 0; +} +late_initcall(efi_earlycon_unmap_fb); static __ref void *efi_earlycon_map(unsigned long start, unsigned long len) { + pgprot_t fb_prot; + + if (efi_fb) + return efi_fb + start; + + fb_prot = fb_wb ?
PAGE_KERNEL : pgprot_writecombine(PAGE_KERNEL); return early_memremap_prot(fb_base + start, len, pgprot_val(fb_prot)); } static __ref void efi_earlycon_unmap(void *addr, unsigned long len) { + if (efi_fb) + return; + early_memunmap(addr, len); } @@ -176,10 +216,7 @@ static int __init efi_earlycon_setup(struct earlycon_device *device, if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE) fb_base |= (u64)screen_info.ext_lfb_base << 32; - if (opt && !strcmp(opt, "ram")) - fb_prot = PAGE_KERNEL; - else - fb_prot = pgprot_writecombine(PAGE_KERNEL); + fb_wb = opt && !strcmp(opt, "ram"); si = &screen_info; xres = si->lfb_width; @@ -201,6 +238,7 @@ static int __init efi_earlycon_setup(struct earlycon_device *device, efi_earlycon_scroll_up(); device->con->write = efi_earlycon_write; + earlycon_console = device->con; return 0; } EARLYCON_DECLARE(efifb, efi_earlycon_setup); diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 407816da9fcb46509feb3914cce58d4cda41e5ce..2b02cb165f16481cc7542610c0aa0dc125f1a21b 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -979,6 +979,24 @@ static int __init efi_memreserve_map_root(void) return 0; } +static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size) +{ + struct resource *res, *parent; + + res = kzalloc(sizeof(struct resource), GFP_ATOMIC); + if (!res) + return -ENOMEM; + + res->name = "reserved"; + res->flags = IORESOURCE_MEM; + res->start = addr; + res->end = addr + size - 1; + + /* we expect a conflict with a 'System RAM' region */ + parent = request_resource_conflict(&iomem_resource, res); + return parent ? request_resource(parent, res) : 0; +} + int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) { struct linux_efi_memreserve *rsv; @@ -1003,7 +1021,7 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) rsv->entry[index].size = size; memunmap(rsv); - return 0; + return efi_mem_reserve_iomem(addr, size); } memunmap(rsv); } @@ -1013,6 +1031,12 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) if (!rsv) return -ENOMEM; + rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K); + if (rc) { + free_page((unsigned long)rsv); + return rc; + } + /* * The memremap() call above assumes that a linux_efi_memreserve entry * never crosses a page boundary, so let's ensure that this remains true @@ -1029,7 +1053,7 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) efi_memreserve_root->next = __pa(rsv); spin_unlock(&efi_mem_reserve_persistent_lock); - return 0; + return efi_mem_reserve_iomem(addr, size); } static int __init efi_memreserve_root_init(void) diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c index 0101ca4c13b1a8fc3c81c59be3084ef9b7d80fbf..b7bf1e993b8bb4d9c32596bb70835cbff98f819b 100644 --- a/drivers/firmware/efi/libstub/gop.c +++ b/drivers/firmware/efi/libstub/gop.c @@ -83,30 +83,6 @@ setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line, } } -static efi_status_t -__gop_query32(efi_system_table_t *sys_table_arg, - struct efi_graphics_output_protocol_32 *gop32, - struct efi_graphics_output_mode_info **info, - unsigned long *size, u64 *fb_base) -{ - struct efi_graphics_output_protocol_mode_32 *mode; - efi_graphics_output_protocol_query_mode query_mode; - efi_status_t status; - unsigned long m; - - m = gop32->mode; - mode = (struct efi_graphics_output_protocol_mode_32 *)m; - query_mode = (void *)(unsigned long)gop32->query_mode; - - status = __efi_call_early(query_mode, (void *)gop32, mode->mode, size, - info); 
- if (status != EFI_SUCCESS) - return status; - - *fb_base = mode->frame_buffer_base; - return status; -} - static efi_status_t setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si, efi_guid_t *proto, unsigned long size, void **gop_handle) @@ -119,7 +95,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si, u64 fb_base; struct efi_pixel_bitmask pixel_info; int pixel_format; - efi_status_t status = EFI_NOT_FOUND; + efi_status_t status; u32 *handles = (u32 *)(unsigned long)gop_handle; int i; @@ -128,6 +104,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si, nr_gops = size / sizeof(u32); for (i = 0; i < nr_gops; i++) { + struct efi_graphics_output_protocol_mode_32 *mode; struct efi_graphics_output_mode_info *info = NULL; efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID; bool conout_found = false; @@ -145,9 +122,11 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si, if (status == EFI_SUCCESS) conout_found = true; - status = __gop_query32(sys_table_arg, gop32, &info, &size, - ¤t_fb_base); - if (status == EFI_SUCCESS && (!first_gop || conout_found) && + mode = (void *)(unsigned long)gop32->mode; + info = (void *)(unsigned long)mode->info; + current_fb_base = mode->frame_buffer_base; + + if ((!first_gop || conout_found) && info->pixel_format != PIXEL_BLT_ONLY) { /* * Systems that use the UEFI Console Splitter may @@ -175,7 +154,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si, /* Did we find any GOPs? */ if (!first_gop) - goto out; + return EFI_NOT_FOUND; /* EFI framebuffer */ si->orig_video_isVGA = VIDEO_TYPE_EFI; @@ -197,32 +176,8 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si, si->lfb_size = si->lfb_linelength * si->lfb_height; si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS; -out: - return status; -} - -static efi_status_t -__gop_query64(efi_system_table_t *sys_table_arg, - struct efi_graphics_output_protocol_64 *gop64, - struct efi_graphics_output_mode_info **info, - unsigned long *size, u64 *fb_base) -{ - struct efi_graphics_output_protocol_mode_64 *mode; - efi_graphics_output_protocol_query_mode query_mode; - efi_status_t status; - unsigned long m; - - m = gop64->mode; - mode = (struct efi_graphics_output_protocol_mode_64 *)m; - query_mode = (void *)(unsigned long)gop64->query_mode; - - status = __efi_call_early(query_mode, (void *)gop64, mode->mode, size, - info); - if (status != EFI_SUCCESS) - return status; - *fb_base = mode->frame_buffer_base; - return status; + return EFI_SUCCESS; } static efi_status_t @@ -237,7 +192,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si, u64 fb_base; struct efi_pixel_bitmask pixel_info; int pixel_format; - efi_status_t status = EFI_NOT_FOUND; + efi_status_t status; u64 *handles = (u64 *)(unsigned long)gop_handle; int i; @@ -246,6 +201,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si, nr_gops = size / sizeof(u64); for (i = 0; i < nr_gops; i++) { + struct efi_graphics_output_protocol_mode_64 *mode; struct efi_graphics_output_mode_info *info = NULL; efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID; bool conout_found = false; @@ -263,9 +219,11 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si, if (status == EFI_SUCCESS) conout_found = true; - status = __gop_query64(sys_table_arg, gop64, &info, &size, - ¤t_fb_base); - if (status == EFI_SUCCESS && (!first_gop || conout_found) && + mode = (void *)(unsigned long)gop64->mode; + info = (void 
*)(unsigned long)mode->info; + current_fb_base = mode->frame_buffer_base; + + if ((!first_gop || conout_found) && info->pixel_format != PIXEL_BLT_ONLY) { /* * Systems that use the UEFI Console Splitter may @@ -293,7 +251,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si, /* Did we find any GOPs? */ if (!first_gop) - goto out; + return EFI_NOT_FOUND; /* EFI framebuffer */ si->orig_video_isVGA = VIDEO_TYPE_EFI; @@ -315,8 +273,8 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si, si->lfb_size = si->lfb_linelength * si->lfb_height; si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS; -out: - return status; + + return EFI_SUCCESS; } /* diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c index 35edd7cfb6a13d2002101d3e2b6874180d52c3da..97378cf96a2ee29198c9e8d8f9c70d46f0a27c6c 100644 --- a/drivers/firmware/efi/libstub/random.c +++ b/drivers/firmware/efi/libstub/random.c @@ -33,7 +33,7 @@ efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table_arg, { efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID; efi_status_t status; - struct efi_rng_protocol *rng; + struct efi_rng_protocol *rng = NULL; status = efi_call_early(locate_protocol, &rng_proto, NULL, (void **)&rng); @@ -162,8 +162,8 @@ efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg) efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID; efi_guid_t rng_algo_raw = EFI_RNG_ALGORITHM_RAW; efi_guid_t rng_table_guid = LINUX_EFI_RANDOM_SEED_TABLE_GUID; - struct efi_rng_protocol *rng; - struct linux_efi_random_seed *seed; + struct efi_rng_protocol *rng = NULL; + struct linux_efi_random_seed *seed = NULL; efi_status_t status; status = efi_call_early(locate_protocol, &rng_proto, NULL, diff --git a/drivers/firmware/efi/rci2-table.c b/drivers/firmware/efi/rci2-table.c index 76b0c354a0277ecd1cb3531fe6ca60ef3eed3eb0..de1a9a1f9f146743e1aff99fc50b36aa49a6e162 100644 --- a/drivers/firmware/efi/rci2-table.c +++ b/drivers/firmware/efi/rci2-table.c @@ -81,6 +81,9 @@ static int __init efi_rci2_sysfs_init(void) struct kobject *tables_kobj; int ret = -ENOMEM; + if (rci2_table_phys == EFI_INVALID_TABLE_ADDR) + return 0; + rci2_base = memremap(rci2_table_phys, sizeof(struct rci2_table_global_hdr), MEMREMAP_WB); diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 8adffd42f8cb0559031837899b25c7f6ba23c313..4b6d2ef15c39dde4fab406bfe68167cd8f71ec0b 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -553,8 +553,8 @@ config GPIO_TEGRA config GPIO_TEGRA186 tristate "NVIDIA Tegra186 GPIO support" - default ARCH_TEGRA_186_SOC - depends on ARCH_TEGRA_186_SOC || COMPILE_TEST + default ARCH_TEGRA_186_SOC || ARCH_TEGRA_194_SOC + depends on ARCH_TEGRA_186_SOC || ARCH_TEGRA_194_SOC || COMPILE_TEST depends on OF_GPIO select GPIOLIB_IRQCHIP select IRQ_DOMAIN_HIERARCHY @@ -573,7 +573,6 @@ config GPIO_THUNDERX tristate "Cavium ThunderX/OCTEON-TX GPIO" depends on ARCH_THUNDER || (64BIT && COMPILE_TEST) depends on PCI_MSI - select GPIOLIB_IRQCHIP select IRQ_DOMAIN_HIERARCHY select IRQ_FASTEOI_HIERARCHY_HANDLERS help @@ -1148,6 +1147,7 @@ config GPIO_MADERA config GPIO_MAX77620 tristate "GPIO support for PMIC MAX77620 and MAX20024" depends on MFD_MAX77620 + select GPIOLIB_IRQCHIP help GPIO driver for MAX77620 and MAX20024 PMIC from Maxim Semiconductor. MAX77620 PMIC has 8 pins that can be configured as GPIOs. 
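Stepping back to the rci2-table change a few hunks up: it is a small but common hardening pattern, where an initcall that exposes a firmware table first checks whether the firmware actually published one, treating absence as "nothing to do" rather than as an error. A sketch under hypothetical names:

#include <linux/efi.h>
#include <linux/init.h>
#include <linux/io.h>

static unsigned long example_table_phys = EFI_INVALID_TABLE_ADDR;

static int __init example_table_sysfs_init(void)
{
	void *hdr;

	/* the table is optional; silently skip if firmware omitted it */
	if (example_table_phys == EFI_INVALID_TABLE_ADDR)
		return 0;

	hdr = memremap(example_table_phys, 16, MEMREMAP_WB);
	if (!hdr)
		return -ENOMEM;

	/* ... validate the header and create sysfs attributes ... */

	memunmap(hdr);
	return 0;
}
late_initcall(example_table_sysfs_init);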
The diff --git a/drivers/gpio/gpio-aspeed-sgpio.c b/drivers/gpio/gpio-aspeed-sgpio.c index 7e99860ca447ed5f5a345d5cb070955d5cbb3c60..8319812593e31295c40638e3667cfa5a1fda2234 100644 --- a/drivers/gpio/gpio-aspeed-sgpio.c +++ b/drivers/gpio/gpio-aspeed-sgpio.c @@ -107,7 +107,7 @@ static void __iomem *bank_reg(struct aspeed_sgpio *gpio, return gpio->base + bank->irq_regs + GPIO_IRQ_STATUS; default: /* acturally if code runs to here, it's an error case */ - BUG_ON(1); + BUG(); } } diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c index 56d647a30e3eafee0178b6d416952e697635a745..94b8d3ae27bc306f13d981f0027f43a75a403e81 100644 --- a/drivers/gpio/gpio-mockup.c +++ b/drivers/gpio/gpio-mockup.c @@ -156,7 +156,7 @@ static int gpio_mockup_apply_pull(struct gpio_mockup_chip *chip, mutex_lock(&chip->lock); if (test_bit(FLAG_REQUESTED, &desc->flags) && - !test_bit(FLAG_IS_OUT, &desc->flags)) { + !test_bit(FLAG_IS_OUT, &desc->flags)) { curr = __gpio_mockup_get(chip, offset); if (curr == value) goto out; @@ -165,7 +165,7 @@ static int gpio_mockup_apply_pull(struct gpio_mockup_chip *chip, irq_type = irq_get_trigger_type(irq); if ((value == 1 && (irq_type & IRQ_TYPE_EDGE_RISING)) || - (value == 0 && (irq_type & IRQ_TYPE_EDGE_FALLING))) + (value == 0 && (irq_type & IRQ_TYPE_EDGE_FALLING))) irq_sim_fire(sim, offset); } @@ -226,7 +226,7 @@ static int gpio_mockup_get_direction(struct gpio_chip *gc, unsigned int offset) int direction; mutex_lock(&chip->lock); - direction = !chip->lines[offset].dir; + direction = chip->lines[offset].dir; mutex_unlock(&chip->lock); return direction; @@ -395,7 +395,7 @@ static int gpio_mockup_probe(struct platform_device *pdev) struct gpio_chip *gc; struct device *dev; const char *name; - int rv, base; + int rv, base, i; u16 ngpio; dev = &pdev->dev; @@ -447,6 +447,9 @@ static int gpio_mockup_probe(struct platform_device *pdev) if (!chip->lines) return -ENOMEM; + for (i = 0; i < gc->ngpio; i++) + chip->lines[i].dir = GPIO_LINE_DIRECTION_IN; + if (device_property_read_bool(dev, "named-gpio-lines")) { rv = gpio_mockup_name_lines(dev, chip); if (rv) diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c index f1e164cecff80375ddcfa89017d15fe2af277886..5ae30de3490ac84118da2da0b169f8f24b9da4b9 100644 --- a/drivers/gpio/gpio-mpc8xxx.c +++ b/drivers/gpio/gpio-mpc8xxx.c @@ -346,6 +346,7 @@ static int mpc8xxx_probe(struct platform_device *pdev) return -ENOMEM; gc = &mpc8xxx_gc->gc; + gc->parent = &pdev->dev; if (of_property_read_bool(np, "little-endian")) { ret = bgpio_init(gc, &pdev->dev, 4, diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 6652bee01966dc1e2c883c0909820b374599f293..9853547e72766678e35f2f4fbab9e1fb908675bb 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -568,16 +568,18 @@ static void pca953x_irq_mask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct pca953x_chip *chip = gpiochip_get_data(gc); + irq_hw_number_t hwirq = irqd_to_hwirq(d); - chip->irq_mask[d->hwirq / BANK_SZ] &= ~BIT(d->hwirq % BANK_SZ); + clear_bit(hwirq, chip->irq_mask); } static void pca953x_irq_unmask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct pca953x_chip *chip = gpiochip_get_data(gc); + irq_hw_number_t hwirq = irqd_to_hwirq(d); - chip->irq_mask[d->hwirq / BANK_SZ] |= BIT(d->hwirq % BANK_SZ); + set_bit(hwirq, chip->irq_mask); } static int pca953x_irq_set_wake(struct irq_data *d, unsigned int on) @@ -635,8 +637,7 @@ static int 
pca953x_irq_set_type(struct irq_data *d, unsigned int type) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct pca953x_chip *chip = gpiochip_get_data(gc); - int bank_nb = d->hwirq / BANK_SZ; - u8 mask = BIT(d->hwirq % BANK_SZ); + irq_hw_number_t hwirq = irqd_to_hwirq(d); if (!(type & IRQ_TYPE_EDGE_BOTH)) { dev_err(&chip->client->dev, "irq %d: unsupported type %d\n", @@ -644,15 +645,8 @@ static int pca953x_irq_set_type(struct irq_data *d, unsigned int type) return -EINVAL; } - if (type & IRQ_TYPE_EDGE_FALLING) - chip->irq_trig_fall[bank_nb] |= mask; - else - chip->irq_trig_fall[bank_nb] &= ~mask; - - if (type & IRQ_TYPE_EDGE_RISING) - chip->irq_trig_raise[bank_nb] |= mask; - else - chip->irq_trig_raise[bank_nb] &= ~mask; + assign_bit(hwirq, chip->irq_trig_fall, type & IRQ_TYPE_EDGE_FALLING); + assign_bit(hwirq, chip->irq_trig_raise, type & IRQ_TYPE_EDGE_RISING); return 0; } @@ -661,10 +655,10 @@ static void pca953x_irq_shutdown(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct pca953x_chip *chip = gpiochip_get_data(gc); - u8 mask = BIT(d->hwirq % BANK_SZ); + irq_hw_number_t hwirq = irqd_to_hwirq(d); - chip->irq_trig_raise[d->hwirq / BANK_SZ] &= ~mask; - chip->irq_trig_fall[d->hwirq / BANK_SZ] &= ~mask; + clear_bit(hwirq, chip->irq_trig_raise); + clear_bit(hwirq, chip->irq_trig_fall); } static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pending) diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c index d08d86a22b1f531854b85d571870a6b99b4ab359..46277047904571d2bd12f34d7a66b79bb349db4c 100644 --- a/drivers/gpio/gpio-thunderx.c +++ b/drivers/gpio/gpio-thunderx.c @@ -53,6 +53,7 @@ struct thunderx_line { struct thunderx_gpio { struct gpio_chip chip; u8 __iomem *register_base; + struct irq_domain *irqd; struct msix_entry *msix_entries; /* per line MSI-X */ struct thunderx_line *line_entries; /* per line irq info */ raw_spinlock_t lock; @@ -285,60 +286,54 @@ static void thunderx_gpio_set_multiple(struct gpio_chip *chip, } } -static void thunderx_gpio_irq_ack(struct irq_data *d) +static void thunderx_gpio_irq_ack(struct irq_data *data) { - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct thunderx_gpio *txgpio = gpiochip_get_data(gc); + struct thunderx_line *txline = irq_data_get_irq_chip_data(data); writeq(GPIO_INTR_INTR, - txgpio->register_base + intr_reg(irqd_to_hwirq(d))); + txline->txgpio->register_base + intr_reg(txline->line)); } -static void thunderx_gpio_irq_mask(struct irq_data *d) +static void thunderx_gpio_irq_mask(struct irq_data *data) { - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct thunderx_gpio *txgpio = gpiochip_get_data(gc); + struct thunderx_line *txline = irq_data_get_irq_chip_data(data); writeq(GPIO_INTR_ENA_W1C, - txgpio->register_base + intr_reg(irqd_to_hwirq(d))); + txline->txgpio->register_base + intr_reg(txline->line)); } -static void thunderx_gpio_irq_mask_ack(struct irq_data *d) +static void thunderx_gpio_irq_mask_ack(struct irq_data *data) { - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct thunderx_gpio *txgpio = gpiochip_get_data(gc); + struct thunderx_line *txline = irq_data_get_irq_chip_data(data); writeq(GPIO_INTR_ENA_W1C | GPIO_INTR_INTR, - txgpio->register_base + intr_reg(irqd_to_hwirq(d))); + txline->txgpio->register_base + intr_reg(txline->line)); } -static void thunderx_gpio_irq_unmask(struct irq_data *d) +static void thunderx_gpio_irq_unmask(struct irq_data *data) { - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - 
struct thunderx_gpio *txgpio = gpiochip_get_data(gc); + struct thunderx_line *txline = irq_data_get_irq_chip_data(data); writeq(GPIO_INTR_ENA_W1S, - txgpio->register_base + intr_reg(irqd_to_hwirq(d))); + txline->txgpio->register_base + intr_reg(txline->line)); } -static int thunderx_gpio_irq_set_type(struct irq_data *d, +static int thunderx_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type) { - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct thunderx_gpio *txgpio = gpiochip_get_data(gc); - struct thunderx_line *txline = - &txgpio->line_entries[irqd_to_hwirq(d)]; + struct thunderx_line *txline = irq_data_get_irq_chip_data(data); + struct thunderx_gpio *txgpio = txline->txgpio; u64 bit_cfg; - irqd_set_trigger_type(d, flow_type); + irqd_set_trigger_type(data, flow_type); bit_cfg = txline->fil_bits | GPIO_BIT_CFG_INT_EN; if (flow_type & IRQ_TYPE_EDGE_BOTH) { - irq_set_handler_locked(d, handle_fasteoi_ack_irq); + irq_set_handler_locked(data, handle_fasteoi_ack_irq); bit_cfg |= GPIO_BIT_CFG_INT_TYPE; } else { - irq_set_handler_locked(d, handle_fasteoi_mask_irq); + irq_set_handler_locked(data, handle_fasteoi_mask_irq); } raw_spin_lock(&txgpio->lock); @@ -367,6 +362,33 @@ static void thunderx_gpio_irq_disable(struct irq_data *data) irq_chip_disable_parent(data); } +static int thunderx_gpio_irq_request_resources(struct irq_data *data) +{ + struct thunderx_line *txline = irq_data_get_irq_chip_data(data); + struct thunderx_gpio *txgpio = txline->txgpio; + int r; + + r = gpiochip_lock_as_irq(&txgpio->chip, txline->line); + if (r) + return r; + + r = irq_chip_request_resources_parent(data); + if (r) + gpiochip_unlock_as_irq(&txgpio->chip, txline->line); + + return r; +} + +static void thunderx_gpio_irq_release_resources(struct irq_data *data) +{ + struct thunderx_line *txline = irq_data_get_irq_chip_data(data); + struct thunderx_gpio *txgpio = txline->txgpio; + + irq_chip_release_resources_parent(data); + + gpiochip_unlock_as_irq(&txgpio->chip, txline->line); +} + /* * Interrupts are chained from underlying MSI-X vectors. 
We have * these irq_chip functions to be able to handle level triggering @@ -383,24 +405,50 @@ static struct irq_chip thunderx_gpio_irq_chip = { .irq_unmask = thunderx_gpio_irq_unmask, .irq_eoi = irq_chip_eoi_parent, .irq_set_affinity = irq_chip_set_affinity_parent, + .irq_request_resources = thunderx_gpio_irq_request_resources, + .irq_release_resources = thunderx_gpio_irq_release_resources, .irq_set_type = thunderx_gpio_irq_set_type, .flags = IRQCHIP_SET_TYPE_MASKED }; -static int thunderx_gpio_child_to_parent_hwirq(struct gpio_chip *gc, - unsigned int child, - unsigned int child_type, - unsigned int *parent, - unsigned int *parent_type) +static int thunderx_gpio_irq_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + irq_hw_number_t *hwirq, + unsigned int *type) { - struct thunderx_gpio *txgpio = gpiochip_get_data(gc); - - *parent = txgpio->base_msi + (2 * child); - *parent_type = IRQ_TYPE_LEVEL_HIGH; + struct thunderx_gpio *txgpio = d->host_data; + + if (WARN_ON(fwspec->param_count < 2)) + return -EINVAL; + if (fwspec->param[0] >= txgpio->chip.ngpio) + return -EINVAL; + *hwirq = fwspec->param[0]; + *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; return 0; } +static int thunderx_gpio_irq_alloc(struct irq_domain *d, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + struct thunderx_line *txline = arg; + + return irq_domain_set_hwirq_and_chip(d, virq, txline->line, + &thunderx_gpio_irq_chip, txline); +} + +static const struct irq_domain_ops thunderx_gpio_irqd_ops = { + .alloc = thunderx_gpio_irq_alloc, + .translate = thunderx_gpio_irq_translate +}; + +static int thunderx_gpio_to_irq(struct gpio_chip *chip, unsigned int offset) +{ + struct thunderx_gpio *txgpio = gpiochip_get_data(chip); + + return irq_find_mapping(txgpio->irqd, offset); +} + static int thunderx_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id) { @@ -408,7 +456,6 @@ static int thunderx_gpio_probe(struct pci_dev *pdev, struct device *dev = &pdev->dev; struct thunderx_gpio *txgpio; struct gpio_chip *chip; - struct gpio_irq_chip *girq; int ngpio, i; int err = 0; @@ -453,8 +500,8 @@ static int thunderx_gpio_probe(struct pci_dev *pdev, } txgpio->msix_entries = devm_kcalloc(dev, - ngpio, sizeof(struct msix_entry), - GFP_KERNEL); + ngpio, sizeof(struct msix_entry), + GFP_KERNEL); if (!txgpio->msix_entries) { err = -ENOMEM; goto out; @@ -495,6 +542,27 @@ static int thunderx_gpio_probe(struct pci_dev *pdev, if (err < 0) goto out; + /* + * Push GPIO specific irqdomain on hierarchy created as a side + * effect of the pci_enable_msix() + */ + txgpio->irqd = irq_domain_create_hierarchy(irq_get_irq_data(txgpio->msix_entries[0].vector)->domain, + 0, 0, of_node_to_fwnode(dev->of_node), + &thunderx_gpio_irqd_ops, txgpio); + if (!txgpio->irqd) { + err = -ENOMEM; + goto out; + } + + /* Push on irq_data and the domain for each line. 
*/ + for (i = 0; i < ngpio; i++) { + err = irq_domain_push_irq(txgpio->irqd, + txgpio->msix_entries[i].vector, + &txgpio->line_entries[i]); + if (err < 0) + dev_err(dev, "irq_domain_push_irq: %d\n", err); + } + chip->label = KBUILD_MODNAME; chip->parent = dev; chip->owner = THIS_MODULE; @@ -509,28 +577,11 @@ static int thunderx_gpio_probe(struct pci_dev *pdev, chip->set = thunderx_gpio_set; chip->set_multiple = thunderx_gpio_set_multiple; chip->set_config = thunderx_gpio_set_config; - girq = &chip->irq; - girq->chip = &thunderx_gpio_irq_chip; - girq->fwnode = of_node_to_fwnode(dev->of_node); - girq->parent_domain = - irq_get_irq_data(txgpio->msix_entries[0].vector)->domain; - girq->child_to_parent_hwirq = thunderx_gpio_child_to_parent_hwirq; - girq->handler = handle_bad_irq; - girq->default_type = IRQ_TYPE_NONE; - + chip->to_irq = thunderx_gpio_to_irq; err = devm_gpiochip_add_data(dev, chip, txgpio); if (err) goto out; - /* Push on irq_data and the domain for each line. */ - for (i = 0; i < ngpio; i++) { - err = irq_domain_push_irq(chip->irq.domain, - txgpio->msix_entries[i].vector, - chip); - if (err < 0) - dev_err(dev, "irq_domain_push_irq: %d\n", err); - } - dev_info(dev, "ThunderX GPIO: %d lines with base %d.\n", ngpio, chip->base); return 0; @@ -545,10 +596,10 @@ static void thunderx_gpio_remove(struct pci_dev *pdev) struct thunderx_gpio *txgpio = pci_get_drvdata(pdev); for (i = 0; i < txgpio->chip.ngpio; i++) - irq_domain_pop_irq(txgpio->chip.irq.domain, + irq_domain_pop_irq(txgpio->irqd, txgpio->msix_entries[i].vector); - irq_domain_remove(txgpio->chip.irq.domain); + irq_domain_remove(txgpio->irqd); pci_set_drvdata(pdev, NULL); } diff --git a/drivers/gpio/gpio-xgs-iproc.c b/drivers/gpio/gpio-xgs-iproc.c index 773e5c24309e49b3f989a453df40373311afc15b..b21c2e436b61023ca38a83a1e5b690359f2191b7 100644 --- a/drivers/gpio/gpio-xgs-iproc.c +++ b/drivers/gpio/gpio-xgs-iproc.c @@ -280,7 +280,7 @@ static int iproc_gpio_probe(struct platform_device *pdev) return 0; } -static int __exit iproc_gpio_remove(struct platform_device *pdev) +static int iproc_gpio_remove(struct platform_device *pdev) { struct iproc_gpio_chip *chip; diff --git a/drivers/gpio/gpio-xtensa.c b/drivers/gpio/gpio-xtensa.c index 08d7c3b3203869b5dd75cc42a4355746ea19ec48..c8af34a6368f4f69f9960fff7f8c27a2c8ddea8b 100644 --- a/drivers/gpio/gpio-xtensa.c +++ b/drivers/gpio/gpio-xtensa.c @@ -44,15 +44,14 @@ static inline unsigned long enable_cp(unsigned long *cpenable) unsigned long flags; local_irq_save(flags); - RSR_CPENABLE(*cpenable); - WSR_CPENABLE(*cpenable | BIT(XCHAL_CP_ID_XTIOP)); - + *cpenable = xtensa_get_sr(cpenable); + xtensa_set_sr(*cpenable | BIT(XCHAL_CP_ID_XTIOP), cpenable); return flags; } static inline void disable_cp(unsigned long flags, unsigned long cpenable) { - WSR_CPENABLE(cpenable); + xtensa_set_sr(cpenable, cpenable); local_irq_restore(flags); } diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c index 4c3f6370eab4c104a8975356ea0f652e3f92372e..05ba16fffdad0e7e285a6cf520f87ff7630a917e 100644 --- a/drivers/gpio/gpio-zynq.c +++ b/drivers/gpio/gpio-zynq.c @@ -684,6 +684,8 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio) unsigned int bank_num; for (bank_num = 0; bank_num < gpio->p_data->max_bank; bank_num++) { + writel_relaxed(ZYNQ_GPIO_IXR_DISABLE_ALL, gpio->base_addr + + ZYNQ_GPIO_INTDIS_OFFSET(bank_num)); writel_relaxed(gpio->context.datalsw[bank_num], gpio->base_addr + ZYNQ_GPIO_DATA_LSW_OFFSET(bank_num)); @@ -693,9 +695,6 @@ static void zynq_gpio_restore_context(struct 
zynq_gpio *gpio) writel_relaxed(gpio->context.dirm[bank_num], gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num)); - writel_relaxed(gpio->context.int_en[bank_num], - gpio->base_addr + - ZYNQ_GPIO_INTEN_OFFSET(bank_num)); writel_relaxed(gpio->context.int_type[bank_num], gpio->base_addr + ZYNQ_GPIO_INTTYPE_OFFSET(bank_num)); @@ -705,6 +704,9 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio) writel_relaxed(gpio->context.int_any[bank_num], gpio->base_addr + ZYNQ_GPIO_INTANY_OFFSET(bank_num)); + writel_relaxed(~(gpio->context.int_en[bank_num]), + gpio->base_addr + + ZYNQ_GPIO_INTEN_OFFSET(bank_num)); } } diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index d30e57dc755cf6114d984b65cf56175e78757bb9..31fee5e918b7d14fafde0477231316fee14dbb53 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -21,11 +21,19 @@ #include "gpiolib.h" #include "gpiolib-acpi.h" +#define QUIRK_NO_EDGE_EVENTS_ON_BOOT 0x01l +#define QUIRK_NO_WAKEUP 0x02l + static int run_edge_events_on_boot = -1; module_param(run_edge_events_on_boot, int, 0444); MODULE_PARM_DESC(run_edge_events_on_boot, "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto"); +static int honor_wakeup = -1; +module_param(honor_wakeup, int, 0444); +MODULE_PARM_DESC(honor_wakeup, + "Honor the ACPI wake-capable flag: 0=no, 1=yes, -1=auto"); + /** * struct acpi_gpio_event - ACPI GPIO event handler data * @@ -281,7 +289,7 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares, event->handle = evt_handle; event->handler = handler; event->irq = irq; - event->irq_is_wake = agpio->wake_capable == ACPI_WAKE_CAPABLE; + event->irq_is_wake = honor_wakeup && agpio->wake_capable == ACPI_WAKE_CAPABLE; event->pin = pin; event->desc = desc; @@ -1309,7 +1317,7 @@ static int acpi_gpio_handle_deferred_request_irqs(void) /* We must use _sync so that this runs after the first deferred_probe run */ late_initcall_sync(acpi_gpio_handle_deferred_request_irqs); -static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = { +static const struct dmi_system_id gpiolib_acpi_quirks[] = { { /* * The Minix Neo Z83-4 has a micro-USB-B id-pin handler for @@ -1319,7 +1327,8 @@ static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "MINIX"), DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"), - } + }, + .driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT, }, { /* @@ -1331,20 +1340,52 @@ static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"), DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"), - } + }, + .driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT, + }, + { + /* + * Various HP X2 10 Cherry Trail models use an external + * embedded-controller connected via I2C + an ACPI GPIO + * event handler. The embedded controller generates various + * spurious wakeup events when suspended. So disable wakeup + * for its handler (it uses the only ACPI GPIO event handler). + * This breaks wakeup when opening the lid; the user needs + * to press the power button to wake up the system. The + * alternative is suspend simply not working, which is worse.
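The quirk-table rework above is a reusable pattern: one DMI table whose driver_data carries flag bits for several behaviors, each behavior still overridable by a module parameter whose -1 default means "auto-detect from DMI". A sketch of the pattern with hypothetical names:

#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/module.h>

#define EXAMPLE_QUIRK_NO_FEATURE	0x01l

static int example_feature = -1;
module_param(example_feature, int, 0444);
MODULE_PARM_DESC(example_feature, "Enable the feature: 0=no, 1=yes, -1=auto");

static const struct dmi_system_id example_quirks[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Product"),
		},
		.driver_data = (void *)EXAMPLE_QUIRK_NO_FEATURE,
	},
	{} /* terminating entry */
};

static int __init example_setup_params(void)
{
	const struct dmi_system_id *id;
	long quirks = 0;

	id = dmi_first_match(example_quirks);
	if (id)
		quirks = (long)id->driver_data;

	/* an explicit user setting always beats the DMI quirk */
	if (example_feature < 0)
		example_feature = !(quirks & EXAMPLE_QUIRK_NO_FEATURE);

	return 0;
}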
+ */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP x2 Detachable 10-p0XX"), + }, + .driver_data = (void *)QUIRK_NO_WAKEUP, }, {} /* Terminating entry */ }; static int acpi_gpio_setup_params(void) { + const struct dmi_system_id *id; + long quirks = 0; + + id = dmi_first_match(gpiolib_acpi_quirks); + if (id) + quirks = (long)id->driver_data; + if (run_edge_events_on_boot < 0) { - if (dmi_check_system(run_edge_events_on_boot_blacklist)) + if (quirks & QUIRK_NO_EDGE_EVENTS_ON_BOOT) run_edge_events_on_boot = 0; else run_edge_events_on_boot = 1; } + if (honor_wakeup < 0) { + if (quirks & QUIRK_NO_WAKEUP) + honor_wakeup = 0; + else + honor_wakeup = 1; + } + return 0; } diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index dc27b1a88e9343a8a029832cef5c6095a7be6238..b696e4598a240ea4237bacf42d90b22424320616 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -23,6 +23,29 @@ #include "gpiolib.h" #include "gpiolib-of.h" +/** + * of_gpio_spi_cs_get_count() - special GPIO counting for SPI + * Some elder GPIO controllers need special quirks. Currently we handle + * the Freescale GPIO controller with bindings that doesn't use the + * established "cs-gpios" for chip selects but instead rely on + * "gpios" for the chip select lines. If we detect this, we redirect + * the counting of "cs-gpios" to count "gpios" transparent to the + * driver. + */ +static int of_gpio_spi_cs_get_count(struct device *dev, const char *con_id) +{ + struct device_node *np = dev->of_node; + + if (!IS_ENABLED(CONFIG_SPI_MASTER)) + return 0; + if (!con_id || strcmp(con_id, "cs")) + return 0; + if (!of_device_is_compatible(np, "fsl,spi") && + !of_device_is_compatible(np, "aeroflexgaisler,spictrl")) + return 0; + return of_gpio_named_count(np, "gpios"); +} + /* * This is used by external users of of_gpio_count() from * @@ -35,6 +58,10 @@ int of_gpio_get_count(struct device *dev, const char *con_id) char propname[32]; unsigned int i; + ret = of_gpio_spi_cs_get_count(dev, con_id); + if (ret > 0) + return ret; + for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) { if (con_id) snprintf(propname, sizeof(propname), "%s-%s", diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 9913886ede904bb09e40cca32792a01b39bb2822..78a16e42f222ebd81dc38f80da606f10daaef57e 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -220,6 +220,14 @@ int gpiod_get_direction(struct gpio_desc *desc) chip = gpiod_to_chip(desc); offset = gpio_chip_hwgpio(desc); + /* + * Open drain emulation using input mode may incorrectly report + * input here, fix that up. 
+ */ + if (test_bit(FLAG_OPEN_DRAIN, &desc->flags) && + test_bit(FLAG_IS_OUT, &desc->flags)) + return 0; + if (!chip->get_direction) return -ENOTSUPP; @@ -4472,8 +4480,9 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id, if (chip->ngpio <= p->chip_hwnum) { dev_err(dev, - "requested GPIO %d is out of range [0..%d] for chip %s\n", - idx, chip->ngpio, chip->label); + "requested GPIO %u (%u) is out of range [0..%u] for chip %s\n", + idx, p->chip_hwnum, chip->ngpio - 1, + chip->label); return ERR_PTR(-EINVAL); } diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 7041323a7bff36c18d49538cefc0b479b41f1ba7..d0aa6cff2e022dc1e78c5b83aa43231723f4ca13 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -168,6 +168,7 @@ config DRM_LOAD_EDID_FIRMWARE config DRM_DP_CEC bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support" + depends on DRM select CEC_CORE help Choose this option if you want to enable HDMI CEC support for diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 7ae3b22c5628bdcee07db8ddc49b839e6f29c385..c2bbcdd9c875c25efac9bdc7cf63fd249d306c0e 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -120,6 +120,7 @@ amdgpu-y += \ amdgpu_rlc.o \ gfx_v8_0.o \ gfx_v9_0.o \ + gfx_v9_4.o \ gfx_v10_0.o # add async DMA block diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 81a531b652aaca3eca6fea7115362971d8865e6f..da3bcff61b9766954475e02c7cb3e5c1471993c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -90,6 +90,7 @@ #include "amdgpu_mes.h" #include "amdgpu_umc.h" #include "amdgpu_mmhub.h" +#include "amdgpu_df.h" #define MAX_GPU_INSTANCE 16 @@ -636,9 +637,8 @@ struct amdgpu_fw_vram_usage { struct amdgpu_bo *reserved_bo; void *va; - /* Offset on the top of VRAM, used as c2p write buffer. + /* GDDR6 training support flag. 
*/ - u64 mem_train_fb_loc; bool mem_train_support; }; @@ -665,29 +665,6 @@ struct amdgpu_mmio_remap { resource_size_t bus_addr; }; -struct amdgpu_df_funcs { - void (*sw_init)(struct amdgpu_device *adev); - void (*sw_fini)(struct amdgpu_device *adev); - void (*enable_broadcast_mode)(struct amdgpu_device *adev, - bool enable); - u32 (*get_fb_channel_number)(struct amdgpu_device *adev); - u32 (*get_hbm_channel_number)(struct amdgpu_device *adev); - void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev, - bool enable); - void (*get_clockgating_state)(struct amdgpu_device *adev, - u32 *flags); - void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev, - bool enable); - int (*pmc_start)(struct amdgpu_device *adev, uint64_t config, - int is_enable); - int (*pmc_stop)(struct amdgpu_device *adev, uint64_t config, - int is_disable); - void (*pmc_get_count)(struct amdgpu_device *adev, uint64_t config, - uint64_t *count); - uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val); - void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val, - uint32_t ficadl_val, uint32_t ficadh_val); -}; /* Define the HW IP blocks will be used in driver , add more if necessary */ enum amd_hw_ip_block_type { GC_HWIP = 1, @@ -931,6 +908,9 @@ struct amdgpu_device { bool enable_mes; struct amdgpu_mes mes; + /* df */ + struct amdgpu_df df; + struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM]; int num_ip_blocks; struct mutex mn_lock; @@ -944,8 +924,6 @@ struct amdgpu_device { /* soc15 register offset based on ip, instance and segment */ uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE]; - const struct amdgpu_df_funcs *df_funcs; - /* delayed work_func for deferring clockgating during resume */ struct delayed_work delayed_init_work; @@ -994,8 +972,6 @@ struct amdgpu_device { bool pm_sysfs_en; bool ucode_sysfs_en; - - bool in_baco; }; static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev) @@ -1033,10 +1009,14 @@ int emu_soc_asic_init(struct amdgpu_device *adev); #define AMDGPU_REGS_IDX (1<<0) #define AMDGPU_REGS_NO_KIQ (1<<1) +#define AMDGPU_REGS_KIQ (1<<2) #define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ) #define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ) +#define RREG32_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_KIQ) +#define WREG32_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_KIQ) + #define RREG8(reg) amdgpu_mm_rreg8(adev, (reg)) #define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c index 82155ac3288a07596fe4b36ae3846704c4c64b7c..12247a32f9ef94a1c3a23e81f16341143314a218 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c @@ -527,7 +527,7 @@ static int acp_set_powergating_state(void *handle, enum amd_powergating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = state == AMD_PG_STATE_GATE ? 
true : false; + bool enable = (state == AMD_PG_STATE_GATE); if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_powergating_by_smu) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index d3da9dde4ee124e84a3cc73e0bbdec423ecc8355..8609287620ead9e747457c2dcb6f292c0a884fbd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -613,15 +613,9 @@ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle) { struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - if (is_support_sw_smu(adev)) - smu_switch_power_profile(&adev->smu, - PP_SMC_POWER_PROFILE_COMPUTE, - !idle); - else if (adev->powerplay.pp_funcs && - adev->powerplay.pp_funcs->switch_power_profile) - amdgpu_dpm_switch_power_profile(adev, - PP_SMC_POWER_PROFILE_COMPUTE, - !idle); + amdgpu_dpm_switch_power_profile(adev, + PP_SMC_POWER_PROFILE_COMPUTE, + !idle); } bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid) @@ -634,6 +628,38 @@ bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid) return false; } +int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)kgd; + + if (adev->family == AMDGPU_FAMILY_AI) { + int i; + + for (i = 0; i < adev->num_vmhubs; i++) + amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0); + } else { + amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB_0, 0); + } + + return 0; +} + +int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)kgd; + uint32_t flush_type = 0; + bool all_hub = false; + + if (adev->gmc.xgmi.num_physical_nodes && + adev->asic_type == CHIP_VEGA20) + flush_type = 2; + + if (adev->family == AMDGPU_FAMILY_AI) + all_hub = true; + + return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub); +} + bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd) { struct amdgpu_device *adev = (struct amdgpu_device *)kgd; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 069d5d230810c96bfd6a582dc81072dc47ce0de3..47b0f2957d1f6fe548bccf9fa188754d04e20add 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -136,6 +136,8 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine, uint32_t *ib_cmd, uint32_t ib_len); void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle); bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd); +int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid); +int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid); bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c index b6713e0ed1b2a26fb0d1ed12ee53d3c943230d10..4bcc175a149d070cde9de9d116edb2f2459fb329 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c @@ -46,6 +46,8 @@ #include "soc15.h" #include "soc15d.h" #include "amdgpu_amdkfd_gfx_v9.h" +#include "gfxhub_v1_0.h" +#include "mmhub_v9_4.h" #define HQD_N_REGS 56 #define DUMP_REG(addr) do { \ @@ -69,32 +71,56 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev, unsigned int engine_id, unsigned int queue_id) { - uint32_t sdma_engine_reg_base[8] = { - 
SOC15_REG_OFFSET(SDMA0, 0, - mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL, - SOC15_REG_OFFSET(SDMA1, 0, - mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL, - SOC15_REG_OFFSET(SDMA2, 0, - mmSDMA2_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL, - SOC15_REG_OFFSET(SDMA3, 0, - mmSDMA3_RLC0_RB_CNTL) - mmSDMA3_RLC0_RB_CNTL, - SOC15_REG_OFFSET(SDMA4, 0, - mmSDMA4_RLC0_RB_CNTL) - mmSDMA4_RLC0_RB_CNTL, - SOC15_REG_OFFSET(SDMA5, 0, - mmSDMA5_RLC0_RB_CNTL) - mmSDMA5_RLC0_RB_CNTL, - SOC15_REG_OFFSET(SDMA6, 0, - mmSDMA6_RLC0_RB_CNTL) - mmSDMA6_RLC0_RB_CNTL, - SOC15_REG_OFFSET(SDMA7, 0, - mmSDMA7_RLC0_RB_CNTL) - mmSDMA7_RLC0_RB_CNTL - }; - - uint32_t retval = sdma_engine_reg_base[engine_id] + uint32_t sdma_engine_reg_base = 0; + uint32_t sdma_rlc_reg_offset; + + switch (engine_id) { + default: + dev_warn(adev->dev, + "Invalid sdma engine id (%d), using engine id 0\n", + engine_id); + /* fall through */ + case 0: + sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0, + mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL; + break; + case 1: + sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0, + mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL; + break; + case 2: + sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA2, 0, + mmSDMA2_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL; + break; + case 3: + sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA3, 0, + mmSDMA3_RLC0_RB_CNTL) - mmSDMA3_RLC0_RB_CNTL; + break; + case 4: + sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA4, 0, + mmSDMA4_RLC0_RB_CNTL) - mmSDMA4_RLC0_RB_CNTL; + break; + case 5: + sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA5, 0, + mmSDMA5_RLC0_RB_CNTL) - mmSDMA5_RLC0_RB_CNTL; + break; + case 6: + sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA6, 0, + mmSDMA6_RLC0_RB_CNTL) - mmSDMA6_RLC0_RB_CNTL; + break; + case 7: + sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA7, 0, + mmSDMA7_RLC0_RB_CNTL) - mmSDMA7_RLC0_RB_CNTL; + break; + } + + sdma_rlc_reg_offset = sdma_engine_reg_base + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL); pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id, - queue_id, retval); + queue_id, sdma_rlc_reg_offset); - return retval; + return sdma_rlc_reg_offset; } static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, @@ -258,11 +284,28 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, return 0; } +static void kgd_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, + uint64_t page_table_base) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + + if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) { + pr_err("trying to set page table base for wrong VMID %u\n", + vmid); + return; + } + + mmhub_v9_4_setup_vm_pt_regs(adev, vmid, page_table_base); + + gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base); +} + const struct kfd2kgd_calls arcturus_kfd2kgd = { .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings, .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping, .init_interrupts = kgd_gfx_v9_init_interrupts, .hqd_load = kgd_gfx_v9_hqd_load, + .hiq_mqd_load = kgd_gfx_v9_hiq_mqd_load, .hqd_sdma_load = kgd_hqd_sdma_load, .hqd_dump = kgd_gfx_v9_hqd_dump, .hqd_sdma_dump = kgd_hqd_sdma_dump, @@ -277,8 +320,6 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = { .get_atc_vmid_pasid_mapping_info = kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, .get_tile_config = kgd_gfx_v9_get_tile_config, - .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base, - .invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs, - .invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid, + .set_vm_context_page_table_base = 
kgd_set_vm_context_page_table_base, .get_hive_id = amdgpu_amdkfd_get_hive_id, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c index 61cd707158e44b00781dcabf89a5ce4e8b286370..a7b17c8deb00332a13571c61a4b42e1fd346763e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c @@ -107,13 +107,13 @@ static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id, lock_srbm(kgd, mec, pipe, queue_id, 0); } -static uint32_t get_queue_mask(struct amdgpu_device *adev, +static uint64_t get_queue_mask(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id) { - unsigned int bit = (pipe_id * adev->gfx.mec.num_queue_per_pipe + - queue_id) & 31; + unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe + + queue_id; - return ((uint32_t)1) << bit; + return 1ull << bit; } static void release_queue(struct kgd_dev *kgd) @@ -268,21 +268,6 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id); acquire_queue(kgd, pipe_id, queue_id); - /* HIQ is set during driver init period with vmid set to 0*/ - if (m->cp_hqd_vmid == 0) { - uint32_t value, mec, pipe; - - mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; - pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); - - pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n", - mec, pipe, queue_id); - value = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS)); - value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1, - ((mec << 5) | (pipe << 3) | queue_id | 0x80)); - WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value); - } - /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. 
*/ mqd_hqd = &m->cp_mqd_base_addr_lo; hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR); @@ -332,9 +317,10 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, lower_32_bits((uint64_t)wptr)); WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI), upper_32_bits((uint64_t)wptr)); - pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__, get_queue_mask(adev, pipe_id, queue_id)); + pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__, + (uint32_t)get_queue_mask(adev, pipe_id, queue_id)); WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1), - get_queue_mask(adev, pipe_id, queue_id)); + (uint32_t)get_queue_mask(adev, pipe_id, queue_id)); } /* Start the EOP fetcher */ @@ -350,6 +336,59 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, return 0; } +static int kgd_hiq_mqd_load(struct kgd_dev *kgd, void *mqd, + uint32_t pipe_id, uint32_t queue_id, + uint32_t doorbell_off) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; + struct v10_compute_mqd *m; + uint32_t mec, pipe; + int r; + + m = get_mqd(mqd); + + acquire_queue(kgd, pipe_id, queue_id); + + mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; + pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); + + pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n", + mec, pipe, queue_id); + + spin_lock(&adev->gfx.kiq.ring_lock); + r = amdgpu_ring_alloc(kiq_ring, 7); + if (r) { + pr_err("Failed to alloc KIQ (%d).\n", r); + goto out_unlock; + } + + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5)); + amdgpu_ring_write(kiq_ring, + PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */ + PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */ + PACKET3_MAP_QUEUES_QUEUE(queue_id) | + PACKET3_MAP_QUEUES_PIPE(pipe) | + PACKET3_MAP_QUEUES_ME((mec - 1)) | + PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */ + PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */ + PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */ + PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */ + amdgpu_ring_write(kiq_ring, + PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off)); + amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo); + amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi); + amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo); + amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi); + amdgpu_ring_commit(kiq_ring); + +out_unlock: + spin_unlock(&adev->gfx.kiq.ring_lock); + release_queue(kgd); + + return r; +} + static int kgd_hqd_dump(struct kgd_dev *kgd, uint32_t pipe_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) @@ -686,71 +725,6 @@ static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); } -static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid) -{ - signed long r; - uint32_t seq; - struct amdgpu_ring *ring = &adev->gfx.kiq.ring; - - spin_lock(&adev->gfx.kiq.ring_lock); - amdgpu_ring_alloc(ring, 12); /* fence + invalidate_tlbs package*/ - amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0)); - amdgpu_ring_write(ring, - PACKET3_INVALIDATE_TLBS_DST_SEL(1) | - PACKET3_INVALIDATE_TLBS_PASID(pasid)); - amdgpu_fence_emit_polling(ring, &seq); - amdgpu_ring_commit(ring); - spin_unlock(&adev->gfx.kiq.ring_lock); - - r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout); - if (r < 1) { - DRM_ERROR("wait for kiq fence error: %ld.\n", r); - return -ETIME; - 
} - - return 0; -} - -static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid) -{ - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - int vmid; - uint16_t queried_pasid; - bool ret; - struct amdgpu_ring *ring = &adev->gfx.kiq.ring; - - if (amdgpu_emu_mode == 0 && ring->sched.ready) - return invalidate_tlbs_with_kiq(adev, pasid); - - for (vmid = 0; vmid < 16; vmid++) { - if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) - continue; - - ret = get_atc_vmid_pasid_mapping_info(kgd, vmid, - &queried_pasid); - if (ret && queried_pasid == pasid) { - amdgpu_gmc_flush_gpu_tlb(adev, vmid, - AMDGPU_GFXHUB_0, 0); - break; - } - } - - return 0; -} - -static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid) -{ - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - - if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) { - pr_err("non kfd vmid %d\n", vmid); - return 0; - } - - amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB_0, 0); - return 0; -} - static int kgd_address_watch_disable(struct kgd_dev *kgd) { return 0; @@ -817,6 +791,7 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = { .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, .init_interrupts = kgd_init_interrupts, .hqd_load = kgd_hqd_load, + .hiq_mqd_load = kgd_hiq_mqd_load, .hqd_sdma_load = kgd_hqd_sdma_load, .hqd_dump = kgd_hqd_dump, .hqd_sdma_dump = kgd_hqd_sdma_dump, @@ -832,7 +807,5 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = { get_atc_vmid_pasid_mapping_info, .get_tile_config = amdgpu_amdkfd_get_tile_config, .set_vm_context_page_table_base = set_vm_context_page_table_base, - .invalidate_tlbs = invalidate_tlbs, - .invalidate_tlbs_vmid = invalidate_tlbs_vmid, .get_hive_id = amdgpu_amdkfd_get_hive_id, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index 6e6f0a99ec066cabcefd755a02b1f98835260c36..8f052e98a3c6cfc5c18e14cb0a21c4fcd50293c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -696,45 +696,6 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, lower_32_bits(page_table_base)); } -static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid) -{ - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - int vmid; - unsigned int tmp; - - if (adev->in_gpu_reset) - return -EIO; - - for (vmid = 0; vmid < 16; vmid++) { - if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) - continue; - - tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); - if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) && - (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) { - WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); - RREG32(mmVM_INVALIDATE_RESPONSE); - break; - } - } - - return 0; -} - -static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid) -{ - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - - if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) { - pr_err("non kfd vmid\n"); - return 0; - } - - WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); - RREG32(mmVM_INVALIDATE_RESPONSE); - return 0; -} - /** * read_vmid_from_vmfault_reg - read vmid from register * @@ -771,7 +732,5 @@ const struct kfd2kgd_calls gfx_v7_kfd2kgd = { .set_scratch_backing_va = set_scratch_backing_va, .get_tile_config = get_tile_config, .set_vm_context_page_table_base = set_vm_context_page_table_base, - .invalidate_tlbs = invalidate_tlbs, - .invalidate_tlbs_vmid = invalidate_tlbs_vmid, .read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg, }; diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index bfbddedb2380c9a92682e7f0bab623a2af829877..19a10db93d6883942679863418f2540194d0ee18 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -657,45 +657,6 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, lower_32_bits(page_table_base)); } -static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid) -{ - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - int vmid; - unsigned int tmp; - - if (adev->in_gpu_reset) - return -EIO; - - for (vmid = 0; vmid < 16; vmid++) { - if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) - continue; - - tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); - if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) && - (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) { - WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); - RREG32(mmVM_INVALIDATE_RESPONSE); - break; - } - } - - return 0; -} - -static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid) -{ - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - - if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) { - pr_err("non kfd vmid %d\n", vmid); - return -EINVAL; - } - - WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); - RREG32(mmVM_INVALIDATE_RESPONSE); - return 0; -} - const struct kfd2kgd_calls gfx_v8_kfd2kgd = { .program_sh_mem_settings = kgd_program_sh_mem_settings, .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, @@ -717,6 +678,4 @@ const struct kfd2kgd_calls gfx_v8_kfd2kgd = { .set_scratch_backing_va = set_scratch_backing_va, .get_tile_config = get_tile_config, .set_vm_context_page_table_base = set_vm_context_page_table_base, - .invalidate_tlbs = invalidate_tlbs, - .invalidate_tlbs_vmid = invalidate_tlbs_vmid, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 6f1a4676dddee3aba12ea2a58ac417292372fda3..8562afe5b761382c5492b230493c56998068bc1c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -40,7 +40,6 @@ #include "soc15d.h" #include "mmhub_v1_0.h" #include "gfxhub_v1_0.h" -#include "mmhub_v9_4.h" enum hqd_dequeue_request_type { @@ -104,13 +103,13 @@ static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id, lock_srbm(kgd, mec, pipe, queue_id, 0); } -static uint32_t get_queue_mask(struct amdgpu_device *adev, +static uint64_t get_queue_mask(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id) { - unsigned int bit = (pipe_id * adev->gfx.mec.num_queue_per_pipe + - queue_id) & 31; + unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe + + queue_id; - return ((uint32_t)1) << bit; + return 1ull << bit; } static void release_queue(struct kgd_dev *kgd) @@ -259,21 +258,6 @@ int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, acquire_queue(kgd, pipe_id, queue_id); - /* HIQ is set during driver init period with vmid set to 0*/ - if (m->cp_hqd_vmid == 0) { - uint32_t value, mec, pipe; - - mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; - pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); - - pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n", - mec, pipe, queue_id); - value = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS)); - value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1, - ((mec << 5) | (pipe << 3) | queue_id | 0x80)); - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value); - } - 
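/*
 * The HIQ mapping removed above (a direct write of RLC_CP_SCHEDULERS
 * with scheduler1 packed as (mec << 5) | (pipe << 3) | queue_id | 0x80)
 * is now performed through the KIQ instead: the new
 * kgd_gfx_v9_hiq_mqd_load() added below submits a PACKET3_MAP_QUEUES
 * packet on adev->gfx.kiq.ring under kiq.ring_lock. A minimal sketch of
 * a caller, assuming a hypothetical MQD/pipe/queue/doorbell already set
 * up elsewhere (e.g. by the KFD MQD manager -- an assumption, not shown
 * in this patch):
 *
 *   r = kgd_gfx_v9_hiq_mqd_load(kgd, mqd, pipe_id, queue_id,
 *                               doorbell_off);
 *   if (r)
 *       pr_err("HIQ map via KIQ failed: %d\n", r);
 */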
/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */ mqd_hqd = &m->cp_mqd_base_addr_lo; hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR); @@ -324,7 +308,7 @@ int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI), upper_32_bits((uintptr_t)wptr)); WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1), - get_queue_mask(adev, pipe_id, queue_id)); + (uint32_t)get_queue_mask(adev, pipe_id, queue_id)); } /* Start the EOP fetcher */ @@ -340,6 +324,59 @@ int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, return 0; } +int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd, + uint32_t pipe_id, uint32_t queue_id, + uint32_t doorbell_off) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; + struct v9_mqd *m; + uint32_t mec, pipe; + int r; + + m = get_mqd(mqd); + + acquire_queue(kgd, pipe_id, queue_id); + + mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; + pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); + + pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n", + mec, pipe, queue_id); + + spin_lock(&adev->gfx.kiq.ring_lock); + r = amdgpu_ring_alloc(kiq_ring, 7); + if (r) { + pr_err("Failed to alloc KIQ (%d).\n", r); + goto out_unlock; + } + + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5)); + amdgpu_ring_write(kiq_ring, + PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */ + PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */ + PACKET3_MAP_QUEUES_QUEUE(queue_id) | + PACKET3_MAP_QUEUES_PIPE(pipe) | + PACKET3_MAP_QUEUES_ME((mec - 1)) | + PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */ + PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */ + PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */ + PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */ + amdgpu_ring_write(kiq_ring, + PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off)); + amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo); + amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi); + amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo); + amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi); + amdgpu_ring_commit(kiq_ring); + +out_unlock: + spin_unlock(&adev->gfx.kiq.ring_lock); + release_queue(kgd); + + return r; +} + int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd, uint32_t pipe_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) @@ -618,100 +655,6 @@ bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); } -static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid, - uint32_t flush_type) -{ - signed long r; - uint32_t seq; - struct amdgpu_ring *ring = &adev->gfx.kiq.ring; - - spin_lock(&adev->gfx.kiq.ring_lock); - amdgpu_ring_alloc(ring, 12); /* fence + invalidate_tlbs package*/ - amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0)); - amdgpu_ring_write(ring, - PACKET3_INVALIDATE_TLBS_DST_SEL(1) | - PACKET3_INVALIDATE_TLBS_ALL_HUB(1) | - PACKET3_INVALIDATE_TLBS_PASID(pasid) | - PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type)); - amdgpu_fence_emit_polling(ring, &seq); - amdgpu_ring_commit(ring); - spin_unlock(&adev->gfx.kiq.ring_lock); - - r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout); - if (r < 1) { - DRM_ERROR("wait for kiq fence error: %ld.\n", r); - return -ETIME; - } - - return 0; -} - -int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, 
uint16_t pasid) -{ - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - int vmid, i; - uint16_t queried_pasid; - bool ret; - struct amdgpu_ring *ring = &adev->gfx.kiq.ring; - uint32_t flush_type = 0; - - if (adev->in_gpu_reset) - return -EIO; - if (adev->gmc.xgmi.num_physical_nodes && - adev->asic_type == CHIP_VEGA20) - flush_type = 2; - - if (ring->sched.ready) - return invalidate_tlbs_with_kiq(adev, pasid, flush_type); - - for (vmid = 0; vmid < 16; vmid++) { - if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) - continue; - - ret = kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(kgd, vmid, - &queried_pasid); - if (ret && queried_pasid == pasid) { - for (i = 0; i < adev->num_vmhubs; i++) - amdgpu_gmc_flush_gpu_tlb(adev, vmid, - i, flush_type); - break; - } - } - - return 0; -} - -int kgd_gfx_v9_invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid) -{ - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - int i; - - if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) { - pr_err("non kfd vmid %d\n", vmid); - return 0; - } - - /* Use legacy mode tlb invalidation. - * - * Currently on Raven the code below is broken for anything but - * legacy mode due to a MMHUB power gating problem. A workaround - * is for MMHUB to wait until the condition PER_VMID_INVALIDATE_REQ - * == PER_VMID_INVALIDATE_ACK instead of simply waiting for the ack - * bit. - * - * TODO 1: agree on the right set of invalidation registers for - * KFD use. Use the last one for now. Invalidate both GC and - * MMHUB. - * - * TODO 2: support range-based invalidation, requires kfg2kgd - * interface change - */ - for (i = 0; i < adev->num_vmhubs; i++) - amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0); - - return 0; -} - int kgd_gfx_v9_address_watch_disable(struct kgd_dev *kgd) { return 0; @@ -758,8 +701,8 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd, return 0; } -void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, - uint64_t page_table_base) +static void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, + uint32_t vmid, uint64_t page_table_base) { struct amdgpu_device *adev = get_amdgpu_device(kgd); @@ -769,14 +712,7 @@ void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmi return; } - /* TODO: take advantage of per-process address space size. For - * now, all processes share the same address space size, like - * on GFX8 and older. 
- */ - if (adev->asic_type == CHIP_ARCTURUS) { - mmhub_v9_4_setup_vm_pt_regs(adev, vmid, page_table_base); - } else - mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base); + mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base); gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base); } @@ -786,6 +722,7 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = { .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping, .init_interrupts = kgd_gfx_v9_init_interrupts, .hqd_load = kgd_gfx_v9_hqd_load, + .hiq_mqd_load = kgd_gfx_v9_hiq_mqd_load, .hqd_sdma_load = kgd_hqd_sdma_load, .hqd_dump = kgd_gfx_v9_hqd_dump, .hqd_sdma_dump = kgd_hqd_sdma_dump, @@ -801,7 +738,5 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = { kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, .get_tile_config = kgd_gfx_v9_get_tile_config, .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base, - .invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs, - .invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid, .get_hive_id = amdgpu_amdkfd_get_hive_id, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h index d9e9ad22b2bd98763a01a859301331d582840812..63d3e6683dfe1d800424990cb50da8ac26c175e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h @@ -33,6 +33,9 @@ int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, uint32_t wptr_shift, uint32_t wptr_mask, struct mm_struct *mm); +int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd, + uint32_t pipe_id, uint32_t queue_id, + uint32_t doorbell_off); int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd, uint32_t pipe_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs); @@ -57,9 +60,5 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd, bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, uint8_t vmid, uint16_t *p_pasid); -void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, - uint64_t page_table_base); -int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid); -int kgd_gfx_v9_invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid); int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd, struct tile_config *config); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index b2487f4f271b1d87d23596b6a884abcab62e56c1..fa8ac9d19a7a6d627754ca7f2e6a695608186562 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -2129,6 +2129,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem return -ENOMEM; mutex_init(&(*mem)->lock); + INIT_LIST_HEAD(&(*mem)->bo_va_list); (*mem)->bo = amdgpu_bo_ref(gws_bo); (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS; (*mem)->process_info = process_info; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 9ba80d828876e511544ab551c33c548f4520d758..fdd52d86a4d7558184401ca454ee831d1ca4ac9b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -2022,7 +2022,7 @@ int amdgpu_atombios_init(struct amdgpu_device *adev) if (adev->is_atom_fw) { amdgpu_atomfirmware_scratch_regs_init(adev); amdgpu_atomfirmware_allocate_fb_scratch(adev); - ret = amdgpu_atomfirmware_get_mem_train_fb_loc(adev); + ret = amdgpu_atomfirmware_get_mem_train_info(adev); 
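/*
 * As the rename above suggests, amdgpu_atomfirmware_get_mem_train_info()
 * now only determines whether GDDR6 memory training is supported and sets
 * adev->fw_vram_usage.mem_train_support; the old mem_train_fb_loc offset
 * derived from the VRAM reserve table is gone. A minimal sketch of how a
 * consumer would gate on the flag (the reservation helper named here is
 * an assumption for illustration, not part of this patch):
 *
 *   if (adev->fw_vram_usage.mem_train_support)
 *       r = ttm_training_reserve(adev); // assumed helper
 */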
if (ret) { DRM_ERROR("Failed to get mem train fb location.\n"); return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index ff4eb96bdfb542bd259af896d412bba4a2827946..58f9d8c3a17ab9692f3cf3694e53aad6d72553ee 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -525,16 +525,12 @@ static int gddr6_mem_train_support(struct amdgpu_device *adev) return ret; } -int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev) +int amdgpu_atomfirmware_get_mem_train_info(struct amdgpu_device *adev) { struct atom_context *ctx = adev->mode_info.atom_context; - unsigned char *bios = ctx->bios; - struct vram_reserve_block *reserved_block; - int index, block_number; + int index; uint8_t frev, crev; uint16_t data_offset, size; - uint32_t start_address_in_kb; - uint64_t offset; int ret; adev->fw_vram_usage.mem_train_support = false; @@ -569,32 +565,6 @@ int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev) return -EINVAL; } - reserved_block = (struct vram_reserve_block *) - (bios + data_offset + sizeof(struct atom_common_table_header)); - block_number = ((unsigned int)size - sizeof(struct atom_common_table_header)) - / sizeof(struct vram_reserve_block); - reserved_block += (block_number > 0) ? block_number-1 : 0; - DRM_DEBUG("block_number:0x%04x, last block: 0x%08xkb sz, %dkb fw, %dkb drv.\n", - block_number, - le32_to_cpu(reserved_block->start_address_in_kb), - le16_to_cpu(reserved_block->used_by_firmware_in_kb), - le16_to_cpu(reserved_block->used_by_driver_in_kb)); - if (reserved_block->used_by_firmware_in_kb > 0) { - start_address_in_kb = le32_to_cpu(reserved_block->start_address_in_kb); - offset = (uint64_t)start_address_in_kb * ONE_KiB; - if ((offset & (ONE_MiB - 1)) < (4 * ONE_KiB + 1) ) { - offset -= ONE_MiB; - } - - offset &= ~(ONE_MiB - 1); - adev->fw_vram_usage.mem_train_fb_loc = offset; - adev->fw_vram_usage.mem_train_support = true; - DRM_DEBUG("mem_train_fb_loc:0x%09llx.\n", offset); - ret = 0; - } else { - DRM_ERROR("used_by_firmware_in_kb is 0!\n"); - ret = -EINVAL; - } - - return ret; + adev->fw_vram_usage.mem_train_support = true; + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h index f871af5ea6f37ba30ee72f0f4f78e682d954c9f3..434fe2fa0089d485fa8387216fa2334cd6fa7f94 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h @@ -31,7 +31,7 @@ void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev); int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev); int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev, int *vram_width, int *vram_type, int *vram_vendor); -int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev); +int amdgpu_atomfirmware_get_mem_train_info(struct amdgpu_device *adev); int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev); int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev); bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index a97fb759e2f42a94b004743219881f47e79a20ee..3e35a8f2c5e553e4c714deff5e838ac1510a2f59 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -613,7 +613,17 @@ static bool 
amdgpu_atpx_detect(void) bool d3_supported = false; struct pci_dev *parent_pdev; - while ((pdev = pci_get_class(PCI_BASE_CLASS_DISPLAY << 16, pdev)) != NULL) { + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { + vga_count++; + + has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true); + + parent_pdev = pci_upstream_bridge(pdev); + d3_supported |= parent_pdev && parent_pdev->bridge_d3; + amdgpu_atpx_get_quirks(pdev); + } + + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) { vga_count++; has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 5b330f69194b4e7b243008191082bb8d5b8ea540..a52a084158b13742c51603022f66de13066109a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -909,6 +909,11 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, if (parser->entity && parser->entity != entity) return -EINVAL; + /* Return if there is no run queue associated with this entity. + * Possibly because the HW IP is disabled. */ + if (entity->rq == NULL) + return -EINVAL; + parser->entity = entity; ring = to_amdgpu_ring(entity->rq->sched); @@ -1229,7 +1234,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, goto error_abort; } - job->owner = p->filp; p->fence = dma_fence_get(&job->base.s_fence->finished); amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 1d2bbf10614e5faa308b1fcc063345bc0836738f..94a6c42f29ea59a42f62bc1ef1077e21566df3a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -42,19 +42,12 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = { [AMDGPU_HW_IP_VCN_JPEG] = 1, }; -static int amdgpu_ctx_total_num_entities(void) -{ - unsigned i, num_entities = 0; - - for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) - num_entities += amdgpu_ctx_num_entities[i]; - - return num_entities; -} - static int amdgpu_ctx_priority_permit(struct drm_file *filp, enum drm_sched_priority priority) { + if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX) + return -EINVAL; + /* NORMAL and below are accessible by everyone */ if (priority <= DRM_SCHED_PRIORITY_NORMAL) return 0; @@ -68,47 +61,94 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp, return -EACCES; } +static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const u32 ring) +{ + struct amdgpu_device *adev = ctx->adev; + struct amdgpu_ctx_entity *entity; + struct drm_gpu_scheduler **scheds = NULL, *sched = NULL; + unsigned num_scheds = 0; + enum drm_sched_priority priority; + int r; + + entity = kcalloc(1, offsetof(typeof(*entity), fences[amdgpu_sched_jobs]), + GFP_KERNEL); + if (!entity) + return -ENOMEM; + + entity->sequence = 1; + priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ? 
+ ctx->init_priority : ctx->override_priority; + switch (hw_ip) { + case AMDGPU_HW_IP_GFX: + sched = &adev->gfx.gfx_ring[0].sched; + scheds = &sched; + num_scheds = 1; + break; + case AMDGPU_HW_IP_COMPUTE: + scheds = adev->gfx.compute_sched; + num_scheds = adev->gfx.num_compute_sched; + break; + case AMDGPU_HW_IP_DMA: + scheds = adev->sdma.sdma_sched; + num_scheds = adev->sdma.num_sdma_sched; + break; + case AMDGPU_HW_IP_UVD: + sched = &adev->uvd.inst[0].ring.sched; + scheds = &sched; + num_scheds = 1; + break; + case AMDGPU_HW_IP_VCE: + sched = &adev->vce.ring[0].sched; + scheds = &sched; + num_scheds = 1; + break; + case AMDGPU_HW_IP_UVD_ENC: + sched = &adev->uvd.inst[0].ring_enc[0].sched; + scheds = &sched; + num_scheds = 1; + break; + case AMDGPU_HW_IP_VCN_DEC: + scheds = adev->vcn.vcn_dec_sched; + num_scheds = adev->vcn.num_vcn_dec_sched; + break; + case AMDGPU_HW_IP_VCN_ENC: + scheds = adev->vcn.vcn_enc_sched; + num_scheds = adev->vcn.num_vcn_enc_sched; + break; + case AMDGPU_HW_IP_VCN_JPEG: + scheds = adev->jpeg.jpeg_sched; + num_scheds = adev->jpeg.num_jpeg_sched; + break; + } + + r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds, + &ctx->guilty); + if (r) + goto error_free_entity; + + ctx->entities[hw_ip][ring] = entity; + return 0; + +error_free_entity: + kfree(entity); + + return r; +} + static int amdgpu_ctx_init(struct amdgpu_device *adev, enum drm_sched_priority priority, struct drm_file *filp, struct amdgpu_ctx *ctx) { - unsigned num_entities = amdgpu_ctx_total_num_entities(); - unsigned i, j, k; int r; - if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX) - return -EINVAL; - r = amdgpu_ctx_priority_permit(filp, priority); if (r) return r; memset(ctx, 0, sizeof(*ctx)); - ctx->adev = adev; - - ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities, - sizeof(struct dma_fence*), GFP_KERNEL); - if (!ctx->fences) - return -ENOMEM; - - ctx->entities[0] = kcalloc(num_entities, - sizeof(struct amdgpu_ctx_entity), - GFP_KERNEL); - if (!ctx->entities[0]) { - r = -ENOMEM; - goto error_free_fences; - } - - for (i = 0; i < num_entities; ++i) { - struct amdgpu_ctx_entity *entity = &ctx->entities[0][i]; - entity->sequence = 1; - entity->fences = &ctx->fences[amdgpu_sched_jobs * i]; - } - for (i = 1; i < AMDGPU_HW_IP_NUM; ++i) - ctx->entities[i] = ctx->entities[i - 1] + - amdgpu_ctx_num_entities[i - 1]; + ctx->adev = adev; kref_init(&ctx->refcount); spin_lock_init(&ctx->ring_lock); @@ -120,114 +160,49 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, ctx->init_priority = priority; ctx->override_priority = DRM_SCHED_PRIORITY_UNSET; - for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { - struct amdgpu_ring *rings[AMDGPU_MAX_RINGS]; - struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS]; - unsigned num_rings = 0; - unsigned num_rqs = 0; - - switch (i) { - case AMDGPU_HW_IP_GFX: - rings[0] = &adev->gfx.gfx_ring[0]; - num_rings = 1; - break; - case AMDGPU_HW_IP_COMPUTE: - for (j = 0; j < adev->gfx.num_compute_rings; ++j) - rings[j] = &adev->gfx.compute_ring[j]; - num_rings = adev->gfx.num_compute_rings; - break; - case AMDGPU_HW_IP_DMA: - for (j = 0; j < adev->sdma.num_instances; ++j) - rings[j] = &adev->sdma.instance[j].ring; - num_rings = adev->sdma.num_instances; - break; - case AMDGPU_HW_IP_UVD: - rings[0] = &adev->uvd.inst[0].ring; - num_rings = 1; - break; - case AMDGPU_HW_IP_VCE: - rings[0] = &adev->vce.ring[0]; - num_rings = 1; - break; - case AMDGPU_HW_IP_UVD_ENC: - rings[0] = &adev->uvd.inst[0].ring_enc[0]; - num_rings = 1; - break; - case AMDGPU_HW_IP_VCN_DEC: - for (j = 
0; j < adev->vcn.num_vcn_inst; ++j) { - if (adev->vcn.harvest_config & (1 << j)) - continue; - rings[num_rings++] = &adev->vcn.inst[j].ring_dec; - } - break; - case AMDGPU_HW_IP_VCN_ENC: - for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { - if (adev->vcn.harvest_config & (1 << j)) - continue; - for (k = 0; k < adev->vcn.num_enc_rings; ++k) - rings[num_rings++] = &adev->vcn.inst[j].ring_enc[k]; - } - break; - case AMDGPU_HW_IP_VCN_JPEG: - for (j = 0; j < adev->jpeg.num_jpeg_inst; ++j) { - if (adev->jpeg.harvest_config & (1 << j)) - continue; - rings[num_rings++] = &adev->jpeg.inst[j].ring_dec; - } - break; - } + return 0; - for (j = 0; j < num_rings; ++j) { - if (!rings[j]->adev) - continue; +} - rqs[num_rqs++] = &rings[j]->sched.sched_rq[priority]; - } +static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity) +{ - for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) - r = drm_sched_entity_init(&ctx->entities[i][j].entity, - rqs, num_rqs, &ctx->guilty); - if (r) - goto error_cleanup_entities; - } + int i; - return 0; + if (!entity) + return; -error_cleanup_entities: - for (i = 0; i < num_entities; ++i) - drm_sched_entity_destroy(&ctx->entities[0][i].entity); - kfree(ctx->entities[0]); + for (i = 0; i < amdgpu_sched_jobs; ++i) + dma_fence_put(entity->fences[i]); -error_free_fences: - kfree(ctx->fences); - ctx->fences = NULL; - return r; + kfree(entity); } static void amdgpu_ctx_fini(struct kref *ref) { struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount); - unsigned num_entities = amdgpu_ctx_total_num_entities(); struct amdgpu_device *adev = ctx->adev; unsigned i, j; if (!adev) return; - for (i = 0; i < num_entities; ++i) - for (j = 0; j < amdgpu_sched_jobs; ++j) - dma_fence_put(ctx->entities[0][i].fences[j]); - kfree(ctx->fences); - kfree(ctx->entities[0]); + for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { + for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) { + amdgpu_ctx_fini_entity(ctx->entities[i][j]); + ctx->entities[i][j] = NULL; + } + } mutex_destroy(&ctx->lock); - kfree(ctx); } int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance, u32 ring, struct drm_sched_entity **entity) { + int r; + if (hw_ip >= AMDGPU_HW_IP_NUM) { DRM_ERROR("unknown HW IP type: %d\n", hw_ip); return -EINVAL; @@ -244,7 +219,13 @@ int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance, return -EINVAL; } - *entity = &ctx->entities[hw_ip][ring].entity; + if (ctx->entities[hw_ip][ring] == NULL) { + r = amdgpu_ctx_init_entity(ctx, hw_ip, ring); + if (r) + return r; + } + + *entity = &ctx->entities[hw_ip][ring]->entity; return 0; } @@ -284,14 +265,17 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev, static void amdgpu_ctx_do_release(struct kref *ref) { struct amdgpu_ctx *ctx; - unsigned num_entities; - u32 i; + u32 i, j; ctx = container_of(ref, struct amdgpu_ctx, refcount); + for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { + for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) { + if (!ctx->entities[i][j]) + continue; - num_entities = amdgpu_ctx_total_num_entities(); - for (i = 0; i < num_entities; i++) - drm_sched_entity_destroy(&ctx->entities[0][i].entity); + drm_sched_entity_destroy(&ctx->entities[i][j]->entity); + } + } amdgpu_ctx_fini(ref); } @@ -521,19 +505,23 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, enum drm_sched_priority priority) { - unsigned num_entities = amdgpu_ctx_total_num_entities(); enum drm_sched_priority ctx_prio; - unsigned i; + unsigned i, j; ctx->override_priority = 
priority; ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ? ctx->init_priority : ctx->override_priority; + for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { + for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) { + struct drm_sched_entity *entity; - for (i = 0; i < num_entities; i++) { - struct drm_sched_entity *entity = &ctx->entities[0][i].entity; + if (!ctx->entities[i][j]) + continue; - drm_sched_entity_set_priority(entity, ctx_prio); + entity = &ctx->entities[i][j]->entity; + drm_sched_entity_set_priority(entity, ctx_prio); + } } } @@ -569,20 +557,24 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr) long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout) { - unsigned num_entities = amdgpu_ctx_total_num_entities(); struct amdgpu_ctx *ctx; struct idr *idp; - uint32_t id, i; + uint32_t id, i, j; idp = &mgr->ctx_handles; mutex_lock(&mgr->lock); idr_for_each_entry(idp, ctx, id) { - for (i = 0; i < num_entities; i++) { - struct drm_sched_entity *entity; + for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { + for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) { + struct drm_sched_entity *entity; + + if (!ctx->entities[i][j]) + continue; - entity = &ctx->entities[0][i].entity; - timeout = drm_sched_entity_flush(entity, timeout); + entity = &ctx->entities[i][j]->entity; + timeout = drm_sched_entity_flush(entity, timeout); + } } } mutex_unlock(&mgr->lock); @@ -591,10 +583,9 @@ long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout) void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr) { - unsigned num_entities = amdgpu_ctx_total_num_entities(); struct amdgpu_ctx *ctx; struct idr *idp; - uint32_t id, i; + uint32_t id, i, j; idp = &mgr->ctx_handles; @@ -604,8 +595,17 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr) continue; } - for (i = 0; i < num_entities; i++) - drm_sched_entity_fini(&ctx->entities[0][i].entity); + for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { + for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) { + struct drm_sched_entity *entity; + + if (!ctx->entities[i][j]) + continue; + + entity = &ctx->entities[i][j]->entity; + drm_sched_entity_fini(entity); + } + } } } @@ -627,3 +627,45 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr) idr_destroy(&mgr->ctx_handles); mutex_destroy(&mgr->lock); } + +void amdgpu_ctx_init_sched(struct amdgpu_device *adev) +{ + int i, j; + + for (i = 0; i < adev->gfx.num_gfx_rings; i++) { + adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched; + adev->gfx.num_gfx_sched++; + } + + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + adev->gfx.compute_sched[i] = &adev->gfx.compute_ring[i].sched; + adev->gfx.num_compute_sched++; + } + + for (i = 0; i < adev->sdma.num_instances; i++) { + adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched; + adev->sdma.num_sdma_sched++; + } + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + adev->vcn.vcn_dec_sched[adev->vcn.num_vcn_dec_sched++] = + &adev->vcn.inst[i].ring_dec.sched; + } + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + for (j = 0; j < adev->vcn.num_enc_rings; ++j) + adev->vcn.vcn_enc_sched[adev->vcn.num_vcn_enc_sched++] = + &adev->vcn.inst[i].ring_enc[j].sched; + } + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + adev->jpeg.jpeg_sched[adev->jpeg.num_jpeg_sched++] = + &adev->jpeg.inst[i].ring_dec.sched; + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h index da808633732b18a1ff3c9eff4ed8ff4c31bcb8ce..de490f183af2bcf115182e25a24f8b9d8a34bf17 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h @@ -29,10 +29,12 @@ struct drm_device; struct drm_file; struct amdgpu_fpriv; +#define AMDGPU_MAX_ENTITY_NUM 4 + struct amdgpu_ctx_entity { uint64_t sequence; - struct dma_fence **fences; struct drm_sched_entity entity; + struct dma_fence *fences[]; }; struct amdgpu_ctx { @@ -42,8 +44,7 @@ struct amdgpu_ctx { unsigned reset_counter_query; uint32_t vram_lost_counter; spinlock_t ring_lock; - struct dma_fence **fences; - struct amdgpu_ctx_entity *entities[AMDGPU_HW_IP_NUM]; + struct amdgpu_ctx_entity *entities[AMDGPU_HW_IP_NUM][AMDGPU_MAX_ENTITY_NUM]; bool preamble_presented; enum drm_sched_priority init_priority; enum drm_sched_priority override_priority; @@ -87,4 +88,7 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr); long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout); void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr); +void amdgpu_ctx_init_sched(struct amdgpu_device *adev); + + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 8e6726e0d035fa261c2bc8e8cf9de6fe61f8c061..f24ed9a1a3e56a9513a70b054e6bd1746e2b2b1b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -26,6 +26,7 @@ #include #include #include +#include #include @@ -129,7 +130,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f, sh_bank = 0xFFFFFFFF; if (instance_bank == 0x3FF) instance_bank = 0xFFFFFFFF; - use_bank = 1; + use_bank = true; } else if (*pos & (1ULL << 61)) { me = (*pos & GENMASK_ULL(33, 24)) >> 24; @@ -137,17 +138,24 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f, queue = (*pos & GENMASK_ULL(53, 44)) >> 44; vmid = (*pos & GENMASK_ULL(58, 54)) >> 54; - use_ring = 1; + use_ring = true; } else { - use_bank = use_ring = 0; + use_bank = use_ring = false; } *pos &= (1UL << 22) - 1; + r = pm_runtime_get_sync(adev->ddev->dev); + if (r < 0) + return r; + if (use_bank) { if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || - (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) + (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) { + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); return -EINVAL; + } mutex_lock(&adev->grbm_idx_mutex); amdgpu_gfx_select_se_sh(adev, se_bank, sh_bank, instance_bank); @@ -193,6 +201,9 @@ end: if (pm_pg_lock) mutex_unlock(&adev->pm.mutex); + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + return result; } @@ -237,13 +248,20 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf, if (size & 0x3 || *pos & 0x3) return -EINVAL; + r = pm_runtime_get_sync(adev->ddev->dev); + if (r < 0) + return r; + while (size) { uint32_t value; value = RREG32_PCIE(*pos >> 2); r = put_user(value, (uint32_t *)buf); - if (r) + if (r) { + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); return r; + } result += 4; buf += 4; @@ -251,6 +269,9 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf, size -= 4; } + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + return result; } @@ -276,12 +297,19 @@ static 
ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user if (size & 0x3 || *pos & 0x3) return -EINVAL; + r = pm_runtime_get_sync(adev->ddev->dev); + if (r < 0) + return r; + while (size) { uint32_t value; r = get_user(value, (uint32_t *)buf); - if (r) + if (r) { + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); return r; + } WREG32_PCIE(*pos >> 2, value); @@ -291,6 +319,9 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user size -= 4; } + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + return result; } @@ -316,13 +347,20 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf, if (size & 0x3 || *pos & 0x3) return -EINVAL; + r = pm_runtime_get_sync(adev->ddev->dev); + if (r < 0) + return r; + while (size) { uint32_t value; value = RREG32_DIDT(*pos >> 2); r = put_user(value, (uint32_t *)buf); - if (r) + if (r) { + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); return r; + } result += 4; buf += 4; @@ -330,6 +368,9 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf, size -= 4; } + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + return result; } @@ -355,12 +396,19 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user if (size & 0x3 || *pos & 0x3) return -EINVAL; + r = pm_runtime_get_sync(adev->ddev->dev); + if (r < 0) + return r; + while (size) { uint32_t value; r = get_user(value, (uint32_t *)buf); - if (r) + if (r) { + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); return r; + } WREG32_DIDT(*pos >> 2, value); @@ -370,6 +418,9 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user size -= 4; } + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + return result; } @@ -395,13 +446,20 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf, if (size & 0x3 || *pos & 0x3) return -EINVAL; + r = pm_runtime_get_sync(adev->ddev->dev); + if (r < 0) + return r; + while (size) { uint32_t value; value = RREG32_SMC(*pos); r = put_user(value, (uint32_t *)buf); - if (r) + if (r) { + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); return r; + } result += 4; buf += 4; @@ -409,6 +467,9 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf, size -= 4; } + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + return result; } @@ -434,12 +495,19 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user * if (size & 0x3 || *pos & 0x3) return -EINVAL; + r = pm_runtime_get_sync(adev->ddev->dev); + if (r < 0) + return r; + while (size) { uint32_t value; r = get_user(value, (uint32_t *)buf); - if (r) + if (r) { + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); return r; + } WREG32_SMC(*pos, value); @@ -449,6 +517,9 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user * size -= 4; } + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + return result; } @@ -572,7 +643,16 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf, idx = *pos >> 2; valuesize = sizeof(values); + + r = pm_runtime_get_sync(adev->ddev->dev); + if (r 
< 0) + return r; + r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize); + + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + if (r) return r; @@ -633,6 +713,10 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, wave = (*pos & GENMASK_ULL(36, 31)) >> 31; simd = (*pos & GENMASK_ULL(44, 37)) >> 37; + r = pm_runtime_get_sync(adev->ddev->dev); + if (r < 0) + return r; + /* switch to the specific se/sh/cu */ mutex_lock(&adev->grbm_idx_mutex); amdgpu_gfx_select_se_sh(adev, se, sh, cu); @@ -644,6 +728,9 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF); mutex_unlock(&adev->grbm_idx_mutex); + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + if (!x) return -EINVAL; @@ -711,6 +798,10 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, if (!data) return -ENOMEM; + r = pm_runtime_get_sync(adev->ddev->dev); + if (r < 0) + return r; + /* switch to the specific se/sh/cu */ mutex_lock(&adev->grbm_idx_mutex); amdgpu_gfx_select_se_sh(adev, se, sh, cu); @@ -726,6 +817,9 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF); mutex_unlock(&adev->grbm_idx_mutex); + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + while (size) { uint32_t value; @@ -859,6 +953,10 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data) struct amdgpu_device *adev = dev->dev_private; int r = 0, i; + r = pm_runtime_get_sync(dev->dev); + if (r < 0) + return r; + /* Avoid accidentally unparking the sched thread during GPU reset */ mutex_lock(&adev->lock_reset); @@ -889,6 +987,9 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data) mutex_unlock(&adev->lock_reset); + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); + return 0; } @@ -907,8 +1008,17 @@ static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *)m->private; struct drm_device *dev = node->minor->dev; struct amdgpu_device *adev = dev->dev_private; + int r; + + r = pm_runtime_get_sync(dev->dev); + if (r < 0) + return r; seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev)); + + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); + return 0; } @@ -917,8 +1027,17 @@ static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *)m->private; struct drm_device *dev = node->minor->dev; struct amdgpu_device *adev = dev->dev_private; + int r; + + r = pm_runtime_get_sync(dev->dev); + if (r < 0) + return r; seq_printf(m, "(%d)\n", ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_TT)); + + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index a979468780248bb4794da4ad469e96c901318eea..39cd545976b7ea57314717a021951674eb9de48d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -66,6 +66,7 @@ #include "amdgpu_pmu.h" #include +#include MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin"); @@ -215,8 +216,8 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg, { uint32_t ret; - if (!(acc_flags
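[Editorial aside] Every debugfs handler in this file now follows the same discipline: take a runtime-PM reference before touching registers, and drop it with a mark_last_busy/put_autosuspend pair on every exit path. Because the hunks above have to repeat the put pair before each early return, a condensed sketch of the pattern with a single release point may be easier to audit (illustrative only, not the committed code):

static ssize_t example_debugfs_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	r = pm_runtime_get_sync(adev->ddev->dev);	/* wake the GPU */
	if (r < 0)
		return r;

	while (size) {
		uint32_t value = RREG32_PCIE(*pos >> 2);

		if (put_user(value, (uint32_t *)buf)) {
			result = -EFAULT;
			goto out;		/* single release point */
		}
		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}
out:
	pm_runtime_mark_last_busy(adev->ddev->dev);	/* reset autosuspend timer */
	pm_runtime_put_autosuspend(adev->ddev->dev);
	return result;
}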
& AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) - return amdgpu_virt_kiq_rreg(adev, reg); + if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))) + return amdgpu_kiq_rreg(adev, reg); if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX)) ret = readl(((void __iomem *)adev->rmmio) + (reg * 4)); @@ -293,8 +294,8 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, adev->last_mm_index = v; } - if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) - return amdgpu_virt_kiq_wreg(adev, reg, v); + if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))) + return amdgpu_kiq_wreg(adev, reg, v); if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX)) writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); @@ -984,7 +985,7 @@ static void amdgpu_device_check_vm_size(struct amdgpu_device *adev) static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev) { struct sysinfo si; - bool is_os_64 = (sizeof(void *) == 8) ? true : false; + bool is_os_64 = (sizeof(void *) == 8); uint64_t total_memory; uint64_t dram_size_seven_GB = 0x1B8000000; uint64_t dram_size_three_GB = 0xB8000000; @@ -1031,8 +1032,6 @@ def_value: */ static int amdgpu_device_check_arguments(struct amdgpu_device *adev) { - int ret = 0; - if (amdgpu_sched_jobs < 4) { dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n", amdgpu_sched_jobs); @@ -1072,7 +1071,7 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev) adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type); - return ret; + return 0; } /** @@ -1810,7 +1809,8 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev) } } - r = amdgpu_pm_load_smu_firmware(adev, &smu_version); + if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA) + r = amdgpu_pm_load_smu_firmware(adev, &smu_version); return r; } @@ -2345,14 +2345,7 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) adev->ip_blocks[i].status.hw = false; /* handle putting the SMC in the appropriate state */ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { - if (is_support_sw_smu(adev)) { - r = smu_set_mp1_state(&adev->smu, adev->mp1_state); - } else if (adev->powerplay.pp_funcs && - adev->powerplay.pp_funcs->set_mp1_state) { - r = adev->powerplay.pp_funcs->set_mp1_state( - adev->powerplay.pp_handle, - adev->mp1_state); - } + r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state); if (r) { DRM_ERROR("SMC failed to set mp1 state %d, %d\n", adev->mp1_state, r); @@ -2439,7 +2432,8 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev) AMD_IP_BLOCK_TYPE_GFX, AMD_IP_BLOCK_TYPE_SDMA, AMD_IP_BLOCK_TYPE_UVD, - AMD_IP_BLOCK_TYPE_VCE + AMD_IP_BLOCK_TYPE_VCE, + AMD_IP_BLOCK_TYPE_VCN }; for (i = 0; i < ARRAY_SIZE(ip_order); i++) { @@ -2454,7 +2448,11 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev) block->status.hw) continue; - r = block->version->funcs->hw_init(adev); + if (block->version->type == AMD_IP_BLOCK_TYPE_SMC) + r = block->version->funcs->resume(adev); + else + r = block->version->funcs->hw_init(adev); + DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded"); if (r) return r; @@ -2663,14 +2661,38 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work) { struct amdgpu_device *adev = container_of(__work, struct amdgpu_device, xgmi_reset_work); + struct amdgpu_hive_info 
*hive = amdgpu_get_xgmi_hive(adev, 0); - if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) - adev->asic_reset_res = (adev->in_baco == false) ? - amdgpu_device_baco_enter(adev->ddev) : - amdgpu_device_baco_exit(adev->ddev); - else - adev->asic_reset_res = amdgpu_asic_reset(adev); + /* It's a bug to not have a hive within this function */ + if (WARN_ON(!hive)) + return; + + /* + * Use task barrier to synchronize all xgmi reset works across the + * hive. task_barrier_enter and task_barrier_exit will block + * until all the threads running the xgmi reset works reach + * those points. task_barrier_full will do both blocks. + */ + if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { + + task_barrier_enter(&hive->tb); + adev->asic_reset_res = amdgpu_device_baco_enter(adev->ddev); + + if (adev->asic_reset_res) + goto fail; + + task_barrier_exit(&hive->tb); + adev->asic_reset_res = amdgpu_device_baco_exit(adev->ddev); + + if (adev->asic_reset_res) + goto fail; + } else { + task_barrier_full(&hive->tb); + adev->asic_reset_res = amdgpu_asic_reset(adev); + } + +fail: if (adev->asic_reset_res) DRM_WARN("ASIC reset failed with error, %d for drm dev, %s", adev->asic_reset_res, adev->ddev->unique); @@ -2785,7 +2807,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->mman.buffer_funcs = NULL; adev->mman.buffer_funcs_ring = NULL; adev->vm_manager.vm_pte_funcs = NULL; - adev->vm_manager.vm_pte_num_rqs = 0; + adev->vm_manager.vm_pte_num_scheds = 0; adev->gmc.gmc_funcs = NULL; adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS); bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); @@ -2826,6 +2848,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, hash_init(adev->mn_hash); mutex_init(&adev->lock_reset); mutex_init(&adev->psp.mutex); + mutex_init(&adev->notifier_lock); r = amdgpu_device_check_arguments(adev); if (r) @@ -3029,6 +3052,14 @@ fence_driver_init: goto failed; } + DRM_DEBUG("SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n", + adev->gfx.config.max_shader_engines, + adev->gfx.config.max_sh_per_se, + adev->gfx.config.max_cu_per_sh, + adev->gfx.cu_info.number); + + amdgpu_ctx_init_sched(adev); + adev->accel_working = true; amdgpu_vm_check_compute_bug(adev); @@ -3660,8 +3691,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, if (r) return r; - amdgpu_amdkfd_pre_reset(adev); - /* Resume IP prior to SMC */ r = amdgpu_device_ip_reinit_early_sriov(adev); if (r) @@ -3730,6 +3759,11 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev) case CHIP_VEGA10: case CHIP_VEGA12: case CHIP_RAVEN: + case CHIP_ARCTURUS: + case CHIP_RENOIR: + case CHIP_NAVI10: + case CHIP_NAVI14: + case CHIP_NAVI12: break; default: goto disabled; @@ -3790,18 +3824,13 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, return r; } -static int amdgpu_do_asic_reset(struct amdgpu_device *adev, - struct amdgpu_hive_info *hive, +static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive, struct list_head *device_list_handle, bool *need_full_reset_arg) { struct amdgpu_device *tmp_adev = NULL; bool need_full_reset = *need_full_reset_arg, vram_lost = false; int r = 0; - int cpu = smp_processor_id(); - bool use_baco = - (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) ? 
- true : false; /* * ASIC reset has to be done on all XGMI hive nodes ASAP */ if (need_full_reset) { list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { - /* - * For XGMI run all resets in parallel to speed up the - * process by scheduling the highpri wq on different - * cpus. For XGMI with baco reset, all nodes must enter - * baco within close proximity before anyone exit. - */ + /* For XGMI run all resets in parallel to speed up the process */ if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { - if (!queue_work_on(cpu, system_highpri_wq, - &tmp_adev->xgmi_reset_work)) + if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work)) r = -EALREADY; - cpu = cpumask_next(cpu, cpu_online_mask); } else r = amdgpu_asic_reset(tmp_adev); - if (r) - break; - } - - /* For XGMI wait for all work to complete before proceed */ - if (!r) { - list_for_each_entry(tmp_adev, device_list_handle, - gmc.xgmi.head) { - if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { - flush_work(&tmp_adev->xgmi_reset_work); - r = tmp_adev->asic_reset_res; - if (r) - break; - if (use_baco) - tmp_adev->in_baco = true; - } - } - } - - /* - * For XGMI with baco reset, need exit baco phase by scheduling - * xgmi_reset_work one more time. PSP reset and sGPU skips this - * phase. Not assume the situation that PSP reset and baco reset - * coexist within an XGMI hive. - */ - if (!r && use_baco) { - cpu = smp_processor_id(); - list_for_each_entry(tmp_adev, device_list_handle, - gmc.xgmi.head) { - if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { - if (!queue_work_on(cpu, - system_highpri_wq, - &tmp_adev->xgmi_reset_work)) - r = -EALREADY; - if (r) - break; - cpu = cpumask_next(cpu, cpu_online_mask); - } + if (r) { + DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s", + r, tmp_adev->ddev->unique); + break; } } - if (!r && use_baco) { + /* For XGMI wait for all resets to complete before proceeding */ + if (!r) { list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { @@ -3872,16 +3861,9 @@ static int amdgpu_do_asic_reset(struct amdgpu_device *adev, r = tmp_adev->asic_reset_res; if (r) break; - tmp_adev->in_baco = false; } } } - - if (r) { - DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s", - r, tmp_adev->ddev->unique); - goto end; - } } if (!r && amdgpu_ras_intr_triggered()) @@ -3974,7 +3956,7 @@ static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock) mutex_lock(&adev->lock_reset); atomic_inc(&adev->gpu_reset_counter); - adev->in_gpu_reset = 1; + adev->in_gpu_reset = true; switch (amdgpu_asic_reset_method(adev)) { case AMD_RESET_METHOD_MODE1: adev->mp1_state = PP_MP1_STATE_SHUTDOWN; @@ -3994,7 +3976,7 @@ static void amdgpu_device_unlock_adev(struct amdgpu_device *adev) { amdgpu_vf_error_trans_all(adev); adev->mp1_state = PP_MP1_STATE_NONE; - adev->in_gpu_reset = 0; + adev->in_gpu_reset = false; mutex_unlock(&adev->lock_reset); } @@ -4175,8 +4157,7 @@ retry: /* Rest of adevs pre asic reset from XGMI hive.
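[Editorial aside] Taken together with the xgmi_reset_work hunk earlier in amdgpu_device.c, the flow after this rework is: the recovery thread fans one work item out per hive node on the unbound workqueue, and the nodes lock-step the BACO sequence among themselves through the hive's task barrier. A condensed sketch (identifiers from the patch; error handling elided):

	/* recovery thread: one reset work item per XGMI node, then wait */
	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head)
		queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work);
	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head)
		flush_work(&tmp_adev->xgmi_reset_work);

	/* inside each work item, for the BACO method */
	task_barrier_enter(&hive->tb);		/* all nodes rendezvous */
	adev->asic_reset_res = amdgpu_device_baco_enter(adev->ddev);
	task_barrier_exit(&hive->tb);		/* every node is now in BACO */
	adev->asic_reset_res = amdgpu_device_baco_exit(adev->ddev);

This replaces the old two-pass queue_work_on()/in_baco bookkeeping while preserving the "all nodes enter BACO before any node exits" requirement, which is now enforced inside the work item rather than by the caller.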
*/ if (r) adev->asic_reset_res = r; } else { - r = amdgpu_do_asic_reset(adev, hive, device_list_handle, - &need_full_reset); + r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset); if (r && r == -EAGAIN) goto retry; } @@ -4377,55 +4358,21 @@ int amdgpu_device_baco_enter(struct drm_device *dev) if (ras && ras->supported) adev->nbio.funcs->enable_doorbell_interrupt(adev, false); - if (is_support_sw_smu(adev)) { - struct smu_context *smu = &adev->smu; - int ret; - - ret = smu_baco_enter(smu); - if (ret) - return ret; - } else { - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - - if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state) - return -ENOENT; - - /* enter BACO state */ - if (pp_funcs->set_asic_baco_state(pp_handle, 1)) - return -EIO; - } - - return 0; + return amdgpu_dpm_baco_enter(adev); } int amdgpu_device_baco_exit(struct drm_device *dev) { struct amdgpu_device *adev = dev->dev_private; struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + int ret = 0; if (!amdgpu_device_supports_baco(adev->ddev)) return -ENOTSUPP; - if (is_support_sw_smu(adev)) { - struct smu_context *smu = &adev->smu; - int ret; - - ret = smu_baco_exit(smu); - if (ret) - return ret; - - } else { - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - - if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state) - return -ENOENT; - - /* exit BACO state */ - if (pp_funcs->set_asic_baco_state(pp_handle, 0)) - return -EIO; - } + ret = amdgpu_dpm_baco_exit(adev); + if (ret) + return ret; if (ras && ras->supported) adev->nbio.funcs->enable_doorbell_interrupt(adev, true); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h new file mode 100644 index 0000000000000000000000000000000000000000..057f6ea645d7eb12384007a96365002ef5e0aa17 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h @@ -0,0 +1,65 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __AMDGPU_DF_H__ +#define __AMDGPU_DF_H__ + +struct amdgpu_df_hash_status { + bool hash_64k; + bool hash_2m; + bool hash_1g; +}; + +struct amdgpu_df_funcs { + void (*sw_init)(struct amdgpu_device *adev); + void (*sw_fini)(struct amdgpu_device *adev); + void (*enable_broadcast_mode)(struct amdgpu_device *adev, + bool enable); + u32 (*get_fb_channel_number)(struct amdgpu_device *adev); + u32 (*get_hbm_channel_number)(struct amdgpu_device *adev); + void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev, + bool enable); + void (*get_clockgating_state)(struct amdgpu_device *adev, + u32 *flags); + void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev, + bool enable); + int (*pmc_start)(struct amdgpu_device *adev, uint64_t config, + int is_enable); + int (*pmc_stop)(struct amdgpu_device *adev, uint64_t config, + int is_disable); + void (*pmc_get_count)(struct amdgpu_device *adev, uint64_t config, + uint64_t *count); + uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val); + void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val, + uint32_t ficadl_val, uint32_t ficadh_val); + uint64_t (*get_dram_base_addr)(struct amdgpu_device *adev, + uint32_t df_inst); + uint32_t (*get_df_inst_id)(struct amdgpu_device *adev); +}; + +struct amdgpu_df { + struct amdgpu_df_hash_status hash_status; + const struct amdgpu_df_funcs *funcs; +}; + +#endif /* __AMDGPU_DF_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 4e699071d1443deefe71aaf3841ac6f6b7bd0788..6d520a3eec4090732ae62d91eaa6cd5a1babc9f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -513,13 +513,23 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev, * will not allow USWC mappings. * Also, don't allow GTT domain if the BO doesn't have the USWC flag set. */ - if (adev->asic_type >= CHIP_CARRIZO && - adev->asic_type < CHIP_RAVEN && - (adev->flags & AMD_IS_APU) && - (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) && + if ((bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) && amdgpu_bo_support_uswc(bo_flags) && - amdgpu_device_asic_has_dc_support(adev->asic_type)) - domain |= AMDGPU_GEM_DOMAIN_GTT; + amdgpu_device_asic_has_dc_support(adev->asic_type)) { + switch (adev->asic_type) { + case CHIP_CARRIZO: + case CHIP_STONEY: + domain |= AMDGPU_GEM_DOMAIN_GTT; + break; + case CHIP_RAVEN: + /* enable S/G on PCO and RV2 */ + if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) + domain |= AMDGPU_GEM_DOMAIN_GTT; + break; + default: + break; + } + } #endif return domain; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c index 9cc270efee7cf00fd0c0cfec9ca7d6cfe7e91e6b..a2e8c3dfb4f158760c6fda0a8eddf37ad85be58b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c @@ -946,20 +946,63 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block bool swsmu = is_support_sw_smu(adev); switch (block_type) { - case AMD_IP_BLOCK_TYPE_GFX: case AMD_IP_BLOCK_TYPE_UVD: - case AMD_IP_BLOCK_TYPE_VCN: case AMD_IP_BLOCK_TYPE_VCE: + if (swsmu) { + ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate); + } else if (adev->powerplay.pp_funcs && + adev->powerplay.pp_funcs->set_powergating_by_smu) { + /* + * TODO: need a better lock mechanism + * + * Here adev->pm.mutex lock protection is enforced on + * UVD and VCE cases only.
Since for other cases, there + * may be already lock protection in amdgpu_pm.c. + * This is a quick fix for the deadlock issue below. + * INFO: task ocltst:2028 blocked for more than 120 seconds. + * Tainted: G OE 5.0.0-37-generic #40~18.04.1-Ubuntu + * "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. + * ocltst D 0 2028 2026 0x00000000 + * Call Trace: + * __schedule+0x2c0/0x870 + * schedule+0x2c/0x70 + * schedule_preempt_disabled+0xe/0x10 + * __mutex_lock.isra.9+0x26d/0x4e0 + * __mutex_lock_slowpath+0x13/0x20 + * ? __mutex_lock_slowpath+0x13/0x20 + * mutex_lock+0x2f/0x40 + * amdgpu_dpm_set_powergating_by_smu+0x64/0xe0 [amdgpu] + * gfx_v8_0_enable_gfx_static_mg_power_gating+0x3c/0x70 [amdgpu] + * gfx_v8_0_set_powergating_state+0x66/0x260 [amdgpu] + * amdgpu_device_ip_set_powergating_state+0x62/0xb0 [amdgpu] + * pp_dpm_force_performance_level+0xe7/0x100 [amdgpu] + * amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu] + */ + mutex_lock(&adev->pm.mutex); + ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu( + (adev)->powerplay.pp_handle, block_type, gate)); + mutex_unlock(&adev->pm.mutex); + } + break; + case AMD_IP_BLOCK_TYPE_GFX: + case AMD_IP_BLOCK_TYPE_VCN: case AMD_IP_BLOCK_TYPE_SDMA: if (swsmu) ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate); - else + else if (adev->powerplay.pp_funcs && + adev->powerplay.pp_funcs->set_powergating_by_smu) ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu( (adev)->powerplay.pp_handle, block_type, gate)); break; + case AMD_IP_BLOCK_TYPE_JPEG: + if (swsmu) + ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate); + break; case AMD_IP_BLOCK_TYPE_GMC: case AMD_IP_BLOCK_TYPE_ACP: - ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu( + if (adev->powerplay.pp_funcs && + adev->powerplay.pp_funcs->set_powergating_by_smu) + ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu( (adev)->powerplay.pp_handle, block_type, gate)); break; default: @@ -968,3 +1011,163 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block return ret; } + +int amdgpu_dpm_baco_enter(struct amdgpu_device *adev) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + void *pp_handle = adev->powerplay.pp_handle; + struct smu_context *smu = &adev->smu; + int ret = 0; + + if (is_support_sw_smu(adev)) { + ret = smu_baco_enter(smu); + } else { + if (!pp_funcs || !pp_funcs->set_asic_baco_state) + return -ENOENT; + + /* enter BACO state */ + ret = pp_funcs->set_asic_baco_state(pp_handle, 1); + } + + return ret; +} + +int amdgpu_dpm_baco_exit(struct amdgpu_device *adev) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + void *pp_handle = adev->powerplay.pp_handle; + struct smu_context *smu = &adev->smu; + int ret = 0; + + if (is_support_sw_smu(adev)) { + ret = smu_baco_exit(smu); + } else { + if (!pp_funcs || !pp_funcs->set_asic_baco_state) + return -ENOENT; + + /* exit BACO state */ + ret = pp_funcs->set_asic_baco_state(pp_handle, 0); + } + + return ret; +} + +int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev, + enum pp_mp1_state mp1_state) +{ + int ret = 0; + + if (is_support_sw_smu(adev)) { + ret = smu_set_mp1_state(&adev->smu, mp1_state); + } else if (adev->powerplay.pp_funcs && + adev->powerplay.pp_funcs->set_mp1_state) { + ret = adev->powerplay.pp_funcs->set_mp1_state( + adev->powerplay.pp_handle, + mp1_state); + } + + return ret; +} + +bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev) +{ + const struct amd_pm_funcs *pp_funcs =
adev->powerplay.pp_funcs; + void *pp_handle = adev->powerplay.pp_handle; + struct smu_context *smu = &adev->smu; + bool baco_cap; + + if (is_support_sw_smu(adev)) { + return smu_baco_is_support(smu); + } else { + if (!pp_funcs || !pp_funcs->get_asic_baco_capability) + return false; + + if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap)) + return false; + + return baco_cap ? true : false; + } +} + +int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + void *pp_handle = adev->powerplay.pp_handle; + struct smu_context *smu = &adev->smu; + + if (is_support_sw_smu(adev)) { + return smu_mode2_reset(smu); + } else { + if (!pp_funcs || !pp_funcs->asic_reset_mode_2) + return -ENOENT; + + return pp_funcs->asic_reset_mode_2(pp_handle); + } +} + +int amdgpu_dpm_baco_reset(struct amdgpu_device *adev) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + void *pp_handle = adev->powerplay.pp_handle; + struct smu_context *smu = &adev->smu; + int ret = 0; + + dev_info(adev->dev, "GPU BACO reset\n"); + + if (is_support_sw_smu(adev)) { + ret = smu_baco_enter(smu); + if (ret) + return ret; + + ret = smu_baco_exit(smu); + if (ret) + return ret; + } else { + if (!pp_funcs + || !pp_funcs->set_asic_baco_state) + return -ENOENT; + + /* enter BACO state */ + ret = pp_funcs->set_asic_baco_state(pp_handle, 1); + if (ret) + return ret; + + /* exit BACO state */ + ret = pp_funcs->set_asic_baco_state(pp_handle, 0); + if (ret) + return ret; + } + + return 0; +} + +int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev, + enum PP_SMC_POWER_PROFILE type, + bool en) +{ + int ret = 0; + + if (is_support_sw_smu(adev)) + ret = smu_switch_power_profile(&adev->smu, type, en); + else if (adev->powerplay.pp_funcs && + adev->powerplay.pp_funcs->switch_power_profile) + ret = adev->powerplay.pp_funcs->switch_power_profile( + adev->powerplay.pp_handle, type, en); + + return ret; +} + +int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev, + uint32_t pstate) +{ + int ret = 0; + + if (is_support_sw_smu_xgmi(adev)) + ret = smu_set_xgmi_pstate(&adev->smu, pstate); + else if (adev->powerplay.pp_funcs && + adev->powerplay.pp_funcs->set_xgmi_pstate) + ret = adev->powerplay.pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle, + pstate); + + return ret; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h index 2cfb677272af3f0906569a5b5d7715d1337894be..902ca6c00cca4508016471a9239456204bf3d5a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h @@ -341,10 +341,6 @@ enum amdgpu_pcie_gen { ((adev)->powerplay.pp_funcs->reset_power_profile_state(\ (adev)->powerplay.pp_handle, request)) -#define amdgpu_dpm_switch_power_profile(adev, type, en) \ - ((adev)->powerplay.pp_funcs->switch_power_profile(\ - (adev)->powerplay.pp_handle, type, en)) - #define amdgpu_dpm_set_clockgating_by_smu(adev, msg_id) \ ((adev)->powerplay.pp_funcs->set_clockgating_by_smu(\ (adev)->powerplay.pp_handle, msg_id)) @@ -517,4 +513,24 @@ extern int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low); extern int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low); +int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev, + uint32_t pstate); + +int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev, + enum PP_SMC_POWER_PROFILE type, + bool en); + +int amdgpu_dpm_baco_reset(struct amdgpu_device *adev); + +int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev); 
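[Editorial aside] With the swsmu-versus-powerplay branching folded into these amdgpu_dpm_* wrappers, a caller that used to open-code both paths shrinks to something like the following sketch; the function names are the ones declared just below, while the surrounding reset policy is illustrative:

	int r;

	if (amdgpu_dpm_is_baco_supported(adev))
		r = amdgpu_dpm_baco_reset(adev);	/* enter, then exit, BACO */
	else
		r = amdgpu_dpm_mode2_reset(adev);
	if (r)
		dev_err(adev->dev, "ASIC reset failed: %d\n", r);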
+ +bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev); + +int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev, + enum pp_mp1_state mp1_state); + +int amdgpu_dpm_baco_exit(struct amdgpu_device *adev); + +int amdgpu_dpm_baco_enter(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 3f6f14ce1511a9bc1ff2741226690c64c1206d4b..a9c4edca70c9e2187a371d5d2fcb66b658a4f456 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -142,7 +142,7 @@ int amdgpu_async_gfx_ring = 1; int amdgpu_mcbp = 0; int amdgpu_discovery = -1; int amdgpu_mes = 0; -int amdgpu_noretry = 1; +int amdgpu_noretry; int amdgpu_force_asic_type = -1; struct amdgpu_mgpu_info mgpu_info = { @@ -588,7 +588,7 @@ MODULE_PARM_DESC(mes, module_param_named(mes, amdgpu_mes, int, 0444); MODULE_PARM_DESC(noretry, - "Disable retry faults (0 = retry enabled, 1 = retry disabled (default))"); + "Disable retry faults (0 = retry enabled (default), 1 = retry disabled)"); module_param_named(noretry, amdgpu_noretry, int, 0644); /** @@ -1203,13 +1203,23 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev) struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); struct amdgpu_device *adev = drm_dev->dev_private; - int ret; + int ret, i; if (!adev->runpm) { pm_runtime_forbid(dev); return -EBUSY; } + /* wait for all rings to drain before suspending */ + for (i = 0; i < AMDGPU_MAX_RINGS; i++) { + struct amdgpu_ring *ring = adev->rings[i]; + if (ring && ring->sched.ready) { + ret = amdgpu_fence_wait_empty(ring); + if (ret) + return -EBUSY; + } + } + if (amdgpu_device_supports_boco(drm_dev)) drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; drm_kms_helper_poll_disable(drm_dev); @@ -1381,7 +1391,8 @@ static struct drm_driver kms_driver = { .driver_features = DRIVER_USE_AGP | DRIVER_ATOMIC | DRIVER_GEM | - DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ, + DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ | + DRIVER_SYNCOBJ_TIMELINE, .load = amdgpu_driver_load_kms, .open = amdgpu_driver_open_kms, .postclose = amdgpu_driver_postclose_kms, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 377fe20bce23ca540f5d4eba6996f8ec7674f2af..3c01252b1e0ea3ba4ce81159333c54a82922bc59 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -34,6 +34,7 @@ #include #include #include +#include #include @@ -154,7 +155,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, seq); amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, seq, flags | AMDGPU_FENCE_FLAG_INT); - + pm_runtime_get_noresume(adev->ddev->dev); ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; if (unlikely(rcu_dereference_protected(*ptr, 1))) { struct dma_fence *old; @@ -234,6 +235,7 @@ static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring) bool amdgpu_fence_process(struct amdgpu_ring *ring) { struct amdgpu_fence_driver *drv = &ring->fence_drv; + struct amdgpu_device *adev = ring->adev; uint32_t seq, last_seq; int r; @@ -274,6 +276,8 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring) BUG(); dma_fence_put(fence); + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); } while (last_seq != seq); return true; @@ -737,10 +741,18 @@ static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data) struct drm_info_node *node 
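[Editorial aside] Advertising DRIVER_SYNCOBJ_TIMELINE in the kms_driver hunk above exposes the timeline variants of the syncobj ioctls to userspace. A minimal libdrm consumer might look like this sketch; the calls are quoted from memory of xf86drm.h and should be checked against your libdrm version:

#include <stdint.h>
#include <xf86drm.h>

void wait_for_point(int fd, uint32_t syncobj, uint64_t point)
{
	/* Block until the timeline reaches 'point'; WAIT_FOR_SUBMIT also
	 * tolerates the signaling fence not having been submitted yet. */
	drmSyncobjTimelineWait(fd, &syncobj, &point, 1,
			       INT64_MAX /* absolute timeout, ns */,
			       DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
			       NULL);
}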
= (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct amdgpu_device *adev = dev->dev_private; + int r; + + r = pm_runtime_get_sync(dev->dev); + if (r < 0) + return 0; seq_printf(m, "gpu recover\n"); amdgpu_device_gpu_recover(adev, NULL); + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index e00b46180d2ecdfb8ab38ce415fd0dcf53bd001d..0f960b498792140fff86075fd8c8a95e835b5d30 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -296,7 +296,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, spin_lock_init(&kiq->ring_lock); - r = amdgpu_device_wb_get(adev, &adev->virt.reg_val_offs); + r = amdgpu_device_wb_get(adev, &kiq->reg_val_offs); if (r) return r; @@ -321,7 +321,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring) { - amdgpu_device_wb_free(ring->adev, ring->adev->virt.reg_val_offs); + amdgpu_device_wb_free(ring->adev, ring->adev->gfx.kiq.reg_val_offs); amdgpu_ring_fini(ring); } @@ -543,12 +543,6 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable) if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) return; - if (!is_support_sw_smu(adev) && - (!adev->powerplay.pp_funcs || - !adev->powerplay.pp_funcs->set_powergating_by_smu)) - return; - - mutex_lock(&adev->gfx.gfx_off_mutex); if (!enable) @@ -641,7 +635,7 @@ int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev, kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); if (adev->gfx.funcs->query_ras_error_count) adev->gfx.funcs->query_ras_error_count(adev, err_data); - amdgpu_ras_reset_gpu(adev, 0); + amdgpu_ras_reset_gpu(adev); } return AMDGPU_RAS_SUCCESS; } @@ -664,3 +658,95 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev, amdgpu_ras_interrupt_dispatch(adev, &ih_data); return 0; } + +uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg) +{ + signed long r, cnt = 0; + unsigned long flags; + uint32_t seq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_ring *ring = &kiq->ring; + + BUG_ON(!ring->funcs->emit_rreg); + + spin_lock_irqsave(&kiq->ring_lock, flags); + amdgpu_ring_alloc(ring, 32); + amdgpu_ring_emit_rreg(ring, reg); + amdgpu_fence_emit_polling(ring, &seq); + amdgpu_ring_commit(ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); + + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + + /* don't wait anymore for gpu reset case because this way may + * block gpu_recover() routine forever, e.g. this virt_kiq_rreg + * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will + * never return if we keep waiting in virt_kiq_rreg, which causes + * gpu_recover() to hang there.
+ * + * also don't wait anymore for IRQ context + */ + if (r < 1 && (adev->in_gpu_reset || in_interrupt())) + goto failed_kiq_read; + + might_sleep(); + while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { + msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + } + + if (cnt > MAX_KIQ_REG_TRY) + goto failed_kiq_read; + + return adev->wb.wb[kiq->reg_val_offs]; + +failed_kiq_read: + pr_err("failed to read reg:%x\n", reg); + return ~0; +} + +void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) +{ + signed long r, cnt = 0; + unsigned long flags; + uint32_t seq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_ring *ring = &kiq->ring; + + BUG_ON(!ring->funcs->emit_wreg); + + spin_lock_irqsave(&kiq->ring_lock, flags); + amdgpu_ring_alloc(ring, 32); + amdgpu_ring_emit_wreg(ring, reg, v); + amdgpu_fence_emit_polling(ring, &seq); + amdgpu_ring_commit(ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); + + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + + /* don't wait anymore for gpu reset case because this way may + * block gpu_recover() routine forever, e.g. this virt_kiq_rreg + * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will + * never return if we keep waiting in virt_kiq_rreg, which causes + * gpu_recover() to hang there. + * + * also don't wait anymore for IRQ context + */ + if (r < 1 && (adev->in_gpu_reset || in_interrupt())) + goto failed_kiq_write; + + might_sleep(); + while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { + + msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + } + + if (cnt > MAX_KIQ_REG_TRY) + goto failed_kiq_write; + + return; + +failed_kiq_write: + pr_err("failed to write reg:%x\n", reg); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 0ae0a2715b0d407000bb396664174fa6259bd6f6..ca17ffb013012dc2a3e04f343976de0a4ae4784a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -76,11 +76,15 @@ struct kiq_pm4_funcs { struct amdgpu_ring *ring, u64 addr, u64 seq); + void (*kiq_invalidate_tlbs)(struct amdgpu_ring *kiq_ring, + uint16_t pasid, uint32_t flush_type, + bool all_hub); /* Packet sizes */ int set_resources_size; int map_queues_size; int unmap_queues_size; int query_status_size; + int invalidate_tlbs_size; }; struct amdgpu_kiq { @@ -90,6 +94,7 @@ struct amdgpu_kiq { struct amdgpu_ring ring; struct amdgpu_irq_src irq; const struct kiq_pm4_funcs *pmf; + uint32_t reg_val_offs; }; /* @@ -269,8 +274,12 @@ struct amdgpu_gfx { bool me_fw_write_wait; bool cp_fw_write_wait; struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS]; + struct drm_gpu_scheduler *gfx_sched[AMDGPU_MAX_GFX_RINGS]; + uint32_t num_gfx_sched; unsigned num_gfx_rings; struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS]; + struct drm_gpu_scheduler *compute_sched[AMDGPU_MAX_COMPUTE_RINGS]; + uint32_t num_compute_sched; unsigned num_compute_rings; struct amdgpu_irq_src eop_irq; struct amdgpu_irq_src priv_reg_irq; @@ -367,4 +376,6 @@ int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev, int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry); +uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); +void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index
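[Editorial aside] These helpers replace the old amdgpu_virt_kiq_* pair, and the amdgpu_mm_rreg()/amdgpu_mm_wreg() hunks near the top of amdgpu_device.c route through them either when SR-IOV runtime mode demands it or when the caller passes the new AMDGPU_REGS_KIQ flag explicitly. In caller terms, a sketch using the flag added by this series:

	/* force the access through the KIQ ring instead of direct MMIO */
	val = amdgpu_mm_rreg(adev, reg, AMDGPU_REGS_KIQ);
	amdgpu_mm_wreg(adev, reg, val | bit, AMDGPU_REGS_KIQ);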
a12f33c0f5df0d15e4295f8a6f072c5a03ad0c9a..5884ab590486e6f941d67f931b34c4f0d2f9b01c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -223,7 +223,7 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc) u64 size_af, size_bf; if (amdgpu_sriov_vf(adev)) { - mc->agp_start = 0xffffffff; + mc->agp_start = 0xffffffffffff; mc->agp_end = 0x0; mc->agp_size = 0; @@ -333,3 +333,43 @@ void amdgpu_gmc_ras_fini(struct amdgpu_device *adev) amdgpu_mmhub_ras_fini(adev); amdgpu_xgmi_ras_fini(adev); } + + /* + * The latest engine allocation on gfx9/10 is: + * Engine 2, 3: firmware + * Engine 0, 1, 4~16: amdgpu ring, + * subject to change when ring number changes + * Engine 17: Gart flushes + */ +#define GFXHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3 +#define MMHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3 + +int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] = + {GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP, + GFXHUB_FREE_VM_INV_ENGS_BITMAP}; + unsigned i; + unsigned vmhub, inv_eng; + + for (i = 0; i < adev->num_rings; ++i) { + ring = adev->rings[i]; + vmhub = ring->funcs->vmhub; + + inv_eng = ffs(vm_inv_engs[vmhub]); + if (!inv_eng) { + dev_err(adev->dev, "no VM inv eng for ring %s\n", + ring->name); + return -EINVAL; + } + + ring->vm_inv_eng = inv_eng - 1; + vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng); + + dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n", + ring->name, ring->vm_inv_eng, ring->funcs->vmhub); + } + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index b499a3de8bb63dc4f3660ca1928b911c9e51c4ad..d3c27a3c43f68ad5ed11ff5b0379d8d06c648d13 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -92,6 +92,9 @@ struct amdgpu_gmc_funcs { /* flush the vm tlb via mmio */ void (*flush_gpu_tlb)(struct amdgpu_device *adev, uint32_t vmid, uint32_t vmhub, uint32_t flush_type); + /* flush the vm tlb via pasid */ + int (*flush_gpu_tlb_pasid)(struct amdgpu_device *adev, uint16_t pasid, + uint32_t flush_type, bool all_hub); /* flush the vm tlb via ring */ uint64_t (*emit_flush_gpu_tlb)(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr); @@ -216,6 +219,9 @@ struct amdgpu_gmc { }; #define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type))) +#define amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, type, allhub) \ + ((adev)->gmc.gmc_funcs->flush_gpu_tlb_pasid \ + ((adev), (pasid), (type), (allhub))) #define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr)) #define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid)) #define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags)) @@ -267,5 +273,6 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr, uint16_t pasid, uint64_t timestamp); int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev); void amdgpu_gmc_ras_fini(struct amdgpu_device *adev); +int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 73328d0c741da6f146aac152c86a8d6acd5bb167..d42be880a236bf8b1d0278a5c9586a0fc25ccdd9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 
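[Editorial aside] The FREE_VM_INV_ENGS bitmaps in the amdgpu_gmc.c hunk above encode the comment's allocation: engines 0, 1 and 4-16 are handed to rings, 2-3 belong to firmware, and 17 is reserved for GART flushes. A compile-time identity makes the magic number auditable (sketch; GENMASK/BIT come from linux/bits.h):

#include <linux/bits.h>
#include <linux/build_bug.h>

/* engines 0-16 available, minus 2 and 3 (firmware); bit 17 (GART) clear */
static_assert((GENMASK(16, 0) & ~(BIT(2) | BIT(3))) == 0x1FFF3);

ffs() in the allocator then always hands out the lowest free engine and clears its bit, so the dev_info line prints a stable ring-to-engine mapping across boots with the same ring set.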
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -153,7 +153,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, if (r) return r; - job->owner = owner; *f = dma_fence_get(&job->base.s_fence->finished); amdgpu_job_free_resources(job); priority = job->base.s_priority; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h index dc7ee9358dcd6ed01cd4cda96ce97a612c4ac61f..3f7b8433d17904fd8e535219108a47f0f54c9e3d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h @@ -49,7 +49,6 @@ struct amdgpu_job { uint32_t preamble_status; uint32_t preemption_status; uint32_t num_ibs; - void *owner; bool vm_needs_flush; uint64_t vm_pd_addr; unsigned vmid; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h index 5131a0a1bc8aa90c8c9dde5482ff466ee0ce8977..bd9ef9cc86deae6c7183a6ccab20a33dbb869cce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h @@ -43,6 +43,8 @@ struct amdgpu_jpeg { uint8_t num_jpeg_inst; struct amdgpu_jpeg_inst inst[AMDGPU_MAX_JPEG_INSTANCES]; struct amdgpu_jpeg_reg internal; + struct drm_gpu_scheduler *jpeg_sched[AMDGPU_MAX_JPEG_INSTANCES]; + uint32_t num_jpeg_sched; unsigned harvest_config; struct delayed_work idle_work; enum amd_powergating_state cur_state; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index b32adda70bbc64dd3f92dccbf9f8ab3f56de27c0..b03b1eb7ba0424fd4d70f930cbccd009949b4043 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -37,6 +37,7 @@ #include #include #include +#include #include "hwmgr.h" #define WIDTH_4K 3840 @@ -158,10 +159,15 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; enum amd_pm_state_type pm; + int ret; if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) return 0; + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) + return ret; + if (is_support_sw_smu(adev)) { if (adev->smu.ppt_funcs->get_current_power_state) pm = smu_get_current_power_state(&adev->smu); @@ -173,6 +179,9 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev, pm = adev->pm.dpm.user_state; } + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + return snprintf(buf, PAGE_SIZE, "%s\n", (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : (pm == POWER_STATE_TYPE_BALANCED) ? 
"balanced" : "performance"); @@ -186,6 +195,7 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; enum amd_pm_state_type state; + int ret; if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) return -EINVAL; @@ -196,10 +206,12 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev, state = POWER_STATE_TYPE_BALANCED; else if (strncmp("performance", buf, strlen("performance")) == 0) state = POWER_STATE_TYPE_PERFORMANCE; - else { - count = -EINVAL; - goto fail; - } + else + return -EINVAL; + + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) + return ret; if (is_support_sw_smu(adev)) { mutex_lock(&adev->pm.mutex); @@ -212,12 +224,11 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev, adev->pm.dpm.user_state = state; mutex_unlock(&adev->pm.mutex); - /* Can't set dpm state when the card is off */ - if (!(adev->flags & AMD_IS_PX) || - (ddev->switch_power_state == DRM_SWITCH_POWER_ON)) - amdgpu_pm_compute_clocks(adev); + amdgpu_pm_compute_clocks(adev); } -fail: + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + return count; } @@ -288,13 +299,14 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; enum amd_dpm_forced_level level = 0xff; + int ret; if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) return 0; - if ((adev->flags & AMD_IS_PX) && - (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) - return snprintf(buf, PAGE_SIZE, "off\n"); + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) + return ret; if (is_support_sw_smu(adev)) level = smu_get_performance_level(&adev->smu); @@ -303,6 +315,9 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev, else level = adev->pm.dpm.forced_level; + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + return snprintf(buf, PAGE_SIZE, "%s\n", (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" : (level == AMD_DPM_FORCED_LEVEL_LOW) ? 
"low" : @@ -329,11 +344,6 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) return -EINVAL; - /* Can't force performance level when the card is off */ - if ((adev->flags & AMD_IS_PX) && - (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) - return -EINVAL; - if (strncmp("low", buf, strlen("low")) == 0) { level = AMD_DPM_FORCED_LEVEL_LOW; } else if (strncmp("high", buf, strlen("high")) == 0) { @@ -353,17 +363,23 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) { level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; } else { - count = -EINVAL; - goto fail; + return -EINVAL; } + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) + return ret; + if (is_support_sw_smu(adev)) current_level = smu_get_performance_level(&adev->smu); else if (adev->powerplay.pp_funcs->get_performance_level) current_level = amdgpu_dpm_get_performance_level(adev); - if (current_level == level) + if (current_level == level) { + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); return count; + } /* profile_exit setting is valid only when current mode is in profile mode */ if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | @@ -372,29 +388,40 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) && (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) { pr_err("Currently not in any profile mode!\n"); + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); return -EINVAL; } if (is_support_sw_smu(adev)) { ret = smu_force_performance_level(&adev->smu, level); - if (ret) - count = -EINVAL; + if (ret) { + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + return -EINVAL; + } } else if (adev->powerplay.pp_funcs->force_performance_level) { mutex_lock(&adev->pm.mutex); if (adev->pm.dpm.thermal_active) { - count = -EINVAL; mutex_unlock(&adev->pm.mutex); - goto fail; + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + return -EINVAL; } ret = amdgpu_dpm_force_performance_level(adev, level); - if (ret) - count = -EINVAL; - else + if (ret) { + mutex_unlock(&adev->pm.mutex); + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + return -EINVAL; + } else { adev->pm.dpm.forced_level = level; + } mutex_unlock(&adev->pm.mutex); } + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); -fail: return count; } @@ -407,6 +434,10 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev, struct pp_states_info data; int i, buf_len, ret; + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) + return ret; + if (is_support_sw_smu(adev)) { ret = smu_get_power_num_states(&adev->smu, &data); if (ret) @@ -414,6 +445,9 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev, } else if (adev->powerplay.pp_funcs->get_pp_num_states) amdgpu_dpm_get_pp_num_states(adev, &data); + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums); for (i = 0; i < data.nums; i++) buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i, @@ -439,6 +473,10 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev, if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) return 0; + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) + return ret; + if 
(is_support_sw_smu(adev)) { pm = smu_get_current_power_state(smu); ret = smu_get_power_num_states(smu, &data); @@ -450,6 +488,9 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev, amdgpu_dpm_get_pp_num_states(adev, &data); } + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + for (i = 0; i < data.nums; i++) { if (pm == data.states[i]) break; @@ -500,14 +541,18 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev, struct pp_states_info data; ret = kstrtoul(buf, 0, &idx); - if (ret || idx >= ARRAY_SIZE(data.states)) { - count = -EINVAL; - goto fail; - } + if (ret || idx >= ARRAY_SIZE(data.states)) + return -EINVAL; + idx = array_index_nospec(idx, ARRAY_SIZE(data.states)); amdgpu_dpm_get_pp_num_states(adev, &data); state = data.states[idx]; + + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) + return ret; + /* only set user selected power states */ if (state != POWER_STATE_TYPE_INTERNAL_BOOT && state != POWER_STATE_TYPE_DEFAULT) { @@ -515,8 +560,10 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev, AMD_PP_TASK_ENABLE_USER_STATE, &state); adev->pp_force_state_enabled = true; } + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); } -fail: + return count; } @@ -538,20 +585,32 @@ static ssize_t amdgpu_get_pp_table(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; char *table = NULL; - int size; + int size, ret; if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) return 0; + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) + return ret; + if (is_support_sw_smu(adev)) { size = smu_sys_get_pp_table(&adev->smu, (void **)&table); + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); if (size < 0) return size; - } - else if (adev->powerplay.pp_funcs->get_pp_table) + } else if (adev->powerplay.pp_funcs->get_pp_table) { size = amdgpu_dpm_get_pp_table(adev, &table); - else + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + if (size < 0) + return size; + } else { + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); return 0; + } if (size >= PAGE_SIZE) size = PAGE_SIZE - 1; @@ -573,13 +632,23 @@ static ssize_t amdgpu_set_pp_table(struct device *dev, if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) return -EINVAL; + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) + return ret; + if (is_support_sw_smu(adev)) { ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count); - if (ret) + if (ret) { + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); return ret; + } } else if (adev->powerplay.pp_funcs->set_pp_table) amdgpu_dpm_set_pp_table(adev, buf, count); + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + return count; } @@ -703,18 +772,28 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, tmp_str++; } + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) + return ret; + if (is_support_sw_smu(adev)) { ret = smu_od_edit_dpm_table(&adev->smu, type, parameter, parameter_size); - if (ret) + if (ret) { + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); return -EINVAL; + } } else { if (adev->powerplay.pp_funcs->odn_edit_dpm_table) { ret = amdgpu_dpm_odn_edit_dpm_table(adev, type, parameter, parameter_size); - if (ret) + if (ret) { + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); return -EINVAL; + } } if 
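[Editorial aside] The pp_force_state handler above pairs the bounds check on the user-supplied index with array_index_nospec(), so the CPU cannot speculatively index past ARRAY_SIZE(data.states) between the check and the table lookup. The pattern generalizes to any sysfs/ioctl-style index parsed from userspace; in this sketch, 'table' and 'state' are illustrative stand-ins (the nospec helper lives in linux/nospec.h):

#include <linux/nospec.h>

	unsigned long idx;

	if (kstrtoul(buf, 0, &idx) || idx >= ARRAY_SIZE(table))
		return -EINVAL;
	/* clamp speculation to the validated range */
	idx = array_index_nospec(idx, ARRAY_SIZE(table));
	state = table[idx];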
(type == PP_OD_COMMIT_DPM_TABLE) { @@ -722,12 +801,18 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL); + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); return count; } else { + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); return -EINVAL; } } } + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); return count; } @@ -738,27 +823,33 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; - uint32_t size = 0; + ssize_t size; + int ret; if (amdgpu_sriov_vf(adev)) return 0; + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) + return ret; + if (is_support_sw_smu(adev)) { size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf); size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size); size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size); size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size); - return size; } else if (adev->powerplay.pp_funcs->print_clock_levels) { size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf); size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size); size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size); size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size); - return size; } else { - return snprintf(buf, PAGE_SIZE, "\n"); + size = snprintf(buf, PAGE_SIZE, "\n"); } + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + return size; } /** @@ -796,15 +887,27 @@ static ssize_t amdgpu_set_pp_feature_status(struct device *dev, pr_debug("featuremask = 0x%llx\n", featuremask); + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) + return ret; + if (is_support_sw_smu(adev)) { ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask); - if (ret) + if (ret) { + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); return -EINVAL; + } } else if (adev->powerplay.pp_funcs->set_ppfeature_status) { ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask); - if (ret) + if (ret) { + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); return -EINVAL; + } } + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); return count; } @@ -815,16 +918,27 @@ static ssize_t amdgpu_get_pp_feature_status(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; + ssize_t size; + int ret; if (amdgpu_sriov_vf(adev)) return 0; - if (is_support_sw_smu(adev)) { - return smu_sys_get_pp_feature_mask(&adev->smu, buf); - } else if (adev->powerplay.pp_funcs->get_ppfeature_status) - return amdgpu_dpm_get_ppfeature_status(adev, buf); + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) + return ret; + + if (is_support_sw_smu(adev)) + size = smu_sys_get_pp_feature_mask(&adev->smu, buf); + else if (adev->powerplay.pp_funcs->get_ppfeature_status) + size = amdgpu_dpm_get_ppfeature_status(adev, buf); + else + size = snprintf(buf, PAGE_SIZE, "\n"); + + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); - return snprintf(buf, PAGE_SIZE, "\n"); + return size; } /** @@ -863,16 +977,27 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; + ssize_t size; + int ret; if 
(amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) return 0; + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) + return ret; + if (is_support_sw_smu(adev)) - return smu_print_clk_levels(&adev->smu, SMU_SCLK, buf); + size = smu_print_clk_levels(&adev->smu, SMU_SCLK, buf); else if (adev->powerplay.pp_funcs->print_clock_levels) - return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf); + size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf); else - return snprintf(buf, PAGE_SIZE, "\n"); + size = snprintf(buf, PAGE_SIZE, "\n"); + + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + + return size; } /* @@ -928,11 +1053,18 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, if (ret) return ret; + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) + return ret; + if (is_support_sw_smu(adev)) ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask, true); else if (adev->powerplay.pp_funcs->force_clock_level) ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask); + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + if (ret) return -EINVAL; @@ -945,16 +1077,27 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; + ssize_t size; + int ret; if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) return 0; + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) + return ret; + if (is_support_sw_smu(adev)) - return smu_print_clk_levels(&adev->smu, SMU_MCLK, buf); + size = smu_print_clk_levels(&adev->smu, SMU_MCLK, buf); else if (adev->powerplay.pp_funcs->print_clock_levels) - return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf); + size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf); else - return snprintf(buf, PAGE_SIZE, "\n"); + size = snprintf(buf, PAGE_SIZE, "\n"); + + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + + return size; } static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, @@ -964,8 +1107,8 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; - int ret; uint32_t mask = 0; + int ret; if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) return -EINVAL; @@ -974,11 +1117,18 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, if (ret) return ret; + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) + return ret; + if (is_support_sw_smu(adev)) ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask, true); else if (adev->powerplay.pp_funcs->force_clock_level) ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask); + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + if (ret) return -EINVAL; @@ -991,16 +1141,27 @@ static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; + ssize_t size; + int ret; if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) return 0; + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) + return ret; + if (is_support_sw_smu(adev)) - return smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf); + size = smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf); else if (adev->powerplay.pp_funcs->print_clock_levels) - return amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf); + size = amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf); else - return snprintf(buf, PAGE_SIZE, 
"\n"); + size = snprintf(buf, PAGE_SIZE, "\n"); + + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + + return size; } static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev, @@ -1020,10 +1181,19 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev, if (ret) return ret; + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) + return ret; + if (is_support_sw_smu(adev)) ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask, true); else if (adev->powerplay.pp_funcs->force_clock_level) ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask); + else + ret = 0; + + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); if (ret) return -EINVAL; @@ -1037,16 +1207,27 @@ static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; + ssize_t size; + int ret; if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) return 0; + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) + return ret; + if (is_support_sw_smu(adev)) - return smu_print_clk_levels(&adev->smu, SMU_FCLK, buf); + size = smu_print_clk_levels(&adev->smu, SMU_FCLK, buf); else if (adev->powerplay.pp_funcs->print_clock_levels) - return amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf); + size = amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf); else - return snprintf(buf, PAGE_SIZE, "\n"); + size = snprintf(buf, PAGE_SIZE, "\n"); + + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + + return size; } static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev, @@ -1066,10 +1247,19 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev, if (ret) return ret; + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) + return ret; + if (is_support_sw_smu(adev)) ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask, true); else if (adev->powerplay.pp_funcs->force_clock_level) ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask); + else + ret = 0; + + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); if (ret) return -EINVAL; @@ -1083,16 +1273,27 @@ static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; + ssize_t size; + int ret; if (amdgpu_sriov_vf(adev)) return 0; + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) + return ret; + if (is_support_sw_smu(adev)) - return smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf); + size = smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf); else if (adev->powerplay.pp_funcs->print_clock_levels) - return amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf); + size = amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf); else - return snprintf(buf, PAGE_SIZE, "\n"); + size = snprintf(buf, PAGE_SIZE, "\n"); + + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + + return size; } static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev, @@ -1112,10 +1313,19 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev, if (ret) return ret; + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) + return ret; + if (is_support_sw_smu(adev)) ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask, true); else if (adev->powerplay.pp_funcs->force_clock_level) ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask); + else + ret = 0; + + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); if (ret) return -EINVAL; @@ 
-1129,16 +1339,27 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; + ssize_t size; + int ret; if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) return 0; + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) + return ret; + if (is_support_sw_smu(adev)) - return smu_print_clk_levels(&adev->smu, SMU_PCIE, buf); + size = smu_print_clk_levels(&adev->smu, SMU_PCIE, buf); else if (adev->powerplay.pp_funcs->print_clock_levels) - return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf); + size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf); else - return snprintf(buf, PAGE_SIZE, "\n"); + size = snprintf(buf, PAGE_SIZE, "\n"); + + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + + return size; } static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, @@ -1158,10 +1379,19 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, if (ret) return ret; + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) + return ret; + if (is_support_sw_smu(adev)) ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask, true); else if (adev->powerplay.pp_funcs->force_clock_level) ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask); + else + ret = 0; + + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); if (ret) return -EINVAL; @@ -1176,15 +1406,23 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; uint32_t value = 0; + int ret; if (amdgpu_sriov_vf(adev)) return 0; + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) + return ret; + if (is_support_sw_smu(adev)) value = smu_get_od_percentage(&(adev->smu), SMU_OD_SCLK); else if (adev->powerplay.pp_funcs->get_sclk_od) value = amdgpu_dpm_get_sclk_od(adev); + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + return snprintf(buf, PAGE_SIZE, "%d\n", value); } @@ -1203,10 +1441,12 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev, ret = kstrtol(buf, 0, &value); - if (ret) { - count = -EINVAL; - goto fail; - } + if (ret) + return -EINVAL; + + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) + return ret; if (is_support_sw_smu(adev)) { value = smu_set_od_percentage(&(adev->smu), SMU_OD_SCLK, (uint32_t)value); @@ -1222,7 +1462,9 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev, } } -fail: + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + return count; } @@ -1233,15 +1475,23 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; uint32_t value = 0; + int ret; if (amdgpu_sriov_vf(adev)) return 0; + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) + return ret; + if (is_support_sw_smu(adev)) value = smu_get_od_percentage(&(adev->smu), SMU_OD_MCLK); else if (adev->powerplay.pp_funcs->get_mclk_od) value = amdgpu_dpm_get_mclk_od(adev); + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + return snprintf(buf, PAGE_SIZE, "%d\n", value); } @@ -1260,10 +1510,12 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev, ret = kstrtol(buf, 0, &value); - if (ret) { - count = -EINVAL; - goto fail; - } + if (ret) + return -EINVAL; + + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) + return ret; if (is_support_sw_smu(adev)) { value = smu_set_od_percentage(&(adev->smu), 
SMU_OD_MCLK, (uint32_t)value); @@ -1279,7 +1531,9 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev, } } -fail: + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + return count; } @@ -1309,16 +1563,27 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; + ssize_t size; + int ret; if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) return 0; + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) + return ret; + if (is_support_sw_smu(adev)) - return smu_get_power_profile_mode(&adev->smu, buf); + size = smu_get_power_profile_mode(&adev->smu, buf); else if (adev->powerplay.pp_funcs->get_power_profile_mode) - return amdgpu_dpm_get_power_profile_mode(adev, buf); + size = amdgpu_dpm_get_power_profile_mode(adev, buf); + else + size = snprintf(buf, PAGE_SIZE, "\n"); - return snprintf(buf, PAGE_SIZE, "\n"); + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + + return size; } @@ -1343,7 +1608,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, tmp[1] = '\0'; ret = kstrtol(tmp, 0, &profile_mode); if (ret) - goto fail; + return -EINVAL; if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) return -EINVAL; @@ -1358,23 +1623,30 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, while (tmp_str[0]) { sub_str = strsep(&tmp_str, delimiter); ret = kstrtol(sub_str, 0, &parameter[parameter_size]); - if (ret) { - count = -EINVAL; - goto fail; - } + if (ret) + return -EINVAL; parameter_size++; while (isspace(*tmp_str)) tmp_str++; } } parameter[parameter_size] = profile_mode; + + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) + return ret; + if (is_support_sw_smu(adev)) ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true); else if (adev->powerplay.pp_funcs->set_power_profile_mode) ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size); + + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + if (!ret) return count; -fail: + return -EINVAL; } @@ -1397,10 +1669,17 @@ static ssize_t amdgpu_get_busy_percent(struct device *dev, if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) return 0; + r = pm_runtime_get_sync(ddev->dev); + if (r < 0) + return r; + /* read the IP busy sensor */ r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size); + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + if (r) return r; @@ -1426,10 +1705,17 @@ static ssize_t amdgpu_get_memory_busy_percent(struct device *dev, if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) return 0; + r = pm_runtime_get_sync(ddev->dev); + if (r < 0) + return r; + /* read the IP busy sensor */ r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size); + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + if (r) return r; @@ -1455,11 +1741,20 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; uint64_t count0, count1; + int ret; if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) return 0; + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) + return ret; + amdgpu_asic_get_pcie_usage(adev, &count0, &count1); + + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + return
snprintf(buf, PAGE_SIZE, "%llu %llu %i\n", count0, count1, pcie_get_mps(adev->pdev)); } @@ -1547,42 +1842,43 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev, char *buf) { struct amdgpu_device *adev = dev_get_drvdata(dev); - struct drm_device *ddev = adev->ddev; int channel = to_sensor_dev_attr(attr)->index; int r, temp = 0, size = sizeof(temp); - /* Can't get temperature when the card is off */ - if ((adev->flags & AMD_IS_PX) && - (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) - return -EINVAL; - if (channel >= PP_TEMP_MAX) return -EINVAL; + r = pm_runtime_get_sync(adev->ddev->dev); + if (r < 0) + return r; + switch (channel) { case PP_TEMP_JUNCTION: /* get current junction temperature */ r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP, (void *)&temp, &size); - if (r) - return r; break; case PP_TEMP_EDGE: /* get current edge temperature */ r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP, (void *)&temp, &size); - if (r) - return r; break; case PP_TEMP_MEM: /* get current memory temperature */ r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP, (void *)&temp, &size); - if (r) - return r; + break; + default: + r = -EINVAL; break; } + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + + if (r) + return r; + return snprintf(buf, PAGE_SIZE, "%d\n", temp); } @@ -1678,16 +1974,27 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev, { struct amdgpu_device *adev = dev_get_drvdata(dev); u32 pwm_mode = 0; + int ret; + + ret = pm_runtime_get_sync(adev->ddev->dev); + if (ret < 0) + return ret; if (is_support_sw_smu(adev)) { pwm_mode = smu_get_fan_control_mode(&adev->smu); } else { - if (!adev->powerplay.pp_funcs->get_fan_control_mode) + if (!adev->powerplay.pp_funcs->get_fan_control_mode) { + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); return -EINVAL; + } pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); } + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + return sprintf(buf, "%i\n", pwm_mode); } @@ -1697,27 +2004,32 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev, size_t count) { struct amdgpu_device *adev = dev_get_drvdata(dev); - int err; + int err, ret; int value; - /* Can't adjust fan when the card is off */ - if ((adev->flags & AMD_IS_PX) && - (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) - return -EINVAL; - err = kstrtoint(buf, 10, &value); if (err) return err; + ret = pm_runtime_get_sync(adev->ddev->dev); + if (ret < 0) + return ret; + if (is_support_sw_smu(adev)) { smu_set_fan_control_mode(&adev->smu, value); } else { - if (!adev->powerplay.pp_funcs->set_fan_control_mode) + if (!adev->powerplay.pp_funcs->set_fan_control_mode) { + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); return -EINVAL; + } amdgpu_dpm_set_fan_control_mode(adev, value); } + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + return count; } @@ -1744,34 +2056,43 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev, u32 value; u32 pwm_mode; - /* Can't adjust fan when the card is off */ - if ((adev->flags & AMD_IS_PX) && - (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) - return -EINVAL; + err = pm_runtime_get_sync(adev->ddev->dev); + if (err < 0) + return err; + if (is_support_sw_smu(adev)) pwm_mode = smu_get_fan_control_mode(&adev->smu); else pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); + if 
(pwm_mode != AMD_FAN_CTRL_MANUAL) { pr_info("manual fan speed control should be enabled first\n"); + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); return -EINVAL; } err = kstrtou32(buf, 10, &value); - if (err) + if (err) { + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); return err; + } value = (value * 100) / 255; - if (is_support_sw_smu(adev)) { + if (is_support_sw_smu(adev)) err = smu_set_fan_speed_percent(&adev->smu, value); - if (err) - return err; - } else if (adev->powerplay.pp_funcs->set_fan_speed_percent) { + else if (adev->powerplay.pp_funcs->set_fan_speed_percent) err = amdgpu_dpm_set_fan_speed_percent(adev, value); - if (err) - return err; - } + else + err = -EINVAL; + + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + + if (err) + return err; return count; } @@ -1784,20 +2105,22 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev, int err; u32 speed = 0; - /* Can't adjust fan when the card is off */ - if ((adev->flags & AMD_IS_PX) && - (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) - return -EINVAL; + err = pm_runtime_get_sync(adev->ddev->dev); + if (err < 0) + return err; - if (is_support_sw_smu(adev)) { + if (is_support_sw_smu(adev)) err = smu_get_fan_speed_percent(&adev->smu, &speed); - if (err) - return err; - } else if (adev->powerplay.pp_funcs->get_fan_speed_percent) { + else if (adev->powerplay.pp_funcs->get_fan_speed_percent) err = amdgpu_dpm_get_fan_speed_percent(adev, &speed); - if (err) - return err; - } + else + err = -EINVAL; + + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + + if (err) + return err; speed = (speed * 255) / 100; @@ -1812,20 +2135,22 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev, int err; u32 speed = 0; - /* Can't adjust fan when the card is off */ - if ((adev->flags & AMD_IS_PX) && - (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) - return -EINVAL; + err = pm_runtime_get_sync(adev->ddev->dev); + if (err < 0) + return err; - if (is_support_sw_smu(adev)) { + if (is_support_sw_smu(adev)) err = smu_get_fan_speed_rpm(&adev->smu, &speed); - if (err) - return err; - } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) { + else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed); - if (err) - return err; - } + else + err = -EINVAL; + + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + + if (err) + return err; return sprintf(buf, "%i\n", speed); } @@ -1839,8 +2164,16 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev, u32 size = sizeof(min_rpm); int r; + r = pm_runtime_get_sync(adev->ddev->dev); + if (r < 0) + return r; + r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM, (void *)&min_rpm, &size); + + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + if (r) return r; @@ -1856,8 +2189,16 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev, u32 size = sizeof(max_rpm); int r; + r = pm_runtime_get_sync(adev->ddev->dev); + if (r < 0) + return r; + r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM, (void *)&max_rpm, &size); + + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + if (r) return r; @@ -1872,20 +2213,22 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev, int err; u32 rpm = 0; - /* Can't adjust 
fan when the card is off */ - if ((adev->flags & AMD_IS_PX) && - (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) - return -EINVAL; + err = pm_runtime_get_sync(adev->ddev->dev); + if (err < 0) + return err; - if (is_support_sw_smu(adev)) { + if (is_support_sw_smu(adev)) err = smu_get_fan_speed_rpm(&adev->smu, &rpm); - if (err) - return err; - } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) { + else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm); - if (err) - return err; - } + else + err = -EINVAL; + + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + + if (err) + return err; return sprintf(buf, "%i\n", rpm); } @@ -1899,32 +2242,40 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev, u32 value; u32 pwm_mode; + err = pm_runtime_get_sync(adev->ddev->dev); + if (err < 0) + return err; + if (is_support_sw_smu(adev)) pwm_mode = smu_get_fan_control_mode(&adev->smu); else pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); - if (pwm_mode != AMD_FAN_CTRL_MANUAL) + if (pwm_mode != AMD_FAN_CTRL_MANUAL) { + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); return -ENODATA; - - /* Can't adjust fan when the card is off */ - if ((adev->flags & AMD_IS_PX) && - (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) - return -EINVAL; + } err = kstrtou32(buf, 10, &value); - if (err) + if (err) { + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); return err; + } - if (is_support_sw_smu(adev)) { + if (is_support_sw_smu(adev)) err = smu_set_fan_speed_rpm(&adev->smu, value); - if (err) - return err; - } else if (adev->powerplay.pp_funcs->set_fan_speed_rpm) { + else if (adev->powerplay.pp_funcs->set_fan_speed_rpm) err = amdgpu_dpm_set_fan_speed_rpm(adev, value); - if (err) - return err; - } + else + err = -EINVAL; + + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + + if (err) + return err; return count; } @@ -1935,15 +2286,27 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev, { struct amdgpu_device *adev = dev_get_drvdata(dev); u32 pwm_mode = 0; + int ret; + + ret = pm_runtime_get_sync(adev->ddev->dev); + if (ret < 0) + return ret; if (is_support_sw_smu(adev)) { pwm_mode = smu_get_fan_control_mode(&adev->smu); } else { - if (!adev->powerplay.pp_funcs->get_fan_control_mode) + if (!adev->powerplay.pp_funcs->get_fan_control_mode) { + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); return -EINVAL; + } pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); } + + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 
0 : 1); } @@ -1957,12 +2320,6 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev, int value; u32 pwm_mode; - /* Can't adjust fan when the card is off */ - if ((adev->flags & AMD_IS_PX) && - (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) - return -EINVAL; - - err = kstrtoint(buf, 10, &value); if (err) return err; @@ -1974,14 +2331,24 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev, else return -EINVAL; + err = pm_runtime_get_sync(adev->ddev->dev); + if (err < 0) + return err; + if (is_support_sw_smu(adev)) { smu_set_fan_control_mode(&adev->smu, pwm_mode); } else { - if (!adev->powerplay.pp_funcs->set_fan_control_mode) + if (!adev->powerplay.pp_funcs->set_fan_control_mode) { + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); return -EINVAL; + } amdgpu_dpm_set_fan_control_mode(adev, pwm_mode); } + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + return count; } @@ -1990,18 +2357,20 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev, char *buf) { struct amdgpu_device *adev = dev_get_drvdata(dev); - struct drm_device *ddev = adev->ddev; u32 vddgfx; int r, size = sizeof(vddgfx); - /* Can't get voltage when the card is off */ - if ((adev->flags & AMD_IS_PX) && - (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) - return -EINVAL; + r = pm_runtime_get_sync(adev->ddev->dev); + if (r < 0) + return r; /* get the voltage */ r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&vddgfx, &size); + + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + if (r) return r; @@ -2020,7 +2389,6 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev, char *buf) { struct amdgpu_device *adev = dev_get_drvdata(dev); - struct drm_device *ddev = adev->ddev; u32 vddnb; int r, size = sizeof(vddnb); @@ -2028,14 +2396,17 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev, if (!(adev->flags & AMD_IS_APU)) return -EINVAL; - /* Can't get voltage when the card is off */ - if ((adev->flags & AMD_IS_PX) && - (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) - return -EINVAL; + r = pm_runtime_get_sync(adev->ddev->dev); + if (r < 0) + return r; /* get the voltage */ r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&vddnb, &size); + + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + if (r) return r; @@ -2054,19 +2425,21 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev, char *buf) { struct amdgpu_device *adev = dev_get_drvdata(dev); - struct drm_device *ddev = adev->ddev; u32 query = 0; int r, size = sizeof(u32); unsigned uw; - /* Can't get power when the card is off */ - if ((adev->flags & AMD_IS_PX) && - (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) - return -EINVAL; + r = pm_runtime_get_sync(adev->ddev->dev); + if (r < 0) + return r; /* get the voltage */ r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size); + + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + if (r) return r; @@ -2089,16 +2462,27 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev, { struct amdgpu_device *adev = dev_get_drvdata(dev); uint32_t limit = 0; + ssize_t size; + int r; + + r = pm_runtime_get_sync(adev->ddev->dev); + if (r < 0) + return r; if (is_support_sw_smu(adev)) { smu_get_power_limit(&adev->smu, &limit, true, true); - return snprintf(buf, PAGE_SIZE, "%u\n", limit * 
1000000); + size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) { adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true); - return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); + size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); } else { - return snprintf(buf, PAGE_SIZE, "\n"); + size = snprintf(buf, PAGE_SIZE, "\n"); } + + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + + return size; } static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev, @@ -2107,16 +2491,27 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev, { struct amdgpu_device *adev = dev_get_drvdata(dev); uint32_t limit = 0; + ssize_t size; + int r; + + r = pm_runtime_get_sync(adev->ddev->dev); + if (r < 0) + return r; if (is_support_sw_smu(adev)) { smu_get_power_limit(&adev->smu, &limit, false, true); - return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); + size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) { adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false); - return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); + size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); } else { - return snprintf(buf, PAGE_SIZE, "\n"); + size = snprintf(buf, PAGE_SIZE, "\n"); } + + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + + return size; } @@ -2138,13 +2533,20 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, value = value / 1000000; /* convert to Watt */ - if (is_support_sw_smu(adev)) { + + err = pm_runtime_get_sync(adev->ddev->dev); + if (err < 0) + return err; + + if (is_support_sw_smu(adev)) err = smu_set_power_limit(&adev->smu, value); - } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) { + else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value); - } else { + else err = -EINVAL; - } + + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); if (err) return err; @@ -2157,18 +2559,20 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev, char *buf) { struct amdgpu_device *adev = dev_get_drvdata(dev); - struct drm_device *ddev = adev->ddev; uint32_t sclk; int r, size = sizeof(sclk); - /* Can't get voltage when the card is off */ - if ((adev->flags & AMD_IS_PX) && - (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) - return -EINVAL; + r = pm_runtime_get_sync(adev->ddev->dev); + if (r < 0) + return r; /* get the sclk */ r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&sclk, &size); + + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + if (r) return r; @@ -2187,18 +2591,20 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev, char *buf) { struct amdgpu_device *adev = dev_get_drvdata(dev); - struct drm_device *ddev = adev->ddev; uint32_t mclk; int r, size = sizeof(mclk); - /* Can't get voltage when the card is off */ - if ((adev->flags & AMD_IS_PX) && - (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) - return -EINVAL; + r = pm_runtime_get_sync(adev->ddev->dev); + if (r < 0) + return r; /* get the sclk */ r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&mclk, &size); + + 
pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + if (r) return r; @@ -2762,17 +3168,12 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) { int ret = 0; - if (is_support_sw_smu(adev)) { - ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_UVD, enable); - if (ret) - DRM_ERROR("[SW SMU]: dpm enable uvd failed, state = %s, ret = %d. \n", - enable ? "true" : "false", ret); - } else if (adev->powerplay.pp_funcs->set_powergating_by_smu) { - /* enable/disable UVD */ - mutex_lock(&adev->pm.mutex); - amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable); - mutex_unlock(&adev->pm.mutex); - } + + ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable); + if (ret) + DRM_ERROR("Dpm %s uvd failed, ret = %d. \n", + enable ? "enable" : "disable", ret); + /* enable/disable Low Memory PState for UVD (4k videos) */ if (adev->asic_type == CHIP_STONEY && adev->uvd.decode_image_width >= WIDTH_4K) { @@ -2789,17 +3190,11 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) { int ret = 0; - if (is_support_sw_smu(adev)) { - ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_VCE, enable); - if (ret) - DRM_ERROR("[SW SMU]: dpm enable vce failed, state = %s, ret = %d. \n", - enable ? "true" : "false", ret); - } else if (adev->powerplay.pp_funcs->set_powergating_by_smu) { - /* enable/disable VCE */ - mutex_lock(&adev->pm.mutex); - amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable); - mutex_unlock(&adev->pm.mutex); - } + + ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable); + if (ret) + DRM_ERROR("Dpm %s vce failed, ret = %d. \n", + enable ? "enable" : "disable", ret); } void amdgpu_pm_print_power_states(struct amdgpu_device *adev) @@ -2818,12 +3213,10 @@ void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable) { int ret = 0; - if (is_support_sw_smu(adev)) { - ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_JPEG, enable); - if (ret) - DRM_ERROR("[SW SMU]: dpm enable jpeg failed, state = %s, ret = %d. \n", - enable ? "true" : "false", ret); - } + ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable); + if (ret) + DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n", + enable ? 
"enable" : "disable", ret); } int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version) @@ -3233,8 +3626,12 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct amdgpu_device *adev = dev->dev_private; - struct drm_device *ddev = adev->ddev; u32 flags = 0; + int r; + + r = pm_runtime_get_sync(dev->dev); + if (r < 0) + return r; amdgpu_device_ip_get_clockgating_state(adev, &flags); seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags); @@ -3243,23 +3640,28 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data) if (!adev->pm.dpm_enabled) { seq_printf(m, "dpm not enabled\n"); + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); return 0; } - if ((adev->flags & AMD_IS_PX) && - (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { - seq_printf(m, "PX asic powered off\n"); - } else if (!is_support_sw_smu(adev) && adev->powerplay.pp_funcs->debugfs_print_current_performance_level) { + + if (!is_support_sw_smu(adev) && + adev->powerplay.pp_funcs->debugfs_print_current_performance_level) { mutex_lock(&adev->pm.mutex); if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level) adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m); else seq_printf(m, "Debugfs support not implemented for this asic\n"); mutex_unlock(&adev->pm.mutex); + r = 0; } else { - return amdgpu_debugfs_pm_info_pp(m, adev); + r = amdgpu_debugfs_pm_info_pp(m, adev); } - return 0; + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); + + return r; } static const struct drm_info_list amdgpu_pm_info_list[] = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c index 0e6dba9f60f072210ffd81e3692936a7534a5c9c..07914e34bc2570b356c8b751fb29f8f120e04e69 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c @@ -74,9 +74,9 @@ static void amdgpu_perf_start(struct perf_event *event, int flags) switch (pe->pmu_perf_type) { case PERF_TYPE_AMDGPU_DF: if (!(flags & PERF_EF_RELOAD)) - pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 1); + pe->adev->df.funcs->pmc_start(pe->adev, hwc->conf, 1); - pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 0); + pe->adev->df.funcs->pmc_start(pe->adev, hwc->conf, 0); break; default: break; @@ -101,13 +101,13 @@ static void amdgpu_perf_read(struct perf_event *event) switch (pe->pmu_perf_type) { case PERF_TYPE_AMDGPU_DF: - pe->adev->df_funcs->pmc_get_count(pe->adev, hwc->conf, + pe->adev->df.funcs->pmc_get_count(pe->adev, hwc->conf, &count); break; default: count = 0; break; - }; + } } while (local64_cmpxchg(&hwc->prev_count, prev, count) != prev); local64_add(count - prev, &event->count); @@ -126,11 +126,11 @@ static void amdgpu_perf_stop(struct perf_event *event, int flags) switch (pe->pmu_perf_type) { case PERF_TYPE_AMDGPU_DF: - pe->adev->df_funcs->pmc_stop(pe->adev, hwc->conf, 0); + pe->adev->df.funcs->pmc_stop(pe->adev, hwc->conf, 0); break; default: break; - }; + } WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); hwc->state |= PERF_HES_STOPPED; @@ -156,11 +156,11 @@ static int amdgpu_perf_add(struct perf_event *event, int flags) switch (pe->pmu_perf_type) { case PERF_TYPE_AMDGPU_DF: - retval = pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 1); + retval = pe->adev->df.funcs->pmc_start(pe->adev, hwc->conf, 1); break; default: return 0; - }; + } if (retval) return retval; 
@@ -184,11 +184,11 @@ static void amdgpu_perf_del(struct perf_event *event, int flags) switch (pe->pmu_perf_type) { case PERF_TYPE_AMDGPU_DF: - pe->adev->df_funcs->pmc_stop(pe->adev, hwc->conf, 1); + pe->adev->df.funcs->pmc_stop(pe->adev, hwc->conf, 1); break; default: break; - }; + } perf_event_update_userpage(event); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index c14f2ccd0677768fadb69dec721058d83dd789bc..3a1570dafe3482ac93992c0e76e1777d185721d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -191,9 +191,9 @@ psp_cmd_submit_buf(struct psp_context *psp, if (ucode) DRM_WARN("failed to load ucode id (%d) ", ucode->ucode_id); - DRM_DEBUG_DRIVER("psp command (0x%X) failed and response status is (0x%X)\n", + DRM_WARN("psp command (0x%X) failed and response status is (0x%X)\n", psp->cmd_buf_mem->cmd_id, - psp->cmd_buf_mem->resp.status & GFX_CMD_STATUS_MASK); + psp->cmd_buf_mem->resp.status); if (!timeout) { mutex_unlock(&psp->mutex); return -EINVAL; @@ -365,11 +365,11 @@ static int psp_asd_load(struct psp_context *psp) return ret; } -static void psp_prep_asd_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd, - uint32_t asd_session_id) +static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd, + uint32_t session_id) { cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA; - cmd->cmd.cmd_unload_ta.session_id = asd_session_id; + cmd->cmd.cmd_unload_ta.session_id = session_id; } static int psp_asd_unload(struct psp_context *psp) @@ -387,7 +387,7 @@ static int psp_asd_unload(struct psp_context *psp) if (!cmd) return -ENOMEM; - psp_prep_asd_unload_cmd_buf(cmd, psp->asd_context.session_id); + psp_prep_ta_unload_cmd_buf(cmd, psp->asd_context.session_id); ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); @@ -427,18 +427,20 @@ int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg, return ret; } -static void psp_prep_xgmi_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, - uint64_t xgmi_ta_mc, uint64_t xgmi_mc_shared, - uint32_t xgmi_ta_size, uint32_t shared_size) +static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, + uint64_t ta_bin_mc, + uint32_t ta_bin_size, + uint64_t ta_shared_mc, + uint32_t ta_shared_size) { - cmd->cmd_id = GFX_CMD_ID_LOAD_TA; - cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(xgmi_ta_mc); - cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(xgmi_ta_mc); - cmd->cmd.cmd_load_ta.app_len = xgmi_ta_size; + cmd->cmd_id = GFX_CMD_ID_LOAD_TA; + cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc); + cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc); + cmd->cmd.cmd_load_ta.app_len = ta_bin_size; - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(xgmi_mc_shared); - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(xgmi_mc_shared); - cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size; + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ta_shared_mc); + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ta_shared_mc); + cmd->cmd.cmd_load_ta.cmd_buf_len = ta_shared_size; } static int psp_xgmi_init_shared_buf(struct psp_context *psp) @@ -458,6 +460,36 @@ static int psp_xgmi_init_shared_buf(struct psp_context *psp) return ret; } +static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, + uint32_t ta_cmd_id, + uint32_t session_id) +{ + cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD; + cmd->cmd.cmd_invoke_cmd.session_id = session_id; + cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id; +} + +int 
psp_ta_invoke(struct psp_context *psp, + uint32_t ta_cmd_id, + uint32_t session_id) +{ + int ret; + struct psp_gfx_cmd_resp *cmd; + + cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, session_id); + + ret = psp_cmd_submit_buf(psp, NULL, cmd, + psp->fence_buf_mc_addr); + + kfree(cmd); + + return ret; +} + static int psp_xgmi_load(struct psp_context *psp) { int ret; @@ -466,8 +498,6 @@ static int psp_xgmi_load(struct psp_context *psp) /* * TODO: bypass the loading in sriov for now */ - if (amdgpu_sriov_vf(psp->adev)) - return 0; cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); if (!cmd) @@ -476,9 +506,11 @@ static int psp_xgmi_load(struct psp_context *psp) memset(psp->fw_pri_buf, 0, PSP_1_MEG); memcpy(psp->fw_pri_buf, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size); - psp_prep_xgmi_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, - psp->xgmi_context.xgmi_shared_mc_addr, - psp->ta_xgmi_ucode_size, PSP_XGMI_SHARED_MEM_SIZE); + psp_prep_ta_load_cmd_buf(cmd, + psp->fw_pri_mc_addr, + psp->ta_xgmi_ucode_size, + psp->xgmi_context.xgmi_shared_mc_addr, + PSP_XGMI_SHARED_MEM_SIZE); ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); @@ -493,29 +525,25 @@ static int psp_xgmi_load(struct psp_context *psp) return ret; } -static void psp_prep_xgmi_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd, - uint32_t xgmi_session_id) -{ - cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA; - cmd->cmd.cmd_unload_ta.session_id = xgmi_session_id; -} - static int psp_xgmi_unload(struct psp_context *psp) { int ret; struct psp_gfx_cmd_resp *cmd; + struct amdgpu_device *adev = psp->adev; + + /* XGMI TA unload currently is not supported on Arcturus */ + if (adev->asic_type == CHIP_ARCTURUS) + return 0; /* * TODO: bypass the unloading in sriov for now */ - if (amdgpu_sriov_vf(psp->adev)) - return 0; cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); if (!cmd) return -ENOMEM; - psp_prep_xgmi_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id); + psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id); ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); @@ -525,40 +553,9 @@ static int psp_xgmi_unload(struct psp_context *psp) return ret; } -static void psp_prep_xgmi_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, - uint32_t ta_cmd_id, - uint32_t xgmi_session_id) -{ - cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD; - cmd->cmd.cmd_invoke_cmd.session_id = xgmi_session_id; - cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id; - /* Note: cmd_invoke_cmd.buf is not used for now */ -} - int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id) { - int ret; - struct psp_gfx_cmd_resp *cmd; - - /* - * TODO: bypass the loading in sriov for now - */ - if (amdgpu_sriov_vf(psp->adev)) - return 0; - - cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); - if (!cmd) - return -ENOMEM; - - psp_prep_xgmi_ta_invoke_cmd_buf(cmd, ta_cmd_id, - psp->xgmi_context.session_id); - - ret = psp_cmd_submit_buf(psp, NULL, cmd, - psp->fence_buf_mc_addr); - - kfree(cmd); - - return ret; + return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id); } static int psp_xgmi_terminate(struct psp_context *psp) @@ -614,20 +611,6 @@ static int psp_xgmi_initialize(struct psp_context *psp) } // ras begin -static void psp_prep_ras_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, - uint64_t ras_ta_mc, uint64_t ras_mc_shared, - uint32_t ras_ta_size, uint32_t shared_size) -{ - cmd->cmd_id = GFX_CMD_ID_LOAD_TA; - cmd->cmd.cmd_load_ta.app_phy_addr_lo = 
lower_32_bits(ras_ta_mc); - cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ras_ta_mc); - cmd->cmd.cmd_load_ta.app_len = ras_ta_size; - - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ras_mc_shared); - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ras_mc_shared); - cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size; -} - static int psp_ras_init_shared_buf(struct psp_context *psp) { int ret; @@ -663,15 +646,17 @@ static int psp_ras_load(struct psp_context *psp) memset(psp->fw_pri_buf, 0, PSP_1_MEG); memcpy(psp->fw_pri_buf, psp->ta_ras_start_addr, psp->ta_ras_ucode_size); - psp_prep_ras_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, - psp->ras.ras_shared_mc_addr, - psp->ta_ras_ucode_size, PSP_RAS_SHARED_MEM_SIZE); + psp_prep_ta_load_cmd_buf(cmd, + psp->fw_pri_mc_addr, + psp->ta_ras_ucode_size, + psp->ras.ras_shared_mc_addr, + PSP_RAS_SHARED_MEM_SIZE); ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); if (!ret) { - psp->ras.ras_initialized = 1; + psp->ras.ras_initialized = true; psp->ras.session_id = cmd->resp.session_id; } @@ -680,13 +665,6 @@ static int psp_ras_load(struct psp_context *psp) return ret; } -static void psp_prep_ras_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd, - uint32_t ras_session_id) -{ - cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA; - cmd->cmd.cmd_unload_ta.session_id = ras_session_id; -} - static int psp_ras_unload(struct psp_context *psp) { int ret; @@ -702,7 +680,7 @@ static int psp_ras_unload(struct psp_context *psp) if (!cmd) return -ENOMEM; - psp_prep_ras_ta_unload_cmd_buf(cmd, psp->ras.session_id); + psp_prep_ta_unload_cmd_buf(cmd, psp->ras.session_id); ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); @@ -712,40 +690,15 @@ static int psp_ras_unload(struct psp_context *psp) return ret; } -static void psp_prep_ras_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, - uint32_t ta_cmd_id, - uint32_t ras_session_id) -{ - cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD; - cmd->cmd.cmd_invoke_cmd.session_id = ras_session_id; - cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id; - /* Note: cmd_invoke_cmd.buf is not used for now */ -} - int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id) { - int ret; - struct psp_gfx_cmd_resp *cmd; - /* * TODO: bypass the loading in sriov for now */ if (amdgpu_sriov_vf(psp->adev)) return 0; - cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); - if (!cmd) - return -ENOMEM; - - psp_prep_ras_ta_invoke_cmd_buf(cmd, ta_cmd_id, - psp->ras.session_id); - - ret = psp_cmd_submit_buf(psp, NULL, cmd, - psp->fence_buf_mc_addr); - - kfree(cmd); - - return ret; + return psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id); } int psp_ras_enable_features(struct psp_context *psp, @@ -791,7 +744,7 @@ static int psp_ras_terminate(struct psp_context *psp) if (ret) return ret; - psp->ras.ras_initialized = 0; + psp->ras.ras_initialized = false; /* free ras shared memory */ amdgpu_bo_free_kernel(&psp->ras.ras_shared_bo, @@ -832,24 +785,6 @@ static int psp_ras_initialize(struct psp_context *psp) // ras end // HDCP start -static void psp_prep_hdcp_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, - uint64_t hdcp_ta_mc, - uint64_t hdcp_mc_shared, - uint32_t hdcp_ta_size, - uint32_t shared_size) -{ - cmd->cmd_id = GFX_CMD_ID_LOAD_TA; - cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(hdcp_ta_mc); - cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(hdcp_ta_mc); - cmd->cmd.cmd_load_ta.app_len = hdcp_ta_size; - - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = - lower_32_bits(hdcp_mc_shared); - 
cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = - upper_32_bits(hdcp_mc_shared); - cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size; -} - static int psp_hdcp_init_shared_buf(struct psp_context *psp) { int ret; @@ -886,15 +821,16 @@ static int psp_hdcp_load(struct psp_context *psp) memcpy(psp->fw_pri_buf, psp->ta_hdcp_start_addr, psp->ta_hdcp_ucode_size); - psp_prep_hdcp_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, - psp->hdcp_context.hdcp_shared_mc_addr, - psp->ta_hdcp_ucode_size, - PSP_HDCP_SHARED_MEM_SIZE); + psp_prep_ta_load_cmd_buf(cmd, + psp->fw_pri_mc_addr, + psp->ta_hdcp_ucode_size, + psp->hdcp_context.hdcp_shared_mc_addr, + PSP_HDCP_SHARED_MEM_SIZE); ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); if (!ret) { - psp->hdcp_context.hdcp_initialized = 1; + psp->hdcp_context.hdcp_initialized = true; psp->hdcp_context.session_id = cmd->resp.session_id; } @@ -930,12 +866,6 @@ static int psp_hdcp_initialize(struct psp_context *psp) return 0; } -static void psp_prep_hdcp_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd, - uint32_t hdcp_session_id) -{ - cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA; - cmd->cmd.cmd_unload_ta.session_id = hdcp_session_id; -} static int psp_hdcp_unload(struct psp_context *psp) { @@ -952,7 +882,7 @@ static int psp_hdcp_unload(struct psp_context *psp) if (!cmd) return -ENOMEM; - psp_prep_hdcp_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id); + psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id); ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); @@ -961,39 +891,15 @@ static int psp_hdcp_unload(struct psp_context *psp) return ret; } -static void psp_prep_hdcp_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, - uint32_t ta_cmd_id, - uint32_t hdcp_session_id) -{ - cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD; - cmd->cmd.cmd_invoke_cmd.session_id = hdcp_session_id; - cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id; - /* Note: cmd_invoke_cmd.buf is not used for now */ -} - int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id) { - int ret; - struct psp_gfx_cmd_resp *cmd; - /* * TODO: bypass the loading in sriov for now */ if (amdgpu_sriov_vf(psp->adev)) return 0; - cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); - if (!cmd) - return -ENOMEM; - - psp_prep_hdcp_ta_invoke_cmd_buf(cmd, ta_cmd_id, - psp->hdcp_context.session_id); - - ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); - - kfree(cmd); - - return ret; + return psp_ta_invoke(psp, ta_cmd_id, psp->hdcp_context.session_id); } static int psp_hdcp_terminate(struct psp_context *psp) @@ -1013,7 +919,7 @@ static int psp_hdcp_terminate(struct psp_context *psp) if (ret) return ret; - psp->hdcp_context.hdcp_initialized = 0; + psp->hdcp_context.hdcp_initialized = false; /* free hdcp shared memory */ amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo, @@ -1025,22 +931,6 @@ static int psp_hdcp_terminate(struct psp_context *psp) // HDCP end // DTM start -static void psp_prep_dtm_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, - uint64_t dtm_ta_mc, - uint64_t dtm_mc_shared, - uint32_t dtm_ta_size, - uint32_t shared_size) -{ - cmd->cmd_id = GFX_CMD_ID_LOAD_TA; - cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(dtm_ta_mc); - cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(dtm_ta_mc); - cmd->cmd.cmd_load_ta.app_len = dtm_ta_size; - - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(dtm_mc_shared); - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(dtm_mc_shared); - cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size; -} - static int 
psp_dtm_init_shared_buf(struct psp_context *psp) { int ret; @@ -1076,15 +966,16 @@ static int psp_dtm_load(struct psp_context *psp) memset(psp->fw_pri_buf, 0, PSP_1_MEG); memcpy(psp->fw_pri_buf, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size); - psp_prep_dtm_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, - psp->dtm_context.dtm_shared_mc_addr, - psp->ta_dtm_ucode_size, - PSP_DTM_SHARED_MEM_SIZE); + psp_prep_ta_load_cmd_buf(cmd, + psp->fw_pri_mc_addr, + psp->ta_dtm_ucode_size, + psp->dtm_context.dtm_shared_mc_addr, + PSP_DTM_SHARED_MEM_SIZE); ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); if (!ret) { - psp->dtm_context.dtm_initialized = 1; + psp->dtm_context.dtm_initialized = true; psp->dtm_context.session_id = cmd->resp.session_id; } @@ -1122,39 +1013,15 @@ static int psp_dtm_initialize(struct psp_context *psp) return 0; } -static void psp_prep_dtm_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, - uint32_t ta_cmd_id, - uint32_t dtm_session_id) -{ - cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD; - cmd->cmd.cmd_invoke_cmd.session_id = dtm_session_id; - cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id; - /* Note: cmd_invoke_cmd.buf is not used for now */ -} - int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id) { - int ret; - struct psp_gfx_cmd_resp *cmd; - /* * TODO: bypass the loading in sriov for now */ if (amdgpu_sriov_vf(psp->adev)) return 0; - cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); - if (!cmd) - return -ENOMEM; - - psp_prep_dtm_ta_invoke_cmd_buf(cmd, ta_cmd_id, - psp->dtm_context.session_id); - - ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); - - kfree(cmd); - - return ret; + return psp_ta_invoke(psp, ta_cmd_id, psp->dtm_context.session_id); } static int psp_dtm_terminate(struct psp_context *psp) @@ -1174,7 +1041,7 @@ static int psp_dtm_terminate(struct psp_context *psp) if (ret) return ret; - psp->dtm_context.dtm_initialized = 0; + psp->dtm_context.dtm_initialized = false; /* free hdcp shared memory */ amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo, @@ -1310,6 +1177,9 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode, case AMDGPU_UCODE_ID_VCN: *type = GFX_FW_TYPE_VCN; break; + case AMDGPU_UCODE_ID_VCN1: + *type = GFX_FW_TYPE_VCN1; + break; case AMDGPU_UCODE_ID_DMCU_ERAM: *type = GFX_FW_TYPE_DMCU_ERAM; break; @@ -1454,7 +1324,8 @@ out: || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM - || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM)) + || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM + || ucode->ucode_id == AMDGPU_UCODE_ID_SMC)) /*skip ucode loading in SRIOV VF */ continue; @@ -1472,7 +1343,7 @@ out: /* Start rlc autoload after psp recieved all the gfx firmware */ if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ? 
- AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM)) { + AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) { ret = psp_rlc_autoload(psp); if (ret) { DRM_ERROR("Failed to start rlc autoload\n"); @@ -1503,16 +1374,13 @@ static int psp_load_fw(struct amdgpu_device *adev) if (!psp->cmd) return -ENOMEM; - /* this fw pri bo is not used under SRIOV */ - if (!amdgpu_sriov_vf(psp->adev)) { - ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG, - AMDGPU_GEM_DOMAIN_GTT, - &psp->fw_pri_bo, - &psp->fw_pri_mc_addr, - &psp->fw_pri_buf); - if (ret) - goto failed; - } + ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG, + AMDGPU_GEM_DOMAIN_GTT, + &psp->fw_pri_bo, + &psp->fw_pri_mc_addr, + &psp->fw_pri_buf); + if (ret) + goto failed; ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 5f8fd3e3535bcf6481cf7cc12fac1645e5eb10b8..611021514c5250d4f07e2a507c69abebeda8a6c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -172,6 +172,8 @@ struct psp_dtm_context { #define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942 #define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES 0x1000 #define GDDR6_MEM_TRAINING_OFFSET 0x8000 +/*Define the VRAM size that will be encroached by BIST training.*/ +#define GDDR6_MEM_TRAINING_ENCROACHED_SIZE 0x2000000 enum psp_memory_training_init_flag { PSP_MEM_TRAIN_NOT_SUPPORT = 0x0, @@ -202,7 +204,6 @@ struct psp_memory_training_context { /*vram offset of the p2c training data*/ u64 p2c_train_data_offset; - struct amdgpu_bo *p2c_bo; /*vram offset of the c2p training data*/ u64 c2p_train_data_offset; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 04394c45aa031211da38f4688d42a1666d6bc64a..cef94e2169fe4532cf37ff91f975eca14845369c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -315,7 +315,7 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user * default: ret = -EINVAL; break; - }; + } if (ret) return -EINVAL; @@ -686,6 +686,7 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev, { struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); struct ras_err_data err_data = {0, 0, 0, NULL}; + int i; if (!obj) return -EINVAL; @@ -700,6 +701,13 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev, if (adev->umc.funcs->query_ras_error_address) adev->umc.funcs->query_ras_error_address(adev, &err_data); break; + case AMDGPU_RAS_BLOCK__SDMA: + if (adev->sdma.funcs->query_ras_error_count) { + for (i = 0; i < adev->sdma.num_instances; i++) + adev->sdma.funcs->query_ras_error_count(adev, i, + &err_data); + } + break; case AMDGPU_RAS_BLOCK__GFX: if (adev->gfx.funcs->query_ras_error_count) adev->gfx.funcs->query_ras_error_count(adev, &err_data); @@ -734,6 +742,20 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev, return 0; } +uint64_t get_xgmi_relative_phy_addr(struct amdgpu_device *adev, uint64_t addr) +{ + uint32_t df_inst_id; + + if ((!adev->df.funcs) || + (!adev->df.funcs->get_df_inst_id) || + (!adev->df.funcs->get_dram_base_addr)) + return addr; + + df_inst_id = adev->df.funcs->get_df_inst_id(adev); + + return addr + adev->df.funcs->get_dram_base_addr(adev, df_inst_id); +} + /* wrapper of psp_ras_trigger_error */ int amdgpu_ras_error_inject(struct amdgpu_device *adev, struct ras_inject_if *info) @@ -751,6 +773,12 @@ int 
amdgpu_ras_error_inject(struct amdgpu_device *adev, if (!obj) return -EINVAL; + /* Calculate XGMI relative offset */ + if (adev->gmc.xgmi.num_physical_nodes > 1) { + block_info.address = get_xgmi_relative_phy_addr(adev, + block_info.address); + } + switch (info->head.block) { case AMDGPU_RAS_BLOCK__GFX: if (adev->gfx.funcs->ras_error_inject) @@ -1311,6 +1339,7 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, data = con->eh_data; if (!data || data->count == 0) { *bps = NULL; + ret = -EINVAL; goto out; } @@ -1344,7 +1373,8 @@ static void amdgpu_ras_do_recovery(struct work_struct *work) struct amdgpu_ras *ras = container_of(work, struct amdgpu_ras, recovery_work); - amdgpu_device_gpu_recover(ras->adev, 0); + if (amdgpu_device_should_recover_gpu(ras->adev)) + amdgpu_device_gpu_recover(ras->adev, 0); atomic_set(&ras->in_recovery, 0); } @@ -1870,7 +1900,7 @@ void amdgpu_ras_resume(struct amdgpu_device *adev) * See feature_enable_on_boot */ amdgpu_ras_disable_all_features(adev, 1); - amdgpu_ras_reset_gpu(adev, 0); + amdgpu_ras_reset_gpu(adev); } } @@ -1933,6 +1963,6 @@ void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev) if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) { DRM_WARN("RAS event of type ERREVENT_ATHUB_INTERRUPT detected!\n"); - amdgpu_ras_reset_gpu(adev, false); + amdgpu_ras_reset_gpu(adev); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index d4ade47392455e5810688198e9a6d35711c1f622..a5fe29a9373eab17ad61d77eab525e8c7946e0be 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -494,8 +494,7 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev); -static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev, - bool is_baco) +static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev) { struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c index 6010999d90208321378e660c35ce5ab6b9c7af13..a2ee30b16212811648f52e818e26af77f748d14f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c @@ -160,7 +160,7 @@ int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry) { kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); - amdgpu_ras_reset_gpu(adev, 0); + amdgpu_ras_reset_gpu(adev); return AMDGPU_RAS_SUCCESS; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index 761ff8be63147559621219d13a4d25ba30a70024..485335267d780de5b082c78a288f41f570212a2b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -50,8 +50,18 @@ struct amdgpu_sdma_instance { bool burst_nop; }; +struct amdgpu_sdma_ras_funcs { + int (*ras_late_init)(struct amdgpu_device *adev, + void *ras_ih_info); + void (*ras_fini)(struct amdgpu_device *adev); + int (*query_ras_error_count)(struct amdgpu_device *adev, + uint32_t instance, void *ras_error_status); +}; + struct amdgpu_sdma { struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES]; + struct drm_gpu_scheduler *sdma_sched[AMDGPU_MAX_SDMA_INSTANCES]; + uint32_t num_sdma_sched; struct amdgpu_irq_src trap_irq; struct amdgpu_irq_src illegal_inst_irq; struct amdgpu_irq_src ecc_irq; @@ -59,6 +69,7 @@ struct amdgpu_sdma { uint32_t srbm_soft_reset; bool has_page_queue; struct ras_common_if *ras_if; + const struct 
amdgpu_sdma_ras_funcs *funcs; }; /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 445de594c214bfd53afe59fef3c5fdf9c65580b1..dee446278417c01653ff6fb8a981ba9835a6d7c3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include @@ -1714,12 +1715,17 @@ static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev) amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL); ctx->c2p_bo = NULL; - amdgpu_bo_free_kernel(&ctx->p2c_bo, NULL, NULL); - ctx->p2c_bo = NULL; - return 0; } +static u64 amdgpu_ttm_training_get_c2p_offset(u64 vram_size) +{ + if ((vram_size & (SZ_1M - 1)) < (SZ_4K + 1) ) + vram_size -= SZ_1M; + + return ALIGN(vram_size, SZ_1M); +} + /** * amdgpu_ttm_training_reserve_vram_init - create bo vram reservation from memory training * @@ -1738,7 +1744,7 @@ static int amdgpu_ttm_training_reserve_vram_init(struct amdgpu_device *adev) return 0; } - ctx->c2p_train_data_offset = adev->fw_vram_usage.mem_train_fb_loc; + ctx->c2p_train_data_offset = amdgpu_ttm_training_get_c2p_offset(adev->gmc.mc_vram_size); ctx->p2c_train_data_offset = (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET); ctx->train_data_size = GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES; @@ -1747,17 +1753,6 @@ static int amdgpu_ttm_training_reserve_vram_init(struct amdgpu_device *adev) ctx->p2c_train_data_offset, ctx->c2p_train_data_offset); - ret = amdgpu_bo_create_kernel_at(adev, - ctx->p2c_train_data_offset, - ctx->train_data_size, - AMDGPU_GEM_DOMAIN_VRAM, - &ctx->p2c_bo, - NULL); - if (ret) { - DRM_ERROR("alloc p2c_bo failed(%d)!\n", ret); - goto Err_out; - } - ret = amdgpu_bo_create_kernel_at(adev, ctx->c2p_train_data_offset, ctx->train_data_size, @@ -1766,15 +1761,12 @@ static int amdgpu_ttm_training_reserve_vram_init(struct amdgpu_device *adev) NULL); if (ret) { DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret); - goto Err_out; + amdgpu_ttm_training_reserve_vram_fini(adev); + return ret; } ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS; return 0; - -Err_out: - amdgpu_ttm_training_reserve_vram_fini(adev); - return ret; } /** @@ -1987,11 +1979,13 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable) if (enable) { struct amdgpu_ring *ring; - struct drm_sched_rq *rq; + struct drm_gpu_scheduler *sched; ring = adev->mman.buffer_funcs_ring; - rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL]; - r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL); + sched = &ring->sched; + r = drm_sched_entity_init(&adev->mman.entity, + DRM_SCHED_PRIORITY_KERNEL, &sched, + 1, NULL); if (r) { DRM_ERROR("Failed setting up TTM BO move entity (%d)\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index eaf2d5b9c92fea2b9d5c802e2a404792acaf37d3..b0e656409c0387f75c90153be14049563d26e20f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -300,10 +300,10 @@ enum AMDGPU_UCODE_ID { AMDGPU_UCODE_ID_CP_MEC2_JT, AMDGPU_UCODE_ID_CP_MES, AMDGPU_UCODE_ID_CP_MES_DATA, - AMDGPU_UCODE_ID_RLC_G, AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL, AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM, AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM, + AMDGPU_UCODE_ID_RLC_G, AMDGPU_UCODE_ID_STORAGE, AMDGPU_UCODE_ID_SMC, AMDGPU_UCODE_ID_UVD, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index 
d4fb9cf27e2120dcab060fa633f272149e06f164..f4d40855147b6e350198436be1ad1cce1892f503 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -95,13 +95,6 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev, { struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; - /* When “Full RAS” is enabled, the per-IP interrupt sources should - * be disabled and the driver should only look for the aggregated - * interrupt via sync flood - */ - if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) - return AMDGPU_RAS_SUCCESS; - kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); if (adev->umc.funcs && adev->umc.funcs->query_ras_error_count) @@ -113,6 +106,7 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev, err_data->err_addr = kcalloc(adev->umc.max_ras_err_cnt_per_query, sizeof(struct eeprom_table_record), GFP_KERNEL); + /* still call query_ras_error_address to clear error status * even NOMEM error is encountered */ @@ -132,7 +126,7 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev, err_data->err_addr_cnt)) DRM_WARN("Failed to add ras bad page!\n"); - amdgpu_ras_reset_gpu(adev, 0); + amdgpu_ras_reset_gpu(adev); } kfree(err_data->err_addr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h index 3283032a78e50baa7d4febb9d626296116efcf87..a615a1eb750be4b734f4bd130d9c4ab1ada7f603 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h @@ -21,38 +21,6 @@ #ifndef __AMDGPU_UMC_H__ #define __AMDGPU_UMC_H__ -/* implement 64 bits REG operations via 32 bits interface */ -#define RREG64_UMC(reg) (RREG32(reg) | \ - ((uint64_t)RREG32((reg) + 1) << 32)) -#define WREG64_UMC(reg, v) \ - do { \ - WREG32((reg), lower_32_bits(v)); \ - WREG32((reg) + 1, upper_32_bits(v)); \ - } while (0) - -/* - * void (*func)(struct amdgpu_device *adev, struct ras_err_data *err_data, - * uint32_t umc_reg_offset, uint32_t channel_index) - */ -#define amdgpu_umc_for_each_channel(func) \ - struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; \ - uint32_t umc_inst, channel_inst, umc_reg_offset, channel_index; \ - for (umc_inst = 0; umc_inst < adev->umc.umc_inst_num; umc_inst++) { \ - /* enable the index mode to query eror count per channel */ \ - adev->umc.funcs->enable_umc_index_mode(adev, umc_inst); \ - for (channel_inst = 0; \ - channel_inst < adev->umc.channel_inst_num; \ - channel_inst++) { \ - /* calc the register offset according to channel instance */ \ - umc_reg_offset = adev->umc.channel_offs * channel_inst; \ - /* get channel index of interleaved memory */ \ - channel_index = adev->umc.channel_idx_tbl[ \ - umc_inst * adev->umc.channel_inst_num + channel_inst]; \ - (func)(adev, err_data, umc_reg_offset, channel_index); \ - } \ - } \ - adev->umc.funcs->disable_umc_index_mode(adev); - struct amdgpu_umc_funcs { void (*err_cnt_init)(struct amdgpu_device *adev); int (*ras_late_init)(struct amdgpu_device *adev); @@ -60,9 +28,6 @@ struct amdgpu_umc_funcs { void *ras_error_status); void (*query_ras_error_address)(struct amdgpu_device *adev, void *ras_error_status); - void (*enable_umc_index_mode)(struct amdgpu_device *adev, - uint32_t umc_instance); - void (*disable_umc_index_mode)(struct amdgpu_device *adev); void (*init_registers)(struct amdgpu_device *adev); }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 
d587ffe2af8eec6ed491da08e19a9c1d0a8645b2..a92f3b18e65745800dbb2455d30846fccaeb9982 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -330,12 +330,13 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) int amdgpu_uvd_entity_init(struct amdgpu_device *adev) { struct amdgpu_ring *ring; - struct drm_sched_rq *rq; + struct drm_gpu_scheduler *sched; int r; ring = &adev->uvd.inst[0].ring; - rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; - r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL); + sched = &ring->sched; + r = drm_sched_entity_init(&adev->uvd.entity, DRM_SCHED_PRIORITY_NORMAL, + &sched, 1, NULL); if (r) { DRM_ERROR("Failed setting up UVD kernel entity.\n"); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 46b590af2fd27c4ea3b8358c533c801b4e901588..59ddba137946bd5e5a345eab215fb5c823628d3c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -240,12 +240,13 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev) int amdgpu_vce_entity_init(struct amdgpu_device *adev) { struct amdgpu_ring *ring; - struct drm_sched_rq *rq; + struct drm_gpu_scheduler *sched; int r; ring = &adev->vce.ring[0]; - rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; - r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL); + sched = &ring->sched; + r = drm_sched_entity_init(&adev->vce.entity, DRM_SCHED_PRIORITY_NORMAL, + &sched, 1, NULL); if (r != 0) { DRM_ERROR("Failed setting up VCE run queue.\n"); return r; @@ -651,7 +652,7 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, if ((addr + (uint64_t)size) > (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) { - DRM_ERROR("BO to small for addr 0x%010Lx %d %d\n", + DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n", addr, lo, hi); return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 428cfd58b37de78bb33f64be235778723b0e72a8..f96464e2c157fa6fe575a6ee175790caa87815dc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -28,19 +28,10 @@ #include #include -#include - #include "amdgpu.h" #include "amdgpu_pm.h" #include "amdgpu_vcn.h" #include "soc15d.h" -#include "soc15_common.h" - -#include "vcn/vcn_1_0_offset.h" -#include "vcn/vcn_1_0_sh_mask.h" - -/* 1 second timeout */ -#define VCN_IDLE_TIMEOUT msecs_to_jiffies(1000) /* Firmware Names */ #define FIRMWARE_RAVEN "amdgpu/raven_vcn.bin" @@ -84,6 +75,9 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) break; case CHIP_ARCTURUS: fw_name = FIRMWARE_ARCTURUS; + if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && + (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) + adev->vcn.indirect_sram = true; break; case CHIP_RENOIR: fw_name = FIRMWARE_RENOIR; @@ -174,15 +168,15 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r); return r; } - } - if (adev->vcn.indirect_sram) { - r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.dpg_sram_bo, - &adev->vcn.dpg_sram_gpu_addr, &adev->vcn.dpg_sram_cpu_addr); - if (r) { - dev_err(adev->dev, "(%d) failed to allocate DPG bo\n", r); - return r; + if (adev->vcn.indirect_sram) { + r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo, + &adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr); + if (r) { 
+ dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r); + return r; + } } } @@ -195,15 +189,14 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev) cancel_delayed_work_sync(&adev->vcn.idle_work); - if (adev->vcn.indirect_sram) { - amdgpu_bo_free_kernel(&adev->vcn.dpg_sram_bo, - &adev->vcn.dpg_sram_gpu_addr, - (void **)&adev->vcn.dpg_sram_cpu_addr); - } - for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { if (adev->vcn.harvest_config & (1 << j)) continue; + if (adev->vcn.indirect_sram) { + amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo, + &adev->vcn.inst[j].dpg_sram_gpu_addr, + (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr); + } kvfree(adev->vcn.inst[j].saved_bo); amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo, @@ -294,6 +287,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work) for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { if (adev->vcn.harvest_config & (1 << j)) continue; + for (i = 0; i < adev->vcn.num_enc_rings; ++i) { fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]); } @@ -306,26 +300,17 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work) else new_state.fw_based = VCN_DPG_STATE__UNPAUSE; - if (amdgpu_fence_count_emitted(&adev->jpeg.inst[j].ring_dec)) - new_state.jpeg = VCN_DPG_STATE__PAUSE; - else - new_state.jpeg = VCN_DPG_STATE__UNPAUSE; - - adev->vcn.pause_dpg_mode(adev, &new_state); + adev->vcn.pause_dpg_mode(adev, j, &new_state); } - fence[j] += amdgpu_fence_count_emitted(&adev->jpeg.inst[j].ring_dec); fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec); fences += fence[j]; } if (fences == 0) { amdgpu_gfx_off_ctrl(adev, true); - if (adev->asic_type < CHIP_ARCTURUS && adev->pm.dpm_enabled) - amdgpu_dpm_enable_uvd(adev, false); - else - amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, - AMD_PG_STATE_GATE); + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, + AMD_PG_STATE_GATE); } else { schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT); } @@ -338,11 +323,8 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring) if (set_clocks) { amdgpu_gfx_off_ctrl(adev, false); - if (adev->asic_type < CHIP_ARCTURUS && adev->pm.dpm_enabled) - amdgpu_dpm_enable_uvd(adev, true); - else - amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, - AMD_PG_STATE_UNGATE); + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, + AMD_PG_STATE_UNGATE); } if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { @@ -358,17 +340,10 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring) else new_state.fw_based = VCN_DPG_STATE__UNPAUSE; - if (amdgpu_fence_count_emitted(&adev->jpeg.inst[ring->me].ring_dec)) - new_state.jpeg = VCN_DPG_STATE__PAUSE; - else - new_state.jpeg = VCN_DPG_STATE__UNPAUSE; - if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) new_state.fw_based = VCN_DPG_STATE__PAUSE; - else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) - new_state.jpeg = VCN_DPG_STATE__PAUSE; - adev->vcn.pause_dpg_mode(adev, &new_state); + adev->vcn.pause_dpg_mode(adev, ring->me, &new_state); } } @@ -518,9 +493,14 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout) { + struct amdgpu_device *adev = ring->adev; struct dma_fence *fence; long r; + /* temporarily disable ib test for sriov */ + if (amdgpu_sriov_vf(adev)) + return 0; + r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL); if (r) goto error; @@ -676,10 +656,15 @@ err: int 
amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) { + struct amdgpu_device *adev = ring->adev; struct dma_fence *fence = NULL; struct amdgpu_bo *bo = NULL; long r; + /* temporarily disable ib test for sriov */ + if (amdgpu_sriov_vf(adev)) + return 0; + r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &bo, NULL, NULL); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index 402a5046b98581529b878e4a54d91625dbc184cb..d6deb0eb1e15a4a91f715b2cbeb20525894e3090 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -31,6 +31,7 @@ #define AMDGPU_VCN_MAX_ENC_RINGS 3 #define AMDGPU_MAX_VCN_INSTANCES 2 +#define AMDGPU_MAX_VCN_ENC_RINGS AMDGPU_VCN_MAX_ENC_RINGS * AMDGPU_MAX_VCN_INSTANCES #define AMDGPU_VCN_HARVEST_VCN0 (1 << 0) #define AMDGPU_VCN_HARVEST_VCN1 (1 << 1) @@ -56,33 +57,41 @@ #define VCN_VID_IP_ADDRESS_2_0 0x0 #define VCN_AON_IP_ADDRESS_2_0 0x30000 -#define RREG32_SOC15_DPG_MODE(ip, inst, reg, mask, sram_sel) \ - ({ WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); \ - WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \ +#define mmUVD_RBC_XX_IB_REG_CHECK 0x026b +#define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX 1 +#define mmUVD_REG_XX_MASK 0x026c +#define mmUVD_REG_XX_MASK_BASE_IDX 1 + +/* 1 second timeout */ +#define VCN_IDLE_TIMEOUT msecs_to_jiffies(1000) + +#define RREG32_SOC15_DPG_MODE(ip, inst_idx, reg, mask, sram_sel) \ + ({ WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, mask); \ + WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_CTL, \ UVD_DPG_LMA_CTL__MASK_EN_MASK | \ - ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) \ + ((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg) \ << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | \ (sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \ - RREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA); \ + RREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA); \ }) -#define WREG32_SOC15_DPG_MODE(ip, inst, reg, value, mask, sram_sel) \ +#define WREG32_SOC15_DPG_MODE(ip, inst_idx, reg, value, mask, sram_sel) \ do { \ - WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA, value); \ - WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); \ - WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \ + WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA, value); \ + WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, mask); \ + WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_CTL, \ UVD_DPG_LMA_CTL__READ_WRITE_MASK | \ - ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) \ + ((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg) \ << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | \ (sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \ } while (0) -#define SOC15_DPG_MODE_OFFSET_2_0(ip, inst, reg) \ +#define SOC15_DPG_MODE_OFFSET_2_0(ip, inst_idx, reg) \ ({ \ uint32_t internal_reg_offset, addr; \ bool video_range, aon_range; \ \ - addr = (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \ + addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \ addr <<= 2; \ video_range = ((((0xFFFFF & addr) >= (VCN_VID_SOC_ADDRESS_2_0)) && \ ((0xFFFFF & addr) < ((VCN_VID_SOC_ADDRESS_2_0 + 0x2600))))); \ @@ -100,27 +109,27 @@ internal_reg_offset >>= 2; \ }) -#define RREG32_SOC15_DPG_MODE_2_0(offset, mask_en) \ - ({ \ - WREG32_SOC15(VCN, 0, mmUVD_DPG_LMA_CTL, \ - (0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \ - mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \ - offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \ - RREG32_SOC15(VCN, 0, mmUVD_DPG_LMA_DATA); \ 
+#define RREG32_SOC15_DPG_MODE_2_0(inst_idx, offset, mask_en) \ + ({ \ + WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_CTL, \ + (0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \ + mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \ + offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \ + RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_DATA); \ }) -#define WREG32_SOC15_DPG_MODE_2_0(offset, value, mask_en, indirect) \ - do { \ - if (!indirect) { \ - WREG32_SOC15(VCN, 0, mmUVD_DPG_LMA_DATA, value); \ - WREG32_SOC15(VCN, 0, mmUVD_DPG_LMA_CTL, \ - (0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \ - mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \ - offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \ - } else { \ - *adev->vcn.dpg_sram_curr_addr++ = offset; \ - *adev->vcn.dpg_sram_curr_addr++ = value; \ - } \ +#define WREG32_SOC15_DPG_MODE_2_0(inst_idx, offset, value, mask_en, indirect) \ + do { \ + if (!indirect) { \ + WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_DATA, value); \ + WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_CTL, \ + (0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \ + mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \ + offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \ + } else { \ + *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = offset; \ + *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = value; \ + } \ } while (0) enum engine_status_constants { @@ -169,6 +178,10 @@ struct amdgpu_vcn_inst { struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS]; struct amdgpu_irq_src irq; struct amdgpu_vcn_reg external; + struct amdgpu_bo *dpg_sram_bo; + void *dpg_sram_cpu_addr; + uint64_t dpg_sram_gpu_addr; + uint32_t *dpg_sram_curr_addr; }; struct amdgpu_vcn { @@ -180,18 +193,18 @@ struct amdgpu_vcn { struct dpg_pause_state pause_state; bool indirect_sram; - struct amdgpu_bo *dpg_sram_bo; - void *dpg_sram_cpu_addr; - uint64_t dpg_sram_gpu_addr; - uint32_t *dpg_sram_curr_addr; uint8_t num_vcn_inst; - struct amdgpu_vcn_inst inst[AMDGPU_MAX_VCN_INSTANCES]; - struct amdgpu_vcn_reg internal; + struct amdgpu_vcn_inst inst[AMDGPU_MAX_VCN_INSTANCES]; + struct amdgpu_vcn_reg internal; + struct drm_gpu_scheduler *vcn_enc_sched[AMDGPU_MAX_VCN_ENC_RINGS]; + struct drm_gpu_scheduler *vcn_dec_sched[AMDGPU_MAX_VCN_INSTANCES]; + uint32_t num_vcn_enc_sched; + uint32_t num_vcn_dec_sched; unsigned harvest_config; int (*pause_dpg_mode)(struct amdgpu_device *adev, - struct dpg_pause_state *new_state); + int inst_idx, struct dpg_pause_state *new_state); }; int amdgpu_vcn_sw_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 103033f96f133c27c77b320485eb01d028e89718..adc813cde8e281617118725a544483bc57a64d21 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -45,98 +45,6 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev) adev->pg_flags = 0; } -uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg) -{ - signed long r, cnt = 0; - unsigned long flags; - uint32_t seq; - struct amdgpu_kiq *kiq = &adev->gfx.kiq; - struct amdgpu_ring *ring = &kiq->ring; - - BUG_ON(!ring->funcs->emit_rreg); - - spin_lock_irqsave(&kiq->ring_lock, flags); - amdgpu_ring_alloc(ring, 32); - amdgpu_ring_emit_rreg(ring, reg); - amdgpu_fence_emit_polling(ring, &seq); - amdgpu_ring_commit(ring); - spin_unlock_irqrestore(&kiq->ring_lock, flags); - - r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); - - /* don't wait anymore for gpu reset case because this way may - * block gpu_recover() routine forever, 
e.g. this virt_kiq_rreg - * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will - * never return if we keep waiting in virt_kiq_rreg, which cause - * gpu_recover() hang there. - * - * also don't wait anymore for IRQ context - * */ - if (r < 1 && (adev->in_gpu_reset || in_interrupt())) - goto failed_kiq_read; - - might_sleep(); - while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { - msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); - r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); - } - - if (cnt > MAX_KIQ_REG_TRY) - goto failed_kiq_read; - - return adev->wb.wb[adev->virt.reg_val_offs]; - -failed_kiq_read: - pr_err("failed to read reg:%x\n", reg); - return ~0; -} - -void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) -{ - signed long r, cnt = 0; - unsigned long flags; - uint32_t seq; - struct amdgpu_kiq *kiq = &adev->gfx.kiq; - struct amdgpu_ring *ring = &kiq->ring; - - BUG_ON(!ring->funcs->emit_wreg); - - spin_lock_irqsave(&kiq->ring_lock, flags); - amdgpu_ring_alloc(ring, 32); - amdgpu_ring_emit_wreg(ring, reg, v); - amdgpu_fence_emit_polling(ring, &seq); - amdgpu_ring_commit(ring); - spin_unlock_irqrestore(&kiq->ring_lock, flags); - - r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); - - /* don't wait anymore for gpu reset case because this way may - * block gpu_recover() routine forever, e.g. this virt_kiq_rreg - * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will - * never return if we keep waiting in virt_kiq_rreg, which cause - * gpu_recover() hang there. - * - * also don't wait anymore for IRQ context - * */ - if (r < 1 && (adev->in_gpu_reset || in_interrupt())) - goto failed_kiq_write; - - might_sleep(); - while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { - - msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); - r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); - } - - if (cnt > MAX_KIQ_REG_TRY) - goto failed_kiq_write; - - return; - -failed_kiq_write: - pr_err("failed to write reg:%x\n", reg); -} - void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, uint32_t reg0, uint32_t reg1, uint32_t ref, uint32_t mask) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 4d1ac7612967e3a52255bd9f79b2db8a9b3b9241..daaf909d009aacd476f4f4b2e2c6a8606a4a9a5d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -287,8 +287,6 @@ static inline bool is_virtual_machine(void) bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); void amdgpu_virt_init_setting(struct amdgpu_device *adev); -uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); -void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v); void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, uint32_t reg0, uint32_t rreg1, uint32_t ref, uint32_t mask); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 8f26504a59a7a5cdf9738f636d3312cae259c89a..d16231d6a790bc805002d093f9d7de35a7e93351 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -82,6 +82,32 @@ struct amdgpu_prt_cb { struct dma_fence_cb cb; }; +/** + * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS + * happens while holding this lock anywhere to prevent deadlocks when + * an MMU notifier runs in reclaim-FS context. 
+ */
+static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
+{
+	mutex_lock(&vm->eviction_lock);
+	vm->saved_flags = memalloc_nofs_save();
+}
+
+static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
+{
+	if (mutex_trylock(&vm->eviction_lock)) {
+		vm->saved_flags = memalloc_nofs_save();
+		return 1;
+	}
+	return 0;
+}
+
+static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
+{
+	memalloc_nofs_restore(vm->saved_flags);
+	mutex_unlock(&vm->eviction_lock);
+}
+
 /**
  * amdgpu_vm_level_shift - return the addr shift for each level
  *
@@ -678,9 +704,9 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		}
 	}
 
-	mutex_lock(&vm->eviction_lock);
+	amdgpu_vm_eviction_lock(vm);
 	vm->evicting = false;
-	mutex_unlock(&vm->eviction_lock);
+	amdgpu_vm_eviction_unlock(vm);
 
 	return 0;
 }
@@ -1559,7 +1585,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	if (!(flags & AMDGPU_PTE_VALID))
 		owner = AMDGPU_FENCE_OWNER_KFD;
 
-	mutex_lock(&vm->eviction_lock);
+	amdgpu_vm_eviction_lock(vm);
 	if (vm->evicting) {
 		r = -EBUSY;
 		goto error_unlock;
@@ -1576,7 +1602,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	r = vm->update_funcs->commit(&params, fence);
 
 error_unlock:
-	mutex_unlock(&vm->eviction_lock);
+	amdgpu_vm_eviction_unlock(vm);
 	return r;
 }
 
@@ -2533,18 +2559,18 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
 		return false;
 
 	/* Try to block ongoing updates */
-	if (!mutex_trylock(&bo_base->vm->eviction_lock))
+	if (!amdgpu_vm_eviction_trylock(bo_base->vm))
 		return false;
 
 	/* Don't evict VM page tables while they are updated */
 	if (!dma_fence_is_signaled(bo_base->vm->last_direct) ||
 	    !dma_fence_is_signaled(bo_base->vm->last_delayed)) {
-		mutex_unlock(&bo_base->vm->eviction_lock);
+		amdgpu_vm_eviction_unlock(bo_base->vm);
 		return false;
 	}
 
 	bo_base->vm->evicting = true;
-	mutex_unlock(&bo_base->vm->eviction_lock);
+	amdgpu_vm_eviction_unlock(bo_base->vm);
 	return true;
 }
 
@@ -2753,14 +2779,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	spin_lock_init(&vm->invalidated_lock);
 	INIT_LIST_HEAD(&vm->freed);
 
+
 	/* create scheduler entities for page table updates */
-	r = drm_sched_entity_init(&vm->direct, adev->vm_manager.vm_pte_rqs,
-				  adev->vm_manager.vm_pte_num_rqs, NULL);
+	r = drm_sched_entity_init(&vm->direct, DRM_SCHED_PRIORITY_NORMAL,
+				  adev->vm_manager.vm_pte_scheds,
+				  adev->vm_manager.vm_pte_num_scheds, NULL);
 	if (r)
 		return r;
 
-	r = drm_sched_entity_init(&vm->delayed, adev->vm_manager.vm_pte_rqs,
-				  adev->vm_manager.vm_pte_num_rqs, NULL);
+	r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
+				  adev->vm_manager.vm_pte_scheds,
+				  adev->vm_manager.vm_pte_num_scheds, NULL);
 	if (r)
 		goto error_free_direct;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 7e0eb36da27deaae5e36d7810f0277d60e184c43..b4640ab38c95991b538e10691e3e7f23cfd70947 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -30,6 +30,7 @@
 #include
 #include
 #include
+#include <linux/sched/mm.h>
 
 #include "amdgpu_sync.h"
 #include "amdgpu_ring.h"
@@ -239,9 +240,12 @@ struct amdgpu_vm {
 	/* tree of virtual addresses mapped */
 	struct rb_root_cached va;
 
-	/* Lock to prevent eviction while we are updating page tables */
+	/* Lock to prevent eviction while we are updating page tables;
+	 * use amdgpu_vm_eviction_lock/unlock(vm)
+	 */
 	struct mutex eviction_lock;
 	bool evicting;
+	unsigned int saved_flags;
 
 	/* BOs who needs a validation */
 	struct list_head evicted;
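Aside (illustrative sketch, not part of the patch): the helpers introduced above pair the eviction mutex with memalloc_nofs_save()/memalloc_nofs_restore(), so that any allocation made while the lock is held cannot recurse into filesystem reclaim; this matters because the same lock can be taken from an MMU notifier running in reclaim-FS context. The generic pattern, with a hypothetical ctx structure standing in for amdgpu_vm:

	#include <linux/mutex.h>
	#include <linux/sched/mm.h>

	struct ctx {
		struct mutex lock;		/* initialize with mutex_init() */
		unsigned int saved_flags;	/* memalloc state while locked */
	};

	static void ctx_lock(struct ctx *c)
	{
		mutex_lock(&c->lock);
		/* forbid FS reclaim from any allocation until ctx_unlock() */
		c->saved_flags = memalloc_nofs_save();
	}

	static void ctx_unlock(struct ctx *c)
	{
		/* restore the previous reclaim state, then drop the lock */
		memalloc_nofs_restore(c->saved_flags);
		mutex_unlock(&c->lock);
	}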
@@ -327,8 +331,8 @@ struct amdgpu_vm_manager {
 	u64					vram_base_offset;
 	/* vm pte handling */
 	const struct amdgpu_vm_pte_funcs	*vm_pte_funcs;
-	struct drm_sched_rq			*vm_pte_rqs[AMDGPU_MAX_RINGS];
-	unsigned				vm_pte_num_rqs;
+	struct drm_gpu_scheduler		*vm_pte_scheds[AMDGPU_MAX_RINGS];
+	unsigned				vm_pte_num_scheds;
 	struct amdgpu_ring			*page_fault;
 
 	/* partial resident texture handling */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 61d13d8b7b20dfc9e0f4c8a558f577d557f110b5..a97af422575a319e7488bb7d16273e46ae5bb079 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -146,16 +146,16 @@ static ssize_t amdgpu_xgmi_show_error(struct device *dev,
 	ficaa_pie_ctl_in = AMDGPU_XGMI_SET_FICAA(0x200);
 	ficaa_pie_status_in = AMDGPU_XGMI_SET_FICAA(0x208);
 
-	fica_out = adev->df_funcs->get_fica(adev, ficaa_pie_ctl_in);
+	fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_ctl_in);
 	if (fica_out != 0x1f)
 		pr_err("xGMI error counters not enabled!\n");
 
-	fica_out = adev->df_funcs->get_fica(adev, ficaa_pie_status_in);
+	fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_status_in);
 
 	if ((fica_out & 0xffff) == 2)
 		error_count = ((fica_out >> 62) & 0x1) + (fica_out >> 63);
 
-	adev->df_funcs->set_fica(adev, ficaa_pie_status_in, 0, 0);
+	adev->df.funcs->set_fica(adev, ficaa_pie_status_in, 0, 0);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n", error_count);
 }
@@ -261,6 +261,7 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lo
 	INIT_LIST_HEAD(&tmp->device_list);
 	mutex_init(&tmp->hive_lock);
 	mutex_init(&tmp->reset_lock);
+	task_barrier_init(&tmp->tb);
 
 	if (lock)
 		mutex_lock(&tmp->hive_lock);
@@ -290,13 +291,7 @@ int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
 
 	dev_dbg(adev->dev, "Set xgmi pstate %d.\n", pstate);
 
-	if (is_support_sw_smu_xgmi(adev))
-		ret = smu_set_xgmi_pstate(&adev->smu, pstate);
-	else if (adev->powerplay.pp_funcs &&
-		 adev->powerplay.pp_funcs->set_xgmi_pstate)
-		ret = adev->powerplay.pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
-								pstate);
-
+	ret = amdgpu_dpm_set_xgmi_pstate(adev, pstate);
 	if (ret) {
 		dev_err(adev->dev,
 			"XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
@@ -408,6 +403,8 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
 	top_info->num_nodes = count;
 	hive->number_devices = count;
 
+	task_barrier_add_task(&hive->tb);
+
 	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
 			/* update node list for other device in the hive */
@@ -470,6 +467,7 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
 		mutex_destroy(&hive->hive_lock);
 		mutex_destroy(&hive->reset_lock);
 	} else {
+		task_barrier_rem_task(&hive->tb);
 		amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
 		mutex_unlock(&hive->hive_lock);
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index bbf504ff7051f1d34eba4c2eaae2704cd5b70c7b..74011fbc225190cbbb8f3c11b08270b8cbeca691 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -22,6 +22,7 @@
 #ifndef __AMDGPU_XGMI_H__
 #define __AMDGPU_XGMI_H__
 
+#include <drm/task_barrier.h>
 #include "amdgpu_psp.h"
 
 struct amdgpu_hive_info {
@@ -33,6 +34,7 @@ struct amdgpu_hive_info {
 	struct device_attribute dev_attr;
 	struct amdgpu_device *adev;
 	int pstate; /*0 -- low , 1 -- high , -1 unknown*/
+	struct task_barrier tb;
 };
 
 struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock);
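Aside (illustrative sketch, not part of the patch): the drm_sched_entity_init() conversions repeated through this series (ttm, uvd, vce and the vm code above) all have the same shape: callers stop resolving a drm_sched_rq from the ring and instead hand over an array of scheduler pointers plus an explicit priority. Before/after for a single ring, using only calls that appear in the hunks above; the wrapper function and its names are hypothetical:

	/* entity and ring are caller-provided; error handling elided */
	static int example_entity_init(struct amdgpu_ring *ring,
				       struct drm_sched_entity *entity)
	{
		/* old API: caller picked the run queue itself
		 *	struct drm_sched_rq *rq =
		 *		&ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
		 *	return drm_sched_entity_init(entity, &rq, 1, NULL);
		 */

		/* new API: pass the scheduler list and the priority */
		struct drm_gpu_scheduler *sched = &ring->sched;

		return drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_NORMAL,
					     &sched, 1, NULL);
	}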
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c index d9cc746af5e66b4787094c9b2da48f0a4f7ff84e..847ca9b3ce4eabe0786884f84d5ad21c1ce843b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c @@ -74,9 +74,9 @@ int athub_v1_0_set_clockgating(struct amdgpu_device *adev, case CHIP_VEGA20: case CHIP_RAVEN: athub_update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); athub_update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c index ceb9aa4df0e73ba27aa22e61c7116ba832099407..921a69abda55526aab2c40036a9e945547d82f4f 100644 --- a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c @@ -77,9 +77,9 @@ int athub_v2_0_set_clockgating(struct amdgpu_device *adev, case CHIP_NAVI14: case CHIP_NAVI12: athub_v2_0_update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); athub_v2_0_update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index e9822ea8bb19957ea09142b1ae733a2302d5f3fe..006f21ef7ddf09a375ead24f7b31af1fa41fd207 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1312,19 +1312,13 @@ static int cik_asic_pci_config_reset(struct amdgpu_device *adev) static bool cik_asic_supports_baco(struct amdgpu_device *adev) { - bool baco_support; - switch (adev->asic_type) { case CHIP_BONAIRE: case CHIP_HAWAII: - smu7_asic_get_baco_capability(adev, &baco_support); - break; + return amdgpu_dpm_is_baco_supported(adev); default: - baco_support = false; - break; + return false; } - - return baco_support; } static enum amd_reset_method @@ -1366,7 +1360,7 @@ static int cik_asic_reset(struct amdgpu_device *adev) if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { if (!adev->in_suspend) amdgpu_inc_vram_lost(adev); - r = smu7_asic_baco_reset(adev); + r = amdgpu_dpm_baco_reset(adev); } else { r = cik_asic_pci_config_reset(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h index 9870bf27870e060dbc5b22ad416782a8bbafd69b..f91ab4c246b740f0304e0ddcd31c0667eae7f9f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.h +++ b/drivers/gpu/drm/amd/amdgpu/cik.h @@ -31,7 +31,5 @@ void cik_srbm_select(struct amdgpu_device *adev, int cik_set_ip_blocks(struct amdgpu_device *adev); void legacy_doorbell_index_init(struct amdgpu_device *adev); -int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap); -int smu7_asic_baco_reset(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index c45304f1047c537b5e93a86b1f6697131ff3f922..580d3f93d67093ec1925e671d8fea7d2a6f5e2ac 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -228,7 +228,7 @@ static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring, u32 extra_bits = vmid & 0xf; /* IB packet must end on a 8 DW boundary */ - cik_sdma_ring_insert_nop(ring, (12 - (lower_32_bits(ring->wptr) & 7)) % 8); + cik_sdma_ring_insert_nop(ring, (4 - lower_32_bits(ring->wptr)) & 7); amdgpu_ring_write(ring, 
SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits)); amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */ @@ -811,7 +811,7 @@ static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) u32 pad_count; int i; - pad_count = (8 - (ib->length_dw & 0x7)) % 8; + pad_count = (-ib->length_dw) & 7; for (i = 0; i < pad_count; i++) if (sdma && sdma->burst_nop && (i == 0)) ib->ptr[ib->length_dw++] = @@ -1372,16 +1372,14 @@ static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = { static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev) { - struct drm_gpu_scheduler *sched; unsigned i; adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs; for (i = 0; i < adev->sdma.num_instances; i++) { - sched = &adev->sdma.instance[i].ring.sched; - adev->vm_manager.vm_pte_rqs[i] = - &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; + adev->vm_manager.vm_pte_scheds[i] = + &adev->sdma.instance[i].ring.sched; } - adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances; + adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances; } const struct amdgpu_ip_block_version cik_sdma_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c index d6221298b477fef6340c9086ca97da78ae5720eb..d6aca1c080687e6fbad20828024cb78e2fd6705e 100644 --- a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c +++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c @@ -31,6 +31,9 @@ static u32 df_v1_7_channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2}; static void df_v1_7_sw_init(struct amdgpu_device *adev) { + adev->df.hash_status.hash_64k = false; + adev->df.hash_status.hash_2m = false; + adev->df.hash_status.hash_1g = false; } static void df_v1_7_sw_fini(struct amdgpu_device *adev) @@ -66,7 +69,7 @@ static u32 df_v1_7_get_hbm_channel_number(struct amdgpu_device *adev) { int fb_channel_number; - fb_channel_number = adev->df_funcs->get_fb_channel_number(adev); + fb_channel_number = adev->df.funcs->get_fb_channel_number(adev); return df_v1_7_channel_number[fb_channel_number]; } @@ -77,7 +80,7 @@ static void df_v1_7_update_medium_grain_clock_gating(struct amdgpu_device *adev, u32 tmp; /* Put DF on broadcast mode */ - adev->df_funcs->enable_broadcast_mode(adev, true); + adev->df.funcs->enable_broadcast_mode(adev, true); if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) { tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater); @@ -92,7 +95,7 @@ static void df_v1_7_update_medium_grain_clock_gating(struct amdgpu_device *adev, } /* Exit boradcast mode */ - adev->df_funcs->enable_broadcast_mode(adev, false); + adev->df.funcs->enable_broadcast_mode(adev, false); } static void df_v1_7_get_clockgating_state(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c index 4043ebcea5de6e1efca0a89dad8356e5c5de385e..5a1bd8ed1a6c51ab34d0d65f834c3d5a8ccd666a 100644 --- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c +++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c @@ -27,6 +27,9 @@ #include "df/df_3_6_offset.h" #include "df/df_3_6_sh_mask.h" +#define DF_3_6_SMN_REG_INST_DIST 0x8 +#define DF_3_6_INST_CNT 8 + static u32 df_v3_6_channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 32, 0, 0, 0, 2, 4, 8}; @@ -183,6 +186,61 @@ static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr, spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); } +/* same as perfmon_wreg but return status on write value check */ +static int df_v3_6_perfmon_arm_with_status(struct amdgpu_device *adev, + uint32_t lo_addr, uint32_t lo_val, + 
uint32_t hi_addr, uint32_t hi_val) +{ + unsigned long flags, address, data; + uint32_t lo_val_rb, hi_val_rb; + + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); + + spin_lock_irqsave(&adev->pcie_idx_lock, flags); + WREG32(address, lo_addr); + WREG32(data, lo_val); + WREG32(address, hi_addr); + WREG32(data, hi_val); + + WREG32(address, lo_addr); + lo_val_rb = RREG32(data); + WREG32(address, hi_addr); + hi_val_rb = RREG32(data); + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); + + if (!(lo_val == lo_val_rb && hi_val == hi_val_rb)) + return -EBUSY; + + return 0; +} + + +/* + * retry arming counters every 100 usecs within 1 millisecond interval. + * if retry fails after time out, return error. + */ +#define ARM_RETRY_USEC_TIMEOUT 1000 +#define ARM_RETRY_USEC_INTERVAL 100 +static int df_v3_6_perfmon_arm_with_retry(struct amdgpu_device *adev, + uint32_t lo_addr, uint32_t lo_val, + uint32_t hi_addr, uint32_t hi_val) +{ + int countdown = ARM_RETRY_USEC_TIMEOUT; + + while (countdown) { + + if (!df_v3_6_perfmon_arm_with_status(adev, lo_addr, lo_val, + hi_addr, hi_val)) + break; + + countdown -= ARM_RETRY_USEC_INTERVAL; + udelay(ARM_RETRY_USEC_INTERVAL); + } + + return countdown > 0 ? 0 : -ETIME; +} + /* get the number of df counters available */ static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev, struct device_attribute *attr, @@ -207,6 +265,32 @@ static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev, /* device attr for available perfmon counters */ static DEVICE_ATTR(df_cntr_avail, S_IRUGO, df_v3_6_get_df_cntr_avail, NULL); +static void df_v3_6_query_hashes(struct amdgpu_device *adev) +{ + u32 tmp; + + adev->df.hash_status.hash_64k = false; + adev->df.hash_status.hash_2m = false; + adev->df.hash_status.hash_1g = false; + + if (adev->asic_type != CHIP_ARCTURUS) + return; + + /* encoding for hash-enabled on Arcturus */ + if (adev->df.funcs->get_fb_channel_number(adev) == 0xe) { + tmp = RREG32_SOC15(DF, 0, mmDF_CS_UMC_AON0_DfGlobalCtrl); + adev->df.hash_status.hash_64k = REG_GET_FIELD(tmp, + DF_CS_UMC_AON0_DfGlobalCtrl, + GlbHashIntlvCtl64K); + adev->df.hash_status.hash_2m = REG_GET_FIELD(tmp, + DF_CS_UMC_AON0_DfGlobalCtrl, + GlbHashIntlvCtl2M); + adev->df.hash_status.hash_1g = REG_GET_FIELD(tmp, + DF_CS_UMC_AON0_DfGlobalCtrl, + GlbHashIntlvCtl1G); + } +} + /* init perfmons */ static void df_v3_6_sw_init(struct amdgpu_device *adev) { @@ -218,6 +302,8 @@ static void df_v3_6_sw_init(struct amdgpu_device *adev) for (i = 0; i < AMDGPU_MAX_DF_PERFMONS; i++) adev->df_perfmon_config_assign_mask[i] = 0; + + df_v3_6_query_hashes(adev); } static void df_v3_6_sw_fini(struct amdgpu_device *adev) @@ -256,7 +342,7 @@ static u32 df_v3_6_get_hbm_channel_number(struct amdgpu_device *adev) { int fb_channel_number; - fb_channel_number = adev->df_funcs->get_fb_channel_number(adev); + fb_channel_number = adev->df.funcs->get_fb_channel_number(adev); if (fb_channel_number >= ARRAY_SIZE(df_v3_6_channel_number)) fb_channel_number = 0; @@ -270,7 +356,7 @@ static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev, if (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG) { /* Put DF on broadcast mode */ - adev->df_funcs->enable_broadcast_mode(adev, true); + adev->df.funcs->enable_broadcast_mode(adev, true); if (enable) { tmp = RREG32_SOC15(DF, 0, @@ -289,7 +375,7 @@ static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev, } /* Exit broadcast mode */ - adev->df_funcs->enable_broadcast_mode(adev, false); + 
adev->df.funcs->enable_broadcast_mode(adev, false);
 	}
 }
 
@@ -334,20 +420,20 @@ static void df_v3_6_pmc_get_addr(struct amdgpu_device *adev,
 	switch (target_cntr) {
 	case 0:
-		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo0 : smnPerfMonCtrLo0;
-		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi0 : smnPerfMonCtrHi0;
+		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo4 : smnPerfMonCtrLo4;
+		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi4 : smnPerfMonCtrHi4;
 		break;
 	case 1:
-		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo1 : smnPerfMonCtrLo1;
-		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi1 : smnPerfMonCtrHi1;
+		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo5 : smnPerfMonCtrLo5;
+		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi5 : smnPerfMonCtrHi5;
 		break;
 	case 2:
-		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo2 : smnPerfMonCtrLo2;
-		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi2 : smnPerfMonCtrHi2;
+		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo6 : smnPerfMonCtrLo6;
+		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi6 : smnPerfMonCtrHi6;
 		break;
 	case 3:
-		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo3 : smnPerfMonCtrLo3;
-		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi3 : smnPerfMonCtrHi3;
+		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo7 : smnPerfMonCtrLo7;
+		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi7 : smnPerfMonCtrHi7;
 		break;
 	}
 
@@ -422,6 +508,44 @@ static int df_v3_6_pmc_add_cntr(struct amdgpu_device *adev,
 	return -ENOSPC;
 }
 
+#define DEFERRED_ARM_MASK	(1 << 31)
+static int df_v3_6_pmc_set_deferred(struct amdgpu_device *adev,
+				    uint64_t config, bool is_deferred)
+{
+	int target_cntr;
+
+	target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
+
+	if (target_cntr < 0)
+		return -EINVAL;
+
+	if (is_deferred)
+		adev->df_perfmon_config_assign_mask[target_cntr] |=
+							DEFERRED_ARM_MASK;
+	else
+		adev->df_perfmon_config_assign_mask[target_cntr] &=
+							~DEFERRED_ARM_MASK;
+
+	return 0;
+}
+
+static bool df_v3_6_pmc_is_deferred(struct amdgpu_device *adev,
+				    uint64_t config)
+{
+	int target_cntr;
+
+	target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
+
+	/*
+	 * we never get target_cntr < 0 since this function is only called in
+	 * pmc_count for now but we should check anyway.
+ */ + return (target_cntr >= 0 && + (adev->df_perfmon_config_assign_mask[target_cntr] + & DEFERRED_ARM_MASK)); + +} + /* release performance counter */ static void df_v3_6_pmc_release_cntr(struct amdgpu_device *adev, uint64_t config) @@ -451,29 +575,33 @@ static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config, int is_enable) { uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val; - int ret = 0; + int err = 0, ret = 0; switch (adev->asic_type) { case CHIP_VEGA20: + if (is_enable) + return df_v3_6_pmc_add_cntr(adev, config); df_v3_6_reset_perfmon_cntr(adev, config); - if (is_enable) { - ret = df_v3_6_pmc_add_cntr(adev, config); - } else { - ret = df_v3_6_pmc_get_ctrl_settings(adev, + ret = df_v3_6_pmc_get_ctrl_settings(adev, config, &lo_base_addr, &hi_base_addr, &lo_val, &hi_val); - if (ret) - return ret; + if (ret) + return ret; + + err = df_v3_6_perfmon_arm_with_retry(adev, + lo_base_addr, + lo_val, + hi_base_addr, + hi_val); - df_v3_6_perfmon_wreg(adev, lo_base_addr, lo_val, - hi_base_addr, hi_val); - } + if (err) + ret = df_v3_6_pmc_set_deferred(adev, config, true); break; default: @@ -501,7 +629,7 @@ static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config, if (ret) return ret; - df_v3_6_perfmon_wreg(adev, lo_base_addr, 0, hi_base_addr, 0); + df_v3_6_reset_perfmon_cntr(adev, config); if (is_disable) df_v3_6_pmc_release_cntr(adev, config); @@ -518,18 +646,29 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev, uint64_t config, uint64_t *count) { - uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val; + uint32_t lo_base_addr, hi_base_addr, lo_val = 0, hi_val = 0; *count = 0; switch (adev->asic_type) { case CHIP_VEGA20: - df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr, &hi_base_addr); if ((lo_base_addr == 0) || (hi_base_addr == 0)) return; + /* rearm the counter or throw away count value on failure */ + if (df_v3_6_pmc_is_deferred(adev, config)) { + int rearm_err = df_v3_6_perfmon_arm_with_status(adev, + lo_base_addr, lo_val, + hi_base_addr, hi_val); + + if (rearm_err) + return; + + df_v3_6_pmc_set_deferred(adev, config, false); + } + df_v3_6_perfmon_rreg(adev, lo_base_addr, &lo_val, hi_base_addr, &hi_val); @@ -542,12 +681,63 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev, config, lo_base_addr, hi_base_addr, lo_val, hi_val); break; - default: break; } } +static uint64_t df_v3_6_get_dram_base_addr(struct amdgpu_device *adev, + uint32_t df_inst) +{ + uint32_t base_addr_reg_val = 0; + uint64_t base_addr = 0; + + base_addr_reg_val = RREG32_PCIE(smnDF_CS_UMC_AON0_DramBaseAddress0 + + df_inst * DF_3_6_SMN_REG_INST_DIST); + + if (REG_GET_FIELD(base_addr_reg_val, + DF_CS_UMC_AON0_DramBaseAddress0, + AddrRngVal) == 0) { + DRM_WARN("address range not valid"); + return 0; + } + + base_addr = REG_GET_FIELD(base_addr_reg_val, + DF_CS_UMC_AON0_DramBaseAddress0, + DramBaseAddr); + + return base_addr << 28; +} + +static uint32_t df_v3_6_get_df_inst_id(struct amdgpu_device *adev) +{ + uint32_t xgmi_node_id = 0; + uint32_t df_inst_id = 0; + + /* Walk through DF dst nodes to find current XGMI node */ + for (df_inst_id = 0; df_inst_id < DF_3_6_INST_CNT; df_inst_id++) { + + xgmi_node_id = RREG32_PCIE(smnDF_CS_UMC_AON0_DramLimitAddress0 + + df_inst_id * DF_3_6_SMN_REG_INST_DIST); + xgmi_node_id = REG_GET_FIELD(xgmi_node_id, + DF_CS_UMC_AON0_DramLimitAddress0, + DstFabricID); + + /* TODO: establish reason dest fabric id is offset by 7 */ + xgmi_node_id = xgmi_node_id >> 7; + + if (adev->gmc.xgmi.physical_node_id == xgmi_node_id) + break; + } 
+
+	if (df_inst_id == DF_3_6_INST_CNT) {
+		DRM_WARN("can't match df dst id with gpu node");
+		return 0;
+	}
+
+	return df_inst_id;
+}
+
 const struct amdgpu_df_funcs df_v3_6_funcs = {
 	.sw_init = df_v3_6_sw_init,
 	.sw_fini = df_v3_6_sw_fini,
@@ -561,5 +751,7 @@ const struct amdgpu_df_funcs df_v3_6_funcs = {
 	.pmc_stop = df_v3_6_pmc_stop,
 	.pmc_get_count = df_v3_6_pmc_get_count,
 	.get_fica = df_v3_6_get_fica,
-	.set_fica = df_v3_6_set_fica
+	.set_fica = df_v3_6_set_fica,
+	.get_dram_base_addr = df_v3_6_get_dram_base_addr,
+	.get_df_inst_id = df_v3_6_get_df_inst_id
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 98db25215d0da8875a7454e4151a6c6c35f54040..1785fdad6ecbaa4631468e7202470708465c3d9c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -40,6 +40,7 @@
 #include "ivsrcid/gfx/irqsrcs_gfx_10_1.h"
 #include "soc15.h"
+#include "soc15d.h"
 #include "soc15_common.h"
 #include "clearstate_gfx10.h"
 #include "v10_structs.h"
@@ -120,7 +121,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000800, 0x00000820),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL, 0x001f0000, 0x00070104),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x001f0000, 0x00070104),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000100, 0x00000130),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
@@ -168,7 +169,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000800, 0x00000820),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL, 0x001f0000, 0x00070105),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x001f0000, 0x00070105),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
@@ -345,15 +346,29 @@ static void gfx10_kiq_query_status(struct amdgpu_ring *kiq_ring,
 	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
 }
 
+static void gfx10_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
+				      uint16_t pasid, uint32_t flush_type,
+				      bool all_hub)
+{
+	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
+	amdgpu_ring_write(kiq_ring,
+			  PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
+			  PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
+			  PACKET3_INVALIDATE_TLBS_PASID(pasid) |
+			  PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
+}
+
 static const struct kiq_pm4_funcs gfx_v10_0_kiq_pm4_funcs = {
 	.kiq_set_resources = gfx10_kiq_set_resources,
 	.kiq_map_queues = gfx10_kiq_map_queues,
 	.kiq_unmap_queues = gfx10_kiq_unmap_queues,
 	.kiq_query_status = gfx10_kiq_query_status,
+	.kiq_invalidate_tlbs = gfx10_kiq_invalidate_tlbs,
 	.set_resources_size = 8,
 	.map_queues_size = 7,
 	.unmap_queues_size = 6,
 	.query_status_size = 7,
+	.invalidate_tlbs_size = 2,
 };
 
 static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
@@ -471,18 +486,10 @@ static int gfx_v10_0_ring_test_ring(struct amdgpu_ring *ring)
 		else
 			udelay(1);
 	}
-	if (i < adev->usec_timeout) {
-		if (amdgpu_emu_mode == 1)
-			DRM_INFO("ring test on %d succeeded in %d msecs\n",
-				 ring->idx, i);
-		else
-			DRM_INFO("ring test on %d succeeded in %d usecs\n",
-				 ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
-			  ring->idx, scratch, tmp);
-		r = -EINVAL;
-	}
+
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
+
 	amdgpu_gfx_scratch_free(adev, scratch);
 
 	return r;
@@ -532,14 +539,10 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	}
 
 	tmp = RREG32(scratch);
-	if (tmp == 0xDEADBEEF) {
-		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+	if (tmp == 0xDEADBEEF)
 		r = 0;
-	} else {
-		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
-			  scratch, tmp);
+	else
 		r = -EINVAL;
-	}
 err2:
 	amdgpu_ib_free(adev, &ib, NULL);
 	dma_fence_put(f);
@@ -588,8 +591,7 @@ static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
 	}
 
 	if (adev->gfx.cp_fw_write_wait == false)
-		DRM_WARN_ONCE("Warning: check cp_fw_version and update it to realize \
-			      GRBM requires 1-cycle delay in cp firmware\n");
+		DRM_WARN_ONCE("CP firmware version too old, please update!");
 }
 
@@ -820,10 +822,11 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
 		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
 		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
 		info->fw = adev->gfx.rlc_fw;
-		header = (const struct common_firmware_header *)info->fw->data;
-		adev->firmware.fw_size +=
-			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
+		if (info->fw) {
+			header = (const struct common_firmware_header *)info->fw->data;
+			adev->firmware.fw_size +=
+				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+		}
 		if (adev->gfx.rlc.is_rlc_v2_1 &&
 		    adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
 		    adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
@@ -1963,7 +1966,7 @@ static int gfx_v10_0_parse_rlc_toc(struct amdgpu_device *adev)
 		rlc_autoload_info[rlc_toc->id].size = rlc_toc->size * 4;
 
 		rlc_toc++;
-	};
+	}
 
 	return 0;
 }
@@ -3334,8 +3337,11 @@ static int gfx_v10_0_compute_mqd_init(struct amdgpu_ring *ring)
 	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
 	mqd->cp_hqd_ib_control = tmp;
 
-	/* activate the queue */
-	mqd->cp_hqd_active = 1;
+	/* map_queues packet doesn't need to activate the queue,
+	 * so only the kiq needs to set this field.
+	 */
+	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+		mqd->cp_hqd_active = 1;
 
 	return 0;
 }
@@ -3606,23 +3612,16 @@ static int gfx_v10_0_cp_resume(struct amdgpu_device *adev)
 
 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
 		ring = &adev->gfx.gfx_ring[i];
-		DRM_INFO("gfx %d ring me %d pipe %d q %d\n",
-			 i, ring->me, ring->pipe, ring->queue);
-		r = amdgpu_ring_test_ring(ring);
-		if (r) {
-			ring->sched.ready = false;
+		r = amdgpu_ring_test_helper(ring);
+		if (r)
 			return r;
-		}
 	}
 
 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
 		ring = &adev->gfx.compute_ring[i];
-		ring->sched.ready = true;
-		DRM_INFO("compute ring %d mec %d pipe %d q %d\n",
-			 i, ring->me, ring->pipe, ring->queue);
-		r = amdgpu_ring_test_ring(ring);
+		r = amdgpu_ring_test_helper(ring);
 		if (r)
-			ring->sched.ready = false;
+			return r;
 	}
 
 	return 0;
@@ -4230,7 +4229,7 @@ static int gfx_v10_0_set_powergating_state(void *handle,
 					   enum amd_powergating_state state)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	bool enable = (state == AMD_PG_STATE_GATE) ? true : false;
true : false; + bool enable = (state == AMD_PG_STATE_GATE); switch (adev->asic_type) { case CHIP_NAVI10: case CHIP_NAVI14: @@ -4256,7 +4255,7 @@ static int gfx_v10_0_set_clockgating_state(void *handle, case CHIP_NAVI14: case CHIP_NAVI12: gfx_v10_0_update_gfx_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); break; default: break; @@ -4738,6 +4737,7 @@ static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start) static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) { struct amdgpu_device *adev = ring->adev; + struct amdgpu_kiq *kiq = &adev->gfx.kiq; amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); amdgpu_ring_write(ring, 0 | /* src: register*/ @@ -4746,9 +4746,9 @@ static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) amdgpu_ring_write(ring, reg); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + - adev->virt.reg_val_offs * 4)); + kiq->reg_val_offs * 4)); amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + - adev->virt.reg_val_offs * 4)); + kiq->reg_val_offs * 4)); } static void gfx_v10_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index cfc1403fc855a2d1b1f7871e36a047f1ba03b558..fa245973de12cbd07579a08d9c243529bf49f43a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -4558,8 +4558,11 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring) mqd->cp_hqd_eop_wptr_mem = RREG32(mmCP_HQD_EOP_WPTR_MEM); mqd->cp_hqd_eop_dones = RREG32(mmCP_HQD_EOP_DONES); - /* activate the queue */ - mqd->cp_hqd_active = 1; + /* map_queues packet doesn't need activate the queue, + * so only kiq need set this field. 
+ */ + if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) + mqd->cp_hqd_active = 1; return 0; } @@ -6446,6 +6449,7 @@ static void gfx_v8_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) { struct amdgpu_device *adev = ring->adev; + struct amdgpu_kiq *kiq = &adev->gfx.kiq; amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); amdgpu_ring_write(ring, 0 | /* src: register*/ @@ -6454,9 +6458,9 @@ static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) amdgpu_ring_write(ring, reg); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + - adev->virt.reg_val_offs * 4)); + kiq->reg_val_offs * 4)); amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + - adev->virt.reg_val_offs * 4)); + kiq->reg_val_offs * 4)); } static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 2616f1b59bbd08d49f360ed0831e1eeb010a567e..90f64b8bc3586adc4f271876fb8428100818cc70 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -48,8 +48,8 @@ #include "amdgpu_ras.h" -#include "sdma0/sdma0_4_0_offset.h" -#include "sdma1/sdma1_4_0_offset.h" +#include "gfx_v9_4.h" + #define GFX9_NUM_GFX_RINGS 1 #define GFX9_MEC_HPD_SIZE 4096 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L @@ -738,9 +738,138 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring); static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring); static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status); +static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev); static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev, void *inject_if); +static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring, + uint64_t queue_mask) +{ + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6)); + amdgpu_ring_write(kiq_ring, + PACKET3_SET_RESOURCES_VMID_MASK(0) | + /* vmid_mask:0* queue_type:0 (KIQ) */ + PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); + amdgpu_ring_write(kiq_ring, + lower_32_bits(queue_mask)); /* queue mask lo */ + amdgpu_ring_write(kiq_ring, + upper_32_bits(queue_mask)); /* queue mask hi */ + amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */ + amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */ + amdgpu_ring_write(kiq_ring, 0); /* oac mask */ + amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */ +} + +static void gfx_v9_0_kiq_map_queues(struct amdgpu_ring *kiq_ring, + struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = kiq_ring->adev; + uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj); + uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); + uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0; + + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5)); + /* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/ + amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ + PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */ + PACKET3_MAP_QUEUES_VMID(0) | /* VMID */ + PACKET3_MAP_QUEUES_QUEUE(ring->queue) | + PACKET3_MAP_QUEUES_PIPE(ring->pipe) | + PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 
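/* Note: amdgpu numbers the compute micro-engines from 1 (MEC1/MEC2), while
 * the MAP_QUEUES packet apparently expects a 0-based engine index, hence
 * the remapping of ring->me here. */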
0 : 1)) | + /*queue_type: normal compute queue */ + PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | + /* alloc format: all_on_one_pipe */ + PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | + PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) | + /* num_queues: must be 1 */ + PACKET3_MAP_QUEUES_NUM_QUEUES(1)); + amdgpu_ring_write(kiq_ring, + PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index)); + amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr)); + amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr)); + amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr)); + amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); +} + +static void gfx_v9_0_kiq_unmap_queues(struct amdgpu_ring *kiq_ring, + struct amdgpu_ring *ring, + enum amdgpu_unmap_queues_action action, + u64 gpu_addr, u64 seq) +{ + uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0; + + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4)); + amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ + PACKET3_UNMAP_QUEUES_ACTION(action) | + PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) | + PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) | + PACKET3_UNMAP_QUEUES_NUM_QUEUES(1)); + amdgpu_ring_write(kiq_ring, + PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index)); + + if (action == PREEMPT_QUEUES_NO_UNMAP) { + amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr)); + amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr)); + amdgpu_ring_write(kiq_ring, seq); + } else { + amdgpu_ring_write(kiq_ring, 0); + amdgpu_ring_write(kiq_ring, 0); + amdgpu_ring_write(kiq_ring, 0); + } +} + +static void gfx_v9_0_kiq_query_status(struct amdgpu_ring *kiq_ring, + struct amdgpu_ring *ring, + u64 addr, + u64 seq) +{ + uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0; + + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5)); + amdgpu_ring_write(kiq_ring, + PACKET3_QUERY_STATUS_CONTEXT_ID(0) | + PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) | + PACKET3_QUERY_STATUS_COMMAND(2)); + /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ + amdgpu_ring_write(kiq_ring, + PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) | + PACKET3_QUERY_STATUS_ENG_SEL(eng_sel)); + amdgpu_ring_write(kiq_ring, lower_32_bits(addr)); + amdgpu_ring_write(kiq_ring, upper_32_bits(addr)); + amdgpu_ring_write(kiq_ring, lower_32_bits(seq)); + amdgpu_ring_write(kiq_ring, upper_32_bits(seq)); +} + +static void gfx_v9_0_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring, + uint16_t pasid, uint32_t flush_type, + bool all_hub) +{ + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0)); + amdgpu_ring_write(kiq_ring, + PACKET3_INVALIDATE_TLBS_DST_SEL(1) | + PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) | + PACKET3_INVALIDATE_TLBS_PASID(pasid) | + PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type)); +} + +static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = { + .kiq_set_resources = gfx_v9_0_kiq_set_resources, + .kiq_map_queues = gfx_v9_0_kiq_map_queues, + .kiq_unmap_queues = gfx_v9_0_kiq_unmap_queues, + .kiq_query_status = gfx_v9_0_kiq_query_status, + .kiq_invalidate_tlbs = gfx_v9_0_kiq_invalidate_tlbs, + .set_resources_size = 8, + .map_queues_size = 7, + .unmap_queues_size = 6, + .query_status_size = 7, + .invalidate_tlbs_size = 2, +}; + +static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev) +{ + adev->gfx.kiq.pmf = &gfx_v9_0_kiq_pm4_funcs; +} + static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) { switch (adev->asic_type) { @@ -981,8 +1110,7 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev) (adev->gfx.mec_feature_version < 
46) || (adev->gfx.pfp_fw_version < 0x000000b7) || (adev->gfx.pfp_feature_version < 46)) - DRM_WARN_ONCE("Warning: check cp_fw_version and update it to realize \ - GRBM requires 1-cycle delay in cp firmware\n"); + DRM_WARN_ONCE("CP firmware version too old, please update!"); switch (adev->asic_type) { case CHIP_VEGA10: @@ -1034,25 +1162,54 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev) } } +struct amdgpu_gfxoff_quirk { + u16 chip_vendor; + u16 chip_device; + u16 subsys_vendor; + u16 subsys_device; + u8 revision; +}; + +static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = { + /* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */ + { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 }, + { 0, 0, 0, 0, 0 }, +}; + +static bool gfx_v9_0_should_disable_gfxoff(struct pci_dev *pdev) +{ + const struct amdgpu_gfxoff_quirk *p = amdgpu_gfxoff_quirk_list; + + while (p && p->chip_device != 0) { + if (pdev->vendor == p->chip_vendor && + pdev->device == p->chip_device && + pdev->subsystem_vendor == p->subsys_vendor && + pdev->subsystem_device == p->subsys_device && + pdev->revision == p->revision) { + return true; + } + ++p; + } + return false; +} + static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev) { + if (gfx_v9_0_should_disable_gfxoff(adev->pdev)) + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; + switch (adev->asic_type) { case CHIP_VEGA10: case CHIP_VEGA12: case CHIP_VEGA20: break; case CHIP_RAVEN: - /* Disable GFXOFF on original raven. There are combinations - * of sbios and platforms that are not stable. - */ - if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)) - adev->pm.pp_feature &= ~PP_GFXOFF_MASK; - else if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) - &&((adev->gfx.rlc_fw_version != 106 && - adev->gfx.rlc_fw_version < 531) || - (adev->gfx.rlc_fw_version == 53815) || - (adev->gfx.rlc_feature_version < 1) || - !adev->gfx.rlc.is_rlc_v2_1)) + if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) && + ((adev->gfx.rlc_fw_version != 106 && + adev->gfx.rlc_fw_version < 531) || + (adev->gfx.rlc_fw_version == 53815) || + (adev->gfx.rlc_feature_version < 1) || + !adev->gfx.rlc.is_rlc_v2_1)) adev->pm.pp_feature &= ~PP_GFXOFF_MASK; if (adev->pm.pp_feature & PP_GFXOFF_MASK) @@ -1831,6 +1988,17 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = { .query_ras_error_count = &gfx_v9_0_query_ras_error_count }; +static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = { + .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter, + .select_se_sh = &gfx_v9_0_select_se_sh, + .read_wave_data = &gfx_v9_0_read_wave_data, + .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs, + .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs, + .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q, + .ras_error_inject = &gfx_v9_4_ras_error_inject, + .query_ras_error_count = &gfx_v9_4_query_ras_error_count +}; + static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) { u32 gb_addr_config; @@ -1882,6 +2050,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN; break; case CHIP_ARCTURUS: + adev->gfx.funcs = &gfx_v9_4_gfx_funcs; adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; adev->gfx.config.sc_prim_fifo_size_backend = 0x100; @@ -2272,6 +2441,22 @@ static void gfx_v9_0_init_gds_vmid(struct amdgpu_device *adev) } } +static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev) +{ + uint32_t tmp; + + switch (adev->asic_type) { + case CHIP_ARCTURUS: + tmp = RREG32_SOC15(GC, 
0, mmSQ_CONFIG); + tmp = REG_SET_FIELD(tmp, SQ_CONFIG, + DISABLE_BARRIER_WAITCNT, 1); + WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp); + break; + default: + break; + }; +} + static void gfx_v9_0_constants_init(struct amdgpu_device *adev) { u32 tmp; @@ -2317,6 +2502,7 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev) gfx_v9_0_init_compute_vmid(adev); gfx_v9_0_init_gds_vmid(adev); + gfx_v9_0_init_sq_config(adev); } static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev) @@ -3116,74 +3302,6 @@ static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring) WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp); } -static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev) -{ - struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; - uint64_t queue_mask = 0; - int r, i; - - for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) { - if (!test_bit(i, adev->gfx.mec.queue_bitmap)) - continue; - - /* This situation may be hit in the future if a new HW - * generation exposes more than 64 queues. If so, the - * definition of queue_mask needs updating */ - if (WARN_ON(i >= (sizeof(queue_mask)*8))) { - DRM_ERROR("Invalid KCQ enabled: %d\n", i); - break; - } - - queue_mask |= (1ull << i); - } - - r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 8); - if (r) { - DRM_ERROR("Failed to lock KIQ (%d).\n", r); - return r; - } - - /* set resources */ - amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6)); - amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) | - PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */ - amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */ - amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */ - amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */ - amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */ - amdgpu_ring_write(kiq_ring, 0); /* oac mask */ - amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */ - for (i = 0; i < adev->gfx.num_compute_rings; i++) { - struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; - uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj); - uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); - - amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5)); - /* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/ - amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ - PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */ - PACKET3_MAP_QUEUES_VMID(0) | /* VMID */ - PACKET3_MAP_QUEUES_QUEUE(ring->queue) | - PACKET3_MAP_QUEUES_PIPE(ring->pipe) | - PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 
0 : 1)) | - PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */ - PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */ - PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */ - PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */ - amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index)); - amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr)); - amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr)); - amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr)); - amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); - } - - r = amdgpu_ring_test_helper(kiq_ring); - if (r) - DRM_ERROR("KCQ enable failed\n"); - - return r; -} - static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; @@ -3320,8 +3438,11 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring) tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3); mqd->cp_hqd_ib_control = tmp; - /* activate the queue */ - mqd->cp_hqd_active = 1; + /* map_queues packet doesn't need activate the queue, + * so only kiq need set this field. + */ + if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) + mqd->cp_hqd_active = 1; return 0; } @@ -3590,7 +3711,7 @@ static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev) goto done; } - r = gfx_v9_0_kiq_kcq_enable(adev); + r = amdgpu_gfx_enable_kcq(adev); done: return r; } @@ -3647,6 +3768,23 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev) return 0; } +static void gfx_v9_0_init_tcp_config(struct amdgpu_device *adev) +{ + u32 tmp; + + if (adev->asic_type != CHIP_ARCTURUS) + return; + + tmp = RREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG); + tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE64KHASH, + adev->df.hash_status.hash_64k); + tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE2MHASH, + adev->df.hash_status.hash_2m); + tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE1GHASH, + adev->df.hash_status.hash_1g); + WREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG, tmp); +} + static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable) { if (adev->asic_type != CHIP_ARCTURUS) @@ -3664,6 +3802,8 @@ static int gfx_v9_0_hw_init(void *handle) gfx_v9_0_constants_init(adev); + gfx_v9_0_init_tcp_config(adev); + r = adev->gfx.rlc.funcs->resume(adev); if (r) return r; @@ -3675,36 +3815,6 @@ static int gfx_v9_0_hw_init(void *handle) return r; } -static int gfx_v9_0_kcq_disable(struct amdgpu_device *adev) -{ - int r, i; - struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; - - r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings); - if (r) - DRM_ERROR("Failed to lock KIQ (%d).\n", r); - - for (i = 0; i < adev->gfx.num_compute_rings; i++) { - struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; - - amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4)); - amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ - PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */ - PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) | - PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) | - PACKET3_UNMAP_QUEUES_NUM_QUEUES(1)); - amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index)); - amdgpu_ring_write(kiq_ring, 0); - amdgpu_ring_write(kiq_ring, 0); - amdgpu_ring_write(kiq_ring, 0); - } - r = amdgpu_ring_test_helper(kiq_ring); - if (r) - DRM_ERROR("KCQ disable failed\n"); - - return r; -} - static int gfx_v9_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -3716,7 +3826,7 @@ static int gfx_v9_0_hw_fini(void *handle) /* DF freeze 
and kcq disable will fail */ if (!amdgpu_ras_intr_triggered()) /* disable KCQ to avoid CPC touch memory not valid anymore */ - gfx_v9_0_kcq_disable(adev); + amdgpu_gfx_disable_kcq(adev); if (amdgpu_sriov_vf(adev)) { gfx_v9_0_cp_gfx_enable(adev, false); @@ -3933,46 +4043,61 @@ static const u32 sgpr_init_compute_shader[] = 0xbe800080, 0xbf810000, }; +/* When below register arrays changed, please update gpr_reg_size, + and sec_ded_counter_reg_size in function gfx_v9_0_do_edc_gpr_workarounds, + to cover all gfx9 ASICs */ static const struct soc15_reg_entry vgpr_init_regs[] = { - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff }, - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff }, - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff }, - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff }, { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 }, { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 }, { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 }, { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 }, { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x3f }, { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 }, /* 64KB LDS */ + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff }, }; static const struct soc15_reg_entry sgpr1_init_regs[] = { - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x000000ff }, - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x000000ff }, - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x000000ff }, - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x000000ff }, { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 }, { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 }, { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 }, { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 }, { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x000000ff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x000000ff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x000000ff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x000000ff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x000000ff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x000000ff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x000000ff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x000000ff }, }; static const struct soc15_reg_entry sgpr2_init_regs[] = { - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x0000ff00 }, - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x0000ff00 }, - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x0000ff00 }, - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x0000ff00 }, { 
SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 }, { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 }, { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 }, { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 }, { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x0000ff00 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x0000ff00 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x0000ff00 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x0000ff00 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x0000ff00 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x0000ff00 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x0000ff00 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x0000ff00 }, }; -static const struct soc15_reg_entry sec_ded_counter_registers[] = { +static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = { { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1}, { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1}, { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1}, @@ -4006,8 +4131,6 @@ static const struct soc15_reg_entry sec_ded_counter_registers[] = { { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16}, { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2}, { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6}, - { SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), 0, 1, 1}, - { SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_EDC_COUNTER), 0, 1, 1}, { SOC15_REG_ENTRY(HDP, 0, mmHDP_EDC_CNT), 0, 1, 1}, }; @@ -4063,10 +4186,16 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) struct amdgpu_ring *ring = &adev->gfx.compute_ring[0]; struct amdgpu_ib ib; struct dma_fence *f = NULL; - int r, i, j, k; + int r, i; unsigned total_size, vgpr_offset, sgpr_offset; u64 gpu_addr; + int compute_dim_x = adev->gfx.config.max_shader_engines * + adev->gfx.config.max_cu_per_sh * + adev->gfx.config.max_sh_per_se; + int sgpr_work_group_size = 5; + int gpr_reg_size = compute_dim_x / 16 + 6; + /* only support when RAS is enabled */ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) return 0; @@ -4076,11 +4205,11 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) return 0; total_size = - ((ARRAY_SIZE(vgpr_init_regs) * 3) + 4 + 5 + 2) * 4; + (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* VGPRS */ total_size += - ((ARRAY_SIZE(sgpr1_init_regs) * 3) + 4 + 5 + 2) * 4; + (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS1 */ total_size += - ((ARRAY_SIZE(sgpr2_init_regs) * 3) + 4 + 5 + 2) * 4; + (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS2 */ total_size = ALIGN(total_size, 256); vgpr_offset = total_size; total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256); @@ -4107,7 +4236,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) /* VGPR */ /* write the register state for the compute dispatch */ - for (i = 0; i < ARRAY_SIZE(vgpr_init_regs); i++) { + for (i = 0; i < gpr_reg_size; i++) { ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1); ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs[i]) - PACKET3_SET_SH_REG_START; @@ -4123,7 +4252,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) /* write dispatch packet */ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3); - ib.ptr[ib.length_dw++] = 0x40*2; /* x */ + ib.ptr[ib.length_dw++] = 
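/* The dispatch X dimension is now derived from the real CU topology
 * (max_shader_engines * max_cu_per_sh * max_sh_per_se, computed above as
 * compute_dim_x) instead of the old hard-coded 0x40*2, so the GPR-clearing
 * shader covers every CU on all gfx9 variants. */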
compute_dim_x; /* x */ ib.ptr[ib.length_dw++] = 1; /* y */ ib.ptr[ib.length_dw++] = 1; /* z */ ib.ptr[ib.length_dw++] = @@ -4135,7 +4264,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) /* SGPR1 */ /* write the register state for the compute dispatch */ - for (i = 0; i < ARRAY_SIZE(sgpr1_init_regs); i++) { + for (i = 0; i < gpr_reg_size; i++) { ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1); ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr1_init_regs[i]) - PACKET3_SET_SH_REG_START; @@ -4151,7 +4280,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) /* write dispatch packet */ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3); - ib.ptr[ib.length_dw++] = 0xA0*2; /* x */ + ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */ ib.ptr[ib.length_dw++] = 1; /* y */ ib.ptr[ib.length_dw++] = 1; /* z */ ib.ptr[ib.length_dw++] = @@ -4163,7 +4292,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) /* SGPR2 */ /* write the register state for the compute dispatch */ - for (i = 0; i < ARRAY_SIZE(sgpr2_init_regs); i++) { + for (i = 0; i < gpr_reg_size; i++) { ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1); ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr2_init_regs[i]) - PACKET3_SET_SH_REG_START; @@ -4179,7 +4308,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) /* write dispatch packet */ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3); - ib.ptr[ib.length_dw++] = 0xA0*2; /* x */ + ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */ ib.ptr[ib.length_dw++] = 1; /* y */ ib.ptr[ib.length_dw++] = 1; /* z */ ib.ptr[ib.length_dw++] = @@ -4203,18 +4332,17 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) goto fail; } - /* read back registers to clear the counters */ - mutex_lock(&adev->grbm_idx_mutex); - for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) { - for (j = 0; j < sec_ded_counter_registers[i].se_num; j++) { - for (k = 0; k < sec_ded_counter_registers[i].instance; k++) { - gfx_v9_0_select_se_sh(adev, j, 0x0, k); - RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i])); - } - } + switch (adev->asic_type) + { + case CHIP_VEGA20: + gfx_v9_0_clear_ras_edc_counter(adev); + break; + case CHIP_ARCTURUS: + gfx_v9_4_clear_ras_edc_counter(adev); + break; + default: + break; } - WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000); - mutex_unlock(&adev->grbm_idx_mutex); fail: amdgpu_ib_free(adev, &ib, NULL); @@ -4232,6 +4360,7 @@ static int gfx_v9_0_early_init(void *handle) else adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS; adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS; + gfx_v9_0_set_kiq_pm4_funcs(adev); gfx_v9_0_set_ring_funcs(adev); gfx_v9_0_set_irq_funcs(adev); gfx_v9_0_set_gds_init(adev); @@ -4576,7 +4705,7 @@ static int gfx_v9_0_set_powergating_state(void *handle, enum amd_powergating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_PG_STATE_GATE) ? true : false; + bool enable = (state == AMD_PG_STATE_GATE); switch (adev->asic_type) { case CHIP_RAVEN: @@ -4638,7 +4767,7 @@ static int gfx_v9_0_set_clockgating_state(void *handle, case CHIP_ARCTURUS: case CHIP_RENOIR: gfx_v9_0_update_gfx_clock_gating(adev, - state == AMD_CG_STATE_GATE ? 
true : false); + state == AMD_CG_STATE_GATE); break; default: break; @@ -4655,12 +4784,12 @@ static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags) *flags = 0; /* AMD_CG_SUPPORT_GFX_MGCG */ - data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE)); if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK)) *flags |= AMD_CG_SUPPORT_GFX_MGCG; /* AMD_CG_SUPPORT_GFX_CGCG */ - data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL); + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL)); if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK) *flags |= AMD_CG_SUPPORT_GFX_CGCG; @@ -4669,18 +4798,18 @@ static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags) *flags |= AMD_CG_SUPPORT_GFX_CGLS; /* AMD_CG_SUPPORT_GFX_RLC_LS */ - data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL); + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_MEM_SLP_CNTL)); if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS; /* AMD_CG_SUPPORT_GFX_CP_LS */ - data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL); + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmCP_MEM_SLP_CNTL)); if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS; if (adev->asic_type != CHIP_ARCTURUS) { /* AMD_CG_SUPPORT_GFX_3D_CGCG */ - data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D); + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D)); if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK) *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG; @@ -5151,6 +5280,7 @@ static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) { struct amdgpu_device *adev = ring->adev; + struct amdgpu_kiq *kiq = &adev->gfx.kiq; amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); amdgpu_ring_write(ring, 0 | /* src: register*/ @@ -5159,9 +5289,9 @@ static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) amdgpu_ring_write(ring, reg); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + - adev->virt.reg_val_offs * 4)); + kiq->reg_val_offs * 4)); amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + - adev->virt.reg_val_offs * 4)); + kiq->reg_val_offs * 4)); } static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, @@ -5483,7 +5613,7 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev, } -static const struct soc15_ras_field_entry gc_ras_fields_vg20[] = { +static const struct soc15_ras_field_entry gfx_v9_0_ras_fields[] = { { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT), SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT) @@ -5931,7 +6061,7 @@ static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev, int ret; struct ta_ras_trigger_error_input block_info = { 0 }; - if (adev->asic_type != CHIP_VEGA20) + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) return -EINVAL; if (info->head.sub_block_index >= ARRAY_SIZE(ras_gfx_subblocks)) @@ -6056,7 +6186,7 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev, WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255); WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0); - for (i = 0; i < 16; i++) { + for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) { WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i); data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT); @@ -6075,7 +6205,7 @@ static int 
gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev, } } - for (i = 0; i < 7; i++) { + for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) { WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i); data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT); @@ -6096,7 +6226,7 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev, } } - for (i = 0; i < 4; i++) { + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) { WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i); data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT); @@ -6108,7 +6238,7 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev, } } - for (i = 0; i < 32; i++) { + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) { WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i); data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT); @@ -6135,36 +6265,36 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev, return 0; } -static int __get_ras_error_count(const struct soc15_reg_entry *reg, +static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg, uint32_t se_id, uint32_t inst_id, uint32_t value, uint32_t *sec_count, uint32_t *ded_count) { uint32_t i; uint32_t sec_cnt, ded_cnt; - for (i = 0; i < ARRAY_SIZE(gc_ras_fields_vg20); i++) { - if(gc_ras_fields_vg20[i].reg_offset != reg->reg_offset || - gc_ras_fields_vg20[i].seg != reg->seg || - gc_ras_fields_vg20[i].inst != reg->inst) + for (i = 0; i < ARRAY_SIZE(gfx_v9_0_ras_fields); i++) { + if(gfx_v9_0_ras_fields[i].reg_offset != reg->reg_offset || + gfx_v9_0_ras_fields[i].seg != reg->seg || + gfx_v9_0_ras_fields[i].inst != reg->inst) continue; sec_cnt = (value & - gc_ras_fields_vg20[i].sec_count_mask) >> - gc_ras_fields_vg20[i].sec_count_shift; + gfx_v9_0_ras_fields[i].sec_count_mask) >> + gfx_v9_0_ras_fields[i].sec_count_shift; if (sec_cnt) { DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n", - gc_ras_fields_vg20[i].name, + gfx_v9_0_ras_fields[i].name, se_id, inst_id, sec_cnt); *sec_count += sec_cnt; } ded_cnt = (value & - gc_ras_fields_vg20[i].ded_count_mask) >> - gc_ras_fields_vg20[i].ded_count_shift; + gfx_v9_0_ras_fields[i].ded_count_mask) >> + gfx_v9_0_ras_fields[i].ded_count_shift; if (ded_cnt) { DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n", - gc_ras_fields_vg20[i].name, + gfx_v9_0_ras_fields[i].name, se_id, inst_id, ded_cnt); *ded_count += ded_cnt; @@ -6174,6 +6304,58 @@ static int __get_ras_error_count(const struct soc15_reg_entry *reg, return 0; } +static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev) +{ + int i, j, k; + + /* read back registers to clear the counters */ + mutex_lock(&adev->grbm_idx_mutex); + for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) { + for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) { + for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) { + gfx_v9_0_select_se_sh(adev, j, 0x0, k); + RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i])); + } + } + } + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000); + mutex_unlock(&adev->grbm_idx_mutex); + + WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0); + WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0); + + for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) { + 
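/* These EDC counters are read-to-clear: write the instance number to the
 * *_INDEX register to select a memory, then read the matching *_CNT
 * register to reset it. */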
WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i); + RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT); + } + + for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) { + WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i); + RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT); + } + + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) { + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i); + RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT); + } + + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) { + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i); + RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT); + } + + WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255); +} + static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) { @@ -6182,7 +6364,7 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, uint32_t i, j, k; uint32_t reg_value; - if (adev->asic_type != CHIP_VEGA20) + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) return -EINVAL; err_data->ue_count = 0; @@ -6190,14 +6372,14 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, mutex_lock(&adev->grbm_idx_mutex); - for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) { - for (j = 0; j < sec_ded_counter_registers[i].se_num; j++) { - for (k = 0; k < sec_ded_counter_registers[i].instance; k++) { + for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) { + for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) { + for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) { gfx_v9_0_select_se_sh(adev, j, 0, k); reg_value = - RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i])); + RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i])); if (reg_value) - __get_ras_error_count(&sec_ded_counter_registers[i], + gfx_v9_0_ras_error_count(&gfx_v9_0_edc_counter_regs[i], j, k, reg_value, &sec_count, &ded_count); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c new file mode 100644 index 0000000000000000000000000000000000000000..f099f13d7f1e93d95a0fdcc847cb6b6c0b907270 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c @@ -0,0 +1,978 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include + +#include "amdgpu.h" +#include "amdgpu_gfx.h" +#include "soc15.h" +#include "soc15d.h" +#include "amdgpu_atomfirmware.h" +#include "amdgpu_pm.h" + +#include "gc/gc_9_4_1_offset.h" +#include "gc/gc_9_4_1_sh_mask.h" +#include "soc15_common.h" + +#include "gfx_v9_4.h" +#include "amdgpu_ras.h" + +static const struct soc15_reg_entry gfx_v9_4_edc_counter_regs[] = { + /* CPC */ + { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1 }, + /* DC */ + { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1, 1 }, + /* CPF */ + { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1, 1 }, + /* GDS */ + { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_DED), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, 1 }, + /* SPI */ + { SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 0, 4, 1 }, + /* SQ */ + { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16 }, + { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 4, 16 }, + { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 4, 16 }, + { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 4, 16 }, + /* SQC */ + { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 0, 4, 6 }, + { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6 }, + { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6 }, + { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), 0, 4, 6 }, + /* TA */ + { SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 0, 4, 16 }, + /* TCA */ + { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2 }, + /* TCC */ + { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16 }, + { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16 }, + /* TCI */ + { SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 1, 72 }, + /* TCP */ + { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16 }, + { SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16 }, + { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16 }, + /* TD */ + { SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16 }, + /* GCEA */ + { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 1, 32 }, + { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 1, 32 }, + { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 1, 32 }, + /* RLC */ + { SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), 0, 1, 1 }, +}; + +static void gfx_v9_4_select_se_sh(struct amdgpu_device *adev, u32 se_num, + u32 sh_num, u32 instance) +{ + u32 data; + + if (instance == 0xffffffff) + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, + INSTANCE_BROADCAST_WRITES, 1); + else + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, + instance); + + if (se_num == 0xffffffff) + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, + 1); + else + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); + + if (sh_num == 0xffffffff) + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, + 1); + else + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); + + WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data); +} + +static const struct soc15_ras_field_entry gfx_v9_4_ras_fields[] = { + /* CPC */ + { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), + SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT), + SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT) }, + { "CPC_UCODE", 
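/* Each entry pairs a sub-block name with its EDC counter register and the
 * SEC (correctable) and DED (uncorrectable) bit-fields inside it. */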
SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), + SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT), + SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT) }, + { "CPC_DC_STATE_RAM_ME1", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), + SOC15_REG_FIELD(DC_EDC_STATE_CNT, SEC_COUNT_ME1), + SOC15_REG_FIELD(DC_EDC_STATE_CNT, DED_COUNT_ME1) }, + { "CPC_DC_CSINVOC_RAM_ME1", + SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), + SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, SEC_COUNT_ME1), + SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, DED_COUNT_ME1) }, + { "CPC_DC_RESTORE_RAM_ME1", + SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), + SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, SEC_COUNT_ME1), + SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, DED_COUNT_ME1) }, + { "CPC_DC_CSINVOC_RAM1_ME1", + SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), + SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, SEC_COUNT1_ME1), + SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, DED_COUNT1_ME1) }, + { "CPC_DC_RESTORE_RAM1_ME1", + SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), + SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, SEC_COUNT1_ME1), + SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, DED_COUNT1_ME1) }, + + /* CPF */ + { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), + SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, SEC_COUNT_ME2), + SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, DED_COUNT_ME2) }, + { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), + SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, SEC_COUNT_ME1), + SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, DED_COUNT_ME1) }, + { "CPF_TCIU_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), + SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT), + SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT) }, + + /* GDS */ + { "GDS_GRBM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), + SOC15_REG_FIELD(GDS_EDC_GRBM_CNT, SEC), + SOC15_REG_FIELD(GDS_EDC_GRBM_CNT, DED) }, + { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), + SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED) }, + { "GDS_PHY_CMD_RAM_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED) }, + { "GDS_PHY_DATA_RAM_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_DED) }, + { "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED) }, + { "GDS_ME1_PIPE0_PIPE_MEM", + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED) }, + { "GDS_ME1_PIPE1_PIPE_MEM", + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED) }, + { "GDS_ME1_PIPE2_PIPE_MEM", + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED) }, + { "GDS_ME1_PIPE3_PIPE_MEM", + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED) }, + + /* SPI */ + { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SEC_COUNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_DED_COUNT) }, + { "SPI_GDS_EXPREQ", SOC15_REG_ENTRY(GC, 0, 
mmSPI_EDC_CNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_GDS_EXPREQ_SEC_COUNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_GDS_EXPREQ_DED_COUNT) }, + { "SPI_WB_GRANT_30", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_30_SEC_COUNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_30_DED_COUNT) }, + { "SPI_WB_GRANT_61", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_61_SEC_COUNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_61_DED_COUNT) }, + { "SPI_LIFE_CNT", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_LIFE_CNT_SEC_COUNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_LIFE_CNT_DED_COUNT) }, + + /* SQ */ + { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT) }, + { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT) }, + { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT) }, + { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT) }, + { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT) }, + { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT) }, + { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT) }, + + /* SQC */ + { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT) }, + { "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT) }, + { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT) }, + { "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT) }, + { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT) }, + { "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT) }, + { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT) }, + { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT) }, + { "SQC_INST_BANKA_UTCL1_MISS_FIFO", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, + INST_BANKA_UTCL1_MISS_FIFO_SEC_COUNT), + 
SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, + INST_BANKA_UTCL1_MISS_FIFO_DED_COUNT) }, + { "SQC_INST_BANKA_MISS_FIFO", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKA_MISS_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, + INST_BANKA_MISS_FIFO_DED_COUNT) }, + { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT) }, + { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT) }, + { "SQC_DATA_BANKA_HIT_FIFO", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_HIT_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_HIT_FIFO_DED_COUNT) }, + { "SQC_DATA_BANKA_MISS_FIFO", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_MISS_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, + DATA_BANKA_MISS_FIFO_DED_COUNT) }, + { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT) }, + { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT) }, + { "SQC_INST_BANKB_UTCL1_MISS_FIFO", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, + INST_BANKB_UTCL1_MISS_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, + INST_BANKB_UTCL1_MISS_FIFO_DED_COUNT) }, + { "SQC_INST_BANKB_MISS_FIFO", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKB_MISS_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, + INST_BANKB_MISS_FIFO_DED_COUNT) }, + { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT) }, + { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT) }, + { "SQC_DATA_BANKB_HIT_FIFO", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_HIT_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_HIT_FIFO_DED_COUNT) }, + { "SQC_DATA_BANKB_MISS_FIFO", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_MISS_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, + DATA_BANKB_MISS_FIFO_DED_COUNT) }, + { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT) }, + + /* TA */ + { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT) }, + { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SEC_COUNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_DED_COUNT) }, + { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SEC_COUNT), + 
SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_DED_COUNT) }, + { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_DED_COUNT) }, + { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SEC_COUNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_DED_COUNT) }, + + /* TCA */ + { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), + SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_DED_COUNT) }, + { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), + SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_DED_COUNT) }, + + /* TCC */ + { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT) }, + { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT) }, + { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT) }, + { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT) }, + { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_DEC_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_DEC_DED_COUNT) }, + { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_TRANSFER_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_TRANSFER_DED_COUNT) }, + { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_DATA_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_DATA_DED_COUNT) }, + { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_CONTROL_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_CONTROL_DED_COUNT) }, + { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, UC_ATOMIC_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, UC_ATOMIC_FIFO_DED_COUNT) }, + { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_DED_COUNT) }, + { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_DED_COUNT) }, + { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT) }, + { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_DED_COUNT) }, + { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_DED_COUNT) }, + { "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_NEXT_RAM_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_NEXT_RAM_DED_COUNT) }, + + /* TCI */ + { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, 
mmTCI_EDC_CNT), + SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SEC_COUNT), + SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_DED_COUNT) }, + + /* TCP */ + { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT) }, + { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT) }, + { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_DED_COUNT) }, + { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_DED_COUNT) }, + { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT), 0, 0 }, + { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT) }, + { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT) }, + + /* TD */ + { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT), + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT) }, + { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT), + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT) }, + { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), + SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_DED_COUNT) }, + + /* EA */ + { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT) }, + { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT) }, + { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT) }, + { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT) }, + { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT) }, + { "EA_GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT) }, + { "EA_GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT) }, + { "EA_GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT) }, + { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), 0, 0 }, + { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, 
DRAMRD_PAGEMEM_DED_COUNT) }, + { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), 0, 0 }, + { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT) }, + { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT), 0, 0 }, + { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, IORD_CMDMEM_DED_COUNT) }, + { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT), 0, 0 }, + { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, IOWR_CMDMEM_DED_COUNT) }, + { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT), 0, 0 }, + { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, IOWR_DATAMEM_DED_COUNT) }, + { "EA_GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), 0, 0 }, + { "EA_GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT) }, + { "EA_GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), 0, 0 }, + { "EA_GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT) }, + { "EA_MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_DED_COUNT) }, + { "EA_MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_DED_COUNT) }, + { "EA_MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_DED_COUNT) }, + { "EA_MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_DED_COUNT) }, + { "EA_MAM_A0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A0MEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A0MEM_DED_COUNT) }, + { "EA_MAM_A1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A1MEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A1MEM_DED_COUNT) }, + { "EA_MAM_A2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A2MEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A2MEM_DED_COUNT) }, + { "EA_MAM_A3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A3MEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A3MEM_DED_COUNT) }, + { "EA_MAM_AFMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, MAM_AFMEM_SEC_COUNT), 0, 0 }, + { "EA_MAM_AFMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_AFMEM_DED_COUNT) }, + + /* RLC */ + { "RLCG_INSTR_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_INSTR_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_INSTR_RAM_DED_COUNT) }, + { "RLCG_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_SCRATCH_RAM_SEC_COUNT), + 
SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_SCRATCH_RAM_DED_COUNT) }, + { "RLCV_INSTR_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_INSTR_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_INSTR_RAM_DED_COUNT) }, + { "RLCV_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_SCRATCH_RAM_DED_COUNT) }, + { "RLC_TCTAG_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_TCTAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_TCTAG_RAM_DED_COUNT) }, + { "RLC_SPM_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SPM_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SPM_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SRM_DATA_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_DATA_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_DATA_RAM_DED_COUNT) }, + { "RLC_SRM_ADDR_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_ADDR_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_ADDR_RAM_DED_COUNT) }, + { "RLC_SPM_SE0_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE0_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE0_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SPM_SE1_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE1_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE1_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SPM_SE2_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE2_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE2_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SPM_SE3_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE3_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE3_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SPM_SE4_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE4_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE4_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SPM_SE5_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE5_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE5_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SPM_SE6_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE6_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE6_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SPM_SE7_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE7_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE7_SCRATCH_RAM_DED_COUNT) }, +}; + +static const char * const vml2_mems[] = { + "UTC_VML2_BANK_CACHE_0_BIGK_MEM0", + "UTC_VML2_BANK_CACHE_0_BIGK_MEM1", + "UTC_VML2_BANK_CACHE_0_4K_MEM0", + "UTC_VML2_BANK_CACHE_0_4K_MEM1", + "UTC_VML2_BANK_CACHE_1_BIGK_MEM0", + "UTC_VML2_BANK_CACHE_1_BIGK_MEM1", + "UTC_VML2_BANK_CACHE_1_4K_MEM0", + "UTC_VML2_BANK_CACHE_1_4K_MEM1", + "UTC_VML2_BANK_CACHE_2_BIGK_MEM0", + "UTC_VML2_BANK_CACHE_2_BIGK_MEM1", + "UTC_VML2_BANK_CACHE_2_4K_MEM0", + "UTC_VML2_BANK_CACHE_2_4K_MEM1", + "UTC_VML2_BANK_CACHE_3_BIGK_MEM0", + "UTC_VML2_BANK_CACHE_3_BIGK_MEM1", + "UTC_VML2_BANK_CACHE_3_4K_MEM0", + "UTC_VML2_BANK_CACHE_3_4K_MEM1", + "UTC_VML2_IFIFO_GROUP0", + "UTC_VML2_IFIFO_GROUP1", + "UTC_VML2_IFIFO_GROUP2", + 
"UTC_VML2_IFIFO_GROUP3", + "UTC_VML2_IFIFO_GROUP4", + "UTC_VML2_IFIFO_GROUP5", + "UTC_VML2_IFIFO_GROUP6", + "UTC_VML2_IFIFO_GROUP7", + "UTC_VML2_IFIFO_GROUP8", + "UTC_VML2_IFIFO_GROUP9", + "UTC_VML2_IFIFO_GROUP10", + "UTC_VML2_IFIFO_GROUP11", + "UTC_VML2_IFIFO_GROUP12", + "UTC_VML2_IFIFO_GROUP13", + "UTC_VML2_IFIFO_GROUP14", + "UTC_VML2_IFIFO_GROUP15", + "UTC_VML2_IFIFO_GROUP16", + "UTC_VML2_IFIFO_GROUP17", + "UTC_VML2_IFIFO_GROUP18", + "UTC_VML2_IFIFO_GROUP19", + "UTC_VML2_IFIFO_GROUP20", + "UTC_VML2_IFIFO_GROUP21", + "UTC_VML2_IFIFO_GROUP22", + "UTC_VML2_IFIFO_GROUP23", + "UTC_VML2_IFIFO_GROUP24", +}; + +static const char * const vml2_walker_mems[] = { + "UTC_VML2_CACHE_PDE0_MEM0", + "UTC_VML2_CACHE_PDE0_MEM1", + "UTC_VML2_CACHE_PDE1_MEM0", + "UTC_VML2_CACHE_PDE1_MEM1", + "UTC_VML2_CACHE_PDE2_MEM0", + "UTC_VML2_CACHE_PDE2_MEM1", + "UTC_VML2_RDIF_ARADDRS", + "UTC_VML2_RDIF_LOG_FIFO", + "UTC_VML2_QUEUE_REQ", + "UTC_VML2_QUEUE_RET", +}; + +static const char * const utcl2_router_mems[] = { + "UTCL2_ROUTER_GROUP0_VML2_REQ_FIFO0", + "UTCL2_ROUTER_GROUP1_VML2_REQ_FIFO1", + "UTCL2_ROUTER_GROUP2_VML2_REQ_FIFO2", + "UTCL2_ROUTER_GROUP3_VML2_REQ_FIFO3", + "UTCL2_ROUTER_GROUP4_VML2_REQ_FIFO4", + "UTCL2_ROUTER_GROUP5_VML2_REQ_FIFO5", + "UTCL2_ROUTER_GROUP6_VML2_REQ_FIFO6", + "UTCL2_ROUTER_GROUP7_VML2_REQ_FIFO7", + "UTCL2_ROUTER_GROUP8_VML2_REQ_FIFO8", + "UTCL2_ROUTER_GROUP9_VML2_REQ_FIFO9", + "UTCL2_ROUTER_GROUP10_VML2_REQ_FIFO10", + "UTCL2_ROUTER_GROUP11_VML2_REQ_FIFO11", + "UTCL2_ROUTER_GROUP12_VML2_REQ_FIFO12", + "UTCL2_ROUTER_GROUP13_VML2_REQ_FIFO13", + "UTCL2_ROUTER_GROUP14_VML2_REQ_FIFO14", + "UTCL2_ROUTER_GROUP15_VML2_REQ_FIFO15", + "UTCL2_ROUTER_GROUP16_VML2_REQ_FIFO16", + "UTCL2_ROUTER_GROUP17_VML2_REQ_FIFO17", + "UTCL2_ROUTER_GROUP18_VML2_REQ_FIFO18", + "UTCL2_ROUTER_GROUP19_VML2_REQ_FIFO19", + "UTCL2_ROUTER_GROUP20_VML2_REQ_FIFO20", + "UTCL2_ROUTER_GROUP21_VML2_REQ_FIFO21", + "UTCL2_ROUTER_GROUP22_VML2_REQ_FIFO22", + "UTCL2_ROUTER_GROUP23_VML2_REQ_FIFO23", + "UTCL2_ROUTER_GROUP24_VML2_REQ_FIFO24", +}; + +static const char * const atc_l2_cache_2m_mems[] = { + "UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM", + "UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM", + "UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM", + "UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM", +}; + +static const char * const atc_l2_cache_4k_mems[] = { + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6", + 
"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7", +}; + +static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev, + struct ras_err_data *err_data) +{ + uint32_t i, data; + uint32_t sec_count, ded_count; + + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL, 0); + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL, 0); + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL, 0); + + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL, 0); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL, 0); + + for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) { + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, i); + data = RREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL); + + sec_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, SEC_COUNT); + if (sec_count) { + DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, + vml2_mems[i], sec_count); + err_data->ce_count += sec_count; + } + + ded_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, DED_COUNT); + if (ded_count) { + DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, + vml2_mems[i], ded_count); + err_data->ue_count += ded_count; + } + } + + for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) { + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, i); + data = RREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL); + + sec_count = REG_GET_FIELD(data, VML2_WALKER_MEM_ECC_CNTL, + SEC_COUNT); + if (sec_count) { + DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, + vml2_walker_mems[i], sec_count); + err_data->ce_count += sec_count; + } + + ded_count = REG_GET_FIELD(data, VML2_WALKER_MEM_ECC_CNTL, + DED_COUNT); + if (ded_count) { + DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, + vml2_walker_mems[i], ded_count); + err_data->ue_count += ded_count; + } + } + + for (i = 0; i < ARRAY_SIZE(utcl2_router_mems); i++) { + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, i); + data = RREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL); + + sec_count = REG_GET_FIELD(data, UTCL2_MEM_ECC_CNTL, SEC_COUNT); + if (sec_count) { + DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, + utcl2_router_mems[i], sec_count); + err_data->ce_count += sec_count; + } + + ded_count = REG_GET_FIELD(data, UTCL2_MEM_ECC_CNTL, DED_COUNT); + if (ded_count) { + DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, + utcl2_router_mems[i], ded_count); + err_data->ue_count += ded_count; + } + } + + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) { + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, i); + data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL); + + sec_count = REG_GET_FIELD(data, ATC_L2_CACHE_2M_DSM_CNTL, + SEC_COUNT); + if (sec_count) { + DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, + atc_l2_cache_2m_mems[i], sec_count); + err_data->ce_count += sec_count; + } + + ded_count = REG_GET_FIELD(data, ATC_L2_CACHE_2M_DSM_CNTL, + DED_COUNT); + if (ded_count) { + DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, + atc_l2_cache_2m_mems[i], ded_count); + err_data->ue_count += ded_count; + } + } + + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) { + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, i); + data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL); + + sec_count = REG_GET_FIELD(data, ATC_L2_CACHE_4K_DSM_CNTL, + SEC_COUNT); + if (sec_count) { + DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, + atc_l2_cache_4k_mems[i], sec_count); + err_data->ce_count += sec_count; + } + + 
ded_count = REG_GET_FIELD(data, ATC_L2_CACHE_4K_DSM_CNTL, + DED_COUNT); + if (ded_count) { + DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, + atc_l2_cache_4k_mems[i], ded_count); + err_data->ue_count += ded_count; + } + } + + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255); + + return 0; +} + +static int gfx_v9_4_ras_error_count(const struct soc15_reg_entry *reg, + uint32_t se_id, uint32_t inst_id, + uint32_t value, uint32_t *sec_count, + uint32_t *ded_count) +{ + uint32_t i; + uint32_t sec_cnt, ded_cnt; + + for (i = 0; i < ARRAY_SIZE(gfx_v9_4_ras_fields); i++) { + if (gfx_v9_4_ras_fields[i].reg_offset != reg->reg_offset || + gfx_v9_4_ras_fields[i].seg != reg->seg || + gfx_v9_4_ras_fields[i].inst != reg->inst) + continue; + + sec_cnt = (value & gfx_v9_4_ras_fields[i].sec_count_mask) >> + gfx_v9_4_ras_fields[i].sec_count_shift; + if (sec_cnt) { + DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n", + gfx_v9_4_ras_fields[i].name, se_id, inst_id, + sec_cnt); + *sec_count += sec_cnt; + } + + ded_cnt = (value & gfx_v9_4_ras_fields[i].ded_count_mask) >> + gfx_v9_4_ras_fields[i].ded_count_shift; + if (ded_cnt) { + DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n", + gfx_v9_4_ras_fields[i].name, se_id, inst_id, + ded_cnt); + *ded_count += ded_cnt; + } + } + + return 0; +} + +int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + uint32_t sec_count = 0, ded_count = 0; + uint32_t i, j, k; + uint32_t reg_value; + + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) + return -EINVAL; + + err_data->ue_count = 0; + err_data->ce_count = 0; + + mutex_lock(&adev->grbm_idx_mutex); + + for (i = 0; i < ARRAY_SIZE(gfx_v9_4_edc_counter_regs); i++) { + for (j = 0; j < gfx_v9_4_edc_counter_regs[i].se_num; j++) { + for (k = 0; k < gfx_v9_4_edc_counter_regs[i].instance; + k++) { + gfx_v9_4_select_se_sh(adev, j, 0, k); + reg_value = RREG32(SOC15_REG_ENTRY_OFFSET( + gfx_v9_4_edc_counter_regs[i])); + if (reg_value) + gfx_v9_4_ras_error_count( + &gfx_v9_4_edc_counter_regs[i], + j, k, reg_value, &sec_count, + &ded_count); + } + } + } + + err_data->ce_count += sec_count; + err_data->ue_count += ded_count; + + gfx_v9_4_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); + + gfx_v9_4_query_utc_edc_status(adev, err_data); + + return 0; +} + +void gfx_v9_4_clear_ras_edc_counter(struct amdgpu_device *adev) +{ + int i, j, k; + + mutex_lock(&adev->grbm_idx_mutex); + for (i = 0; i < ARRAY_SIZE(gfx_v9_4_edc_counter_regs); i++) { + for (j = 0; j < gfx_v9_4_edc_counter_regs[i].se_num; j++) { + for (k = 0; k < gfx_v9_4_edc_counter_regs[i].instance; + k++) { + gfx_v9_4_select_se_sh(adev, j, 0x0, k); + RREG32(SOC15_REG_ENTRY_OFFSET( + gfx_v9_4_edc_counter_regs[i])); + } + } + } + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000); + mutex_unlock(&adev->grbm_idx_mutex); + + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL, 0); + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL, 0); + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL, 0); + + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255); + 
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL, 0); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL, 0); + + for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) { + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, i); + RREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL); + } + + for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) { + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, i); + RREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL); + } + + for (i = 0; i < ARRAY_SIZE(utcl2_router_mems); i++) { + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, i); + RREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL); + } + + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) { + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, i); + RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL); + } + + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) { + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, i); + RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL); + } + + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255); +} + +int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, void *inject_if) +{ + struct ras_inject_if *info = (struct ras_inject_if *)inject_if; + int ret; + struct ta_ras_trigger_error_input block_info = { 0 }; + + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) + return -EINVAL; + + block_info.block_id = amdgpu_ras_block_to_ta(info->head.block); + block_info.sub_block_index = info->head.sub_block_index; + block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type); + block_info.address = info->address; + block_info.value = info->value; + + mutex_lock(&adev->grbm_idx_mutex); + ret = psp_ras_trigger_error(&adev->psp, &block_info); + mutex_unlock(&adev->grbm_idx_mutex); + + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gp102.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h similarity index 56% rename from drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gp102.c rename to drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h index fde6328c6d7134244b4e7a99693031420902ef6e..2e3f6f755ad47de7d9799bbe5a77aa7e2ac7f6da 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gp102.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * Copyright 2020 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -14,17 +14,22 @@ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE.
+ * */ -#include "priv.h" +#ifndef __GFX_V9_4_H__ +#define __GFX_V9_4_H__ + +void gfx_v9_4_clear_ras_edc_counter(struct amdgpu_device *adev); + +int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status); + +int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, + void *inject_if); -int -gp102_nvdec_new(struct nvkm_device *device, int index, - struct nvkm_nvdec **pnvdec) -{ - return nvkm_nvdec_new_(device, index, pnvdec); -} +#endif /* __GFX_V9_4_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index e91bd79457779ed2a91fa03ef6a374c193f1d6a4..1a2f18b908fee4040a3e4c93953331ca6c3ead14 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -75,40 +75,45 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); - /* Program the system aperture low logical page number. */ - WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, - min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); - - if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8) - /* - * Raven2 has a HW issue that it is unable to use the vram which - * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the - * workaround that increase system aperture high address (add 1) - * to get rid of the VM fault and hardware hang. - */ - WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, - max((adev->gmc.fb_end >> 18) + 0x1, - adev->gmc.agp_end >> 18)); - else - WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, - max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); - - /* Set default page address. */ - value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start - + adev->vm_manager.vram_base_offset; - WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, - (u32)(value >> 12)); - WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, - (u32)(value >> 44)); - - /* Program "protection fault". */ - WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, - (u32)(adev->dummy_page_addr >> 12)); - WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32, - (u32)((u64)adev->dummy_page_addr >> 44)); - - WREG32_FIELD15(GC, 0, VM_L2_PROTECTION_FAULT_CNTL2, - ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1); + if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) { + /* Program the system aperture low logical page number. */ + WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, + min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); + + if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8) + /* + * Raven2 has a HW issue that it is unable to use the + * vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. + * So here is the workaround that increase system + * aperture high address (add 1) to get rid of the VM + * fault and hardware hang. + */ + WREG32_SOC15_RLC(GC, 0, + mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, + max((adev->gmc.fb_end >> 18) + 0x1, + adev->gmc.agp_end >> 18)); + else + WREG32_SOC15_RLC( + GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, + max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); + + /* Set default page address. 
*/ + value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + + adev->vm_manager.vram_base_offset; + WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, + (u32)(value >> 12)); + WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, + (u32)(value >> 44)); + + /* Program "protection fault". */ + WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, + (u32)(adev->dummy_page_addr >> 12)); + WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32, + (u32)((u64)adev->dummy_page_addr >> 44)); + + WREG32_FIELD15(GC, 0, VM_L2_PROTECTION_FAULT_CNTL2, + ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1); + } } static void gfxhub_v1_0_init_tlb_regs(struct amdgpu_device *adev) @@ -264,7 +269,7 @@ static void gfxhub_v1_0_program_invalidation(struct amdgpu_device *adev) int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev) { - if (amdgpu_sriov_vf(adev)) { + if (amdgpu_sriov_vf(adev) && adev->asic_type != CHIP_ARCTURUS) { /* * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are * VF copy registers so vbios post doesn't program them, for @@ -280,10 +285,12 @@ int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev) gfxhub_v1_0_init_gart_aperture_regs(adev); gfxhub_v1_0_init_system_aperture_regs(adev); gfxhub_v1_0_init_tlb_regs(adev); - gfxhub_v1_0_init_cache_regs(adev); + if (!amdgpu_sriov_vf(adev)) + gfxhub_v1_0_init_cache_regs(adev); gfxhub_v1_0_enable_system_domain(adev); - gfxhub_v1_0_disable_identity_aperture(adev); + if (!amdgpu_sriov_vf(adev)) + gfxhub_v1_0_disable_identity_aperture(adev); gfxhub_v1_0_setup_vmid_config(adev); gfxhub_v1_0_program_invalidation(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index f5725336a5f26dbc3082bd52a90bc260c557a026..9775eca6fe434e044d21741eb5123ca16f2ec340 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -30,6 +30,8 @@ #include "hdp/hdp_5_0_0_sh_mask.h" #include "gc/gc_10_1_0_sh_mask.h" #include "mmhub/mmhub_2_0_0_sh_mask.h" +#include "athub/athub_2_0_0_sh_mask.h" +#include "athub/athub_2_0_0_offset.h" #include "dcn/dcn_2_0_0_offset.h" #include "dcn/dcn_2_0_0_sh_mask.h" #include "oss/osssys_5_0_0_offset.h" @@ -37,6 +39,7 @@ #include "navi10_enum.h" #include "soc15.h" +#include "soc15d.h" #include "soc15_common.h" #include "nbio_v2_3.h" @@ -234,6 +237,19 @@ static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev, (!amdgpu_sriov_vf(adev))); } +static bool gmc_v10_0_get_atc_vmid_pasid_mapping_info( + struct amdgpu_device *adev, + uint8_t vmid, uint16_t *p_pasid) +{ + uint32_t value; + + value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + + vmid); + *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK; + + return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); +} + /* * GART * VMID 0 is the physical GPU addresses as used by the kernel. 
@@ -246,7 +262,8 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, { bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub); struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; - u32 tmp = gmc_v10_0_get_invalidate_req(vmid, flush_type); + u32 inv_req = gmc_v10_0_get_invalidate_req(vmid, flush_type); + u32 tmp; /* Use register 17 for GART */ const unsigned eng = 17; unsigned int i; @@ -273,7 +290,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n"); } - WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp); + WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req); /* * Issue a dummy read to wait for the ACK register to be cleared @@ -380,6 +397,64 @@ error_alloc: DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r); } +/** + * gmc_v10_0_flush_gpu_tlb_pasid - tlb flush via pasid + * + * @adev: amdgpu_device pointer + * @pasid: pasid to be flushed + * + * Flush the TLB for the requested pasid. + */ +static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, + uint16_t pasid, uint32_t flush_type, + bool all_hub) +{ + int vmid, i; + signed long r; + uint32_t seq; + uint16_t queried_pasid; + bool ret; + struct amdgpu_ring *ring = &adev->gfx.kiq.ring; + struct amdgpu_kiq *kiq = &adev->gfx.kiq; + + if (amdgpu_emu_mode == 0 && ring->sched.ready) { + spin_lock(&adev->gfx.kiq.ring_lock); + /* 2 dwords flush + 8 dwords fence */ + amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8); + kiq->pmf->kiq_invalidate_tlbs(ring, + pasid, flush_type, all_hub); + amdgpu_fence_emit_polling(ring, &seq); + amdgpu_ring_commit(ring); + spin_unlock(&adev->gfx.kiq.ring_lock); + r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout); + if (r < 1) { + DRM_ERROR("wait for kiq fence error: %ld.\n", r); + return -ETIME; + } + + return 0; + } + + for (vmid = 1; vmid < 16; vmid++) { + + ret = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid, + &queried_pasid); + if (ret && queried_pasid == pasid) { + if (all_hub) { + for (i = 0; i < adev->num_vmhubs; i++) + gmc_v10_0_flush_gpu_tlb(adev, vmid, + i, flush_type); + } else { + gmc_v10_0_flush_gpu_tlb(adev, vmid, + AMDGPU_GFXHUB_0, flush_type); + } + break; + } + } + + return 0; +} + static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr) { @@ -531,6 +606,7 @@ static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev, static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = { .flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb, + .flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid, .emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb, .emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping, .map_mtype = gmc_v10_0_map_mtype, @@ -564,22 +640,13 @@ static int gmc_v10_0_early_init(void *handle) static int gmc_v10_0_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 }; - unsigned i; - - for(i = 0; i < adev->num_rings; ++i) { - struct amdgpu_ring *ring = adev->rings[i]; - unsigned vmhub = ring->funcs->vmhub; + int r; - ring->vm_inv_eng = vm_inv_eng[vmhub]++; - dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n", - ring->idx, ring->name, ring->vm_inv_eng, - ring->funcs->vmhub); - } + amdgpu_bo_late_init(adev); - /* Engine 17 is used for GART flushes */ - for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i) - BUG_ON(vm_inv_eng[i] > 17); + r = amdgpu_gmc_allocate_vm_inv_eng(adev); + if (r) + return r; return
amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); } @@ -731,6 +798,10 @@ static int gmc_v10_0_sw_init(void *handle) r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT, &adev->gmc.vm_fault); + + if (r) + return r; + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT, &adev->gmc.vm_fault); @@ -743,15 +814,6 @@ static int gmc_v10_0_sw_init(void *handle) */ adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */ - /* - * Reserve 8M stolen memory for navi10 like vega10 - * TODO: will check if it's really needed on asic. - */ - if (amdgpu_emu_mode == 1) - adev->gmc.stolen_size = 0; - else - adev->gmc.stolen_size = 9 * 1024 *1024; - r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44)); if (r) { printk(KERN_WARNING "amdgpu: No suitable DMA available.\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index f08e5330642d6f525cbf65107b7dd5422f84b1ab..9da9596a36388d31e4c2cc5fb106df3c962dcd2f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -381,7 +381,8 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); #ifdef CONFIG_X86_64 - if (adev->flags & AMD_IS_APU) { + if (adev->flags & AMD_IS_APU && + adev->gmc.real_vram_size > adev->gmc.aper_size) { adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22; adev->gmc.aper_size = adev->gmc.real_vram_size; } @@ -418,6 +419,38 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) return 0; } +/** + * gmc_v7_0_flush_gpu_tlb_pasid - tlb flush via pasid + * + * @adev: amdgpu_device pointer + * @pasid: pasid to be flushed + * + * Flush the TLB for the requested pasid. + */ +static int gmc_v7_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, + uint16_t pasid, uint32_t flush_type, + bool all_hub) +{ + int vmid; + unsigned int tmp; + + if (adev->in_gpu_reset) + return -EIO; + + for (vmid = 1; vmid < 16; vmid++) { + + tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); + if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) && + (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) { + WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); + RREG32(mmVM_INVALIDATE_RESPONSE); + break; + } + } + + return 0; +} + /* * GART * VMID 0 is the physical GPU addresses as used by the kernel. @@ -1333,6 +1366,7 @@ static const struct amd_ip_funcs gmc_v7_0_ip_funcs = { static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = { .flush_gpu_tlb = gmc_v7_0_flush_gpu_tlb, + .flush_gpu_tlb_pasid = gmc_v7_0_flush_gpu_tlb_pasid, .emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb, .emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping, .set_prt = gmc_v7_0_set_prt, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 6d96d40fbcb8f8382dd3ab9c4945c05695f9bdf6..27d83204fa2b00c3a8e663ceb18402ce39ba24e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -620,6 +620,39 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) return 0; } +/** + * gmc_v8_0_flush_gpu_tlb_pasid - tlb flush via pasid + * + * @adev: amdgpu_device pointer + * @pasid: pasid to be flushed + * + * Flush the TLB for the requested pasid.
+ */ +static int gmc_v8_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, + uint16_t pasid, uint32_t flush_type, + bool all_hub) +{ + int vmid; + unsigned int tmp; + + if (adev->in_gpu_reset) + return -EIO; + + for (vmid = 1; vmid < 16; vmid++) { + + tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); + if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) && + (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) { + WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); + RREG32(mmVM_INVALIDATE_RESPONSE); + break; + } + } + + return 0; + +} + /* * GART * VMID 0 is the physical GPU addresses as used by the kernel. @@ -1700,6 +1733,7 @@ static const struct amd_ip_funcs gmc_v8_0_ip_funcs = { static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = { .flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb, + .flush_gpu_tlb_pasid = gmc_v8_0_flush_gpu_tlb_pasid, .emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb, .emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping, .set_prt = gmc_v8_0_set_prt, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index fa025ceeea0fa1e68ee9e7e6e71631e7f61e0250..90216abf14a4c356732a7950284d835f19fbc9de 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -38,10 +38,12 @@ #include "dce/dce_12_0_sh_mask.h" #include "vega10_enum.h" #include "mmhub/mmhub_1_0_offset.h" +#include "athub/athub_1_0_sh_mask.h" #include "athub/athub_1_0_offset.h" #include "oss/osssys_4_0_offset.h" #include "soc15.h" +#include "soc15d.h" #include "soc15_common.h" #include "umc/umc_6_0_sh_mask.h" @@ -207,6 +209,11 @@ static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev, { u32 bits, i, tmp, reg; + /* Devices newer than VEGA10/12 shall have these programming + sequences performed by PSP BL */ + if (adev->asic_type >= CHIP_VEGA20) + return 0; + bits = 0x7f; switch (state) { @@ -393,8 +400,10 @@ static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev) adev->gmc.vm_fault.num_types = 1; adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs; - adev->gmc.ecc_irq.num_types = 1; - adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs; + if (!amdgpu_sriov_vf(adev)) { + adev->gmc.ecc_irq.num_types = 1; + adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs; + } } static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid, @@ -434,6 +443,18 @@ static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev, adev->pdev->device == 0x15d8))); } +static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, + uint8_t vmid, uint16_t *p_pasid) +{ + uint32_t value; + + value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + + vmid); + *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK; + + return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); +} + /* * GART * VMID 0 is the physical GPU addresses as used by the kernel.
@@ -455,13 +476,13 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, { bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub); const unsigned eng = 17; - u32 j, tmp; + u32 j, inv_req, tmp; struct amdgpu_vmhub *hub; BUG_ON(vmhub >= adev->num_vmhubs); hub = &adev->vmhub[vmhub]; - tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type); + inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type); /* This is necessary for a HW workaround under SRIOV as well * as GFXOFF under bare metal @@ -472,7 +493,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, uint32_t req = hub->vm_inv_eng0_req + eng; uint32_t ack = hub->vm_inv_eng0_ack + eng; - amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp, + amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req, 1 << vmid); return; } @@ -500,7 +521,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n"); } - WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp); + WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req); /* * Issue a dummy read to wait for the ACK register to be cleared @@ -532,6 +553,68 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, DRM_ERROR("Timeout waiting for VM flush ACK!\n"); } +/** + * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid + * + * @adev: amdgpu_device pointer + * @pasid: pasid to be flushed + * + * Flush the TLB for the requested pasid. + */ +static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, + uint16_t pasid, uint32_t flush_type, + bool all_hub) +{ + int vmid, i; + signed long r; + uint32_t seq; + uint16_t queried_pasid; + bool ret; + struct amdgpu_ring *ring = &adev->gfx.kiq.ring; + struct amdgpu_kiq *kiq = &adev->gfx.kiq; + + if (adev->in_gpu_reset) + return -EIO; + + if (ring->sched.ready) { + spin_lock(&adev->gfx.kiq.ring_lock); + /* 2 dwords flush + 8 dwords fence */ + amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8); + kiq->pmf->kiq_invalidate_tlbs(ring, + pasid, flush_type, all_hub); + amdgpu_fence_emit_polling(ring, &seq); + amdgpu_ring_commit(ring); + spin_unlock(&adev->gfx.kiq.ring_lock); + r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout); + if (r < 1) { + DRM_ERROR("wait for kiq fence error: %ld.\n", r); + return -ETIME; + } + + return 0; + } + + for (vmid = 1; vmid < 16; vmid++) { + + ret = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid, + &queried_pasid); + if (ret && queried_pasid == pasid) { + if (all_hub) { + for (i = 0; i < adev->num_vmhubs; i++) + gmc_v9_0_flush_gpu_tlb(adev, vmid, + i, flush_type); + } else { + gmc_v9_0_flush_gpu_tlb(adev, vmid, + AMDGPU_GFXHUB_0, flush_type); + } + break; + } + } + + return 0; + +} + static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr) { @@ -693,6 +776,7 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev, static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = { .flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb, + .flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid, .emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb, .emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping, .map_mtype = gmc_v9_0_map_mtype, @@ -790,36 +874,6 @@ static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev) } } -static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev) -{ - struct amdgpu_ring *ring; - unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] = - {GFXHUB_FREE_VM_INV_ENGS_BITMAP,
MMHUB_FREE_VM_INV_ENGS_BITMAP, - GFXHUB_FREE_VM_INV_ENGS_BITMAP}; - unsigned i; - unsigned vmhub, inv_eng; - - for (i = 0; i < adev->num_rings; ++i) { - ring = adev->rings[i]; - vmhub = ring->funcs->vmhub; - - inv_eng = ffs(vm_inv_engs[vmhub]); - if (!inv_eng) { - dev_err(adev->dev, "no VM inv eng for ring %s\n", - ring->name); - return -EINVAL; - } - - ring->vm_inv_eng = inv_eng - 1; - vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng); - - dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n", - ring->name, ring->vm_inv_eng, ring->funcs->vmhub); - } - - return 0; -} - static int gmc_v9_0_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -828,7 +882,7 @@ static int gmc_v9_0_late_init(void *handle) if (!gmc_v9_0_keep_stolen_memory(adev)) amdgpu_bo_late_init(adev); - r = gmc_v9_0_allocate_vm_inv_eng(adev); + r = amdgpu_gmc_allocate_vm_inv_eng(adev); if (r) return r; /* Check if ecc is available */ @@ -840,8 +894,8 @@ static int gmc_v9_0_late_init(void *handle) r = amdgpu_atomfirmware_mem_ecc_supported(adev); if (!r) { DRM_INFO("ECC is not present.\n"); - if (adev->df_funcs->enable_ecc_force_par_wr_rmw) - adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false); + if (adev->df.funcs->enable_ecc_force_par_wr_rmw) + adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false); } else { DRM_INFO("ECC is active.\n"); } @@ -1046,7 +1100,7 @@ static int gmc_v9_0_sw_init(void *handle) else chansize = 128; - numchan = adev->df_funcs->get_hbm_channel_number(adev); + numchan = adev->df.funcs->get_hbm_channel_number(adev); adev->gmc.vram_width = numchan * chansize; } @@ -1112,11 +1166,13 @@ static int gmc_v9_0_sw_init(void *handle) if (r) return r; - /* interrupt sent to DF. */ - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0, - &adev->gmc.ecc_irq); - if (r) - return r; + if (!amdgpu_sriov_vf(adev)) { + /* interrupt sent to DF. 
*/ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0, + &adev->gmc.ecc_irq); + if (r) + return r; + } /* Set the internal MC address mask * This is the max address of the GPU's @@ -1302,12 +1358,13 @@ static int gmc_v9_0_hw_init(void *handle) else value = true; - gfxhub_v1_0_set_fault_enable_default(adev, value); - if (adev->asic_type == CHIP_ARCTURUS) - mmhub_v9_4_set_fault_enable_default(adev, value); - else - mmhub_v1_0_set_fault_enable_default(adev, value); - + if (!amdgpu_sriov_vf(adev)) { + gfxhub_v1_0_set_fault_enable_default(adev, value); + if (adev->asic_type == CHIP_ARCTURUS) + mmhub_v9_4_set_fault_enable_default(adev, value); + else + mmhub_v1_0_set_fault_enable_default(adev, value); + } for (i = 0; i < adev->num_vmhubs; ++i) gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h index 49e8be761214bdecbc10e9927ffdda6dfd8fd1ad..e0585e8c6c1b74fb690d07c4b16ee8dc5d453a3e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h @@ -24,16 +24,6 @@ #ifndef __GMC_V9_0_H__ #define __GMC_V9_0_H__ - /* - * The latest engine allocation on gfx9 is: - * Engine 2, 3: firmware - * Engine 0, 1, 4~16: amdgpu ring, - * subject to change when ring number changes - * Engine 17: Gart flushes - */ -#define GFXHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3 -#define MMHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3 - extern const struct amd_ip_funcs gmc_v9_0_ip_funcs; extern const struct amdgpu_ip_block_version gmc_v9_0_ip_block; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c index a141408dfb23d2381c9d94b240cf7adebe8129a2..0debfd9f428c1b236c468277002135bc2a6e7e65 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c @@ -25,6 +25,7 @@ #include "amdgpu_jpeg.h" #include "soc15.h" #include "soc15d.h" +#include "vcn_v1_0.h" #include "vcn/vcn_1_0_offset.h" #include "vcn/vcn_1_0_sh_mask.h" @@ -561,7 +562,7 @@ static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = { .insert_start = jpeg_v1_0_decode_ring_insert_start, .insert_end = jpeg_v1_0_decode_ring_insert_end, .pad_ib = amdgpu_ring_generic_pad_ib, - .begin_use = amdgpu_vcn_ring_begin_use, + .begin_use = vcn_v1_0_ring_begin_use, .end_use = amdgpu_vcn_ring_end_use, .emit_wreg = jpeg_v1_0_decode_ring_emit_wreg, .emit_reg_wait = jpeg_v1_0_decode_ring_emit_reg_wait, diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c index a78292d84854dca5298618ef43a3f246ae1151db..ff2e6e1ccde7ccee5008fe282a4e407a905b4b33 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c @@ -690,7 +690,7 @@ static int jpeg_v2_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + bool enable = (state == AMD_CG_STATE_GATE); if (enable) { if (jpeg_v2_0_is_idle(handle)) diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c index 2c58939e6ad03508dddfc75b2e75e63ec0634066..c6d046df4b706951455d2095c46f1eb08361f163 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c @@ -469,7 +469,7 @@ static int jpeg_v2_5_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? 
true : false; + bool enable = (state == AMD_CG_STATE_GATE); int i; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index adfd8a6171eb83b2951e7eda2074d0b863e1d432..49a3a56ec017d65506a943af107ae1a6f185fc9c 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -523,9 +523,9 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev, case CHIP_RAVEN: case CHIP_RENOIR: mmhub_v1_0_update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); mmhub_v1_0_update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c index a7cb185d639a330c79f87ba0fcaae91d2202e0e8..bde1896805219694c1927cbc9a57b39ad9c1fd8d 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c @@ -427,9 +427,9 @@ int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev, case CHIP_NAVI14: case CHIP_NAVI12: mmhub_v2_0_update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); mmhub_v2_0_update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c index d9301e80522aa9c9c7b9fde080487be4ba1bbcfb..a5281df8d84fbd75676420bf6ba16d4ac79af27c 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c @@ -128,45 +128,53 @@ static void mmhub_v9_4_init_system_aperture_regs(struct amdgpu_device *adev, hubid * MMHUB_INSTANCE_REGISTER_OFFSET, adev->gmc.agp_start >> 24); - /* Program the system aperture low logical page number. */ - WREG32_SOC15_OFFSET(MMHUB, 0, - mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_LOW_ADDR, - hubid * MMHUB_INSTANCE_REGISTER_OFFSET, - min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); - WREG32_SOC15_OFFSET(MMHUB, 0, - mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_HIGH_ADDR, - hubid * MMHUB_INSTANCE_REGISTER_OFFSET, - max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); + if (!amdgpu_sriov_vf(adev)) { + /* Program the system aperture low logical page number. */ + WREG32_SOC15_OFFSET( + MMHUB, 0, mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_LOW_ADDR, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, + min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); + WREG32_SOC15_OFFSET( + MMHUB, 0, mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_HIGH_ADDR, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, + max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); - /* Set default page address. */ - value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + - adev->vm_manager.vram_base_offset; - WREG32_SOC15_OFFSET(MMHUB, 0, + /* Set default page address. */ + value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + + adev->vm_manager.vram_base_offset; + WREG32_SOC15_OFFSET( + MMHUB, 0, mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, hubid * MMHUB_INSTANCE_REGISTER_OFFSET, (u32)(value >> 12)); - WREG32_SOC15_OFFSET(MMHUB, 0, + WREG32_SOC15_OFFSET( + MMHUB, 0, mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, hubid * MMHUB_INSTANCE_REGISTER_OFFSET, (u32)(value >> 44)); - /* Program "protection fault". 
*/ - WREG32_SOC15_OFFSET(MMHUB, 0, - mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, - hubid * MMHUB_INSTANCE_REGISTER_OFFSET, - (u32)(adev->dummy_page_addr >> 12)); - WREG32_SOC15_OFFSET(MMHUB, 0, - mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32, - hubid * MMHUB_INSTANCE_REGISTER_OFFSET, - (u32)((u64)adev->dummy_page_addr >> 44)); + /* Program "protection fault". */ + WREG32_SOC15_OFFSET( + MMHUB, 0, + mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, + (u32)(adev->dummy_page_addr >> 12)); + WREG32_SOC15_OFFSET( + MMHUB, 0, + mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, + (u32)((u64)adev->dummy_page_addr >> 44)); - tmp = RREG32_SOC15_OFFSET(MMHUB, 0, - mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2, - hubid * MMHUB_INSTANCE_REGISTER_OFFSET); - tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2, - ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1); - WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2, - hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp); + tmp = RREG32_SOC15_OFFSET( + MMHUB, 0, mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET); + tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2, + ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1); + WREG32_SOC15_OFFSET(MMHUB, 0, + mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, + tmp); + } } static void mmhub_v9_4_init_tlb_regs(struct amdgpu_device *adev, int hubid) @@ -368,30 +376,16 @@ int mmhub_v9_4_gart_enable(struct amdgpu_device *adev) int i; for (i = 0; i < MMHUB_NUM_INSTANCES; i++) { - if (amdgpu_sriov_vf(adev)) { - /* - * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase - * they are VF copy registers so vbios post doesn't - * program them, for SRIOV driver need to program them - */ - WREG32_SOC15_OFFSET(MMHUB, 0, - mmVMSHAREDVC0_MC_VM_FB_LOCATION_BASE, - i * MMHUB_INSTANCE_REGISTER_OFFSET, - adev->gmc.vram_start >> 24); - WREG32_SOC15_OFFSET(MMHUB, 0, - mmVMSHAREDVC0_MC_VM_FB_LOCATION_TOP, - i * MMHUB_INSTANCE_REGISTER_OFFSET, - adev->gmc.vram_end >> 24); - } - /* GART Enable. */ mmhub_v9_4_init_gart_aperture_regs(adev, i); mmhub_v9_4_init_system_aperture_regs(adev, i); mmhub_v9_4_init_tlb_regs(adev, i); - mmhub_v9_4_init_cache_regs(adev, i); + if (!amdgpu_sriov_vf(adev)) + mmhub_v9_4_init_cache_regs(adev, i); mmhub_v9_4_enable_system_domain(adev, i); - mmhub_v9_4_disable_identity_aperture(adev, i); + if (!amdgpu_sriov_vf(adev)) + mmhub_v9_4_disable_identity_aperture(adev, i); mmhub_v9_4_setup_vmid_config(adev, i); mmhub_v9_4_program_invalidation(adev, i); } @@ -631,9 +625,9 @@ int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev, switch (adev->asic_type) { case CHIP_ARCTURUS: mmhub_v9_4_update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); mmhub_v9_4_update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? 
true : false); + state == AMD_CG_STATE_GATE); break; default: break; @@ -669,6 +663,7 @@ void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags) } static const struct soc15_ras_field_entry mmhub_v9_4_ras_fields[] = { + /* MMHUB Range 0 */ { "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), @@ -757,6 +752,24 @@ static const struct soc15_ras_field_entry mmhub_v9_4_ras_fields[] = { 0, 0, SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), }, + { "MMEA0_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA0_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA0_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA0_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D3MEM_DED_COUNT), + }, + + /* MMHUB Range 1 */ { "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), @@ -844,16 +857,686 @@ static const struct soc15_ras_field_entry mmhub_v9_4_ras_fields[] = { { "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), 0, 0, SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + { "MMEA1_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA1_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA1_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA1_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D3MEM_DED_COUNT), + }, + + /* MMHAB Range 2*/ + { "MMEA2_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA2_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA2_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA2_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA2_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA2_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, 
mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA2_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA2_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA2_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA2_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA2_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA2_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA2_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA2_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA2_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA2_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA2_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA2_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA2_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA2_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA2_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA2_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA2_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA2_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA2_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA2_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA2_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA2_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA2_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + { "MMEA2_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA2_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA2_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA2_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D3MEM_DED_COUNT), + }, + + /* MMHUB Range 3 */ + { "MMEA3_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA3_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0,
mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA3_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA3_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA3_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA3_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA3_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA3_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA3_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA3_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA3_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA3_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA3_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA3_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA3_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA3_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA3_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA3_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA3_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA3_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA3_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA3_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA3_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA3_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA3_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA3_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA3_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA3_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA3_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + { "MMEA3_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA3_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, 
mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA3_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA3_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D3MEM_DED_COUNT), + }, + + /* MMHUB Range 4 */ + { "MMEA4_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA4_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA4_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA4_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA4_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA4_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA4_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA4_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA4_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA4_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA4_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA4_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA4_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA4_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA4_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA4_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA4_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA4_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA4_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA4_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA4_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA4_IOWR_CMDMEM", 
SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA4_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA4_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA4_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA4_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA4_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA4_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA4_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + { "MMEA4_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA4_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA4_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA4_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D3MEM_DED_COUNT), + }, + + /* MMHUB Range 5 */ + { "MMEA5_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA5_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA5_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA5_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA5_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA5_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA5_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA5_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA5_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA5_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA5_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA5_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA5_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2,
GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA5_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA5_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA5_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA5_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA5_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA5_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA5_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA5_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA5_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA5_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA5_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA5_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA5_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA5_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA5_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA5_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + { "MMEA5_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA5_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA5_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA5_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D3MEM_DED_COUNT), + }, + + /* MMHUB Range 6 */ + { "MMEA6_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA6_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA6_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA6_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA6_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA6_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA6_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA6_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA6_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + 
SOC15_REG_FIELD(MMEA6_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA6_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA6_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA6_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA6_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA6_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA6_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA6_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA6_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA6_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA6_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA6_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA6_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA6_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA6_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA6_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA6_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA6_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA6_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA6_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA6_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + { "MMEA6_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA6_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA6_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA6_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D3MEM_DED_COUNT), + }, + + /* MMHUB Range 7*/ + { "MMEA7_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA7_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA7_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA7_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, 
RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA7_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA7_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA7_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA7_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA7_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA7_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA7_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA7_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA7_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA7_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA7_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA7_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA7_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA7_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA7_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA7_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA7_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA7_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA7_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA7_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA7_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA7_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA7_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA7_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA7_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + { "MMEA7_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA7_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA7_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA7_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D3MEM_SED_COUNT), + 
SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D3MEM_DED_COUNT), } }; static const struct soc15_reg_entry mmhub_v9_4_edc_cnt_regs[] = { - { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), 0, 0, 0}, - { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), 0, 0, 0}, - { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), 0, 0, 0}, - { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), 0, 0, 0}, - { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), 0, 0, 0}, - { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), 0, 0, 0}, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), 0, 0, 0 }, }; static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg, diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h index 8af0bddf85e4b06e89a7200a8592d10c916360ba..20958639b601b6bf65c51156a9df308410bc36fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h +++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h @@ -47,6 +47,18 @@ struct mmsch_v1_0_init_header { uint32_t uvd_table_size; }; +struct mmsch_vf_eng_init_header { + uint32_t init_status; + uint32_t table_offset; + uint32_t table_size; +}; + +struct mmsch_v1_1_init_header { + uint32_t version; + uint32_t total_size; + struct mmsch_vf_eng_init_header eng[2]; +}; + struct mmsch_v1_0_cmd_direct_reg_header { uint32_t reg_offset : 28; uint32_t command_type : 4; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index 43305afa3d6f6f8bfc6b41b121f6a9e4b8cdee03..5fd67e1cc2a0465ef469434d29e2585ffcd042f0 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -250,7 +250,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) */ locked = mutex_trylock(&adev->lock_reset); if (locked) - adev->in_gpu_reset = 1; + adev->in_gpu_reset = true; do { if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL) @@ -262,7 +262,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) flr_done: if (locked) { - adev->in_gpu_reset = 0; + adev->in_gpu_reset = false; mutex_unlock(&adev->lock_reset); } diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index 
0d8767eb7a709b06c786af6c3acdb0b213a1084a..237fa5e16b7c9c08282636400317a03321a961d3 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -252,7 +252,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work) */ locked = mutex_trylock(&adev->lock_reset); if (locked) - adev->in_gpu_reset = 1; + adev->in_gpu_reset = true; do { if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL) @@ -264,12 +264,16 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work) flr_done: if (locked) { - adev->in_gpu_reset = 0; + adev->in_gpu_reset = false; mutex_unlock(&adev->lock_reset); } /* Trigger recovery for world switch failure if no TDR */ - if (amdgpu_device_should_recover_gpu(adev)) + if (amdgpu_device_should_recover_gpu(adev) + && (adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT || + adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT || + adev->compute_timeout == MAX_SCHEDULE_TIMEOUT || + adev->video_timeout == MAX_SCHEDULE_TIMEOUT)) amdgpu_device_gpu_recover(adev, NULL); } diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c index 9af73567e716a62705c4f44b88f2b6a6d4f4f352..cf557a428298ba05f825facabafafb0ad648bf92 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c @@ -110,7 +110,6 @@ static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl static int navi10_ih_irq_init(struct amdgpu_device *adev) { struct amdgpu_ih_ring *ih = &adev->irq.ih; - int ret = 0; u32 ih_rb_cntl, ih_doorbell_rtpr, ih_chicken; u32 tmp; @@ -179,7 +178,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev) /* enable interrupts */ navi10_ih_enable_interrupts(adev); - return ret; + return 0; } /** @@ -427,7 +426,7 @@ static int navi10_ih_set_clockgating_state(void *handle, struct amdgpu_device *adev = (struct amdgpu_device *)handle; navi10_ih_update_clockgating_state(adev, - state == AMD_CG_STATE_GATE ? 
true : false); + state == AMD_CG_STATE_GATE); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index bb701dbfd472957b3763b3a3d9c44acd146db6b2..65eb378fa035391936c9762e57746e071d9c13ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -339,7 +339,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device /* ras_controller_int is dedicated for nbif ras error, * not the global interrupt for sync flood */ - amdgpu_ras_reset_gpu(adev, true); + amdgpu_ras_reset_gpu(adev); } } @@ -456,10 +456,8 @@ static int nbio_v7_4_init_ras_controller_interrupt (struct amdgpu_device *adev) r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT, &adev->nbio.ras_controller_irq); - if (r) - return r; - return 0; + return r; } static int nbio_v7_4_init_ras_err_event_athub_interrupt (struct amdgpu_device *adev) @@ -476,10 +474,8 @@ static int nbio_v7_4_init_ras_err_event_athub_interrupt (struct amdgpu_device *a r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT, &adev->nbio.ras_err_event_athub_irq); - if (r) - return r; - return 0; + return r; } #define smnPARITY_ERROR_STATUS_UNCORR_GRP2 0x13a20030 diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index b0229543e8875da44b8e0c0a195dfed2f763ca33..2d1bebdf1603d592b2776cd99fd557305b9d1e75 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -478,7 +478,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP && - is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev)) + !amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); @@ -489,7 +489,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && - is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev)) + !amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); @@ -502,7 +502,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP && - is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev)) + !amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); @@ -513,7 +513,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && - is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev)) + !amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); @@ -726,6 +726,12 @@ static int nv_common_early_init(void *handle) 
AMD_PG_SUPPORT_VCN_DPG | AMD_PG_SUPPORT_JPEG | AMD_PG_SUPPORT_ATHUB; + /* guest vm gets 0xffffffff when reading RCC_DEV0_EPF0_STRAP0, + * as a consequence, the rev_id and external_rev_id are wrong. + * work around it by hardcoding rev_id to 0 (default value). + */ + if (amdgpu_sriov_vf(adev)) + adev->rev_id = 0; adev->external_rev_id = adev->rev_id + 0xa; break; default: break; @@ -944,13 +950,13 @@ static int nv_common_set_clockgating_state(void *handle, case CHIP_NAVI14: case CHIP_NAVI12: adev->nbio.funcs->update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); adev->nbio.funcs->update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); nv_update_hdp_mem_power_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); nv_update_hdp_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h index 74a9fe8e0cfb92e2cae2ed3e71bfc358b861079a..36b65797434e506d335b3af071df0ade99fdbd9d 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h +++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h @@ -242,6 +242,7 @@ enum psp_gfx_fw_type { GFX_FW_TYPE_SDMA5 = 55, /* SDMA5 MI */ GFX_FW_TYPE_SDMA6 = 56, /* SDMA6 MI */ GFX_FW_TYPE_SDMA7 = 57, /* SDMA7 MI */ + GFX_FW_TYPE_VCN1 = 58, /* VCN1 MI */ GFX_FW_TYPE_MAX }; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index c66ca8cc2ebdbd23b0a8ae77aad335a0dfb37bd9..0829188c1a5c5eca2992d8468fd7d5f5f3edd1c3 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -22,6 +22,7 @@ #include <linux/firmware.h> #include <linux/module.h> +#include <linux/vmalloc.h> #include "amdgpu.h" #include "amdgpu_psp.h" @@ -43,10 +44,13 @@ MODULE_FIRMWARE("amdgpu/vega20_asd.bin"); MODULE_FIRMWARE("amdgpu/vega20_ta.bin"); MODULE_FIRMWARE("amdgpu/navi10_sos.bin"); MODULE_FIRMWARE("amdgpu/navi10_asd.bin"); +MODULE_FIRMWARE("amdgpu/navi10_ta.bin"); MODULE_FIRMWARE("amdgpu/navi14_sos.bin"); MODULE_FIRMWARE("amdgpu/navi14_asd.bin"); +MODULE_FIRMWARE("amdgpu/navi14_ta.bin"); MODULE_FIRMWARE("amdgpu/navi12_sos.bin"); MODULE_FIRMWARE("amdgpu/navi12_asd.bin"); +MODULE_FIRMWARE("amdgpu/navi12_ta.bin"); MODULE_FIRMWARE("amdgpu/arcturus_sos.bin"); MODULE_FIRMWARE("amdgpu/arcturus_asd.bin"); MODULE_FIRMWARE("amdgpu/arcturus_ta.bin"); @@ -233,6 +237,29 @@ out: return err; } +int psp_v11_0_wait_for_bootloader(struct psp_context *psp) +{ + struct amdgpu_device *adev = psp->adev; + + int ret; + int retry_loop; + + for (retry_loop = 0; retry_loop < 10; retry_loop++) { + /* Wait for bootloader to signify that it is + ready, with bit 31 of C2PMSG_35 set to 1 */ + ret = psp_wait_for(psp, + SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), + 0x80000000, + 0x80000000, + false); + + if (ret == 0) + return 0; + } + + return ret; +} + static bool psp_v11_0_is_sos_alive(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; @@ -258,9 +285,7 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp) return 0; } - /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + ret = psp_v11_0_wait_for_bootloader(psp); if (ret) return ret; @@ -276,9 +301,7 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp) WREG32_SOC15(MP0, 0,
mmMP0_SMN_C2PMSG_35, psp_gfxdrv_command_reg); - /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1*/ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + ret = psp_v11_0_wait_for_bootloader(psp); return ret; } @@ -298,9 +321,7 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp) return 0; } - /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + ret = psp_v11_0_wait_for_bootloader(psp); if (ret) return ret; @@ -319,8 +340,7 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp) /* there might be handshake issue with hardware which needs delay */ mdelay(20); - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + ret = psp_v11_0_wait_for_bootloader(psp); return ret; } @@ -337,9 +357,7 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp) if (psp_v11_0_is_sos_alive(psp)) return 0; - /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + ret = psp_v11_0_wait_for_bootloader(psp); if (ret) return ret; @@ -954,10 +972,13 @@ Err_out: */ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops) { - int ret; - uint32_t p2c_header[4]; struct psp_memory_training_context *ctx = &psp->mem_train_ctx; uint32_t *pcache = (uint32_t*)ctx->sys_cache; + struct amdgpu_device *adev = psp->adev; + uint32_t p2c_header[4]; + uint32_t sz; + void *buf; + int ret; if (ctx->init == PSP_MEM_TRAIN_NOT_SUPPORT) { DRM_DEBUG("Memory training is not supported.\n"); @@ -972,7 +993,7 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops) return 0; } - amdgpu_device_vram_access(psp->adev, ctx->p2c_train_data_offset, p2c_header, sizeof(p2c_header), false); + amdgpu_device_vram_access(adev, ctx->p2c_train_data_offset, p2c_header, sizeof(p2c_header), false); DRM_DEBUG("sys_cache[%08x,%08x,%08x,%08x] p2c_header[%08x,%08x,%08x,%08x]\n", pcache[0], pcache[1], pcache[2], pcache[3], p2c_header[0], p2c_header[1], p2c_header[2], p2c_header[3]); @@ -1009,11 +1030,38 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops) DRM_DEBUG("Memory training ops:%x.\n", ops); if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) { + /* + * Long training will encroach on a certain amount of bottom VRAM; + * save the content of this bottom VRAM to system memory + * before training, and restore it after training to avoid + * VRAM corruption.
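+ * For reference, the sequence implemented below is: buf = vmalloc(sz); memcpy_fromio(buf, adev->mman.aper_base_kaddr, sz); send the PSP_BL__DRAM_LONG_TRAIN message; then memcpy_toio() the saved contents back, flush HDP, and vfree(buf).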
+ */ + sz = GDDR6_MEM_TRAINING_ENCROACHED_SIZE; + + if (adev->gmc.visible_vram_size < sz || !adev->mman.aper_base_kaddr) { + DRM_ERROR("visible_vram_size %llx or aper_base_kaddr %p is not initialized.\n", + adev->gmc.visible_vram_size, + adev->mman.aper_base_kaddr); + return -EINVAL; + } + + buf = vmalloc(sz); + if (!buf) { + DRM_ERROR("failed to allocate system memory.\n"); + return -ENOMEM; + } + + memcpy_fromio(buf, adev->mman.aper_base_kaddr, sz); ret = psp_v11_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN); if (ret) { DRM_ERROR("Send long training msg failed.\n"); + vfree(buf); return ret; } + + memcpy_toio(adev->mman.aper_base_kaddr, buf, sz); + adev->nbio.funcs->hdp_flush(adev, NULL); + vfree(buf); } if (ops & PSP_MEM_TRAIN_SAVE) { diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index a101758380130907538ff13a533b6a2a5a190293..7d509a40076fa021f3724a4258fa779afe005a64 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -255,7 +255,7 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring, unsigned vmid = AMDGPU_JOB_GET_VMID(job); /* IB packet must end on a 8 DW boundary */ - sdma_v2_4_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); + sdma_v2_4_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7); amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf)); @@ -750,7 +750,7 @@ static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib u32 pad_count; int i; - pad_count = (8 - (ib->length_dw & 0x7)) % 8; + pad_count = (-ib->length_dw) & 7; for (i = 0; i < pad_count; i++) if (sdma && sdma->burst_nop && (i == 0)) ib->ptr[ib->length_dw++] = @@ -1260,16 +1260,14 @@ static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = { static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev) { - struct drm_gpu_scheduler *sched; unsigned i; adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs; for (i = 0; i < adev->sdma.num_instances; i++) { - sched = &adev->sdma.instance[i].ring.sched; - adev->vm_manager.vm_pte_rqs[i] = - &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; + adev->vm_manager.vm_pte_scheds[i] = + &adev->sdma.instance[i].ring.sched; } - adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances; + adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances; } const struct amdgpu_ip_block_version sdma_v2_4_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 5f4e2c616241fc6b5e0fbf6aaf173720a698af09..b6109a99fc43c4e35d27de930b93756d5c612d8e 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -429,7 +429,7 @@ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring, unsigned vmid = AMDGPU_JOB_GET_VMID(job); /* IB packet must end on a 8 DW boundary */ - sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); + sdma_v3_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7); amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf)); @@ -1021,7 +1021,7 @@ static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib u32 pad_count; int i; - pad_count = (8 - (ib->length_dw & 0x7)) % 8; + pad_count = (-ib->length_dw) & 7; for (i = 0; i < pad_count; i++) if (sdma && sdma->burst_nop && (i == 0)) ib->ptr[ib->length_dw++] = @@ -1698,16 +1698,14 @@ static const struct amdgpu_vm_pte_funcs 
sdma_v3_0_vm_pte_funcs = { static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev) { - struct drm_gpu_scheduler *sched; unsigned i; adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs; for (i = 0; i < adev->sdma.num_instances; i++) { - sched = &adev->sdma.instance[i].ring.sched; - adev->vm_manager.vm_pte_rqs[i] = - &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; + adev->vm_manager.vm_pte_scheds[i] = + &adev->sdma.instance[i].ring.sched; } - adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances; + adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances; } const struct amdgpu_ip_block_version sdma_v3_0_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 4ef4d31f52318086961e6637e9a32ece34778f0b..e55884d204bd70529afe7e61bedf36691556eb62 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -82,6 +82,7 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev); static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev); static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev); static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev); +static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev); static const struct soc15_reg_golden golden_settings_sdma_4[] = { SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07), @@ -254,7 +255,106 @@ static const struct soc15_reg_golden golden_settings_sdma_4_3[] = { SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0), - SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000) + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x03fbe1fe) +}; + +static const struct soc15_ras_field_entry sdma_v4_0_ras_fields[] = { + { "SDMA_UCODE_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_UCODE_BUF_SED), + 0, 0, + }, + { "SDMA_RB_CMD_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_RB_CMD_BUF_SED), + 0, 0, + }, + { "SDMA_IB_CMD_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_IB_CMD_BUF_SED), + 0, 0, + }, + { "SDMA_UTCL1_RD_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_UTCL1_RD_FIFO_SED), + 0, 0, + }, + { "SDMA_UTCL1_RDBST_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_UTCL1_RDBST_FIFO_SED), + 0, 0, + }, + { "SDMA_DATA_LUT_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_DATA_LUT_FIFO_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF0_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF0_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF1_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF1_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF2_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF2_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF3_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF3_SED), + 0, 0, + }, + { 
"SDMA_MBANK_DATA_BUF4_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF4_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF5_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF5_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF6_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF6_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF7_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF7_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF8_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF8_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF9_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF9_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF10_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF10_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF11_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF11_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF12_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF12_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF13_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF13_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF14_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF14_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF15_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF15_SED), + 0, 0, + }, + { "SDMA_SPLIT_DAT_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_SPLIT_DAT_BUF_SED), + 0, 0, + }, + { "SDMA_MC_WR_ADDR_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MC_WR_ADDR_FIFO_SED), + 0, 0, + }, }; static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev, @@ -698,7 +798,7 @@ static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring, unsigned vmid = AMDGPU_JOB_GET_VMID(job); /* IB packet must end on a 8 DW boundary */ - sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); + sdma_v4_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7); amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf)); @@ -1579,7 +1679,7 @@ static void sdma_v4_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib u32 pad_count; int i; - pad_count = (8 - (ib->length_dw & 0x7)) % 8; + pad_count = (-ib->length_dw) & 7; for (i = 0; i < pad_count; i++) if (sdma && sdma->burst_nop && (i == 0)) ib->ptr[ib->length_dw++] = @@ -1686,6 +1786,7 @@ static int sdma_v4_0_early_init(void *handle) sdma_v4_0_set_buffer_funcs(adev); sdma_v4_0_set_vm_pte_funcs(adev); sdma_v4_0_set_irq_funcs(adev); + sdma_v4_0_set_ras_funcs(adev); return 0; } @@ -1700,8 +1801,18 @@ static int sdma_v4_0_late_init(void *handle) struct ras_ih_if ih_info = { .cb = sdma_v4_0_process_ras_data_cb, }; + int i; - return amdgpu_sdma_ras_late_init(adev, &ih_info); + /* read back edc counter registers to clear the counters */ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) { + for (i = 0; i < 
adev->sdma.num_instances; i++) + RREG32_SDMA(i, mmSDMA0_EDC_COUNTER); + } + + if (adev->sdma.funcs && adev->sdma.funcs->ras_late_init) + return adev->sdma.funcs->ras_late_init(adev, &ih_info); + else + return 0; } static int sdma_v4_0_sw_init(void *handle) @@ -1773,7 +1884,8 @@ static int sdma_v4_0_sw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int i; - amdgpu_sdma_ras_fini(adev); + if (adev->sdma.funcs && adev->sdma.funcs->ras_fini) + adev->sdma.funcs->ras_fini(adev); for (i = 0; i < adev->sdma.num_instances; i++) { amdgpu_ring_fini(&adev->sdma.instance[i].ring); @@ -2064,9 +2176,9 @@ static int sdma_v4_0_set_clockgating_state(void *handle, case CHIP_ARCTURUS: case CHIP_RENOIR: sdma_v4_0_update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); sdma_v4_0_update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); break; default: break; @@ -2409,10 +2521,73 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev) sched = &adev->sdma.instance[i].page.sched; else sched = &adev->sdma.instance[i].ring.sched; - adev->vm_manager.vm_pte_rqs[i] = - &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; + adev->vm_manager.vm_pte_scheds[i] = sched; + } + adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances; +} + +static void sdma_v4_0_get_ras_error_count(uint32_t value, + uint32_t instance, + uint32_t *sec_count) +{ + uint32_t i; + uint32_t sec_cnt; + + /* double bit (multiple bit) error detection is not supported */ + for (i = 0; i < ARRAY_SIZE(sdma_v4_0_ras_fields); i++) { + /* the SDMA_EDC_COUNTER register in each sdma instance + * shares the same sed shift_mask + */ + sec_cnt = (value & + sdma_v4_0_ras_fields[i].sec_count_mask) >> + sdma_v4_0_ras_fields[i].sec_count_shift; + if (sec_cnt) { + DRM_INFO("Detected %s in SDMA%d, SED %d\n", + sdma_v4_0_ras_fields[i].name, + instance, sec_cnt); + *sec_count += sec_cnt; + } + } +} + +static int sdma_v4_0_query_ras_error_count(struct amdgpu_device *adev, + uint32_t instance, void *ras_error_status) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + uint32_t sec_count = 0; + uint32_t reg_value = 0; + + reg_value = RREG32_SDMA(instance, mmSDMA0_EDC_COUNTER); + /* double bit error is not supported */ + if (reg_value) + sdma_v4_0_get_ras_error_count(reg_value, + instance, &sec_count); + /* err_data->ce_count should be initialized to 0 + * before calling into this function */ + err_data->ce_count += sec_count; + /* double bit error is not supported + * set ue count to 0 */ + err_data->ue_count = 0; + + return 0; +} + +static const struct amdgpu_sdma_ras_funcs sdma_v4_0_ras_funcs = { + .ras_late_init = amdgpu_sdma_ras_late_init, + .ras_fini = amdgpu_sdma_ras_fini, + .query_ras_error_count = sdma_v4_0_query_ras_error_count, +}; + +static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev) +{ + switch (adev->asic_type) { + case CHIP_VEGA20: + case CHIP_ARCTURUS: + adev->sdma.funcs = &sdma_v4_0_ras_funcs; + break; + default: + break; } - adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances; } const struct amdgpu_ip_block_version sdma_v4_0_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index f4ad2990f97303a13f1e3663884bde2572792336..67b9830b7c7ebfe3050d8db1cd4458f9f94a0cd5 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -382,8 +382,15 @@ static
void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring, unsigned vmid = AMDGPU_JOB_GET_VMID(job); uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid); - /* IB packet must end on a 8 DW boundary */ - sdma_v5_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); + /* An IB packet must end on an 8 DW boundary--the next dword + * must be on an 8-dword boundary. Our IB packet below is 6 + * dwords long, thus add x NOPs, such that, in + * modular arithmetic, + * wptr + 6 + x = 8k, k >= 0, which in C is + * (wptr + 6 + x) % 8 == 0. + * The expression below is a solution for x; for example, + * wptr % 8 == 5 gives x = (2 - 5) & 7 = 5, and (5 + 6 + 5) % 8 == 0. + */ + sdma_v5_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7); amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf)); @@ -907,16 +914,9 @@ static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring) udelay(1); } - if (i < adev->usec_timeout) { - if (amdgpu_emu_mode == 1) - DRM_INFO("ring test on %d succeeded in %d msecs\n", ring->idx, i); - else - DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); - } else { - DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", - ring->idx, tmp); - r = -EINVAL; - } + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; + amdgpu_device_wb_free(adev, index); return r; @@ -981,13 +981,10 @@ static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) goto err1; } tmp = le32_to_cpu(adev->wb.wb[index]); - if (tmp == 0xDEADBEEF) { - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + if (tmp == 0xDEADBEEF) r = 0; - } else { - DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); + else r = -EINVAL; - } err1: amdgpu_ib_free(adev, &ib, NULL); @@ -1086,10 +1083,10 @@ static void sdma_v5_0_vm_set_pte_pde(struct amdgpu_ib *ib, } /** - * sdma_v5_0_ring_pad_ib - pad the IB to the required number of dw - * + * sdma_v5_0_ring_pad_ib - pad the IB * @ib: indirect buffer to fill with padding * + * Pad the IB with NOPs so that its size is a multiple of 8 dwords. */ static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) { @@ -1097,7 +1094,7 @@ static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib u32 pad_count; int i; - pad_count = (8 - (ib->length_dw & 0x7)) % 8; + pad_count = (-ib->length_dw) & 0x7; for (i = 0; i < pad_count; i++) if (sdma && sdma->burst_nop && (i == 0)) ib->ptr[ib->length_dw++] = @@ -1528,9 +1525,9 @@ static int sdma_v5_0_set_clockgating_state(void *handle, case CHIP_NAVI14: case CHIP_NAVI12: sdma_v5_0_update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); sdma_v5_0_update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ?
true : false); + state == AMD_CG_STATE_GATE); break; default: break; @@ -1721,17 +1718,15 @@ static const struct amdgpu_vm_pte_funcs sdma_v5_0_vm_pte_funcs = { static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev) { - struct drm_gpu_scheduler *sched; unsigned i; if (adev->vm_manager.vm_pte_funcs == NULL) { adev->vm_manager.vm_pte_funcs = &sdma_v5_0_vm_pte_funcs; for (i = 0; i < adev->sdma.num_instances; i++) { - sched = &adev->sdma.instance[i].ring.sched; - adev->vm_manager.vm_pte_rqs[i] = - &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; + adev->vm_manager.vm_pte_scheds[i] = + &adev->sdma.instance[i].ring.sched; } - adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances; + adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances; } } diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index bdda8b4e03f08c01a221061307272ff27d988c53..42d5601b6bf35233f7fbea9cad6181da0ce6198f 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c @@ -648,7 +648,7 @@ static int si_dma_set_clockgating_state(void *handle, bool enable; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - enable = (state == AMD_CG_STATE_GATE) ? true : false; + enable = (state == AMD_CG_STATE_GATE); if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) { for (i = 0; i < adev->sdma.num_instances; i++) { @@ -834,16 +834,14 @@ static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = { static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev) { - struct drm_gpu_scheduler *sched; unsigned i; adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs; for (i = 0; i < adev->sdma.num_instances; i++) { - sched = &adev->sdma.instance[i].ring.sched; - adev->vm_manager.vm_pte_rqs[i] = - &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; + adev->vm_manager.vm_pte_scheds[i] = + &adev->sdma.instance[i].ring.sched; } - adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances; + adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances; } const struct amdgpu_ip_block_version si_dma_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 5bd6ae7a52fe8e035eadd3aafaacdf1ae72e9cb9..15f3424a1ff792b299f83c71c335461f08177d10 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -479,62 +479,18 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev) return ret; } -static int soc15_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap) -{ - if (is_support_sw_smu(adev)) { - struct smu_context *smu = &adev->smu; - - *cap = smu_baco_is_support(smu); - return 0; - } else { - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - - if (!pp_funcs || !pp_funcs->get_asic_baco_capability) { - *cap = false; - return -ENOENT; - } - - return pp_funcs->get_asic_baco_capability(pp_handle, cap); - } -} - static int soc15_asic_baco_reset(struct amdgpu_device *adev) { struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + int ret = 0; /* avoid NBIF got stuck when do RAS recovery in BACO reset */ if (ras && ras->supported) adev->nbio.funcs->enable_doorbell_interrupt(adev, false); - dev_info(adev->dev, "GPU BACO reset\n"); - - if (is_support_sw_smu(adev)) { - struct smu_context *smu = &adev->smu; - int ret; - - ret = smu_baco_enter(smu); - if (ret) - return ret; - - ret = smu_baco_exit(smu); - if (ret) - return ret; - } else { - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs 
*pp_funcs = adev->powerplay.pp_funcs; - - if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state) - return -ENOENT; - - /* enter BACO state */ - if (pp_funcs->set_asic_baco_state(pp_handle, 1)) - return -EIO; - - /* exit BACO state */ - if (pp_funcs->set_asic_baco_state(pp_handle, 0)) - return -EIO; - } + ret = amdgpu_dpm_baco_reset(adev); + if (ret) + return ret; /* re-enable doorbell interrupt after BACO exit */ if (ras && ras->supported) @@ -543,17 +499,6 @@ static int soc15_asic_baco_reset(struct amdgpu_device *adev) return 0; } -static int soc15_mode2_reset(struct amdgpu_device *adev) -{ - if (is_support_sw_smu(adev)) - return smu_mode2_reset(&adev->smu); - if (!adev->powerplay.pp_funcs || - !adev->powerplay.pp_funcs->asic_reset_mode_2) - return -ENOENT; - - return adev->powerplay.pp_funcs->asic_reset_mode_2(adev->powerplay.pp_handle); -} - static enum amd_reset_method soc15_asic_reset_method(struct amdgpu_device *adev) { @@ -567,11 +512,11 @@ soc15_asic_reset_method(struct amdgpu_device *adev) case CHIP_VEGA10: case CHIP_VEGA12: case CHIP_ARCTURUS: - soc15_asic_get_baco_capability(adev, &baco_reset); + baco_reset = amdgpu_dpm_is_baco_supported(adev); break; case CHIP_VEGA20: if (adev->psp.sos_fw_version >= 0x80067) - soc15_asic_get_baco_capability(adev, &baco_reset); + baco_reset = amdgpu_dpm_is_baco_supported(adev); /* * 1. PMFW version > 0x284300: all cases use baco @@ -592,13 +537,17 @@ soc15_asic_reset_method(struct amdgpu_device *adev) static int soc15_asic_reset(struct amdgpu_device *adev) { + /* original raven doesn't have full asic reset */ + if (adev->pdev->device == 0x15dd && adev->rev_id < 0x8) + return 0; + switch (soc15_asic_reset_method(adev)) { case AMD_RESET_METHOD_BACO: if (!adev->in_suspend) amdgpu_inc_vram_lost(adev); return soc15_asic_baco_reset(adev); case AMD_RESET_METHOD_MODE2: - return soc15_mode2_reset(adev); + return amdgpu_dpm_mode2_reset(adev); default: if (!adev->in_suspend) amdgpu_inc_vram_lost(adev); @@ -608,24 +557,18 @@ static int soc15_asic_reset(struct amdgpu_device *adev) static bool soc15_supports_baco(struct amdgpu_device *adev) { - bool baco_support; - switch (adev->asic_type) { case CHIP_VEGA10: case CHIP_VEGA12: - soc15_asic_get_baco_capability(adev, &baco_support); - break; + case CHIP_ARCTURUS: + return amdgpu_dpm_is_baco_supported(adev); case CHIP_VEGA20: if (adev->psp.sos_fw_version >= 0x80067) - soc15_asic_get_baco_capability(adev, &baco_support); - else - baco_support = false; - break; + return amdgpu_dpm_is_baco_supported(adev); + return false; default: return false; } - - return baco_support; } /*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock, @@ -738,9 +681,9 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) } if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS) - adev->df_funcs = &df_v3_6_funcs; + adev->df.funcs = &df_v3_6_funcs; else - adev->df_funcs = &df_v1_7_funcs; + adev->df.funcs = &df_v1_7_funcs; adev->rev_id = soc15_get_rev_id(adev); adev->nbio.funcs->detect_hw_virt(adev); @@ -827,11 +770,14 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); - if (!amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); + amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) + if (amdgpu_sriov_vf(adev)) { + if 
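The soc15 reset path above drops its open-coded swSMU/powerplay branching in favour of the amdgpu_dpm helpers. Reconstructing the shape of that wrapper from the removed lines (a sketch of the pattern, not the actual dpm implementation):

int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	if (is_support_sw_smu(adev)) {
		struct smu_context *smu = &adev->smu;
		int ret;

		ret = smu_baco_enter(smu);
		if (ret)
			return ret;
		return smu_baco_exit(smu);
	} else {
		const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
		void *pp_handle = adev->powerplay.pp_handle;

		if (!pp_funcs || !pp_funcs->set_asic_baco_state)
			return -ENOENT;
		if (pp_funcs->set_asic_baco_state(pp_handle, 1))	/* enter BACO */
			return -EIO;
		if (pp_funcs->set_asic_baco_state(pp_handle, 0))	/* exit BACO */
			return -EIO;
		return 0;
	}
}

Centralizing the dispatch this way lets the capability query, the BACO reset, and the mode-2 reset all shed their near-identical swSMU/powerplay copies in this file.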
(likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) + amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block); + } else { amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block); + } if (!amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block); break; @@ -841,8 +787,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block); - if (is_support_sw_smu(adev)) - amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block); + amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block); amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) @@ -1306,7 +1251,7 @@ static int soc15_common_sw_init(void *handle) if (amdgpu_sriov_vf(adev)) xgpu_ai_mailbox_add_irq_id(adev); - adev->df_funcs->sw_init(adev); + adev->df.funcs->sw_init(adev); return 0; } @@ -1316,7 +1261,7 @@ static int soc15_common_sw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; amdgpu_nbio_ras_fini(adev); - adev->df_funcs->sw_fini(adev); + adev->df.funcs->sw_fini(adev); return 0; } @@ -1526,38 +1471,38 @@ static int soc15_common_set_clockgating_state(void *handle, case CHIP_VEGA12: case CHIP_VEGA20: adev->nbio.funcs->update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); adev->nbio.funcs->update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); soc15_update_hdp_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); soc15_update_drm_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); soc15_update_drm_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); soc15_update_rom_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); - adev->df_funcs->update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); + adev->df.funcs->update_medium_grain_clock_gating(adev, + state == AMD_CG_STATE_GATE); break; case CHIP_RAVEN: case CHIP_RENOIR: adev->nbio.funcs->update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); adev->nbio.funcs->update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); soc15_update_hdp_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); soc15_update_drm_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); soc15_update_drm_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); soc15_update_rom_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); break; case CHIP_ARCTURUS: soc15_update_hdp_light_sleep(adev, - state == AMD_CG_STATE_GATE ? 
true : false); + state == AMD_CG_STATE_GATE); break; default: break; @@ -1595,7 +1540,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags) if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK)) *flags |= AMD_CG_SUPPORT_ROM_MGCG; - adev->df_funcs->get_clockgating_state(adev, flags); + adev->df.funcs->get_clockgating_state(adev, flags); } static int soc15_common_set_powergating_state(void *handle, diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c index 515eb50cd0f8db9ee9104952e242047d7d21f356..793bf70e64b1931af561d6d74e1d74925d9cdcf9 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c @@ -28,27 +28,24 @@ #include "rsmu/rsmu_0_0_2_sh_mask.h" #include "umc/umc_6_1_1_offset.h" #include "umc/umc_6_1_1_sh_mask.h" +#include "umc/umc_6_1_2_offset.h" -#define smnMCA_UMC0_MCUMC_ADDRT0 0x50f10 - -/* UMC 6_1_2 register offsets */ -#define mmUMCCH0_0_EccErrCntSel_ARCT 0x0360 -#define mmUMCCH0_0_EccErrCntSel_ARCT_BASE_IDX 1 -#define mmUMCCH0_0_EccErrCnt_ARCT 0x0361 -#define mmUMCCH0_0_EccErrCnt_ARCT_BASE_IDX 1 -#define mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT 0x03c2 -#define mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT_BASE_IDX 1 +#define UMC_6_INST_DIST 0x40000 /* * (addr / 256) * 8192, the higher 26 bits in ErrorAddr * is the index of 8KB block */ -#define ADDR_OF_8KB_BLOCK(addr) (((addr) & ~0xffULL) << 5) +#define ADDR_OF_8KB_BLOCK(addr) (((addr) & ~0xffULL) << 5) /* channel index is the index of 256B block */ #define ADDR_OF_256B_BLOCK(channel_index) ((channel_index) << 8) /* offset in 256B block */ #define OFFSET_IN_256B_BLOCK(addr) ((addr) & 0xffULL) +#define LOOP_UMC_INST(umc_inst) for ((umc_inst) = 0; (umc_inst) < adev->umc.umc_inst_num; (umc_inst)++) +#define LOOP_UMC_CH_INST(ch_inst) for ((ch_inst) = 0; (ch_inst) < adev->umc.channel_inst_num; (ch_inst)++) +#define LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) LOOP_UMC_INST((umc_inst)) LOOP_UMC_CH_INST((ch_inst)) + const uint32_t umc_v6_1_channel_idx_tbl[UMC_V6_1_UMC_INSTANCE_NUM][UMC_V6_1_CHANNEL_INSTANCE_NUM] = { {2, 18, 11, 27}, {4, 20, 13, 29}, @@ -57,24 +54,10 @@ const uint32_t {9, 25, 0, 16}, {15, 31, 6, 22} }; -static void umc_v6_1_enable_umc_index_mode(struct amdgpu_device *adev, - uint32_t umc_instance) +static void umc_v6_1_enable_umc_index_mode(struct amdgpu_device *adev) { - uint32_t rsmu_umc_index; - - rsmu_umc_index = RREG32_SOC15(RSMU, 0, - mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU); - rsmu_umc_index = REG_SET_FIELD(rsmu_umc_index, - RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU, + WREG32_FIELD15(RSMU, 0, RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU, RSMU_UMC_INDEX_MODE_EN, 1); - rsmu_umc_index = REG_SET_FIELD(rsmu_umc_index, - RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU, - RSMU_UMC_INDEX_INSTANCE, umc_instance); - rsmu_umc_index = REG_SET_FIELD(rsmu_umc_index, - RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU, - RSMU_UMC_INDEX_WREN, 1 << umc_instance); - WREG32_SOC15(RSMU, 0, mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU, - rsmu_umc_index); } static void umc_v6_1_disable_umc_index_mode(struct amdgpu_device *adev) @@ -83,15 +66,23 @@ static void umc_v6_1_disable_umc_index_mode(struct amdgpu_device *adev) RSMU_UMC_INDEX_MODE_EN, 0); } -static uint32_t umc_v6_1_get_umc_inst(struct amdgpu_device *adev) +static uint32_t umc_v6_1_get_umc_index_mode_state(struct amdgpu_device *adev) { uint32_t rsmu_umc_index; rsmu_umc_index = RREG32_SOC15(RSMU, 0, - mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU); + mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU); + return REG_GET_FIELD(rsmu_umc_index, - 
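The 8KB-block macro above is the "(addr / 256) * 8192" comment written as shifts: masking off the low 8 bits and shifting left by 5 is ((addr >> 8) << 8) << 5 = (addr >> 8) << 13. A one-line check (illustrative; assumes <assert.h>):

	uint64_t addr = 0x12345678ULL;
	assert(ADDR_OF_8KB_BLOCK(addr) == (addr >> 8) * 8192);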
RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU, - RSMU_UMC_INDEX_INSTANCE); + RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU, + RSMU_UMC_INDEX_MODE_EN); +} + +static inline uint32_t get_umc_6_reg_offset(struct amdgpu_device *adev, + uint32_t umc_inst, + uint32_t ch_inst) +{ + return adev->umc.channel_offs*ch_inst + UMC_6_INST_DIST*umc_inst; } static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev, @@ -105,7 +96,6 @@ static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev, if (adev->asic_type == CHIP_ARCTURUS) { /* UMC 6_1_2 registers */ - ecc_err_cnt_sel_addr = SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel_ARCT); ecc_err_cnt_addr = @@ -114,7 +104,6 @@ static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev, SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT); } else { /* UMC 6_1_1 registers */ - ecc_err_cnt_sel_addr = SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel); ecc_err_cnt_addr = @@ -124,31 +113,31 @@ static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev, } /* select the lower chip and check the error count */ - ecc_err_cnt_sel = RREG32(ecc_err_cnt_sel_addr + umc_reg_offset); + ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4); ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel, EccErrCntCsSel, 0); - WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel); - ecc_err_cnt = RREG32(ecc_err_cnt_addr + umc_reg_offset); + WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel); + ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4); *error_count += (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) - UMC_V6_1_CE_CNT_INIT); /* clear the lower chip err count */ - WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT); + WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT); /* select the higher chip and check the err counter */ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel, EccErrCntCsSel, 1); - WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel); - ecc_err_cnt = RREG32(ecc_err_cnt_addr + umc_reg_offset); + WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel); + ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4); *error_count += (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) - UMC_V6_1_CE_CNT_INIT); /* clear the higher chip err count */ - WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT); + WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT); /* check for SRAM correctable error MCUMC_STATUS is a 64 bit register */ - mc_umc_status = RREG64_UMC(mc_umc_status_addr + umc_reg_offset); + mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4); if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 6 && REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1) @@ -164,18 +153,16 @@ static void umc_v6_1_querry_uncorrectable_error_count(struct amdgpu_device *adev if (adev->asic_type == CHIP_ARCTURUS) { /* UMC 6_1_2 registers */ - mc_umc_status_addr = SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT); } else { /* UMC 6_1_1 registers */ - mc_umc_status_addr = SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); } /* check the MCUMC_STATUS */ - mc_umc_status = RREG64_UMC(mc_umc_status_addr + umc_reg_offset); + mc_umc_status = 
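The RREG32/WREG32 -> RREG32_PCIE/WREG32_PCIE rewrite throughout this file explains the recurring "* 4": SOC15_REG_OFFSET() plus the per-channel offset is a dword register index, while the PCIE indirect accessors take a byte address. Restating the access pattern used above (illustrative):

	uint32_t reg_dw = ecc_err_cnt_sel_addr + umc_reg_offset;  /* dword index  */
	uint32_t val    = RREG32_PCIE(reg_dw * 4);                /* byte address */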
RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4); if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 || REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 || @@ -185,58 +172,78 @@ static void umc_v6_1_querry_uncorrectable_error_count(struct amdgpu_device *adev *error_count += 1; } -static void umc_v6_1_query_error_count(struct amdgpu_device *adev, - struct ras_err_data *err_data, uint32_t umc_reg_offset, - uint32_t channel_index) -{ - umc_v6_1_query_correctable_error_count(adev, umc_reg_offset, - &(err_data->ce_count)); - umc_v6_1_querry_uncorrectable_error_count(adev, umc_reg_offset, - &(err_data->ue_count)); -} - static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) { - amdgpu_umc_for_each_channel(umc_v6_1_query_error_count); + struct ras_err_data* err_data = (struct ras_err_data*)ras_error_status; + + uint32_t umc_inst = 0; + uint32_t ch_inst = 0; + uint32_t umc_reg_offset = 0; + + uint32_t rsmu_umc_index_state = umc_v6_1_get_umc_index_mode_state(adev); + + if (rsmu_umc_index_state) + umc_v6_1_disable_umc_index_mode(adev); + + LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { + umc_reg_offset = get_umc_6_reg_offset(adev, + umc_inst, + ch_inst); + + umc_v6_1_query_correctable_error_count(adev, + umc_reg_offset, + &(err_data->ce_count)); + umc_v6_1_querry_uncorrectable_error_count(adev, + umc_reg_offset, + &(err_data->ue_count)); + } + + if (rsmu_umc_index_state) + umc_v6_1_enable_umc_index_mode(adev); } static void umc_v6_1_query_error_address(struct amdgpu_device *adev, struct ras_err_data *err_data, - uint32_t umc_reg_offset, uint32_t channel_index) + uint32_t umc_reg_offset, + uint32_t ch_inst, + uint32_t umc_inst) { uint32_t lsb, mc_umc_status_addr; - uint64_t mc_umc_status, err_addr, retired_page; + uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0; struct eeprom_table_record *err_rec; + uint32_t channel_index = adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst]; if (adev->asic_type == CHIP_ARCTURUS) { /* UMC 6_1_2 registers */ - mc_umc_status_addr = SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT); + mc_umc_addrt0 = + SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0_ARCT); } else { /* UMC 6_1_1 registers */ - mc_umc_status_addr = SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); + mc_umc_addrt0 = + SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0); } /* skip error address process if -ENOMEM */ if (!err_data->err_addr) { /* clear umc status */ - WREG64_UMC(mc_umc_status_addr + umc_reg_offset, 0x0ULL); + WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL); return; } err_rec = &err_data->err_addr[err_data->err_addr_cnt]; - mc_umc_status = RREG64_UMC(mc_umc_status_addr + umc_reg_offset); + mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4); /* calculate error address if ue/ce error is detected */ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 || REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) { - err_addr = RREG64_PCIE(smnMCA_UMC0_MCUMC_ADDRT0 + umc_reg_offset * 4); + err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4); /* the lowest lsb bits should be ignored */ lsb = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, LSB); err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr); @@ -257,39 
+264,60 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev, err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE; err_rec->cu = 0; err_rec->mem_channel = channel_index; - err_rec->mcumc_id = umc_v6_1_get_umc_inst(adev); + err_rec->mcumc_id = umc_inst; err_data->err_addr_cnt++; } } /* clear umc status */ - WREG64_UMC(mc_umc_status_addr + umc_reg_offset, 0x0ULL); + WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL); } static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev, void *ras_error_status) { - amdgpu_umc_for_each_channel(umc_v6_1_query_error_address); + struct ras_err_data* err_data = (struct ras_err_data*)ras_error_status; + + uint32_t umc_inst = 0; + uint32_t ch_inst = 0; + uint32_t umc_reg_offset = 0; + + uint32_t rsmu_umc_index_state = umc_v6_1_get_umc_index_mode_state(adev); + + if (rsmu_umc_index_state) + umc_v6_1_disable_umc_index_mode(adev); + + LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { + umc_reg_offset = get_umc_6_reg_offset(adev, + umc_inst, + ch_inst); + + umc_v6_1_query_error_address(adev, + err_data, + umc_reg_offset, + ch_inst, + umc_inst); + } + + if (rsmu_umc_index_state) + umc_v6_1_enable_umc_index_mode(adev); } static void umc_v6_1_err_cnt_init_per_channel(struct amdgpu_device *adev, - struct ras_err_data *err_data, - uint32_t umc_reg_offset, uint32_t channel_index) + uint32_t umc_reg_offset) { uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr; uint32_t ecc_err_cnt_addr; if (adev->asic_type == CHIP_ARCTURUS) { /* UMC 6_1_2 registers */ - ecc_err_cnt_sel_addr = SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel_ARCT); ecc_err_cnt_addr = SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt_ARCT); } else { /* UMC 6_1_1 registers */ - ecc_err_cnt_sel_addr = SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel); ecc_err_cnt_addr = @@ -297,28 +325,44 @@ static void umc_v6_1_err_cnt_init_per_channel(struct amdgpu_device *adev, } /* select the lower chip and check the error count */ - ecc_err_cnt_sel = RREG32(ecc_err_cnt_sel_addr + umc_reg_offset); + ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4); ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel, EccErrCntCsSel, 0); /* set ce error interrupt type to APIC based interrupt */ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel, EccErrInt, 0x1); - WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel); + WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel); /* set error count to initial value */ - WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT); + WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT); /* select the higher chip and check the err counter */ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel, EccErrCntCsSel, 1); - WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel); - WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT); + WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel); + WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT); } static void umc_v6_1_err_cnt_init(struct amdgpu_device *adev) { - void *ras_error_status = NULL; + uint32_t umc_inst = 0; + uint32_t ch_inst = 0; + uint32_t umc_reg_offset = 0; + + uint32_t rsmu_umc_index_state = umc_v6_1_get_umc_index_mode_state(adev); + + if (rsmu_umc_index_state) + umc_v6_1_disable_umc_index_mode(adev); + + LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { + umc_reg_offset = get_umc_6_reg_offset(adev, + umc_inst, + 
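The three UMC walkers in this file (error count, error address, counter init) now share the same bracket: sample the RSMU index-mode state, force flat addressing for the walk, then restore whatever the firmware had configured. The common skeleton (sketch):

	uint32_t saved = umc_v6_1_get_umc_index_mode_state(adev);

	if (saved)
		umc_v6_1_disable_umc_index_mode(adev);

	LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
		uint32_t off = get_umc_6_reg_offset(adev, umc_inst, ch_inst);
		/* per-channel register work via RREG32_PCIE/WREG32_PCIE */
	}

	if (saved)
		umc_v6_1_enable_umc_index_mode(adev);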
ch_inst); + + umc_v6_1_err_cnt_init_per_channel(adev, umc_reg_offset); + } - amdgpu_umc_for_each_channel(umc_v6_1_err_cnt_init_per_channel); + if (rsmu_umc_index_state) + umc_v6_1_enable_umc_index_mode(adev); } const struct amdgpu_umc_funcs umc_v6_1_funcs = { @@ -326,6 +370,4 @@ const struct amdgpu_umc_funcs umc_v6_1_funcs = { .ras_late_init = amdgpu_umc_ras_late_init, .query_ras_error_count = umc_v6_1_query_ras_error_count, .query_ras_error_address = umc_v6_1_query_ras_error_address, - .enable_umc_index_mode = umc_v6_1_enable_umc_index_mode, - .disable_umc_index_mode = umc_v6_1_disable_umc_index_mode, }; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index 01e62fb8e6e00eb11b74d0c428f6aa3b56ccb119..0fa8aae2d78ebabffd9cdf52b0396dcbb153ae42 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -763,7 +763,7 @@ static int uvd_v5_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + bool enable = (state == AMD_CG_STATE_GATE); if (enable) { /* wait for STATUS to clear */ diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 217084d56ab8c86e6b1b16b335bc4faf13258136..e0aadcaf6c8b3cb5629ae5605de26d2fe4a53074 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -1421,7 +1421,7 @@ static int uvd_v6_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + bool enable = (state == AMD_CG_STATE_GATE); if (enable) { /* wait for STATUS to clear */ diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 475ae68f38f5059b8db2cd201f4d120266f19dd9..217db187207c766c08a949d4f006c11d70c4cad6 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -739,7 +739,7 @@ static int vce_v3_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + bool enable = (state == AMD_CG_STATE_GATE); int i; if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index 683701cf72704de702b6d8fcf14be791a0770189..3fd102efb7afe63df7c2ee018ed2c328a856e27e 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -887,7 +887,7 @@ static int vce_v4_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? 
true : false; + bool enable = (state == AMD_CG_STATE_GATE); int i; if ((adev->asic_type == CHIP_POLARIS10) || diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 652cecc030b365bb2286aa8c348242c8aa55ff14..1a24fadd30e2da76d1c2a9cd3760f50edfdeb1ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -25,6 +25,7 @@ #include "amdgpu.h" #include "amdgpu_vcn.h" +#include "amdgpu_pm.h" #include "soc15.h" #include "soc15d.h" #include "soc15_common.h" @@ -38,10 +39,10 @@ #include "ivsrcid/vcn/irqsrcs_vcn_1_0.h" #include "jpeg_v1_0.h" -#define mmUVD_RBC_XX_IB_REG_CHECK 0x05ab -#define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX 1 -#define mmUVD_REG_XX_MASK 0x05ac -#define mmUVD_REG_XX_MASK_BASE_IDX 1 +#define mmUVD_RBC_XX_IB_REG_CHECK_1_0 0x05ab +#define mmUVD_RBC_XX_IB_REG_CHECK_1_0_BASE_IDX 1 +#define mmUVD_REG_XX_MASK_1_0 0x05ac +#define mmUVD_REG_XX_MASK_1_0_BASE_IDX 1 static int vcn_v1_0_stop(struct amdgpu_device *adev); static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev); @@ -49,7 +50,9 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev); static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev); static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state); static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev, - struct dpg_pause_state *new_state); + int inst_idx, struct dpg_pause_state *new_state); + +static void vcn_v1_0_idle_work_handler(struct work_struct *work); /** * vcn_v1_0_early_init - set function pointers @@ -105,6 +108,9 @@ static int vcn_v1_0_sw_init(void *handle) if (r) return r; + /* Override the work func */ + adev->vcn.idle_work.work.func = vcn_v1_0_idle_work_handler; + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { const struct common_firmware_header *hdr; hdr = (const struct common_firmware_header *)adev->vcn.fw->data; @@ -829,9 +835,9 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev) vcn_v1_0_mc_resume_spg_mode(adev); - WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK, 0x10); - WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK, - RREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK) | 0x3); + WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK_1_0, 0x10); + WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0, + RREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0) | 0x3); /* enable VCPU clock */ WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK); @@ -1193,7 +1199,7 @@ static int vcn_v1_0_stop(struct amdgpu_device *adev) } static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev, - struct dpg_pause_state *new_state) + int inst_idx, struct dpg_pause_state *new_state) { int ret_code; uint32_t reg_data = 0; @@ -1340,7 +1346,7 @@ static int vcn_v1_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? 
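Overriding idle_work.work.func after the common amdgpu_vcn_sw_init() lets VCN 1.0 reuse the shared setup while swapping in its own idle handler. An equivalent, arguably more conventional form would re-initialize the work instead of poking its internals (hypothetical alternative, shown for illustration):

	/* re-init rather than assign .func directly */
	INIT_DELAYED_WORK(&adev->vcn.idle_work, vcn_v1_0_idle_work_handler);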
true : false; + bool enable = (state == AMD_CG_STATE_GATE); if (enable) { /* wait for STATUS to clear */ @@ -1758,6 +1764,86 @@ static int vcn_v1_0_set_powergating_state(void *handle, return ret; } +static void vcn_v1_0_idle_work_handler(struct work_struct *work) +{ + struct amdgpu_device *adev = + container_of(work, struct amdgpu_device, vcn.idle_work.work); + unsigned int fences = 0, i; + + for (i = 0; i < adev->vcn.num_enc_rings; ++i) + fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]); + + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { + struct dpg_pause_state new_state; + + if (fences) + new_state.fw_based = VCN_DPG_STATE__PAUSE; + else + new_state.fw_based = VCN_DPG_STATE__UNPAUSE; + + if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec)) + new_state.jpeg = VCN_DPG_STATE__PAUSE; + else + new_state.jpeg = VCN_DPG_STATE__UNPAUSE; + + adev->vcn.pause_dpg_mode(adev, 0, &new_state); + } + + fences += amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec); + fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_dec); + + if (fences == 0) { + amdgpu_gfx_off_ctrl(adev, true); + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_uvd(adev, false); + else + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, + AMD_PG_STATE_GATE); + } else { + schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT); + } +} + +void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work); + + if (set_clocks) { + amdgpu_gfx_off_ctrl(adev, false); + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_uvd(adev, true); + else + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, + AMD_PG_STATE_UNGATE); + } + + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { + struct dpg_pause_state new_state; + unsigned int fences = 0, i; + + for (i = 0; i < adev->vcn.num_enc_rings; ++i) + fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]); + + if (fences) + new_state.fw_based = VCN_DPG_STATE__PAUSE; + else + new_state.fw_based = VCN_DPG_STATE__UNPAUSE; + + if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec)) + new_state.jpeg = VCN_DPG_STATE__PAUSE; + else + new_state.jpeg = VCN_DPG_STATE__UNPAUSE; + + if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) + new_state.fw_based = VCN_DPG_STATE__PAUSE; + else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) + new_state.jpeg = VCN_DPG_STATE__PAUSE; + + adev->vcn.pause_dpg_mode(adev, 0, &new_state); + } +} + static const struct amd_ip_funcs vcn_v1_0_ip_funcs = { .name = "vcn_v1_0", .early_init = vcn_v1_0_early_init, @@ -1804,7 +1890,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = { .insert_start = vcn_v1_0_dec_ring_insert_start, .insert_end = vcn_v1_0_dec_ring_insert_end, .pad_ib = amdgpu_ring_generic_pad_ib, - .begin_use = amdgpu_vcn_ring_begin_use, + .begin_use = vcn_v1_0_ring_begin_use, .end_use = amdgpu_vcn_ring_end_use, .emit_wreg = vcn_v1_0_dec_ring_emit_wreg, .emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait, @@ -1836,7 +1922,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = { .insert_nop = amdgpu_ring_insert_nop, .insert_end = vcn_v1_0_enc_ring_insert_end, .pad_ib = amdgpu_ring_generic_pad_ib, - .begin_use = amdgpu_vcn_ring_begin_use, + .begin_use = vcn_v1_0_ring_begin_use, .end_use = amdgpu_vcn_ring_end_use, .emit_wreg = vcn_v1_0_enc_ring_emit_wreg, .emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h 
b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h index 2a497a7a4840383cbbad5fd86875a2348f4f7e85..f67d7391fc21c9759181ea2bd16149c2bdd8b41d 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h @@ -24,6 +24,8 @@ #ifndef __VCN_V1_0_H__ #define __VCN_V1_0_H__ +void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring); + extern const struct amdgpu_ip_block_version vcn_v1_0_ip_block; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index d76ece38c97bce370976ed21119d41c69a67ad88..4f7216788f11341860260374bb955b8c4933557f 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -47,18 +47,13 @@ #define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x5a7 #define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET 0x1e2 -#define mmUVD_RBC_XX_IB_REG_CHECK 0x026b -#define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX 1 -#define mmUVD_REG_XX_MASK 0x026c -#define mmUVD_REG_XX_MASK_BASE_IDX 1 - static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev); static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev); static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev); static int vcn_v2_0_set_powergating_state(void *handle, enum amd_powergating_state state); static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev, - struct dpg_pause_state *new_state); + int inst_idx, struct dpg_pause_state *new_state); /** * vcn_v2_0_early_init - set function pointers @@ -356,88 +351,88 @@ static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirec /* cache window 0: fw */ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { if (!indirect) { - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo), 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi), 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); } else { - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); } offset = 0; } else { - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), lower_32_bits(adev->vcn.inst->gpu_addr), 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), upper_32_bits(adev->vcn.inst->gpu_addr), 0, indirect); offset = size; - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect); } if (!indirect) - 
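The vcn_v1_0_ring_begin_use() added above leans on a delayed-work idiom worth spelling out: cancel_delayed_work_sync() returns true only when the idle work was still pending, which means the block was used recently and its clocks are still up. Restated (sketch):

	bool powered_down = !cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (powered_down) {
		/* the idle handler already ran (or was never armed):
		 * re-enable gfxoff, DPM or powergating before touching
		 * the ring */
	}
	/* the matching end_use re-arms the idle work via
	 * schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT) */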
WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect); else - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect); /* cache window 1: stack */ if (!indirect) { - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); } else { - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); } - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect); /* cache window 2: context */ - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect); /* non-cache window */ - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect); /* VCN global tiling registers */ - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); } @@ -583,19 +578,19 @@ static void 
vcn_v2_0_clock_gating_dpg_mode(struct amdgpu_device *adev, UVD_CGC_CTRL__WCB_MODE_MASK | UVD_CGC_CTRL__VCPU_MODE_MASK | UVD_CGC_CTRL__SCPU_MODE_MASK); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect); /* turn off clock gating */ - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect); /* turn on SUVD clock gating */ - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect); /* turn on sw mode in UVD_SUVD_CGC_CTRL */ - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect); } @@ -759,7 +754,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect) WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp); if (indirect) - adev->vcn.dpg_sram_curr_addr = (uint32_t*)adev->vcn.dpg_sram_cpu_addr; + adev->vcn.inst->dpg_sram_curr_addr = (uint32_t*)adev->vcn.inst->dpg_sram_cpu_addr; /* enable clock gating */ vcn_v2_0_clock_gating_dpg_mode(adev, 0, indirect); @@ -768,11 +763,11 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect) tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT); tmp |= UVD_VCPU_CNTL__CLK_EN_MASK; tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK; - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect); /* disable master interupt */ - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect); /* setup mmUVD_LMI_CTRL */ @@ -784,28 +779,28 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect) UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | 0x00100000L); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_MPC_CNTL), 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_MPC_SET_MUXA0), ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_MPC_SET_MUXB0), ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_MPC_SET_MUX), ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | @@ -813,29 +808,29 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect) vcn_v2_0_mc_resume_dpg_mode(adev, indirect); - 
WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect); /* release VCPU reset to boot */ - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_SOFT_RESET), 0, 0, indirect); /* enable LMI MC and UMC channels */ - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_LMI_CTRL2), 0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT, 0, indirect); /* enable master interrupt */ - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_MASTINT_EN), UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect); if (indirect) - psp_update_vcn_sram(adev, 0, adev->vcn.dpg_sram_gpu_addr, - (uint32_t)((uintptr_t)adev->vcn.dpg_sram_curr_addr - - (uintptr_t)adev->vcn.dpg_sram_cpu_addr)); + psp_update_vcn_sram(adev, 0, adev->vcn.inst->dpg_sram_gpu_addr, + (uint32_t)((uintptr_t)adev->vcn.inst->dpg_sram_curr_addr - + (uintptr_t)adev->vcn.inst->dpg_sram_cpu_addr)); /* force RBC into idle state */ rb_bufsz = order_base_2(ring->ring_size); @@ -1135,7 +1130,7 @@ power_off: } static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev, - struct dpg_pause_state *new_state) + int inst_idx, struct dpg_pause_state *new_state) { struct amdgpu_ring *ring; uint32_t reg_data = 0; @@ -1218,7 +1213,7 @@ static int vcn_v2_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? 
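The bulk of the vcn_v2_0.c churn above is mechanical: WREG32_SOC15_DPG_MODE_2_0 gains a leading instance argument so the DPG helpers can be shared with the multi-instance VCN 2.5 code later in this series. Before/after, using one representative call from the diff:

	/* before: instance 0 implied */
	WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);

	/* after: the caller names the instance */
	WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);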
true : false; + bool enable = (state == AMD_CG_STATE_GATE); if (enable) { /* wait for STATUS to clear */ @@ -1629,7 +1624,7 @@ static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev, return 0; } -static int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring) +int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; uint32_t tmp = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h index ef749b02ded9eba74d1ef5944a95aa3886e20eb4..6c9de18824284973d57581440c79608b32ad4c94 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h @@ -37,6 +37,7 @@ extern void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr); extern void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val); +extern int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring); extern void vcn_v2_0_enc_ring_insert_end(struct amdgpu_ring *ring); extern void vcn_v2_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index f67fca38c1a9f68bf1e81a42bc2b37298b99062a..70fae7977f8f4695e4e7a3afc240402fc7569904 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -29,6 +29,7 @@ #include "soc15.h" #include "soc15d.h" #include "vcn_v2_0.h" +#include "mmsch_v1_0.h" #include "vcn/vcn_2_5_offset.h" #include "vcn/vcn_2_5_sh_mask.h" @@ -54,6 +55,9 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev); static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev); static int vcn_v2_5_set_powergating_state(void *handle, enum amd_powergating_state state); +static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev, + int inst_idx, struct dpg_pause_state *new_state); +static int vcn_v2_5_sriov_start(struct amdgpu_device *adev); static int amdgpu_ih_clientid_vcns[] = { SOC15_IH_CLIENTID_VCN, @@ -88,7 +92,13 @@ static int vcn_v2_5_early_init(void *handle) } else adev->vcn.num_vcn_inst = 1; - adev->vcn.num_enc_rings = 2; + if (amdgpu_sriov_vf(adev)) { + adev->vcn.num_vcn_inst = 2; + adev->vcn.harvest_config = 0; + adev->vcn.num_enc_rings = 1; + } else { + adev->vcn.num_enc_rings = 2; + } vcn_v2_5_set_dec_ring_funcs(adev); vcn_v2_5_set_enc_ring_funcs(adev); @@ -176,7 +186,9 @@ static int vcn_v2_5_sw_init(void *handle) ring = &adev->vcn.inst[j].ring_dec; ring->use_doorbell = true; - ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8*j; + + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + (amdgpu_sriov_vf(adev) ? 2*j : 8*j); sprintf(ring->name, "vcn_dec_%d", j); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0); if (r) @@ -185,7 +197,10 @@ static int vcn_v2_5_sw_init(void *handle) for (i = 0; i < adev->vcn.num_enc_rings; ++i) { ring = &adev->vcn.inst[j].ring_enc[i]; ring->use_doorbell = true; - ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i + 8*j; + + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + (amdgpu_sriov_vf(adev) ? 
(1 + i + 2*j) : (2 + i + 8*j)); + sprintf(ring->name, "vcn_enc_%d.%d", j, i); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0); if (r) @@ -193,6 +208,15 @@ static int vcn_v2_5_sw_init(void *handle) } } + if (amdgpu_sriov_vf(adev)) { + r = amdgpu_virt_alloc_mm_table(adev); + if (r) + return r; + } + + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) + adev->vcn.pause_dpg_mode = vcn_v2_5_pause_dpg_mode; + return 0; } @@ -208,6 +232,9 @@ static int vcn_v2_5_sw_fini(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_free_mm_table(adev); + r = amdgpu_vcn_suspend(adev); if (r) return r; @@ -228,31 +255,44 @@ static int vcn_v2_5_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring; - int i, j, r; + int i, j, r = 0; + + if (amdgpu_sriov_vf(adev)) + r = vcn_v2_5_sriov_start(adev); for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { if (adev->vcn.harvest_config & (1 << j)) continue; - ring = &adev->vcn.inst[j].ring_dec; - adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, - ring->doorbell_index, j); + if (amdgpu_sriov_vf(adev)) { + adev->vcn.inst[j].ring_enc[0].sched.ready = true; + adev->vcn.inst[j].ring_enc[1].sched.ready = false; + adev->vcn.inst[j].ring_enc[2].sched.ready = false; + adev->vcn.inst[j].ring_dec.sched.ready = true; + } else { - r = amdgpu_ring_test_helper(ring); - if (r) - goto done; + ring = &adev->vcn.inst[j].ring_dec; + + adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, + ring->doorbell_index, j); - for (i = 0; i < adev->vcn.num_enc_rings; ++i) { - ring = &adev->vcn.inst[j].ring_enc[i]; r = amdgpu_ring_test_helper(ring); if (r) goto done; + + for (i = 0; i < adev->vcn.num_enc_rings; ++i) { + ring = &adev->vcn.inst[j].ring_enc[i]; + r = amdgpu_ring_test_helper(ring); + if (r) + goto done; + } } } done: if (!r) - DRM_INFO("VCN decode and encode initialized successfully.\n"); + DRM_INFO("VCN decode and encode initialized successfully(under %s).\n", + (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)?"DPG Mode":"SPG Mode"); return r; } @@ -275,7 +315,9 @@ static int vcn_v2_5_hw_fini(void *handle) continue; ring = &adev->vcn.inst[i].ring_dec; - if (RREG32_SOC15(VCN, i, mmUVD_STATUS)) + if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) || + (adev->vcn.cur_state != AMD_PG_STATE_GATE && + RREG32_SOC15(VCN, i, mmUVD_STATUS))) vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE); ring->sched.ready = false; @@ -350,9 +392,9 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev) /* cache window 0: fw */ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, - (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo)); + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo)); WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, - (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi)); + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi)); WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0); offset = 0; } else { @@ -384,6 +426,99 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev) } } +static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect) +{ + uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); + uint32_t offset; + + /* cache window 0: fw */ + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + if (!indirect) { + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 
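The doorbell arithmetic above packs rings differently under SR-IOV: bare metal reserves a stride of 8 slots per VCN instance (dec at +0, enc rings from +2), while SR-IOV with its single enc ring packs a stride of 2 (dec at +0, enc at +1). Worked out (illustrative):

	uint32_t base = adev->doorbell_index.vcn.vcn_ring0_1 << 1;

	/* bare metal, instance 1: dec = base + 8, enc0 = base + 10 */
	/* SR-IOV,     instance 1: dec = base + 2, enc0 = base + 3  */
	uint32_t dec = base + (amdgpu_sriov_vf(adev) ? 2 * j : 8 * j);
	uint32_t enc = base + (amdgpu_sriov_vf(adev) ?
			       1 + i + 2 * j : 2 + i + 8 * j);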
0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect); + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect); + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); + } else { + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect); + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect); + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); + } + offset = 0; + } else { + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); + offset = size; + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), + AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect); + } + + if (!indirect) + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect); + else + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect); + + /* cache window 1: stack */ + if (!indirect) { + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); + } else { + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect); + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect); + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); + } + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect); + + /* cache window 2: context */ + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect); + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect); + + /* non-cache window */ + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, 
mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect); + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect); + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect); + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect); + + /* VCN global tiling registers */ + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); +} + /** * vcn_v2_5_disable_clock_gating - disable VCN clock gating * @@ -502,6 +637,54 @@ static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev) } } +static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev, + uint8_t sram_sel, int inst_idx, uint8_t indirect) +{ + uint32_t reg_data = 0; + + /* enable sw clock gating control */ + if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) + reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + else + reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; + reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; + reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK | + UVD_CGC_CTRL__UDEC_CM_MODE_MASK | + UVD_CGC_CTRL__UDEC_IT_MODE_MASK | + UVD_CGC_CTRL__UDEC_DB_MODE_MASK | + UVD_CGC_CTRL__UDEC_MP_MODE_MASK | + UVD_CGC_CTRL__SYS_MODE_MASK | + UVD_CGC_CTRL__UDEC_MODE_MASK | + UVD_CGC_CTRL__MPEG2_MODE_MASK | + UVD_CGC_CTRL__REGS_MODE_MASK | + UVD_CGC_CTRL__RBC_MODE_MASK | + UVD_CGC_CTRL__LMI_MC_MODE_MASK | + UVD_CGC_CTRL__LMI_UMC_MODE_MASK | + UVD_CGC_CTRL__IDCT_MODE_MASK | + UVD_CGC_CTRL__MPRD_MODE_MASK | + UVD_CGC_CTRL__MPC_MODE_MASK | + UVD_CGC_CTRL__LBSI_MODE_MASK | + UVD_CGC_CTRL__LRBBM_MODE_MASK | + UVD_CGC_CTRL__WCB_MODE_MASK | + UVD_CGC_CTRL__VCPU_MODE_MASK | + UVD_CGC_CTRL__MMSCH_MODE_MASK); + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect); + + /* turn off clock gating */ + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect); + + /* turn on SUVD clock gating */ + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect); + + /* turn on sw mode in UVD_SUVD_CGC_CTRL */ + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect); +} + /** * vcn_v2_5_enable_clock_gating - enable VCN clock gating * @@ -564,6 +747,138 @@ static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev) } } +static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect) +{ + struct amdgpu_ring *ring; + uint32_t rb_bufsz, tmp; + + /* disable register anti-hang mechanism */ + WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), 1, + ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + /* enable dynamic power gating mode */ + tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_POWER_STATUS); + tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK; + tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK; + WREG32_SOC15(UVD, inst_idx, mmUVD_POWER_STATUS, tmp); + + if (indirect) + adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t*)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr; + + /* enable clock gating */ + vcn_v2_5_clock_gating_dpg_mode(adev, 0, inst_idx, indirect); + + /* enable VCPU clock */ + tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT); + tmp 
|= UVD_VCPU_CNTL__CLK_EN_MASK; + tmp |= UVD_VCPU_CNTL__BLK_RST_MASK; + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect); + + /* disable master interrupt */ + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect); + + /* setup mmUVD_LMI_CTRL */ + tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | + UVD_LMI_CTRL__REQ_MODE_MASK | + UVD_LMI_CTRL__CRC_RESET_MASK | + UVD_LMI_CTRL__MASK_MC_URGENT_MASK | + UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | + (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | + 0x00100000L); + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect); + + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_MPC_CNTL), + 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect); + + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_MPC_SET_MUXA0), + ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | + (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | + (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect); + + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_MPC_SET_MUXB0), + ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | + (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | + (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect); + + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_MPC_SET_MUX), + ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | + (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect); + + vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect); + + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect); + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect); + + /* enable LMI MC and UMC channels */ + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_LMI_CTRL2), 0, 0, indirect); + + /* unblock VCPU register access */ + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect); + + tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT); + tmp |= UVD_VCPU_CNTL__CLK_EN_MASK; + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect); + + /* enable master interrupt */ + WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( + UVD, 0, mmUVD_MASTINT_EN), + UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect); + + if (indirect) + psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr, + (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr - + (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr)); + + ring = &adev->vcn.inst[inst_idx].ring_dec; + /* force RBC into idle state */ + rb_bufsz = order_base_2(ring->ring_size); + tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); + WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_CNTL, tmp); + + /* set the write pointer delay */ + WREG32_SOC15(UVD, inst_idx,
mmUVD_RBC_RB_WPTR_CNTL, 0); + + /* set the wb address */ + WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR_ADDR, + (upper_32_bits(ring->gpu_addr) >> 2)); + + /* program the RB_BASE for ring buffer */ + WREG32_SOC15(UVD, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, + lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(UVD, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, + upper_32_bits(ring->gpu_addr)); + + /* Initialize the ring buffer's read and write pointers */ + WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR, 0); + + WREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2, 0); + + ring->wptr = RREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR); + WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR, + lower_32_bits(ring->wptr)); + + return 0; +} + static int vcn_v2_5_start(struct amdgpu_device *adev) { struct amdgpu_ring *ring; @@ -576,6 +891,11 @@ static int vcn_v2_5_start(struct amdgpu_device *adev) for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) continue; + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { + r = vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram); + continue; + } + /* disable register anti-hang mechanism */ WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), 0, ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); @@ -585,6 +905,9 @@ static int vcn_v2_5_start(struct amdgpu_device *adev) WREG32_SOC15(UVD, i, mmUVD_STATUS, tmp); } + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) + return 0; + /*SW clock gating */ vcn_v2_5_disable_clock_gating(adev); @@ -741,6 +1064,233 @@ static int vcn_v2_5_start(struct amdgpu_device *adev) return 0; } +static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev, + struct amdgpu_mm_table *table) +{ + uint32_t data = 0, loop = 0, size = 0; + uint64_t addr = table->gpu_addr; + struct mmsch_v1_1_init_header *header = NULL; + + header = (struct mmsch_v1_1_init_header *)table->cpu_addr; + size = header->total_size; + + /* + * 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of + * memory descriptor location + */ + WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr)); + WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr)); + + /* 2, update vmid of descriptor */ + data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID); + data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK; + /* use domain0 for MM scheduler */ + data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); + WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data); + + /* 3, notify mmsch about the size of this descriptor */ + WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size); + + /* 4, set resp to zero */ + WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0); + + /* + * 5, kick off the initialization and wait until + * VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero + */ + WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001); + + data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP); + loop = 10; + while ((data & 0x10000002) != 0x10000002) { + udelay(100); + data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP); + loop--; + if (!loop) + break; + } + + if (!loop) { + dev_err(adev->dev, + "failed to init MMSCH, mmMMSCH_VF_MAILBOX_RESP = %x\n", + data); + return -EBUSY; + } + + return 0; +} + +static int vcn_v2_5_sriov_start(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + uint32_t offset, size, tmp, i, rb_bufsz; + uint32_t table_size = 0; + struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } }; + struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } }; + struct mmsch_v1_0_cmd_direct_polling direct_poll = { { 0 } }; + struct mmsch_v1_0_cmd_end end = { { 0 }
}; + uint32_t *init_table = adev->virt.mm_table.cpu_addr; + struct mmsch_v1_1_init_header *header = (struct mmsch_v1_1_init_header *)init_table; + + direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE; + direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE; + direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING; + end.cmd_header.command_type = MMSCH_COMMAND__END; + + header->version = MMSCH_VERSION; + header->total_size = sizeof(struct mmsch_v1_1_init_header) >> 2; + init_table += header->total_size; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + header->eng[i].table_offset = header->total_size; + header->eng[i].init_status = 0; + header->eng[i].table_size = 0; + + table_size = 0; + + MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), + ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY); + + size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); + /* mc resume */ + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo); + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi); + offset = 0; + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0), 0); + } else { + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[i].gpu_addr)); + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[i].gpu_addr)); + offset = size; + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0), + AMDGPU_UVD_FIRMWARE_OFFSET >> 3); + } + + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), + size); + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[i].gpu_addr + offset)); + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[i].gpu_addr + offset)); + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), + 0); + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), + AMDGPU_VCN_STACK_SIZE); + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + + AMDGPU_VCN_STACK_SIZE)); + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + + AMDGPU_VCN_STACK_SIZE)); + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), + 0); + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2), + AMDGPU_VCN_CONTEXT_SIZE); + + ring = &adev->vcn.inst[i].ring_enc[0]; + ring->wptr = 0; + + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), + lower_32_bits(ring->gpu_addr)); + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), + upper_32_bits(ring->gpu_addr)); + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), + ring->ring_size / 4); + + ring = &adev->vcn.inst[i].ring_dec; + ring->wptr = 0; + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_RBC_RB_64BIT_BAR_LOW), + lower_32_bits(ring->gpu_addr)); + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH), + upper_32_bits(ring->gpu_addr)); + + /* force RBC into idle state */ + rb_bufsz = order_base_2(ring->ring_size); + tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp); + + /* add end packet */ + memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end)); + table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4; + init_table += sizeof(struct mmsch_v1_0_cmd_end) / 4; + + /* refine header */ + header->eng[i].table_size = table_size; + header->total_size += table_size; + } + + return vcn_v2_5_mmsch_start(adev, &adev->virt.mm_table); +} + +static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx) +{ + int ret_code = 0; + uint32_t tmp; + + /* Wait for power status to be 1 */ + SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 1, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); + + /* wait for read ptr to be equal to write ptr */ + tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR); + SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code); + + tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2); + SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code); + + tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF; + SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code); + + SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 1, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); + + /* disable dynamic power gating mode */ + WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), 0, + ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); + + return 0; +} + static int vcn_v2_5_stop(struct amdgpu_device *adev) { uint32_t tmp; @@ -749,6 +1299,11 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev) for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) continue; + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { + r = vcn_v2_5_stop_dpg_mode(adev, i); + continue; + } + /* wait for vcn idle */ SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, r); if (r) @@ -804,6 +1359,67 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev) return 0; } +static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev, + int inst_idx, struct dpg_pause_state *new_state) +{ + struct amdgpu_ring *ring; + uint32_t reg_data = 0; + int ret_code; + + /* pause/unpause if state is changed */ + if (adev->vcn.pause_state.fw_based != new_state->fw_based) { + DRM_DEBUG("dpg pause state changed %d -> %d", + adev->vcn.pause_state.fw_based, new_state->fw_based); + reg_data = RREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE) & + (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); + + if (new_state->fw_based == VCN_DPG_STATE__PAUSE) { + ret_code = 0; + SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 0x1, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); + + if (!ret_code) { + /* pause DPG */ + reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; + WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data); + + /* wait for ACK */ + SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_DPG_PAUSE, + 
UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, + UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code); + + /* Restore */ + ring = &adev->vcn.inst[inst_idx].ring_enc[0]; + WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr); + WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4); + WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); + + ring = &adev->vcn.inst[inst_idx].ring_enc[1]; + WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr); + WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4); + WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); + WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); + + WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR, + RREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF); + + SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, + 0x0, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); + } + } else { + /* unpause dpg, no need to wait */ + reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; + WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data); + } + adev->vcn.pause_state.fw_based = new_state->fw_based; + } + + return 0; +} + /** * vcn_v2_5_dec_ring_get_rptr - get read pointer * @@ -846,6 +1462,10 @@ static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) + WREG32_SOC15(UVD, ring->me, mmUVD_SCRATCH2, + lower_32_bits(ring->wptr) | 0x80000000); + if (ring->use_doorbell) { adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); @@ -871,7 +1491,7 @@ static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = { .emit_ib = vcn_v2_0_dec_ring_emit_ib, .emit_fence = vcn_v2_0_dec_ring_emit_fence, .emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush, - .test_ring = amdgpu_vcn_dec_ring_test_ring, + .test_ring = vcn_v2_0_dec_ring_test_ring, .test_ib = amdgpu_vcn_dec_ring_test_ib, .insert_nop = vcn_v2_0_dec_ring_insert_nop, .insert_start = vcn_v2_0_dec_ring_insert_start, @@ -1046,7 +1666,10 @@ static int vcn_v2_5_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? 
true : false; + bool enable = (state == AMD_CG_STATE_GATE); + + if (amdgpu_sriov_vf(adev)) + return 0; if (enable) { if (vcn_v2_5_is_idle(handle)) @@ -1065,6 +1688,9 @@ static int vcn_v2_5_set_powergating_state(void *handle, struct amdgpu_device *adev = (struct amdgpu_device *)handle; int ret; + if (amdgpu_sriov_vf(adev)) + return 0; + if(state == adev->vcn.cur_state) return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c index 5cb7e231de5f0e10e6095aae47f7c146e49b5cbd..407c6093c2ec04244823ec78a62060a8e7151f85 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c @@ -234,16 +234,9 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev) WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (ih->gpu_addr >> 40) & 0xff); ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL); - ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN); ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl); - if (adev->irq.ih.use_bus_addr) { - ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1); - } else { - ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN, MC_SPACE_FBPA_ENABLE, 1); - } ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled); - if (amdgpu_sriov_vf(adev)) { if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) { DRM_ERROR("PSP program IH_RB_CNTL failed!\n"); @@ -253,10 +246,19 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev) WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl); } - if ((adev->asic_type == CHIP_ARCTURUS - && adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) - || adev->asic_type == CHIP_RENOIR) + if ((adev->asic_type == CHIP_ARCTURUS && + adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) || + adev->asic_type == CHIP_RENOIR) { + ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN); + if (adev->irq.ih.use_bus_addr) { + ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN, + MC_SPACE_GPA_ENABLE, 1); + } else { + ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN, + MC_SPACE_FBPA_ENABLE, 1); + } WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken); + } /* set the writeback address whether it's enabled or not */ WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO, @@ -715,7 +717,7 @@ static int vega10_ih_set_clockgating_state(void *handle, struct amdgpu_device *adev = (struct amdgpu_device *)handle; vega10_ih_update_clockgating_state(adev, - state == AMD_CG_STATE_GATE ? 
true : false); + state == AMD_CG_STATE_GATE); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index e4f4201b3c347091e2715c1eb378d279b59eca96..78b35901643bcf8707cdaa1a7a60e4cbfbeb5771 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -689,40 +689,6 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev) return -EINVAL; } -int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap) -{ - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - - if (!pp_funcs || !pp_funcs->get_asic_baco_capability) { - *cap = false; - return -ENOENT; - } - - return pp_funcs->get_asic_baco_capability(pp_handle, cap); -} - -int smu7_asic_baco_reset(struct amdgpu_device *adev) -{ - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - - if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state) - return -ENOENT; - - /* enter BACO state */ - if (pp_funcs->set_asic_baco_state(pp_handle, 1)) - return -EIO; - - /* exit BACO state */ - if (pp_funcs->set_asic_baco_state(pp_handle, 0)) - return -EIO; - - dev_info(adev->dev, "GPU BACO reset\n"); - - return 0; -} - /** * vi_asic_pci_config_reset - soft reset GPU * @@ -747,8 +713,6 @@ static int vi_asic_pci_config_reset(struct amdgpu_device *adev) static bool vi_asic_supports_baco(struct amdgpu_device *adev) { - bool baco_support; - switch (adev->asic_type) { case CHIP_FIJI: case CHIP_TONGA: @@ -756,14 +720,10 @@ static bool vi_asic_supports_baco(struct amdgpu_device *adev) case CHIP_POLARIS11: case CHIP_POLARIS12: case CHIP_TOPAZ: - smu7_asic_get_baco_capability(adev, &baco_support); - break; + return amdgpu_dpm_is_baco_supported(adev); default: - baco_support = false; - break; + return false; } - - return baco_support; } static enum amd_reset_method @@ -778,7 +738,7 @@ vi_asic_reset_method(struct amdgpu_device *adev) case CHIP_POLARIS11: case CHIP_POLARIS12: case CHIP_TOPAZ: - smu7_asic_get_baco_capability(adev, &baco_reset); + baco_reset = amdgpu_dpm_is_baco_supported(adev); break; default: baco_reset = false; @@ -807,7 +767,7 @@ static int vi_asic_reset(struct amdgpu_device *adev) if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { if (!adev->in_suspend) amdgpu_inc_vram_lost(adev); - r = smu7_asic_baco_reset(adev); + r = amdgpu_dpm_baco_reset(adev); } else { r = vi_asic_pci_config_reset(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h index 40d4174913a41d12107c626a52e04fa9c07f7bc6..defb4aaf929aad4f381b05de2a2fcd7d7bbba315 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.h +++ b/drivers/gpu/drm/amd/amdgpu/vi.h @@ -31,7 +31,5 @@ void vi_srbm_select(struct amdgpu_device *adev, int vi_set_ip_blocks(struct amdgpu_device *adev); void legacy_doorbell_index_init(struct amdgpu_device *adev); -int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap); -int smu7_asic_baco_reset(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index b6ba0697c53117dfe54decfec2fcfac369bce11f..3f0300e537277d130f1e025274ce58259834196e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -42,6 +42,7 @@ static long kfd_ioctl(struct file *, unsigned int, unsigned long); static int kfd_open(struct inode *, struct file *); +static int kfd_release(struct inode *, struct file *); static 
int kfd_mmap(struct file *, struct vm_area_struct *); static const char kfd_dev_name[] = "kfd"; @@ -51,6 +52,7 @@ static const struct file_operations kfd_fops = { .unlocked_ioctl = kfd_ioctl, .compat_ioctl = compat_ptr_ioctl, .open = kfd_open, + .release = kfd_release, .mmap = kfd_mmap, }; @@ -124,8 +126,13 @@ static int kfd_open(struct inode *inode, struct file *filep) if (IS_ERR(process)) return PTR_ERR(process); - if (kfd_is_locked()) + if (kfd_is_locked()) { + kfd_unref_process(process); return -EAGAIN; + } + + /* filep now owns the reference returned by kfd_create_process */ + filep->private_data = process; dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n", process->pasid, process->is_32bit_user_mode); @@ -133,6 +140,16 @@ static int kfd_open(struct inode *inode, struct file *filep) return 0; } +static int kfd_release(struct inode *inode, struct file *filep) +{ + struct kfd_process *process = filep->private_data; + + if (process) + kfd_unref_process(process); + + return 0; +} + static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p, void *data) { @@ -1801,9 +1818,14 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg); - process = kfd_get_process(current); - if (IS_ERR(process)) { - dev_dbg(kfd_device, "no process\n"); + /* Get the process struct from the filep. Only the process + * that opened /dev/kfd can use the file descriptor. Child + * processes need to create their own KFD device context. + */ + process = filep->private_data; + if (process->lead_thread != current->group_leader) { + dev_dbg(kfd_device, "Using KFD FD in wrong process\n"); + retcode = -EBADF; goto err_i1; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c index 15c523027285cefb519f70085552009724abc5ad..511712c2e382dd3cb5fc95445d8a848190af6fcd 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c @@ -93,7 +93,7 @@ void kfd_debugfs_init(void) kfd_debugfs_hqds_by_device, &kfd_debugfs_fops); debugfs_create_file("rls", S_IFREG | 0444, debugfs_root, kfd_debugfs_rls_by_device, &kfd_debugfs_fops); - debugfs_create_file("hang_hws", S_IFREG | 0644, debugfs_root, + debugfs_create_file("hang_hws", S_IFREG | 0200, debugfs_root, NULL, &kfd_debugfs_hang_hws_fops); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 209bfc849352b9f20abb06e9d1930c1257f69914..2a9e4013173534eaa680739232347c2f6c12125b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -728,6 +728,9 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd) { if (!kfd->init_complete) return 0; + + kfd->dqm->ops.pre_reset(kfd->dqm); + kgd2kfd_suspend(kfd); kfd_signal_reset_event(kfd); @@ -822,6 +825,21 @@ dqm_start_error: return err; } +static inline void kfd_queue_work(struct workqueue_struct *wq, + struct work_struct *work) +{ + int cpu, new_cpu; + + cpu = new_cpu = smp_processor_id(); + do { + new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids; + if (cpu_to_node(new_cpu) == numa_node_id()) + break; + } while (cpu != new_cpu); + + queue_work_on(new_cpu, wq, work); +} + /* This is called directly from KGD at ISR. 
*/ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) { @@ -844,7 +862,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) patched_ihre, &is_patched) && enqueue_ih_ring_entry(kfd, is_patched ? patched_ihre : ih_ring_entry)) - queue_work(kfd->ih_wq, &kfd->interrupt_work); + kfd_queue_work(kfd->ih_wq, &kfd->interrupt_work); spin_unlock_irqrestore(&kfd->interrupt_lock, flags); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index f7f6df40875e5b430df7fe9e581d6d9776f60490..80d22bf702e814c72a91949d7e94f5a11510fe5c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -930,11 +930,11 @@ static void uninitialize(struct device_queue_manager *dqm) for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++) kfree(dqm->mqd_mgrs[i]); mutex_destroy(&dqm->lock_hidden); - kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem); } static int start_nocpsch(struct device_queue_manager *dqm) { + pr_info("SW scheduler is used"); init_interrupts(dqm); if (dqm->dev->device_info->asic_family == CHIP_HAWAII) @@ -947,12 +947,19 @@ static int start_nocpsch(struct device_queue_manager *dqm) static int stop_nocpsch(struct device_queue_manager *dqm) { if (dqm->dev->device_info->asic_family == CHIP_HAWAII) - pm_uninit(&dqm->packets); + pm_uninit(&dqm->packets, false); dqm->sched_running = false; return 0; } +static void pre_reset(struct device_queue_manager *dqm) +{ + dqm_lock(dqm); + dqm->is_resetting = true; + dqm_unlock(dqm); +} + static int allocate_sdma_queue(struct device_queue_manager *dqm, struct queue *q) { @@ -1100,6 +1107,7 @@ static int start_cpsch(struct device_queue_manager *dqm) dqm_lock(dqm); /* clear hang status when driver try to start the hw scheduler */ dqm->is_hws_hang = false; + dqm->is_resetting = false; dqm->sched_running = true; execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); dqm_unlock(dqm); @@ -1107,20 +1115,24 @@ static int start_cpsch(struct device_queue_manager *dqm) return 0; fail_allocate_vidmem: fail_set_sched_resources: - pm_uninit(&dqm->packets); + pm_uninit(&dqm->packets, false); fail_packet_manager_init: return retval; } static int stop_cpsch(struct device_queue_manager *dqm) { + bool hanging; + dqm_lock(dqm); - unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0); + if (!dqm->is_hws_hang) + unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0); + hanging = dqm->is_hws_hang || dqm->is_resetting; dqm->sched_running = false; dqm_unlock(dqm); kfd_gtt_sa_free(dqm->dev, dqm->fence_mem); - pm_uninit(&dqm->packets); + pm_uninit(&dqm->packets, hanging); return 0; } @@ -1225,16 +1237,18 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, list_add(&q->list, &qpd->queues_list); qpd->queue_count++; + + if (q->properties.type == KFD_QUEUE_TYPE_SDMA) + dqm->sdma_queue_count++; + else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) + dqm->xgmi_sdma_queue_count++; + if (q->properties.is_active) { dqm->queue_count++; retval = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); } - if (q->properties.type == KFD_QUEUE_TYPE_SDMA) - dqm->sdma_queue_count++; - else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) - dqm->xgmi_sdma_queue_count++; /* * Unconditionally increment this counter, regardless of the queue's * type or whether the queue is active. 
@@ -1352,8 +1366,17 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, /* should be timed out */ retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED, queue_preemption_timeout_ms); - if (retval) + if (retval) { + pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n"); + dqm->is_hws_hang = true; + /* It's possible we're detecting a HWS hang in the + * middle of a GPU reset. No need to schedule another + * reset in this case. + */ + if (!dqm->is_resetting) + schedule_work(&dqm->hw_exception_work); return retval; + } pm_release_ib(&dqm->packets); dqm->active_runlist = false; @@ -1371,12 +1394,8 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm, if (dqm->is_hws_hang) return -EIO; retval = unmap_queues_cpsch(dqm, filter, filter_param); - if (retval) { - pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n"); - dqm->is_hws_hang = true; - schedule_work(&dqm->hw_exception_work); + if (retval) return retval; - } return map_queues_cpsch(dqm); } @@ -1770,6 +1789,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) dqm->ops.initialize = initialize_cpsch; dqm->ops.start = start_cpsch; dqm->ops.stop = stop_cpsch; + dqm->ops.pre_reset = pre_reset; dqm->ops.destroy_queue = destroy_queue_cpsch; dqm->ops.update_queue = update_queue; dqm->ops.register_process = register_process; @@ -1788,6 +1808,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) /* initialize dqm for no cp scheduling */ dqm->ops.start = start_nocpsch; dqm->ops.stop = stop_nocpsch; + dqm->ops.pre_reset = pre_reset; dqm->ops.create_queue = create_queue_nocpsch; dqm->ops.destroy_queue = destroy_queue_nocpsch; dqm->ops.update_queue = update_queue; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index a8c37e6da02785553839c9b9963fccf99ffa4c62..871d3b628d2dd85b6f6a227b5b1c51d6cad06c53 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -104,6 +104,7 @@ struct device_queue_manager_ops { int (*initialize)(struct device_queue_manager *dqm); int (*start)(struct device_queue_manager *dqm); int (*stop)(struct device_queue_manager *dqm); + void (*pre_reset)(struct device_queue_manager *dqm); void (*uninitialize)(struct device_queue_manager *dqm); int (*create_kernel_queue)(struct device_queue_manager *dqm, struct kernel_queue *kq, @@ -190,7 +191,6 @@ struct device_queue_manager { /* the pasid mapping for each kfd vmid */ uint16_t vmid_pasid[VMID_NUM]; uint64_t pipelines_addr; - struct kfd_mem_obj *pipeline_mem; uint64_t fence_gpu_addr; unsigned int *fence_addr; struct kfd_mem_obj *fence_mem; @@ -199,6 +199,7 @@ struct device_queue_manager { /* hw exception */ bool is_hws_hang; + bool is_resetting; struct work_struct hw_exception_work; struct kfd_mem_obj hiq_sdma_mqd; bool sched_running; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index 2d56dc534459670298f300010e1679e59682cea6..bae706462f962790be6ef3d463605c9f08a01ee6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -195,9 +195,9 @@ err_get_kernel_doorbell: } /* Uninitialize a kernel queue and free all its memory usages. 
*/ -static void kq_uninitialize(struct kernel_queue *kq) +static void kq_uninitialize(struct kernel_queue *kq, bool hanging) { - if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ) + if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ && !hanging) kq->mqd_mgr->destroy_mqd(kq->mqd_mgr, kq->queue->mqd, KFD_PREEMPT_TYPE_WAVEFRONT_RESET, @@ -337,9 +337,9 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, return NULL; } -void kernel_queue_uninit(struct kernel_queue *kq) +void kernel_queue_uninit(struct kernel_queue *kq, bool hanging) { - kq_uninitialize(kq); + kq_uninitialize(kq, hanging); kfree(kq); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index 7832ec6e480b8660afe83ae7e146a2399c054d20..d1d68a51bfb80420ba30e6a1dd3666cb4cd1ae01 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -153,6 +153,14 @@ static int load_mqd(struct mqd_manager *mm, void *mqd, return r; } +static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd, + uint32_t pipe_id, uint32_t queue_id, + struct queue_properties *p, struct mm_struct *mms) +{ + return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->kgd, mqd, pipe_id, + queue_id, p->doorbell_off); +} + static void update_mqd(struct mqd_manager *mm, void *mqd, struct queue_properties *q) { @@ -409,7 +417,7 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type, mqd->allocate_mqd = allocate_hiq_mqd; mqd->init_mqd = init_mqd_hiq; mqd->free_mqd = free_mqd_hiq_sdma; - mqd->load_mqd = load_mqd; + mqd->load_mqd = hiq_load_mqd_kiq; mqd->update_mqd = update_mqd; mqd->destroy_mqd = destroy_mqd; mqd->is_occupied = is_occupied; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index aa9010995eaf228469e43fafd4d4683c72bf3e4b..436b7f518979185392b264c374b9f0c950f973e1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -191,6 +191,14 @@ static int load_mqd(struct mqd_manager *mm, void *mqd, wptr_shift, 0, mms); } +static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd, + uint32_t pipe_id, uint32_t queue_id, + struct queue_properties *p, struct mm_struct *mms) +{ + return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->kgd, mqd, pipe_id, + queue_id, p->doorbell_off); +} + static void update_mqd(struct mqd_manager *mm, void *mqd, struct queue_properties *q) { @@ -449,7 +457,7 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, mqd->allocate_mqd = allocate_hiq_mqd; mqd->init_mqd = init_mqd_hiq; mqd->free_mqd = free_mqd_hiq_sdma; - mqd->load_mqd = load_mqd; + mqd->load_mqd = hiq_load_mqd_kiq; mqd->update_mqd = update_mqd; mqd->destroy_mqd = destroy_mqd; mqd->is_occupied = is_occupied; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c index 6cabed06ef5d75d169150247d805b4ac98e81dfa..dc406e6dee234bae33dc4d202d317b05d2db7ca9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c @@ -264,10 +264,10 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm) return 0; } -void pm_uninit(struct packet_manager *pm) +void pm_uninit(struct packet_manager *pm, bool hanging) { mutex_destroy(&pm->lock); - kernel_queue_uninit(pm->priv_queue); + kernel_queue_uninit(pm->priv_queue, hanging); } int pm_send_set_resources(struct packet_manager *pm, diff --git 
a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index fc61b5ec068ed0066311a905d5eb88b097b2924c..6af1b5881f435e811073e996c1444d1b1c805d32 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -883,7 +883,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev); void device_queue_manager_uninit(struct device_queue_manager *dqm); struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, enum kfd_queue_type type); -void kernel_queue_uninit(struct kernel_queue *kq); +void kernel_queue_uninit(struct kernel_queue *kq, bool hanging); int kfd_process_vm_fault(struct device_queue_manager *dqm, unsigned int pasid); /* Process Queue Manager */ @@ -972,7 +972,7 @@ extern const struct packet_manager_funcs kfd_vi_pm_funcs; extern const struct packet_manager_funcs kfd_v9_pm_funcs; int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm); -void pm_uninit(struct packet_manager *pm); +void pm_uninit(struct packet_manager *pm, bool hanging); int pm_send_set_resources(struct packet_manager *pm, struct scheduling_resources *res); int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 8276601a122fc727296f2014be60d635af4e9e50..25b90f70aecd096b959cf87892686ff6ad95c70a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -32,6 +32,7 @@ #include #include #include "amdgpu_amdkfd.h" +#include "amdgpu.h" struct mm_struct; @@ -324,6 +325,8 @@ struct kfd_process *kfd_create_process(struct file *filep) (int)process->lead_thread->pid); } out: + if (!IS_ERR(process)) + kref_get(&process->ref); mutex_unlock(&kfd_processes_mutex); return process; @@ -1150,16 +1153,17 @@ int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process, void kfd_flush_tlb(struct kfd_process_device *pdd) { struct kfd_dev *dev = pdd->dev; - const struct kfd2kgd_calls *f2g = dev->kfd2kgd; if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { /* Nothing to flush until a VMID is assigned, which * only happens when the first queue is created. 
*/ if (pdd->qpd.vmid) - f2g->invalidate_tlbs_vmid(dev->kgd, pdd->qpd.vmid); + amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->kgd, + pdd->qpd.vmid); } else { - f2g->invalidate_tlbs(dev->kgd, pdd->process->pasid); + amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->kgd, + pdd->process->pasid); } } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 1152490bbf532c4bf144b3ecea35e60bfe166e03..31fcd1b51f00fa6d4457e3249bd48fd345d424cd 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -374,7 +374,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid) /* destroy kernel queue (DIQ) */ dqm = pqn->kq->dev->dqm; dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd); - kernel_queue_uninit(pqn->kq); + kernel_queue_uninit(pqn->kq, false); } if (pqn->q) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 69bd0628fdc607677890afd11e4b00c74ed0e113..203c823d65f158bc6a211895273b6f33263feb49 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -486,6 +486,10 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev->node_props.num_sdma_engines); sysfs_show_32bit_prop(buffer, "num_sdma_xgmi_engines", dev->node_props.num_sdma_xgmi_engines); + sysfs_show_32bit_prop(buffer, "num_sdma_queues_per_engine", + dev->node_props.num_sdma_queues_per_engine); + sysfs_show_32bit_prop(buffer, "num_cp_queues", + dev->node_props.num_cp_queues); if (dev->gpu) { log_max_watch_addr = @@ -1309,9 +1313,12 @@ int kfd_topology_add_device(struct kfd_dev *gpu) dev->node_props.num_sdma_engines = gpu->device_info->num_sdma_engines; dev->node_props.num_sdma_xgmi_engines = gpu->device_info->num_xgmi_sdma_engines; + dev->node_props.num_sdma_queues_per_engine = + gpu->device_info->num_sdma_queues_per_engine; dev->node_props.num_gws = (hws_gws_support && dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ? amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0; + dev->node_props.num_cp_queues = get_queues_num(dev->gpu->dqm); kfd_fill_mem_clk_max_info(dev); kfd_fill_iolink_non_crat_info(dev); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h index 15843e0fc7564246be673b04d90ccdde4dbb929e..74e9b1682af8a9cc4de04a30450f5aa761168bd6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h @@ -81,6 +81,8 @@ struct kfd_node_properties { int32_t drm_render_minor; uint32_t num_sdma_engines; uint32_t num_sdma_xgmi_engines; + uint32_t num_sdma_queues_per_engine; + uint32_t num_cp_queues; char name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE]; }; diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index 096db863c345a3cdc13679b91e4238661669867d..87858bc57e640b33c7ab83021685a718ac06fe96 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -6,7 +6,7 @@ config DRM_AMD_DC bool "AMD DC - Enable new display engine" default y select SND_HDA_COMPONENT if SND_HDA_CORE - select DRM_AMD_DC_DCN if X86 && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS) + select DRM_AMD_DC_DCN if (X86 || PPC64) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS) help Choose this option if you want to use the new display engine support for AMDGPU. 
This adds required support for Vega and diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index f2db400a39204c3f257392ed3009951679a98a9d..279541517a99a3a733722f72c9fff84fd8d13b2b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -98,6 +98,12 @@ MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB); #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin" MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU); +/* Number of bytes in PSP header for firmware. */ +#define PSP_HEADER_BYTES 0x100 + +/* Number of bytes in PSP footer for firmware. */ +#define PSP_FOOTER_BYTES 0x100 + /** * DOC: overview * @@ -741,28 +747,27 @@ void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin) static int dm_dmub_hw_init(struct amdgpu_device *adev) { - const unsigned int psp_header_bytes = 0x100; - const unsigned int psp_footer_bytes = 0x100; const struct dmcub_firmware_header_v1_0 *hdr; struct dmub_srv *dmub_srv = adev->dm.dmub_srv; + struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info; const struct firmware *dmub_fw = adev->dm.dmub_fw; struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu; struct abm *abm = adev->dm.dc->res_pool->abm; - struct dmub_srv_region_params region_params; - struct dmub_srv_region_info region_info; - struct dmub_srv_fb_params fb_params; - struct dmub_srv_fb_info fb_info; struct dmub_srv_hw_params hw_params; enum dmub_status status; const unsigned char *fw_inst_const, *fw_bss_data; - uint32_t i; - int r; + uint32_t i, fw_inst_const_size, fw_bss_data_size; bool has_hw_support; if (!dmub_srv) /* DMUB isn't supported on the ASIC. */ return 0; + if (!fb_info) { + DRM_ERROR("No framebuffer info for DMUB service.\n"); + return -EINVAL; + } + if (!dmub_fw) { /* Firmware required for DMUB support. */ DRM_ERROR("No firmware provided for DMUB.\n"); @@ -782,60 +787,36 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data; - /* Calculate the size of all the regions for the DMUB service. */ - memset(®ion_params, 0, sizeof(region_params)); - - region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) - - psp_header_bytes - psp_footer_bytes; - region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes); - region_params.vbios_size = adev->dm.dc->ctx->dc_bios->bios_size; - - status = dmub_srv_calc_region_info(dmub_srv, ®ion_params, - ®ion_info); - - if (status != DMUB_STATUS_OK) { - DRM_ERROR("Error calculating DMUB region info: %d\n", status); - return -EINVAL; - } - - /* - * Allocate a framebuffer based on the total size of all the regions. - * TODO: Move this into GART. - */ - r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo, - &adev->dm.dmub_bo_gpu_addr, - &adev->dm.dmub_bo_cpu_addr); - if (r) - return r; - - /* Rebase the regions on the framebuffer address. 
*/ - memset(&fb_params, 0, sizeof(fb_params)); - fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr; - fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr; - fb_params.region_info = ®ion_info; - - status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, &fb_info); - if (status != DMUB_STATUS_OK) { - DRM_ERROR("Error calculating DMUB FB info: %d\n", status); - return -EINVAL; - } - fw_inst_const = dmub_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes) + - psp_header_bytes; + PSP_HEADER_BYTES; fw_bss_data = dmub_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes) + le32_to_cpu(hdr->inst_const_bytes); /* Copy firmware and bios info into FB memory. */ - memcpy(fb_info.fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const, - region_params.inst_const_size); - memcpy(fb_info.fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data, - region_params.bss_data_size); - memcpy(fb_info.fb[DMUB_WINDOW_3_VBIOS].cpu_addr, - adev->dm.dc->ctx->dc_bios->bios, region_params.vbios_size); + fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) - + PSP_HEADER_BYTES - PSP_FOOTER_BYTES; + + fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes); + + memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const, + fw_inst_const_size); + memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data, + fw_bss_data_size); + memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios, + adev->bios_size); + + /* Reset regions that need to be reset. */ + memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0, + fb_info->fb[DMUB_WINDOW_4_MAILBOX].size); + + memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0, + fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size); + + memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0, + fb_info->fb[DMUB_WINDOW_6_FW_STATE].size); /* Initialize hardware. 
*/ memset(&hw_params, 0, sizeof(hw_params)); @@ -845,8 +826,8 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) if (dmcu) hw_params.psp_version = dmcu->psp_version; - for (i = 0; i < fb_info.num_fb; ++i) - hw_params.fb[i] = &fb_info.fb[i]; + for (i = 0; i < fb_info->num_fb; ++i) + hw_params.fb[i] = &fb_info->fb[i]; status = dmub_srv_hw_init(dmub_srv, &hw_params); if (status != DMUB_STATUS_OK) { @@ -925,13 +906,16 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) init_data.dce_environment = DCE_ENV_PRODUCTION_DRV; - /* - * TODO debug why this doesn't work on Raven - */ - if (adev->flags & AMD_IS_APU && - adev->asic_type >= CHIP_CARRIZO && - adev->asic_type < CHIP_RAVEN) + switch (adev->asic_type) { + case CHIP_CARRIZO: + case CHIP_STONEY: + case CHIP_RAVEN: + case CHIP_RENOIR: init_data.flags.gpu_vm_support = true; + break; + default: + break; + } if (amdgpu_dc_feature_mask & DC_FBC_MASK) init_data.flags.fbc_support = true; @@ -956,14 +940,14 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) goto error; } - dc_hardware_init(adev->dm.dc); - r = dm_dmub_hw_init(adev); if (r) { DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); goto error; } + dc_hardware_init(adev->dm.dc); + adev->dm.freesync_module = mod_freesync_create(adev->dm.dc); if (!adev->dm.freesync_module) { DRM_ERROR( @@ -1174,6 +1158,11 @@ static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address, static int dm_dmub_sw_init(struct amdgpu_device *adev) { struct dmub_srv_create_params create_params; + struct dmub_srv_region_params region_params; + struct dmub_srv_region_info region_info; + struct dmub_srv_fb_params fb_params; + struct dmub_srv_fb_info *fb_info; + struct dmub_srv *dmub_srv; const struct dmcub_firmware_header_v1_0 *hdr; const char *fw_name_dmub; enum dmub_asic dmub_asic; @@ -1191,24 +1180,6 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) return 0; } - adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL); - if (!adev->dm.dmub_srv) { - DRM_ERROR("Failed to allocate DMUB service!\n"); - return -ENOMEM; - } - - memset(&create_params, 0, sizeof(create_params)); - create_params.user_ctx = adev; - create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read; - create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write; - create_params.asic = dmub_asic; - - status = dmub_srv_create(adev->dm.dmub_srv, &create_params); - if (status != DMUB_STATUS_OK) { - DRM_ERROR("Error creating DMUB service: %d\n", status); - return -EINVAL; - } - r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev); if (r) { DRM_ERROR("DMUB firmware loading failed: %d\n", r); @@ -1238,6 +1209,80 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n", adev->dm.dmcub_fw_version); + adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL); + dmub_srv = adev->dm.dmub_srv; + + if (!dmub_srv) { + DRM_ERROR("Failed to allocate DMUB service!\n"); + return -ENOMEM; + } + + memset(&create_params, 0, sizeof(create_params)); + create_params.user_ctx = adev; + create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read; + create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write; + create_params.asic = dmub_asic; + + /* Create the DMUB service. */ + status = dmub_srv_create(dmub_srv, &create_params); + if (status != DMUB_STATUS_OK) { + DRM_ERROR("Error creating DMUB service: %d\n", status); + return -EINVAL; + } + + /* Calculate the size of all the regions for the DMUB service. 
*/ + memset(®ion_params, 0, sizeof(region_params)); + + region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) - + PSP_HEADER_BYTES - PSP_FOOTER_BYTES; + region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes); + region_params.vbios_size = adev->bios_size; + region_params.fw_bss_data = + adev->dm.dmub_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes) + + le32_to_cpu(hdr->inst_const_bytes); + + status = dmub_srv_calc_region_info(dmub_srv, ®ion_params, + ®ion_info); + + if (status != DMUB_STATUS_OK) { + DRM_ERROR("Error calculating DMUB region info: %d\n", status); + return -EINVAL; + } + + /* + * Allocate a framebuffer based on the total size of all the regions. + * TODO: Move this into GART. + */ + r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo, + &adev->dm.dmub_bo_gpu_addr, + &adev->dm.dmub_bo_cpu_addr); + if (r) + return r; + + /* Rebase the regions on the framebuffer address. */ + memset(&fb_params, 0, sizeof(fb_params)); + fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr; + fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr; + fb_params.region_info = ®ion_info; + + adev->dm.dmub_fb_info = + kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL); + fb_info = adev->dm.dmub_fb_info; + + if (!fb_info) { + DRM_ERROR( + "Failed to allocate framebuffer info for DMUB service!\n"); + return -ENOMEM; + } + + status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info); + if (status != DMUB_STATUS_OK) { + DRM_ERROR("Error calculating DMUB FB info: %d\n", status); + return -EINVAL; + } + return 0; } @@ -1257,6 +1302,9 @@ static int dm_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + kfree(adev->dm.dmub_fb_info); + adev->dm.dmub_fb_info = NULL; + if (adev->dm.dmub_srv) { dmub_srv_destroy(adev->dm.dmub_srv); adev->dm.dmub_srv = NULL; @@ -1559,7 +1607,7 @@ static int dm_resume(void *handle) struct dm_plane_state *dm_new_plane_state; struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state); enum dc_connection_type new_connection_type = dc_connection_none; - int i; + int i, r; /* Recreate dc_state - DC invalidates it when setting power state to S3. */ dc_release_state(dm_state->context); @@ -1567,6 +1615,11 @@ static int dm_resume(void *handle) /* TODO: Remove dc_state->dccg, use dc->dccg directly. */ dc_resource_state_construct(dm->dc, dm_state->context); + /* Before powering on DC we need to re-initialize DMUB. 
*/ + r = dm_dmub_hw_init(adev); + if (r) + DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); + /* power on hardware */ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); @@ -3654,27 +3707,21 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing) return color_space; } -static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out) -{ - if (timing_out->display_color_depth <= COLOR_DEPTH_888) - return; - - timing_out->display_color_depth--; -} - -static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out, - const struct drm_display_info *info) +static bool adjust_colour_depth_from_display_info( + struct dc_crtc_timing *timing_out, + const struct drm_display_info *info) { + enum dc_color_depth depth = timing_out->display_color_depth; int normalized_clk; - if (timing_out->display_color_depth <= COLOR_DEPTH_888) - return; do { normalized_clk = timing_out->pix_clk_100hz / 10; /* YCbCr 4:2:0 requires additional adjustment of 1/2 */ if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420) normalized_clk /= 2; /* Adjusting pix clock following on HDMI spec based on colour depth */ - switch (timing_out->display_color_depth) { + switch (depth) { + case COLOR_DEPTH_888: + break; case COLOR_DEPTH_101010: normalized_clk = (normalized_clk * 30) / 24; break; @@ -3685,14 +3732,15 @@ static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_ normalized_clk = (normalized_clk * 48) / 24; break; default: - return; + /* The above depths are the only ones valid for HDMI. */ + return false; } - if (normalized_clk <= info->max_tmds_clock) - return; - reduce_mode_colour_depth(timing_out); - - } while (timing_out->display_color_depth > COLOR_DEPTH_888); - + if (normalized_clk <= info->max_tmds_clock) { + timing_out->display_color_depth = depth; + return true; + } + } while (--depth > COLOR_DEPTH_666); + return false; } static void fill_stream_properties_from_drm_display_mode( @@ -3773,8 +3821,14 @@ static void fill_stream_properties_from_drm_display_mode( stream->out_transfer_func->type = TF_TYPE_PREDEFINED; stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; - if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) - adjust_colour_depth_from_display_info(timing_out, info); + if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { + if (!adjust_colour_depth_from_display_info(timing_out, info) && + drm_mode_is_420_also(info, mode_in) && + timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) { + timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; + adjust_colour_depth_from_display_info(timing_out, info); + } + } } static void fill_audio_info(struct audio_info *audio_info, @@ -4025,7 +4079,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { #if defined(CONFIG_DRM_AMD_DC_DCN) - dc_dsc_parse_dsc_dpcd(aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, + dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc, + aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw, &dsc_caps); #endif @@ -4881,12 +4936,13 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, is_y420); bpp = convert_dc_color_depth_into_bpc(color_depth) * 3; clock = adjusted_mode->clock; - dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp); + dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false); } dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state, 
mst_mgr, mst_port, - dm_new_connector_state->pbn); + dm_new_connector_state->pbn, + 0); if (dm_new_connector_state->vcpi_slots < 0) { DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots); return dm_new_connector_state->vcpi_slots; @@ -4899,6 +4955,71 @@ const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = { .atomic_check = dm_encoder_helper_atomic_check }; +#if defined(CONFIG_DRM_AMD_DC_DCN) +static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, + struct dc_state *dc_state) +{ + struct dc_stream_state *stream = NULL; + struct drm_connector *connector; + struct drm_connector_state *new_con_state, *old_con_state; + struct amdgpu_dm_connector *aconnector; + struct dm_connector_state *dm_conn_state; + int i, j, clock, bpp; + int vcpi, pbn_div, pbn = 0; + + for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { + + aconnector = to_amdgpu_dm_connector(connector); + + if (!aconnector->port) + continue; + + if (!new_con_state || !new_con_state->crtc) + continue; + + dm_conn_state = to_dm_connector_state(new_con_state); + + for (j = 0; j < dc_state->stream_count; j++) { + stream = dc_state->streams[j]; + if (!stream) + continue; + + if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector) + break; + + stream = NULL; + } + + if (!stream) + continue; + + if (stream->timing.flags.DSC != 1) { + drm_dp_mst_atomic_enable_dsc(state, + aconnector->port, + dm_conn_state->pbn, + 0, + false); + continue; + } + + pbn_div = dm_mst_get_pbn_divider(stream->link); + bpp = stream->timing.dsc_cfg.bits_per_pixel; + clock = stream->timing.pix_clk_100hz / 10; + pbn = drm_dp_calc_pbn_mode(clock, bpp, true); + vcpi = drm_dp_mst_atomic_enable_dsc(state, + aconnector->port, + pbn, pbn_div, + true); + if (vcpi < 0) + return vcpi; + + dm_conn_state->pbn = pbn; + dm_conn_state->vcpi_slots = vcpi; + } + return 0; +} +#endif + static void dm_drm_plane_reset(struct drm_plane *plane) { struct dm_plane_state *amdgpu_state = NULL; @@ -5561,9 +5682,9 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16); - /* This defaults to the max in the range, but we want 8bpc. */ - aconnector->base.state->max_bpc = 8; - aconnector->base.state->max_requested_bpc = 8; + /* This defaults to the max in the range, but we want 8bpc for non-edp. */ + aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 
16 : 8; + aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc; if (connector_type == DRM_MODE_CONNECTOR_eDP && dc_is_dmcu_initialized(adev->dm.dc)) { @@ -7638,24 +7759,27 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm, struct drm_crtc_state *new_crtc_state, *old_crtc_state; struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state; struct dc_stream_status *status = NULL; - - struct dc_surface_update *updates; enum surface_update_type update_type = UPDATE_TYPE_FAST; + struct surface_info_bundle { + struct dc_surface_update surface_updates[MAX_SURFACES]; + struct dc_plane_info plane_infos[MAX_SURFACES]; + struct dc_scaling_info scaling_infos[MAX_SURFACES]; + struct dc_flip_addrs flip_addrs[MAX_SURFACES]; + struct dc_stream_update stream_update; + } *bundle; - updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_KERNEL); + bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); - if (!updates) { - DRM_ERROR("Failed to allocate plane updates\n"); + if (!bundle) { + DRM_ERROR("Failed to allocate update bundle\n"); /* Set type to FULL to avoid crashing in DC*/ update_type = UPDATE_TYPE_FULL; goto cleanup; } for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - struct dc_scaling_info scaling_info; - struct dc_stream_update stream_update; - memset(&stream_update, 0, sizeof(stream_update)); + memset(bundle, 0, sizeof(struct surface_info_bundle)); new_dm_crtc_state = to_dm_crtc_state(new_crtc_state); old_dm_crtc_state = to_dm_crtc_state(old_crtc_state); @@ -7672,8 +7796,9 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm, for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) { const struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(new_plane_state->fb); - struct dc_plane_info plane_info; - struct dc_flip_addrs flip_addr; + struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane]; + struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane]; + struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane]; uint64_t tiling_flags; new_plane_crtc = new_plane_state->crtc; @@ -7691,49 +7816,48 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm, if (crtc != new_plane_crtc) continue; - updates[num_plane].surface = new_dm_plane_state->dc_state; + bundle->surface_updates[num_plane].surface = + new_dm_plane_state->dc_state; if (new_crtc_state->mode_changed) { - stream_update.dst = new_dm_crtc_state->stream->dst; - stream_update.src = new_dm_crtc_state->stream->src; + bundle->stream_update.dst = new_dm_crtc_state->stream->dst; + bundle->stream_update.src = new_dm_crtc_state->stream->src; } if (new_crtc_state->color_mgmt_changed) { - updates[num_plane].gamma = + bundle->surface_updates[num_plane].gamma = new_dm_plane_state->dc_state->gamma_correction; - updates[num_plane].in_transfer_func = + bundle->surface_updates[num_plane].in_transfer_func = new_dm_plane_state->dc_state->in_transfer_func; - stream_update.gamut_remap = + bundle->stream_update.gamut_remap = &new_dm_crtc_state->stream->gamut_remap_matrix; - stream_update.output_csc_transform = + bundle->stream_update.output_csc_transform = &new_dm_crtc_state->stream->csc_color_matrix; - stream_update.out_transfer_func = + bundle->stream_update.out_transfer_func = new_dm_crtc_state->stream->out_transfer_func; } ret = fill_dc_scaling_info(new_plane_state, - &scaling_info); + scaling_info); if (ret) goto cleanup; - updates[num_plane].scaling_info = &scaling_info; + 
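/*
 * The surface_info_bundle above folds a formerly kcalloc'd updates array
 * and several large per-plane locals (scaling, plane-info and
 * flip-address structs) into a single kzalloc'd object. Rough sizing,
 * with struct sizes assumed for illustration: four MAX_SURFACES-sized
 * arrays of multi-hundred-byte structs add up to a few kilobytes, which
 * is hostile to the kernel stack in a call chain as deep as atomic
 * check; one heap object per check avoids that, and the per-CRTC memset
 * above re-zeroes it cheaply on every iteration.
 */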
bundle->surface_updates[num_plane].scaling_info = scaling_info; if (amdgpu_fb) { ret = get_fb_info(amdgpu_fb, &tiling_flags); if (ret) goto cleanup; - memset(&flip_addr, 0, sizeof(flip_addr)); - ret = fill_dc_plane_info_and_addr( dm->adev, new_plane_state, tiling_flags, - &plane_info, - &flip_addr.address); + plane_info, + &flip_addr->address); if (ret) goto cleanup; - updates[num_plane].plane_info = &plane_info; - updates[num_plane].flip_addr = &flip_addr; + bundle->surface_updates[num_plane].plane_info = plane_info; + bundle->surface_updates[num_plane].flip_addr = flip_addr; } num_plane++; @@ -7754,14 +7878,15 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm, status = dc_stream_get_status_from_state(old_dm_state->context, new_dm_crtc_state->stream); - stream_update.stream = new_dm_crtc_state->stream; + bundle->stream_update.stream = new_dm_crtc_state->stream; /* * TODO: DC modifies the surface during this call so we need * to lock here - find a way to do this without locking. */ mutex_lock(&dm->dc_lock); - update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane, - &stream_update, status); + update_type = dc_check_update_surfaces_for_stream( + dc, bundle->surface_updates, num_plane, + &bundle->stream_update, status); mutex_unlock(&dm->dc_lock); if (update_type > UPDATE_TYPE_MED) { @@ -7771,12 +7896,35 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm, } cleanup: - kfree(updates); + kfree(bundle); *out_type = update_type; return ret; } +static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc) +{ + struct drm_connector *connector; + struct drm_connector_state *conn_state; + struct amdgpu_dm_connector *aconnector = NULL; + int i; + for_each_new_connector_in_state(state, connector, conn_state, i) { + if (conn_state->crtc != crtc) + continue; + + aconnector = to_amdgpu_dm_connector(connector); + if (!aconnector->port || !aconnector->mst_port) + aconnector = NULL; + else + break; + } + + if (!aconnector) + return 0; + + return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr); +} + /** * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM. 
* @dev: The DRM device @@ -7829,6 +7977,16 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, if (ret) goto fail; + if (adev->asic_type >= CHIP_NAVI10) { + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + if (drm_atomic_crtc_needs_modeset(new_crtc_state)) { + ret = add_affected_mst_dsc_crtcs(state, crtc); + if (ret) + goto fail; + } + } + } + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->color_mgmt_changed && @@ -7932,11 +8090,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, if (ret) goto fail; - /* Perform validation of MST topology in the state*/ - ret = drm_dp_mst_atomic_check(state); - if (ret) - goto fail; - if (state->legacy_cursor_update) { /* * This is a fast cursor update coming from the plane update @@ -8005,6 +8158,25 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, if (ret) goto fail; +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) + goto fail; + + ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context); + if (ret) + goto fail; +#endif + + /* + * Perform validation of MST topology in the state: + * We need to perform MST atomic check before calling + * dc_validate_global_state(), or there is a chance + * to get stuck in an infinite loop and hang eventually. + */ + ret = drm_dp_mst_atomic_check(state); + if (ret) + goto fail; + if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) { ret = -EINVAL; goto fail; @@ -8231,17 +8403,39 @@ static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream) bool amdgpu_dm_psr_enable(struct dc_stream_state *stream) { struct dc_link *link = stream->link; - struct dc_static_screen_events triggers = {0}; + unsigned int vsync_rate_hz = 0; + struct dc_static_screen_params params = {0}; + /* Calculate number of static frames before generating interrupt to + * enter PSR. + */ + unsigned int frame_time_microsec; + // Init fail safe of 2 frames static + unsigned int num_frames_static = 2; DRM_DEBUG_DRIVER("Enabling psr...\n"); - triggers.cursor_update = true; - triggers.overlay_update = true; - triggers.surface_update = true; + vsync_rate_hz = div64_u64(div64_u64(( + stream->timing.pix_clk_100hz * 100), + stream->timing.v_total), + stream->timing.h_total); + + /* Round up + * Calculate number of frames such that at least 30 ms of time has + * passed. + */ + if (vsync_rate_hz != 0) { + frame_time_microsec = 1000000 / vsync_rate_hz; + num_frames_static = (30000 / frame_time_microsec) + 1; + } + + params.triggers.cursor_update = true; + params.triggers.overlay_update = true; + params.triggers.surface_update = true; + params.num_frames = num_frames_static; - dc_stream_set_static_screen_events(link->ctx->dc, + dc_stream_set_static_screen_params(link->ctx->dc, &stream, 1, - &triggers); + &params); return dc_link_set_psr_allow_active(link, true, false); } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index a8fc90a927d64499af9dc38390d1f122178b437c..7ea9acb0358d35df4a50172a8e4bcbff5da6cd1b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -132,6 +132,13 @@ struct amdgpu_display_manager { */ struct dmub_srv *dmub_srv; + /** + * @dmub_fb_info: + * + * Framebuffer regions for the DMUB.
+ */ + struct dmub_srv_fb_info *dmub_fb_info; + /** * @dmub_fw: * @@ -323,6 +330,7 @@ struct amdgpu_dm_connector { struct drm_dp_mst_port *port; struct amdgpu_dm_connector *mst_port; struct amdgpu_encoder *mst_encoder; + struct drm_dp_aux *dsc_aux; /* TODO see if we can merge with ddc_bus or make a dm_connector */ struct amdgpu_i2c_adapter *i2c; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index ae329335dfcc7aa9ce3e8204e918737fb00c30b5..0acd3409dd6ce3599919391d0c12ffbddbefbf47 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -135,6 +135,20 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work, mutex_unlock(&hdcp_w->mutex); } +static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, + unsigned int link_index, + struct amdgpu_dm_connector *aconnector) +{ + struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; + + mutex_lock(&hdcp_w->mutex); + hdcp_w->aconnector = aconnector; + + mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output); + + process_output(hdcp_w); + mutex_unlock(&hdcp_w->mutex); +} void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index) { struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; @@ -303,6 +317,11 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) memset(link, 0, sizeof(*link)); display->index = aconnector->base.index; + + if (config->dpms_off) { + hdcp_remove_display(hdcp_work, link_index, aconnector); + return; + } display->state = MOD_HDCP_DISPLAY_ACTIVE; if (aconnector->dc_sink != NULL) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 66f266a5e10bf177f4c1bc16a21b20b880a84e7e..318b474ff20ee33f5090c22643a5fd8d697588f0 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -37,6 +37,7 @@ #include "dc.h" #include "amdgpu_dm.h" #include "amdgpu_dm_irq.h" +#include "amdgpu_dm_mst_types.h" #include "dm_helpers.h" @@ -215,7 +216,8 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port); } - ret = drm_dp_update_payload_part1(mst_mgr); + /* It's OK for this to fail */ + drm_dp_update_payload_part1(mst_mgr); /* mst_mgr->->payloads are VC payload notify MST branch using DPCD or * AUX message. The sequence is slot 1-63 allocated sequence for each @@ -224,9 +226,6 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( get_payload_table(aconnector, proposed_table); - if (ret) - return false; - return true; } @@ -284,7 +283,6 @@ bool dm_helpers_dp_mst_send_payload_allocation( struct amdgpu_dm_connector *aconnector; struct drm_dp_mst_topology_mgr *mst_mgr; struct drm_dp_mst_port *mst_port; - int ret; aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; @@ -298,10 +296,8 @@ bool dm_helpers_dp_mst_send_payload_allocation( if (!mst_mgr->mst_state) return false; - ret = drm_dp_update_payload_part2(mst_mgr); - - if (ret) - return false; + /* It's OK for this to fail */ + drm_dp_update_payload_part2(mst_mgr); if (!enable) drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port); @@ -516,8 +512,24 @@ bool dm_helpers_dp_write_dsc_enable( ) { uint8_t enable_dsc = enable ? 
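/*
 * dm_helpers_dp_write_dsc_enable() below splits the write by signal
 * type: an SST stream pokes the sink's DP_DSC_ENABLE register (DPCD
 * 0x160) directly through the link, while an MST stream must go through
 * dsc_aux, which points at whichever device in the topology actually
 * performs the DSC decode (the port itself, a peer device with virtual
 * DPCD, or the parent branch).
 */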
1 : 0; + struct amdgpu_dm_connector *aconnector; + + if (!stream) + return false; + + if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; + + if (!aconnector->dsc_aux) + return false; + + return (drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1) >= 0); + } + + if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT) + return dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); - return dm_helpers_dp_write_dpcd(ctx, stream->sink->link, DP_DSC_ENABLE, &enable_dsc, 1); + return false; } bool dm_helpers_is_dp_sink_present(struct dc_link *link) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index 64445c4cc4c2e1d3b708553428dad3c2c2f6932a..cbcf504f73a51a5e5195ab09d359a91e1846efa3 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -111,17 +111,12 @@ static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd, */ static void dm_irq_work_func(struct work_struct *work) { - struct list_head *entry; struct irq_list_head *irq_list_head = container_of(work, struct irq_list_head, work); struct list_head *handler_list = &irq_list_head->head; struct amdgpu_dm_irq_handler_data *handler_data; - list_for_each(entry, handler_list) { - handler_data = list_entry(entry, - struct amdgpu_dm_irq_handler_data, - list); - + list_for_each_entry(handler_data, handler_list, list) { DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n", handler_data->irq_source); @@ -528,19 +523,13 @@ static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev, enum dc_irq_source irq_source) { struct amdgpu_dm_irq_handler_data *handler_data; - struct list_head *entry; unsigned long irq_table_flags; DM_IRQ_TABLE_LOCK(adev, irq_table_flags); - list_for_each( - entry, - &adev->dm.irq_handler_list_high_tab[irq_source]) { - - handler_data = list_entry(entry, - struct amdgpu_dm_irq_handler_data, - list); - + list_for_each_entry(handler_data, + &adev->dm.irq_handler_list_high_tab[irq_source], + list) { /* Call a subcomponent which registered for immediate * interrupt notification */ handler_data->handler(handler_data->handler_arg); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 81367c869134d2c390699bec19c2b0ce019376de..5672f776591965f7f082ed389f4032b2262725cb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -25,6 +25,7 @@ #include #include +#include #include "dm_services.h" #include "amdgpu.h" #include "amdgpu_dm.h" @@ -39,6 +40,12 @@ #if defined(CONFIG_DEBUG_FS) #include "amdgpu_dm_debugfs.h" #endif + + +#if defined(CONFIG_DRM_AMD_DC_DCN) +#include "dc/dcn20/dcn20_resource.h" +#endif + /* #define TRACE_DPCD */ #ifdef TRACE_DPCD @@ -180,6 +187,30 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = { .early_unregister = amdgpu_dm_mst_connector_early_unregister, }; +#if defined(CONFIG_DRM_AMD_DC_DCN) +static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector) +{ + struct dc_sink *dc_sink = aconnector->dc_sink; + struct drm_dp_mst_port *port = aconnector->port; + u8 dsc_caps[16] = { 0 }; + + aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port); + + if (!aconnector->dsc_aux) + return false; + + if (drm_dp_dpcd_read(aconnector->dsc_aux, 
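/*
 * The 16-byte read below covers the sink's DSC capability block, DPCD
 * 0x060 (DP_DSC_SUPPORT) through 0x06F: the support bit, algorithm
 * revision, slice capabilities, line-buffer depth, supported colour
 * formats and depths, and peak throughput. dc_dsc_parse_dsc_dpcd() then
 * converts the raw registers into dc's dsc_dec_caps representation.
 */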
DP_DSC_SUPPORT, dsc_caps, 16) < 0) + return false; + + if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc, + dsc_caps, NULL, + &dc_sink->sink_dsc_caps.dsc_dec_caps)) + return false; + + return true; +} +#endif + static int dm_dp_mst_get_modes(struct drm_connector *connector) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); @@ -222,10 +253,16 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) /* dc_link_add_remote_sink returns a new reference */ aconnector->dc_sink = dc_sink; - if (aconnector->dc_sink) + if (aconnector->dc_sink) { amdgpu_dm_update_freesync_caps( connector, aconnector->edid); +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (!validate_dsc_caps_on_connector(aconnector)) + memset(&aconnector->dc_sink->sink_dsc_caps, + 0, sizeof(aconnector->dc_sink->sink_dsc_caps)); +#endif + } } drm_connector_update_edid_property( @@ -466,3 +503,384 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, aconnector->connector_id); } +int dm_mst_get_pbn_divider(struct dc_link *link) +{ + if (!link) + return 0; + + return dc_link_bandwidth_kbps(link, + dc_link_get_link_cap(link)) / (8 * 1000 * 54); +} + +#if defined(CONFIG_DRM_AMD_DC_DCN) + +struct dsc_mst_fairness_params { + struct dc_crtc_timing *timing; + struct dc_sink *sink; + struct dc_dsc_bw_range bw_range; + bool compression_possible; + struct drm_dp_mst_port *port; +}; + +struct dsc_mst_fairness_vars { + int pbn; + bool dsc_enabled; + int bpp_x16; +}; + +static int kbps_to_peak_pbn(int kbps) +{ + u64 peak_kbps = kbps; + + peak_kbps *= 1006; + peak_kbps = div_u64(peak_kbps, 1000); + return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000)); +} + +static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params, + struct dsc_mst_fairness_vars *vars, + int count) +{ + int i; + + for (i = 0; i < count; i++) { + memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg)); + if (vars[i].dsc_enabled && dc_dsc_compute_config( + params[i].sink->ctx->dc->res_pool->dscs[0], + &params[i].sink->sink_dsc_caps.dsc_dec_caps, + params[i].sink->ctx->dc->debug.dsc_min_slice_height_override, + 0, + params[i].timing, + &params[i].timing->dsc_cfg)) { + params[i].timing->flags.DSC = 1; + params[i].timing->dsc_cfg.bits_per_pixel = vars[i].bpp_x16; + } else { + params[i].timing->flags.DSC = 0; + } + } +} + +static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn) +{ + struct dc_dsc_config dsc_config; + u64 kbps; + + kbps = div_u64((u64)pbn * 994 * 8 * 54, 64); + dc_dsc_compute_config( + param.sink->ctx->dc->res_pool->dscs[0], + &param.sink->sink_dsc_caps.dsc_dec_caps, + param.sink->ctx->dc->debug.dsc_min_slice_height_override, + (int) kbps, param.timing, &dsc_config); + + return dsc_config.bits_per_pixel; +} + +static void increase_dsc_bpp(struct drm_atomic_state *state, + struct dc_link *dc_link, + struct dsc_mst_fairness_params *params, + struct dsc_mst_fairness_vars *vars, + int count) +{ + int i; + bool bpp_increased[MAX_PIPES]; + int initial_slack[MAX_PIPES]; + int min_initial_slack; + int next_index; + int remaining_to_increase = 0; + int pbn_per_timeslot; + int link_timeslots_used; + int fair_pbn_alloc; + + for (i = 0; i < count; i++) { + if (vars[i].dsc_enabled) { + initial_slack[i] = kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i].pbn; + bpp_increased[i] = false; + remaining_to_increase += 1; + } else { + initial_slack[i] = 0; + bpp_increased[i] = true; + } + } + + pbn_per_timeslot = dc_link_bandwidth_kbps(dc_link,
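/*
 * Worked numbers for the PBN conversions in this file (link and mode
 * assumed for illustration): kbps_to_peak_pbn() applies the standard
 * 0.6% MST margin and converts to PBN units of 54/64 MBps, so an
 * uncompressed 4K60 RGB stream at 594000 kHz * 30 bpp = 17820000 kbps
 * yields ceil(17820000 * 1.006 * 64 / 432000) = 2656 PBN. A 4-lane HBR3
 * link carries 25920000 kbps after 8b/10b coding, so pbn_per_timeslot =
 * 25920000 / 432000 = 60 PBN, and the 63 usable timeslots give
 * 63 * 60 = 3780 PBN of allocatable bandwidth.
 */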
dc_link_get_link_cap(dc_link)) / (8 * 1000 * 54); + + while (remaining_to_increase) { + next_index = -1; + min_initial_slack = -1; + for (i = 0; i < count; i++) { + if (!bpp_increased[i]) { + if (min_initial_slack == -1 || min_initial_slack > initial_slack[i]) { + min_initial_slack = initial_slack[i]; + next_index = i; + } + } + } + + if (next_index == -1) + break; + + link_timeslots_used = 0; + + for (i = 0; i < count; i++) + link_timeslots_used += DIV_ROUND_UP(vars[i].pbn, pbn_per_timeslot); + + fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * pbn_per_timeslot; + + if (initial_slack[next_index] > fair_pbn_alloc) { + vars[next_index].pbn += fair_pbn_alloc; + if (drm_dp_atomic_find_vcpi_slots(state, + params[next_index].port->mgr, + params[next_index].port, + vars[next_index].pbn, + dm_mst_get_pbn_divider(dc_link)) < 0) + return; + if (!drm_dp_mst_atomic_check(state)) { + vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn); + } else { + vars[next_index].pbn -= fair_pbn_alloc; + if (drm_dp_atomic_find_vcpi_slots(state, + params[next_index].port->mgr, + params[next_index].port, + vars[next_index].pbn, + dm_mst_get_pbn_divider(dc_link)) < 0) + return; + } + } else { + vars[next_index].pbn += initial_slack[next_index]; + if (drm_dp_atomic_find_vcpi_slots(state, + params[next_index].port->mgr, + params[next_index].port, + vars[next_index].pbn, + dm_mst_get_pbn_divider(dc_link)) < 0) + return; + if (!drm_dp_mst_atomic_check(state)) { + vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16; + } else { + vars[next_index].pbn -= initial_slack[next_index]; + if (drm_dp_atomic_find_vcpi_slots(state, + params[next_index].port->mgr, + params[next_index].port, + vars[next_index].pbn, + dm_mst_get_pbn_divider(dc_link)) < 0) + return; + } + } + + bpp_increased[next_index] = true; + remaining_to_increase--; + } +} + +static void try_disable_dsc(struct drm_atomic_state *state, + struct dc_link *dc_link, + struct dsc_mst_fairness_params *params, + struct dsc_mst_fairness_vars *vars, + int count) +{ + int i; + bool tried[MAX_PIPES]; + int kbps_increase[MAX_PIPES]; + int max_kbps_increase; + int next_index; + int remaining_to_try = 0; + + for (i = 0; i < count; i++) { + if (vars[i].dsc_enabled && vars[i].bpp_x16 == params[i].bw_range.max_target_bpp_x16) { + kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps; + tried[i] = false; + remaining_to_try += 1; + } else { + kbps_increase[i] = 0; + tried[i] = true; + } + } + + while (remaining_to_try) { + next_index = -1; + max_kbps_increase = -1; + for (i = 0; i < count; i++) { + if (!tried[i]) { + if (max_kbps_increase == -1 || max_kbps_increase < kbps_increase[i]) { + max_kbps_increase = kbps_increase[i]; + next_index = i; + } + } + } + + if (next_index == -1) + break; + + vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps); + if (drm_dp_atomic_find_vcpi_slots(state, + params[next_index].port->mgr, + params[next_index].port, + vars[next_index].pbn, + 0) < 0) + return; + + if (!drm_dp_mst_atomic_check(state)) { + vars[next_index].dsc_enabled = false; + vars[next_index].bpp_x16 = 0; + } else { + vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps); + if (drm_dp_atomic_find_vcpi_slots(state, + params[next_index].port->mgr, + params[next_index].port, + vars[next_index].pbn, + dm_mst_get_pbn_divider(dc_link)) < 0) + return; + } + + tried[next_index] = true; + remaining_to_try--; + } +} + +static bool 
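/*
 * Example of the fair-share step in increase_dsc_bpp() above (numbers
 * assumed): at 60 PBN per timeslot, with two DSC streams holding 24
 * timeslots each, link_timeslots_used = 48 and fair_pbn_alloc =
 * (63 - 48) / 2 * 60 = 420 PBN. The stream with the least slack grows
 * first; if drm_dp_mst_atomic_check() rejects the larger allocation,
 * the grant is rolled back and the stream keeps its previous PBN.
 */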
compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, + struct dc_state *dc_state, + struct dc_link *dc_link) +{ + int i; + struct dc_stream_state *stream; + struct dsc_mst_fairness_params params[MAX_PIPES]; + struct dsc_mst_fairness_vars vars[MAX_PIPES]; + struct amdgpu_dm_connector *aconnector; + int count = 0; + + memset(params, 0, sizeof(params)); + + /* Set up params */ + for (i = 0; i < dc_state->stream_count; i++) { + struct dc_dsc_policy dsc_policy = {0}; + + stream = dc_state->streams[i]; + + if (stream->link != dc_link) + continue; + + stream->timing.flags.DSC = 0; + + params[count].timing = &stream->timing; + params[count].sink = stream->sink; + aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; + params[count].port = aconnector->port; + params[count].compression_possible = stream->sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported; + dc_dsc_get_policy_for_timing(params[count].timing, &dsc_policy); + if (!dc_dsc_compute_bandwidth_range( + stream->sink->ctx->dc->res_pool->dscs[0], + stream->sink->ctx->dc->debug.dsc_min_slice_height_override, + dsc_policy.min_target_bpp, + dsc_policy.max_target_bpp, + &stream->sink->sink_dsc_caps.dsc_dec_caps, + &stream->timing, &params[count].bw_range)) + params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing); + + count++; + } + /* Try no compression */ + for (i = 0; i < count; i++) { + vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); + vars[i].dsc_enabled = false; + vars[i].bpp_x16 = 0; + if (drm_dp_atomic_find_vcpi_slots(state, + params[i].port->mgr, + params[i].port, + vars[i].pbn, + 0) < 0) + return false; + } + if (!drm_dp_mst_atomic_check(state)) { + set_dsc_configs_from_fairness_vars(params, vars, count); + return true; + } + + /* Try max compression */ + for (i = 0; i < count; i++) { + if (params[i].compression_possible) { + vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps); + vars[i].dsc_enabled = true; + vars[i].bpp_x16 = params[i].bw_range.min_target_bpp_x16; + if (drm_dp_atomic_find_vcpi_slots(state, + params[i].port->mgr, + params[i].port, + vars[i].pbn, + dm_mst_get_pbn_divider(dc_link)) < 0) + return false; + } else { + vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); + vars[i].dsc_enabled = false; + vars[i].bpp_x16 = 0; + if (drm_dp_atomic_find_vcpi_slots(state, + params[i].port->mgr, + params[i].port, + vars[i].pbn, + 0) < 0) + return false; + } + } + if (drm_dp_mst_atomic_check(state)) + return false; + + /* Optimize degree of compression */ + increase_dsc_bpp(state, dc_link, params, vars, count); + + try_disable_dsc(state, dc_link, params, vars, count); + + set_dsc_configs_from_fairness_vars(params, vars, count); + + return true; +} + +bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, + struct dc_state *dc_state) +{ + int i, j; + struct dc_stream_state *stream; + bool computed_streams[MAX_PIPES]; + struct amdgpu_dm_connector *aconnector; + + for (i = 0; i < dc_state->stream_count; i++) + computed_streams[i] = false; + + for (i = 0; i < dc_state->stream_count; i++) { + stream = dc_state->streams[i]; + + if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST) + continue; + + aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; + + if (!aconnector || !aconnector->dc_sink) + continue; + + if (!aconnector->dc_sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported) + continue; + + if (computed_streams[i]) + continue; + + mutex_lock(&aconnector->mst_mgr.lock); + if (!compute_mst_dsc_configs_for_link(state,
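/*
 * compute_mst_dsc_configs_for_link() above escalates in stages: first
 * it tries every stream uncompressed; if the topology check fails, it
 * forces each DSC-capable stream to its minimum bpp and gives up only
 * if even that does not fit; it then hands spare bandwidth out fairly
 * through increase_dsc_bpp() and finally lets individual streams drop
 * DSC again through try_disable_dsc().
 */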
dc_state, stream->link)) { + mutex_unlock(&aconnector->mst_mgr.lock); + return false; + } + mutex_unlock(&aconnector->mst_mgr.lock); + + for (j = 0; j < dc_state->stream_count; j++) { + if (dc_state->streams[j]->link == stream->link) + computed_streams[j] = true; + } + } + + for (i = 0; i < dc_state->stream_count; i++) { + stream = dc_state->streams[i]; + + if (stream->timing.flags.DSC == 1) + dcn20_add_dsc_to_stream_resource(stream->ctx->dc, dc_state, stream); + } + + return true; +} + +#endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h index 2da851b40042aee9b79eb2c666d45c0f5061fee0..d6813ce67bbd36844b5af0ac235b7190e8164a44 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h @@ -29,7 +29,14 @@ struct amdgpu_display_manager; struct amdgpu_dm_connector; +int dm_mst_get_pbn_divider(struct dc_link *link); + void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, struct amdgpu_dm_connector *aconnector); +#if defined(CONFIG_DRM_AMD_DC_DCN) +bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, + struct dc_state *dc_state); +#endif + #endif diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c index 2cb7a4288cb7840d943606254d7800294a52ea8a..629a07a2719b2db7e3c657c0ed07aa669e0b2b28 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c @@ -89,6 +89,10 @@ static enum bp_result encoder_control_digx_v1_5( struct bios_parser *bp, struct bp_encoder_control *cntl); +static enum bp_result encoder_control_fallback( + struct bios_parser *bp, + struct bp_encoder_control *cntl); + static void init_dig_encoder_control(struct bios_parser *bp) { uint32_t version = @@ -100,7 +104,7 @@ static void init_dig_encoder_control(struct bios_parser *bp) break; default: dm_output_to_console("Don't have dig_encoder_control for v%d\n", version); - bp->cmd_tbl.dig_encoder_control = NULL; + bp->cmd_tbl.dig_encoder_control = encoder_control_fallback; break; } } @@ -184,6 +188,18 @@ static enum bp_result encoder_control_digx_v1_5( return result; } +static enum bp_result encoder_control_fallback( + struct bios_parser *bp, + struct bp_encoder_control *cntl) +{ + if (bp->base.ctx->dc->ctx->dmub_srv && + bp->base.ctx->dc->debug.dmub_command_table) { + return encoder_control_digx_v1_5(bp, cntl); + } + + return BP_RESULT_FAILURE; +} + /***************************************************************************** ****************************************************************************** ** @@ -196,6 +212,10 @@ static enum bp_result transmitter_control_v1_6( struct bios_parser *bp, struct bp_transmitter_control *cntl); +static enum bp_result transmitter_control_fallback( + struct bios_parser *bp, + struct bp_transmitter_control *cntl); + static void init_transmitter_control(struct bios_parser *bp) { uint8_t frev; @@ -209,7 +229,7 @@ static void init_transmitter_control(struct bios_parser *bp) break; default: dm_output_to_console("Don't have transmitter_control for v%d\n", crev); - bp->cmd_tbl.transmitter_control = NULL; + bp->cmd_tbl.transmitter_control = transmitter_control_fallback; break; } } @@ -273,6 +293,18 @@ static enum bp_result transmitter_control_v1_6( return result; } +static enum bp_result transmitter_control_fallback( + struct bios_parser *bp, + struct 
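/*
 * Same pattern as encoder_control_fallback() above: rather than leaving
 * a command-table pointer NULL for an unknown table revision (which made
 * every caller responsible for a NULL check), each table now installs a
 * fallback that retries the newest parser when the DMUB firmware command
 * table is in use, and otherwise reports BP_RESULT_FAILURE.
 */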
bp_transmitter_control *cntl) +{ + if (bp->base.ctx->dc->ctx->dmub_srv && + bp->base.ctx->dc->debug.dmub_command_table) { + return transmitter_control_v1_6(bp, cntl); + } + + return BP_RESULT_FAILURE; +} + /****************************************************************************** ****************************************************************************** ** @@ -285,6 +317,10 @@ static enum bp_result set_pixel_clock_v7( struct bios_parser *bp, struct bp_pixel_clock_parameters *bp_params); +static enum bp_result set_pixel_clock_fallback( + struct bios_parser *bp, + struct bp_pixel_clock_parameters *bp_params); + static void init_set_pixel_clock(struct bios_parser *bp) { switch (BIOS_CMD_TABLE_PARA_REVISION(setpixelclock)) { @@ -294,7 +330,7 @@ static void init_set_pixel_clock(struct bios_parser *bp) default: dm_output_to_console("Don't have set_pixel_clock for v%d\n", BIOS_CMD_TABLE_PARA_REVISION(setpixelclock)); - bp->cmd_tbl.set_pixel_clock = NULL; + bp->cmd_tbl.set_pixel_clock = set_pixel_clock_fallback; break; } } @@ -400,6 +436,18 @@ static enum bp_result set_pixel_clock_v7( return result; } +static enum bp_result set_pixel_clock_fallback( + struct bios_parser *bp, + struct bp_pixel_clock_parameters *bp_params) +{ + if (bp->base.ctx->dc->ctx->dmub_srv && + bp->base.ctx->dc->debug.dmub_command_table) { + return set_pixel_clock_v7(bp, bp_params); + } + + return BP_RESULT_FAILURE; +} + /****************************************************************************** ****************************************************************************** ** @@ -632,6 +680,11 @@ static enum bp_result enable_disp_power_gating_v2_1( enum controller_id crtc_id, enum bp_pipe_control_action action); +static enum bp_result enable_disp_power_gating_fallback( + struct bios_parser *bp, + enum controller_id crtc_id, + enum bp_pipe_control_action action); + static void init_enable_disp_power_gating( struct bios_parser *bp) { @@ -643,7 +696,7 @@ static void init_enable_disp_power_gating( default: dm_output_to_console("Don't enable_disp_power_gating enable_crtc for v%d\n", BIOS_CMD_TABLE_PARA_REVISION(enabledisppowergating)); - bp->cmd_tbl.enable_disp_power_gating = NULL; + bp->cmd_tbl.enable_disp_power_gating = enable_disp_power_gating_fallback; break; } } @@ -658,6 +711,10 @@ static void enable_disp_power_gating_dmcub( power_gating.header.sub_type = DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING; power_gating.power_gating.pwr = *pwr; + /* ATOM_ENABLE is old API in DMUB */ + if (power_gating.power_gating.pwr.enable == ATOM_ENABLE) + power_gating.power_gating.pwr.enable = ATOM_INIT; + dc_dmub_srv_cmd_queue(dmcub, &power_gating.header); dc_dmub_srv_cmd_execute(dmcub); dc_dmub_srv_wait_idle(dmcub); @@ -695,6 +752,19 @@ static enum bp_result enable_disp_power_gating_v2_1( return result; } +static enum bp_result enable_disp_power_gating_fallback( + struct bios_parser *bp, + enum controller_id crtc_id, + enum bp_pipe_control_action action) +{ + if (bp->base.ctx->dc->ctx->dmub_srv && + bp->base.ctx->dc->debug.dmub_command_table) { + return enable_disp_power_gating_v2_1(bp, crtc_id, action); + } + + return BP_RESULT_FAILURE; +} + /****************************************************************************** ******************************************************************************* ** diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile index d0714a3d63c8dd09ef096120d3e09a2027126d62..4674aca8f20697f2550e91e2d29b06f128b4a7e1 100644 --- 
a/drivers/gpu/drm/amd/display/dc/calcs/Makefile +++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile @@ -1,5 +1,6 @@ # # Copyright 2017 Advanced Micro Devices, Inc. +# Copyright 2019 Raptor Engineering, LLC # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), @@ -24,7 +25,13 @@ # It calculates Bandwidth and Watermarks values for HW programming # +ifdef CONFIG_X86 calcs_ccflags := -mhard-float -msse +endif + +ifdef CONFIG_PPC64 +calcs_ccflags := -mhard-float -maltivec +endif ifdef CONFIG_CC_IS_GCC ifeq ($(call cc-ifversion, -lt, 0701, y), y) @@ -32,6 +39,7 @@ IS_OLD_GCC = 1 endif endif +ifdef CONFIG_X86 ifdef IS_OLD_GCC # Stack alignment mismatch, proceed with caution. # GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 @@ -40,6 +48,7 @@ calcs_ccflags += -mpreferred-stack-boundary=4 else calcs_ccflags += -msse2 endif +endif CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calcs.o := $(calcs_ccflags) CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calc_auto.o := $(calcs_ccflags) diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c index a1d49256fab733d85f52d8697aa08540f64a485a..5d081c42e81b0112fa3c3790464882a236d86124 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c @@ -154,14 +154,14 @@ static void calculate_bandwidth( - if (data->d0_underlay_mode == bw_def_none) { d0_underlay_enable = 0; } - else { - d0_underlay_enable = 1; - } - if (data->d1_underlay_mode == bw_def_none) { d1_underlay_enable = 0; } - else { - d1_underlay_enable = 1; - } + if (data->d0_underlay_mode == bw_def_none) + d0_underlay_enable = false; + else + d0_underlay_enable = true; + if (data->d1_underlay_mode == bw_def_none) + d1_underlay_enable = false; + else + d1_underlay_enable = true; data->number_of_underlay_surfaces = d0_underlay_enable + d1_underlay_enable; switch (data->underlay_surface_type) { case bw_def_420: @@ -286,8 +286,8 @@ static void calculate_bandwidth( data->cursor_width_pixels[2] = bw_int_to_fixed(0); data->cursor_width_pixels[3] = bw_int_to_fixed(0); /* graphics surface parameters from spreadsheet*/ - fbc_enabled = 0; - lpt_enabled = 0; + fbc_enabled = false; + lpt_enabled = false; for (i = 4; i <= maximum_number_of_surfaces - 3; i++) { if (i < data->number_of_displays + 4) { if (i == 4 && data->d0_underlay_mode == bw_def_underlay_only) { @@ -338,9 +338,9 @@ static void calculate_bandwidth( data->access_one_channel_only[i] = 0; } if (data->fbc_en[i] == 1) { - fbc_enabled = 1; + fbc_enabled = true; if (data->lpt_en[i] == 1) { - lpt_enabled = 1; + lpt_enabled = true; } } data->cursor_width_pixels[i] = bw_int_to_fixed(vbios->cursor_width); diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c index a4ddd657598f5b78c0be7a3a728878f406f8774d..1a37550731de2dc393e6600909137aa69fe70054 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c @@ -1,5 +1,6 @@ /* * Copyright 2017 Advanced Micro Devices, Inc. 
+ * Copyright 2019 Raptor Engineering, LLC * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -622,7 +623,7 @@ static bool dcn_bw_apply_registry_override(struct dc *dc) { bool updated = false; - kernel_fpu_begin(); + DC_FP_START(); if ((int)(dc->dcn_soc->sr_exit_time * 1000) != dc->debug.sr_exit_time_ns && dc->debug.sr_exit_time_ns) { updated = true; @@ -658,7 +659,7 @@ static bool dcn_bw_apply_registry_override(struct dc *dc) dc->dcn_soc->dram_clock_change_latency = dc->debug.dram_clock_change_latency_ns / 1000.0; } - kernel_fpu_end(); + DC_FP_END(); return updated; } @@ -704,8 +705,8 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v, unsigned int get_highest_allowed_voltage_level(uint32_t hw_internal_rev) { - /* for dali, the highest voltage level we want is 0 */ - if (ASICREV_IS_DALI(hw_internal_rev)) + /* for dali & pollock, the highest voltage level we want is 0 */ + if (ASICREV_IS_POLLOCK(hw_internal_rev) || ASICREV_IS_DALI(hw_internal_rev)) return 0; /* we are ok with all levels */ @@ -738,7 +739,7 @@ bool dcn_validate_bandwidth( dcn_bw_sync_calcs_and_dml(dc); memset(v, 0, sizeof(*v)); - kernel_fpu_begin(); + DC_FP_START(); v->sr_exit_time = dc->dcn_soc->sr_exit_time; v->sr_enter_plus_exit_time = dc->dcn_soc->sr_enter_plus_exit_time; @@ -1271,7 +1272,7 @@ bool dcn_validate_bandwidth( bw_limit = dc->dcn_soc->percent_disp_bw_limit * v->fabric_and_dram_bandwidth_vmax0p9; bw_limit_pass = (v->total_data_read_bandwidth / 1000.0) < bw_limit; - kernel_fpu_end(); + DC_FP_END(); PERFORMANCE_TRACE_END(); BW_VAL_TRACE_FINISH(); @@ -1434,37 +1435,49 @@ void dcn_bw_update_from_pplib(struct dc *dc) struct dc_context *ctx = dc->ctx; struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0}; bool res; + unsigned vmin0p65_idx, vmid0p72_idx, vnom0p8_idx, vmax0p9_idx; /* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */ res = dm_pp_get_clock_levels_by_type_with_voltage( ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks); - kernel_fpu_begin(); + DC_FP_START(); if (res) res = verify_clock_values(&fclks); if (res) { - ASSERT(fclks.num_levels >= 3); - dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 32 * (fclks.data[0].clocks_in_khz / 1000.0) / 1000.0; - dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = dc->dcn_soc->number_of_channels * - (fclks.data[fclks.num_levels - (fclks.num_levels > 2 ? 3 : 2)].clocks_in_khz / 1000.0) - * ddr4_dram_factor_single_Channel / 1000.0; - dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = dc->dcn_soc->number_of_channels * - (fclks.data[fclks.num_levels - 2].clocks_in_khz / 1000.0) - * ddr4_dram_factor_single_Channel / 1000.0; - dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = dc->dcn_soc->number_of_channels * - (fclks.data[fclks.num_levels - 1].clocks_in_khz / 1000.0) - * ddr4_dram_factor_single_Channel / 1000.0; + ASSERT(fclks.num_levels); + + vmin0p65_idx = 0; + vmid0p72_idx = fclks.num_levels - + (fclks.num_levels > 2 ? 3 : (fclks.num_levels > 1 ? 2 : 1)); + vnom0p8_idx = fclks.num_levels - (fclks.num_levels > 1 ? 
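/*
 * Index fallback behaviour, with level counts assumed for illustration:
 * 4 fclk levels map (vmin, vmid, vnom, vmax) to indices (0, 1, 2, 3);
 * 3 levels give (0, 0, 1, 2); 2 levels give (0, 0, 0, 1); and a
 * single-level table collapses everything to level 0, where the old
 * ASSERT(fclks.num_levels >= 3) code would have indexed out of bounds.
 */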
2 : 1); + vmax0p9_idx = fclks.num_levels - 1; + + dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = + 32 * (fclks.data[vmin0p65_idx].clocks_in_khz / 1000.0) / 1000.0; + dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = + dc->dcn_soc->number_of_channels * + (fclks.data[vmid0p72_idx].clocks_in_khz / 1000.0) + * ddr4_dram_factor_single_Channel / 1000.0; + dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = + dc->dcn_soc->number_of_channels * + (fclks.data[vnom0p8_idx].clocks_in_khz / 1000.0) + * ddr4_dram_factor_single_Channel / 1000.0; + dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = + dc->dcn_soc->number_of_channels * + (fclks.data[vmax0p9_idx].clocks_in_khz / 1000.0) + * ddr4_dram_factor_single_Channel / 1000.0; } else BREAK_TO_DEBUGGER(); - kernel_fpu_end(); + DC_FP_END(); res = dm_pp_get_clock_levels_by_type_with_voltage( ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks); - kernel_fpu_begin(); + DC_FP_START(); if (res) res = verify_clock_values(&dcfclks); @@ -1477,7 +1490,7 @@ void dcn_bw_update_from_pplib(struct dc *dc) } else BREAK_TO_DEBUGGER(); - kernel_fpu_end(); + DC_FP_END(); } void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc) @@ -1492,11 +1505,11 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc) if (!pp || !pp->set_wm_ranges) return; - kernel_fpu_begin(); + DC_FP_START(); min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32; min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000; socclk_khz = dc->dcn_soc->socclk * 1000; - kernel_fpu_end(); + DC_FP_END(); /* Now notify PPLib/SMU about which Watermarks sets they should select * depending on DPM state they are in. And update BW MGR GFX Engine and @@ -1547,7 +1560,7 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc) void dcn_bw_sync_calcs_and_dml(struct dc *dc) { - kernel_fpu_begin(); + DC_FP_START(); DC_LOG_BANDWIDTH_CALCS("sr_exit_time: %f ns\n" "sr_enter_plus_exit_time: %f ns\n" "urgent_latency: %f ns\n" @@ -1736,5 +1749,5 @@ void dcn_bw_sync_calcs_and_dml(struct dc *dc) dc->dml.ip.bug_forcing_LC_req_same_size_fixed = dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes; dc->dml.ip.dcfclk_cstate_latency = dc->dcn_ip->dcfclk_cstate_latency; - kernel_fpu_end(); + DC_FP_END(); } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index 6d60ef82261929bff810c677badc597fb32dcca0..a78e5c74c79cc93fd78dd0625a167a22edfd1fa6 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -134,13 +134,13 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p #if defined(CONFIG_DRM_AMD_DC_DCN) case FAMILY_RV: - if (ASICREV_IS_DALI(asic_id.hw_internal_rev)) { + if (ASICREV_IS_DALI(asic_id.hw_internal_rev) || + ASICREV_IS_POLLOCK(asic_id.hw_internal_rev)) { /* TEMP: this check has to come before ASICREV_IS_RENOIR */ - /* which also incorrectly returns true for Dali */ + /* which also incorrectly returns true for Dali/Pollock*/ rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu); break; } - if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev)) { rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); break; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 25d7b7c6681cc218f8c79d7b4f3e50f570526da4..495f01e9f2cac4f3da8e243f4ca0f79dadfac495 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ 
-27,6 +27,7 @@ #include "clk_mgr_internal.h" #include "dce100/dce_clk_mgr.h" +#include "dcn20_clk_mgr.h" #include "reg_helper.h" #include "core_types.h" #include "dm_helpers.h" @@ -100,13 +101,13 @@ uint32_t dentist_get_did_from_divider(int divider) } void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr, - struct dc_state *context) + struct dc_state *context, bool safe_to_lower) { int i; clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz; for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { - int dpp_inst, dppclk_khz; + int dpp_inst, dppclk_khz, prev_dppclk_khz; /* Loop index will match dpp->inst if resource exists, * and we want to avoid dependency on dpp object @@ -114,8 +115,12 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr, dpp_inst = i; dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz; - clk_mgr->dccg->funcs->update_dpp_dto( - clk_mgr->dccg, dpp_inst, dppclk_khz); + prev_dppclk_khz = clk_mgr->base.ctx->dc->current_state->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz; + + if (safe_to_lower || prev_dppclk_khz < dppclk_khz) { + clk_mgr->dccg->funcs->update_dpp_dto( + clk_mgr->dccg, dpp_inst, dppclk_khz); + } } } @@ -161,6 +166,9 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, dc->debug.force_clock_mode & 0x1) { //this is from resume or boot up, if forced_clock cfg option used, we bypass program dispclk and DPPCLK, but need set them for S3. force_reset = true; + + dcn2_read_clocks_from_hw_dentist(clk_mgr_base); + //force_clock_mode 0x1: force reset the clock even it is the same clock as long as it is in Passive level. } display_count = clk_mgr_helper_get_active_display_cnt(dc, context); @@ -240,7 +248,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) { if (dpp_clock_lowered) { // if clock is being lowered, increase DTO before lowering refclk - dcn20_update_clocks_update_dpp_dto(clk_mgr, context); + dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); dcn20_update_clocks_update_dentist(clk_mgr); } else { // if clock is being raised, increase refclk before lowering DTO @@ -248,7 +256,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, dcn20_update_clocks_update_dentist(clk_mgr); // always update dtos unless clock is lowered and not safe to lower if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz) - dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); } } @@ -339,6 +347,32 @@ void dcn2_enable_pme_wa(struct clk_mgr *clk_mgr_base) } } + +void dcn2_read_clocks_from_hw_dentist(struct clk_mgr *clk_mgr_base) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + uint32_t dispclk_wdivider; + uint32_t dppclk_wdivider; + int disp_divider; + int dpp_divider; + + REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, &dispclk_wdivider); + REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, &dppclk_wdivider); + + disp_divider = dentist_get_divider_from_did(dispclk_wdivider); + dpp_divider = dentist_get_divider_from_did(dppclk_wdivider); + + if (disp_divider && dpp_divider) { + /* Calculate the current DFS clock, in kHz.*/ + clk_mgr_base->clks.dispclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR + * clk_mgr->base.dentist_vco_freq_khz) / disp_divider; + + clk_mgr_base->clks.dppclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR + * clk_mgr->base.dentist_vco_freq_khz) / dpp_divider; + } + +} +
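/*
 * Sketch of the divider math above, with assumed values (the divider
 * encoding and a scale factor of 4 are taken to match the DCN2 clock
 * manager; the VCO frequency is illustrative): dentist_get_divider_from_did()
 * returns the divider pre-scaled by the same factor, so a 3600000 kHz
 * DFS VCO whose DID encodes a /3 divider (scaled divider 12) reads back
 * as 4 * 3600000 / 12 = 1200000 kHz.
 */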
void dcn2_get_clock(struct clk_mgr *clk_mgr, struct dc_state *context, enum dc_clock_type clock_type, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h index c9fd824f3c231c1506a9a39abc653c8156c8e18a..0b9c045b0c8e944bc5bfa0c7ea84b8b8a0502c5e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h @@ -34,7 +34,7 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr, struct dc_state *context, bool safe_to_lower); void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr, - struct dc_state *context); + struct dc_state *context, bool safe_to_lower); void dcn2_init_clocks(struct clk_mgr *clk_mgr); @@ -51,4 +51,8 @@ void dcn2_get_clock(struct clk_mgr *clk_mgr, struct dc_clock_config *clock_cfg); void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr); + +void dcn2_read_clocks_from_hw_dentist(struct clk_mgr *clk_mgr_base); + + #endif //__DCN20_CLK_MGR_H__ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index de51ef12e33a37aa9665e5a3c5a24bb118811b4b..7ae4c06232dd2bf53fdb89a0c92f24e34d2fe9c2 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -59,14 +59,16 @@ int rn_get_active_display_cnt_wa( struct dc_state *context) { int i, display_count; - bool hdmi_present = false; + bool tmds_present = false; display_count = 0; for (i = 0; i < context->stream_count; i++) { const struct dc_stream_state *stream = context->streams[i]; - if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) - hdmi_present = true; + if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A || + stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK || + stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) + tmds_present = true; } for (i = 0; i < dc->link_count; i++) { @@ -85,7 +87,7 @@ int rn_get_active_display_cnt_wa( } /* WA for hang on HDMI after display off back back on*/ - if (display_count == 0 && hdmi_present) + if (display_count == 0 && tmds_present) display_count = 1; return display_count; @@ -164,16 +166,16 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base, } if (dpp_clock_lowered) { - // if clock is being lowered, increase DTO before lowering refclk - dcn20_update_clocks_update_dpp_dto(clk_mgr, context); + // increase per DPP DTO before lowering global dppclk + dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz); } else { - // if clock is being raised, increase refclk before lowering DTO + // increase global DPPCLK before lowering per DPP DTO if (update_dppclk || update_dispclk) rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz); // always update dtos unless clock is lowered and not safe to lower if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz) - dcn20_update_clocks_update_dpp_dto(clk_mgr, context); + dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); } if (update_dispclk && @@ -409,7 +411,7 @@ void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_ra continue; ranges->reader_wm_sets[num_valid_sets].wm_inst = bw_params->wm_table.entries[i].wm_inst; - ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type;; + ranges->reader_wm_sets[num_valid_sets].wm_type = 
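/*
 * On the DTO ordering in rn_update_clocks() above: each pipe's effective
 * DPP clock is the global DPPCLK scaled down by its DTO, so the
 * sequencing keeps every pipe at or above its required rate throughout
 * the transition. When the global clock drops, the relatively higher DTO
 * ratios are programmed first; when it rises, the source clock is raised
 * before the DTOs are relaxed.
 */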
bw_params->wm_table.entries[i].wm_type; /* We will not select WM based on dcfclk, so leave it as unconstrained */ ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 39fe38cb39b6e44b0bc5b7ca7cebe86334fba4dd..04441dbcba76fb83ed8397bdcf1bd035c0c6b0db 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -66,6 +66,9 @@ #include "dce/dce_i2c.h" +#define CTX \ + dc->ctx + #define DC_LOGGER \ dc->ctx->logger @@ -284,7 +287,6 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc, struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->stream == stream && pipe->stream_res.tg) { - pipe->stream->adjust = *adjust; dc->hwss.set_drr(&pipe, 1, adjust->v_total_min, @@ -508,10 +510,10 @@ bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream) return ret; } -void dc_stream_set_static_screen_events(struct dc *dc, +void dc_stream_set_static_screen_params(struct dc *dc, struct dc_stream_state **streams, int num_streams, - const struct dc_static_screen_events *events) + const struct dc_static_screen_params *params) { int i = 0; int j = 0; @@ -530,7 +532,7 @@ void dc_stream_set_static_screen_events(struct dc *dc, } } - dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events); + dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params); } static void dc_destruct(struct dc *dc) @@ -579,6 +581,40 @@ static void dc_destruct(struct dc *dc) } +static bool dc_construct_ctx(struct dc *dc, + const struct dc_init_data *init_params) +{ + struct dc_context *dc_ctx; + enum dce_version dc_version = DCE_VERSION_UNKNOWN; + + dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL); + if (!dc_ctx) + return false; + + dc_ctx->cgs_device = init_params->cgs_device; + dc_ctx->driver_context = init_params->driver; + dc_ctx->dc = dc; + dc_ctx->asic_id = init_params->asic_id; + dc_ctx->dc_sink_id_count = 0; + dc_ctx->dc_stream_id_count = 0; + dc_ctx->dce_environment = init_params->dce_environment; + + /* Create logger */ + + dc_version = resource_parse_asic_id(init_params->asic_id); + dc_ctx->dce_version = dc_version; + + dc_ctx->perf_trace = dc_perf_trace_create(); + if (!dc_ctx->perf_trace) { + ASSERT_CRITICAL(false); + return false; + } + + dc->ctx = dc_ctx; + + return true; +} + static bool dc_construct(struct dc *dc, const struct dc_init_data *init_params) { @@ -590,7 +626,6 @@ static bool dc_construct(struct dc *dc, struct dcn_ip_params *dcn_ip; #endif - enum dce_version dc_version = DCE_VERSION_UNKNOWN; dc->config = init_params->flags; // Allocate memory for the vm_helper @@ -636,26 +671,12 @@ static bool dc_construct(struct dc *dc, dc->soc_bounding_box = init_params->soc_bounding_box; #endif - dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL); - if (!dc_ctx) { + if (!dc_construct_ctx(dc, init_params)) { dm_error("%s: failed to create ctx\n", __func__); goto fail; } - dc_ctx->cgs_device = init_params->cgs_device; - dc_ctx->driver_context = init_params->driver; - dc_ctx->dc = dc; - dc_ctx->asic_id = init_params->asic_id; - dc_ctx->dc_sink_id_count = 0; - dc_ctx->dc_stream_id_count = 0; - dc->ctx = dc_ctx; - - /* Create logger */ - - dc_ctx->dce_environment = init_params->dce_environment; - - dc_version = resource_parse_asic_id(init_params->asic_id); - 
dc_ctx->dce_version = dc_version; + dc_ctx = dc->ctx; /* Resource should construct all asic specific resources. * This should be the only place where we need to parse the asic id @@ -670,7 +691,7 @@ static bool dc_construct(struct dc *dc, bp_init_data.bios = init_params->asic_id.atombios_base_address; dc_ctx->dc_bios = dal_bios_parser_create( - &bp_init_data, dc_version); + &bp_init_data, dc_ctx->dce_version); if (!dc_ctx->dc_bios) { ASSERT_CRITICAL(false); @@ -678,17 +699,13 @@ static bool dc_construct(struct dc *dc, } dc_ctx->created_bios = true; - } - - dc_ctx->perf_trace = dc_perf_trace_create(); - if (!dc_ctx->perf_trace) { - ASSERT_CRITICAL(false); - goto fail; } + + /* Create GPIO service */ dc_ctx->gpio_service = dal_gpio_service_create( - dc_version, + dc_ctx->dce_version, dc_ctx->dce_environment, dc_ctx); @@ -697,7 +714,7 @@ static bool dc_construct(struct dc *dc, goto fail; } - dc->res_pool = dc_create_resource_pool(dc, init_params, dc_version); + dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version); if (!dc->res_pool) goto fail; @@ -728,8 +745,6 @@ static bool dc_construct(struct dc *dc, return true; fail: - - dc_destruct(dc); return false; } @@ -783,6 +798,33 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) dc_release_state(current_ctx); } +static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context) +{ + int i; + int count = 0; + struct pipe_ctx *pipe; + PERF_TRACE(); + for (i = 0; i < MAX_PIPES; i++) { + pipe = &context->res_ctx.pipe_ctx[i]; + + if (!pipe->plane_state) + continue; + + /* Timeout 100 ms */ + while (count < 100000) { + /* Must set to false to start with, due to OR in update function */ + pipe->plane_state->status.is_flip_pending = false; + dc->hwss.update_pending_status(pipe); + if (!pipe->plane_state->status.is_flip_pending) + break; + udelay(1); + count++; + } + ASSERT(!pipe->plane_state->status.is_flip_pending); + } + PERF_TRACE(); +} + /******************************************************************************* * Public functions ******************************************************************************/ @@ -795,28 +837,38 @@ struct dc *dc_create(const struct dc_init_data *init_params) if (NULL == dc) goto alloc_fail; - if (false == dc_construct(dc, init_params)) - goto construct_fail; + if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) { + if (false == dc_construct_ctx(dc, init_params)) { + dc_destruct(dc); + goto construct_fail; + } + } else { + if (false == dc_construct(dc, init_params)) { + dc_destruct(dc); + goto construct_fail; + } + + full_pipe_count = dc->res_pool->pipe_count; + if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE) + full_pipe_count--; + dc->caps.max_streams = min( + full_pipe_count, + dc->res_pool->stream_enc_count); - full_pipe_count = dc->res_pool->pipe_count; - if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE) - full_pipe_count--; - dc->caps.max_streams = min( - full_pipe_count, - dc->res_pool->stream_enc_count); + dc->optimize_seamless_boot_streams = 0; + dc->caps.max_links = dc->link_count; + dc->caps.max_audios = dc->res_pool->audio_count; + dc->caps.linear_pitch_alignment = 64; - dc->caps.max_links = dc->link_count; - dc->caps.max_audios = dc->res_pool->audio_count; - dc->caps.linear_pitch_alignment = 64; + dc->caps.max_dp_protocol_version = DP_VERSION_1_4; - dc->caps.max_dp_protocol_version = DP_VERSION_1_4; + if (dc->res_pool->dmcu != NULL) + dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version; + } /* Populate 
versioning information */ dc->versions.dc_ver = DC_VER; - if (dc->res_pool->dmcu != NULL) - dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version; - dc->build_id = DC_BUILD_ID; DC_LOG_DC("Display Core initialized\n"); @@ -834,7 +886,8 @@ alloc_fail: void dc_hardware_init(struct dc *dc) { - dc->hwss.init_hw(dc); + if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW) + dc->hwss.init_hw(dc); } void dc_init_callbacks(struct dc *dc, @@ -1148,10 +1201,10 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c for (i = 0; i < context->stream_count; i++) { if (context->streams[i]->apply_seamless_boot_optimization) - dc->optimize_seamless_boot = true; + dc->optimize_seamless_boot_streams++; } - if (!dc->optimize_seamless_boot) + if (dc->optimize_seamless_boot_streams == 0) dc->hwss.prepare_bandwidth(dc, context); /* re-program planes for existing stream, in case we need to @@ -1224,9 +1277,12 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c dc_enable_stereo(dc, context, dc_streams, context->stream_count); - if (!dc->optimize_seamless_boot) - /* pplib is notified if disp_num changed */ - dc->hwss.optimize_bandwidth(dc, context); + if (dc->optimize_seamless_boot_streams == 0) { + /* Must wait for no flips to be pending before doing optimize bw */ + wait_for_no_pipes_pending(dc, context); + /* pplib is notified if disp_num changed */ + dc->hwss.optimize_bandwidth(dc, context); + } for (i = 0; i < context->stream_count; i++) context->streams[i]->mode_changed = false; @@ -1262,12 +1318,18 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context) return (result == DC_OK); } +bool dc_is_hw_initialized(struct dc *dc) +{ + struct dc_bios *dcb = dc->ctx->dc_bios; + return dcb->funcs->is_accelerated_mode(dcb); +} + bool dc_post_update_surfaces_to_stream(struct dc *dc) { int i; struct dc_state *context = dc->current_state; - if (!dc->optimized_required || dc->optimize_seamless_boot) + if (!dc->optimized_required || dc->optimize_seamless_boot_streams > 0) return true; post_surface_trace(dc); @@ -1543,7 +1605,7 @@ static enum surface_update_type get_scaling_info_update_type( update_flags->bits.scaling_change = 1; if (u->scaling_info->src_rect.width > u->surface->src_rect.width - && u->scaling_info->src_rect.height > u->surface->src_rect.height) + || u->scaling_info->src_rect.height > u->surface->src_rect.height) /* Making src rect bigger requires a bandwidth change */ update_flags->bits.clock_change = 1; } @@ -1557,11 +1619,11 @@ static enum surface_update_type get_scaling_info_update_type( update_flags->bits.position_change = 1; if (update_flags->bits.clock_change - || update_flags->bits.bandwidth_change) + || update_flags->bits.bandwidth_change + || update_flags->bits.scaling_change) return UPDATE_TYPE_FULL; - if (update_flags->bits.scaling_change - || update_flags->bits.position_change) + if (update_flags->bits.position_change) return UPDATE_TYPE_MED; return UPDATE_TYPE_FAST; @@ -2051,7 +2113,7 @@ static void commit_planes_do_stream_update(struct dc *dc, dc->hwss.optimize_bandwidth(dc, dc->current_state); } else { - if (!dc->optimize_seamless_boot) + if (dc->optimize_seamless_boot_streams == 0) dc->hwss.prepare_bandwidth(dc, dc->current_state); core_link_enable_stream(dc->current_state, pipe_ctx); @@ -2092,7 +2154,7 @@ static void commit_planes_for_stream(struct dc *dc, int i, j; struct pipe_ctx *top_pipe_to_program = NULL; - if (dc->optimize_seamless_boot && surface_count > 0) { + if (dc->optimize_seamless_boot_streams > 0 && 
surface_count > 0) { /* Optimize seamless boot flag keeps clocks and watermarks high until * first flip. After first flip, optimization is required to lower * bandwidth. Important to note that it is expected UEFI will @@ -2101,12 +2163,14 @@ static void commit_planes_for_stream(struct dc *dc, */ if (stream->apply_seamless_boot_optimization) { stream->apply_seamless_boot_optimization = false; - dc->optimize_seamless_boot = false; - dc->optimized_required = true; + dc->optimize_seamless_boot_streams--; + + if (dc->optimize_seamless_boot_streams == 0) + dc->optimized_required = true; } } - if (update_type == UPDATE_TYPE_FULL && !dc->optimize_seamless_boot) { + if (update_type == UPDATE_TYPE_FULL && dc->optimize_seamless_boot_streams == 0) { dc->hwss.prepare_bandwidth(dc, context); context_clock_trace(dc, context); } @@ -2398,12 +2462,7 @@ void dc_set_power_state( enum dc_acpi_cm_power_state power_state) { struct kref refcount; - struct display_mode_lib *dml = kzalloc(sizeof(struct display_mode_lib), - GFP_KERNEL); - - ASSERT(dml); - if (!dml) - return; + struct display_mode_lib *dml; switch (power_state) { case DC_ACPI_CM_POWER_STATE_D0: @@ -2426,6 +2485,12 @@ void dc_set_power_state( * clean state, and dc hw programming optimizations will not * cause any trouble. */ + dml = kzalloc(sizeof(struct display_mode_lib), + GFP_KERNEL); + + ASSERT(dml); + if (!dml) + return; /* Preserve refcount */ refcount = dc->current_state->refcount; @@ -2439,10 +2504,10 @@ void dc_set_power_state( dc->current_state->refcount = refcount; dc->current_state->bw_ctx.dml = *dml; + kfree(dml); + break; } - - kfree(dml); } void dc_resume(struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index cef8c1ba9797a6b25c064aed04eb710ff4ed6744..a09119c10d7c9399724e45f741f985befb6b3ff9 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -45,6 +45,7 @@ #include "dpcd_defs.h" #include "dmcu.h" #include "hw/clk_mgr.h" +#include "../dce/dmub_psr.h" #define DC_LOGGER_INIT(logger) @@ -817,8 +818,8 @@ static bool dc_link_detect_helper(struct dc_link *link, } case SIGNAL_TYPE_EDP: { - read_current_link_settings_on_detect(link); detect_edp_sink_caps(link); + read_current_link_settings_on_detect(link); sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; sink_caps.signal = SIGNAL_TYPE_EDP; break; @@ -850,18 +851,12 @@ static bool dc_link_detect_helper(struct dc_link *link, if (memcmp(&link->dpcd_caps, &prev_dpcd_caps, sizeof(struct dpcd_caps))) same_dpcd = false; } - /* Active dongle plug in without display or downstream unplug*/ + /* Active dongle downstream unplug*/ if (link->type == dc_connection_active_dongle && link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) { - if (prev_sink != NULL) { + if (prev_sink != NULL) /* Downstream unplug */ dc_sink_release(prev_sink); - } else { - /* Empty dongle plug in */ - dp_verify_link_cap_with_retries(link, - &link->reported_link_cap, - LINK_TRAINING_MAX_VERIFY_RETRY); - } return true; } @@ -968,8 +963,7 @@ static bool dc_link_detect_helper(struct dc_link *link, same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid); if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && - sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX && - reason != DETECT_REASON_HPDRX) { + sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) { /* * TODO debug why Dell 2413 doesn't like * two link trainings @@ -2404,10 +2398,11 @@ bool 
dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool
 {
 	struct dc *dc = link->ctx->dc;
 	struct dmcu *dmcu = dc->res_pool->dmcu;
+	struct dmub_psr *psr = dc->res_pool->psr;
-
-
-	if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled)
+	if ((psr != NULL) && link->psr_feature_enabled)
+		psr->funcs->set_psr_enable(psr, allow_active);
+	else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled)
 		dmcu->funcs->set_psr_enable(dmcu, allow_active, wait);
 
 	link->psr_allow_active = allow_active;
@@ -2419,8 +2414,11 @@ bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state)
 {
 	struct dc *dc = link->ctx->dc;
 	struct dmcu *dmcu = dc->res_pool->dmcu;
+	struct dmub_psr *psr = dc->res_pool->psr;
 
-	if (dmcu != NULL && link->psr_feature_enabled)
+	if (psr != NULL && link->psr_feature_enabled)
+		psr->funcs->get_psr_state(psr_state);
+	else if (dmcu != NULL && link->psr_feature_enabled)
 		dmcu->funcs->get_psr_state(dmcu, psr_state);
 
 	return true;
@@ -2467,6 +2465,7 @@ bool dc_link_setup_psr(struct dc_link *link,
 {
 	struct dc *dc;
 	struct dmcu *dmcu;
+	struct dmub_psr *psr;
 	int i;
 	/* updateSinkPsrDpcdConfig*/
 	union dpcd_psr_configuration psr_configuration;
@@ -2478,8 +2477,9 @@
 
 	dc = link->ctx->dc;
 	dmcu = dc->res_pool->dmcu;
+	psr = dc->res_pool->psr;
 
-	if (!dmcu)
+	if (!dmcu && !psr)
 		return false;
 
@@ -2535,7 +2535,7 @@ bool dc_link_setup_psr(struct dc_link *link,
 		transmitter_to_phy_id(link->link_enc->transmitter);
 
 	psr_context->crtcTimingVerticalTotal = stream->timing.v_total;
-	psr_context->vsyncRateHz = div64_u64(div64_u64((stream->
+	psr_context->vsync_rate_hz = div64_u64(div64_u64((stream->
 					timing.pix_clk_100hz * 100),
 					stream->timing.v_total),
 					stream->timing.h_total);
@@ -2588,7 +2588,10 @@ bool dc_link_setup_psr(struct dc_link *link,
 	 */
 	psr_context->frame_delay = 0;
 
-	link->psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
+	if (psr)
+		link->psr_feature_enabled = psr->funcs->setup_psr(psr, link, psr_context);
+	else
+		link->psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
 
 	/* psr_enabled == 0 indicates setup_psr did not succeed, but this
 	 * should not happen since firmware should be running at this point
@@ -2863,6 +2866,52 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
 	return DC_OK;
 }
 
+
+enum dc_status dc_link_reallocate_mst_payload(struct dc_link *link)
+{
+	int i;
+	struct pipe_ctx *pipe_ctx;
+
+	// Clear all of MST payload then reallocate
+	for (i = 0; i < MAX_PIPES; i++) {
+		pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+
+		/* The driver enables split pipes for external monitors,
+		 * so we have to check whether pipe_ctx is a split pipe.
+		 * If it is, use the top pipe to reallocate.
+		 */
+		if (!pipe_ctx || pipe_ctx->top_pipe)
+			continue;
+
+		if (pipe_ctx->stream && pipe_ctx->stream->link == link &&
+				pipe_ctx->stream->dpms_off == false &&
+				pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+			deallocate_mst_payload(pipe_ctx);
+		}
+	}
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+
+		if (!pipe_ctx || pipe_ctx->top_pipe)
+			continue;
+
+		if (pipe_ctx->stream && pipe_ctx->stream->link == link &&
+				pipe_ctx->stream->dpms_off == false &&
+				pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+			/* enable/disable PHY will clear connection between BE and FE
+			 * need to restore it.
+ */ + link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc, + pipe_ctx->stream_res.stream_enc->id, true); + dc_link_allocate_mst_payload(pipe_ctx); + } + } + + return DC_OK; +} + #if defined(CONFIG_DRM_AMD_DC_HDCP) static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) { @@ -3361,3 +3410,10 @@ const struct dc_link_settings *dc_link_get_link_cap( return &link->preferred_link_setting; return &link->verified_link_cap; } + +void dc_link_overwrite_extended_receiver_cap( + struct dc_link *link) +{ + dp_overwrite_extended_receiver_cap(link); +} + diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index c2c136b121849765889616a68a2d77d65f786064..a49c10d5df26b7c161370ad5cfbe996cba0edef2 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c @@ -590,7 +590,7 @@ bool dal_ddc_submit_aux_command(struct ddc_service *ddc, struct aux_payload *payload) { uint32_t retrieved = 0; - bool ret = 0; + bool ret = false; if (!ddc) return false; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 42aa889fd0f5dabb9404d8246e893fc1fd5eee19..cb731c1d30b1000b4624f12a7e495e2946aded47 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -983,7 +983,7 @@ static enum link_training_result perform_clock_recovery_sequence( offset); /* 2. update DPCD of the receiver*/ - if (!retries_cr) + if (!retry_count) /* EPR #361076 - write as a 5-byte burst, * but only for the 1-st iteration.*/ dpcd_set_lt_pattern_and_lane_settings( @@ -1217,24 +1217,33 @@ static void configure_lttpr_mode(struct dc_link *link) uint8_t repeater_cnt; uint32_t aux_interval_address; uint8_t repeater_id; + enum dc_status result = DC_ERROR_UNEXPECTED; uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT; DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__); - core_link_write_dpcd(link, + result = core_link_write_dpcd(link, DP_PHY_REPEATER_MODE, (uint8_t *)&repeater_mode, sizeof(repeater_mode)); + if (result == DC_OK) { + link->dpcd_caps.lttpr_caps.mode = repeater_mode; + } + if (!link->is_lttpr_mode_transparent) { DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Non Transparent Mode\n", __func__); repeater_mode = DP_PHY_REPEATER_MODE_NON_TRANSPARENT; - core_link_write_dpcd(link, + result = core_link_write_dpcd(link, DP_PHY_REPEATER_MODE, (uint8_t *)&repeater_mode, sizeof(repeater_mode)); + if (result == DC_OK) { + link->dpcd_caps.lttpr_caps.mode = repeater_mode; + } + repeater_cnt = convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) { aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 + @@ -1883,6 +1892,16 @@ bool dp_verify_link_cap( /* disable PHY done possible by BIOS, will be done by driver itself */ dp_disable_link_phy(link, link->connector_signal); + /* Temporary Renoir-specific workaround for SWDEV-215184; + * PHY will sometimes be in bad state on hotplugging display from certain USB-C dongle, + * so add extra cycle of enabling and disabling the PHY before first link training. 
+ */ + if (link->link_enc->features.flags.bits.DP_IS_USB_C && + link->dc->debug.usbc_combo_phy_reset_wa) { + dp_enable_link_phy(link, link->connector_signal, dp_cs_id, cur); + dp_disable_link_phy(link, link->connector_signal); + } + dp_cs_id = get_clock_source_id(link); /* link training starts with the maximum common settings @@ -2854,10 +2873,12 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd /* For now we only handle 'Downstream port status' case. * If we got sink count changed it means * Downstream port status changed, - * then DM should call DC to do the detection. */ - if (hpd_rx_irq_check_link_loss_status( - link, - &hpd_irq_dpcd_data)) { + * then DM should call DC to do the detection. + * NOTE: Do not handle link loss on eDP since it is internal link*/ + if ((link->connector_signal != SIGNAL_TYPE_EDP) && + hpd_rx_irq_check_link_loss_status( + link, + &hpd_irq_dpcd_data)) { /* Connectivity log: link loss */ CONN_DATA_LINK_LOSS(link, hpd_irq_dpcd_data.raw, @@ -2874,18 +2895,14 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd return false; previous_link_settings = link->cur_link_settings; - dp_disable_link_phy(link, pipe_ctx->stream->signal); perform_link_training_with_retries(&previous_link_settings, true, LINK_TRAINING_ATTEMPTS, pipe_ctx, pipe_ctx->stream->signal); - if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link && - pipe_ctx->stream->dpms_off == false && - pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { - dc_link_allocate_mst_payload(pipe_ctx); - } + if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) + dc_link_reallocate_mst_payload(link); status = false; if (out_link_loss) @@ -3267,7 +3284,7 @@ static bool retrieve_link_cap(struct dc_link *link) dpcd_data[DP_TRAINING_AUX_RD_INTERVAL]; link->dpcd_caps.ext_receiver_cap_field_present = - aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1 ? 
true:false; + aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1; if (aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1) { uint8_t ext_cap_data[16]; @@ -3426,6 +3443,68 @@ static bool retrieve_link_cap(struct dc_link *link) return true; } +bool dp_overwrite_extended_receiver_cap(struct dc_link *link) +{ + uint8_t dpcd_data[16]; + uint32_t read_dpcd_retry_cnt = 3; + enum dc_status status = DC_ERROR_UNEXPECTED; + union dp_downstream_port_present ds_port = { 0 }; + union down_stream_port_count down_strm_port_count; + union edp_configuration_cap edp_config_cap; + + int i; + + for (i = 0; i < read_dpcd_retry_cnt; i++) { + status = core_link_read_dpcd( + link, + DP_DPCD_REV, + dpcd_data, + sizeof(dpcd_data)); + if (status == DC_OK) + break; + } + + link->dpcd_caps.dpcd_rev.raw = + dpcd_data[DP_DPCD_REV - DP_DPCD_REV]; + + if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0) + return false; + + ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT - + DP_DPCD_REV]; + + get_active_converter_info(ds_port.byte, link); + + down_strm_port_count.raw = dpcd_data[DP_DOWN_STREAM_PORT_COUNT - + DP_DPCD_REV]; + + link->dpcd_caps.allow_invalid_MSA_timing_param = + down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM; + + link->dpcd_caps.max_ln_count.raw = dpcd_data[ + DP_MAX_LANE_COUNT - DP_DPCD_REV]; + + link->dpcd_caps.max_down_spread.raw = dpcd_data[ + DP_MAX_DOWNSPREAD - DP_DPCD_REV]; + + link->reported_link_cap.lane_count = + link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT; + link->reported_link_cap.link_rate = dpcd_data[ + DP_MAX_LINK_RATE - DP_DPCD_REV]; + link->reported_link_cap.link_spread = + link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ? + LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; + + edp_config_cap.raw = dpcd_data[ + DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV]; + link->dpcd_caps.panel_mode_edp = + edp_config_cap.bits.ALT_SCRAMBLER_RESET; + link->dpcd_caps.dpcd_display_control_capable = + edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE; + + return true; +} + bool detect_dp_sink_caps(struct dc_link *link) { return retrieve_link_cap(link); @@ -3601,6 +3680,7 @@ static void set_crtc_test_pattern(struct dc_link *link, struct pipe_ctx *odm_pipe; enum controller_dp_color_space controller_color_space; int opp_cnt = 1; + int count; switch (test_pattern_color_space) { case DP_TEST_PATTERN_COLOR_SPACE_RGB: @@ -3644,6 +3724,12 @@ static void set_crtc_test_pattern(struct dc_link *link, NULL, width, height); + /* wait for dpg to blank pixel data with test pattern */ + for (count = 0; count < 1000; count++) { + if (opp->funcs->dpg_is_blanked(opp)) + break; + udelay(100); + } } } break; @@ -3839,8 +3925,38 @@ bool dc_link_dp_set_test_pattern( sizeof(training_pattern)); } } else { - /* CRTC Patterns */ + enum dc_color_space color_space = COLOR_SPACE_UNKNOWN; + + switch (test_pattern_color_space) { + case DP_TEST_PATTERN_COLOR_SPACE_RGB: + color_space = COLOR_SPACE_SRGB; + if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) + color_space = COLOR_SPACE_SRGB_LIMITED; + break; + + case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601: + color_space = COLOR_SPACE_YCBCR601; + if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) + color_space = COLOR_SPACE_YCBCR601_LIMITED; + break; + case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709: + color_space = COLOR_SPACE_YCBCR709; + if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) + color_space = COLOR_SPACE_YCBCR709_LIMITED; + break; + default: + break; + } + /* update MSA to requested color space */ + 
pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(pipe_ctx->stream_res.stream_enc,
+				&pipe_ctx->stream->timing,
+				color_space,
+				pipe_ctx->stream->use_vsc_sdp_for_colorimetry,
+				link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
+
 		/* CRTC Patterns */
 		set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space);
+
 		/* Set Test Pattern state */
 		link->test_pattern_enabled = true;
 	}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index 548aac02ca112f61a4e0ab21f1b677d06355ed05..ddb8550457672bc57408cdf4835a2cf98d8aea96 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -173,15 +173,20 @@ bool edp_receiver_ready_T9(struct dc_link *link)
 }
 bool edp_receiver_ready_T7(struct dc_link *link)
 {
-	unsigned int tries = 0;
 	unsigned char sinkstatus = 0;
 	unsigned char edpRev = 0;
 	enum dc_status result = DC_OK;
 
+	/* use absolute timestamp to constrain max T7 */
+	unsigned long long enter_timestamp = 0;
+	unsigned long long finish_timestamp = 0;
+	unsigned long long time_taken_in_ns = 0;
+
 	result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
 	if (result == DC_OK && edpRev < DP_EDP_12)
 		return true;
 	/* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/
+	enter_timestamp = dm_get_timestamp(link->ctx);
 	do {
 		sinkstatus = 0;
 		result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
@@ -189,8 +194,10 @@ bool edp_receiver_ready_T7(struct dc_link *link)
 			break;
 		if (result != DC_OK)
 			break;
-		udelay(25); //MAx T7 is 50ms
-	} while (++tries < 300);
+		udelay(25);
+		finish_timestamp = dm_get_timestamp(link->ctx);
+		time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp);
+	} while (time_taken_in_ns < 50 * 1000000); //Max T7 is 50ms
 
 	if (link->local_sink->edid_caps.panel_patch.extra_t7_ms > 0)
 		udelay(link->local_sink->edid_caps.panel_patch.extra_t7_ms * 1000);
@@ -518,6 +525,9 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
 		struct dsc_config dsc_cfg;
 		uint8_t dsc_packed_pps[128];
 
+		memset(&dsc_cfg, 0, sizeof(dsc_cfg));
+		memset(dsc_packed_pps, 0, 128);
+
 		/* Enable DSC hw block */
 		dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
 		dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 0c19de6783394079b0049931fbed3a83649be46c..a0eb9e533a615b316cc442918c740e47a90bab71 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -940,30 +940,43 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
 }
 
-static void calculate_integer_scaling(struct pipe_ctx *pipe_ctx)
+/*
+ * When handling 270 rotation in mixed SLS mode, we have
+ * stream->timing.h_border_left that is non-zero. If we are doing
+ * pipe-splitting, this h_border_left value gets added to recout.x and when it
+ * calls calculate_inits_and_adj_vp() and
+ * adjust_vp_and_init_for_seamless_clip(), it can cause viewport.height for a
+ * pipe to be incorrect.
+ *
+ * To fix this, instead of using stream->timing.h_border_left, we can use
+ * stream->dst.x to represent the border instead.
So we will set h_border_left + * to 0 and shift the appropriate amount in stream->dst.x. We will then + * perform all calculations in resource_build_scaling_params() based on this + * and then restore the h_border_left and stream->dst.x to their original + * values. + * + * shift_border_left_to_dst() will shift the amount of h_border_left to + * stream->dst.x and set h_border_left to 0. restore_border_left_from_dst() + * will restore h_border_left and stream->dst.x back to their original values + * We also need to make sure pipe_ctx->plane_res.scl_data.h_active uses the + * original h_border_left value in its calculation. + */ +int shift_border_left_to_dst(struct pipe_ctx *pipe_ctx) { - unsigned int integer_multiple = 1; - - if (pipe_ctx->plane_state->scaling_quality.integer_scaling) { - // calculate maximum # of replication of src onto addressable - integer_multiple = min( - pipe_ctx->stream->timing.h_addressable / pipe_ctx->stream->src.width, - pipe_ctx->stream->timing.v_addressable / pipe_ctx->stream->src.height); + int store_h_border_left = pipe_ctx->stream->timing.h_border_left; - //scale dst - pipe_ctx->stream->dst.width = integer_multiple * pipe_ctx->stream->src.width; - pipe_ctx->stream->dst.height = integer_multiple * pipe_ctx->stream->src.height; - - //center dst onto addressable - pipe_ctx->stream->dst.x = (pipe_ctx->stream->timing.h_addressable - pipe_ctx->stream->dst.width)/2; - pipe_ctx->stream->dst.y = (pipe_ctx->stream->timing.v_addressable - pipe_ctx->stream->dst.height)/2; - - //We are guaranteed that we are scaling in integer ratio - pipe_ctx->plane_state->scaling_quality.v_taps = 1; - pipe_ctx->plane_state->scaling_quality.h_taps = 1; - pipe_ctx->plane_state->scaling_quality.v_taps_c = 1; - pipe_ctx->plane_state->scaling_quality.h_taps_c = 1; + if (store_h_border_left) { + pipe_ctx->stream->timing.h_border_left = 0; + pipe_ctx->stream->dst.x += store_h_border_left; } + return store_h_border_left; +} + +void restore_border_left_from_dst(struct pipe_ctx *pipe_ctx, + int store_h_border_left) +{ + pipe_ctx->stream->dst.x -= store_h_border_left; + pipe_ctx->stream->timing.h_border_left = store_h_border_left; } bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) @@ -971,6 +984,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) const struct dc_plane_state *plane_state = pipe_ctx->plane_state; struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; bool res = false; + int store_h_border_left = shift_border_left_to_dst(pipe_ctx); DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); /* Important: scaling ratio calculation requires pixel format, * lb depth calculation requires recout and taps require scaling ratios. 
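The two helpers above implement a save/shift/restore idiom: fold h_border_left into stream->dst.x for the duration of the scaling math, then put both fields back. A minimal standalone sketch of that idiom, using invented stand-in types rather than the driver's structs:

/* Illustrative only: mirrors the shift/restore idiom used above. */
#include <assert.h>

struct timing { int h_border_left; };
struct stream { struct timing timing; int dst_x; };

/* Fold the left border into dst_x and zero it out, returning the saved value. */
static int shift_border_left_to_dst_x(struct stream *s)
{
	int saved = s->timing.h_border_left;

	if (saved) {
		s->timing.h_border_left = 0;
		s->dst_x += saved;
	}
	return saved;
}

/* Undo the shift using the saved value. */
static void restore_border_left(struct stream *s, int saved)
{
	s->dst_x -= saved;
	s->timing.h_border_left = saved;
}

int main(void)
{
	struct stream s = { .timing = { .h_border_left = 16 }, .dst_x = 100 };
	int saved = shift_border_left_to_dst_x(&s);

	/* ... scaling calculations would run here with the border folded in ... */
	assert(s.dst_x == 116 && s.timing.h_border_left == 0);

	restore_border_left(&s, saved);
	assert(s.dst_x == 100 && s.timing.h_border_left == 16);
	return 0;
}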
@@ -979,14 +993,18 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface( pipe_ctx->plane_state->format); - calculate_integer_scaling(pipe_ctx); - calculate_scaling_ratios(pipe_ctx); calculate_viewport(pipe_ctx); - if (pipe_ctx->plane_res.scl_data.viewport.height < 16 || pipe_ctx->plane_res.scl_data.viewport.width < 16) + if (pipe_ctx->plane_res.scl_data.viewport.height < 16 || + pipe_ctx->plane_res.scl_data.viewport.width < 16) { + if (store_h_border_left) { + restore_border_left_from_dst(pipe_ctx, + store_h_border_left); + } return false; + } calculate_recout(pipe_ctx); @@ -999,8 +1017,10 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_res.scl_data.recout.x += timing->h_border_left; pipe_ctx->plane_res.scl_data.recout.y += timing->v_border_top; - pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right; - pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom; + pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + + store_h_border_left + timing->h_border_right; + pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + + timing->v_border_top + timing->v_border_bottom; /* Taps calculations */ if (pipe_ctx->plane_res.xfm != NULL) @@ -1047,6 +1067,9 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) plane_state->dst_rect.x, plane_state->dst_rect.y); + if (store_h_border_left) + restore_border_left_from_dst(pipe_ctx, store_h_border_left); + return res; } @@ -1894,8 +1917,26 @@ static int acquire_resource_from_hw_enabled_state( pipe_ctx->plane_res.dpp = pool->dpps[tg_inst]; pipe_ctx->stream_res.opp = pool->opps[tg_inst]; - if (pool->dpps[tg_inst]) + if (pool->dpps[tg_inst]) { pipe_ctx->plane_res.mpcc_inst = pool->dpps[tg_inst]->inst; + + // Read DPP->MPCC->OPP Pipe from HW State + if (pool->mpc->funcs->read_mpcc_state) { + struct mpcc_state s = {0}; + + pool->mpc->funcs->read_mpcc_state(pool->mpc, pipe_ctx->plane_res.mpcc_inst, &s); + + if (s.dpp_id < MAX_MPCC) + pool->mpc->mpcc_array[pipe_ctx->plane_res.mpcc_inst].dpp_id = s.dpp_id; + + if (s.bot_mpcc_id < MAX_MPCC) + pool->mpc->mpcc_array[pipe_ctx->plane_res.mpcc_inst].mpcc_bot = + &pool->mpc->mpcc_array[s.bot_mpcc_id]; + + if (s.opp_id < MAX_OPP) + pipe_ctx->stream_res.opp->mpc_tree_params.opp_id = s.opp_id; + } + } pipe_ctx->pipe_idx = tg_inst; pipe_ctx->stream = stream; @@ -2025,6 +2066,13 @@ void dc_resource_state_construct( dst_ctx->clk_mgr = dc->clk_mgr; } + +bool dc_resource_is_dsc_encoding_supported(const struct dc *dc) +{ + return dc->res_pool->res_cap->num_dsc > 0; +} + + /** * dc_validate_global_state() - Determine if HW can support a given state * Checks HW resource availability and bandwidth requirement. 
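The acquire_resource_from_hw_enabled_state() hunk above rebuilds the software MPCC tree from IDs read back from hardware, applying each ID only if it is in range. A standalone sketch of that guarded re-linking, with invented types and sentinel values for illustration:

#include <stdio.h>

#define MAX_MPCC 6
#define INVALID_ID 0xFFu /* stand-in for an out-of-range read-back value */

struct mpcc { unsigned dpp_id; struct mpcc *bot; };

/* Re-link one software MPCC node from IDs read back from hardware,
 * ignoring any ID that is out of range. */
static void relink_from_hw(struct mpcc *array, unsigned inst,
			   unsigned hw_dpp_id, unsigned hw_bot_id)
{
	if (hw_dpp_id < MAX_MPCC)
		array[inst].dpp_id = hw_dpp_id;
	if (hw_bot_id < MAX_MPCC)
		array[inst].bot = &array[hw_bot_id];
}

int main(void)
{
	struct mpcc mpcc_array[MAX_MPCC] = { 0 };

	relink_from_hw(mpcc_array, 0, 2, 1);                   /* valid: applied */
	relink_from_hw(mpcc_array, 3, INVALID_ID, INVALID_ID); /* out of range: ignored */

	printf("mpcc0 -> dpp %u, bot %p\n", mpcc_array[0].dpp_id,
	       (void *)mpcc_array[0].bot);
	return 0;
}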
@@ -2281,7 +2329,7 @@ static void set_avi_info_frame(
 	if (color_space == COLOR_SPACE_SRGB ||
 		color_space == COLOR_SPACE_2020_RGB_FULLRANGE) {
 		hdmi_info.bits.Q0_Q1   = RGB_QUANTIZATION_FULL_RANGE;
-		hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_FULL_RANGE;
+		hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
 	} else if (color_space == COLOR_SPACE_SRGB_LIMITED ||
 		color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) {
 		hdmi_info.bits.Q0_Q1   = RGB_QUANTIZATION_LIMITED_RANGE;
@@ -2811,3 +2859,48 @@ unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format)
 		return -1;
 	}
 }
+static unsigned int get_max_audio_sample_rate(struct audio_mode *modes)
+{
+	if (modes) {
+		if (modes->sample_rates.rate.RATE_192)
+			return 192000;
+		if (modes->sample_rates.rate.RATE_176_4)
+			return 176400;
+		if (modes->sample_rates.rate.RATE_96)
+			return 96000;
+		if (modes->sample_rates.rate.RATE_88_2)
+			return 88200;
+		if (modes->sample_rates.rate.RATE_48)
+			return 48000;
+		if (modes->sample_rates.rate.RATE_44_1)
+			return 44100;
+		if (modes->sample_rates.rate.RATE_32)
+			return 32000;
+	}
+	/*original logic when no audio info*/
+	return 441000;
+}
+
+void get_audio_check(struct audio_info *aud_modes,
+		struct audio_check *audio_chk)
+{
+	unsigned int i;
+	unsigned int max_sample_rate = 0;
+
+	if (aud_modes) {
+		audio_chk->audio_packet_type = 0x2;/*audio sample packet AP = .25 for layout0, 1 for layout1*/
+
+		audio_chk->max_audiosample_rate = 0;
+		for (i = 0; i < aud_modes->mode_count; i++) {
+			max_sample_rate = get_max_audio_sample_rate(&aud_modes->modes[i]);
+			if (audio_chk->max_audiosample_rate < max_sample_rate)
+				audio_chk->max_audiosample_rate = max_sample_rate;
+			/*dts takes the same as type 2: AP = 0.25*/
+		}
+		/*check which one takes more bandwidth*/
+		if (audio_chk->max_audiosample_rate > 192000)
+			audio_chk->audio_packet_type = 0x9;/*AP = 1*/
+		audio_chk->acat = 0;/*not supported*/
+	}
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index b43a4b115fd8ad3ffa71c675d0e56b409863ecd2..6ddbb00ed37a5aa5f0b53a525995bb327bc882b7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -406,25 +406,30 @@ bool dc_stream_add_writeback(struct dc *dc,
 		stream->writeback_info[stream->num_wb_info++] = *wb_info;
 	}
 
-	if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
-		dm_error("DC: update_bandwidth failed!\n");
-		return false;
-	}
-
-	/* enable writeback */
 	if (dc->hwss.enable_writeback) {
 		struct dc_stream_status *stream_status = dc_stream_get_status(stream);
 		struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+		dwb->otg_inst = stream_status->primary_otg_inst;
+	}
+	if (IS_DIAG_DC(dc->ctx->dce_environment)) {
+		if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
+			dm_error("DC: update_bandwidth failed!\n");
+			return false;
+		}
 
-		if (dwb->funcs->is_enabled(dwb)) {
-			/* writeback pipe already enabled, only need to update */
-			dc->hwss.update_writeback(dc, stream_status, wb_info, dc->current_state);
-		} else {
-			/* Enable writeback pipe from scratch*/
-			dc->hwss.enable_writeback(dc, stream_status, wb_info, dc->current_state);
+		/* enable writeback */
+		if (dc->hwss.enable_writeback) {
+			struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+
+			if (dwb->funcs->is_enabled(dwb)) {
+				/* writeback pipe already enabled, only need to update */
+				dc->hwss.update_writeback(dc, wb_info, dc->current_state);
+			} else {
+				/* Enable writeback pipe from scratch*/
+
dc->hwss.enable_writeback(dc, wb_info, dc->current_state); + } } } - return true; } @@ -463,19 +468,29 @@ bool dc_stream_remove_writeback(struct dc *dc, } stream->num_wb_info = j; - /* recalculate and apply DML parameters */ - if (!dc->hwss.update_bandwidth(dc, dc->current_state)) { - dm_error("DC: update_bandwidth failed!\n"); - return false; - } - - /* disable writeback */ - if (dc->hwss.disable_writeback) - dc->hwss.disable_writeback(dc, dwb_pipe_inst); + if (IS_DIAG_DC(dc->ctx->dce_environment)) { + /* recalculate and apply DML parameters */ + if (!dc->hwss.update_bandwidth(dc, dc->current_state)) { + dm_error("DC: update_bandwidth failed!\n"); + return false; + } + /* disable writeback */ + if (dc->hwss.disable_writeback) + dc->hwss.disable_writeback(dc, dwb_pipe_inst); + } return true; } +bool dc_stream_warmup_writeback(struct dc *dc, + int num_dwb, + struct dc_writeback_info *wb_info) +{ + if (dc->hwss.mmhubbub_warmup) + return dc->hwss.mmhubbub_warmup(dc, num_dwb, wb_info); + else + return false; +} uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream) { uint8_t i; diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index c24639080371f73f99a73dc48d4b02ee38bc0f06..8ff25b5dd2f6d76009a633831de5f219defc800a 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -39,7 +39,7 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.2.62" +#define DC_VER "3.2.69" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -157,11 +157,14 @@ struct dc_surface_dcc_cap { bool const_color_support; }; -struct dc_static_screen_events { - bool force_trigger; - bool cursor_update; - bool surface_update; - bool overlay_update; +struct dc_static_screen_params { + struct { + bool force_trigger; + bool cursor_update; + bool surface_update; + bool overlay_update; + } triggers; + unsigned int num_frames; }; @@ -367,6 +370,7 @@ struct dc_debug_options { bool disable_hubp_power_gate; bool disable_dsc_power_gate; int dsc_min_slice_height_override; + int dsc_bpp_increment_div; bool native422_support; bool disable_pplib_wm_range; enum wm_report_mode pplib_wm_report_mode; @@ -419,6 +423,9 @@ struct dc_debug_options { bool nv12_iflip_vm_wa; bool disable_dram_clock_change_vactive_support; bool validate_dml_output; + bool enable_dmcub_surface_flip; + bool usbc_combo_phy_reset_wa; + bool disable_dsc; }; struct dc_debug_data { @@ -513,7 +520,7 @@ struct dc { bool optimized_required; /* Require to maintain clocks and bandwidth for UEFI enabled HW */ - bool optimize_seamless_boot; + int optimize_seamless_boot_streams; /* FBC compressor */ struct compressor *fbc_compressor; @@ -909,6 +916,8 @@ void dc_resource_state_copy_construct_current( void dc_resource_state_destruct(struct dc_state *context); +bool dc_resource_is_dsc_encoding_supported(const struct dc *dc); + /* * TODO update to make it about validation sets * Set up streams and links associated to drive sinks @@ -1066,6 +1075,7 @@ unsigned int dc_get_current_backlight_pwm(struct dc *dc); unsigned int dc_get_target_backlight_pwm(struct dc *dc); bool dc_is_dmcu_initialized(struct dc *dc); +bool dc_is_hw_initialized(struct dc *dc); enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping); void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg); diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h index 
8ec09813ee17c3e5ac68d7215984b3fce291a19d..3800340a5b4f62f3676f05239512315463bbdc19 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h @@ -53,7 +53,8 @@ struct dc_dsc_policy { uint32_t min_target_bpp; }; -bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, +bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, + const uint8_t *dpcd_dsc_basic_data, const uint8_t *dpcd_dsc_ext_data, struct dsc_dec_dpcd_caps *dsc_sink_caps); @@ -77,4 +78,6 @@ bool dc_dsc_compute_config( void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, struct dc_dsc_policy *policy); +void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c index 02a63e9cb62ff555b2cb2945c05f1237001f079b..737048d8a96c16cf2bf2b89763e243a80d2af8db 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c @@ -552,6 +552,36 @@ uint32_t generic_read_indirect_reg(const struct dc_context *ctx, return value; } +uint32_t generic_indirect_reg_get(const struct dc_context *ctx, + uint32_t addr_index, uint32_t addr_data, + uint32_t index, int n, + uint8_t shift1, uint32_t mask1, uint32_t *field_value1, + ...) +{ + uint32_t shift, mask, *field_value; + uint32_t value = 0; + int i = 1; + + va_list ap; + + va_start(ap, field_value1); + + value = generic_read_indirect_reg(ctx, addr_index, addr_data, index); + *field_value1 = get_reg_field_value_ex(value, mask1, shift1); + + while (i < n) { + shift = va_arg(ap, uint32_t); + mask = va_arg(ap, uint32_t); + field_value = va_arg(ap, uint32_t *); + + *field_value = get_reg_field_value_ex(value, mask, shift); + i++; + } + + va_end(ap); + + return value; +} uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx, uint32_t addr_index, uint32_t addr_data, diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 1ff79f703734090ce5ffdba22e65e55ae7717acc..d25603128394b2b4cc3d366a7d5e8b26a37fec37 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -133,6 +133,7 @@ struct dc_link { struct link_flags { bool dp_keep_receiver_powered; bool dp_skip_DID2; + bool dp_skip_reset_segment; } wa_flags; struct link_mst_stream_allocation_table mst_stream_alloc_table; @@ -204,6 +205,7 @@ enum dc_detect_reason { bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason); bool dc_link_get_hpd_state(struct dc_link *dc_link); enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx); +enum dc_status dc_link_reallocate_mst_payload(struct dc_link *link); /* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt). 
* Return: @@ -300,6 +302,9 @@ uint32_t dc_link_bandwidth_kbps( const struct dc_link_settings *dc_link_get_link_cap( const struct dc_link *link); +void dc_link_overwrite_extended_receiver_cap( + struct dc_link *link); + bool dc_submit_i2c( struct dc *dc, uint32_t link_index, diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 3ea54321b04581da8405157c05e752a992a96c84..92096de79dec14d1f2af98572249d490acd91c35 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -344,10 +344,17 @@ bool dc_add_all_planes_for_stream( bool dc_stream_add_writeback(struct dc *dc, struct dc_stream_state *stream, struct dc_writeback_info *wb_info); + bool dc_stream_remove_writeback(struct dc *dc, struct dc_stream_state *stream, uint32_t dwb_pipe_inst); + +bool dc_stream_warmup_writeback(struct dc *dc, + int num_dwb, + struct dc_writeback_info *wb_info); + bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream); + bool dc_stream_set_dynamic_metadata(struct dc *dc, struct dc_stream_state *stream, struct dc_dmdata_attributes *dmdata_attr); @@ -432,10 +439,10 @@ bool dc_stream_get_crc(struct dc *dc, uint32_t *g_y, uint32_t *b_cb); -void dc_stream_set_static_screen_events(struct dc *dc, +void dc_stream_set_static_screen_params(struct dc *dc, struct dc_stream_state **stream, int num_streams, - const struct dc_static_screen_events *events); + const struct dc_static_screen_params *params); void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream, enum dc_dynamic_expansion option); diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 2b92bfa28bde8665eefdbcccbef5229f5e1b27ec..e59532d98cb425d0a3de8d1b8944c4219c06a415 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -60,7 +60,12 @@ enum dce_environment { DCE_ENV_FPGA_MAXIMUS, /* Emulation on real HW or on FPGA. Used by Diagnostics, enforces * requirements of Diagnostics team. */ - DCE_ENV_DIAG + DCE_ENV_DIAG, + /* + * Guest VM system, DC HW may exist but is not virtualized and + * should not be used. SW support for VDI only. + */ + DCE_ENV_VIRTUAL_HW }; /* Note: use these macro definitions instead of direct comparison! */ @@ -224,6 +229,7 @@ struct dc_panel_patch { unsigned int extra_t12_ms; unsigned int extra_delay_backlight_off; unsigned int extra_t7_ms; + unsigned int manage_secondary_link; }; struct dc_edid_caps { @@ -598,7 +604,11 @@ struct audio_info { /* this field must be last in this struct */ struct audio_mode modes[DC_MAX_AUDIO_DESC_COUNT]; }; - +struct audio_check { + unsigned int audio_packet_type; + unsigned int max_audiosample_rate; + unsigned int acat; +}; enum dc_infoframe_type { DC_HDMI_INFOFRAME_TYPE_VENDOR = 0x81, DC_HDMI_INFOFRAME_TYPE_AVI = 0x82, @@ -719,7 +729,7 @@ struct psr_context { /* The VSync rate in Hz used to calculate the * step size for smooth brightness feature */ - unsigned int vsyncRateHz; + unsigned int vsync_rate_hz; unsigned int skipPsrWaitForPllLock; unsigned int numberOfControllers; /* Unused, for future use. 
To indicate that first changed frame from diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c index e619e67e6b513a2c4183a56da08a2b60e82c1a62..30d953acd01613bf8afaae3d21e7d708422bc4e1 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c @@ -537,9 +537,6 @@ static void dcn10_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait) if (dmcu->dmcu_state != DMCU_RUNNING) return; - dcn10_get_dmcu_psr_state(dmcu, &psr_state); - if (psr_state == 0 && !enable) - return; /* waitDMCUReadyForCmd */ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, dmcu_wait_reg_ready_interval, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c index 1cd4d8fc361f9ac4e8d2d95fd7878d57fb0f2945..066188ba79498abfc0d523f8b76a26337da4ba1d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c @@ -100,20 +100,6 @@ static uint32_t get_hw_buffer_available_size( dce_i2c_hw->buffer_used_bytes; } -static uint32_t get_speed( - const struct dce_i2c_hw *dce_i2c_hw) -{ - uint32_t pre_scale = 0; - - REG_GET(SPEED, DC_I2C_DDC1_PRESCALE, &pre_scale); - - /* [anaumov] it seems following is unnecessary */ - /*ASSERT(value.bits.DC_I2C_DDC1_PRESCALE);*/ - return pre_scale ? - dce_i2c_hw->reference_frequency / pre_scale : - dce_i2c_hw->default_speed; -} - static void process_channel_reply( struct dce_i2c_hw *dce_i2c_hw, struct i2c_payload *reply) @@ -278,16 +264,25 @@ static void set_speed( struct dce_i2c_hw *dce_i2c_hw, uint32_t speed) { + uint32_t xtal_ref_div = 0; + uint32_t prescale = 0; + + REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div); + + if (xtal_ref_div == 0) + xtal_ref_div = 2; + + prescale = ((dce_i2c_hw->reference_frequency * 2) / xtal_ref_div) / speed; if (speed) { if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL) REG_UPDATE_N(SPEED, 3, - FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), dce_i2c_hw->reference_frequency / speed, + FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale, FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2, FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 
2:1); else REG_UPDATE_N(SPEED, 2, - FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), dce_i2c_hw->reference_frequency / speed, + FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale, FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2); } } @@ -344,9 +339,7 @@ static void release_engine( bool safe_to_reset; /* Restore original HW engine speed */ - - set_speed(dce_i2c_hw, dce_i2c_hw->original_speed); - + set_speed(dce_i2c_hw, dce_i2c_hw->default_speed); /* Reset HW engine */ { @@ -378,7 +371,6 @@ struct dce_i2c_hw *acquire_i2c_hw_engine( { uint32_t counter = 0; enum gpio_result result; - uint32_t current_speed; struct dce_i2c_hw *dce_i2c_hw = NULL; if (!ddc) @@ -416,11 +408,6 @@ struct dce_i2c_hw *acquire_i2c_hw_engine( dce_i2c_hw->ddc = ddc; - current_speed = get_speed(dce_i2c_hw); - - if (current_speed) - dce_i2c_hw->original_speed = current_speed; - if (!setup_engine(dce_i2c_hw)) { release_engine(dce_i2c_hw); return NULL; @@ -478,13 +465,9 @@ static void submit_channel_request_hw( static uint32_t get_transaction_timeout_hw( const struct dce_i2c_hw *dce_i2c_hw, - uint32_t length) + uint32_t length, + uint32_t speed) { - - uint32_t speed = get_speed(dce_i2c_hw); - - - uint32_t period_timeout; uint32_t num_of_clock_stretches; @@ -504,7 +487,8 @@ static uint32_t get_transaction_timeout_hw( bool dce_i2c_hw_engine_submit_payload( struct dce_i2c_hw *dce_i2c_hw, struct i2c_payload *payload, - bool middle_of_transaction) + bool middle_of_transaction, + uint32_t speed) { struct i2c_request_transaction_data request; @@ -542,7 +526,7 @@ bool dce_i2c_hw_engine_submit_payload( /* obtain timeout value before submitting request */ transaction_timeout = get_transaction_timeout_hw( - dce_i2c_hw, payload->length + 1); + dce_i2c_hw, payload->length + 1, speed); submit_channel_request_hw( dce_i2c_hw, &request); @@ -588,13 +572,11 @@ bool dce_i2c_submit_command_hw( struct i2c_payload *payload = cmd->payloads + index_of_payload; if (!dce_i2c_hw_engine_submit_payload( - dce_i2c_hw, payload, mot)) { + dce_i2c_hw, payload, mot, cmd->speed)) { result = false; break; } - - ++index_of_payload; } @@ -625,7 +607,6 @@ void dce_i2c_hw_construct( dce_i2c_hw->buffer_used_bytes = 0; dce_i2c_hw->transaction_count = 0; dce_i2c_hw->engine_keep_power_up_count = 1; - dce_i2c_hw->original_speed = DEFAULT_I2C_HW_SPEED; dce_i2c_hw->default_speed = DEFAULT_I2C_HW_SPEED; dce_i2c_hw->send_reset_length = 0; dce_i2c_hw->setup_limit = I2C_SETUP_TIME_LIMIT_DCE; @@ -640,9 +621,6 @@ void dce100_i2c_hw_construct( const struct dce_i2c_shift *shifts, const struct dce_i2c_mask *masks) { - - uint32_t xtal_ref_div = 0; - dce_i2c_hw_construct(dce_i2c_hw, ctx, engine_id, @@ -650,21 +628,6 @@ void dce100_i2c_hw_construct( shifts, masks); dce_i2c_hw->buffer_size = I2C_HW_BUFFER_SIZE_DCE100; - - REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div); - - if (xtal_ref_div == 0) - xtal_ref_div = 2; - - /*Calculating Reference Clock by divding original frequency by - * XTAL_REF_DIV. - * At upper level, uint32_t reference_frequency = - * dal_dce_i2c_get_reference_clock(as) >> 1 - * which already divided by 2. 
So we need x2 to get original - * reference clock from ppll_info - */ - dce_i2c_hw->reference_frequency = - (dce_i2c_hw->reference_frequency * 2) / xtal_ref_div; } void dce112_i2c_hw_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h index d4b2037f7d74f10d336b4286a943eb94334cc11f..fb055e6883c0b7044a6497aaf443b58d3fc44694 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h @@ -256,7 +256,6 @@ struct i2c_request_transaction_data { struct dce_i2c_hw { struct ddc *ddc; - uint32_t original_speed; uint32_t engine_keep_power_up_count; uint32_t transaction_count; uint32_t buffer_used_bytes; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c new file mode 100644 index 0000000000000000000000000000000000000000..225955ec6d39269f1bd883f326be872c22be0686 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c @@ -0,0 +1,220 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dmub_psr.h" +#include "dc.h" +#include "dc_dmub_srv.h" +#include "../../dmub/inc/dmub_srv.h" +#include "dmub_fw_state.h" +#include "core_types.h" +#include "ipp.h" + +#define MAX_PIPES 6 + +/** + * Get PSR state from firmware. + */ +static void dmub_get_psr_state(uint32_t *psr_state) +{ + // Not yet implemented + // Trigger GPINT interrupt from firmware +} + +/** + * Enable/Disable PSR. + */ +static void dmub_set_psr_enable(struct dmub_psr *dmub, bool enable) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc = dmub->ctx; + + cmd.psr_enable.header.type = DMUB_CMD__PSR; + + if (enable) + cmd.psr_enable.header.sub_type = DMUB_CMD__PSR_ENABLE; + else + cmd.psr_enable.header.sub_type = DMUB_CMD__PSR_DISABLE; + + cmd.psr_enable.header.payload_bytes = 0; // Send header only + + dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_enable.header); + dc_dmub_srv_cmd_execute(dc->dmub_srv); + dc_dmub_srv_wait_idle(dc->dmub_srv); +} + +/** + * Set PSR level. 
+ */ +static void dmub_set_psr_level(struct dmub_psr *dmub, uint16_t psr_level) +{ + union dmub_rb_cmd cmd; + uint32_t psr_state = 0; + struct dc_context *dc = dmub->ctx; + + dmub_get_psr_state(&psr_state); + + if (psr_state == 0) + return; + + cmd.psr_set_level.header.type = DMUB_CMD__PSR; + cmd.psr_set_level.header.sub_type = DMUB_CMD__PSR_SET_LEVEL; + cmd.psr_set_level.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_level_data); + cmd.psr_set_level.psr_set_level_data.psr_level = psr_level; + + dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_set_level.header); + dc_dmub_srv_cmd_execute(dc->dmub_srv); + dc_dmub_srv_wait_idle(dc->dmub_srv); +} + +/** + * Setup PSR by programming phy registers and sending psr hw context values to firmware. + */ +static bool dmub_setup_psr(struct dmub_psr *dmub, + struct dc_link *link, + struct psr_context *psr_context) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc = dmub->ctx; + struct dmub_cmd_psr_copy_settings_data *copy_settings_data + = &cmd.psr_copy_settings.psr_copy_settings_data; + struct pipe_ctx *pipe_ctx = NULL; + struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx; + + for (int i = 0; i < MAX_PIPES; i++) { + if (res_ctx && + res_ctx->pipe_ctx[i].stream && + res_ctx->pipe_ctx[i].stream->link && + res_ctx->pipe_ctx[i].stream->link == link && + res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) { + pipe_ctx = &res_ctx->pipe_ctx[i]; + break; + } + } + + if (!pipe_ctx || + !&pipe_ctx->plane_res || + !&pipe_ctx->stream_res) + return false; + + // Program DP DPHY fast training registers + link->link_enc->funcs->psr_program_dp_dphy_fast_training(link->link_enc, + psr_context->psrExitLinkTrainingRequired); + + // Program DP_SEC_CNTL1 register to set transmission GPS0 line num and priority to high + link->link_enc->funcs->psr_program_secondary_packet(link->link_enc, + psr_context->sdpTransmitLineNumDeadline); + + cmd.psr_copy_settings.header.type = DMUB_CMD__PSR; + cmd.psr_copy_settings.header.sub_type = DMUB_CMD__PSR_COPY_SETTINGS; + cmd.psr_copy_settings.header.payload_bytes = sizeof(struct dmub_cmd_psr_copy_settings_data); + + // Hw insts + copy_settings_data->dpphy_inst = psr_context->phyType; + copy_settings_data->aux_inst = psr_context->channel; + copy_settings_data->digfe_inst = psr_context->engineId; + copy_settings_data->digbe_inst = psr_context->transmitterId; + + copy_settings_data->mpcc_inst = pipe_ctx->plane_res.mpcc_inst; + + if (pipe_ctx->plane_res.hubp) + copy_settings_data->hubp_inst = pipe_ctx->plane_res.hubp->inst; + else + copy_settings_data->hubp_inst = 0; + if (pipe_ctx->plane_res.dpp) + copy_settings_data->dpp_inst = pipe_ctx->plane_res.dpp->inst; + else + copy_settings_data->dpp_inst = 0; + if (pipe_ctx->stream_res.opp) + copy_settings_data->opp_inst = pipe_ctx->stream_res.opp->inst; + else + copy_settings_data->opp_inst = 0; + if (pipe_ctx->stream_res.tg) + copy_settings_data->otg_inst = pipe_ctx->stream_res.tg->inst; + else + copy_settings_data->otg_inst = 0; + + // Misc + copy_settings_data->psr_level = psr_context->psr_level.u32all; + copy_settings_data->hyst_frames = psr_context->timehyst_frames; + copy_settings_data->hyst_lines = psr_context->hyst_lines; + copy_settings_data->phy_type = psr_context->phyType; + copy_settings_data->aux_repeat = psr_context->aux_repeats; + copy_settings_data->smu_optimizations_en = psr_context->allow_smu_optimizations; + copy_settings_data->skip_wait_for_pll_lock = psr_context->skipPsrWaitForPllLock; + copy_settings_data->frame_delay = 
psr_context->frame_delay; + copy_settings_data->smu_phy_id = psr_context->smuPhyId; + copy_settings_data->num_of_controllers = psr_context->numberOfControllers; + copy_settings_data->frame_cap_ind = psr_context->psrFrameCaptureIndicationReq; + copy_settings_data->phy_num = psr_context->frame_delay & 0x7; + copy_settings_data->link_rate = psr_context->frame_delay & 0xF; + + dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_copy_settings.header); + dc_dmub_srv_cmd_execute(dc->dmub_srv); + dc_dmub_srv_wait_idle(dc->dmub_srv); + + return true; +} + +static const struct dmub_psr_funcs psr_funcs = { + .set_psr_enable = dmub_set_psr_enable, + .setup_psr = dmub_setup_psr, + .get_psr_state = dmub_get_psr_state, + .set_psr_level = dmub_set_psr_level, +}; + +/** + * Construct PSR object. + */ +static void dmub_psr_construct(struct dmub_psr *psr, struct dc_context *ctx) +{ + psr->ctx = ctx; + psr->funcs = &psr_funcs; +} + +/** + * Allocate and initialize PSR object. + */ +struct dmub_psr *dmub_psr_create(struct dc_context *ctx) +{ + struct dmub_psr *psr = kzalloc(sizeof(struct dmub_psr), GFP_KERNEL); + + if (psr == NULL) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dmub_psr_construct(psr, ctx); + + return psr; +} + +/** + * Deallocate PSR object. + */ +void dmub_psr_destroy(struct dmub_psr **dmub) +{ + kfree(dmub); + *dmub = NULL; +} diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_state.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h similarity index 58% rename from drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_state.h rename to drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h index c87b1ba7590ee00ca2f27060f84d56e9718bb435..229958de3035b92d0974565b1e76b304a32adb47 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_state.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h @@ -1,5 +1,5 @@ /* - * Copyright 2019 Advanced Micro Devices, Inc. + * Copyright 2012-16 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -23,51 +23,25 @@ * */ -#ifndef _DMUB_FW_STATE_H_ -#define _DMUB_FW_STATE_H_ +#ifndef _DMUB_PSR_H_ +#define _DMUB_PSR_H_ -#include "dmub_types.h" +#include "os_types.h" -#pragma pack(push, 1) - -struct dmub_fw_state { - /** - * @phy_initialized_during_fw_boot: - * - * Detects if VBIOS/VBL has ran before firmware boot. - * A value of 1 will usually mean S0i3 boot. - */ - uint8_t phy_initialized_during_fw_boot; - - /** - * @intialized_phy: - * - * Bit vector of initialized PHY. - */ - uint8_t initialized_phy; - - /** - * @enabled_phy: - * - * Bit vector of enabled PHY for DP alt mode switch tracking. - */ - uint8_t enabled_phy; - - /** - * @dmcu_fw_loaded: - * - * DMCU auto load state. - */ - uint8_t dmcu_fw_loaded; +struct dmub_psr { + struct dc_context *ctx; + const struct dmub_psr_funcs *funcs; +}; - /** - * @psr_state: - * - * PSR state tracking. 
- */
-	uint8_t psr_state;
+struct dmub_psr_funcs {
+	void (*set_psr_enable)(struct dmub_psr *dmub, bool enable);
+	bool (*setup_psr)(struct dmub_psr *dmub, struct dc_link *link, struct psr_context *psr_context);
+	void (*get_psr_state)(uint32_t *psr_state);
+	void (*set_psr_level)(struct dmub_psr *dmub, uint16_t psr_level);
+};
 
-#pragma pack(pop)
+struct dmub_psr *dmub_psr_create(struct dc_context *ctx);
+void dmub_psr_destroy(struct dmub_psr **dmub);
+
-#endif /* _DMUB_FW_STATE_H_ */
+#endif /* _DCE_DMUB_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 4939cf3b316f29c2fe30cb7be4556df83cde878d..5b689273ff4403ccf8fd2538abb643fc726c290a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1373,9 +1373,13 @@ static enum dc_status apply_single_controller_ctx_to_hw(
 	// DRR should set trigger event to monitor surface update event
 	if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
 		event_triggers = 0x80;
+	/* Event triggers and num frames initialized for DRR, but can be
+	 * later updated for PSR use. Note DRR trigger events are generated
+	 * regardless of whether num_frames is met.
+	 */
 	if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control)
 		pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
-				pipe_ctx->stream_res.tg, event_triggers);
+				pipe_ctx->stream_res.tg, event_triggers, 2);
 
 	if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
 		pipe_ctx->stream_res.stream_enc->funcs->dig_connect_to_otg(
@@ -1706,6 +1710,8 @@ static void set_drr(struct pipe_ctx **pipe_ctx,
 	struct drr_params params = {0};
 	// DRR should set trigger event to monitor surface update event
 	unsigned int event_triggers = 0x80;
+	// Note DRR trigger events are generated regardless of whether num_frames is met.
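/* Illustrative aside, not part of the patch: the reworked dce110
 * set_static_screen_control() just below packs the boolean triggers into a
 * single event mask, and the timing generators clamp num_frames to the 8-bit
 * register field. A standalone sketch of both steps (names invented here):
 */
#include <stdint.h>
#include <stdio.h>

struct triggers {
	int force_trigger;
	int cursor_update;
	int surface_update;
	int overlay_update;
};

/* Mirror the bit layout used by the dce110 implementation. */
static uint32_t pack_event_mask(const struct triggers *t)
{
	uint32_t mask = 0;

	if (t->overlay_update)
		mask |= 0x100;
	if (t->surface_update)
		mask |= 0x80;
	if (t->cursor_update)
		mask |= 0x2;
	if (t->force_trigger)
		mask |= 0x1;
	return mask;
}

/* By register spec the frame count field is only 8 bits wide. */
static uint32_t clamp_num_frames(uint32_t num_frames)
{
	return num_frames > 0xFF ? 0xFF : num_frames;
}

int main(void)
{
	struct triggers t = { .surface_update = 1, .cursor_update = 1 };

	/* prints mask=0x82 frames=255 */
	printf("mask=0x%x frames=%u\n", pack_event_mask(&t), clamp_num_frames(300));
	return 0;
}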
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 4939cf3b316f29c2fe30cb7be4556df83cde878d..5b689273ff4403ccf8fd2538abb643fc726c290a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1373,9 +1373,13 @@ static enum dc_status apply_single_controller_ctx_to_hw( // DRR should set trigger event to monitor surface update event if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0) event_triggers = 0x80; + /* Event triggers and num frames initialized for DRR, but can be + * later updated for PSR use. Note that DRR trigger events are + * generated regardless of whether num_frames is met. + */ if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control) pipe_ctx->stream_res.tg->funcs->set_static_screen_control( - pipe_ctx->stream_res.tg, event_triggers); + pipe_ctx->stream_res.tg, event_triggers, 2); if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) pipe_ctx->stream_res.stream_enc->funcs->dig_connect_to_otg( @@ -1706,6 +1710,8 @@ static void set_drr(struct pipe_ctx **pipe_ctx, struct drr_params params = {0}; // DRR should set trigger event to monitor surface update event unsigned int event_triggers = 0x80; + // Note that DRR trigger events are generated regardless of whether num_frames is met. + unsigned int num_frames = 2; params.vertical_total_max = vmax; params.vertical_total_min = vmin; @@ -1721,7 +1727,7 @@ if (vmax != 0 && vmin != 0) pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control( pipe_ctx[i]->stream_res.tg, - event_triggers); + event_triggers, num_frames); } } @@ -1738,30 +1744,31 @@ static void get_position(struct pipe_ctx **pipe_ctx, } static void set_static_screen_control(struct pipe_ctx **pipe_ctx, - int num_pipes, const struct dc_static_screen_events *events) + int num_pipes, const struct dc_static_screen_params *params) { unsigned int i; - unsigned int value = 0; + unsigned int triggers = 0; - if (events->overlay_update) - value |= 0x100; - if (events->surface_update) - value |= 0x80; - if (events->cursor_update) - value |= 0x2; - if (events->force_trigger) - value |= 0x1; + if (params->triggers.overlay_update) + triggers |= 0x100; + if (params->triggers.surface_update) + triggers |= 0x80; + if (params->triggers.cursor_update) + triggers |= 0x2; + if (params->triggers.force_trigger) + triggers |= 0x1; if (num_pipes) { struct dc *dc = pipe_ctx[0]->stream->ctx->dc; if (dc->fbc_compressor) - value |= 0x84; + triggers |= 0x84; } for (i = 0; i < num_pipes; i++) pipe_ctx[i]->stream_res.tg->funcs-> - set_static_screen_control(pipe_ctx[i]->stream_res.tg, value); + set_static_screen_control(pipe_ctx[i]->stream_res.tg, + triggers, params->num_frames); } /*
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c index 5f7c2c5641c48ccf19f605064e557fc561bf5099..1ea7db8eeb988469fd13fe4d3ca4be2f94cf1a03 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c @@ -469,22 +469,27 @@ void dce110_timing_generator_set_drr( void dce110_timing_generator_set_static_screen_control( struct timing_generator *tg, - uint32_t value) + uint32_t event_triggers, + uint32_t num_frames) { struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); uint32_t static_screen_cntl = 0; uint32_t addr = 0; + // By register spec, it only takes an 8-bit value + if (num_frames > 0xFF) + num_frames = 0xFF; + addr = CRTC_REG(mmCRTC_STATIC_SCREEN_CONTROL); static_screen_cntl = dm_read_reg(tg->ctx, addr); set_reg_field_value(static_screen_cntl, - value, + event_triggers, CRTC_STATIC_SCREEN_CONTROL, CRTC_STATIC_SCREEN_EVENT_MASK); set_reg_field_value(static_screen_cntl, - 2, + num_frames, CRTC_STATIC_SCREEN_CONTROL, CRTC_STATIC_SCREEN_FRAME_COUNT);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h index 768ccf27ada92bcf91a5db75617ad0a353bef301..d8a5ed7b485d13fa2037ad29d0f1572301d66625 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h @@ -231,7 +231,8 @@ void dce110_timing_generator_set_drr( void dce110_timing_generator_set_static_screen_control( struct timing_generator *tg, - uint32_t value); + uint32_t event_triggers, + uint32_t num_frames); void dce110_timing_generator_get_crtc_scanoutpos( struct timing_generator *tg,
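The interface change above folds the trigger set and the frame-count threshold into one argument. Below is a hedged sketch of a caller built from the params->triggers.* and params->num_frames accesses visible in the new set_static_screen_control(); the dc->hwss hook placement is an assumption.

    /* Ask for static-screen detection on surface or cursor updates with the
     * same two-frame threshold the DRR path uses; num_frames beyond 0xFF is
     * clamped by the timing generators.
     */
    static void example_request_static_screen(struct dc *dc,
            struct pipe_ctx **pipes, int num_pipes)
    {
        struct dc_static_screen_params params = { 0 };

        params.triggers.surface_update = true; /* encoded as 0x80 */
        params.triggers.cursor_update = true;  /* encoded as 0x2 */
        params.num_frames = 2;

        dc->hwss.set_static_screen_control(pipes, num_pipes, &params);
    }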
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c index 098e56962f2a53c2468b196d267a17391518f515..82bc4e192bbf83eb29fb60f3c263f469d5f651e1 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c @@ -819,13 +819,18 @@ void dce120_tg_set_colors(struct timing_generator *tg, static void dce120_timing_generator_set_static_screen_control( struct timing_generator *tg, - uint32_t value) + uint32_t event_triggers, + uint32_t num_frames) { struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); + // By register spec, it only takes an 8-bit value + if (num_frames > 0xFF) + num_frames = 0xFF; + CRTC_REG_UPDATE_2(CRTC0_CRTC_STATIC_SCREEN_CONTROL, - CRTC_STATIC_SCREEN_EVENT_MASK, value, - CRTC_STATIC_SCREEN_FRAME_COUNT, 2); + CRTC_STATIC_SCREEN_EVENT_MASK, event_triggers, + CRTC_STATIC_SCREEN_FRAME_COUNT, num_frames); } void dce120_timing_generator_set_test_pattern(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c index 935c892622a06be6169956851729f0c89e48d931..4d3f7d5e1473afa1f61b379729f6528eb5702c64 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c @@ -88,26 +88,6 @@ enum dscl_mode_sel { DSCL_MODE_DSCL_BYPASS = 6 }; -static const struct dpp_input_csc_matrix dpp_input_csc_matrix[] = { - {COLOR_SPACE_SRGB, - {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} }, - {COLOR_SPACE_SRGB_LIMITED, - {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} }, - {COLOR_SPACE_YCBCR601, - {0x2cdd, 0x2000, 0, 0xe991, 0xe926, 0x2000, 0xf4fd, 0x10ef, - 0, 0x2000, 0x38b4, 0xe3a6} }, - {COLOR_SPACE_YCBCR601_LIMITED, - {0x3353, 0x2568, 0, 0xe400, 0xe5dc, 0x2568, 0xf367, 0x1108, - 0, 0x2568, 0x40de, 0xdd3a} }, - {COLOR_SPACE_YCBCR709, - {0x3265, 0x2000, 0, 0xe6ce, 0xf105, 0x2000, 0xfa01, 0xa7d, 0, - 0x2000, 0x3b61, 0xe24f} }, - - {COLOR_SPACE_YCBCR709_LIMITED, - {0x39a6, 0x2568, 0, 0xe0d6, 0xeedd, 0x2568, 0xf925, 0x9a8, 0, - 0x2568, 0x43ee, 0xdbb2} } -}; - static void program_gamut_remap( struct dcn10_dpp *dpp, const uint16_t *regval,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c index a02c10e23e0d661dc67b552a37d6b53b618f7665..f36a0d8cedfe1eec4b8db2766ca401991200ad41 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c @@ -930,6 +930,9 @@ static bool hubbub1_get_dcc_compression_cap(struct hubbub *hubbub, output->grph.rgb.max_compressed_blk_size = 64; output->grph.rgb.independent_64b_blks = true; break; + default: + ASSERT(false); + break; } output->capable = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 4d1301e5eaf59a4c76a18673f26f1d037cac3831..31b64733d693576b23e751616b5be1c8c34fed7e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -810,8 +810,7 @@ static void hubp1_set_vm_context0_settings(struct hubp *hubp, void min_set_viewport( struct hubp *hubp, const struct rect *viewport, - const struct rect *viewport_c, - enum dc_rotation_angle rotation) + const struct rect *viewport_c) { struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index e44eaae5033bd2717ddb375ef9c6d8514f766b8e..780af5b3c16f051343cc02024a003db1f8bf5651 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ 
-749,9 +749,7 @@ void hubp1_set_blank(struct hubp *hubp, bool blank); void min_set_viewport(struct hubp *hubp, const struct rect *viewport, - const struct rect *viewport_c, - enum dc_rotation_angle rotation); -/* rotation angle added for use by hubp21_set_viewport */ + const struct rect *viewport_c); void hubp1_clk_cntl(struct hubp *hubp, bool enable); void hubp1_vtg_sel(struct hubp *hubp, uint32_t otg_inst);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 3996fef56948ade484bc8a3019b99766444bb6ca..1008ac8a0f2afb8bf637aa8601eed3bf19214388 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -479,10 +479,10 @@ void dcn10_enable_power_gating_plane( struct dce_hwseq *hws, bool enable) { - bool force_on = 1; /* disable power gating */ + bool force_on = true; /* disable power gating */ if (enable) - force_on = 0; + force_on = false; /* DCHUBP0/1/2/3 */ REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on); @@ -860,6 +860,7 @@ static void dcn10_reset_back_end_for_pipe( struct dc_state *context) { int i; + struct dc_link *link; DC_LOGGER_INIT(dc->ctx->logger); if (pipe_ctx->stream_res.stream_enc == NULL) { pipe_ctx->stream = NULL; @@ -867,8 +868,14 @@ static void dcn10_reset_back_end_for_pipe( } if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - /* DPMS may already disable */ - if (!pipe_ctx->stream->dpms_off) + link = pipe_ctx->stream->link; + /* DPMS may already be disabled, or the dpms_off status + * may be incorrect due to the fastboot feature. When the + * system resumes from S4 with only a second screen, + * dpms_off would be true even though VBIOS has lit up + * the eDP, so check the link status too. + */ + if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) core_link_disable_stream(pipe_ctx); else if (pipe_ctx->stream_res.audio) dc->hwss.disable_audio_stream(pipe_ctx); @@ -1156,7 +1163,8 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context) } } - for (i = 0; i < dc->res_pool->pipe_count; i++) { + /* num_opp will be equal to the number of mpcc */ + for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; /* Cannot reset the MPC mux if seamless boot */ @@ -1180,8 +1188,14 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context) if (can_apply_seamless_boot && pipe_ctx->stream != NULL && pipe_ctx->stream_res.tg->funcs->is_tg_enabled( - pipe_ctx->stream_res.tg)) + pipe_ctx->stream_res.tg)) { + // Enable double buffering for OTG_BLANK whether or not + // seamless boot is enabled, to suppress global sync + // signals when the OTG is blanked. This prevents the + // pipe from requesting data while in PSR. + tg->funcs->tg_init(tg); continue; + } /* Disable on the current state so the new one isn't cleared.
*/ pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; @@ -2291,8 +2305,7 @@ static void dcn10_update_dchubp_dpp( hubp->funcs->mem_program_viewport( hubp, &pipe_ctx->plane_res.scl_data.viewport, - &pipe_ctx->plane_res.scl_data.viewport_c, - plane_state->rotation); + &pipe_ctx->plane_res.scl_data.viewport_c); } if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) { @@ -2697,6 +2710,8 @@ void dcn10_set_drr(struct pipe_ctx **pipe_ctx, struct drr_params params = {0}; // DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow unsigned int event_triggers = 0x800; + // Note that DRR trigger events are generated regardless of whether num_frames is met. + unsigned int num_frames = 2; params.vertical_total_max = vmax; params.vertical_total_min = vmin; @@ -2713,7 +2728,7 @@ void dcn10_set_drr(struct pipe_ctx **pipe_ctx, if (vmax != 0 && vmin != 0) pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control( pipe_ctx[i]->stream_res.tg, - event_triggers); + event_triggers, num_frames); } } @@ -2730,21 +2745,22 @@ void dcn10_get_position(struct pipe_ctx **pipe_ctx, } void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx, - int num_pipes, const struct dc_static_screen_events *events) + int num_pipes, const struct dc_static_screen_params *params) { unsigned int i; - unsigned int value = 0; + unsigned int triggers = 0; - if (events->surface_update) - value |= 0x80; - if (events->cursor_update) - value |= 0x2; - if (events->force_trigger) - value |= 0x1; + if (params->triggers.surface_update) + triggers |= 0x80; + if (params->triggers.cursor_update) + triggers |= 0x2; + if (params->triggers.force_trigger) + triggers |= 0x1; for (i = 0; i < num_pipes; i++) pipe_ctx[i]->stream_res.tg->funcs-> - set_static_screen_control(pipe_ctx[i]->stream_res.tg, value); + set_static_screen_control(pipe_ctx[i]->stream_res.tg, + triggers, params->num_frames); } static void dcn10_config_stereo_parameters( @@ -2895,6 +2911,33 @@ void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data) hubbub->funcs->update_dchub(hubbub, dh_data); } +static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx) +{ + struct pipe_ctx *test_pipe; + const struct rect *r1 = &pipe_ctx->plane_res.scl_data.recout, *r2; + int r1_r = r1->x + r1->width, r1_b = r1->y + r1->height, r2_r, r2_b; + + /** + * Disable the cursor if there's another pipe above this one with a + * plane that contains this pipe's viewport, to prevent a double cursor + * and incorrect scaling artifacts.
+ */ + for (test_pipe = pipe_ctx->top_pipe; test_pipe; + test_pipe = test_pipe->top_pipe) { + if (!test_pipe->plane_state->visible) + continue; + + r2 = &test_pipe->plane_res.scl_data.recout; + r2_r = r2->x + r2->width; + r2_b = r2->y + r2->height; + + if (r1->x >= r2->x && r1->y >= r2->y && r1_r <= r2_r && r1_b <= r2_b) + return true; + } + + return false; +} + void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) { struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position; @@ -2909,6 +2952,8 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) .rotation = pipe_ctx->plane_state->rotation, .mirror = pipe_ctx->plane_state->horizontal_mirror }; + bool pipe_split_on = (pipe_ctx->top_pipe != NULL) || + (pipe_ctx->bottom_pipe != NULL); int x_plane = pipe_ctx->plane_state->dst_rect.x; int y_plane = pipe_ctx->plane_state->dst_rect.y; @@ -2938,9 +2983,13 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) pos_cpy.enable = false; + if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx)) + pos_cpy.enable = false; + // Swap axis and mirror horizontally if (param.rotation == ROTATION_ANGLE_90) { uint32_t temp_x = pos_cpy.x; + pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width - (pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x; pos_cpy.y = temp_x; @@ -2948,26 +2997,44 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) // Swap axis and mirror vertically else if (param.rotation == ROTATION_ANGLE_270) { uint32_t temp_y = pos_cpy.y; - if (pos_cpy.x > pipe_ctx->plane_res.scl_data.viewport.height) { - pos_cpy.x = pos_cpy.x - pipe_ctx->plane_res.scl_data.viewport.height; - pos_cpy.y = pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.x; - } else { - pos_cpy.y = 2 * pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.x; - } + int viewport_height = + pipe_ctx->plane_res.scl_data.viewport.height; + + if (pipe_split_on) { + if (pos_cpy.x > viewport_height) { + pos_cpy.x = pos_cpy.x - viewport_height; + pos_cpy.y = viewport_height - pos_cpy.x; + } else { + pos_cpy.y = 2 * viewport_height - pos_cpy.x; + } + } else + pos_cpy.y = viewport_height - pos_cpy.x; pos_cpy.x = temp_y; } // Mirror horizontally and vertically else if (param.rotation == ROTATION_ANGLE_180) { - if (pos_cpy.x >= pipe_ctx->plane_res.scl_data.viewport.width + pipe_ctx->plane_res.scl_data.viewport.x) { - pos_cpy.x = 2 * pipe_ctx->plane_res.scl_data.viewport.width - - pos_cpy.x + 2 * pipe_ctx->plane_res.scl_data.viewport.x; - } else { - uint32_t temp_x = pos_cpy.x; - pos_cpy.x = 2 * pipe_ctx->plane_res.scl_data.viewport.x - pos_cpy.x; - if (temp_x >= pipe_ctx->plane_res.scl_data.viewport.x + (int)hubp->curs_attr.width - || pos_cpy.x <= (int)hubp->curs_attr.width + pipe_ctx->plane_state->src_rect.x) { - pos_cpy.x = temp_x + pipe_ctx->plane_res.scl_data.viewport.width; + int viewport_width = + pipe_ctx->plane_res.scl_data.viewport.width; + int viewport_x = + pipe_ctx->plane_res.scl_data.viewport.x; + + if (pipe_split_on) { + if (pos_cpy.x >= viewport_width + viewport_x) { + pos_cpy.x = 2 * viewport_width + - pos_cpy.x + 2 * viewport_x; + } else { + uint32_t temp_x = pos_cpy.x; + + pos_cpy.x = 2 * viewport_x - pos_cpy.x; + if (temp_x >= viewport_x + + (int)hubp->curs_attr.width || pos_cpy.x + <= (int)hubp->curs_attr.width + + pipe_ctx->plane_state->src_rect.x) { + pos_cpy.x = temp_x + viewport_width; + } } + } else { + pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x; } pos_cpy.y = 
pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y; }
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h index 55b8f3b2fc4edea35c0c6d22ba7e796b710a3b55..4d20f6586bb57e49de2f83de58428119a4fc9b26 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h @@ -132,7 +132,7 @@ void dcn10_get_position(struct pipe_ctx **pipe_ctx, int num_pipes, struct crtc_position *position); void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx, - int num_pipes, const struct dc_static_screen_events *events); + int num_pipes, const struct dc_static_screen_params *params); void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc); void dce110_set_avmute(struct pipe_ctx *pipe_ctx, bool enable); void dcn10_log_hw_state(struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h index 7493a630f4dcdf2250f4ca547550748e9947270a..eb13589b9a81b6579e164c781310ec169ab780f8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h @@ -124,6 +124,26 @@ struct dcn10_link_enc_registers { uint32_t RDPCSTX_PHY_CNTL13; uint32_t RDPCSTX_PHY_CNTL14; uint32_t RDPCSTX_PHY_CNTL15; + uint32_t RDPCSTX_CNTL; + uint32_t RDPCSTX_CLOCK_CNTL; + uint32_t RDPCSTX_PHY_CNTL0; + uint32_t RDPCSTX_PHY_CNTL2; + uint32_t RDPCSTX_PLL_UPDATE_DATA; + uint32_t RDPCS_TX_CR_ADDR; + uint32_t RDPCS_TX_CR_DATA; + uint32_t DPCSTX_TX_CLOCK_CNTL; + uint32_t DPCSTX_TX_CNTL; + uint32_t RDPCSTX_INTERRUPT_CONTROL; + uint32_t RDPCSTX_PHY_FUSE0; + uint32_t RDPCSTX_PHY_FUSE1; + uint32_t RDPCSTX_PHY_FUSE2; + uint32_t RDPCSTX_PHY_FUSE3; + uint32_t RDPCSTX_PHY_RX_LD_VAL; + uint32_t DPCSTX_DEBUG_CONFIG; + uint32_t RDPCSTX_DEBUG_CONFIG; + uint32_t RDPCSTX0_RDPCSTX_SCRATCH; + uint32_t RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG; + uint32_t DCIO_SOFT_RESET; /* indirect registers */ uint32_t RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2; uint32_t RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index cd7412dc42d1a23401703bea6814ce53352454ac..a9a43b397db999b8a8252a295588ba7bb604b7a8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -789,21 +789,26 @@ void optc1_set_early_control( void optc1_set_static_screen_control( struct timing_generator *optc, - uint32_t value) + uint32_t event_triggers, + uint32_t num_frames) { struct optc *optc1 = DCN10TG_FROM_TG(optc); + // By register spec, it only takes an 8-bit value + if (num_frames > 0xFF) + num_frames = 0xFF; + /* Bit 8 is no longer applicable in RV for PSR case, * set bit 8 to 0 if given */ - if ((value & STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN) + if ((event_triggers & STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN) != 0) - value = value & + event_triggers = event_triggers & ~STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN; REG_SET_2(OTG_STATIC_SCREEN_CONTROL, 0, - OTG_STATIC_SCREEN_EVENT_MASK, value, - OTG_STATIC_SCREEN_FRAME_COUNT, 2); + OTG_STATIC_SCREEN_EVENT_MASK, event_triggers, + OTG_STATIC_SCREEN_FRAME_COUNT, num_frames); } void optc1_setup_manual_trigger(struct timing_generator *optc)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h index 3afeb1a30f214da63c6c173a68fd023525863915..f277656d5464629e9a53e2802fc791c0745a1c00 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h @@ -625,7 +625,8 @@ void optc1_set_drr( void optc1_set_static_screen_control( struct timing_generator *optc, - uint32_t value); + uint32_t event_triggers, + uint32_t num_frames); void optc1_program_stereo(struct timing_generator *optc, const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile index fd52862d6624b793957e43eb79fc90b0db5d1b1d..5fcaf78334ff9a96a24948422733a310bbe08fd2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile @@ -9,7 +9,13 @@ DCN20 = dcn20_resource.o dcn20_init.o dcn20_hwseq.o dcn20_dpp.o dcn20_dpp_cm.o d DCN20 += dcn20_dsc.o +ifdef CONFIG_X86 CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -msse +endif + +ifdef CONFIG_PPC64 +CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -maltivec +endif ifdef CONFIG_CC_IS_GCC ifeq ($(call cc-ifversion, -lt, 0701, y), y) @@ -17,6 +23,7 @@ IS_OLD_GCC = 1 endif endif +ifdef CONFIG_X86 ifdef IS_OLD_GCC # Stack alignment mismatch, proceed with caution. # GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 @@ -25,6 +32,7 @@ CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -mpreferred-stack-boundary=4 else CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -msse2 endif +endif AMD_DAL_DCN20 = $(addprefix $(AMDDALPATH)/dc/dcn20/,$(DCN20))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c index 1e1151356e6063089fffac01ed08fffe81c4cb02..50bffbfdd3947b8fecca4c522e033cea299859ac 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c @@ -50,20 +50,20 @@ void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk) if (dccg->ref_dppclk && req_dppclk) { int ref_dppclk = dccg->ref_dppclk; + int modulo, phase; - ASSERT(req_dppclk <= ref_dppclk); - /* need to clamp to 8 bits */ - if (ref_dppclk > 0xff) { - int divider = (ref_dppclk + 0xfe) / 0xff; + // phase / modulo = dpp pipe clk / dpp global clk + modulo = 0xff; // keep the modulo fixed at the maximum 8-bit value + phase = ((modulo * req_dppclk) + ref_dppclk - 1) / ref_dppclk; - ref_dppclk /= divider; - req_dppclk = (req_dppclk + divider - 1) / divider; - if (req_dppclk > ref_dppclk) - req_dppclk = ref_dppclk; + if (phase > 0xff) { + ASSERT(false); + phase = 0xff; } + REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, - DPPCLK0_DTO_PHASE, req_dppclk, - DPPCLK0_DTO_MODULO, ref_dppclk); + DPPCLK0_DTO_PHASE, phase, + DPPCLK0_DTO_MODULO, modulo); REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_ENABLE[dpp_inst], 1); } else {
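The reworked DTO programming pins the modulo at 0xFF and derives the phase with a round-up divide, so phase/modulo becomes the smallest 8-bit ratio that does not undershoot req_dppclk/ref_dppclk. A self-contained arithmetic check of that formula, with made-up clock values:

    /* Standalone check of the DTO math in dccg2_update_dpp_dto(). */
    #include <stdio.h>

    int main(void)
    {
        int ref_dppclk = 1200; /* hypothetical global DPP clock */
        int req_dppclk = 700;  /* hypothetical per-pipe request */
        int modulo = 0xff;     /* fixed, as in the patch */
        int phase = ((modulo * req_dppclk) + ref_dppclk - 1) / ref_dppclk;

        if (phase > 0xff) /* mirrors the ASSERT + clamp in the patch */
            phase = 0xff;

        /* Prints phase=149: 149/255 = 0.5843..., just above 700/1200 = 0.5833... */
        printf("phase=%d modulo=%d\n", phase, modulo);
        return 0;
    }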
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c index 4d7e45892f087ac81db26a2aaff99de449201b99..13e057d7ee93da28efd01a55cbcea1873801e09b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c @@ -104,7 +104,7 @@ static void dpp2_cnv_setup ( uint32_t pixel_format = 0; uint32_t alpha_en = 1; enum dc_color_space color_space = COLOR_SPACE_SRGB; - enum dcn10_input_csc_select select = INPUT_CSC_SELECT_BYPASS; + enum dcn20_input_csc_select select = DCN2_ICSC_SELECT_BYPASS; + bool force_disable_cursor = false; struct out_csc_color_matrix tbl_entry; uint32_t is_2bit = 0; @@ -145,25 +145,25 @@ static void dpp2_cnv_setup ( force_disable_cursor = false; pixel_format = 65; color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; + select = DCN2_ICSC_SELECT_ICSC_A; break; case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: force_disable_cursor = true; pixel_format = 64; color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; + select = DCN2_ICSC_SELECT_ICSC_A; break; case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: force_disable_cursor = true; pixel_format = 67; color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; + select = DCN2_ICSC_SELECT_ICSC_A; break; case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: force_disable_cursor = true; pixel_format = 66; color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; + select = DCN2_ICSC_SELECT_ICSC_A; break; case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: pixel_format = 22; @@ -177,7 +177,7 @@ static void dpp2_cnv_setup ( case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888: pixel_format = 12; color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; + select = DCN2_ICSC_SELECT_ICSC_A; break; case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX: pixel_format = 112; @@ -188,13 +188,13 @@ static void dpp2_cnv_setup ( case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010: pixel_format = 114; color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; + select = DCN2_ICSC_SELECT_ICSC_A; is_2bit = 1; break; case SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102: pixel_format = 115; color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; + select = DCN2_ICSC_SELECT_ICSC_A; is_2bit = 1; break; case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT: @@ -227,13 +227,13 @@ static void dpp2_cnv_setup ( tbl_entry.color_space = input_color_space; if (color_space >= COLOR_SPACE_YCBCR601) - select = INPUT_CSC_SELECT_ICSC; + select = DCN2_ICSC_SELECT_ICSC_A; else - select = INPUT_CSC_SELECT_BYPASS; + select = DCN2_ICSC_SELECT_BYPASS; - dpp1_program_input_csc(dpp_base, color_space, select, &tbl_entry); + dpp2_program_input_csc(dpp_base, color_space, select, &tbl_entry); } else - dpp1_program_input_csc(dpp_base, color_space, select, NULL); + dpp2_program_input_csc(dpp_base, color_space, select, NULL); if (force_disable_cursor) { REG_UPDATE(CURSOR_CONTROL, @@ -458,7 +458,7 @@ static struct dpp_funcs dcn20_dpp_funcs = { .dpp_reset = dpp_reset, .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, .dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps, - .dpp_set_gamut_remap = dpp1_cm_set_gamut_remap, + .dpp_set_gamut_remap = dpp2_cm_set_gamut_remap, .dpp_set_csc_adjustment = NULL, .dpp_set_csc_default = NULL, .dpp_program_regamma_pwl = oppn20_dummy_program_regamma_pwl,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h index 5b03b737b1d6104f2e95ea6e9780f6e940ce77dc..27610251c57f1df2c675e9e256e6d9a1d9850781 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h @@ -150,6 +150,16 @@ SRI(CM_SHAPER_RAMA_REGION_32_33, CM, id), \ SRI(CM_SHAPER_LUT_INDEX, CM, id) +#define TF_REG_LIST_DCN20_COMMON_APPEND(id) \ + SRI(CM_GAMUT_REMAP_B_C11_C12, CM, id),\ + SRI(CM_GAMUT_REMAP_B_C13_C14, CM, id),\ + SRI(CM_GAMUT_REMAP_B_C21_C22, CM, id),\ + SRI(CM_GAMUT_REMAP_B_C23_C24, CM, id),\ + SRI(CM_GAMUT_REMAP_B_C31_C32, CM, id),\ + SRI(CM_GAMUT_REMAP_B_C33_C34, CM, id),\ + SRI(CM_ICSC_B_C11_C12, CM,
id), \ + SRI(CM_ICSC_B_C33_C34, CM, id) + #define TF_REG_LIST_DCN20(id) \ TF_REG_LIST_DCN(id), \ TF_REG_LIST_DCN20_COMMON(id), \ @@ -572,10 +582,29 @@ TF_SF(DSCL0_OBUF_MEM_PWR_CTRL, OBUF_MEM_PWR_FORCE, mask_sh),\ TF_SF(DSCL0_DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, mask_sh) +/* DPP CM debug status register: + * + * Status index including current ICSC, Gamut Remap Mode is 9 + * ICSC Mode: [4..3] + * Gamut Remap Mode: [10..9] + */ +#define CM_TEST_DEBUG_DATA_STATUS_IDX 9 + +#define TF_DEBUG_REG_LIST_SH_DCN20 \ + TF_DEBUG_REG_LIST_SH_DCN10, \ + .CM_TEST_DEBUG_DATA_ICSC_MODE = 3, \ + .CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE = 9 + +#define TF_DEBUG_REG_LIST_MASK_DCN20 \ + TF_DEBUG_REG_LIST_MASK_DCN10, \ + .CM_TEST_DEBUG_DATA_ICSC_MODE = 0x18, \ + .CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE = 0x600 #define TF_REG_FIELD_LIST_DCN2_0(type) \ TF_REG_FIELD_LIST(type) \ type CM_BLNDGAM_LUT_DATA; \ + type CM_TEST_DEBUG_DATA_ICSC_MODE; \ + type CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE; \ type FORMAT_CNV16; \ type CNVC_BYPASS_MSB_ALIGN; \ type CLAMP_POSITIVE; \ @@ -630,11 +659,22 @@ struct dcn2_dpp_mask { uint32_t COLOR_KEYER_RED; \ uint32_t COLOR_KEYER_GREEN; \ uint32_t COLOR_KEYER_BLUE; \ - uint32_t OBUF_MEM_PWR_CTRL;\ + uint32_t OBUF_MEM_PWR_CTRL; \ uint32_t DSCL_MEM_PWR_CTRL +#define DPP_DCN2_REG_VARIABLE_LIST_CM_APPEND \ + uint32_t CM_GAMUT_REMAP_B_C11_C12; \ + uint32_t CM_GAMUT_REMAP_B_C13_C14; \ + uint32_t CM_GAMUT_REMAP_B_C21_C22; \ + uint32_t CM_GAMUT_REMAP_B_C23_C24; \ + uint32_t CM_GAMUT_REMAP_B_C31_C32; \ + uint32_t CM_GAMUT_REMAP_B_C33_C34; \ + uint32_t CM_ICSC_B_C11_C12; \ + uint32_t CM_ICSC_B_C33_C34 + struct dcn2_dpp_registers { DPP_DCN2_REG_VARIABLE_LIST; + DPP_DCN2_REG_VARIABLE_LIST_CM_APPEND; }; struct dcn20_dpp { @@ -656,6 +696,18 @@ struct dcn20_dpp { struct pwl_params pwl_data; }; +enum dcn20_input_csc_select { + DCN2_ICSC_SELECT_BYPASS = 0, + DCN2_ICSC_SELECT_ICSC_A = 1, + DCN2_ICSC_SELECT_ICSC_B = 2 +}; + +enum dcn20_gamut_remap_select { + DCN2_GAMUT_REMAP_BYPASS = 0, + DCN2_GAMUT_REMAP_COEF_A = 1, + DCN2_GAMUT_REMAP_COEF_B = 2 +}; + void dpp20_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s); @@ -667,6 +719,16 @@ void dpp2_set_degamma( struct dpp *dpp_base, enum ipp_degamma_mode mode); +void dpp2_cm_set_gamut_remap( + struct dpp *dpp_base, + const struct dpp_grph_csc_adjustment *adjust); + +void dpp2_program_input_csc( + struct dpp *dpp_base, + enum dc_color_space color_space, + enum dcn20_input_csc_select input_select, + const struct out_csc_color_matrix *tbl_entry); + bool dpp20_program_blnd_lut( struct dpp *dpp_base, const struct pwl_params *params); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c index 05a3e7f97ef05eb78ae3e6244f86b0a8344e6851..8dc3d1f7398422e49bc7823e3c5bad56ab111673 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c @@ -36,6 +36,9 @@ #define REG(reg)\ dpp->tf_regs->reg +#define IND_REG(index) \ + (index) + #define CTX \ dpp->base.ctx @@ -44,9 +47,6 @@ dpp->tf_shift->field_name, dpp->tf_mask->field_name - - - static void dpp2_enable_cm_block( struct dpp *dpp_base) { @@ -158,6 +158,155 @@ void dpp2_set_degamma( } } +static void program_gamut_remap( + struct dcn20_dpp *dpp, + const uint16_t *regval, + enum dcn20_gamut_remap_select select) +{ + uint32_t cur_select = 0; + struct color_matrices_reg gam_regs; + + if (regval == NULL || select == DCN2_GAMUT_REMAP_BYPASS) { + REG_SET(CM_GAMUT_REMAP_CONTROL, 0, + CM_GAMUT_REMAP_MODE, 0); 
+ return; + } + + /* determine which gamut_remap coefficients (A or B) we are using + * currently. select the alternate set to double buffer + * the update so gamut_remap is updated on frame boundary + */ + IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA, + CM_TEST_DEBUG_DATA_STATUS_IDX, + CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE, &cur_select); + + /* value stored in dbg reg will be 1 greater than mode we want */ + if (cur_select != DCN2_GAMUT_REMAP_COEF_A) + select = DCN2_GAMUT_REMAP_COEF_A; + else + select = DCN2_GAMUT_REMAP_COEF_B; + + gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; + gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11; + gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; + gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; + + if (select == DCN2_GAMUT_REMAP_COEF_A) { + gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); + } else { + gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34); + } + + cm_helper_program_color_matrices( + dpp->base.ctx, + regval, + &gam_regs); + + REG_SET( + CM_GAMUT_REMAP_CONTROL, 0, + CM_GAMUT_REMAP_MODE, select); + +} + +void dpp2_cm_set_gamut_remap( + struct dpp *dpp_base, + const struct dpp_grph_csc_adjustment *adjust) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + int i = 0; + + if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) + /* Bypass if type is bypass or hw */ + program_gamut_remap(dpp, NULL, DCN2_GAMUT_REMAP_BYPASS); + else { + struct fixed31_32 arr_matrix[12]; + uint16_t arr_reg_val[12]; + + for (i = 0; i < 12; i++) + arr_matrix[i] = adjust->temperature_matrix[i]; + + convert_float_matrix( + arr_reg_val, arr_matrix, 12); + + program_gamut_remap(dpp, arr_reg_val, DCN2_GAMUT_REMAP_COEF_A); + } +} + +void dpp2_program_input_csc( + struct dpp *dpp_base, + enum dc_color_space color_space, + enum dcn20_input_csc_select input_select, + const struct out_csc_color_matrix *tbl_entry) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + int i; + int arr_size = sizeof(dpp_input_csc_matrix)/sizeof(struct dpp_input_csc_matrix); + const uint16_t *regval = NULL; + uint32_t cur_select = 0; + enum dcn20_input_csc_select select; + struct color_matrices_reg icsc_regs; + + if (input_select == DCN2_ICSC_SELECT_BYPASS) { + REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, 0); + return; + } + + if (tbl_entry == NULL) { + for (i = 0; i < arr_size; i++) + if (dpp_input_csc_matrix[i].color_space == color_space) { + regval = dpp_input_csc_matrix[i].regval; + break; + } + + if (regval == NULL) { + BREAK_TO_DEBUGGER(); + return; + } + } else { + regval = tbl_entry->regval; + } + + /* determine which CSC coefficients (A or B) we are using + * currently. 
select the alternate set to double buffer + * the CSC update so CSC is updated on frame boundary + */ + IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA, + CM_TEST_DEBUG_DATA_STATUS_IDX, + CM_TEST_DEBUG_DATA_ICSC_MODE, &cur_select); + + if (cur_select != DCN2_ICSC_SELECT_ICSC_A) + select = DCN2_ICSC_SELECT_ICSC_A; + else + select = DCN2_ICSC_SELECT_ICSC_B; + + icsc_regs.shifts.csc_c11 = dpp->tf_shift->CM_ICSC_C11; + icsc_regs.masks.csc_c11 = dpp->tf_mask->CM_ICSC_C11; + icsc_regs.shifts.csc_c12 = dpp->tf_shift->CM_ICSC_C12; + icsc_regs.masks.csc_c12 = dpp->tf_mask->CM_ICSC_C12; + + if (select == DCN2_ICSC_SELECT_ICSC_A) { + + icsc_regs.csc_c11_c12 = REG(CM_ICSC_C11_C12); + icsc_regs.csc_c33_c34 = REG(CM_ICSC_C33_C34); + + } else { + + icsc_regs.csc_c11_c12 = REG(CM_ICSC_B_C11_C12); + icsc_regs.csc_c33_c34 = REG(CM_ICSC_B_C33_C34); + + } + + cm_helper_program_color_matrices( + dpp->base.ctx, + regval, + &icsc_regs); + + REG_SET(CM_ICSC_CONTROL, 0, + CM_ICSC_MODE, select); +} + static void dpp20_power_on_blnd_lut( struct dpp *dpp_base, bool power_on)
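dpp2_program_input_csc() above, program_gamut_remap() before it, and the MPC output-CSC changes later in this patch all share one idiom: read back which coefficient set (A or B) the hardware is currently latched to via a debug status word, program the idle set, then flip the mode so the switch lands on a frame boundary. A distilled, compilable sketch, with stub accessors standing in for the IX_REG_GET readback, the cm_helper_program_color_matrices() write, and the REG_SET mode flip:

    #include <stdint.h>

    enum csc_sel { CSC_BYPASS = 0, CSC_COEF_A = 1, CSC_COEF_B = 2 };

    /* Stubs replacing the real register accesses; illustration only. */
    static enum csc_sel hw_current_select(void) { return CSC_COEF_A; }
    static void hw_program_coefficients(enum csc_sel sel, const uint16_t *regval)
    { (void)sel; (void)regval; }
    static void hw_set_mode(enum csc_sel sel) { (void)sel; }

    static void program_csc_double_buffered(const uint16_t *regval)
    {
        /* Read which set the hardware is latched to... */
        enum csc_sel cur = hw_current_select();
        /* ...then program and switch to the idle set. */
        enum csc_sel next = (cur != CSC_COEF_A) ? CSC_COEF_A : CSC_COEF_B;

        hw_program_coefficients(next, regval);
        hw_set_mode(next); /* takes effect at the next frame boundary */
    }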
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c index 0111545dac75d9035044dcd5d39732f9f7e54969..6bdfee20b6a79a17d1c068a0a67a6887c40a94db 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c @@ -206,6 +206,9 @@ static bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const str struct dsc_reg_values dsc_reg_vals; struct dsc_optc_config dsc_optc_cfg; + memset(&dsc_reg_vals, 0, sizeof(dsc_reg_vals)); + memset(&dsc_optc_cfg, 0, sizeof(dsc_optc_cfg)); + DC_LOG_DSC("Getting packed DSC PPS for DSC Config:"); dsc_config_log(dsc, dsc_cfg); DC_LOG_DSC("DSC Picture Parameter Set (PPS):");
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c index 8b84385661019e3212b8c0b272938462ccfac719..9235f7d294545ea9296c9fcecee3450f4b57d2c6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c @@ -293,6 +293,9 @@ bool hubbub2_get_dcc_compression_cap(struct hubbub *hubbub, output->grph.rgb.max_compressed_blk_size = 64; output->grph.rgb.independent_64b_blks = true; break; + default: + ASSERT(false); + break; } output->capable = true; output->const_color_support = true; @@ -601,7 +604,8 @@ static const struct hubbub_funcs hubbub2_funcs = { .wm_read_state = hubbub2_wm_read_state, .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq, .program_watermarks = hubbub2_program_watermarks, - .allow_self_refresh_control = hubbub1_allow_self_refresh_control + .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, + .allow_self_refresh_control = hubbub1_allow_self_refresh_control, }; void hubbub2_construct(struct dcn20_hubbub *hubbub,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 32878a65bdd7cbb1c1ddc1b7ef97ce4f416434cf..cfbbaffa865475933d069f647be2df3bb02b322a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -183,10 +183,10 @@ void dcn20_enable_power_gating_plane( struct dce_hwseq *hws, bool enable) { - bool force_on = 1; /* disable power gating */ + bool force_on = true; /* disable power gating */ if (enable) - force_on = 0; + force_on = false; /* DCHUBP0/1/2/3/4/5 */ REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on); @@ -686,9 +686,13 @@ enum dc_status dcn20_enable_stream_timing( // DRR should set trigger event to monitor surface update event if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0) event_triggers = 0x80; + /* Event triggers and num frames initialized for DRR, but can be + * later updated for PSR use. Note that DRR trigger events are + * generated regardless of whether num_frames is met. + */ if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control) pipe_ctx->stream_res.tg->funcs->set_static_screen_control( - pipe_ctx->stream_res.tg, event_triggers); + pipe_ctx->stream_res.tg, event_triggers, 2); /* TODO program crtc source select for non-virtual signal*/ /* TODO program FMT */ @@ -941,6 +945,9 @@ void dcn20_blank_pixel_data( int width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; int height = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top; + if (stream->link->test_pattern_enabled) + return; + /* get opp dpg blank color */ color_space_to_black_color(dc, color_space, &black_color); @@ -1305,6 +1312,7 @@ static void dcn20_update_dchubp_dpp( struct hubp *hubp = pipe_ctx->plane_res.hubp; struct dpp *dpp = pipe_ctx->plane_res.dpp; struct dc_plane_state *plane_state = pipe_ctx->plane_state; + bool viewport_changed = false; if (pipe_ctx->update_flags.bits.dppclk) dpp->funcs->dpp_dppclk_control(dpp, false, true); @@ -1355,9 +1363,9 @@ static void dcn20_update_dchubp_dpp( || plane_state->update_flags.bits.global_alpha_change || plane_state->update_flags.bits.per_pixel_alpha_change) { // MPCC inst is equal to pipe index in practice - int mpcc_inst = pipe_ctx->pipe_idx; + int mpcc_inst = hubp->inst; int opp_inst; - int opp_count = dc->res_pool->res_cap->num_opp; + int opp_count = dc->res_pool->pipe_count; for (opp_inst = 0; opp_inst < opp_count; opp_inst++) { if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) { @@ -1383,15 +1391,18 @@ static void dcn20_update_dchubp_dpp( if (pipe_ctx->update_flags.bits.viewport || (context == dc->current_state && plane_state->update_flags.bits.scaling_change) || - (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) + (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) { + hubp->funcs->mem_program_viewport( hubp, &pipe_ctx->plane_res.scl_data.viewport, - &pipe_ctx->plane_res.scl_data.viewport_c, - plane_state->rotation); + &pipe_ctx->plane_res.scl_data.viewport_c); + viewport_changed = true; + } /* Any updates are handled in dc interface, just need to apply existing for plane enable */ - if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed) + if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed || + pipe_ctx->update_flags.bits.scaler || pipe_ctx->update_flags.bits.viewport) && pipe_ctx->stream->cursor_attributes.address.quad_part != 0) { dc->hwss.set_cursor_position(pipe_ctx); dc->hwss.set_cursor_attribute(pipe_ctx); @@ -1441,9 +1452,14 @@ static void dcn20_update_dchubp_dpp( hubp->power_gated = false; } + if (hubp->funcs->apply_PLAT_54186_wa && viewport_changed) + hubp->funcs->apply_PLAT_54186_wa(hubp, &plane_state->address); + if (pipe_ctx->update_flags.bits.enable || plane_state->update_flags.bits.addr_update) hws->funcs.update_plane_addr(dc, pipe_ctx); + + if (pipe_ctx->update_flags.bits.enable) hubp->funcs->set_blank(hubp, false); } @@ -1629,9 +1645,9 @@ void dcn20_program_front_end_for_ctx( struct hubp *hubp = pipe->plane_res.hubp;
int j = 0; - for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS + for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS*1000 && hubp->funcs->hubp_is_flip_pending(hubp); j++) - msleep(1); + mdelay(1); } } @@ -1731,7 +1747,6 @@ bool dcn20_update_bandwidth( void dcn20_enable_writeback( struct dc *dc, - const struct dc_stream_status *stream_status, struct dc_writeback_info *wb_info, struct dc_state *context) { @@ -1745,8 +1760,7 @@ void dcn20_enable_writeback( mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst]; /* set the OPTC source mux */ - ASSERT(stream_status->primary_otg_inst < MAX_PIPES); - optc = dc->res_pool->timing_generators[stream_status->primary_otg_inst]; + optc = dc->res_pool->timing_generators[dwb->otg_inst]; optc->funcs->set_dwb_source(optc, wb_info->dwb_pipe_inst); /* set MCIF_WB buffer and arbitration configuration */ mcif_wb->funcs->config_mcif_buf(mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height); @@ -1995,6 +2009,7 @@ static void dcn20_reset_back_end_for_pipe( struct dc_state *context) { int i; + struct dc_link *link; DC_LOGGER_INIT(dc->ctx->logger); if (pipe_ctx->stream_res.stream_enc == NULL) { pipe_ctx->stream = NULL; @@ -2002,8 +2017,14 @@ } if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - /* DPMS may already disable */ - if (!pipe_ctx->stream->dpms_off) + link = pipe_ctx->stream->link; + /* DPMS may already be disabled, or the dpms_off status + * may be incorrect due to the fastboot feature. When the + * system resumes from S4 with only a second screen, + * dpms_off would be true even though VBIOS has lit up + * the eDP, so check the link status too. + */ + if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) core_link_disable_stream(pipe_ctx); else if (pipe_ctx->stream_res.audio) dc->hwss.disable_audio_stream(pipe_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h index eecd7a26ec4c41556b06df516ff97475adecf262..02c9be5ebd47480ecfe6029f9831522f85159388 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h @@ -104,7 +104,6 @@ void dcn20_program_triple_buffer( bool enable_triple_buffer); void dcn20_enable_writeback( struct dc *dc, - const struct dc_stream_status *stream_status, struct dc_writeback_info *wb_info, struct dc_state *context); void dcn20_disable_writeback(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h index 62dfd34c69f184fabeb4acc6de11c7d108700ab9..8cab8107fd94c2094610685a1e2bd6db0f15c027 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h @@ -33,7 +33,142 @@ SRI(AUX_DPHY_TX_CONTROL, DP_AUX, id) #define UNIPHY_MASK_SH_LIST(mask_sh)\ - LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_LINK_ENABLE, mask_sh) + LE_SF(SYMCLKA_CLOCK_ENABLE, SYMCLKA_CLOCK_ENABLE, mask_sh),\ + LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_LINK_ENABLE, mask_sh),\ + LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL0_XBAR_SOURCE, mask_sh),\ + LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL1_XBAR_SOURCE, mask_sh),\ + LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL2_XBAR_SOURCE, mask_sh),\ + LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL3_XBAR_SOURCE, mask_sh) + +#define DPCS_MASK_SH_LIST(mask_sh)\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_CLK_RDY, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_DATA_EN, mask_sh),\ +
LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_CLK_RDY, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_DATA_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_CLK_RDY, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_DATA_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_CLK_RDY, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_DATA_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX0_TERM_CTRL, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX1_TERM_CTRL, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX2_TERM_CTRL, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL4, RDPCS_PHY_DP_TX3_TERM_CTRL, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_DP_MPLLB_MULTIPLIER, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX0_WIDTH, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX0_RATE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX1_WIDTH, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX1_RATE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_PSTATE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_PSTATE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_MPLL_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_MPLL_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_QUOT, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_DEN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL8, RDPCS_PHY_DP_MPLLB_SSC_PEAK, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL9, RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL9, RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL10, RDPCS_PHY_DP_MPLLB_FRACN_REM, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_DP_REF_CLK_MPLLB_DIV, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_HDMI_MPLLB_HDMI_DIV, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_SSC_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_TX_CLK_DIV, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL12, RDPCS_PHY_DP_MPLLB_STATE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL13, RDPCS_PHY_DP_MPLLB_DIV_CLK_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL13, RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL14, RDPCS_PHY_DP_MPLLB_FRACN_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL14, RDPCS_PHY_DP_MPLLB_PMIX_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE0_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE1_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE2_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_LANE3_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_RD_START_DELAY, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_EXT_REFCLK_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SRAMCLK_BYPASS, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SRAMCLK_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SRAMCLK_CLOCK_ON, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SYMCLK_DIV2_CLOCK_ON, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SYMCLK_DIV2_GATE_DIS, mask_sh),\ + 
LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_SYMCLK_DIV2_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_DISABLE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_DISABLE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_DISABLE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_DISABLE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_REQ, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_REQ, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_REQ, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_REQ, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_ACK, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_ACK, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_ACK, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_ACK, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX0_RESET, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX1_RESET, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX2_RESET, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL3, RDPCS_PHY_DP_TX3_RESET, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_RESET, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_CR_MUX_SEL, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_REF_RANGE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_SRAM_BYPASS, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_SRAM_EXT_LD_DONE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_HDMIMODE_ENABLE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_SRAM_INIT_DONE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DP4_POR, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA, RDPCS_PLL_UPDATE_DATA, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_REG_FIFO_ERROR_MASK, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_TX_FIFO_ERROR_MASK, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_DPALT_DISABLE_TOGGLE_MASK, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL, RDPCS_DPALT_4LANE_TOGGLE_MASK, mask_sh),\ + LE_SF(RDPCSTX0_RDPCS_TX_CR_ADDR, RDPCS_TX_CR_ADDR, mask_sh),\ + LE_SF(RDPCSTX0_RDPCS_TX_CR_DATA, RDPCS_TX_CR_DATA, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_MPLLB_V2I, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_MAIN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_PRE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_POST, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_MPLLB_FREQ_VCO, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_MPLLB_CP_INT, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_MPLLB_CP_PROP, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_MAIN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_PRE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_POST, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_MAIN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_PRE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_POST, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_MAIN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_FINETUNE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_RANGE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_PRE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, 
RDPCS_PHY_DP_TX3_EQ_POST, mask_sh),\ + LE_SF(DPCSTX0_DPCSTX_TX_CLOCK_CNTL, DPCS_SYMCLK_CLOCK_ON, mask_sh),\ + LE_SF(DPCSTX0_DPCSTX_TX_CLOCK_CNTL, DPCS_SYMCLK_GATE_DIS, mask_sh),\ + LE_SF(DPCSTX0_DPCSTX_TX_CLOCK_CNTL, DPCS_SYMCLK_EN, mask_sh),\ + LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_SWAP, mask_sh),\ + LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_ORDER_INVERT, mask_sh),\ + LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_EN, mask_sh),\ + LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_RD_START_DELAY, mask_sh),\ + LE_SF(DPCSTX0_DPCSTX_DEBUG_CONFIG, DPCS_DBG_CBUS_DIS, mask_sh) + +#define DPCS_DCN2_MASK_SH_LIST(mask_sh)\ + DPCS_MASK_SH_LIST(mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL, RDPCS_PHY_RX_REF_LD_VAL, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL, RDPCS_PHY_RX_VCO_LD_VAL, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE_ACK, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX0_PSTATE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX1_PSTATE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX0_MPLL_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX1_MPLL_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_REF_CLK_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX2_WIDTH, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX2_RATE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX3_WIDTH, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX3_RATE, mask_sh),\ + LE_SF(DCIO_SOFT_RESET, UNIPHYA_SOFT_RESET, mask_sh),\ + LE_SF(DCIO_SOFT_RESET, UNIPHYB_SOFT_RESET, mask_sh),\ + LE_SF(DCIO_SOFT_RESET, UNIPHYC_SOFT_RESET, mask_sh),\ + LE_SF(DCIO_SOFT_RESET, UNIPHYD_SOFT_RESET, mask_sh),\ + LE_SF(DCIO_SOFT_RESET, UNIPHYE_SOFT_RESET, mask_sh) #define LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh)\ LINK_ENCODER_MASK_SH_LIST_DCN10(mask_sh),\ @@ -63,6 +198,49 @@ SRI(CLOCK_ENABLE, SYMCLK, id), \ SRI(CHANNEL_XBAR_CNTL, UNIPHY, id) +#define DPCS_DCN2_CMN_REG_LIST(id) \ + SRI(DIG_LANE_ENABLE, DIG, id), \ + SRI(TMDS_CTL_BITS, DIG, id), \ + SRI(RDPCSTX_PHY_CNTL3, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_CNTL4, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_CNTL5, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_CNTL6, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_CNTL7, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_CNTL8, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_CNTL9, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_CNTL10, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_CNTL11, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_CNTL12, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_CNTL13, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_CNTL14, RDPCSTX, id), \ + SRI(RDPCSTX_CNTL, RDPCSTX, id), \ + SRI(RDPCSTX_CLOCK_CNTL, RDPCSTX, id), \ + SRI(RDPCSTX_INTERRUPT_CONTROL, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_CNTL0, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_CNTL2, RDPCSTX, id), \ + SRI(RDPCSTX_PLL_UPDATE_DATA, RDPCSTX, id), \ + SRI(RDPCS_TX_CR_ADDR, RDPCSTX, id), \ + SRI(RDPCS_TX_CR_DATA, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_FUSE0, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_FUSE1, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_FUSE2, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \ + SRI(DPCSTX_TX_CLOCK_CNTL, DPCSTX, id), \ + SRI(DPCSTX_TX_CNTL, DPCSTX, id), \ + SRI(DPCSTX_DEBUG_CONFIG, DPCSTX, id), \ + SRI(RDPCSTX_DEBUG_CONFIG, RDPCSTX, id), \ + SR(RDPCSTX0_RDPCSTX_SCRATCH) + + +#define DPCS_DCN2_REG_LIST(id) \ + DPCS_DCN2_CMN_REG_LIST(id), \ + SRI(RDPCSTX_PHY_RX_LD_VAL, RDPCSTX, id),\ + SRI(RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCSTX, id) + +#define LE_DCN2_REG_LIST(id) \ + LE_DCN10_REG_LIST(id), \ + SR(DCIO_SOFT_RESET) + struct mpll_cfg { 
uint32_t mpllb_ana_v2i; uint32_t mpllb_ana_freq_vco; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c index f90031ed58a640c344d6b2489057fe05ec9eb768..de9c857ab3e97abf9bd8ca0e53896a391b46e55f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c @@ -33,6 +33,9 @@ #define REG(reg)\ mpc20->mpc_regs->reg +#define IND_REG(index) \ + (index) + #define CTX \ mpc20->base.ctx @@ -132,19 +135,33 @@ void mpc2_set_output_csc( const uint16_t *regval, enum mpc_output_csc_mode ocsc_mode) { + uint32_t cur_mode; struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); struct color_matrices_reg ocsc_regs; - REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode); - - if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) + if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) { + REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode); return; + } if (regval == NULL) { BREAK_TO_DEBUGGER(); return; } + /* determine which CSC coefficients (A or B) we are using + * currently. select the alternate set to double buffer + * the CSC update so CSC is updated on frame boundary + */ + IX_REG_GET(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_DATA, + MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX, + MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE, &cur_mode); + + if (cur_mode != MPC_OUTPUT_CSC_COEF_A) + ocsc_mode = MPC_OUTPUT_CSC_COEF_A; + else + ocsc_mode = MPC_OUTPUT_CSC_COEF_B; + ocsc_regs.shifts.csc_c11 = mpc20->mpc_shift->MPC_OCSC_C11_A; ocsc_regs.masks.csc_c11 = mpc20->mpc_mask->MPC_OCSC_C11_A; ocsc_regs.shifts.csc_c12 = mpc20->mpc_shift->MPC_OCSC_C12_A; @@ -157,10 +174,13 @@ void mpc2_set_output_csc( ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_B[opp_id]); ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_B[opp_id]); } + cm_helper_program_color_matrices( mpc20->base.ctx, regval, &ocsc_regs); + + REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode); } void mpc2_set_ocsc_default( @@ -169,14 +189,16 @@ void mpc2_set_ocsc_default( enum dc_color_space color_space, enum mpc_output_csc_mode ocsc_mode) { + uint32_t cur_mode; struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); uint32_t arr_size; struct color_matrices_reg ocsc_regs; const uint16_t *regval = NULL; - REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode); - if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) + if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE) { + REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode); return; + } regval = find_color_matrix(color_space, &arr_size); @@ -185,6 +207,19 @@ void mpc2_set_ocsc_default( return; } + /* determine which CSC coefficients (A or B) we are using + * currently. 
select the alternate set to double buffer + * the CSC update so CSC is updated on frame boundary + */ + IX_REG_GET(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_DATA, + MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX, + MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE, &cur_mode); + + if (cur_mode != MPC_OUTPUT_CSC_COEF_A) + ocsc_mode = MPC_OUTPUT_CSC_COEF_A; + else + ocsc_mode = MPC_OUTPUT_CSC_COEF_B; + ocsc_regs.shifts.csc_c11 = mpc20->mpc_shift->MPC_OCSC_C11_A; ocsc_regs.masks.csc_c11 = mpc20->mpc_mask->MPC_OCSC_C11_A; ocsc_regs.shifts.csc_c12 = mpc20->mpc_shift->MPC_OCSC_C12_A; @@ -203,6 +238,8 @@ void mpc2_set_ocsc_default( mpc20->base.ctx, regval, &ocsc_regs); + + REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode); } static void mpc2_ogam_get_reg_field( diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h index 9f53192da2dc399cce035e093344ad3311a34054..c78fd5123497b6f14a4b043aba3c01aec9ba4297 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h @@ -80,6 +80,10 @@ SRII(DENORM_CLAMP_G_Y, MPC_OUT, inst),\ SRII(DENORM_CLAMP_B_CB, MPC_OUT, inst) +#define MPC_DBG_REG_LIST_DCN2_0() \ + SR(MPC_OCSC_TEST_DEBUG_DATA),\ + SR(MPC_OCSC_TEST_DEBUG_INDEX) + #define MPC_REG_VARIABLE_LIST_DCN2_0 \ MPC_COMMON_REG_VARIABLE_LIST \ uint32_t MPCC_TOP_GAIN[MAX_MPCC]; \ @@ -118,6 +122,8 @@ uint32_t MPCC_OGAM_LUT_RAM_CONTROL[MAX_MPCC];\ uint32_t MPCC_OGAM_LUT_DATA[MAX_MPCC];\ uint32_t MPCC_OGAM_MODE[MAX_MPCC];\ + uint32_t MPC_OCSC_TEST_DEBUG_DATA;\ + uint32_t MPC_OCSC_TEST_DEBUG_INDEX;\ uint32_t CSC_MODE[MAX_OPP]; \ uint32_t CSC_C11_C12_A[MAX_OPP]; \ uint32_t CSC_C33_C34_A[MAX_OPP]; \ @@ -134,6 +140,7 @@ SF(MPCC0_MPCC_TOP_GAIN, MPCC_TOP_GAIN, mask_sh),\ SF(MPCC0_MPCC_BOT_GAIN_INSIDE, MPCC_BOT_GAIN_INSIDE, mask_sh),\ SF(MPCC0_MPCC_BOT_GAIN_OUTSIDE, MPCC_BOT_GAIN_OUTSIDE, mask_sh),\ + SF(MPC_OCSC_TEST_DEBUG_INDEX, MPC_OCSC_TEST_DEBUG_INDEX, mask_sh),\ SF(MPC_OUT0_CSC_MODE, MPC_OCSC_MODE, mask_sh),\ SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C11_A, mask_sh),\ SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C12_A, mask_sh),\ @@ -174,6 +181,19 @@ SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MAX_B_CB, mask_sh),\ SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh) +/* + * DCN2 MPC_OCSC debug status register: + * + * Status index including current OCSC Mode is 1 + * OCSC Mode: [1..0] + */ +#define MPC_OCSC_TEST_DEBUG_DATA_STATUS_IDX 1 + +#define MPC_DEBUG_REG_LIST_SH_DCN20 \ + .MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE = 0 + +#define MPC_DEBUG_REG_LIST_MASK_DCN20 \ + .MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE = 0x3 #define MPC_REG_FIELD_LIST_DCN2_0(type) \ MPC_REG_FIELD_LIST(type)\ @@ -182,6 +202,8 @@ type MPCC_TOP_GAIN;\ type MPCC_BOT_GAIN_INSIDE;\ type MPCC_BOT_GAIN_OUTSIDE;\ + type MPC_OCSC_TEST_DEBUG_DATA_OCSC_MODE;\ + type MPC_OCSC_TEST_DEBUG_INDEX;\ type MPC_OCSC_MODE;\ type MPC_OCSC_C11_A;\ type MPC_OCSC_C12_A;\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c index 673c83e2afd40dc58e4d12513f25f20e47fa0a48..d875b0c38fded4928c81e2173ba3b425f5e54963 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c @@ -236,12 +236,13 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c struct dc_crtc_timing *timing) { struct optc *optc1 = DCN10TG_FROM_TG(optc); - /* 2 pieces of memory required for up to 5120 displays, 4 for up to 8192 */ int mpcc_hactive = 
(timing->h_addressable + timing->h_border_left + timing->h_border_right) / opp_cnt; - int memory_mask = mpcc_hactive <= 2560 ? 0x3 : 0xf; + uint32_t memory_mask; uint32_t data_fmt = 0; + ASSERT(opp_cnt == 2); + /* TODO: In pseudocode but does not affect maximus, delete comment if we don't need it on asic * REG_SET(OTG_GLOBAL_CONTROL2, 0, GLOBAL_UPDATE_LOCK_EN, 1); * Program OTG register MASTER_UPDATE_LOCK_DB_X/Y to the position before DP frame start @@ -249,9 +250,17 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c * MASTER_UPDATE_LOCK_DB_X, 160, * MASTER_UPDATE_LOCK_DB_Y, 240); */ + + /* 2 pieces of memory are required for display widths up to 5120, 4 for widths up to 8192, + * however, for ODM combine we can simplify by always using 4. + * To make sure there's no overlap, each instance "reserves" 2 memories and + * they are uniquely combined here. + */ + memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2); + if (REG(OPTC_MEMORY_CONFIG)) REG_SET(OPTC_MEMORY_CONFIG, 0, - OPTC_MEM_SEL, memory_mask << (optc->inst * 4)); + OPTC_MEM_SEL, memory_mask); if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) data_fmt = 1; @@ -260,7 +269,6 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c REG_UPDATE(OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, data_fmt); - ASSERT(opp_cnt == 2); REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0, OPTC_NUM_OF_INPUT_SEGMENT, 1, OPTC_SEG0_SRC_SEL, opp_id[0], @@ -382,14 +390,8 @@ void optc2_setup_manual_trigger(struct timing_generator *optc) { struct optc *optc1 = DCN10TG_FROM_TG(optc); - REG_SET(OTG_MANUAL_FLOW_CONTROL, 0, - MANUAL_FLOW_CONTROL, 1); - - REG_SET(OTG_GLOBAL_CONTROL2, 0, - MANUAL_FLOW_CONTROL_SEL, optc->inst); - REG_SET_8(OTG_TRIGA_CNTL, 0, - OTG_TRIGA_SOURCE_SELECT, 22, + OTG_TRIGA_SOURCE_SELECT, 21, OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst, OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1, OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 0, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h index ac93fbfaee03adadf8555eaa065a1fe1df896698..239cc40ae474be3765e7bbd54c9766a52b2f9712 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h @@ -106,6 +106,7 @@ void optc2_triplebuffer_lock(struct timing_generator *optc); void optc2_triplebuffer_unlock(struct timing_generator *optc); void optc2_lock_doublebuffer_disable(struct timing_generator *optc); void optc2_lock_doublebuffer_enable(struct timing_generator *optc); +void optc2_setup_manual_trigger(struct timing_generator *optc); void optc2_program_manual_trigger(struct timing_generator *optc); bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing); #endif /* __DC_OPTC_DCN20_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index cfc69919ef9e45c725e071ae576e3cad3418a63a..85f90f3e24cb50a600a447db86dcfbc061397c52 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -1,5 +1,6 @@ /* * Copyright 2016 Advanced Micro Devices, Inc. 
+ * Copyright 2019 Raptor Engineering, LLC * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -65,6 +66,8 @@ #include "dcn/dcn_2_0_0_offset.h" #include "dcn/dcn_2_0_0_sh_mask.h" +#include "dpcs/dpcs_2_0_0_offset.h" +#include "dpcs/dpcs_2_0_0_sh_mask.h" #include "nbio/nbio_2_3_offset.h" @@ -548,6 +551,7 @@ static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { [id] = {\ LE_DCN10_REG_LIST(id), \ UNIPHY_DCN2_REG_LIST(phyid), \ + DPCS_DCN2_REG_LIST(id), \ SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ } @@ -561,11 +565,13 @@ static const struct dcn10_link_enc_registers link_enc_regs[] = { }; static const struct dcn10_link_enc_shift le_shift = { - LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT) + LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT),\ + DPCS_DCN2_MASK_SH_LIST(__SHIFT) }; static const struct dcn10_link_enc_mask le_mask = { - LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK) + LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK),\ + DPCS_DCN2_MASK_SH_LIST(_MASK) }; #define ipp_regs(id)\ @@ -632,6 +638,7 @@ static const struct dce110_aux_registers aux_engine_regs[] = { #define tf_regs(id)\ [id] = {\ TF_REG_LIST_DCN20(id),\ + TF_REG_LIST_DCN20_COMMON_APPEND(id),\ } static const struct dcn2_dpp_registers tf_regs[] = { @@ -645,12 +652,12 @@ static const struct dcn2_dpp_registers tf_regs[] = { static const struct dcn2_dpp_shift tf_shift = { TF_REG_LIST_SH_MASK_DCN20(__SHIFT), - TF_DEBUG_REG_LIST_SH_DCN10 + TF_DEBUG_REG_LIST_SH_DCN20 }; static const struct dcn2_dpp_mask tf_mask = { TF_REG_LIST_SH_MASK_DCN20(_MASK), - TF_DEBUG_REG_LIST_MASK_DCN10 + TF_DEBUG_REG_LIST_MASK_DCN20 }; #define dwbc_regs_dcn2(id)\ @@ -700,14 +707,17 @@ static const struct dcn20_mpc_registers mpc_regs = { MPC_OUT_MUX_REG_LIST_DCN2_0(3), MPC_OUT_MUX_REG_LIST_DCN2_0(4), MPC_OUT_MUX_REG_LIST_DCN2_0(5), + MPC_DBG_REG_LIST_DCN2_0() }; static const struct dcn20_mpc_shift mpc_shift = { - MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) + MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT), + MPC_DEBUG_REG_LIST_SH_DCN20 }; static const struct dcn20_mpc_mask mpc_mask = { - MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK) + MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK), + MPC_DEBUG_REG_LIST_MASK_DCN20 }; #define tg_regs(id)\ @@ -1563,7 +1573,7 @@ static void release_dsc(struct resource_context *res_ctx, -static enum dc_status add_dsc_to_stream_resource(struct dc *dc, +enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc, struct dc_state *dc_ctx, struct dc_stream_state *dc_stream) { @@ -1578,6 +1588,9 @@ static enum dc_status add_dsc_to_stream_resource(struct dc *dc, if (pipe_ctx->stream != dc_stream) continue; + if (pipe_ctx->stream_res.dsc) + continue; + acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc, i); /* The number of DSCs can be less than the number of pipes */ @@ -1626,7 +1639,7 @@ enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, /* Get a DSC if required and available */ if (result == DC_OK && dc_stream->timing.flags.DSC) - result = add_dsc_to_stream_resource(dc, new_ctx, dc_stream); + result = dcn20_add_dsc_to_stream_resource(dc, new_ctx, dc_stream); if (result == DC_OK) result = dcn20_build_mapped_resource(dc, new_ctx, dc_stream); @@ -1848,6 +1861,22 @@ void dcn20_populate_dml_writeback_from_context( } +static int get_num_odm_heads(struct pipe_ctx *pipe) +{ + int odm_head_count = 0; + struct pipe_ctx *next_pipe = pipe->next_odm_pipe; + while (next_pipe) { + odm_head_count++; + next_pipe = next_pipe->next_odm_pipe; + } + pipe = 
pipe->prev_odm_pipe; + while (pipe) { + odm_head_count++; + pipe = pipe->prev_odm_pipe; + } + return odm_head_count ? odm_head_count + 1 : 0; +} + int dcn20_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes) { @@ -1874,17 +1903,21 @@ int dcn20_populate_dml_pipes_from_context( for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { struct dc_crtc_timing *timing = &res_ctx->pipe_ctx[i].stream->timing; unsigned int v_total; + unsigned int front_porch; int output_bpc; if (!res_ctx->pipe_ctx[i].stream) continue; v_total = timing->v_total; + front_porch = timing->v_front_porch; /* todo: pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0; pipes[pipe_cnt].pipe.src.dcc = 0; pipes[pipe_cnt].pipe.src.vm = 0;*/ + pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0; + pipes[pipe_cnt].dout.dsc_enable = res_ctx->pipe_ctx[i].stream->timing.flags.DSC; /* todo: rotation?*/ pipes[pipe_cnt].dout.dsc_slices = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.num_slices_h; @@ -1906,7 +1939,7 @@ int dcn20_populate_dml_pipes_from_context( - timing->h_addressable - timing->h_border_left - timing->h_border_right; - pipes[pipe_cnt].pipe.dest.vblank_start = v_total - timing->v_front_porch; + pipes[pipe_cnt].pipe.dest.vblank_start = v_total - front_porch; pipes[pipe_cnt].pipe.dest.vblank_end = pipes[pipe_cnt].pipe.dest.vblank_start - timing->v_addressable - timing->v_border_top @@ -1923,8 +1956,13 @@ int dcn20_populate_dml_pipes_from_context( pipes[pipe_cnt].dout.dp_lanes = 4; pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min; pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max; - pipes[pipe_cnt].pipe.dest.odm_combine = res_ctx->pipe_ctx[i].prev_odm_pipe - || res_ctx->pipe_ctx[i].next_odm_pipe; + switch (get_num_odm_heads(&res_ctx->pipe_ctx[i])) { + case 2: + pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_2to1; + break; + default: + pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_disabled; + } pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].pipe_idx; if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state == res_ctx->pipe_ctx[i].plane_state) @@ -2034,6 +2072,9 @@ int dcn20_populate_dml_pipes_from_context( if (pipes[pipe_cnt].pipe.src.viewport_height > 1080) pipes[pipe_cnt].pipe.src.viewport_height = 1080; pipes[pipe_cnt].pipe.src.surface_height_y = pipes[pipe_cnt].pipe.src.viewport_height; + pipes[pipe_cnt].pipe.src.surface_width_y = pipes[pipe_cnt].pipe.src.viewport_width; + pipes[pipe_cnt].pipe.src.surface_height_c = pipes[pipe_cnt].pipe.src.viewport_height; + pipes[pipe_cnt].pipe.src.surface_width_c = pipes[pipe_cnt].pipe.src.viewport_width; pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 63) / 64) * 64; /* linear sw only */ pipes[pipe_cnt].pipe.src.source_format = dm_444_32; pipes[pipe_cnt].pipe.dest.recout_width = pipes[pipe_cnt].pipe.src.viewport_width; /*vp_width/hratio*/ @@ -2067,7 +2108,10 @@ int dcn20_populate_dml_pipes_from_context( pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c.width; pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport.height; pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c.height; + pipes[pipe_cnt].pipe.src.surface_width_y = pln->plane_size.surface_size.width; pipes[pipe_cnt].pipe.src.surface_height_y = pln->plane_size.surface_size.height; + pipes[pipe_cnt].pipe.src.surface_width_c = 
pln->plane_size.chroma_size.width; + pipes[pipe_cnt].pipe.src.surface_height_c = pln->plane_size.chroma_size.height; if (pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.surface_pitch; pipes[pipe_cnt].pipe.src.data_pitch_c = pln->plane_size.chroma_pitch; @@ -2481,7 +2525,7 @@ int dcn20_validate_apply_pipe_split_flags( split[i] = true; if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) { split[i] = true; - context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = true; + context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = dm_odm_combine_mode_2to1; } context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] = context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx]; @@ -2886,12 +2930,19 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool voltage_supported = false; bool full_pstate_supported = false; bool dummy_pstate_supported = false; - double p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us; - context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support = dc->debug.disable_dram_clock_change_vactive_support; + double p_state_latency_us; - if (fast_validate) - return dcn20_validate_bandwidth_internal(dc, context, true); + DC_FP_START(); + p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us; + context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support = + dc->debug.disable_dram_clock_change_vactive_support; + if (fast_validate) { + voltage_supported = dcn20_validate_bandwidth_internal(dc, context, true); + + DC_FP_END(); + return voltage_supported; + } // Best case, we support full UCLK switch latency voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false); @@ -2899,7 +2950,7 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 || (voltage_supported && full_pstate_supported)) { - context->bw_ctx.bw.dcn.clk.p_state_change_support = true; + context->bw_ctx.bw.dcn.clk.p_state_change_support = full_pstate_supported; goto restore_dml_state; } @@ -2920,6 +2971,7 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, restore_dml_state: context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us; + DC_FP_END(); return voltage_supported; } @@ -3211,7 +3263,6 @@ void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb) { - kernel_fpu_begin(); if ((int)(bb->sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns && dc->bb_overrides.sr_exit_time_ns) { bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0; @@ -3235,7 +3286,6 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st bb->dram_clock_change_latency_us = dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; } - kernel_fpu_end(); } static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb( @@ -3441,6 +3491,8 @@ static bool dcn20_resource_construct( enum dml_project dml_project_version = get_dml_project_version(ctx->asic_id.hw_internal_rev); + DC_FP_START(); + ctx->dc_bios->regs = &bios_regs; pool->base.funcs = &dcn20_res_pool_funcs; @@ -3738,10 +3790,12 @@ static bool dcn20_resource_construct( pool->base.oem_device = NULL; } + DC_FP_END(); return true; create_fail: + DC_FP_END(); dcn20_resource_destruct(pool); return false; diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h index 840ca66c34e12edb921358492cd8d4654bd78673..f5893840b79be1089de686411808cc71733e7b8a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h @@ -157,6 +157,7 @@ void dcn20_calculate_dlg_params( enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream); enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream); +enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc, struct dc_state *dc_ctx, struct dc_stream_state *dc_stream); enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream); enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state); diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile index 4763721fb1c9a2a29d64550b51e5dab620292b1d..07684d3e375abdceafeb2c43c82259f35a61fb32 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile @@ -5,7 +5,13 @@ DCN21 = dcn21_init.o dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o \ dcn21_hwseq.o dcn21_link_encoder.o +ifdef CONFIG_X86 CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse +endif + +ifdef CONFIG_PPC64 +CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -maltivec +endif ifdef CONFIG_CC_IS_GCC ifeq ($(call cc-ifversion, -lt, 0701, y), y) @@ -13,6 +19,7 @@ IS_OLD_GCC = 1 endif endif +ifdef CONFIG_X86 ifdef IS_OLD_GCC # Stack alignment mismatch, proceed with caution. # GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 @@ -21,6 +28,7 @@ CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -mpreferred-stack-boundary=4 else CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -msse2 endif +endif AMD_DAL_DCN21 = $(addprefix $(AMDDALPATH)/dc/dcn21/,$(DCN21)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c index 332bf3d3a66450aac74daf4db721ce13ac773008..cf09b933572801c9ef605f56a71fc412a6f554ed 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c @@ -29,6 +29,8 @@ #include "dm_services.h" #include "reg_helper.h" +#include "dc_dmub_srv.h" + #define DC_LOGGER_INIT(logger) #define REG(reg)\ @@ -169,12 +171,9 @@ static void hubp21_setup( void hubp21_set_viewport( struct hubp *hubp, const struct rect *viewport, - const struct rect *viewport_c, - enum dc_rotation_angle rotation) + const struct rect *viewport_c) { struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); - int patched_viewport_height = 0; - struct dc_debug_options *debug = &hubp->ctx->dc->debug; REG_SET_2(DCSURF_PRI_VIEWPORT_DIMENSION, 0, PRI_VIEWPORT_WIDTH, viewport->width, @@ -193,31 +192,10 @@ void hubp21_set_viewport( SEC_VIEWPORT_X_START, viewport->x, SEC_VIEWPORT_Y_START, viewport->y); - /* - * Work around for underflow issue with NV12 + rIOMMU translation - * + immediate flip. 
This will cause hubp underflow, but will not - * be user visible since underflow is in blank region - * Disable w/a when rotated 180 degrees, causes vertical chroma offset - */ - patched_viewport_height = viewport_c->height; - if (debug->nv12_iflip_vm_wa && viewport_c->height > 512 && - rotation != ROTATION_ANGLE_180) { - int pte_row_height = 0; - int pte_rows = 0; - - REG_GET(DCHUBP_REQ_SIZE_CONFIG_C, - PTE_ROW_HEIGHT_LINEAR_C, &pte_row_height); - - pte_row_height = 1 << (pte_row_height + 3); - pte_rows = (viewport_c->height / pte_row_height) + 1; - patched_viewport_height = pte_rows * pte_row_height + 1; - } - - /* DC supports NV12 only at the moment */ REG_SET_2(DCSURF_PRI_VIEWPORT_DIMENSION_C, 0, PRI_VIEWPORT_WIDTH_C, viewport_c->width, - PRI_VIEWPORT_HEIGHT_C, patched_viewport_height); + PRI_VIEWPORT_HEIGHT_C, viewport_c->height); REG_SET_2(DCSURF_PRI_VIEWPORT_START_C, 0, PRI_VIEWPORT_X_START_C, viewport_c->x, @@ -225,13 +203,123 @@ REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION_C, 0, SEC_VIEWPORT_WIDTH_C, viewport_c->width, - SEC_VIEWPORT_HEIGHT_C, patched_viewport_height); + SEC_VIEWPORT_HEIGHT_C, viewport_c->height); REG_SET_2(DCSURF_SEC_VIEWPORT_START_C, 0, SEC_VIEWPORT_X_START_C, viewport_c->x, SEC_VIEWPORT_Y_START_C, viewport_c->y); } +static void hubp21_apply_PLAT_54186_wa( + struct hubp *hubp, + const struct dc_plane_address *address) +{ + struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); + struct dc_debug_options *debug = &hubp->ctx->dc->debug; + unsigned int chroma_bpe = 2; + unsigned int luma_addr_high_part = 0; + unsigned int row_height = 0; + unsigned int chroma_pitch = 0; + unsigned int viewport_c_height = 0; + unsigned int viewport_c_width = 0; + unsigned int patched_viewport_height = 0; + unsigned int patched_viewport_width = 0; + unsigned int rotation_angle = 0; + unsigned int pix_format = 0; + unsigned int h_mirror_en = 0; + unsigned int tile_blk_size = 64 * 1024; /* 64KB for 64KB SW, 4KB for 4KB SW */ + + + if (!debug->nv12_iflip_vm_wa) + return; + + REG_GET(DCHUBP_REQ_SIZE_CONFIG_C, + PTE_ROW_HEIGHT_LINEAR_C, &row_height); + + REG_GET_2(DCSURF_PRI_VIEWPORT_DIMENSION_C, + PRI_VIEWPORT_WIDTH_C, &viewport_c_width, + PRI_VIEWPORT_HEIGHT_C, &viewport_c_height); + + REG_GET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, + PRIMARY_SURFACE_ADDRESS_HIGH_C, &luma_addr_high_part); + + REG_GET(DCSURF_SURFACE_PITCH_C, + PITCH_C, &chroma_pitch); + + chroma_pitch += 1; + + REG_GET_3(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, &pix_format, + ROTATION_ANGLE, &rotation_angle, + H_MIRROR_EN, &h_mirror_en); + + /* reset persistent cached data */ + hubp21->PLAT_54186_wa_chroma_addr_offset = 0; + /* apply the wa only for NV12 surfaces with scatter gather enabled and a viewport > 512 + * along the vertical direction */ + if (address->type != PLN_ADDR_TYPE_VIDEO_PROGRESSIVE || + address->video_progressive.luma_addr.high_part == 0xf4) + return; + + if ((rotation_angle == ROTATION_ANGLE_0 || rotation_angle == ROTATION_ANGLE_180) + && viewport_c_height <= 512) + return; + + if ((rotation_angle == ROTATION_ANGLE_90 || rotation_angle == ROTATION_ANGLE_270) + && viewport_c_width <= 512) + return; + + switch (rotation_angle) { + case ROTATION_ANGLE_0: /* 0 degree rotation */ + row_height = 128; + patched_viewport_height = (viewport_c_height / row_height + 1) * row_height + 1; + patched_viewport_width = viewport_c_width; + hubp21->PLAT_54186_wa_chroma_addr_offset = 0; + break; + case ROTATION_ANGLE_180: /* 180 degree rotation */ + row_height = 128; + patched_viewport_height = 
viewport_c_height + row_height; + patched_viewport_width = viewport_c_width; + hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - chroma_pitch * row_height * chroma_bpe; + break; + case ROTATION_ANGLE_90: /* 90 degree rotation */ + row_height = 256; + if (h_mirror_en) { + patched_viewport_height = viewport_c_height; + patched_viewport_width = viewport_c_width + row_height; + hubp21->PLAT_54186_wa_chroma_addr_offset = 0; + } else { + patched_viewport_height = viewport_c_height; + patched_viewport_width = viewport_c_width + row_height; + hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - tile_blk_size; + } + break; + case ROTATION_ANGLE_270: /* 270 degree rotation */ + row_height = 256; + if (h_mirror_en) { + patched_viewport_height = viewport_c_height; + patched_viewport_width = viewport_c_width + row_height; + hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - tile_blk_size; + } else { + patched_viewport_height = viewport_c_height; + patched_viewport_width = viewport_c_width + row_height; + hubp21->PLAT_54186_wa_chroma_addr_offset = 0; + } + break; + default: + ASSERT(0); + break; + } + + /* catch cases where the viewport keeps growing */ + ASSERT(patched_viewport_height && patched_viewport_height < 5000); + ASSERT(patched_viewport_width && patched_viewport_width < 5000); + + REG_UPDATE_2(DCSURF_PRI_VIEWPORT_DIMENSION_C, + PRI_VIEWPORT_WIDTH_C, patched_viewport_width, + PRI_VIEWPORT_HEIGHT_C, patched_viewport_height); +} + void hubp21_set_vm_system_aperture_settings(struct hubp *hubp, struct vm_system_aperture_param *apt) { @@ -602,6 +690,219 @@ void hubp21_validate_dml_output(struct hubp *hubp, dml_dlg_attr->refcyc_per_meta_chunk_flip_l, dlg_attr.refcyc_per_meta_chunk_flip_l); } +static void program_surface_flip_and_addr(struct hubp *hubp, struct surface_flip_registers *flip_regs) +{ + struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); + + REG_UPDATE_3(DCSURF_FLIP_CONTROL, + SURFACE_FLIP_TYPE, flip_regs->immediate, + SURFACE_FLIP_MODE_FOR_STEREOSYNC, flip_regs->grph_stereo, + SURFACE_FLIP_IN_STEREOSYNC, flip_regs->grph_stereo); + + REG_UPDATE(VMID_SETTINGS_0, + VMID, flip_regs->vmid); + + REG_UPDATE_8(DCSURF_SURFACE_CONTROL, + PRIMARY_SURFACE_TMZ, flip_regs->tmz_surface, + PRIMARY_SURFACE_TMZ_C, flip_regs->tmz_surface, + PRIMARY_META_SURFACE_TMZ, flip_regs->tmz_surface, + PRIMARY_META_SURFACE_TMZ_C, flip_regs->tmz_surface, + SECONDARY_SURFACE_TMZ, flip_regs->tmz_surface, + SECONDARY_SURFACE_TMZ_C, flip_regs->tmz_surface, + SECONDARY_META_SURFACE_TMZ, flip_regs->tmz_surface, + SECONDARY_META_SURFACE_TMZ_C, flip_regs->tmz_surface); + + REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, 0, + PRIMARY_META_SURFACE_ADDRESS_HIGH_C, + flip_regs->DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C); + + REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, 0, + PRIMARY_META_SURFACE_ADDRESS_C, + flip_regs->DCSURF_PRIMARY_META_SURFACE_ADDRESS_C); + + REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0, + PRIMARY_META_SURFACE_ADDRESS_HIGH, + flip_regs->DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH); + + REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0, + PRIMARY_META_SURFACE_ADDRESS, + flip_regs->DCSURF_PRIMARY_META_SURFACE_ADDRESS); + + REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH, 0, + SECONDARY_META_SURFACE_ADDRESS_HIGH, + flip_regs->DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH); + + REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS, 0, + SECONDARY_META_SURFACE_ADDRESS, + flip_regs->DCSURF_SECONDARY_META_SURFACE_ADDRESS); + + + REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, 0, + SECONDARY_SURFACE_ADDRESS_HIGH, + 
flip_regs->DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH); + + REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS, 0, + SECONDARY_SURFACE_ADDRESS, + flip_regs->DCSURF_SECONDARY_SURFACE_ADDRESS); + + + REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, 0, + PRIMARY_SURFACE_ADDRESS_HIGH_C, + flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C); + + REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_C, 0, + PRIMARY_SURFACE_ADDRESS_C, + flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_C); + + REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0, + PRIMARY_SURFACE_ADDRESS_HIGH, + flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH); + + REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0, + PRIMARY_SURFACE_ADDRESS, + flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS); +} + +void dmcub_PLAT_54186_wa(struct hubp *hubp, struct surface_flip_registers *flip_regs) +{ + struct dc_dmub_srv *dmcub = hubp->ctx->dmub_srv; + struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); + struct dmub_rb_cmd_PLAT_54186_wa PLAT_54186_wa = { 0 }; + + PLAT_54186_wa.header.type = DMUB_CMD__PLAT_54186_WA; + PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS; + PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_C = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_C; + PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH; + PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C; + PLAT_54186_wa.flip.flip_params.grph_stereo = flip_regs->grph_stereo; + PLAT_54186_wa.flip.flip_params.hubp_inst = hubp->inst; + PLAT_54186_wa.flip.flip_params.immediate = flip_regs->immediate; + PLAT_54186_wa.flip.flip_params.tmz_surface = flip_regs->tmz_surface; + PLAT_54186_wa.flip.flip_params.vmid = flip_regs->vmid; + + PERF_TRACE(); // TODO: remove after performance is stable. + dc_dmub_srv_cmd_queue(dmcub, &PLAT_54186_wa.header); + PERF_TRACE(); // TODO: remove after performance is stable. + dc_dmub_srv_cmd_execute(dmcub); + PERF_TRACE(); // TODO: remove after performance is stable. + dc_dmub_srv_wait_idle(dmcub); + PERF_TRACE(); // TODO: remove after performance is stable. 
+} + +bool hubp21_program_surface_flip_and_addr( + struct hubp *hubp, + const struct dc_plane_address *address, + bool flip_immediate) +{ + struct dc_debug_options *debug = &hubp->ctx->dc->debug; + struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); + struct surface_flip_registers flip_regs = { 0 }; + + flip_regs.vmid = address->vmid; + + switch (address->type) { + case PLN_ADDR_TYPE_GRAPHICS: + if (address->grph.addr.quad_part == 0) { + BREAK_TO_DEBUGGER(); + break; + } + + if (address->grph.meta_addr.quad_part != 0) { + flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS = + address->grph.meta_addr.low_part; + flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH = + address->grph.meta_addr.high_part; + } + + flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS = + address->grph.addr.low_part; + flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH = + address->grph.addr.high_part; + break; + case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE: + if (address->video_progressive.luma_addr.quad_part == 0 + || address->video_progressive.chroma_addr.quad_part == 0) + break; + + if (address->video_progressive.luma_meta_addr.quad_part != 0) { + flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS = + address->video_progressive.luma_meta_addr.low_part; + flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH = + address->video_progressive.luma_meta_addr.high_part; + + flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_C = + address->video_progressive.chroma_meta_addr.low_part; + flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C = + address->video_progressive.chroma_meta_addr.high_part; + } + + flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS = + address->video_progressive.luma_addr.low_part; + flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH = + address->video_progressive.luma_addr.high_part; + + if (debug->nv12_iflip_vm_wa) { + flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_C = + address->video_progressive.chroma_addr.low_part + hubp21->PLAT_54186_wa_chroma_addr_offset; + } else + flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_C = + address->video_progressive.chroma_addr.low_part; + + flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C = + address->video_progressive.chroma_addr.high_part; + + break; + case PLN_ADDR_TYPE_GRPH_STEREO: + if (address->grph_stereo.left_addr.quad_part == 0) + break; + if (address->grph_stereo.right_addr.quad_part == 0) + break; + + flip_regs.grph_stereo = true; + + if (address->grph_stereo.right_meta_addr.quad_part != 0) { + flip_regs.DCSURF_SECONDARY_META_SURFACE_ADDRESS = + address->grph_stereo.right_meta_addr.low_part; + flip_regs.DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH = + address->grph_stereo.right_meta_addr.high_part; + } + + if (address->grph_stereo.left_meta_addr.quad_part != 0) { + flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS = + address->grph_stereo.left_meta_addr.low_part; + flip_regs.DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH = + address->grph_stereo.left_meta_addr.high_part; + } + + flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS = + address->grph_stereo.left_addr.low_part; + flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH = + address->grph_stereo.left_addr.high_part; + + flip_regs.DCSURF_SECONDARY_SURFACE_ADDRESS = + address->grph_stereo.right_addr.low_part; + flip_regs.DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH = + address->grph_stereo.right_addr.high_part; + + break; + default: + BREAK_TO_DEBUGGER(); + break; + } + + flip_regs.tmz_surface = address->tmz_surface; + flip_regs.immediate = flip_immediate; + + if (hubp->ctx->dc->debug.enable_dmcub_surface_flip && address->type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) + dmcub_PLAT_54186_wa(hubp, &flip_regs); 
+ else + program_surface_flip_and_addr(hubp, &flip_regs); + + hubp->request_address = *address; + + return true; +} + void hubp21_init(struct hubp *hubp) { // DEDCN21-133: Inconsistent row starting line for flip between DPTE and Meta @@ -614,7 +915,7 @@ void hubp21_init(struct hubp *hubp) static struct hubp_funcs dcn21_hubp_funcs = { .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer, .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled, - .hubp_program_surface_flip_and_addr = hubp2_program_surface_flip_and_addr, + .hubp_program_surface_flip_and_addr = hubp21_program_surface_flip_and_addr, .hubp_program_surface_config = hubp1_program_surface_config, .hubp_is_flip_pending = hubp1_is_flip_pending, .hubp_setup = hubp21_setup, @@ -623,6 +924,7 @@ static struct hubp_funcs dcn21_hubp_funcs = { .set_blank = hubp1_set_blank, .dcc_control = hubp1_dcc_control, .mem_program_viewport = hubp21_set_viewport, + .apply_PLAT_54186_wa = hubp21_apply_PLAT_54186_wa, .set_cursor_attributes = hubp2_cursor_set_attributes, .set_cursor_position = hubp1_cursor_set_position, .hubp_clk_cntl = hubp1_clk_cntl, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.h index aeda719a2a13a7dfacab8f2ab554be8bb91fe749..9873b6cbc5bab5d957e210dca0c3ebc88298dab5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.h @@ -108,6 +108,7 @@ struct dcn21_hubp { const struct dcn_hubp2_registers *hubp_regs; const struct dcn_hubp2_shift *hubp_shift; const struct dcn_hubp2_mask *hubp_mask; + int PLAT_54186_wa_chroma_addr_offset; }; bool hubp21_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h index 1d7a1a51f13d0663774deceb932e3c560f8334b8..033d5d76f1956a0b02ef571365185c98ca3b184d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h @@ -33,6 +33,45 @@ struct dcn21_link_encoder { struct dpcssys_phy_seq_cfg phy_seq_cfg; }; +#define DPCS_DCN21_MASK_SH_LIST(mask_sh)\ + DPCS_DCN2_MASK_SH_LIST(mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_TX_VBOOST_LVL, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_MPLLB_CP_PROP_GS, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_RX_VREF_CTRL, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_MPLLB_CP_INT_GS, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCS_DMCU_DPALT_DIS_BLOCK_REG, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_SUP_PRE_HP, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_DP_TX0_VREGDRV_BYP, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_DP_TX1_VREGDRV_BYP, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_DP_TX2_VREGDRV_BYP, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL15, RDPCS_PHY_DP_TX3_VREGDRV_BYP, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_MAIN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_PRE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE0, RDPCS_PHY_DP_TX0_EQ_POST, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_MAIN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_PRE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE1, RDPCS_PHY_DP_TX1_EQ_POST, mask_sh),\ + 
LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_MAIN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_PRE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE2, RDPCS_PHY_DP_TX2_EQ_POST, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_MAIN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_FINETUNE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DCO_RANGE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_PRE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_FUSE3, RDPCS_PHY_DP_TX3_EQ_POST, mask_sh),\ + LE_SF(DCIO_SOFT_RESET, UNIPHYA_SOFT_RESET, mask_sh),\ + LE_SF(DCIO_SOFT_RESET, UNIPHYB_SOFT_RESET, mask_sh),\ + LE_SF(DCIO_SOFT_RESET, UNIPHYC_SOFT_RESET, mask_sh),\ + LE_SF(DCIO_SOFT_RESET, UNIPHYD_SOFT_RESET, mask_sh),\ + LE_SF(DCIO_SOFT_RESET, UNIPHYE_SOFT_RESET, mask_sh) + +#define DPCS_DCN21_REG_LIST(id) \ + DPCS_DCN2_REG_LIST(id),\ + SRI(RDPCSTX_PHY_CNTL15, RDPCSTX, id),\ + SRI(RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCSTX, id) + #define LINK_ENCODER_MASK_SH_LIST_DCN21(mask_sh)\ LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh),\ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL0_XBAR_SOURCE, mask_sh),\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index c865b95d5c0e82cf9b3b484f1aa9dd05fd0f3cfc..0d506d30d6b6f455a197a053d693f38c060ec782 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -1,5 +1,6 @@ /* * Copyright 2018 Advanced Micro Devices, Inc. + * Copyright 2019 Raptor Engineering, LLC * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -62,6 +63,8 @@ #include "dcn20/dcn20_dwb.h" #include "dcn20/dcn20_mmhubbub.h" +#include "dpcs/dpcs_2_1_0_offset.h" +#include "dpcs/dpcs_2_1_0_sh_mask.h" #include "renoir_ip_offset.h" #include "dcn/dcn_2_1_0_offset.h" @@ -80,6 +83,7 @@ #include "dcn21_resource.h" #include "vm_helper.h" #include "dcn20/dcn20_vmid.h" +#include "../dce/dmub_psr.h" #define SOC_BOUNDING_BOX_VALID false #define DC_LOGGER_INIT(logger) @@ -462,15 +466,18 @@ static const struct dcn20_mpc_registers mpc_regs = { MPC_OUT_MUX_REG_LIST_DCN2_0(0), MPC_OUT_MUX_REG_LIST_DCN2_0(1), MPC_OUT_MUX_REG_LIST_DCN2_0(2), - MPC_OUT_MUX_REG_LIST_DCN2_0(3) + MPC_OUT_MUX_REG_LIST_DCN2_0(3), + MPC_DBG_REG_LIST_DCN2_0() }; static const struct dcn20_mpc_shift mpc_shift = { - MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) + MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT), + MPC_DEBUG_REG_LIST_SH_DCN20 }; static const struct dcn20_mpc_mask mpc_mask = { - MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK) + MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK), + MPC_DEBUG_REG_LIST_MASK_DCN20 }; #define hubp_regs(id)\ @@ -605,6 +612,7 @@ static const struct dce110_aux_registers aux_engine_regs[] = { #define tf_regs(id)\ [id] = {\ TF_REG_LIST_DCN20(id),\ + TF_REG_LIST_DCN20_COMMON_APPEND(id),\ } static const struct dcn2_dpp_registers tf_regs[] = { @@ -615,11 +623,13 @@ static const struct dcn2_dpp_registers tf_regs[] = { }; static const struct dcn2_dpp_shift tf_shift = { - TF_REG_LIST_SH_MASK_DCN20(__SHIFT) + TF_REG_LIST_SH_MASK_DCN20(__SHIFT), + TF_DEBUG_REG_LIST_SH_DCN20 }; static const struct dcn2_dpp_mask tf_mask = { - TF_REG_LIST_SH_MASK_DCN20(_MASK) + TF_REG_LIST_SH_MASK_DCN20(_MASK), + TF_DEBUG_REG_LIST_MASK_DCN20 }; #define stream_enc_regs(id)\ @@ -820,12 +830,13 @@ static const struct dc_debug_options debug_defaults_drv = { 
.disable_dcc = DCC_ENABLE, .vsr_support = true, .performance_trace = false, - .max_downscale_src_width = 3840, + .max_downscale_src_width = 4096, .disable_pplib_wm_range = false, .scl_reset_length10 = true, .sanity_checks = true, .disable_48mhz_pwrdwn = false, - .nv12_iflip_vm_wa = true + .nv12_iflip_vm_wa = true, + .usbc_combo_phy_reset_wa = true }; static const struct dc_debug_options debug_defaults_diags = { @@ -993,7 +1004,8 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s { int i; - kernel_fpu_begin(); + DC_FP_START(); + if (dc->bb_overrides.sr_exit_time_ns) { for (i = 0; i < WM_SET_COUNT; i++) { dc->clk_mgr->bw_params->wm_table.entries[i].sr_exit_time_us = @@ -1019,7 +1031,7 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s } } - kernel_fpu_end(); + DC_FP_END(); } void dcn21_calculate_wm( @@ -1319,12 +1331,6 @@ struct display_stream_compressor *dcn21_dsc_create( static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { - /* - TODO: Fix this function to calcualte correct values. - There are known issues with this function currently - that will need to be investigated. Use hardcoded known good values for now. - - struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool); struct clk_limit_table *clk_table = &bw_params->clk_table; int i; @@ -1339,11 +1345,14 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param dcn2_1_soc.clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; dcn2_1_soc.clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz; dcn2_1_soc.clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz; - dcn2_1_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 16 / 1000; + dcn2_1_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2; } - dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - i]; + dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - 1]; dcn2_1_soc.num_states = i; - */ + + // diags does not retrieve proper values from SMU, do not update DML instance for diags + if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && !IS_DIAG_DC(dc->ctx->dce_environment)) + dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21); } /* Temporary Place holder until we can get them from fuse */ @@ -1497,8 +1506,9 @@ static const struct encoder_feature_support link_enc_feature = { #define link_regs(id, phyid)\ [id] = {\ - LE_DCN10_REG_LIST(id), \ + LE_DCN2_REG_LIST(id), \ UNIPHY_DCN2_REG_LIST(phyid), \ + DPCS_DCN21_REG_LIST(id), \ SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ } @@ -1537,11 +1547,13 @@ static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { }; static const struct dcn10_link_enc_shift le_shift = { - LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT) + LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT),\ + DPCS_DCN21_MASK_SH_LIST(__SHIFT) }; static const struct dcn10_link_enc_mask le_mask = { - LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK) + LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK),\ + DPCS_DCN21_MASK_SH_LIST(_MASK) }; static int map_transmitter_id_to_phy_instance( @@ -1740,6 +1752,10 @@ static bool dcn21_resource_construct( goto create_fail; } + // Leave as NULL to not affect current dmcu psr programming sequence + // Will be uncommented when functionality is confirmed to be working + pool->base.psr = NULL; + pool->base.abm = dce_abm_create(ctx, &abm_regs, &abm_shift, @@ -1776,41 +1792,41 @@ static bool dcn21_resource_construct( if ((pipe_fuses & (1 << i)) != 0) continue; - 
pool->base.hubps[i] = dcn21_hubp_create(ctx, i); - if (pool->base.hubps[i] == NULL) { + pool->base.hubps[j] = dcn21_hubp_create(ctx, i); + if (pool->base.hubps[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create memory input!\n"); goto create_fail; } - pool->base.ipps[i] = dcn21_ipp_create(ctx, i); - if (pool->base.ipps[i] == NULL) { + pool->base.ipps[j] = dcn21_ipp_create(ctx, i); + if (pool->base.ipps[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create input pixel processor!\n"); goto create_fail; } - pool->base.dpps[i] = dcn21_dpp_create(ctx, i); - if (pool->base.dpps[i] == NULL) { + pool->base.dpps[j] = dcn21_dpp_create(ctx, i); + if (pool->base.dpps[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create dpps!\n"); goto create_fail; } - pool->base.opps[i] = dcn21_opp_create(ctx, i); - if (pool->base.opps[i] == NULL) { + pool->base.opps[j] = dcn21_opp_create(ctx, i); + if (pool->base.opps[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error( "DC: failed to create output pixel processor!\n"); goto create_fail; } - pool->base.timing_generators[i] = dcn21_timing_generator_create( + pool->base.timing_generators[j] = dcn21_timing_generator_create( ctx, i); - if (pool->base.timing_generators[i] == NULL) { + if (pool->base.timing_generators[j] == NULL) { BREAK_TO_DEBUGGER(); dm_error("DC: failed to create tg!\n"); goto create_fail; diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h index a3d1be20dd9d948624654d81647895600a8caac9..b52ba6ffabe1c6c2a7309073d5b2560c10e4d6e6 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h +++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h @@ -220,6 +220,7 @@ struct dm_bl_data_point { }; /* Total size of the structure should not exceed 256 bytes */ +#define BL_DATA_POINTS 99 struct dm_acpi_atif_backlight_caps { uint16_t size; /* Bytes 0-1 (2 bytes) */ uint16_t flags; /* Bytes 2-3 (2 bytes) */ @@ -229,7 +230,7 @@ struct dm_acpi_atif_backlight_caps { uint8_t min_input_signal; /* Byte 7 */ uint8_t max_input_signal; /* Byte 8 */ uint8_t num_data_points; /* Byte 9 */ - struct dm_bl_data_point data_points[99]; /* Bytes 10-207 (198 bytes)*/ + struct dm_bl_data_point data_points[BL_DATA_POINTS]; /* Bytes 10-207 (198 bytes)*/ }; enum dm_acpi_display_type { diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index fb6358036be8d4562ddec35938f9e3742a84b4be..7ee8b8460a9ba4c60ab01492216bc81bb0f05161 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -1,5 +1,6 @@ # # Copyright 2017 Advanced Micro Devices, Inc. +# Copyright 2019 Raptor Engineering, LLC # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), @@ -24,7 +25,13 @@ # It provides the general basic services required by other DAL # subcomponents. +ifdef CONFIG_X86 dml_ccflags := -mhard-float -msse +endif + +ifdef CONFIG_PPC64 +dml_ccflags := -mhard-float -maltivec +endif ifdef CONFIG_CC_IS_GCC ifeq ($(call cc-ifversion, -lt, 0701, y), y) @@ -32,6 +39,7 @@ IS_OLD_GCC = 1 endif endif +ifdef CONFIG_X86 ifdef IS_OLD_GCC # Stack alignment mismatch, proceed with caution. 
# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 @@ -40,6 +48,7 @@ dml_ccflags += -mpreferred-stack-boundary=4 else dml_ccflags += -msse2 endif +endif CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c index 3b224b155e8cf09b1a21a138fdf4a0fbc170d88d..45f028986a8dbfe0fb4d4ad2b4b24d8dc757958c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c @@ -38,6 +38,7 @@ #define BPP_INVALID 0 #define BPP_BLENDED_PIPE 0xffffffff +#define DCN20_MAX_420_IMAGE_WIDTH 4096 static double adjust_ReturnBW( struct display_mode_lib *mode_lib, @@ -937,7 +938,7 @@ static unsigned int CalculateVMAndRowBytes( *MetaRowByte = 0; } - if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_lvp) { + if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_l_vp) { MacroTileSizeBytes = 256; MacroTileHeight = BlockHeight256Bytes; } else if (SurfaceTiling == dm_sw_4kb_s || SurfaceTiling == dm_sw_4kb_s_x @@ -1335,11 +1336,11 @@ static void dml20_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPer else mode_lib->vba.SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportHeight[k]; - if (mode_lib->vba.ODMCombineEnabled[k] == true) + if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) MainPlaneDoesODMCombine = true; for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) if (mode_lib->vba.BlendingAndTiming[k] == j - && mode_lib->vba.ODMCombineEnabled[j] == true) + && mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) MainPlaneDoesODMCombine = true; if (MainPlaneDoesODMCombine == true) @@ -2848,12 +2849,12 @@ static void dml20_DisplayPipeConfiguration(struct display_mode_lib *mode_lib) SwathWidth = mode_lib->vba.ViewportHeight[k]; } - if (mode_lib->vba.ODMCombineEnabled[k] == true) { + if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) { MainPlaneDoesODMCombine = true; } for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) { if (mode_lib->vba.BlendingAndTiming[k] == j - && mode_lib->vba.ODMCombineEnabled[j] == true) { + && mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) { MainPlaneDoesODMCombine = true; } } @@ -3348,7 +3349,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l == dm_420_10)) || (((mode_lib->vba.SurfaceTiling[k] == dm_sw_gfx7_2d_thin_gl || mode_lib->vba.SurfaceTiling[k] - == dm_sw_gfx7_2d_thin_lvp) + == dm_sw_gfx7_2d_thin_l_vp) && !((mode_lib->vba.SourcePixelFormat[k] == dm_444_64 || mode_lib->vba.SourcePixelFormat[k] @@ -3446,10 +3447,10 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l locals->FabricAndDRAMBandwidthPerState[i] * 1000) * locals->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly / 100; - locals->ReturnBWPerState[i] = locals->ReturnBWToDCNPerState; + locals->ReturnBWPerState[i][0] = locals->ReturnBWToDCNPerState; if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) { - locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i], + locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0], locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency / 
((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024 / (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i] @@ -3460,7 +3461,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l + (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024); if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) { - locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i], + locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0], 4 * locals->ReturnBWToDCNPerState * (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024 * locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency / @@ -3472,7 +3473,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l locals->DCFCLKPerState[i], locals->FabricAndDRAMBandwidthPerState[i] * 1000); if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) { - locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i], + locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0], locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency / ((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024 / (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i] @@ -3483,7 +3484,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l + (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024); if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) { - locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i], + locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0], 4 * locals->ReturnBWToDCNPerState * (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024 * locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency / @@ -3521,12 +3522,12 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i] = (mode_lib->vba.RoundTripPingLatencyCycles + 32.0) / mode_lib->vba.DCFCLKPerState[i] - + locals->UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i]; - if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i] + + locals->UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i][0]; + if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i][0] > locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i]) { - locals->ROBSupport[i] = true; + locals->ROBSupport[i][0] = true; } else { - locals->ROBSupport[i] = false; + locals->ROBSupport[i][0] = false; } } /*Writeback Mode Support Check*/ @@ -3894,16 +3895,22 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l && i == mode_lib->vba.soc.num_states) mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); - if (mode_lib->vba.ODMCapability == false || mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine <= mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) { - locals->ODMCombineEnablePerState[i][k] = false; - mode_lib->vba.PlaneRequiredDISPCLK = 
mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; - } else { - locals->ODMCombineEnablePerState[i][k] = true; - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + + locals->ODMCombineEnablePerState[i][k] = false; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; + if (mode_lib->vba.ODMCapability) { + if (locals->PlaneRequiredDISPCLKWithoutODMCombine > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) { + locals->ODMCombineEnablePerState[i][k] = true; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + } else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) { + locals->ODMCombineEnablePerState[i][k] = true; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + } } + if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity && locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k] - && locals->ODMCombineEnablePerState[i][k] == false) { + && locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) { locals->NoOfDPP[i][j][k] = 1; locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); @@ -3992,16 +3999,16 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l /*Viewport Size Check*/ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { - locals->ViewportSizeSupport[i] = true; + locals->ViewportSizeSupport[i][0] = true; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { - if (locals->ODMCombineEnablePerState[i][k] == true) { + if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) { if (dml_min(locals->SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k])) > locals->MaximumSwathWidth[k]) { - locals->ViewportSizeSupport[i] = false; + locals->ViewportSizeSupport[i][0] = false; } } else { if (locals->SwathWidthYSingleDPP[k] / 2.0 > locals->MaximumSwathWidth[k]) { - locals->ViewportSizeSupport[i] = false; + locals->ViewportSizeSupport[i][0] = false; } } } @@ -4183,8 +4190,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.DSCFormatFactor = 1; } if (locals->RequiresDSC[i][k] == true) { - if (locals->ODMCombineEnablePerState[i][k] - == true) { + if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) { if (mode_lib->vba.PixelClockBackEnd[k] / 6.0 / mode_lib->vba.DSCFormatFactor > (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i]) { locals->DSCCLKRequiredMoreThanSupported[i] = @@ -4207,7 +4213,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.TotalDSCUnitsRequired = 0.0; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (locals->RequiresDSC[i][k] == true) { - if (locals->ODMCombineEnablePerState[i][k] == true) { + if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) { mode_lib->vba.TotalDSCUnitsRequired = mode_lib->vba.TotalDSCUnitsRequired + 2.0; } else { @@ -4249,7 +4255,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.bpp = locals->OutputBppPerState[i][k]; } if (locals->RequiresDSC[i][k] == true && mode_lib->vba.bpp != 0.0) { - if 
(locals->ODMCombineEnablePerState[i][k] == false) { + if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) { locals->DSCDelayPerState[i][k] = dscceComputeDelay( mode_lib->vba.DSCInputBitPerComponent[k], @@ -4292,7 +4298,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { for (j = 0; j < 2; j++) { for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { - if (locals->ODMCombineEnablePerState[i][k] == true) + if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) locals->SwathWidthYPerState[i][j][k] = dml_min(locals->SwathWidthYSingleDPP[k], dml_round(locals->HActive[k] / 2 * locals->HRatio[k])); else locals->SwathWidthYPerState[i][j][k] = locals->SwathWidthYSingleDPP[k] / locals->NoOfDPP[i][j][k]; @@ -4345,28 +4351,28 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l locals->EffectiveDETLBLinesLuma = dml_floor(locals->LinesInDETLuma + dml_min( locals->LinesInDETLuma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETY[k] * - locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i], + locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i][0], locals->EffectiveLBLatencyHidingSourceLinesLuma), locals->SwathHeightYPerState[i][j][k]); locals->EffectiveDETLBLinesChroma = dml_floor(locals->LinesInDETChroma + dml_min( locals->LinesInDETChroma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETC[k] * - locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i], + locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i][0], locals->EffectiveLBLatencyHidingSourceLinesChroma), locals->SwathHeightCPerState[i][j][k]); if (locals->BytePerPixelInDETC[k] == 0) { locals->UrgentLatencySupportUsPerState[i][j][k] = locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k]) / locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] * - dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]); + dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]); } else { locals->UrgentLatencySupportUsPerState[i][j][k] = dml_min( locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k]) / locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] * - dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]), + dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]), locals->EffectiveDETLBLinesChroma * (locals->HTotal[k] / locals->PixelClock[k]) / (locals->VRatio[k] / 2) - locals->EffectiveDETLBLinesChroma * locals->SwathWidthYPerState[i][j][k] / 2 * - dml_ceil(locals->BytePerPixelInDETC[k], 2) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k])); + dml_ceil(locals->BytePerPixelInDETC[k], 2) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k])); } } } @@ -4406,14 +4412,14 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l locals->SwathHeightYThisState[k] = locals->SwathHeightYPerState[i][j][k]; locals->SwathHeightCThisState[k] = locals->SwathHeightCPerState[i][j][k]; locals->SwathWidthYThisState[k] = locals->SwathWidthYPerState[i][j][k]; - mode_lib->vba.ProjectedDCFCLKDeepSleep = dml_max( - mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( + 
mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], mode_lib->vba.PixelClock[k] / 16.0); if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) { if (mode_lib->vba.VRatio[k] <= 1.0) { - mode_lib->vba.ProjectedDCFCLKDeepSleep = + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( - mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], 1.1 * dml_ceil( mode_lib->vba.BytePerPixelInDETY[k], @@ -4423,9 +4429,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l * mode_lib->vba.PixelClock[k] / mode_lib->vba.NoOfDPP[i][j][k]); } else { - mode_lib->vba.ProjectedDCFCLKDeepSleep = + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( - mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], 1.1 * dml_ceil( mode_lib->vba.BytePerPixelInDETY[k], @@ -4436,9 +4442,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } } else { if (mode_lib->vba.VRatio[k] <= 1.0) { - mode_lib->vba.ProjectedDCFCLKDeepSleep = + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( - mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], 1.1 * dml_ceil( mode_lib->vba.BytePerPixelInDETY[k], @@ -4448,9 +4454,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l * mode_lib->vba.PixelClock[k] / mode_lib->vba.NoOfDPP[i][j][k]); } else { - mode_lib->vba.ProjectedDCFCLKDeepSleep = + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( - mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], 1.1 * dml_ceil( mode_lib->vba.BytePerPixelInDETY[k], @@ -4460,9 +4466,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l * mode_lib->vba.RequiredDPPCLK[i][j][k]); } if (mode_lib->vba.VRatio[k] / 2.0 <= 1.0) { - mode_lib->vba.ProjectedDCFCLKDeepSleep = + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( - mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], 1.1 * dml_ceil( mode_lib->vba.BytePerPixelInDETC[k], @@ -4473,9 +4479,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l * mode_lib->vba.PixelClock[k] / mode_lib->vba.NoOfDPP[i][j][k]); } else { - mode_lib->vba.ProjectedDCFCLKDeepSleep = + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( - mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], 1.1 * dml_ceil( mode_lib->vba.BytePerPixelInDETC[k], @@ -4511,7 +4517,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l &mode_lib->vba.PTEBufferSizeNotExceededY[i][j][k], &mode_lib->vba.dpte_row_height[k], &mode_lib->vba.meta_row_height[k]); - mode_lib->vba.PrefetchLinesY[k] = CalculatePrefetchSourceLines( + mode_lib->vba.PrefetchLinesY[0][0][k] = CalculatePrefetchSourceLines( mode_lib, mode_lib->vba.VRatio[k], mode_lib->vba.vtaps[k], @@ -4550,7 +4556,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l &mode_lib->vba.PTEBufferSizeNotExceededC[i][j][k], &mode_lib->vba.dpte_row_height_chroma[k], &mode_lib->vba.meta_row_height_chroma[k]); - mode_lib->vba.PrefetchLinesC[k] = CalculatePrefetchSourceLines( + mode_lib->vba.PrefetchLinesC[0][0][k] = CalculatePrefetchSourceLines( mode_lib, mode_lib->vba.VRatio[k] / 2.0, mode_lib->vba.VTAPsChroma[k], @@ -4564,14 +4570,14 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = 0.0; 
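/*
 * Every ProjectedDCFCLKDeepSleep and PrefetchLines rewrite above is the same
 * mechanical change: scalars and per-plane arrays grow leading dimensions,
 * presumably so a later shared DML code path can index per state and per
 * configuration, while this version keeps writing slot [0][0]. A hedged
 * sketch of the shape; the bounds below are placeholders, not the real DML
 * constants:
 */
#define STATES_SKETCH	2
#define CONFIGS_SKETCH	2
#define PLANES_SKETCH	8

struct vba_vars_sketch {
	/* before: double ProjectedDCFCLKDeepSleep; */
	double ProjectedDCFCLKDeepSleep[STATES_SKETCH][CONFIGS_SKETCH];
	/* before: double PrefetchLinesY[PLANES_SKETCH]; */
	double PrefetchLinesY[STATES_SKETCH][CONFIGS_SKETCH][PLANES_SKETCH];
};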
mode_lib->vba.MetaRowBytesC = 0.0; mode_lib->vba.DPTEBytesPerRowC = 0.0; - locals->PrefetchLinesC[k] = 0.0; + locals->PrefetchLinesC[0][0][k] = 0.0; locals->PTEBufferSizeNotExceededC[i][j][k] = true; locals->PTEBufferSizeInRequestsForLuma = mode_lib->vba.PTEBufferSizeInRequestsLuma + mode_lib->vba.PTEBufferSizeInRequestsChroma; } - locals->PDEAndMetaPTEBytesPerFrame[k] = + locals->PDEAndMetaPTEBytesPerFrame[0][0][k] = mode_lib->vba.PDEAndMetaPTEBytesPerFrameY + mode_lib->vba.PDEAndMetaPTEBytesPerFrameC; - locals->MetaRowBytes[k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC; - locals->DPTEBytesPerRow[k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC; + locals->MetaRowBytes[0][0][k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC; + locals->DPTEBytesPerRow[0][0][k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC; CalculateActiveRowBandwidth( mode_lib->vba.GPUVMEnable, @@ -4598,14 +4604,14 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l + mode_lib->vba.TotalNumberOfDCCActiveDPP[i][j] * mode_lib->vba.MetaChunkSize) * 1024.0 - / mode_lib->vba.ReturnBWPerState[i]; + / mode_lib->vba.ReturnBWPerState[i][0]; if (mode_lib->vba.GPUVMEnable == true) { mode_lib->vba.ExtraLatency = mode_lib->vba.ExtraLatency + mode_lib->vba.TotalNumberOfActiveDPP[i][j] * mode_lib->vba.PTEGroupSize - / mode_lib->vba.ReturnBWPerState[i]; + / mode_lib->vba.ReturnBWPerState[i][0]; } - mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep; + mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0]; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (mode_lib->vba.BlendingAndTiming[k] == k) { @@ -4655,7 +4661,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { - locals->MaximumVStartup[k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k] + locals->MaximumVStartup[0][0][k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k] - dml_max(1.0, dml_ceil(locals->WritebackDelay[i][k] / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1.0)); } @@ -4700,7 +4706,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.RequiredDPPCLK[i][j][k], mode_lib->vba.RequiredDISPCLK[i][j], mode_lib->vba.PixelClock[k], - mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], mode_lib->vba.DSCDelayPerState[i][k], mode_lib->vba.NoOfDPP[i][j][k], mode_lib->vba.ScalerEnabled[k], @@ -4718,7 +4724,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l - mode_lib->vba.VActive[k], mode_lib->vba.HTotal[k], mode_lib->vba.MaxInterDCNTileRepeaters, - mode_lib->vba.MaximumVStartup[k], + mode_lib->vba.MaximumVStartup[0][0][k], mode_lib->vba.GPUVMMaxPageTableLevels, mode_lib->vba.GPUVMEnable, mode_lib->vba.DynamicMetadataEnable[k], @@ -4728,15 +4734,15 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.UrgentLatencyPixelDataOnly, mode_lib->vba.ExtraLatency, mode_lib->vba.TimeCalc, - mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k], - mode_lib->vba.MetaRowBytes[k], - mode_lib->vba.DPTEBytesPerRow[k], - mode_lib->vba.PrefetchLinesY[k], + mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k], + mode_lib->vba.MetaRowBytes[0][0][k], + mode_lib->vba.DPTEBytesPerRow[0][0][k], + mode_lib->vba.PrefetchLinesY[0][0][k], mode_lib->vba.SwathWidthYPerState[i][j][k], 
mode_lib->vba.BytePerPixelInDETY[k], mode_lib->vba.PrefillY[k], mode_lib->vba.MaxNumSwY[k], - mode_lib->vba.PrefetchLinesC[k], + mode_lib->vba.PrefetchLinesC[0][0][k], mode_lib->vba.BytePerPixelInDETC[k], mode_lib->vba.PrefillC[k], mode_lib->vba.MaxNumSwC[k], @@ -4767,19 +4773,19 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l locals->prefetch_vm_bw_valid = true; locals->prefetch_row_bw_valid = true; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { - if (locals->PDEAndMetaPTEBytesPerFrame[k] == 0) + if (locals->PDEAndMetaPTEBytesPerFrame[0][0][k] == 0) locals->prefetch_vm_bw[k] = 0; else if (locals->LinesForMetaPTE[k] > 0) - locals->prefetch_vm_bw[k] = locals->PDEAndMetaPTEBytesPerFrame[k] + locals->prefetch_vm_bw[k] = locals->PDEAndMetaPTEBytesPerFrame[0][0][k] / (locals->LinesForMetaPTE[k] * locals->HTotal[k] / locals->PixelClock[k]); else { locals->prefetch_vm_bw[k] = 0; locals->prefetch_vm_bw_valid = false; } - if (locals->MetaRowBytes[k] + locals->DPTEBytesPerRow[k] == 0) + if (locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k] == 0) locals->prefetch_row_bw[k] = 0; else if (locals->LinesForMetaAndDPTERow[k] > 0) - locals->prefetch_row_bw[k] = (locals->MetaRowBytes[k] + locals->DPTEBytesPerRow[k]) + locals->prefetch_row_bw[k] = (locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k]) / (locals->LinesForMetaAndDPTERow[k] * locals->HTotal[k] / locals->PixelClock[k]); else { locals->prefetch_row_bw[k] = 0; @@ -4798,13 +4804,13 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.RequiredPrefetchPixelDataBWLuma[i][j][k]) + mode_lib->vba.meta_row_bw[k] + mode_lib->vba.dpte_row_bw[k]); } - locals->BandwidthWithoutPrefetchSupported[i] = true; - if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i]) { - locals->BandwidthWithoutPrefetchSupported[i] = false; + locals->BandwidthWithoutPrefetchSupported[i][0] = true; + if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i][0]) { + locals->BandwidthWithoutPrefetchSupported[i][0] = false; } locals->PrefetchSupported[i][j] = true; - if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i]) { + if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i][0]) { locals->PrefetchSupported[i][j] = false; } for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { @@ -4829,7 +4835,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l if (mode_lib->vba.PrefetchSupported[i][j] == true && mode_lib->vba.VRatioInPrefetchSupported[i][j] == true) { mode_lib->vba.BandwidthAvailableForImmediateFlip = - mode_lib->vba.ReturnBWPerState[i]; + mode_lib->vba.ReturnBWPerState[i][0]; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { mode_lib->vba.BandwidthAvailableForImmediateFlip = mode_lib->vba.BandwidthAvailableForImmediateFlip @@ -4843,9 +4849,9 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8 && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) { mode_lib->vba.ImmediateFlipBytes[k] = - mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k] - + mode_lib->vba.MetaRowBytes[k] - + mode_lib->vba.DPTEBytesPerRow[k]; + mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k] + + mode_lib->vba.MetaRowBytes[0][0][k] + + mode_lib->vba.DPTEBytesPerRow[0][0][k]; } } mode_lib->vba.TotImmediateFlipBytes = 0.0; @@ -4873,9 +4879,9 @@ void 
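/*
 * The prefetch bandwidth terms above divide bytes to move by the time the
 * schedule allots them: the VM prefetch bandwidth is the PDE+meta-PTE bytes
 * over (LinesForMetaPTE * line_time), and the diff flags a nonzero byte
 * count with no line budget as invalid. A scalar sketch:
 */
#include <stdbool.h>

static double prefetch_vm_bw_sketch(double pde_meta_pte_bytes,
		double lines_for_meta_pte, double htotal, double pixel_clock,
		bool *valid)
{
	double line_time = htotal / pixel_clock;

	if (pde_meta_pte_bytes == 0.0)
		return 0.0;			/* nothing to fetch */
	if (lines_for_meta_pte <= 0.0) {
		*valid = false;			/* no time budget: unschedulable */
		return 0.0;
	}
	return pde_meta_pte_bytes / (lines_for_meta_pte * line_time);
}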
dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l / mode_lib->vba.PixelClock[k], mode_lib->vba.VRatio[k], mode_lib->vba.Tno_bw[k], - mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k], - mode_lib->vba.MetaRowBytes[k], - mode_lib->vba.DPTEBytesPerRow[k], + mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k], + mode_lib->vba.MetaRowBytes[0][0][k], + mode_lib->vba.DPTEBytesPerRow[0][0][k], mode_lib->vba.DCCEnable[k], mode_lib->vba.dpte_row_height[k], mode_lib->vba.meta_row_height[k], @@ -4900,7 +4906,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } mode_lib->vba.ImmediateFlipSupportedForState[i][j] = true; if (mode_lib->vba.total_dcn_read_bw_with_flip - > mode_lib->vba.ReturnBWPerState[i]) { + > mode_lib->vba.ReturnBWPerState[i][0]) { mode_lib->vba.ImmediateFlipSupportedForState[i][j] = false; } for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { @@ -4919,13 +4925,13 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; k++) mode_lib->vba.MaxTotalVActiveRDBandwidth = mode_lib->vba.MaxTotalVActiveRDBandwidth + mode_lib->vba.ReadBandwidth[k]; for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { - mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i] = dml_min(mode_lib->vba.ReturnBusWidth * + mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][0] = dml_min(mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKPerState[i], mode_lib->vba.FabricAndDRAMBandwidthPerState[i] * 1000) * mode_lib->vba.MaxAveragePercentOfIdealDRAMBWDisplayCanUseInNormalSystemOperation / 100; - if (mode_lib->vba.MaxTotalVActiveRDBandwidth <= mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i]) - mode_lib->vba.TotalVerticalActiveBandwidthSupport[i] = true; + if (mode_lib->vba.MaxTotalVActiveRDBandwidth <= mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][0]) + mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] = true; else - mode_lib->vba.TotalVerticalActiveBandwidthSupport[i] = false; + mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] = false; } /*PTE Buffer Size Check*/ @@ -5013,7 +5019,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l status = DML_FAIL_SCALE_RATIO_TAP; } else if (mode_lib->vba.SourceFormatPixelAndScanSupport != true) { status = DML_FAIL_SOURCE_PIXEL_FORMAT; - } else if (locals->ViewportSizeSupport[i] != true) { + } else if (locals->ViewportSizeSupport[i][0] != true) { status = DML_FAIL_VIEWPORT_SIZE; } else if (locals->DIOSupport[i] != true) { status = DML_FAIL_DIO_SUPPORT; @@ -5023,7 +5029,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l status = DML_FAIL_DSC_CLK_REQUIRED; } else if (locals->UrgentLatencySupport[i][j] != true) { status = DML_FAIL_URGENT_LATENCY; - } else if (locals->ROBSupport[i] != true) { + } else if (locals->ROBSupport[i][0] != true) { status = DML_FAIL_REORDERING_BUFFER; } else if (locals->DISPCLK_DPPCLK_Support[i][j] != true) { status = DML_FAIL_DISPCLK_DPPCLK; @@ -5043,7 +5049,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l status = DML_FAIL_PITCH_SUPPORT; } else if (locals->PrefetchSupported[i][j] != true) { status = DML_FAIL_PREFETCH_SUPPORT; - } else if (locals->TotalVerticalActiveBandwidthSupport[i] != true) { + } else if (locals->TotalVerticalActiveBandwidthSupport[i][0] != true) { status = DML_FAIL_TOTAL_V_ACTIVE_BW; } else if (locals->VRatioInPrefetchSupported[i][j] != true) { status 
= DML_FAIL_V_RATIO_PREFETCH; @@ -5089,7 +5095,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.DRAMSpeed = mode_lib->vba.DRAMSpeedPerState[mode_lib->vba.VoltageLevel]; mode_lib->vba.FabricClock = mode_lib->vba.FabricClockPerState[mode_lib->vba.VoltageLevel]; mode_lib->vba.SOCCLK = mode_lib->vba.SOCCLKPerState[mode_lib->vba.VoltageLevel]; - mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel]; + mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel][0]; mode_lib->vba.FabricAndDRAMBandwidth = locals->FabricAndDRAMBandwidthPerState[mode_lib->vba.VoltageLevel]; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (mode_lib->vba.BlendingAndTiming[k] == k) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c index 6482d7b99bae19769212d92dc4ddad5e01377239..485a9c62ec586aa83bc12861fd9b091d3872be43 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c @@ -39,6 +39,7 @@ #define BPP_INVALID 0 #define BPP_BLENDED_PIPE 0xffffffff #define DCN20_MAX_DSC_IMAGE_WIDTH 5184 +#define DCN20_MAX_420_IMAGE_WIDTH 4096 static double adjust_ReturnBW( struct display_mode_lib *mode_lib, @@ -997,7 +998,7 @@ static unsigned int CalculateVMAndRowBytes( *MetaRowByte = 0; } - if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_lvp) { + if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_l_vp) { MacroTileSizeBytes = 256; MacroTileHeight = BlockHeight256Bytes; } else if (SurfaceTiling == dm_sw_4kb_s || SurfaceTiling == dm_sw_4kb_s_x @@ -1395,11 +1396,11 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP else mode_lib->vba.SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportHeight[k]; - if (mode_lib->vba.ODMCombineEnabled[k] == true) + if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) MainPlaneDoesODMCombine = true; for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) if (mode_lib->vba.BlendingAndTiming[k] == j - && mode_lib->vba.ODMCombineEnabled[j] == true) + && mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) MainPlaneDoesODMCombine = true; if (MainPlaneDoesODMCombine == true) @@ -2885,12 +2886,12 @@ static void dml20v2_DisplayPipeConfiguration(struct display_mode_lib *mode_lib) SwathWidth = mode_lib->vba.ViewportHeight[k]; } - if (mode_lib->vba.ODMCombineEnabled[k] == true) { + if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) { MainPlaneDoesODMCombine = true; } for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) { if (mode_lib->vba.BlendingAndTiming[k] == j - && mode_lib->vba.ODMCombineEnabled[j] == true) { + && mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) { MainPlaneDoesODMCombine = true; } } @@ -3385,7 +3386,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode == dm_420_10)) || (((mode_lib->vba.SurfaceTiling[k] == dm_sw_gfx7_2d_thin_gl || mode_lib->vba.SurfaceTiling[k] - == dm_sw_gfx7_2d_thin_lvp) + == dm_sw_gfx7_2d_thin_l_vp) && !((mode_lib->vba.SourcePixelFormat[k] == dm_444_64 || mode_lib->vba.SourcePixelFormat[k] @@ -3483,10 +3484,10 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode 
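/*
 * The `== true` to `== dm_odm_combine_mode_2to1` rewrites matter once the
 * field is an enum: in C, `true` is just 1, so an `m == true` test would
 * silently match only whichever member happens to have the value 1. A
 * hedged illustration with hypothetical values:
 */
enum odm_mode_sketch {
	odm_disabled_sketch = 0,
	odm_2to1_sketch = 1,
	odm_4to1_sketch = 2,	/* hypothetical future member */
};

static int plane_combines(enum odm_mode_sketch m)
{
	/* (m == true) would miss odm_4to1_sketch; compare members instead */
	return m != odm_disabled_sketch;
}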
locals->FabricAndDRAMBandwidthPerState[i] * 1000) * locals->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly / 100; - locals->ReturnBWPerState[i] = locals->ReturnBWToDCNPerState; + locals->ReturnBWPerState[i][0] = locals->ReturnBWToDCNPerState; if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) { - locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i], + locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0], locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency / ((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024 / (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i] @@ -3497,7 +3498,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode + (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024); if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) { - locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i], + locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0], 4 * locals->ReturnBWToDCNPerState * (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024 * locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency / @@ -3509,7 +3510,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode locals->DCFCLKPerState[i], locals->FabricAndDRAMBandwidthPerState[i] * 1000); if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) { - locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i], + locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0], locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency / ((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024 / (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i] @@ -3520,7 +3521,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode + (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024); if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) { - locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i], + locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0], 4 * locals->ReturnBWToDCNPerState * (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024 * locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency / @@ -3558,12 +3559,12 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i] = (mode_lib->vba.RoundTripPingLatencyCycles + 32.0) / mode_lib->vba.DCFCLKPerState[i] - + locals->UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i]; - if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i] + + locals->UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i][0]; + if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i][0] > locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i]) { - locals->ROBSupport[i] = true; + locals->ROBSupport[i][0] = true; } else { - locals->ROBSupport[i] = false; + 
locals->ROBSupport[i][0] = false; } } /*Writeback Mode Support Check*/ @@ -3935,18 +3936,25 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode && i == mode_lib->vba.soc.num_states) mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); - if (mode_lib->vba.ODMCapability == false || - (locals->PlaneRequiredDISPCLKWithoutODMCombine <= MaxMaxDispclkRoundedDown - && (!locals->DSCEnabled[k] || locals->HActive[k] <= DCN20_MAX_DSC_IMAGE_WIDTH))) { - locals->ODMCombineEnablePerState[i][k] = false; - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; - } else { - locals->ODMCombineEnablePerState[i][k] = true; - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + + locals->ODMCombineEnablePerState[i][k] = false; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; + if (mode_lib->vba.ODMCapability) { + if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) { + locals->ODMCombineEnablePerState[i][k] = true; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + } else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN20_MAX_DSC_IMAGE_WIDTH)) { + locals->ODMCombineEnablePerState[i][k] = true; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + } else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) { + locals->ODMCombineEnablePerState[i][k] = true; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + } } + if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity && locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k] - && locals->ODMCombineEnablePerState[i][k] == false) { + && locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) { locals->NoOfDPP[i][j][k] = 1; locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); @@ -4035,16 +4043,16 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode /*Viewport Size Check*/ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { - locals->ViewportSizeSupport[i] = true; + locals->ViewportSizeSupport[i][0] = true; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { - if (locals->ODMCombineEnablePerState[i][k] == true) { + if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) { if (dml_min(locals->SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k])) > locals->MaximumSwathWidth[k]) { - locals->ViewportSizeSupport[i] = false; + locals->ViewportSizeSupport[i][0] = false; } } else { if (locals->SwathWidthYSingleDPP[k] / 2.0 > locals->MaximumSwathWidth[k]) { - locals->ViewportSizeSupport[i] = false; + locals->ViewportSizeSupport[i][0] = false; } } } @@ -4226,8 +4234,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode mode_lib->vba.DSCFormatFactor = 1; } if (locals->RequiresDSC[i][k] == true) { - if (locals->ODMCombineEnablePerState[i][k] - == true) { + if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) { if (mode_lib->vba.PixelClockBackEnd[k] / 6.0 / mode_lib->vba.DSCFormatFactor > (1.0 - 
mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i]) { locals->DSCCLKRequiredMoreThanSupported[i] = @@ -4250,7 +4257,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode mode_lib->vba.TotalDSCUnitsRequired = 0.0; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (locals->RequiresDSC[i][k] == true) { - if (locals->ODMCombineEnablePerState[i][k] == true) { + if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) { mode_lib->vba.TotalDSCUnitsRequired = mode_lib->vba.TotalDSCUnitsRequired + 2.0; } else { @@ -4292,7 +4299,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode mode_lib->vba.bpp = locals->OutputBppPerState[i][k]; } if (locals->RequiresDSC[i][k] == true && mode_lib->vba.bpp != 0.0) { - if (locals->ODMCombineEnablePerState[i][k] == false) { + if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) { locals->DSCDelayPerState[i][k] = dscceComputeDelay( mode_lib->vba.DSCInputBitPerComponent[k], @@ -4335,7 +4342,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { for (j = 0; j < 2; j++) { for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { - if (locals->ODMCombineEnablePerState[i][k] == true) + if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) locals->SwathWidthYPerState[i][j][k] = dml_min(locals->SwathWidthYSingleDPP[k], dml_round(locals->HActive[k] / 2 * locals->HRatio[k])); else locals->SwathWidthYPerState[i][j][k] = locals->SwathWidthYSingleDPP[k] / locals->NoOfDPP[i][j][k]; @@ -4388,28 +4395,28 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode locals->EffectiveDETLBLinesLuma = dml_floor(locals->LinesInDETLuma + dml_min( locals->LinesInDETLuma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETY[k] * - locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i], + locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i][0], locals->EffectiveLBLatencyHidingSourceLinesLuma), locals->SwathHeightYPerState[i][j][k]); locals->EffectiveDETLBLinesChroma = dml_floor(locals->LinesInDETChroma + dml_min( locals->LinesInDETChroma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETC[k] * - locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i], + locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i][0], locals->EffectiveLBLatencyHidingSourceLinesChroma), locals->SwathHeightCPerState[i][j][k]); if (locals->BytePerPixelInDETC[k] == 0) { locals->UrgentLatencySupportUsPerState[i][j][k] = locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k]) / locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] * - dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]); + dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]); } else { locals->UrgentLatencySupportUsPerState[i][j][k] = dml_min( locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k]) / locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] * - dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]), + dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]), locals->EffectiveDETLBLinesChroma * (locals->HTotal[k] / 
locals->PixelClock[k]) / (locals->VRatio[k] / 2) - locals->EffectiveDETLBLinesChroma * locals->SwathWidthYPerState[i][j][k] / 2 * - dml_ceil(locals->BytePerPixelInDETC[k], 2) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k])); + dml_ceil(locals->BytePerPixelInDETC[k], 2) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k])); } } } @@ -4454,14 +4461,14 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode locals->SwathHeightYThisState[k] = locals->SwathHeightYPerState[i][j][k]; locals->SwathHeightCThisState[k] = locals->SwathHeightCPerState[i][j][k]; locals->SwathWidthYThisState[k] = locals->SwathWidthYPerState[i][j][k]; - mode_lib->vba.ProjectedDCFCLKDeepSleep = dml_max( - mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], mode_lib->vba.PixelClock[k] / 16.0); if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) { if (mode_lib->vba.VRatio[k] <= 1.0) { - mode_lib->vba.ProjectedDCFCLKDeepSleep = + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( - mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], 1.1 * dml_ceil( mode_lib->vba.BytePerPixelInDETY[k], @@ -4471,9 +4478,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode * mode_lib->vba.PixelClock[k] / mode_lib->vba.NoOfDPP[i][j][k]); } else { - mode_lib->vba.ProjectedDCFCLKDeepSleep = + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( - mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], 1.1 * dml_ceil( mode_lib->vba.BytePerPixelInDETY[k], @@ -4484,9 +4491,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode } } else { if (mode_lib->vba.VRatio[k] <= 1.0) { - mode_lib->vba.ProjectedDCFCLKDeepSleep = + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( - mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], 1.1 * dml_ceil( mode_lib->vba.BytePerPixelInDETY[k], @@ -4496,9 +4503,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode * mode_lib->vba.PixelClock[k] / mode_lib->vba.NoOfDPP[i][j][k]); } else { - mode_lib->vba.ProjectedDCFCLKDeepSleep = + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( - mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], 1.1 * dml_ceil( mode_lib->vba.BytePerPixelInDETY[k], @@ -4508,9 +4515,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode * mode_lib->vba.RequiredDPPCLK[i][j][k]); } if (mode_lib->vba.VRatio[k] / 2.0 <= 1.0) { - mode_lib->vba.ProjectedDCFCLKDeepSleep = + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( - mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], 1.1 * dml_ceil( mode_lib->vba.BytePerPixelInDETC[k], @@ -4521,9 +4528,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode * mode_lib->vba.PixelClock[k] / mode_lib->vba.NoOfDPP[i][j][k]); } else { - mode_lib->vba.ProjectedDCFCLKDeepSleep = + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( - mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], 1.1 * dml_ceil( mode_lib->vba.BytePerPixelInDETC[k], @@ -4559,7 +4566,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode &mode_lib->vba.PTEBufferSizeNotExceededY[i][j][k], &mode_lib->vba.dpte_row_height[k], 
&mode_lib->vba.meta_row_height[k]); - mode_lib->vba.PrefetchLinesY[k] = CalculatePrefetchSourceLines( + mode_lib->vba.PrefetchLinesY[0][0][k] = CalculatePrefetchSourceLines( mode_lib, mode_lib->vba.VRatio[k], mode_lib->vba.vtaps[k], @@ -4598,7 +4605,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode &mode_lib->vba.PTEBufferSizeNotExceededC[i][j][k], &mode_lib->vba.dpte_row_height_chroma[k], &mode_lib->vba.meta_row_height_chroma[k]); - mode_lib->vba.PrefetchLinesC[k] = CalculatePrefetchSourceLines( + mode_lib->vba.PrefetchLinesC[0][0][k] = CalculatePrefetchSourceLines( mode_lib, mode_lib->vba.VRatio[k] / 2.0, mode_lib->vba.VTAPsChroma[k], @@ -4612,14 +4619,14 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = 0.0; mode_lib->vba.MetaRowBytesC = 0.0; mode_lib->vba.DPTEBytesPerRowC = 0.0; - locals->PrefetchLinesC[k] = 0.0; + locals->PrefetchLinesC[0][0][k] = 0.0; locals->PTEBufferSizeNotExceededC[i][j][k] = true; locals->PTEBufferSizeInRequestsForLuma = mode_lib->vba.PTEBufferSizeInRequestsLuma + mode_lib->vba.PTEBufferSizeInRequestsChroma; } - locals->PDEAndMetaPTEBytesPerFrame[k] = + locals->PDEAndMetaPTEBytesPerFrame[0][0][k] = mode_lib->vba.PDEAndMetaPTEBytesPerFrameY + mode_lib->vba.PDEAndMetaPTEBytesPerFrameC; - locals->MetaRowBytes[k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC; - locals->DPTEBytesPerRow[k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC; + locals->MetaRowBytes[0][0][k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC; + locals->DPTEBytesPerRow[0][0][k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC; CalculateActiveRowBandwidth( mode_lib->vba.GPUVMEnable, @@ -4646,14 +4653,14 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode + mode_lib->vba.TotalNumberOfDCCActiveDPP[i][j] * mode_lib->vba.MetaChunkSize) * 1024.0 - / mode_lib->vba.ReturnBWPerState[i]; + / mode_lib->vba.ReturnBWPerState[i][0]; if (mode_lib->vba.GPUVMEnable == true) { mode_lib->vba.ExtraLatency = mode_lib->vba.ExtraLatency + mode_lib->vba.TotalNumberOfActiveDPP[i][j] * mode_lib->vba.PTEGroupSize - / mode_lib->vba.ReturnBWPerState[i]; + / mode_lib->vba.ReturnBWPerState[i][0]; } - mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep; + mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0]; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (mode_lib->vba.BlendingAndTiming[k] == k) { @@ -4703,7 +4710,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode } for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { - locals->MaximumVStartup[k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k] + locals->MaximumVStartup[0][0][k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k] - dml_max(1.0, dml_ceil(locals->WritebackDelay[i][k] / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1.0)); } @@ -4743,7 +4750,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0.0; } - CalculateDelayAfterScaler(mode_lib, mode_lib->vba.ReturnBWPerState[i], mode_lib->vba.ReadBandwidthLuma[k], mode_lib->vba.ReadBandwidthChroma[k], mode_lib->vba.MaxTotalVActiveRDBandwidth, + CalculateDelayAfterScaler(mode_lib, mode_lib->vba.ReturnBWPerState[i][0], mode_lib->vba.ReadBandwidthLuma[k], mode_lib->vba.ReadBandwidthChroma[k], 
mode_lib->vba.MaxTotalVActiveRDBandwidth, mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k], mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k], mode_lib->vba.RequiredDPPCLK[i][j][k], mode_lib->vba.RequiredDISPCLK[i][j], mode_lib->vba.PixelClock[k], mode_lib->vba.DSCDelayPerState[i][k], mode_lib->vba.NoOfDPP[i][j][k], mode_lib->vba.ScalerEnabled[k], mode_lib->vba.NumberOfCursors[k], mode_lib->vba.DPPCLKDelaySubtotal, mode_lib->vba.DPPCLKDelaySCL, mode_lib->vba.DPPCLKDelaySCLLBOnly, mode_lib->vba.DPPCLKDelayCNVCFormater, mode_lib->vba.DPPCLKDelayCNVCCursor, mode_lib->vba.DISPCLKDelaySubtotal, @@ -4757,14 +4764,14 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode mode_lib->vba.RequiredDPPCLK[i][j][k], mode_lib->vba.RequiredDISPCLK[i][j], mode_lib->vba.PixelClock[k], - mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], mode_lib->vba.NoOfDPP[i][j][k], mode_lib->vba.NumberOfCursors[k], mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k], mode_lib->vba.HTotal[k], mode_lib->vba.MaxInterDCNTileRepeaters, - mode_lib->vba.MaximumVStartup[k], + mode_lib->vba.MaximumVStartup[0][0][k], mode_lib->vba.GPUVMMaxPageTableLevels, mode_lib->vba.GPUVMEnable, mode_lib->vba.DynamicMetadataEnable[k], @@ -4774,15 +4781,15 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode mode_lib->vba.UrgentLatencyPixelDataOnly, mode_lib->vba.ExtraLatency, mode_lib->vba.TimeCalc, - mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k], - mode_lib->vba.MetaRowBytes[k], - mode_lib->vba.DPTEBytesPerRow[k], - mode_lib->vba.PrefetchLinesY[k], + mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k], + mode_lib->vba.MetaRowBytes[0][0][k], + mode_lib->vba.DPTEBytesPerRow[0][0][k], + mode_lib->vba.PrefetchLinesY[0][0][k], mode_lib->vba.SwathWidthYPerState[i][j][k], mode_lib->vba.BytePerPixelInDETY[k], mode_lib->vba.PrefillY[k], mode_lib->vba.MaxNumSwY[k], - mode_lib->vba.PrefetchLinesC[k], + mode_lib->vba.PrefetchLinesC[0][0][k], mode_lib->vba.BytePerPixelInDETC[k], mode_lib->vba.PrefillC[k], mode_lib->vba.MaxNumSwC[k], @@ -4812,19 +4819,19 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode locals->prefetch_vm_bw_valid = true; locals->prefetch_row_bw_valid = true; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { - if (locals->PDEAndMetaPTEBytesPerFrame[k] == 0) + if (locals->PDEAndMetaPTEBytesPerFrame[0][0][k] == 0) locals->prefetch_vm_bw[k] = 0; else if (locals->LinesForMetaPTE[k] > 0) - locals->prefetch_vm_bw[k] = locals->PDEAndMetaPTEBytesPerFrame[k] + locals->prefetch_vm_bw[k] = locals->PDEAndMetaPTEBytesPerFrame[0][0][k] / (locals->LinesForMetaPTE[k] * locals->HTotal[k] / locals->PixelClock[k]); else { locals->prefetch_vm_bw[k] = 0; locals->prefetch_vm_bw_valid = false; } - if (locals->MetaRowBytes[k] + locals->DPTEBytesPerRow[k] == 0) + if (locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k] == 0) locals->prefetch_row_bw[k] = 0; else if (locals->LinesForMetaAndDPTERow[k] > 0) - locals->prefetch_row_bw[k] = (locals->MetaRowBytes[k] + locals->DPTEBytesPerRow[k]) + locals->prefetch_row_bw[k] = (locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k]) / (locals->LinesForMetaAndDPTERow[k] * locals->HTotal[k] / locals->PixelClock[k]); else { locals->prefetch_row_bw[k] = 0; @@ -4843,13 +4850,13 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode mode_lib->vba.RequiredPrefetchPixelDataBWLuma[i][j][k]) + mode_lib->vba.meta_row_bw[k] + 
mode_lib->vba.dpte_row_bw[k]); } - locals->BandwidthWithoutPrefetchSupported[i] = true; - if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i]) { - locals->BandwidthWithoutPrefetchSupported[i] = false; + locals->BandwidthWithoutPrefetchSupported[i][0] = true; + if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i][0]) { + locals->BandwidthWithoutPrefetchSupported[i][0] = false; } locals->PrefetchSupported[i][j] = true; - if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i]) { + if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i][0]) { locals->PrefetchSupported[i][j] = false; } for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { @@ -4874,7 +4881,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode if (mode_lib->vba.PrefetchSupported[i][j] == true && mode_lib->vba.VRatioInPrefetchSupported[i][j] == true) { mode_lib->vba.BandwidthAvailableForImmediateFlip = - mode_lib->vba.ReturnBWPerState[i]; + mode_lib->vba.ReturnBWPerState[i][0]; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { mode_lib->vba.BandwidthAvailableForImmediateFlip = mode_lib->vba.BandwidthAvailableForImmediateFlip @@ -4888,9 +4895,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8 && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) { mode_lib->vba.ImmediateFlipBytes[k] = - mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k] - + mode_lib->vba.MetaRowBytes[k] - + mode_lib->vba.DPTEBytesPerRow[k]; + mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k] + + mode_lib->vba.MetaRowBytes[0][0][k] + + mode_lib->vba.DPTEBytesPerRow[0][0][k]; } } mode_lib->vba.TotImmediateFlipBytes = 0.0; @@ -4918,9 +4925,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode / mode_lib->vba.PixelClock[k], mode_lib->vba.VRatio[k], mode_lib->vba.Tno_bw[k], - mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k], - mode_lib->vba.MetaRowBytes[k], - mode_lib->vba.DPTEBytesPerRow[k], + mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k], + mode_lib->vba.MetaRowBytes[0][0][k], + mode_lib->vba.DPTEBytesPerRow[0][0][k], mode_lib->vba.DCCEnable[k], mode_lib->vba.dpte_row_height[k], mode_lib->vba.meta_row_height[k], @@ -4945,7 +4952,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode } mode_lib->vba.ImmediateFlipSupportedForState[i][j] = true; if (mode_lib->vba.total_dcn_read_bw_with_flip - > mode_lib->vba.ReturnBWPerState[i]) { + > mode_lib->vba.ReturnBWPerState[i][0]) { mode_lib->vba.ImmediateFlipSupportedForState[i][j] = false; } for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { @@ -4961,13 +4968,13 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode /*Vertical Active BW support*/ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { - mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i] = dml_min(mode_lib->vba.ReturnBusWidth * + mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][0] = dml_min(mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKPerState[i], mode_lib->vba.FabricAndDRAMBandwidthPerState[i] * 1000) * mode_lib->vba.MaxAveragePercentOfIdealDRAMBWDisplayCanUseInNormalSystemOperation / 100; - if (mode_lib->vba.MaxTotalVActiveRDBandwidth <= mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i]) - mode_lib->vba.TotalVerticalActiveBandwidthSupport[i] = true; + if 
(mode_lib->vba.MaxTotalVActiveRDBandwidth <= mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][0]) + mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] = true; else - mode_lib->vba.TotalVerticalActiveBandwidthSupport[i] = false; + mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] = false; } /*PTE Buffer Size Check*/ @@ -5055,7 +5062,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode status = DML_FAIL_SCALE_RATIO_TAP; } else if (mode_lib->vba.SourceFormatPixelAndScanSupport != true) { status = DML_FAIL_SOURCE_PIXEL_FORMAT; - } else if (locals->ViewportSizeSupport[i] != true) { + } else if (locals->ViewportSizeSupport[i][0] != true) { status = DML_FAIL_VIEWPORT_SIZE; } else if (locals->DIOSupport[i] != true) { status = DML_FAIL_DIO_SUPPORT; @@ -5065,7 +5072,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode status = DML_FAIL_DSC_CLK_REQUIRED; } else if (locals->UrgentLatencySupport[i][j] != true) { status = DML_FAIL_URGENT_LATENCY; - } else if (locals->ROBSupport[i] != true) { + } else if (locals->ROBSupport[i][0] != true) { status = DML_FAIL_REORDERING_BUFFER; } else if (locals->DISPCLK_DPPCLK_Support[i][j] != true) { status = DML_FAIL_DISPCLK_DPPCLK; @@ -5085,7 +5092,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode status = DML_FAIL_PITCH_SUPPORT; } else if (locals->PrefetchSupported[i][j] != true) { status = DML_FAIL_PREFETCH_SUPPORT; - } else if (locals->TotalVerticalActiveBandwidthSupport[i] != true) { + } else if (locals->TotalVerticalActiveBandwidthSupport[i][0] != true) { status = DML_FAIL_TOTAL_V_ACTIVE_BW; } else if (locals->VRatioInPrefetchSupported[i][j] != true) { status = DML_FAIL_V_RATIO_PREFETCH; @@ -5131,7 +5138,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode mode_lib->vba.DRAMSpeed = mode_lib->vba.DRAMSpeedPerState[mode_lib->vba.VoltageLevel]; mode_lib->vba.FabricClock = mode_lib->vba.FabricClockPerState[mode_lib->vba.VoltageLevel]; mode_lib->vba.SOCCLK = mode_lib->vba.SOCCLKPerState[mode_lib->vba.VoltageLevel]; - mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel]; + mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel][0]; mode_lib->vba.FabricAndDRAMBandwidth = locals->FabricAndDRAMBandwidthPerState[mode_lib->vba.VoltageLevel]; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (mode_lib->vba.BlendingAndTiming[k] == k) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c index 9df24ececcecb080dceed8d66d9f91baa8fea437..ca807846032f561c631025a1f47dce9d6ddf780f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c @@ -107,10 +107,10 @@ static unsigned int get_bytes_per_element(enum source_format_class source_format static bool is_dual_plane(enum source_format_class source_format) { - bool ret_val = 0; + bool ret_val = false; if ((source_format == dm_420_8) || (source_format == dm_420_10)) - ret_val = 1; + ret_val = true; return ret_val; } @@ -240,8 +240,8 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib, unsigned int swath_bytes_c = 0; unsigned int full_swath_bytes_packed_l = 0; unsigned int full_swath_bytes_packed_c = 0; - bool req128_l = 0; - bool req128_c = 0; + bool req128_l = false; + bool req128_c = false; bool surf_linear = 
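/*
 * The rq/dlg cleanups above replace 0/1 with false/true on every bool; the
 * tidied helper reads as in the hunk. A self-contained copy for reference
 * (the enum values are stand-ins for the real source_format_class members):
 */
#include <stdbool.h>

enum source_format_sketch { dm_444_32_sk, dm_420_8_sk, dm_420_10_sk };

static bool is_dual_plane_sketch(enum source_format_sketch f)
{
	/* only the 4:2:0 formats carry a separate chroma plane */
	return f == dm_420_8_sk || f == dm_420_10_sk;
}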
(pipe_src_param.sw_mode == dm_sw_linear); bool surf_vert = (pipe_src_param.source_scan == dm_vert); unsigned int log2_swath_height_l = 0; @@ -264,13 +264,13 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib, total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c; if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request - req128_l = 0; - req128_c = 0; + req128_l = false; + req128_c = false; swath_bytes_l = full_swath_bytes_packed_l; swath_bytes_c = full_swath_bytes_packed_c; } else { //128b request (for luma only for yuv420 8bpc) - req128_l = 1; - req128_c = 0; + req128_l = true; + req128_c = false; swath_bytes_l = full_swath_bytes_packed_l / 2; swath_bytes_c = full_swath_bytes_packed_c; } @@ -280,9 +280,9 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib, total_swath_bytes = 2 * full_swath_bytes_packed_l; if (total_swath_bytes <= detile_buf_size_in_bytes) - req128_l = 0; + req128_l = false; else - req128_l = 1; + req128_l = true; swath_bytes_l = total_swath_bytes; swath_bytes_c = 0; @@ -670,7 +670,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, const display_pipe_source_params_st pipe_src_param, bool is_chroma) { - bool mode_422 = 0; + bool mode_422 = false; unsigned int vp_width = 0; unsigned int vp_height = 0; unsigned int data_pitch = 0; @@ -958,7 +958,7 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, // Source // dcc_en = src.dcc; dual_plane = is_dual_plane((enum source_format_class)(src->source_format)); - mode_422 = 0; // TODO + mode_422 = false; // TODO access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed // bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0); // bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c index 1e6aeb1bd2bf6f0a910b3884aa538ac1714b8b09..287b7a0ad108c1e693edd3fc168aa3a66d0a185e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c @@ -107,10 +107,10 @@ static unsigned int get_bytes_per_element(enum source_format_class source_format static bool is_dual_plane(enum source_format_class source_format) { - bool ret_val = 0; + bool ret_val = false; if ((source_format == dm_420_8) || (source_format == dm_420_10)) - ret_val = 1; + ret_val = true; return ret_val; } @@ -240,8 +240,8 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib, unsigned int swath_bytes_c = 0; unsigned int full_swath_bytes_packed_l = 0; unsigned int full_swath_bytes_packed_c = 0; - bool req128_l = 0; - bool req128_c = 0; + bool req128_l = false; + bool req128_c = false; bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear); bool surf_vert = (pipe_src_param.source_scan == dm_vert); unsigned int log2_swath_height_l = 0; @@ -264,13 +264,13 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib, total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c; if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request - req128_l = 0; - req128_c = 0; + req128_l = false; + req128_c = false; swath_bytes_l = full_swath_bytes_packed_l; swath_bytes_c = full_swath_bytes_packed_c; } else { //128b request (for luma only for yuv420 8bpc) - 
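/*
 * handle_det_buf_split() picks the request size by whether both planes'
 * full swaths fit in the detile buffer; the bools above now say so
 * directly. A sketch of the dual-plane branch as the hunk shows it
 * (sizes in bytes):
 */
#include <stdbool.h>

static void det_buf_split_sketch(unsigned int full_swath_l,
		unsigned int full_swath_c, unsigned int detile_buf_size,
		bool *req128_l, bool *req128_c)
{
	unsigned int total = 2 * full_swath_l + 2 * full_swath_c;

	if (total <= detile_buf_size) {
		*req128_l = false;	/* full 256B requests fit */
		*req128_c = false;
	} else {
		*req128_l = true;	/* halve luma: 128B requests */
		*req128_c = false;
	}
}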
req128_l = 1; - req128_c = 0; + req128_l = true; + req128_c = false; swath_bytes_l = full_swath_bytes_packed_l / 2; swath_bytes_c = full_swath_bytes_packed_c; } @@ -280,9 +280,9 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib, total_swath_bytes = 2 * full_swath_bytes_packed_l; if (total_swath_bytes <= detile_buf_size_in_bytes) - req128_l = 0; + req128_l = false; else - req128_l = 1; + req128_l = true; swath_bytes_l = total_swath_bytes; swath_bytes_c = 0; @@ -670,7 +670,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, const display_pipe_source_params_st pipe_src_param, bool is_chroma) { - bool mode_422 = 0; + bool mode_422 = false; unsigned int vp_width = 0; unsigned int vp_height = 0; unsigned int data_pitch = 0; @@ -959,7 +959,7 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, // Source // dcc_en = src.dcc; dual_plane = is_dual_plane((enum source_format_class)(src->source_format)); - mode_422 = 0; // TODO + mode_422 = false; // TODO access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed // bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0); // bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c index 945291d5ad985762652d90bad39ab01a251a4d9c..e6617c958bb8bfb26ef690facecec0432ae43fb2 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c @@ -65,6 +65,7 @@ typedef struct { #define BPP_INVALID 0 #define BPP_BLENDED_PIPE 0xffffffff #define DCN21_MAX_DSC_IMAGE_WIDTH 5184 +#define DCN21_MAX_420_IMAGE_WIDTH 4096 static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib); static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation( @@ -197,7 +198,7 @@ static unsigned int CalculateVMAndRowBytes( unsigned int *meta_row_width, unsigned int *meta_row_height, unsigned int *vm_group_bytes, - long *dpte_group_bytes, + unsigned int *dpte_group_bytes, unsigned int *PixelPTEReqWidth, unsigned int *PixelPTEReqHeight, unsigned int *PTERequestSize, @@ -295,7 +296,7 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport( double UrgentOutOfOrderReturn, double ReturnBW, bool GPUVMEnable, - long dpte_group_bytes[], + int dpte_group_bytes[], unsigned int MetaChunkSize, double UrgentLatency, double ExtraLatency, @@ -309,13 +310,13 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport( int DPPPerPlane[], bool DCCEnable[], double DPPCLK[], - unsigned int SwathWidthSingleDPPY[], + double SwathWidthSingleDPPY[], unsigned int SwathHeightY[], double ReadBandwidthPlaneLuma[], unsigned int SwathHeightC[], double ReadBandwidthPlaneChroma[], unsigned int LBBitPerPixel[], - unsigned int SwathWidthY[], + double SwathWidthY[], double HRatio[], unsigned int vtaps[], unsigned int VTAPsChroma[], @@ -344,7 +345,7 @@ static void CalculateDCFCLKDeepSleep( double BytePerPixelDETY[], double BytePerPixelDETC[], double VRatio[], - unsigned int SwathWidthY[], + double SwathWidthY[], int DPPPerPlane[], double HRatio[], double PixelClock[], @@ -435,7 +436,7 @@ static void CalculateMetaAndPTETimes( unsigned int meta_row_height[], unsigned int meta_req_width[], unsigned int meta_req_height[], - long dpte_group_bytes[], + int dpte_group_bytes[], unsigned int 
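/*
 * The long-to-int changes in the dcn21 prototypes above appear to keep the
 * dpte_group_bytes pointer type in step with the storage it addresses; on
 * an LP64 target a `long *` formal over `int` storage would read and write
 * the wrong width. A minimal illustration with a hypothetical helper:
 */
static void fill_dpte_group_bytes(unsigned int *dpte_group_bytes, int n)
{
	int i;

	for (i = 0; i < n; i++)
		dpte_group_bytes[i] = 2048;	/* same width as backing array */
}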
PTERequestSizeY[], unsigned int PTERequestSizeC[], unsigned int PixelPTEReqWidthY[], @@ -477,7 +478,7 @@ static double CalculateExtraLatency( bool HostVMEnable, int NumberOfActivePlanes, int NumberOfDPP[], - long dpte_group_bytes[], + int dpte_group_bytes[], double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData, double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly, int HostVMMaxPageTableLevels, @@ -1280,7 +1281,7 @@ static unsigned int CalculateVMAndRowBytes( unsigned int *meta_row_width, unsigned int *meta_row_height, unsigned int *vm_group_bytes, - long *dpte_group_bytes, + unsigned int *dpte_group_bytes, unsigned int *PixelPTEReqWidth, unsigned int *PixelPTEReqHeight, unsigned int *PTERequestSize, @@ -1338,7 +1339,7 @@ static unsigned int CalculateVMAndRowBytes( *MetaRowByte = 0; } - if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_lvp) { + if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_l_vp) { MacroTileSizeBytes = 256; MacroTileHeight = BlockHeight256Bytes; } else if (SurfaceTiling == dm_sw_4kb_s || SurfaceTiling == dm_sw_4kb_s_x @@ -1683,11 +1684,11 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman else locals->SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportHeight[k]; - if (mode_lib->vba.ODMCombineEnabled[k] == true) + if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) MainPlaneDoesODMCombine = true; for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) if (mode_lib->vba.BlendingAndTiming[k] == j - && mode_lib->vba.ODMCombineEnabled[j] == true) + && mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) MainPlaneDoesODMCombine = true; if (MainPlaneDoesODMCombine == true) @@ -2940,12 +2941,12 @@ static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib) SwathWidth = mode_lib->vba.ViewportHeight[k]; } - if (mode_lib->vba.ODMCombineEnabled[k] == true) { + if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) { MainPlaneDoesODMCombine = true; } for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) { if (mode_lib->vba.BlendingAndTiming[k] == j - && mode_lib->vba.ODMCombineEnabled[j] == true) { + && mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) { MainPlaneDoesODMCombine = true; } } @@ -3453,7 +3454,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l == dm_420_10)) || (((mode_lib->vba.SurfaceTiling[k] == dm_sw_gfx7_2d_thin_gl || mode_lib->vba.SurfaceTiling[k] - == dm_sw_gfx7_2d_thin_lvp) + == dm_sw_gfx7_2d_thin_l_vp) && !((mode_lib->vba.SourcePixelFormat[k] == dm_444_64 || mode_lib->vba.SourcePixelFormat[k] @@ -3542,17 +3543,17 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } } for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { - locals->IdealSDPPortBandwidthPerState[i] = dml_min3( + locals->IdealSDPPortBandwidthPerState[i][0] = dml_min3( mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKPerState[i], mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels * mode_lib->vba.DRAMChannelWidth, mode_lib->vba.FabricClockPerState[i] * mode_lib->vba.FabricDatapathToDCNDataReturn); if (mode_lib->vba.HostVMEnable == false) { - locals->ReturnBWPerState[i] = locals->IdealSDPPortBandwidthPerState[i] + locals->ReturnBWPerState[i][0] = locals->IdealSDPPortBandwidthPerState[i][0] * 
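/*
 * dml21's IdealSDPPortBandwidthPerState above takes the tightest of three
 * links (SDP port, DRAM, fabric), and the return bandwidth then derates it
 * by the HostVM-dependent percentage. A sketch of that calculation,
 * assuming consistent MHz/byte units as in the diff; fmin stands in for
 * the diff's dml_min3:
 */
#include <math.h>

static double return_bw_per_state_sketch(double return_bus_width,
		double dcfclk, double dram_speed, double channels,
		double dram_channel_width, double fabric_clock,
		double fabric_datapath, double derate_percent)
{
	double sdp = return_bus_width * dcfclk;
	double dram = dram_speed * channels * dram_channel_width;
	double fabric = fabric_clock * fabric_datapath;
	double ideal = fmin(sdp, fmin(dram, fabric));

	return ideal * derate_percent / 100.0;
}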
mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly / 100.0; } else { - locals->ReturnBWPerState[i] = locals->IdealSDPPortBandwidthPerState[i] + locals->ReturnBWPerState[i][0] = locals->IdealSDPPortBandwidthPerState[i][0] * mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData / 100.0; } } @@ -3589,12 +3590,12 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l + dml_max3(mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelDataOnly, mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelMixedWithVMData, mode_lib->vba.UrgentOutOfOrderReturnPerChannelVMDataOnly) - * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i]; - if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i] + * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i][0]; + if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i][0] > locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i]) { - locals->ROBSupport[i] = true; + locals->ROBSupport[i][0] = true; } else { - locals->ROBSupport[i] = false; + locals->ROBSupport[i][0] = false; } } /*Writeback Mode Support Check*/ @@ -3971,18 +3972,25 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l && i == mode_lib->vba.soc.num_states) mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); - if (mode_lib->vba.ODMCapability == false || - (locals->PlaneRequiredDISPCLKWithoutODMCombine <= MaxMaxDispclkRoundedDown - && (!locals->DSCEnabled[k] || locals->HActive[k] <= DCN21_MAX_DSC_IMAGE_WIDTH))) { - locals->ODMCombineEnablePerState[i][k] = false; - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; - } else { - locals->ODMCombineEnablePerState[i][k] = true; - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + + locals->ODMCombineEnablePerState[i][k] = false; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; + if (mode_lib->vba.ODMCapability) { + if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) { + locals->ODMCombineEnablePerState[i][k] = true; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + } else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN21_MAX_DSC_IMAGE_WIDTH)) { + locals->ODMCombineEnablePerState[i][k] = true; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + } else if (locals->HActive[k] > DCN21_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) { + locals->ODMCombineEnablePerState[i][k] = true; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + } } + if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity && locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k] - && locals->ODMCombineEnablePerState[i][k] == false) { + && locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) { locals->NoOfDPP[i][j][k] = 1; locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); @@ -4071,16 +4079,16 @@ void 
dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l /*Viewport Size Check*/ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { - locals->ViewportSizeSupport[i] = true; + locals->ViewportSizeSupport[i][0] = true; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { - if (locals->ODMCombineEnablePerState[i][k] == true) { + if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) { if (dml_min(locals->SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k])) > locals->MaximumSwathWidth[k]) { - locals->ViewportSizeSupport[i] = false; + locals->ViewportSizeSupport[i][0] = false; } } else { if (locals->SwathWidthYSingleDPP[k] / 2.0 > locals->MaximumSwathWidth[k]) { - locals->ViewportSizeSupport[i] = false; + locals->ViewportSizeSupport[i][0] = false; } } } @@ -4121,11 +4129,11 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { - locals->RequiresDSC[i][k] = 0; + locals->RequiresDSC[i][k] = false; locals->RequiresFEC[i][k] = 0; if (mode_lib->vba.BlendingAndTiming[k] == k) { if (mode_lib->vba.Output[k] == dm_hdmi) { - locals->RequiresDSC[i][k] = 0; + locals->RequiresDSC[i][k] = false; locals->RequiresFEC[i][k] = 0; locals->OutputBppPerState[i][k] = TruncToValidBPP( dml_min(600.0, mode_lib->vba.PHYCLKPerState[i]) / mode_lib->vba.PixelClockBackEnd[k] * 24, @@ -4269,8 +4277,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.DSCFormatFactor = 1; } if (locals->RequiresDSC[i][k] == true) { - if (locals->ODMCombineEnablePerState[i][k] - == true) { + if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) { if (mode_lib->vba.PixelClockBackEnd[k] / 6.0 / mode_lib->vba.DSCFormatFactor > (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i]) { locals->DSCCLKRequiredMoreThanSupported[i] = @@ -4293,7 +4300,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.TotalDSCUnitsRequired = 0.0; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (locals->RequiresDSC[i][k] == true) { - if (locals->ODMCombineEnablePerState[i][k] == true) { + if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) { mode_lib->vba.TotalDSCUnitsRequired = mode_lib->vba.TotalDSCUnitsRequired + 2.0; } else { @@ -4335,7 +4342,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.bpp = locals->OutputBppPerState[i][k]; } if (locals->RequiresDSC[i][k] == true && mode_lib->vba.bpp != 0.0) { - if (locals->ODMCombineEnablePerState[i][k] == false) { + if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) { locals->DSCDelayPerState[i][k] = dscceComputeDelay( mode_lib->vba.DSCInputBitPerComponent[k], @@ -4399,7 +4406,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { locals->RequiredDPPCLKThisState[k] = locals->RequiredDPPCLK[i][j][k]; locals->NoOfDPPThisState[k] = locals->NoOfDPP[i][j][k]; - if (locals->ODMCombineEnablePerState[i][k] == true) { + if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) { locals->SwathWidthYThisState[k] = dml_min(locals->SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k])); } else { @@ 
-4451,7 +4458,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l locals->PSCL_FACTOR, locals->PSCL_FACTOR_CHROMA, locals->RequiredDPPCLKThisState, - &mode_lib->vba.ProjectedDCFCLKDeepSleep); + &mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0]); for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64 @@ -4496,7 +4503,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l locals->PTERequestSizeC, locals->dpde0_bytes_per_frame_ub_c, locals->meta_pte_bytes_per_frame_ub_c); - locals->PrefetchLinesC[k] = CalculatePrefetchSourceLines( + locals->PrefetchLinesC[0][0][k] = CalculatePrefetchSourceLines( mode_lib, mode_lib->vba.VRatio[k]/2, mode_lib->vba.VTAPsChroma[k], @@ -4511,7 +4518,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = 0.0; mode_lib->vba.MetaRowBytesC = 0.0; mode_lib->vba.DPTEBytesPerRowC = 0.0; - locals->PrefetchLinesC[k] = 0.0; + locals->PrefetchLinesC[0][0][k] = 0.0; locals->PTEBufferSizeNotExceededC[i][j][k] = true; locals->PTEBufferSizeInRequestsForLuma = mode_lib->vba.PTEBufferSizeInRequestsLuma + mode_lib->vba.PTEBufferSizeInRequestsChroma; } @@ -4552,7 +4559,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l locals->PTERequestSizeY, locals->dpde0_bytes_per_frame_ub_l, locals->meta_pte_bytes_per_frame_ub_l); - locals->PrefetchLinesY[k] = CalculatePrefetchSourceLines( + locals->PrefetchLinesY[0][0][k] = CalculatePrefetchSourceLines( mode_lib, mode_lib->vba.VRatio[k], mode_lib->vba.vtaps[k], @@ -4562,10 +4569,10 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.ViewportYStartY[k], &locals->PrefillY[k], &locals->MaxNumSwY[k]); - locals->PDEAndMetaPTEBytesPerFrame[k] = + locals->PDEAndMetaPTEBytesPerFrame[0][0][k] = mode_lib->vba.PDEAndMetaPTEBytesPerFrameY + mode_lib->vba.PDEAndMetaPTEBytesPerFrameC; - locals->MetaRowBytes[k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC; - locals->DPTEBytesPerRow[k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC; + locals->MetaRowBytes[0][0][k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC; + locals->DPTEBytesPerRow[0][0][k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC; CalculateActiveRowBandwidth( mode_lib->vba.GPUVMEnable, @@ -4591,7 +4598,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.PixelChunkSizeInKByte, locals->TotalNumberOfDCCActiveDPP[i][j], mode_lib->vba.MetaChunkSize, - locals->ReturnBWPerState[i], + locals->ReturnBWPerState[i][0], mode_lib->vba.GPUVMEnable, mode_lib->vba.HostVMEnable, mode_lib->vba.NumberOfActivePlanes, @@ -4602,7 +4609,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.HostVMMaxPageTableLevels, mode_lib->vba.HostVMCachedPageTableLevels); - mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep; + mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0]; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (mode_lib->vba.BlendingAndTiming[k] == k) { if (mode_lib->vba.WritebackEnable[k] == true) { @@ -4644,15 +4651,15 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } } } - mode_lib->vba.MaxMaxVStartup = 0; + mode_lib->vba.MaxMaxVStartup[0][0] = 0; for (k = 0; k <= 
mode_lib->vba.NumberOfActivePlanes - 1; k++) { - locals->MaximumVStartup[k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k] + locals->MaximumVStartup[0][0][k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k] - dml_max(1.0, dml_ceil(locals->WritebackDelay[i][k] / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1.0)); - mode_lib->vba.MaxMaxVStartup = dml_max(mode_lib->vba.MaxMaxVStartup, locals->MaximumVStartup[k]); + mode_lib->vba.MaxMaxVStartup[0][0] = dml_max(mode_lib->vba.MaxMaxVStartup[0][0], locals->MaximumVStartup[0][0][k]); } mode_lib->vba.NextPrefetchMode = mode_lib->vba.MinPrefetchMode; - mode_lib->vba.NextMaxVStartup = mode_lib->vba.MaxMaxVStartup; + mode_lib->vba.NextMaxVStartup = mode_lib->vba.MaxMaxVStartup[0][0]; do { mode_lib->vba.PrefetchMode[i][j] = mode_lib->vba.NextPrefetchMode; mode_lib->vba.MaxVStartup = mode_lib->vba.NextMaxVStartup; @@ -4693,7 +4700,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l myPipe.DPPCLK = locals->RequiredDPPCLK[i][j][k]; myPipe.DISPCLK = locals->RequiredDISPCLK[i][j]; myPipe.PixelClock = mode_lib->vba.PixelClock[k]; - myPipe.DCFCLKDeepSleep = mode_lib->vba.ProjectedDCFCLKDeepSleep; + myPipe.DCFCLKDeepSleep = mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0]; myPipe.DPPPerPlane = locals->NoOfDPP[i][j][k]; myPipe.ScalerEnabled = mode_lib->vba.ScalerEnabled[k]; myPipe.SourceScan = mode_lib->vba.SourceScan[k]; @@ -4727,8 +4734,8 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l locals->SwathWidthYThisState[k] / mode_lib->vba.HRatio[k], mode_lib->vba.OutputFormat[k], mode_lib->vba.MaxInterDCNTileRepeaters, - dml_min(mode_lib->vba.MaxVStartup, locals->MaximumVStartup[k]), - locals->MaximumVStartup[k], + dml_min(mode_lib->vba.MaxVStartup, locals->MaximumVStartup[0][0][k]), + locals->MaximumVStartup[0][0][k], mode_lib->vba.GPUVMMaxPageTableLevels, mode_lib->vba.GPUVMEnable, &myHostVM, @@ -4739,15 +4746,15 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.UrgentLatency, mode_lib->vba.ExtraLatency, mode_lib->vba.TimeCalc, - locals->PDEAndMetaPTEBytesPerFrame[k], - locals->MetaRowBytes[k], - locals->DPTEBytesPerRow[k], - locals->PrefetchLinesY[k], + locals->PDEAndMetaPTEBytesPerFrame[0][0][k], + locals->MetaRowBytes[0][0][k], + locals->DPTEBytesPerRow[0][0][k], + locals->PrefetchLinesY[0][0][k], locals->SwathWidthYThisState[k], locals->BytePerPixelInDETY[k], locals->PrefillY[k], locals->MaxNumSwY[k], - locals->PrefetchLinesC[k], + locals->PrefetchLinesC[0][0][k], locals->BytePerPixelInDETC[k], locals->PrefillC[k], locals->MaxNumSwC[k], @@ -4836,14 +4843,14 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l + locals->RequiredPrefetchPixelDataBWChroma[i][j][k] * locals->UrgentBurstFactorChromaPre[k] + locals->cursor_bw_pre[k] * locals->UrgentBurstFactorCursorPre[k]); } - locals->BandwidthWithoutPrefetchSupported[i] = true; - if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i] + locals->BandwidthWithoutPrefetchSupported[i][0] = true; + if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i][0] || locals->NotEnoughUrgentLatencyHiding == 1) { - locals->BandwidthWithoutPrefetchSupported[i] = false; + locals->BandwidthWithoutPrefetchSupported[i][0] = false; } locals->PrefetchSupported[i][j] = true; - if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i] + if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > 
locals->ReturnBWPerState[i][0] || locals->NotEnoughUrgentLatencyHiding == 1 || locals->NotEnoughUrgentLatencyHidingPre == 1) { locals->PrefetchSupported[i][j] = false; @@ -4872,17 +4879,17 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } if (mode_lib->vba.MaxVStartup <= 13 || mode_lib->vba.AnyLinesForVMOrRowTooLarge == false) { - mode_lib->vba.NextMaxVStartup = mode_lib->vba.MaxMaxVStartup; + mode_lib->vba.NextMaxVStartup = mode_lib->vba.MaxMaxVStartup[0][0]; mode_lib->vba.NextPrefetchMode = mode_lib->vba.NextPrefetchMode + 1; } else { mode_lib->vba.NextMaxVStartup = mode_lib->vba.NextMaxVStartup - 1; } } while ((locals->PrefetchSupported[i][j] != true || locals->VRatioInPrefetchSupported[i][j] != true) - && (mode_lib->vba.NextMaxVStartup != mode_lib->vba.MaxMaxVStartup + && (mode_lib->vba.NextMaxVStartup != mode_lib->vba.MaxMaxVStartup[0][0] || mode_lib->vba.NextPrefetchMode < mode_lib->vba.MaxPrefetchMode)); if (locals->PrefetchSupported[i][j] == true && locals->VRatioInPrefetchSupported[i][j] == true) { - mode_lib->vba.BandwidthAvailableForImmediateFlip = locals->ReturnBWPerState[i]; + mode_lib->vba.BandwidthAvailableForImmediateFlip = locals->ReturnBWPerState[i][0]; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { mode_lib->vba.BandwidthAvailableForImmediateFlip = mode_lib->vba.BandwidthAvailableForImmediateFlip - dml_max(locals->ReadBandwidthLuma[k] * locals->UrgentBurstFactorLuma[k] @@ -4895,7 +4902,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.TotImmediateFlipBytes = 0.0; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { mode_lib->vba.TotImmediateFlipBytes = mode_lib->vba.TotImmediateFlipBytes - + locals->PDEAndMetaPTEBytesPerFrame[k] + locals->MetaRowBytes[k] + locals->DPTEBytesPerRow[k]; + + locals->PDEAndMetaPTEBytesPerFrame[0][0][k] + locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k]; } for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { @@ -4910,9 +4917,9 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.HostVMMaxPageTableLevels, mode_lib->vba.HostVMCachedPageTableLevels, mode_lib->vba.GPUVMEnable, - locals->PDEAndMetaPTEBytesPerFrame[k], - locals->MetaRowBytes[k], - locals->DPTEBytesPerRow[k], + locals->PDEAndMetaPTEBytesPerFrame[0][0][k], + locals->MetaRowBytes[0][0][k], + locals->DPTEBytesPerRow[0][0][k], mode_lib->vba.BandwidthAvailableForImmediateFlip, mode_lib->vba.TotImmediateFlipBytes, mode_lib->vba.SourcePixelFormat[k], @@ -4943,7 +4950,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } locals->ImmediateFlipSupportedForState[i][j] = true; if (mode_lib->vba.total_dcn_read_bw_with_flip - > locals->ReturnBWPerState[i]) { + > locals->ReturnBWPerState[i][0]) { locals->ImmediateFlipSupportedForState[i][j] = false; } for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { @@ -4970,7 +4977,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.WritebackInterfaceChromaBufferSize, mode_lib->vba.DCFCLKPerState[i], mode_lib->vba.UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels, - locals->ReturnBWPerState[i], + locals->ReturnBWPerState[i][0], mode_lib->vba.GPUVMEnable, locals->dpte_group_bytes, mode_lib->vba.MetaChunkSize, @@ -4982,7 +4989,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.DRAMClockChangeLatency, 
mode_lib->vba.SRExitTime, mode_lib->vba.SREnterPlusExitTime, - mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], locals->NoOfDPPThisState, mode_lib->vba.DCCEnable, locals->RequiredDPPCLKThisState, @@ -5025,8 +5032,8 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l MaxTotalVActiveRDBandwidth = MaxTotalVActiveRDBandwidth + locals->ReadBandwidth[k]; } for (i = 0; i <= mode_lib->vba.soc.num_states; ++i) { - locals->MaxTotalVerticalActiveAvailableBandwidth[i] = dml_min( - locals->IdealSDPPortBandwidthPerState[i] * + locals->MaxTotalVerticalActiveAvailableBandwidth[i][0] = dml_min( + locals->IdealSDPPortBandwidthPerState[i][0] * mode_lib->vba.MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation / 100.0, mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels * @@ -5034,10 +5041,10 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.MaxAveragePercentOfIdealDRAMBWDisplayCanUseInNormalSystemOperation / 100.0); - if (MaxTotalVActiveRDBandwidth <= locals->MaxTotalVerticalActiveAvailableBandwidth[i]) { - locals->TotalVerticalActiveBandwidthSupport[i] = true; + if (MaxTotalVActiveRDBandwidth <= locals->MaxTotalVerticalActiveAvailableBandwidth[i][0]) { + locals->TotalVerticalActiveBandwidthSupport[i][0] = true; } else { - locals->TotalVerticalActiveBandwidthSupport[i] = false; + locals->TotalVerticalActiveBandwidthSupport[i][0] = false; } } } @@ -5116,7 +5123,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l status = DML_FAIL_SCALE_RATIO_TAP; } else if (mode_lib->vba.SourceFormatPixelAndScanSupport != true) { status = DML_FAIL_SOURCE_PIXEL_FORMAT; - } else if (locals->ViewportSizeSupport[i] != true) { + } else if (locals->ViewportSizeSupport[i][0] != true) { status = DML_FAIL_VIEWPORT_SIZE; } else if (locals->DIOSupport[i] != true) { status = DML_FAIL_DIO_SUPPORT; @@ -5124,7 +5131,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l status = DML_FAIL_NOT_ENOUGH_DSC; } else if (locals->DSCCLKRequiredMoreThanSupported[i] != false) { status = DML_FAIL_DSC_CLK_REQUIRED; - } else if (locals->ROBSupport[i] != true) { + } else if (locals->ROBSupport[i][0] != true) { status = DML_FAIL_REORDERING_BUFFER; } else if (locals->DISPCLK_DPPCLK_Support[i][j] != true) { status = DML_FAIL_DISPCLK_DPPCLK; @@ -5142,7 +5149,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l status = DML_FAIL_CURSOR_SUPPORT; } else if (mode_lib->vba.PitchSupport != true) { status = DML_FAIL_PITCH_SUPPORT; - } else if (locals->TotalVerticalActiveBandwidthSupport[i] != true) { + } else if (locals->TotalVerticalActiveBandwidthSupport[i][0] != true) { status = DML_FAIL_TOTAL_V_ACTIVE_BW; } else if (locals->PTEBufferSizeNotExceeded[i][j] != true) { status = DML_FAIL_PTE_BUFFER_SIZE; @@ -5198,13 +5205,13 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.DRAMSpeed = mode_lib->vba.DRAMSpeedPerState[mode_lib->vba.VoltageLevel]; mode_lib->vba.FabricClock = mode_lib->vba.FabricClockPerState[mode_lib->vba.VoltageLevel]; mode_lib->vba.SOCCLK = mode_lib->vba.SOCCLKPerState[mode_lib->vba.VoltageLevel]; - mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel]; + mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel][0]; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if 
(mode_lib->vba.BlendingAndTiming[k] == k) { mode_lib->vba.ODMCombineEnabled[k] = locals->ODMCombineEnablePerState[mode_lib->vba.VoltageLevel][k]; } else { - mode_lib->vba.ODMCombineEnabled[k] = 0; + mode_lib->vba.ODMCombineEnabled[k] = false; } mode_lib->vba.DSCEnabled[k] = locals->RequiresDSC[mode_lib->vba.VoltageLevel][k]; @@ -5227,7 +5234,7 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport( double UrgentOutOfOrderReturn, double ReturnBW, bool GPUVMEnable, - long dpte_group_bytes[], + int dpte_group_bytes[], unsigned int MetaChunkSize, double UrgentLatency, double ExtraLatency, @@ -5241,13 +5248,13 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport( int DPPPerPlane[], bool DCCEnable[], double DPPCLK[], - unsigned int SwathWidthSingleDPPY[], + double SwathWidthSingleDPPY[], unsigned int SwathHeightY[], double ReadBandwidthPlaneLuma[], unsigned int SwathHeightC[], double ReadBandwidthPlaneChroma[], unsigned int LBBitPerPixel[], - unsigned int SwathWidthY[], + double SwathWidthY[], double HRatio[], unsigned int vtaps[], unsigned int VTAPsChroma[], @@ -5503,7 +5510,7 @@ static void CalculateDCFCLKDeepSleep( double BytePerPixelDETY[], double BytePerPixelDETC[], double VRatio[], - unsigned int SwathWidthY[], + double SwathWidthY[], int DPPPerPlane[], double HRatio[], double PixelClock[], @@ -5831,7 +5838,7 @@ static void CalculateMetaAndPTETimes( unsigned int meta_row_height[], unsigned int meta_req_width[], unsigned int meta_req_height[], - long dpte_group_bytes[], + int dpte_group_bytes[], unsigned int PTERequestSizeY[], unsigned int PTERequestSizeC[], unsigned int PixelPTEReqWidthY[], @@ -6087,7 +6094,7 @@ static double CalculateExtraLatency( bool HostVMEnable, int NumberOfActivePlanes, int NumberOfDPP[], - long dpte_group_bytes[], + int dpte_group_bytes[], double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData, double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly, int HostVMMaxPageTableLevels, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c index e60af383b4db92408d44bcb3630d21be66a2f63e..a38baa73d4841af90d8ea5381ba4fc9ef7d22861 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c @@ -82,10 +82,10 @@ static unsigned int get_bytes_per_element(enum source_format_class source_format static bool is_dual_plane(enum source_format_class source_format) { - bool ret_val = 0; + bool ret_val = false; if ((source_format == dm_420_8) || (source_format == dm_420_10)) - ret_val = 1; + ret_val = true; return ret_val; } @@ -222,8 +222,8 @@ static void handle_det_buf_split( unsigned int swath_bytes_c = 0; unsigned int full_swath_bytes_packed_l = 0; unsigned int full_swath_bytes_packed_c = 0; - bool req128_l = 0; - bool req128_c = 0; + bool req128_l = false; + bool req128_c = false; bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear); bool surf_vert = (pipe_src_param.source_scan == dm_vert); unsigned int log2_swath_height_l = 0; @@ -248,13 +248,13 @@ static void handle_det_buf_split( total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c; if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request - req128_l = 0; - req128_c = 0; + req128_l = false; + req128_c = false; swath_bytes_l = full_swath_bytes_packed_l; swath_bytes_c = full_swath_bytes_packed_c; } else { //128b request (for luma only for 
yuv420 8bpc) - req128_l = 1; - req128_c = 0; + req128_l = true; + req128_c = false; swath_bytes_l = full_swath_bytes_packed_l / 2; swath_bytes_c = full_swath_bytes_packed_c; } @@ -264,9 +264,9 @@ static void handle_det_buf_split( total_swath_bytes = 2 * full_swath_bytes_packed_l; if (total_swath_bytes <= detile_buf_size_in_bytes) - req128_l = 0; + req128_l = false; else - req128_l = 1; + req128_l = true; swath_bytes_l = total_swath_bytes; swath_bytes_c = 0; @@ -679,7 +679,7 @@ static void get_surf_rq_param( const display_pipe_params_st pipe_param, bool is_chroma) { - bool mode_422 = 0; + bool mode_422 = false; unsigned int vp_width = 0; unsigned int vp_height = 0; unsigned int data_pitch = 0; @@ -1010,7 +1010,7 @@ static void dml_rq_dlg_get_dlg_params( // Source // dcc_en = src.dcc; dual_plane = is_dual_plane((enum source_format_class) (src->source_format)); - mode_422 = 0; // FIXME + mode_422 = false; // FIXME access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed // bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0); // bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1); diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h index 55d4cb23a073a141ebe72d70a31471358ee213ac..bfc2f39bd1efa1a75a5e51ee596ebcdaa478c545 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h @@ -85,7 +85,7 @@ enum dm_swizzle_mode { dm_sw_var_s_x = 29, dm_sw_var_d_x = 30, dm_sw_64kb_r_x, - dm_sw_gfx7_2d_thin_lvp, + dm_sw_gfx7_2d_thin_l_vp, dm_sw_gfx7_2d_thin_gl, }; enum lb_depth { @@ -119,6 +119,10 @@ enum mpc_combine_affinity { dm_mpc_never }; +enum RequestType { + REQ_256Bytes, REQ_128BytesNonContiguous, REQ_128BytesContiguous, REQ_NA +}; + enum self_refresh_affinity { dm_try_to_allow_self_refresh_and_mclk_switch, dm_allow_self_refresh_and_mclk_switch, @@ -165,4 +169,16 @@ enum odm_combine_mode { dm_odm_combine_mode_4to1, }; +enum odm_combine_policy { + dm_odm_combine_policy_dal, + dm_odm_combine_policy_none, + dm_odm_combine_policy_2to1, + dm_odm_combine_policy_4to1, +}; + +enum immediate_flip_requirement { + dm_immediate_flip_not_required, + dm_immediate_flip_required, +}; + #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index 220d5e610f1f5812186040c4965ade41229de988..658f81e757e937dd59234759df1afdda1e4ff45b 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h @@ -63,6 +63,7 @@ struct _vcs_dpi_voltage_scaling_st { double dispclk_mhz; double phyclk_mhz; double dppclk_mhz; + double dtbclk_mhz; }; struct _vcs_dpi_soc_bounding_box_st { @@ -214,6 +215,7 @@ struct _vcs_dpi_display_pipe_source_params_st { int source_format; unsigned char dcc; unsigned int dcc_rate; + unsigned int dcc_rate_chroma; unsigned char dcc_use_global; unsigned char vm; bool gpuvm; // gpuvm enabled @@ -225,7 +227,10 @@ struct _vcs_dpi_display_pipe_source_params_st { int source_scan; int sw_mode; int macro_tile_size; + unsigned int surface_width_y; unsigned int surface_height_y; + unsigned int surface_width_c; + unsigned int surface_height_c; unsigned int viewport_width; unsigned int viewport_height; unsigned int viewport_y_y; @@ -278,6 +283,7 @@ struct _vcs_dpi_display_output_params_st { int output_type; int 
output_format; int dsc_slices; + int max_audio_sample_rate; struct writeback_st wb; }; @@ -323,7 +329,7 @@ struct _vcs_dpi_display_pipe_dest_params_st { double pixel_rate_mhz; unsigned char synchronized_vblank_all_planes; unsigned char otg_inst; - unsigned char odm_combine; + unsigned int odm_combine; unsigned char use_maximum_vstartup; unsigned int vtotal_max; unsigned int vtotal_min; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c index 15b72a8b5174f5759eb892bbb5d04fdfd9371373..b3c96d9b472f043321afe0ec082192499e12c271 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c @@ -264,7 +264,10 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib) mode_lib->vba.DRAMSpeedPerState[i] = soc->clock_limits[i].dram_speed_mts; //mode_lib->vba.DRAMSpeedPerState[i] = soc->clock_limits[i].dram_speed_mhz; mode_lib->vba.MaxDispclk[i] = soc->clock_limits[i].dispclk_mhz; + mode_lib->vba.DTBCLKPerState[i] = soc->clock_limits[i].dtbclk_mhz; } + mode_lib->vba.MinVoltageLevel = 0; + mode_lib->vba.MaxVoltageLevel = mode_lib->vba.soc.num_states; mode_lib->vba.DoUrgentLatencyAdjustment = soc->do_urgent_latency_adjustment; @@ -306,8 +309,6 @@ static void fetch_ip_params(struct display_mode_lib *mode_lib) mode_lib->vba.WritebackInterfaceBufferSize = ip->writeback_interface_buffer_size_kbytes; mode_lib->vba.WritebackLineBufferSize = ip->writeback_line_buffer_buffer_size; - mode_lib->vba.MinVoltageLevel = 0; - mode_lib->vba.MaxVoltageLevel = 5; mode_lib->vba.WritebackChromaLineBufferWidth = ip->writeback_chroma_line_buffer_width_pixels; @@ -423,8 +424,8 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) ip->dcc_supported : src->dcc && ip->dcc_supported; mode_lib->vba.DCCRate[mode_lib->vba.NumberOfActivePlanes] = src->dcc_rate; /* TODO: Needs to be set based on src->dcc_rate_luma/chroma */ - mode_lib->vba.DCCRateLuma[mode_lib->vba.NumberOfActivePlanes] = 0; - mode_lib->vba.DCCRateChroma[mode_lib->vba.NumberOfActivePlanes] = 0; + mode_lib->vba.DCCRateLuma[mode_lib->vba.NumberOfActivePlanes] = src->dcc_rate; + mode_lib->vba.DCCRateChroma[mode_lib->vba.NumberOfActivePlanes] = src->dcc_rate_chroma; mode_lib->vba.SourcePixelFormat[mode_lib->vba.NumberOfActivePlanes] = (enum source_format_class) (src->source_format); @@ -436,8 +437,6 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) dst->recout_width; // TODO: or should this be full_recout_width???...maybe only when in hsplit mode? 
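Editorial note on the fetch_socbb_params()/fetch_ip_params() hunks above: the voltage-level bounds move next to the SoC clock table, replacing a hardcoded `MaxVoltageLevel = 5` with the bounding box's own state count. A minimal sketch, assuming only names visible in this diff, of the scan this keeps honest:

```c
/* Sketch, not from the patch: with the bounds sourced from the bounding box,
 * the state scan no longer assumes exactly five voltage states.
 */
static void scan_voltage_states(struct display_mode_lib *mode_lib)
{
	unsigned int i;

	mode_lib->vba.MinVoltageLevel = 0;
	mode_lib->vba.MaxVoltageLevel = mode_lib->vba.soc.num_states;	/* was a literal 5 */

	for (i = mode_lib->vba.MinVoltageLevel; i <= mode_lib->vba.MaxVoltageLevel; i++)
		;	/* evaluate mode support for state i (placeholder body) */
}
```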
mode_lib->vba.ODMCombineEnabled[mode_lib->vba.NumberOfActivePlanes] = dst->odm_combine; - mode_lib->vba.ODMCombineTypeEnabled[mode_lib->vba.NumberOfActivePlanes] = - dst->odm_combine; mode_lib->vba.OutputFormat[mode_lib->vba.NumberOfActivePlanes] = (enum output_format_class) (dout->output_format); mode_lib->vba.OutputBpp[mode_lib->vba.NumberOfActivePlanes] = @@ -454,7 +453,7 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) dout->dp_lanes; /* TODO: Needs to be set based on dout->audio.audio_sample_rate_khz/sample_layout */ mode_lib->vba.AudioSampleRate[mode_lib->vba.NumberOfActivePlanes] = - 44.1 * 1000; + dout->max_audio_sample_rate; mode_lib->vba.AudioSampleLayout[mode_lib->vba.NumberOfActivePlanes] = 1; mode_lib->vba.DRAMClockChangeLatencyOverride = 0.0; @@ -590,6 +589,7 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) for (k = j + 1; k < mode_lib->vba.cache_num_pipes; ++k) { display_pipe_source_params_st *src_k = &pipes[k].pipe.src; display_pipe_dest_params_st *dst_k = &pipes[k].pipe.dest; + display_output_params_st *dout_k = &pipes[j].dout; if (src_k->is_hsplit && !visited[k] && src->hsplit_grp == src_k->hsplit_grp) { @@ -600,12 +600,18 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) == dm_horz) { mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] += src_k->viewport_width; + mode_lib->vba.ViewportWidthChroma[mode_lib->vba.NumberOfActivePlanes] += + src_k->viewport_width; mode_lib->vba.ScalerRecoutWidth[mode_lib->vba.NumberOfActivePlanes] += dst_k->recout_width; } else { mode_lib->vba.ViewportHeight[mode_lib->vba.NumberOfActivePlanes] += src_k->viewport_height; + mode_lib->vba.ViewportHeightChroma[mode_lib->vba.NumberOfActivePlanes] += + src_k->viewport_height; } + mode_lib->vba.NumberOfDSCSlices[mode_lib->vba.NumberOfActivePlanes] += + dout_k->dsc_slices; visited[k] = true; } @@ -811,7 +817,9 @@ void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib) unsigned int total_pipes = 0; mode_lib->vba.VoltageLevel = mode_lib->vba.cache_pipes[0].clks_cfg.voltage; - mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBWPerState[mode_lib->vba.VoltageLevel]; + mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBWPerState[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb]; + if (mode_lib->vba.ReturnBW == 0) + mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBWPerState[mode_lib->vba.VoltageLevel][0]; mode_lib->vba.FabricAndDRAMBandwidth = mode_lib->vba.FabricAndDRAMBandwidthPerState[mode_lib->vba.VoltageLevel]; fetch_socbb_params(mode_lib); diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h index 3eb657ed57149641bef11179e81843c8f2e959e0..2875efd854673fa7fbbe2b3a21aaeecece1f010d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h @@ -157,6 +157,7 @@ struct vba_vars_st { bool DummyPStateCheck; bool DRAMClockChangeSupportsVActive; bool PrefetchModeSupported; + bool PrefetchAndImmediateFlipSupported; enum self_refresh_affinity AllowDRAMSelfRefreshOrDRAMClockChangeInVblank; // Mode Support only double XFCRemoteSurfaceFlipDelay; double TInitXFill; @@ -318,8 +319,7 @@ struct vba_vars_st { unsigned int DynamicMetadataTransmittedBytes[DC__NUM_DPP__MAX]; double DCCRate[DC__NUM_DPP__MAX]; double AverageDCCCompressionRate; - bool ODMCombineEnabled[DC__NUM_DPP__MAX]; - enum odm_combine_mode ODMCombineTypeEnabled[DC__NUM_DPP__MAX]; + enum odm_combine_mode ODMCombineEnabled[DC__NUM_DPP__MAX]; 
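The vba_vars_st change above (ODMCombineEnabled: bool to enum odm_combine_mode) is what drives the mechanical `== true` / `== false` rewrites to `dm_odm_combine_mode_2to1` / `dm_odm_combine_mode_disabled` throughout the earlier hunks. A hedged illustration of why; the helpers are invented for this sketch, while the enumerators are the ones from display_mode_enums.h:

```c
#include <stdbool.h>

enum odm_combine_mode {
	dm_odm_combine_mode_disabled,
	dm_odm_combine_mode_2to1,
	dm_odm_combine_mode_4to1,
};

/* Invented helper: DPP count implied by the combine factor. */
static int dpp_per_plane(enum odm_combine_mode mode)
{
	if (mode == dm_odm_combine_mode_4to1)
		return 4;
	if (mode == dm_odm_combine_mode_2to1)
		return 2;
	return 1;
}

/*
 * The retired test `ODMCombineEnabled[k] == true` compares against 1, so it
 * matches dm_odm_combine_mode_2to1 only and silently misses 4to1; spelling
 * out the enumerator keeps every combine factor an explicit case.
 */
static bool does_combine(enum odm_combine_mode mode)
{
	return mode != dm_odm_combine_mode_disabled;
}
```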
double OutputBpp[DC__NUM_DPP__MAX]; bool DSCEnabled[DC__NUM_DPP__MAX]; unsigned int DSCInputBitPerComponent[DC__NUM_DPP__MAX]; @@ -347,6 +347,7 @@ struct vba_vars_st { unsigned int EffectiveLBLatencyHidingSourceLinesChroma; double BandwidthAvailableForImmediateFlip; unsigned int PrefetchMode[DC__VOLTAGE_STATES + 1][2]; + unsigned int PrefetchModePerState[DC__VOLTAGE_STATES + 1][2]; unsigned int MinPrefetchMode; unsigned int MaxPrefetchMode; bool AnyLinesForVMOrRowTooLarge; @@ -396,6 +397,7 @@ struct vba_vars_st { bool WritebackLumaAndChromaScalingSupported; bool Cursor64BppSupport; double DCFCLKPerState[DC__VOLTAGE_STATES + 1]; + double DCFCLKState[DC__VOLTAGE_STATES + 1][2]; double FabricClockPerState[DC__VOLTAGE_STATES + 1]; double SOCCLKPerState[DC__VOLTAGE_STATES + 1]; double PHYCLKPerState[DC__VOLTAGE_STATES + 1]; @@ -444,7 +446,7 @@ struct vba_vars_st { double OutputLinkDPLanes[DC__NUM_DPP__MAX]; double ForcedOutputLinkBPP[DC__NUM_DPP__MAX]; // Mode Support only double ImmediateFlipBW[DC__NUM_DPP__MAX]; - double MaxMaxVStartup; + double MaxMaxVStartup[DC__VOLTAGE_STATES + 1][2]; double WritebackLumaVExtra; double WritebackChromaVExtra; @@ -471,7 +473,7 @@ struct vba_vars_st { double RoundedUpMaxSwathSizeBytesC; double EffectiveDETLBLinesLuma; double EffectiveDETLBLinesChroma; - double ProjectedDCFCLKDeepSleep; + double ProjectedDCFCLKDeepSleep[DC__VOLTAGE_STATES + 1][2]; double PDEAndMetaPTEBytesPerFrameY; double PDEAndMetaPTEBytesPerFrameC; unsigned int MetaRowBytesY; @@ -489,12 +491,11 @@ struct vba_vars_st { double FractionOfUrgentBandwidthImmediateFlip; // Mode Support debugging output /* ms locals */ - double IdealSDPPortBandwidthPerState[DC__VOLTAGE_STATES + 1]; + double IdealSDPPortBandwidthPerState[DC__VOLTAGE_STATES + 1][2]; unsigned int NoOfDPP[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; int NoOfDPPThisState[DC__NUM_DPP__MAX]; - bool ODMCombineEnablePerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX]; - enum odm_combine_mode ODMCombineTypeEnablePerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX]; - unsigned int SwathWidthYThisState[DC__NUM_DPP__MAX]; + enum odm_combine_mode ODMCombineEnablePerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX]; + double SwathWidthYThisState[DC__NUM_DPP__MAX]; unsigned int SwathHeightCPerState[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; unsigned int SwathHeightYThisState[DC__NUM_DPP__MAX]; unsigned int SwathHeightCThisState[DC__NUM_DPP__MAX]; @@ -506,7 +507,7 @@ struct vba_vars_st { double RequiredDPPCLKThisState[DC__NUM_DPP__MAX]; bool PTEBufferSizeNotExceededY[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; bool PTEBufferSizeNotExceededC[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; - bool BandwidthWithoutPrefetchSupported[DC__VOLTAGE_STATES + 1]; + bool BandwidthWithoutPrefetchSupported[DC__VOLTAGE_STATES + 1][2]; bool PrefetchSupported[DC__VOLTAGE_STATES + 1][2]; bool VRatioInPrefetchSupported[DC__VOLTAGE_STATES + 1][2]; double RequiredDISPCLK[DC__VOLTAGE_STATES + 1][2]; @@ -515,22 +516,22 @@ struct vba_vars_st { unsigned int TotalNumberOfActiveDPP[DC__VOLTAGE_STATES + 1][2]; unsigned int TotalNumberOfDCCActiveDPP[DC__VOLTAGE_STATES + 1][2]; bool ModeSupport[DC__VOLTAGE_STATES + 1][2]; - double ReturnBWPerState[DC__VOLTAGE_STATES + 1]; + double ReturnBWPerState[DC__VOLTAGE_STATES + 1][2]; bool DIOSupport[DC__VOLTAGE_STATES + 1]; bool NotEnoughDSCUnits[DC__VOLTAGE_STATES + 1]; bool DSCCLKRequiredMoreThanSupported[DC__VOLTAGE_STATES + 1]; bool DTBCLKRequiredMoreThanSupported[DC__VOLTAGE_STATES + 1]; double 
UrgentRoundTripAndOutOfOrderLatencyPerState[DC__VOLTAGE_STATES + 1]; - bool ROBSupport[DC__VOLTAGE_STATES + 1]; + bool ROBSupport[DC__VOLTAGE_STATES + 1][2]; bool PTEBufferSizeNotExceeded[DC__VOLTAGE_STATES + 1][2]; - bool TotalVerticalActiveBandwidthSupport[DC__VOLTAGE_STATES + 1]; - double MaxTotalVerticalActiveAvailableBandwidth[DC__VOLTAGE_STATES + 1]; + bool TotalVerticalActiveBandwidthSupport[DC__VOLTAGE_STATES + 1][2]; + double MaxTotalVerticalActiveAvailableBandwidth[DC__VOLTAGE_STATES + 1][2]; double PrefetchBW[DC__NUM_DPP__MAX]; - double PDEAndMetaPTEBytesPerFrame[DC__NUM_DPP__MAX]; - double MetaRowBytes[DC__NUM_DPP__MAX]; - double DPTEBytesPerRow[DC__NUM_DPP__MAX]; - double PrefetchLinesY[DC__NUM_DPP__MAX]; - double PrefetchLinesC[DC__NUM_DPP__MAX]; + double PDEAndMetaPTEBytesPerFrame[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + double MetaRowBytes[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + double DPTEBytesPerRow[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + double PrefetchLinesY[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + double PrefetchLinesC[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; unsigned int MaxNumSwY[DC__NUM_DPP__MAX]; unsigned int MaxNumSwC[DC__NUM_DPP__MAX]; double PrefillY[DC__NUM_DPP__MAX]; @@ -539,7 +540,7 @@ struct vba_vars_st { double LinesForMetaPTE[DC__NUM_DPP__MAX]; double LinesForMetaAndDPTERow[DC__NUM_DPP__MAX]; double MinDPPCLKUsingSingleDPP[DC__NUM_DPP__MAX]; - unsigned int SwathWidthYSingleDPP[DC__NUM_DPP__MAX]; + double SwathWidthYSingleDPP[DC__NUM_DPP__MAX]; double BytePerPixelInDETY[DC__NUM_DPP__MAX]; double BytePerPixelInDETC[DC__NUM_DPP__MAX]; bool RequiresDSC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX]; @@ -547,7 +548,7 @@ struct vba_vars_st { double RequiresFEC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX]; double OutputBppPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX]; double DSCDelayPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX]; - bool ViewportSizeSupport[DC__VOLTAGE_STATES + 1]; + bool ViewportSizeSupport[DC__VOLTAGE_STATES + 1][2]; unsigned int Read256BlockHeightY[DC__NUM_DPP__MAX]; unsigned int Read256BlockWidthY[DC__NUM_DPP__MAX]; unsigned int Read256BlockHeightC[DC__NUM_DPP__MAX]; @@ -562,7 +563,7 @@ struct vba_vars_st { double WriteBandwidth[DC__NUM_DPP__MAX]; double PSCL_FACTOR[DC__NUM_DPP__MAX]; double PSCL_FACTOR_CHROMA[DC__NUM_DPP__MAX]; - double MaximumVStartup[DC__NUM_DPP__MAX]; + double MaximumVStartup[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; unsigned int MacroTileWidthY[DC__NUM_DPP__MAX]; unsigned int MacroTileWidthC[DC__NUM_DPP__MAX]; double AlignedDCCMetaPitch[DC__NUM_DPP__MAX]; @@ -579,7 +580,7 @@ struct vba_vars_st { bool ImmediateFlipSupportedForState[DC__VOLTAGE_STATES + 1][2]; double WritebackDelay[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX]; unsigned int vm_group_bytes[DC__NUM_DPP__MAX]; - long dpte_group_bytes[DC__NUM_DPP__MAX]; + unsigned int dpte_group_bytes[DC__NUM_DPP__MAX]; unsigned int dpte_row_height[DC__NUM_DPP__MAX]; unsigned int meta_req_height[DC__NUM_DPP__MAX]; unsigned int meta_req_width[DC__NUM_DPP__MAX]; @@ -605,14 +606,14 @@ struct vba_vars_st { double UrgentBurstFactorChroma[DC__NUM_DPP__MAX]; double UrgentBurstFactorChromaPre[DC__NUM_DPP__MAX]; + bool MPCCombine[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; double SwathWidthCSingleDPP[DC__NUM_DPP__MAX]; double MaximumSwathWidthInLineBufferLuma; double MaximumSwathWidthInLineBufferChroma; double MaximumSwathWidthLuma[DC__NUM_DPP__MAX]; double MaximumSwathWidthChroma[DC__NUM_DPP__MAX]; - bool odm_combine_dummy[DC__NUM_DPP__MAX]; - enum 
odm_combine_mode odm_combine_mode_dummy[DC__NUM_DPP__MAX]; + enum odm_combine_mode odm_combine_dummy[DC__NUM_DPP__MAX]; double dummy1[DC__NUM_DPP__MAX]; double dummy2[DC__NUM_DPP__MAX]; double dummy3[DC__NUM_DPP__MAX]; @@ -622,9 +623,9 @@ struct vba_vars_st { double dummy7[DC__NUM_DPP__MAX]; double dummy8[DC__NUM_DPP__MAX]; unsigned int dummyinteger1ms[DC__NUM_DPP__MAX]; - unsigned int dummyinteger2ms[DC__NUM_DPP__MAX]; + double dummyinteger2ms[DC__NUM_DPP__MAX]; unsigned int dummyinteger3[DC__NUM_DPP__MAX]; - unsigned int dummyinteger4; + unsigned int dummyinteger4[DC__NUM_DPP__MAX]; unsigned int dummyinteger5; unsigned int dummyinteger6; unsigned int dummyinteger7; @@ -637,7 +638,6 @@ struct vba_vars_st { unsigned int dummyintegerarr2[DC__NUM_DPP__MAX]; unsigned int dummyintegerarr3[DC__NUM_DPP__MAX]; unsigned int dummyintegerarr4[DC__NUM_DPP__MAX]; - long dummylongarr1[DC__NUM_DPP__MAX]; bool dummysinglestring; bool SingleDPPViewportSizeSupportPerPlane[DC__NUM_DPP__MAX]; double PlaneRequiredDISPCLKWithODMCombine2To1; @@ -645,20 +645,19 @@ struct vba_vars_st { unsigned int TotalNumberOfSingleDPPPlanes[DC__VOLTAGE_STATES + 1][2]; bool LinkDSCEnable; bool ODMCombine4To1SupportCheckOK[DC__VOLTAGE_STATES + 1]; - bool ODMCombineEnableThisState[DC__NUM_DPP__MAX]; - enum odm_combine_mode ODMCombineEnableTypeThisState[DC__NUM_DPP__MAX]; - unsigned int SwathWidthCThisState[DC__NUM_DPP__MAX]; + enum odm_combine_mode ODMCombineEnableThisState[DC__NUM_DPP__MAX]; + double SwathWidthCThisState[DC__NUM_DPP__MAX]; bool ViewportSizeSupportPerPlane[DC__NUM_DPP__MAX]; double AlignedDCCMetaPitchY[DC__NUM_DPP__MAX]; double AlignedDCCMetaPitchC[DC__NUM_DPP__MAX]; unsigned int NotEnoughUrgentLatencyHiding; unsigned int NotEnoughUrgentLatencyHidingPre; - long PTEBufferSizeInRequestsForLuma; - long PTEBufferSizeInRequestsForChroma; + int PTEBufferSizeInRequestsForLuma; + int PTEBufferSizeInRequestsForChroma; // Missing from VBA - long dpte_group_bytes_chroma; + int dpte_group_bytes_chroma; unsigned int vm_group_bytes_chroma; double dst_x_after_scaler; double dst_y_after_scaler; @@ -683,8 +682,8 @@ struct vba_vars_st { double MinTTUVBlank[DC__NUM_DPP__MAX]; double BytePerPixelDETY[DC__NUM_DPP__MAX]; double BytePerPixelDETC[DC__NUM_DPP__MAX]; - unsigned int SwathWidthY[DC__NUM_DPP__MAX]; - unsigned int SwathWidthSingleDPPY[DC__NUM_DPP__MAX]; + double SwathWidthY[DC__NUM_DPP__MAX]; + double SwathWidthSingleDPPY[DC__NUM_DPP__MAX]; double CursorRequestDeliveryTime[DC__NUM_DPP__MAX]; double CursorRequestDeliveryTimePrefetch[DC__NUM_DPP__MAX]; double ReadBandwidthPlaneLuma[DC__NUM_DPP__MAX]; @@ -760,12 +759,12 @@ struct vba_vars_st { double LinesInDETY[DC__NUM_DPP__MAX]; double LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX]; - unsigned int SwathWidthSingleDPPC[DC__NUM_DPP__MAX]; - unsigned int SwathWidthC[DC__NUM_DPP__MAX]; + double SwathWidthSingleDPPC[DC__NUM_DPP__MAX]; + double SwathWidthC[DC__NUM_DPP__MAX]; unsigned int BytePerPixelY[DC__NUM_DPP__MAX]; unsigned int BytePerPixelC[DC__NUM_DPP__MAX]; - long dummyinteger1; - long dummyinteger2; + unsigned int dummyinteger1; + unsigned int dummyinteger2; double FinalDRAMClockChangeLatency; double Tdmdl_vm[DC__NUM_DPP__MAX]; double Tdmdl[DC__NUM_DPP__MAX]; @@ -779,6 +778,7 @@ struct vba_vars_st { unsigned int DCCCMaxCompressedBlock[DC__NUM_DPP__MAX]; unsigned int DCCCIndependent64ByteBlock[DC__NUM_DPP__MAX]; double VStartupMargin; + bool NotEnoughTimeForDynamicMetadata; /* Missing from VBA */ unsigned int MaximumMaxVStartupLines; @@ -814,7 +814,7 @@ struct vba_vars_st { 
unsigned int ViewportHeightChroma[DC__NUM_DPP__MAX]; double HRatioChroma[DC__NUM_DPP__MAX]; double VRatioChroma[DC__NUM_DPP__MAX]; - long WritebackSourceWidth[DC__NUM_DPP__MAX]; + int WritebackSourceWidth[DC__NUM_DPP__MAX]; bool ModeIsSupported; bool ODMCombine4To1Supported; @@ -850,6 +850,58 @@ struct vba_vars_st { unsigned int MaxNumHDMIFRLOutputs; int AudioSampleRate[DC__NUM_DPP__MAX]; int AudioSampleLayout[DC__NUM_DPP__MAX]; + + int PercentMarginOverMinimumRequiredDCFCLK; + bool DynamicMetadataSupported[DC__VOLTAGE_STATES + 1][2]; + enum immediate_flip_requirement ImmediateFlipRequirement; + double DETBufferSizeYThisState[DC__NUM_DPP__MAX]; + double DETBufferSizeCThisState[DC__NUM_DPP__MAX]; + bool NoUrgentLatencyHiding[DC__NUM_DPP__MAX]; + bool NoUrgentLatencyHidingPre[DC__NUM_DPP__MAX]; + int swath_width_luma_ub_this_state[DC__NUM_DPP__MAX]; + int swath_width_chroma_ub_this_state[DC__NUM_DPP__MAX]; + double UrgLatency[DC__VOLTAGE_STATES + 1]; + double VActiveCursorBandwidth[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + double VActivePixelBandwidth[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + bool NoTimeForPrefetch[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + bool NoTimeForDynamicMetadata[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + double dpte_row_bandwidth[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + double meta_row_bandwidth[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + double DETBufferSizeYAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + double DETBufferSizeCAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + int swath_width_luma_ub_all_states[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + int swath_width_chroma_ub_all_states[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + bool NotUrgentLatencyHiding[DC__VOLTAGE_STATES + 1][2]; + unsigned int SwathHeightYAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + unsigned int SwathHeightCAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + unsigned int SwathWidthYAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + unsigned int SwathWidthCAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX]; + double TotalDPTERowBandwidth[DC__VOLTAGE_STATES + 1][2]; + double TotalMetaRowBandwidth[DC__VOLTAGE_STATES + 1][2]; + double TotalVActiveCursorBandwidth[DC__VOLTAGE_STATES + 1][2]; + double TotalVActivePixelBandwidth[DC__VOLTAGE_STATES + 1][2]; + bool UseMinimumRequiredDCFCLK; + double WritebackDelayTime[DC__NUM_DPP__MAX]; + unsigned int DCCYIndependentBlock[DC__NUM_DPP__MAX]; + unsigned int DCCCIndependentBlock[DC__NUM_DPP__MAX]; + unsigned int dummyinteger15; + unsigned int dummyinteger16; + unsigned int dummyinteger17; + unsigned int dummyinteger18; + unsigned int dummyinteger19; + unsigned int dummyinteger20; + unsigned int dummyinteger21; + unsigned int dummyinteger22; + unsigned int dummyinteger23; + unsigned int dummyinteger24; + unsigned int dummyinteger25; + unsigned int dummyinteger26; + unsigned int dummyinteger27; + unsigned int dummyinteger28; + unsigned int dummyinteger29; + bool dummystring[DC__NUM_DPP__MAX]; + double BPP; + enum odm_combine_policy ODMCombinePolicy; }; bool CalculateMinAndMaxPrefetchMode( diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c index b953b02a151214b5adcb945c38dcd911944109ad..723af0b2dda04ba385e6c6c993ae49e14b076cad 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c @@ -24,7 +24,7 @@ */ #include "dml_common_defs.h" 
-#include "../calcs/dcn_calc_math.h" +#include "dcn_calc_math.h" #include "dml_inline_defs.h" diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h index eca140da13d8296fadc3aa3314572c0152e50fbf..ded71ea82413df3ef8b537099eb0860e270a8485 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h @@ -27,7 +27,7 @@ #define __DML_INLINE_DEFS_H__ #include "dml_common_defs.h" -#include "../calcs/dcn_calc_math.h" +#include "dcn_calc_math.h" #include "dml_logger.h" static inline double dml_min(double a, double b) diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile index 641ffb7cfaed4dbc739a707176e07baa08b65268..3f66868df171f2aecb39daf2d8a5ab73c8d706a4 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile @@ -2,7 +2,13 @@ # # Makefile for the 'dsc' sub-component of DAL. +ifdef CONFIG_X86 dsc_ccflags := -mhard-float -msse +endif + +ifdef CONFIG_PPC64 +dsc_ccflags := -mhard-float -maltivec +endif ifdef CONFIG_CC_IS_GCC ifeq ($(call cc-ifversion, -lt, 0701, y), y) @@ -10,6 +16,7 @@ IS_OLD_GCC = 1 endif endif +ifdef CONFIG_X86 ifdef IS_OLD_GCC # Stack alignment mismatch, proceed with caution. # GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 @@ -18,6 +25,7 @@ dsc_ccflags += -mpreferred-stack-boundary=4 else dsc_ccflags += -msse2 endif +endif CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc.o := $(dsc_ccflags) CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc_dpi.o := $(dsc_ccflags) diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index d2423ad1fac2ef228858d39ad9ae266a62f7926d..87d682d25278ab4a8f5f41e9a586112299006f3c 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -29,6 +29,9 @@ /* This module's internal functions */ +/* default DSC policy target bitrate limit is 16bpp */ +static uint32_t dsc_policy_max_target_bpp_limit = 16; + static uint32_t dc_dsc_bandwidth_in_kbps_from_timing( const struct dc_crtc_timing *timing) { @@ -221,7 +224,8 @@ static void get_dsc_enc_caps( memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps)); if (dsc) { - dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz); + if (!dsc->ctx->dc->debug.disable_dsc) + dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz); if (dsc->ctx->dc->debug.native422_support) dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1; } @@ -757,7 +761,7 @@ done: return is_dsc_possible; } -bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, const uint8_t *dpcd_dsc_ext_data, struct dsc_dec_dpcd_caps *dsc_sink_caps) +bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, const uint8_t *dpcd_dsc_basic_data, const uint8_t *dpcd_dsc_ext_data, struct dsc_dec_dpcd_caps *dsc_sink_caps) { if (!dpcd_dsc_basic_data) return false; @@ -810,6 +814,23 @@ bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, const uint8_t *dp if (!dsc_bpp_increment_div_from_dpcd(dpcd_dsc_basic_data[DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT], &dsc_sink_caps->bpp_increment_div)) return false; + if (dc->debug.dsc_bpp_increment_div) { + /* dsc_bpp_increment_div should only be 1, 2, 4, 8 or 16, but rather than rejecting invalid values, + * we'll accept all and get it into range. This also makes the above check against 0 redundant, + * but that check makes explicit that the override is only applied when it is non-zero. 
+ */ + if (dc->debug.dsc_bpp_increment_div >= 1) + dsc_sink_caps->bpp_increment_div = 1; + if (dc->debug.dsc_bpp_increment_div >= 2) + dsc_sink_caps->bpp_increment_div = 2; + if (dc->debug.dsc_bpp_increment_div >= 4) + dsc_sink_caps->bpp_increment_div = 4; + if (dc->debug.dsc_bpp_increment_div >= 8) + dsc_sink_caps->bpp_increment_div = 8; + if (dc->debug.dsc_bpp_increment_div >= 16) + dsc_sink_caps->bpp_increment_div = 16; + } + /* Extended caps */ if (dpcd_dsc_ext_data == NULL) { // Extended DPCD DSC data can be null, e.g. because it doesn't apply to SST dsc_sink_caps->branch_overall_throughput_0_mps = 0; @@ -951,7 +972,12 @@ void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, struct dc default: return; } - /* internal upper limit to 16 bpp */ - if (policy->max_target_bpp > 16) - policy->max_target_bpp = 16; + /* internal upper limit, default 16 bpp */ + if (policy->max_target_bpp > dsc_policy_max_target_bpp_limit) + policy->max_target_bpp = dsc_policy_max_target_bpp_limit; +} + +void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit) +{ + dsc_policy_max_target_bpp_limit = limit; } diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 16f6ef22367b19771d3f7edb5ed33fedf1843156..f285b76888fbae2a3ab674a3c1ac1df770356422 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -212,6 +212,7 @@ struct resource_pool { struct abm *abm; struct dmcu *dmcu; + struct dmub_psr *psr; const struct resource_funcs *funcs; const struct resource_caps *res_cap; diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h index 6198bccd61991e021bc7d4698a6ef79da025547d..8b1f0ce6c2a7fcae890cd364ea5b27f364b7757f 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h @@ -76,6 +76,8 @@ void dp_enable_mst_on_sink(struct dc_link *link, bool enable); enum dp_panel_mode dp_get_panel_mode(struct dc_link *link); void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode); +bool dp_overwrite_extended_receiver_cap(struct dc_link *link); + void dp_set_fec_ready(struct dc_link *link, bool ready); void dp_set_fec_enable(struct dc_link *link, bool enable); bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable); diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calc_math.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h rename to drivers/gpu/drm/amd/display/dc/inc/dcn_calc_math.h diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h index 125e42dbd3c581727b3221c3cf4fa89b8be36725..45ef390ae052cf26d4234a4ec41915e45de7e20b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h @@ -47,6 +47,26 @@ struct dpp_input_csc_matrix { uint16_t regval[12]; }; +static const struct dpp_input_csc_matrix dpp_input_csc_matrix[] = { + {COLOR_SPACE_SRGB, + {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} }, + {COLOR_SPACE_SRGB_LIMITED, + {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} }, + {COLOR_SPACE_YCBCR601, + {0x2cdd, 0x2000, 0, 0xe991, 0xe926, 0x2000, 0xf4fd, 0x10ef, + 0, 0x2000, 0x38b4, 0xe3a6} }, + {COLOR_SPACE_YCBCR601_LIMITED, + {0x3353, 0x2568, 0, 0xe400, 0xe5dc, 0x2568, 0xf367, 0x1108, + 0, 0x2568, 0x40de, 0xdd3a} }, + {COLOR_SPACE_YCBCR709, + {0x3265, 0x2000, 
0, 0xe6ce, 0xf105, 0x2000, 0xfa01, 0xa7d, 0, + 0x2000, 0x3b61, 0xe24f} }, + + {COLOR_SPACE_YCBCR709_LIMITED, + {0x39a6, 0x2568, 0, 0xe0d6, 0xeedd, 0x2568, 0xf925, 0x9a8, 0, + 0x2568, 0x43ee, 0xdbb2} } +}; + struct dpp_grph_csc_adjustment { struct fixed31_32 temperature_matrix[CSC_TEMPERATURE_MATRIX_SIZE]; enum graphics_gamut_adjust_type gamut_adjust_type; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h index 735f41901b88552c83242fd590c0247084ebabc1..459f95f52486172a74523106b197d6cc4c0ca2e4 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h @@ -113,7 +113,8 @@ struct dwbc { int wb_src_plane_inst;/*hubp, mpcc, inst*/ bool update_privacymask; uint32_t mask_id; - + int otg_inst; + bool mvc_cfg; }; struct dwbc_funcs { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h index 85a34dde8526427cc95b76b791b7ba9634607fbd..2cb8466e657b9618bbeaccdbce0aafb815c95ab2 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h @@ -63,6 +63,26 @@ struct hubp { bool power_gated; }; +struct surface_flip_registers { + uint32_t DCSURF_SURFACE_CONTROL; + uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH; + uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS; + uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH; + uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS; + uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C; + uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_C; + uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C; + uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C; + uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH; + uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS; + uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH; + uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS; + bool tmz_surface; + bool immediate; + uint8_t vmid; + bool grph_stereo; +}; + struct hubp_funcs { void (*hubp_setup)( struct hubp *hubp, @@ -82,9 +102,10 @@ struct hubp_funcs { void (*mem_program_viewport)( struct hubp *hubp, const struct rect *viewport, - const struct rect *viewport_c, - enum dc_rotation_angle rotation); - /* rotation needed for Renoir workaround */ + const struct rect *viewport_c); + + void (*apply_PLAT_54186_wa)(struct hubp *hubp, + const struct dc_plane_address *address); bool (*hubp_program_surface_flip_and_addr)( struct hubp *hubp, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index 2d3efd71fa515e7ebc1b32c9d7b0639362c1cf4f..e5e7d94026fc6b7ed0c796ff508d7f601cbbcd32 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -208,7 +208,8 @@ struct timing_generator_funcs { bool enable, const struct dc_crtc_timing *timing); void (*set_drr)(struct timing_generator *tg, const struct drr_params *params); void (*set_static_screen_control)(struct timing_generator *tg, - uint32_t value); + uint32_t event_triggers, + uint32_t num_frames); void (*set_test_pattern)( struct timing_generator *tg, enum controller_dp_test_pattern test_pattern, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index e9c6021a5372733f592d78d9f703dd730d2d7b18..209118f9f19358558a911d337a9b9f72144db561 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -42,7 +42,7 @@ struct dc_state; struct 
dc_stream_status; struct dc_writeback_info; struct dchub_init_data; -struct dc_static_screen_events; +struct dc_static_screen_params; struct resource_pool; struct dc_phy_addr_space_config; struct dc_virtual_addr_space_config; @@ -102,7 +102,7 @@ struct hw_sequencer_funcs { unsigned int vmid, unsigned int vmid_frame_number); void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx, int num_pipes, - const struct dc_static_screen_events *events); + const struct dc_static_screen_params *events); /* Stream Related */ void (*enable_stream)(struct pipe_ctx *pipe_ctx); @@ -149,16 +149,18 @@ struct hw_sequencer_funcs { /* Writeback Related */ void (*update_writeback)(struct dc *dc, - const struct dc_stream_status *stream_status, struct dc_writeback_info *wb_info, struct dc_state *context); void (*enable_writeback)(struct dc *dc, - const struct dc_stream_status *stream_status, struct dc_writeback_info *wb_info, struct dc_state *context); void (*disable_writeback)(struct dc *dc, unsigned int dwb_pipe_inst); + bool (*mmhubbub_warmup)(struct dc *dc, + unsigned int num_dwb, + struct dc_writeback_info *wb_info); + /* Clock Related */ enum dc_status (*set_clock)(struct dc *dc, enum dc_clock_type clock_type, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h index 8ba06f015975f4404fe7df9cf461112f4b60369e..ecf566378ccdbb38c13e71e6e8553f4efb4922c1 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h @@ -51,7 +51,7 @@ struct dc_state; struct dc_stream_status; struct dc_writeback_info; struct dchub_init_data; -struct dc_static_screen_events; +struct dc_static_screen_params; struct resource_pool; struct resource_context; struct stream_resource; diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h index 47e307388581a88424427101ca8f3adfb5df6d68..2470405e996bc5b9f1b1008233467194714fa769 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h +++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h @@ -458,7 +458,14 @@ uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr, #define IX_REG_READ(index_reg_name, data_reg_name, index) \ generic_read_indirect_reg(CTX, REG(index_reg_name), REG(data_reg_name), IND_REG(index)) +#define IX_REG_GET_N(index_reg_name, data_reg_name, index, n, ...) \ + generic_indirect_reg_get(CTX, REG(index_reg_name), REG(data_reg_name), \ + IND_REG(index), \ + n, __VA_ARGS__) +#define IX_REG_GET(index_reg_name, data_reg_name, index, field, val) \ + IX_REG_GET_N(index_reg_name, data_reg_name, index, 1, \ + FN(data_reg_name, field), val) #define IX_REG_UPDATE_N(index_reg_name, data_reg_name, index, n, ...) 
\ generic_indirect_reg_update_ex(CTX, \ @@ -479,6 +486,12 @@ uint32_t generic_read_indirect_reg(const struct dc_context *ctx, uint32_t addr_index, uint32_t addr_data, uint32_t index); +uint32_t generic_indirect_reg_get(const struct dc_context *ctx, + uint32_t addr_index, uint32_t addr_data, + uint32_t index, int n, + uint8_t shift1, uint32_t mask1, uint32_t *field_value1, + ...); + uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx, uint32_t addr_index, uint32_t addr_data, uint32_t index, uint32_t reg_val, int n, diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index 7a85abc53d054163a4615bb4de365879b9d015a4..5ae8ada154efe5127d3091df6f0493baabe64ba7 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -177,4 +177,6 @@ void update_audio_usage( unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format); +void get_audio_check(struct audio_info *aud_modes, + struct audio_check *aud_chk); #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h index 13b9a9bb32c86dd31df09c7f0d92abc5de32bbfd..c34eba19860a32a9952f26ab094dd7266552c953 100644 --- a/drivers/gpu/drm/amd/display/dc/os_types.h +++ b/drivers/gpu/drm/amd/display/dc/os_types.h @@ -1,5 +1,6 @@ /* * Copyright 2012-16 Advanced Micro Devices, Inc. + * Copyright 2019 Raptor Engineering, LLC * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -50,7 +51,38 @@ #define dm_error(fmt, ...) DRM_ERROR(fmt, ##__VA_ARGS__) #if defined(CONFIG_DRM_AMD_DC_DCN) +#if defined(CONFIG_X86) #include +#define DC_FP_START() kernel_fpu_begin() +#define DC_FP_END() kernel_fpu_end() +#elif defined(CONFIG_PPC64) +#include +#include +#define DC_FP_START() { \ + if (cpu_has_feature(CPU_FTR_VSX_COMP)) { \ + preempt_disable(); \ + enable_kernel_vsx(); \ + } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) { \ + preempt_disable(); \ + enable_kernel_altivec(); \ + } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) { \ + preempt_disable(); \ + enable_kernel_fp(); \ + } \ +} +#define DC_FP_END() { \ + if (cpu_has_feature(CPU_FTR_VSX_COMP)) { \ + disable_kernel_vsx(); \ + preempt_enable(); \ + } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) { \ + disable_kernel_altivec(); \ + preempt_enable(); \ + } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) { \ + disable_kernel_fp(); \ + preempt_enable(); \ + } \ +} +#endif #endif /* diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index b10728f33f627e6aaafd9019d9340f6e5dad09de..cd9532b4f14df78796983555e4970ffa27ab8dcd 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -36,6 +36,7 @@ #define DMUB_RB_SIZE (DMUB_RB_CMD_SIZE * DMUB_RB_MAX_ENTRY) #define REG_SET_MASK 0xFFFF + /* * Command IDs should be treated as stable ABI. * Do not reuse or modify IDs. 
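Since the driver and the DMCUB firmware can be built from different trees, this stability rule means every command ID keeps its numeric value forever: new commands are appended, and retired values stay reserved. A minimal sketch of the convention (hypothetical command names, not part of this interface):

	enum example_cmd_type {
		EXAMPLE_CMD__NULL = 0,      /* value is frozen at first release */
		EXAMPLE_CMD__FEATURE_A = 1,
		/* EXAMPLE_CMD__RETIRED = 2, value stays reserved after removal */
		EXAMPLE_CMD__FEATURE_B = 3, /* new IDs only ever append */
	};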
@@ -47,6 +48,7 @@ enum dmub_cmd_type {
 DMUB_CMD__REG_SEQ_FIELD_UPDATE_SEQ = 2,
 DMUB_CMD__REG_SEQ_BURST_WRITE = 3,
 DMUB_CMD__REG_REG_WAIT = 4,
+ DMUB_CMD__PLAT_54186_WA = 5,
 DMUB_CMD__PSR = 64,
 DMUB_CMD__VBIOS = 128,
 };
@@ -145,6 +147,32 @@ struct dmub_rb_cmd_reg_wait {
 struct dmub_cmd_reg_wait_data reg_wait;
 };
+#ifndef PHYSICAL_ADDRESS_LOC
+#define PHYSICAL_ADDRESS_LOC union large_integer
+#endif
+
+struct dmub_cmd_PLAT_54186_wa {
+ uint32_t DCSURF_SURFACE_CONTROL;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C;
+ struct {
+ uint8_t hubp_inst : 4;
+ uint8_t tmz_surface : 1;
+ uint8_t immediate : 1;
+ uint8_t vmid : 4;
+ uint8_t grph_stereo : 1;
+ uint32_t reserved : 21;
+ } flip_params;
+ uint32_t reserved[9];
+};
+
+struct dmub_rb_cmd_PLAT_54186_wa {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_PLAT_54186_wa flip;
+};
+
 struct dmub_cmd_digx_encoder_control_data {
 union dig_encoder_control_parameters_v1_5 dig;
 };
@@ -187,9 +215,28 @@ struct dmub_rb_cmd_dpphy_init {
 };
 struct dmub_cmd_psr_copy_settings_data {
- uint32_t reg1;
- uint32_t reg2;
- uint32_t reg3;
+ uint16_t psr_level;
+ uint8_t hubp_inst;
+ uint8_t dpp_inst;
+ uint8_t mpcc_inst;
+ uint8_t opp_inst;
+ uint8_t otg_inst;
+ uint8_t digfe_inst;
+ uint8_t digbe_inst;
+ uint8_t dpphy_inst;
+ uint8_t aux_inst;
+ uint8_t hyst_frames;
+ uint8_t hyst_lines;
+ uint8_t phy_num;
+ uint8_t phy_type;
+ uint8_t aux_repeat;
+ uint8_t smu_optimizations_en;
+ uint8_t skip_wait_for_pll_lock;
+ uint8_t frame_delay;
+ uint8_t smu_phy_id;
+ uint8_t num_of_controllers;
+ uint8_t link_rate;
+ uint8_t frame_cap_ind;
 };
 struct dmub_rb_cmd_psr_copy_settings {
@@ -206,31 +253,17 @@ struct dmub_rb_cmd_psr_set_level {
 struct dmub_cmd_psr_set_level_data psr_set_level_data;
 };
-struct dmub_rb_cmd_psr_disable {
- struct dmub_cmd_header header;
-};
-
 struct dmub_rb_cmd_psr_enable {
 struct dmub_cmd_header header;
 };
-struct dmub_cmd_psr_notify_vblank_data {
- uint32_t vblank_int; // Which vblank interrupt was triggered
-};
-
-struct dmub_rb_cmd_notify_vblank {
- struct dmub_cmd_header header;
- struct dmub_cmd_psr_notify_vblank_data psr_notify_vblank_data;
-};
-
-struct dmub_cmd_psr_notify_static_state_data {
- uint32_t ss_int; // Which static screen interrupt was triggered
- uint32_t ss_enter; // Enter (1) or exit (0) static screen
+struct dmub_cmd_psr_setup_data {
+ enum psr_version version; // PSR version 1 or 2
 };
-struct dmub_rb_cmd_psr_notify_static_state {
+struct dmub_rb_cmd_psr_setup {
 struct dmub_cmd_header header;
- struct dmub_cmd_psr_notify_static_state_data psr_notify_static_state_data;
+ struct dmub_cmd_psr_setup_data psr_setup_data;
 };
 union dmub_rb_cmd {
@@ -245,9 +278,10 @@ union dmub_rb_cmd {
 struct dmub_rb_cmd_dpphy_init dpphy_init;
 struct dmub_rb_cmd_dig1_transmitter_control dig1_transmitter_control;
 struct dmub_rb_cmd_psr_enable psr_enable;
- struct dmub_rb_cmd_psr_disable psr_disable;
 struct dmub_rb_cmd_psr_copy_settings psr_copy_settings;
 struct dmub_rb_cmd_psr_set_level psr_set_level;
+ struct dmub_rb_cmd_PLAT_54186_wa PLAT_54186_wa;
+ struct dmub_rb_cmd_psr_setup psr_setup;
 };
 #pragma pack(pop)
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
index 14f13e8a6f3b4fe588150994be9e4fa1808d6fa3..7b69eb37f762c7b9fb8ccef0f37c75739aa556b7 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
+++ 
b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h @@ -32,10 +32,17 @@ */ enum dmub_cmd_psr_type { - DMUB_CMD__PSR_ENABLE = 0, - DMUB_CMD__PSR_DISABLE = 1, - DMUB_CMD__PSR_COPY_SETTINGS = 2, - DMUB_CMD__PSR_SET_LEVEL = 3, + DMUB_CMD__PSR_SETUP = 0, + DMUB_CMD__PSR_COPY_SETTINGS = 1, + DMUB_CMD__PSR_ENABLE = 2, + DMUB_CMD__PSR_DISABLE = 3, + DMUB_CMD__PSR_SET_LEVEL = 4, +}; + +enum psr_version { + PSR_VERSION_1 = 0x10, // PSR Version 1 + PSR_VERSION_2 = 0x20, // PSR Version 2, includes selective update + PSR_VERSION_2_Y_COORD = 0x21, // PSR Version 2, includes Y-coordinate support for SU }; #endif /* _DMUB_CMD_DAL_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_meta.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..242ec257998c68e8b9464cbc8d28dd7f43cba010 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_meta.h @@ -0,0 +1,63 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#ifndef _DMUB_META_H_ +#define _DMUB_META_H_ + +#include "dmub_types.h" + +#pragma pack(push, 1) + +/* Magic value for identifying dmub_fw_meta_info */ +#define DMUB_FW_META_MAGIC 0x444D5542 + +/* Offset from the end of the file to the dmub_fw_meta_info */ +#define DMUB_FW_META_OFFSET 0x24 + +/** + * struct dmub_fw_meta_info - metadata associated with fw binary + * + * NOTE: This should be considered a stable API. Fields should + * not be repurposed or reordered. New fields should be + * added instead to extend the structure. + * + * @magic_value: magic value identifying DMUB firmware meta info + * @fw_region_size: size of the firmware state region + * @trace_buffer_size: size of the tracebuffer region + */ +struct dmub_fw_meta_info { + uint32_t magic_value; + uint32_t fw_region_size; + uint32_t trace_buffer_size; +}; + +/* Ensure that the structure remains 64 bytes. 
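+ * The union below pads the info struct out to that size so the
+ * metadata footprint in the binary never changes. Consumers find the
+ * block at fw_bss_data + fw_bss_data_size - DMUB_FW_META_OFFSET -
+ * sizeof(union dmub_fw_meta) and must check info.magic_value against
+ * DMUB_FW_META_MAGIC before trusting the sizes; dmub_get_fw_meta_info()
+ * in dmub_srv.c implements exactly this lookup.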
*/
+union dmub_fw_meta {
+ struct dmub_fw_meta_info info;
+ uint8_t reserved[64];
+};
+
+#pragma pack(pop)
+
+#endif /* _DMUB_META_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h
index ac22744eaa948742a77e241d39cf9adde093101a..df875fdd2ab07a2d99342f167bd83292b63bf7d5 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h
@@ -73,12 +73,17 @@ static inline bool dmub_rb_full(struct dmub_rb *rb)
 static inline bool dmub_rb_push_front(struct dmub_rb *rb,
 const struct dmub_cmd_header *cmd)
 {
- uint8_t *wt_ptr = (uint8_t *)(rb->base_address) + rb->wrpt;
+ uint64_t volatile *dst = (uint64_t volatile *)(rb->base_address) + rb->wrpt / sizeof(uint64_t);
+ const uint64_t *src = (const uint64_t *)cmd;
+ int i;
 if (dmub_rb_full(rb))
 return false;
- dmub_memcpy(wt_ptr, cmd, DMUB_RB_CMD_SIZE);
+ /* Copy the command one aligned 64-bit word at a time. */
+ for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
+ *dst++ = *src++;
+
 rb->wrpt += DMUB_RB_CMD_SIZE;
 if (rb->wrpt >= rb->capacity)
@@ -113,6 +118,26 @@ static inline bool dmub_rb_pop_front(struct dmub_rb *rb)
 return true;
 }
+static inline void dmub_rb_flush_pending(const struct dmub_rb *rb)
+{
+ uint32_t rptr = rb->rptr;
+ uint32_t wptr = rb->wrpt;
+
+ while (rptr != wptr) {
+ uint64_t volatile *data = (uint64_t volatile *)rb->base_address + rptr / sizeof(uint64_t);
+ uint64_t temp;
+ int i;
+
+ /* Dummy-read each queued command; only the volatile read's side effect matters. */
+ for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
+ temp = *data++;
+
+ rptr += DMUB_RB_CMD_SIZE;
+ if (rptr >= rb->capacity)
+ rptr %= rb->capacity;
+ }
+}
+
 static inline void dmub_rb_init(struct dmub_rb *rb,
 struct dmub_rb_init_params *init_params)
 {
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
index 528243e35adda60912d39625f2f24de0fd596f17..f8917594036ab375dc8a28b011c230b342c63cf9 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
@@ -67,7 +67,6 @@
 #include "dmub_types.h"
 #include "dmub_cmd.h"
 #include "dmub_rb.h"
-#include "dmub_fw_state.h"
 #if defined(__cplusplus)
 extern "C" {
@@ -76,7 +75,7 @@ extern "C" {
 /* Forward declarations */
 struct dmub_srv;
 struct dmub_cmd_header;
-struct dmcu;
+struct dmub_srv_common_regs;
 /* enum dmub_status - return code for dmcub functions */
 enum dmub_status {
@@ -145,11 +144,13 @@ struct dmub_fb {
 * @inst_const_size: size of the fw inst const section
 * @bss_data_size: size of the fw bss data section
 * @vbios_size: size of the vbios data
+ * @fw_bss_data: raw firmware bss data section
 */
 struct dmub_srv_region_params {
 uint32_t inst_const_size;
 uint32_t bss_data_size;
 uint32_t vbios_size;
+ const uint8_t *fw_bss_data;
 };
 /**
@@ -230,6 +231,8 @@ struct dmub_srv_base_funcs {
 struct dmub_srv_hw_funcs {
 /* private: internal use only */
+ void (*init)(struct dmub_srv *dmub);
+
 void (*reset)(struct dmub_srv *dmub);
 void (*reset_release)(struct dmub_srv *dmub);
@@ -307,6 +310,8 @@ struct dmub_srv {
 volatile const struct dmub_fw_state *fw_state;
 /* private: internal use only */
+ const struct dmub_srv_common_regs *regs;
+
 struct dmub_srv_base_funcs funcs;
 struct dmub_srv_hw_funcs hw_funcs;
 struct dmub_rb inbox1_rb;
@@ -413,6 +418,21 @@ enum dmub_status dmub_srv_is_hw_init(struct dmub_srv *dmub, bool *is_hw_init);
 enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
 const struct dmub_srv_hw_params *params);
+/**
+ * dmub_srv_hw_reset() - puts the DMUB
hardware in reset state if initialized + * @dmub: the dmub service + * + * Before destroying the DMUB service or releasing the backing framebuffer + * memory we'll need to put the DMCUB into reset first. + * + * A subsequent call to dmub_srv_hw_init() will re-enable the DMCUB. + * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_INVALID - unspecified error + */ +enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub); + /** * dmub_srv_cmd_queue() - queues a command to the DMUB * @dmub: the dmub service @@ -441,25 +461,6 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub, */ enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub); -/** - * dmub_srv_cmd_submit() - submits a command to the DMUB immediately - * @dmub: the dmub service - * @cmd: the command to submit - * @timeout_us: the maximum number of microseconds to wait - * - * Submits a command to the DMUB with an optional timeout. - * If timeout_us is given then the service will attempt to - * resubmit for the given number of microseconds. - * - * Return: - * DMUB_STATUS_OK - success - * DMUB_STATUS_TIMEOUT - wait for submit timed out - * DMUB_STATUS_INVALID - unspecified error - */ -enum dmub_status dmub_srv_cmd_submit(struct dmub_srv *dmub, - const struct dmub_cmd_header *cmd, - uint32_t timeout_us); - /** * dmub_srv_wait_for_auto_load() - Waits for firmware auto load to complete * @dmub: the dmub service diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c index 951ea7053c7e3971bec9ab3702679aac17ab468b..b2ca8e0dbac92e662e03422ebd25373e3b7650ec 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c @@ -25,6 +25,7 @@ #include "../inc/dmub_srv.h" #include "dmub_reg.h" +#include "dmub_dcn20.h" #include "dcn/dcn_2_0_0_offset.h" #include "dcn/dcn_2_0_0_sh_mask.h" @@ -33,36 +34,90 @@ #define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg #define CTX dmub +#define REGS dmub->regs + +/* Registers. */ + +const struct dmub_srv_common_regs dmub_srv_dcn20_regs = { +#define DMUB_SR(reg) REG_OFFSET(reg), + { DMUB_COMMON_REGS() }, +#undef DMUB_SR + +#define DMUB_SF(reg, field) FD_MASK(reg, field), + { DMUB_COMMON_FIELDS() }, +#undef DMUB_SF + +#define DMUB_SF(reg, field) FD_SHIFT(reg, field), + { DMUB_COMMON_FIELDS() }, +#undef DMUB_SF +}; + +/* Shared functions. 
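+ * These helpers are shared with DCN21 now that the per-ASIC register
+ * tables carry the differences: dmub_dcn20_get_fb_base_offset() reads
+ * DCN_VM_FB_LOCATION_BASE/DCN_VM_FB_OFFSET (both in 16 MiB units,
+ * hence the << 24) and dmub_dcn20_translate_addr() rebases an x86
+ * physical framebuffer address into the DMCUB's view as
+ * addr - fb_base + fb_offset.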
*/ + +static void dmub_dcn20_get_fb_base_offset(struct dmub_srv *dmub, + uint64_t *fb_base, + uint64_t *fb_offset) +{ + uint32_t tmp; + + REG_GET(DCN_VM_FB_LOCATION_BASE, FB_BASE, &tmp); + *fb_base = (uint64_t)tmp << 24; + + REG_GET(DCN_VM_FB_OFFSET, FB_OFFSET, &tmp); + *fb_offset = (uint64_t)tmp << 24; +} + +static inline void dmub_dcn20_translate_addr(const union dmub_addr *addr_in, + uint64_t fb_base, + uint64_t fb_offset, + union dmub_addr *addr_out) +{ + addr_out->quad_part = addr_in->quad_part - fb_base + fb_offset; +} void dmub_dcn20_reset(struct dmub_srv *dmub) { REG_UPDATE(DMCUB_CNTL, DMCUB_SOFT_RESET, 1); REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); + REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1); + REG_WRITE(DMCUB_INBOX1_RPTR, 0); + REG_WRITE(DMCUB_INBOX1_WPTR, 0); } void dmub_dcn20_reset_release(struct dmub_srv *dmub) { + REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 0); REG_WRITE(DMCUB_SCRATCH15, dmub->psp_version & 0x001100FF); REG_UPDATE_2(DMCUB_CNTL, DMCUB_ENABLE, 1, DMCUB_TRACEPORT_EN, 1); REG_UPDATE(DMCUB_CNTL, DMCUB_SOFT_RESET, 0); } -void dmub_dcn20_backdoor_load(struct dmub_srv *dmub, struct dmub_window *cw0, - struct dmub_window *cw1) +void dmub_dcn20_backdoor_load(struct dmub_srv *dmub, + const struct dmub_window *cw0, + const struct dmub_window *cw1) { + union dmub_addr offset; + uint64_t fb_base, fb_offset; + + dmub_dcn20_get_fb_base_offset(dmub, &fb_base, &fb_offset); + REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1); - REG_UPDATE_2(DMCUB_MEM_CNTL, DMCUB_MEM_READ_SPACE, 0x4, - DMCUB_MEM_WRITE_SPACE, 0x4); + REG_UPDATE_2(DMCUB_MEM_CNTL, DMCUB_MEM_READ_SPACE, 0x3, + DMCUB_MEM_WRITE_SPACE, 0x3); + + dmub_dcn20_translate_addr(&cw0->offset, fb_base, fb_offset, &offset); - REG_WRITE(DMCUB_REGION3_CW0_OFFSET, cw0->offset.u.low_part); - REG_WRITE(DMCUB_REGION3_CW0_OFFSET_HIGH, cw0->offset.u.high_part); + REG_WRITE(DMCUB_REGION3_CW0_OFFSET, offset.u.low_part); + REG_WRITE(DMCUB_REGION3_CW0_OFFSET_HIGH, offset.u.high_part); REG_WRITE(DMCUB_REGION3_CW0_BASE_ADDRESS, cw0->region.base); REG_SET_2(DMCUB_REGION3_CW0_TOP_ADDRESS, 0, DMCUB_REGION3_CW0_TOP_ADDRESS, cw0->region.top, DMCUB_REGION3_CW0_ENABLE, 1); - REG_WRITE(DMCUB_REGION3_CW1_OFFSET, cw1->offset.u.low_part); - REG_WRITE(DMCUB_REGION3_CW1_OFFSET_HIGH, cw1->offset.u.high_part); + dmub_dcn20_translate_addr(&cw1->offset, fb_base, fb_offset, &offset); + + REG_WRITE(DMCUB_REGION3_CW1_OFFSET, offset.u.low_part); + REG_WRITE(DMCUB_REGION3_CW1_OFFSET_HIGH, offset.u.high_part); REG_WRITE(DMCUB_REGION3_CW1_BASE_ADDRESS, cw1->region.base); REG_SET_2(DMCUB_REGION3_CW1_TOP_ADDRESS, 0, DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top, @@ -79,37 +134,51 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub, const struct dmub_window *cw5, const struct dmub_window *cw6) { - REG_WRITE(DMCUB_REGION3_CW2_OFFSET, cw2->offset.u.low_part); - REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, cw2->offset.u.high_part); + union dmub_addr offset; + uint64_t fb_base, fb_offset; + + dmub_dcn20_get_fb_base_offset(dmub, &fb_base, &fb_offset); + + dmub_dcn20_translate_addr(&cw2->offset, fb_base, fb_offset, &offset); + + REG_WRITE(DMCUB_REGION3_CW2_OFFSET, offset.u.low_part); + REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, offset.u.high_part); REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, cw2->region.base); REG_SET_2(DMCUB_REGION3_CW2_TOP_ADDRESS, 0, DMCUB_REGION3_CW2_TOP_ADDRESS, cw2->region.top, DMCUB_REGION3_CW2_ENABLE, 1); - REG_WRITE(DMCUB_REGION3_CW3_OFFSET, cw3->offset.u.low_part); - REG_WRITE(DMCUB_REGION3_CW3_OFFSET_HIGH, cw3->offset.u.high_part); + 
dmub_dcn20_translate_addr(&cw3->offset, fb_base, fb_offset, &offset); + + REG_WRITE(DMCUB_REGION3_CW3_OFFSET, offset.u.low_part); + REG_WRITE(DMCUB_REGION3_CW3_OFFSET_HIGH, offset.u.high_part); REG_WRITE(DMCUB_REGION3_CW3_BASE_ADDRESS, cw3->region.base); REG_SET_2(DMCUB_REGION3_CW3_TOP_ADDRESS, 0, DMCUB_REGION3_CW3_TOP_ADDRESS, cw3->region.top, DMCUB_REGION3_CW3_ENABLE, 1); /* TODO: Move this to CW4. */ + dmub_dcn20_translate_addr(&cw4->offset, fb_base, fb_offset, &offset); - REG_WRITE(DMCUB_REGION4_OFFSET, cw4->offset.u.low_part); - REG_WRITE(DMCUB_REGION4_OFFSET_HIGH, cw4->offset.u.high_part); + REG_WRITE(DMCUB_REGION4_OFFSET, offset.u.low_part); + REG_WRITE(DMCUB_REGION4_OFFSET_HIGH, offset.u.high_part); REG_SET_2(DMCUB_REGION4_TOP_ADDRESS, 0, DMCUB_REGION4_TOP_ADDRESS, cw4->region.top - cw4->region.base - 1, DMCUB_REGION4_ENABLE, 1); - REG_WRITE(DMCUB_REGION3_CW5_OFFSET, cw5->offset.u.low_part); - REG_WRITE(DMCUB_REGION3_CW5_OFFSET_HIGH, cw5->offset.u.high_part); + dmub_dcn20_translate_addr(&cw5->offset, fb_base, fb_offset, &offset); + + REG_WRITE(DMCUB_REGION3_CW5_OFFSET, offset.u.low_part); + REG_WRITE(DMCUB_REGION3_CW5_OFFSET_HIGH, offset.u.high_part); REG_WRITE(DMCUB_REGION3_CW5_BASE_ADDRESS, cw5->region.base); REG_SET_2(DMCUB_REGION3_CW5_TOP_ADDRESS, 0, DMCUB_REGION3_CW5_TOP_ADDRESS, cw5->region.top, DMCUB_REGION3_CW5_ENABLE, 1); - REG_WRITE(DMCUB_REGION3_CW6_OFFSET, cw6->offset.u.low_part); - REG_WRITE(DMCUB_REGION3_CW6_OFFSET_HIGH, cw6->offset.u.high_part); + dmub_dcn20_translate_addr(&cw6->offset, fb_base, fb_offset, &offset); + + REG_WRITE(DMCUB_REGION3_CW6_OFFSET, offset.u.low_part); + REG_WRITE(DMCUB_REGION3_CW6_OFFSET_HIGH, offset.u.high_part); REG_WRITE(DMCUB_REGION3_CW6_BASE_ADDRESS, cw6->region.base); REG_SET_2(DMCUB_REGION3_CW6_TOP_ADDRESS, 0, DMCUB_REGION3_CW6_TOP_ADDRESS, cw6->region.top, @@ -123,8 +192,6 @@ void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub, REG_WRITE(DMCUB_INBOX1_BASE_ADDRESS, 0x80000000); REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base); - REG_WRITE(DMCUB_INBOX1_RPTR, 0); - REG_WRITE(DMCUB_INBOX1_WPTR, 0); } uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub) diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h index e70a57573467f72445d7c21f0d60f58049a4b288..04b0fa13153dc72e622762fecb4603c666d1d3fa 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h @@ -30,6 +30,129 @@ struct dmub_srv; +/* DCN20 register definitions. 
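+ * DMUB_COMMON_REGS() and DMUB_COMMON_FIELDS() are X-macro lists:
+ * dmub_dcn20.c expands each list three times with different DMUB_SR()
+ * and DMUB_SF() definitions to fill, in order, the offset, mask and
+ * shift tables of struct dmub_srv_common_regs, so adding one entry
+ * here populates all three tables at once.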
*/ + +#define DMUB_COMMON_REGS() \ + DMUB_SR(DMCUB_CNTL) \ + DMUB_SR(DMCUB_MEM_CNTL) \ + DMUB_SR(DMCUB_SEC_CNTL) \ + DMUB_SR(DMCUB_INBOX1_BASE_ADDRESS) \ + DMUB_SR(DMCUB_INBOX1_SIZE) \ + DMUB_SR(DMCUB_INBOX1_RPTR) \ + DMUB_SR(DMCUB_INBOX1_WPTR) \ + DMUB_SR(DMCUB_REGION3_CW0_OFFSET) \ + DMUB_SR(DMCUB_REGION3_CW1_OFFSET) \ + DMUB_SR(DMCUB_REGION3_CW2_OFFSET) \ + DMUB_SR(DMCUB_REGION3_CW3_OFFSET) \ + DMUB_SR(DMCUB_REGION3_CW4_OFFSET) \ + DMUB_SR(DMCUB_REGION3_CW5_OFFSET) \ + DMUB_SR(DMCUB_REGION3_CW6_OFFSET) \ + DMUB_SR(DMCUB_REGION3_CW7_OFFSET) \ + DMUB_SR(DMCUB_REGION3_CW0_OFFSET_HIGH) \ + DMUB_SR(DMCUB_REGION3_CW1_OFFSET_HIGH) \ + DMUB_SR(DMCUB_REGION3_CW2_OFFSET_HIGH) \ + DMUB_SR(DMCUB_REGION3_CW3_OFFSET_HIGH) \ + DMUB_SR(DMCUB_REGION3_CW4_OFFSET_HIGH) \ + DMUB_SR(DMCUB_REGION3_CW5_OFFSET_HIGH) \ + DMUB_SR(DMCUB_REGION3_CW6_OFFSET_HIGH) \ + DMUB_SR(DMCUB_REGION3_CW7_OFFSET_HIGH) \ + DMUB_SR(DMCUB_REGION3_CW0_BASE_ADDRESS) \ + DMUB_SR(DMCUB_REGION3_CW1_BASE_ADDRESS) \ + DMUB_SR(DMCUB_REGION3_CW2_BASE_ADDRESS) \ + DMUB_SR(DMCUB_REGION3_CW3_BASE_ADDRESS) \ + DMUB_SR(DMCUB_REGION3_CW4_BASE_ADDRESS) \ + DMUB_SR(DMCUB_REGION3_CW5_BASE_ADDRESS) \ + DMUB_SR(DMCUB_REGION3_CW6_BASE_ADDRESS) \ + DMUB_SR(DMCUB_REGION3_CW7_BASE_ADDRESS) \ + DMUB_SR(DMCUB_REGION3_CW0_TOP_ADDRESS) \ + DMUB_SR(DMCUB_REGION3_CW1_TOP_ADDRESS) \ + DMUB_SR(DMCUB_REGION3_CW2_TOP_ADDRESS) \ + DMUB_SR(DMCUB_REGION3_CW3_TOP_ADDRESS) \ + DMUB_SR(DMCUB_REGION3_CW4_TOP_ADDRESS) \ + DMUB_SR(DMCUB_REGION3_CW5_TOP_ADDRESS) \ + DMUB_SR(DMCUB_REGION3_CW6_TOP_ADDRESS) \ + DMUB_SR(DMCUB_REGION3_CW7_TOP_ADDRESS) \ + DMUB_SR(DMCUB_REGION4_OFFSET) \ + DMUB_SR(DMCUB_REGION4_OFFSET_HIGH) \ + DMUB_SR(DMCUB_REGION4_TOP_ADDRESS) \ + DMUB_SR(DMCUB_SCRATCH0) \ + DMUB_SR(DMCUB_SCRATCH1) \ + DMUB_SR(DMCUB_SCRATCH2) \ + DMUB_SR(DMCUB_SCRATCH3) \ + DMUB_SR(DMCUB_SCRATCH4) \ + DMUB_SR(DMCUB_SCRATCH5) \ + DMUB_SR(DMCUB_SCRATCH6) \ + DMUB_SR(DMCUB_SCRATCH7) \ + DMUB_SR(DMCUB_SCRATCH8) \ + DMUB_SR(DMCUB_SCRATCH9) \ + DMUB_SR(DMCUB_SCRATCH10) \ + DMUB_SR(DMCUB_SCRATCH11) \ + DMUB_SR(DMCUB_SCRATCH12) \ + DMUB_SR(DMCUB_SCRATCH13) \ + DMUB_SR(DMCUB_SCRATCH14) \ + DMUB_SR(DMCUB_SCRATCH15) \ + DMUB_SR(CC_DC_PIPE_DIS) \ + DMUB_SR(MMHUBBUB_SOFT_RESET) \ + DMUB_SR(DCN_VM_FB_LOCATION_BASE) \ + DMUB_SR(DCN_VM_FB_OFFSET) + +#define DMUB_COMMON_FIELDS() \ + DMUB_SF(DMCUB_CNTL, DMCUB_ENABLE) \ + DMUB_SF(DMCUB_CNTL, DMCUB_SOFT_RESET) \ + DMUB_SF(DMCUB_CNTL, DMCUB_TRACEPORT_EN) \ + DMUB_SF(DMCUB_MEM_CNTL, DMCUB_MEM_READ_SPACE) \ + DMUB_SF(DMCUB_MEM_CNTL, DMCUB_MEM_WRITE_SPACE) \ + DMUB_SF(DMCUB_SEC_CNTL, DMCUB_SEC_RESET) \ + DMUB_SF(DMCUB_SEC_CNTL, DMCUB_MEM_UNIT_ID) \ + DMUB_SF(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_TOP_ADDRESS) \ + DMUB_SF(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE) \ + DMUB_SF(DMCUB_REGION3_CW1_TOP_ADDRESS, DMCUB_REGION3_CW1_TOP_ADDRESS) \ + DMUB_SF(DMCUB_REGION3_CW1_TOP_ADDRESS, DMCUB_REGION3_CW1_ENABLE) \ + DMUB_SF(DMCUB_REGION3_CW2_TOP_ADDRESS, DMCUB_REGION3_CW2_TOP_ADDRESS) \ + DMUB_SF(DMCUB_REGION3_CW2_TOP_ADDRESS, DMCUB_REGION3_CW2_ENABLE) \ + DMUB_SF(DMCUB_REGION3_CW3_TOP_ADDRESS, DMCUB_REGION3_CW3_TOP_ADDRESS) \ + DMUB_SF(DMCUB_REGION3_CW3_TOP_ADDRESS, DMCUB_REGION3_CW3_ENABLE) \ + DMUB_SF(DMCUB_REGION3_CW4_TOP_ADDRESS, DMCUB_REGION3_CW4_TOP_ADDRESS) \ + DMUB_SF(DMCUB_REGION3_CW4_TOP_ADDRESS, DMCUB_REGION3_CW4_ENABLE) \ + DMUB_SF(DMCUB_REGION3_CW5_TOP_ADDRESS, DMCUB_REGION3_CW5_TOP_ADDRESS) \ + DMUB_SF(DMCUB_REGION3_CW5_TOP_ADDRESS, DMCUB_REGION3_CW5_ENABLE) \ + DMUB_SF(DMCUB_REGION3_CW6_TOP_ADDRESS, 
DMCUB_REGION3_CW6_TOP_ADDRESS) \ + DMUB_SF(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE) \ + DMUB_SF(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_TOP_ADDRESS) \ + DMUB_SF(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_ENABLE) \ + DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_TOP_ADDRESS) \ + DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_ENABLE) \ + DMUB_SF(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE) \ + DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET) \ + DMUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE) \ + DMUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET) + +struct dmub_srv_common_reg_offset { +#define DMUB_SR(reg) uint32_t reg; + DMUB_COMMON_REGS() +#undef DMUB_SR +}; + +struct dmub_srv_common_reg_shift { +#define DMUB_SF(reg, field) uint8_t reg##__##field; + DMUB_COMMON_FIELDS() +#undef DMUB_SF +}; + +struct dmub_srv_common_reg_mask { +#define DMUB_SF(reg, field) uint32_t reg##__##field; + DMUB_COMMON_FIELDS() +#undef DMUB_SF +}; + +struct dmub_srv_common_regs { + const struct dmub_srv_common_reg_offset offset; + const struct dmub_srv_common_reg_mask mask; + const struct dmub_srv_common_reg_shift shift; +}; + +extern const struct dmub_srv_common_regs dmub_srv_dcn20_regs; + /* Hardware functions. */ void dmub_dcn20_init(struct dmub_srv *dmub); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c index 9cea7a2d8dbfd36a0b9934818f5a22e6a90a4cbf..5bed9fcd6b5cc06f76f8fb2827de3a360b8aab3e 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c @@ -25,6 +25,7 @@ #include "../inc/dmub_srv.h" #include "dmub_reg.h" +#include "dmub_dcn21.h" #include "dcn/dcn_2_1_0_offset.h" #include "dcn/dcn_2_1_0_sh_mask.h" @@ -32,103 +33,25 @@ #define BASE_INNER(seg) DMU_BASE__INST0_SEG##seg #define CTX dmub +#define REGS dmub->regs -static inline void dmub_dcn21_translate_addr(const union dmub_addr *addr_in, - uint64_t fb_base, - uint64_t fb_offset, - union dmub_addr *addr_out) -{ - addr_out->quad_part = addr_in->quad_part - fb_base + fb_offset; -} - -void dmub_dcn21_backdoor_load(struct dmub_srv *dmub, - const struct dmub_window *cw0, - const struct dmub_window *cw1) -{ - union dmub_addr offset; - uint64_t fb_base = dmub->fb_base, fb_offset = dmub->fb_offset; - - REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1); - REG_UPDATE_2(DMCUB_MEM_CNTL, DMCUB_MEM_READ_SPACE, 0x3, - DMCUB_MEM_WRITE_SPACE, 0x3); - - dmub_dcn21_translate_addr(&cw0->offset, fb_base, fb_offset, &offset); - - REG_WRITE(DMCUB_REGION3_CW0_OFFSET, offset.u.low_part); - REG_WRITE(DMCUB_REGION3_CW0_OFFSET_HIGH, offset.u.high_part); - REG_WRITE(DMCUB_REGION3_CW0_BASE_ADDRESS, cw0->region.base); - REG_SET_2(DMCUB_REGION3_CW0_TOP_ADDRESS, 0, - DMCUB_REGION3_CW0_TOP_ADDRESS, cw0->region.top, - DMCUB_REGION3_CW0_ENABLE, 1); - - dmub_dcn21_translate_addr(&cw1->offset, fb_base, fb_offset, &offset); - - REG_WRITE(DMCUB_REGION3_CW1_OFFSET, offset.u.low_part); - REG_WRITE(DMCUB_REGION3_CW1_OFFSET_HIGH, offset.u.high_part); - REG_WRITE(DMCUB_REGION3_CW1_BASE_ADDRESS, cw1->region.base); - REG_SET_2(DMCUB_REGION3_CW1_TOP_ADDRESS, 0, - DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top, - DMCUB_REGION3_CW1_ENABLE, 1); - - REG_UPDATE_2(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0, DMCUB_MEM_UNIT_ID, - 0x20); -} - -void dmub_dcn21_setup_windows(struct dmub_srv *dmub, - const struct dmub_window *cw2, - const struct dmub_window *cw3, - const struct dmub_window *cw4, - const struct dmub_window *cw5, - const struct dmub_window *cw6) -{ - union dmub_addr offset; - uint64_t 
fb_base = dmub->fb_base, fb_offset = dmub->fb_offset; - - dmub_dcn21_translate_addr(&cw2->offset, fb_base, fb_offset, &offset); - - REG_WRITE(DMCUB_REGION3_CW2_OFFSET, offset.u.low_part); - REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, offset.u.high_part); - REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, cw2->region.base); - REG_SET_2(DMCUB_REGION3_CW2_TOP_ADDRESS, 0, - DMCUB_REGION3_CW2_TOP_ADDRESS, cw2->region.top, - DMCUB_REGION3_CW2_ENABLE, 1); +/* Registers. */ - dmub_dcn21_translate_addr(&cw3->offset, fb_base, fb_offset, &offset); +const struct dmub_srv_common_regs dmub_srv_dcn21_regs = { +#define DMUB_SR(reg) REG_OFFSET(reg), + { DMUB_COMMON_REGS() }, +#undef DMUB_SR - REG_WRITE(DMCUB_REGION3_CW3_OFFSET, offset.u.low_part); - REG_WRITE(DMCUB_REGION3_CW3_OFFSET_HIGH, offset.u.high_part); - REG_WRITE(DMCUB_REGION3_CW3_BASE_ADDRESS, cw3->region.base); - REG_SET_2(DMCUB_REGION3_CW3_TOP_ADDRESS, 0, - DMCUB_REGION3_CW3_TOP_ADDRESS, cw3->region.top, - DMCUB_REGION3_CW3_ENABLE, 1); +#define DMUB_SF(reg, field) FD_MASK(reg, field), + { DMUB_COMMON_FIELDS() }, +#undef DMUB_SF - /* TODO: Move this to CW4. */ - dmub_dcn21_translate_addr(&cw4->offset, fb_base, fb_offset, &offset); +#define DMUB_SF(reg, field) FD_SHIFT(reg, field), + { DMUB_COMMON_FIELDS() }, +#undef DMUB_SF +}; - REG_WRITE(DMCUB_REGION4_OFFSET, offset.u.low_part); - REG_WRITE(DMCUB_REGION4_OFFSET_HIGH, offset.u.high_part); - REG_SET_2(DMCUB_REGION4_TOP_ADDRESS, 0, DMCUB_REGION4_TOP_ADDRESS, - cw4->region.top - cw4->region.base - 1, DMCUB_REGION4_ENABLE, - 1); - - dmub_dcn21_translate_addr(&cw5->offset, fb_base, fb_offset, &offset); - - REG_WRITE(DMCUB_REGION3_CW5_OFFSET, offset.u.low_part); - REG_WRITE(DMCUB_REGION3_CW5_OFFSET_HIGH, offset.u.high_part); - REG_WRITE(DMCUB_REGION3_CW5_BASE_ADDRESS, cw5->region.base); - REG_SET_2(DMCUB_REGION3_CW5_TOP_ADDRESS, 0, - DMCUB_REGION3_CW5_TOP_ADDRESS, cw5->region.top, - DMCUB_REGION3_CW5_ENABLE, 1); - - dmub_dcn21_translate_addr(&cw6->offset, fb_base, fb_offset, &offset); - - REG_WRITE(DMCUB_REGION3_CW6_OFFSET, offset.u.low_part); - REG_WRITE(DMCUB_REGION3_CW6_OFFSET_HIGH, offset.u.high_part); - REG_WRITE(DMCUB_REGION3_CW6_BASE_ADDRESS, cw6->region.base); - REG_SET_2(DMCUB_REGION3_CW6_TOP_ADDRESS, 0, - DMCUB_REGION3_CW6_TOP_ADDRESS, cw6->region.top, - DMCUB_REGION3_CW6_ENABLE, 1); -} +/* Shared functions. */ bool dmub_dcn21_is_auto_load_done(struct dmub_srv *dmub) { diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h index f7a93a5dcfa50d505b3089dd6c4f5119dbeda01d..2bbea237137bf0d42bffa8c70629e42be0f2ba92 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h @@ -28,18 +28,11 @@ #include "dmub_dcn20.h" -/* Hardware functions. */ +/* Registers. */ -void dmub_dcn21_backdoor_load(struct dmub_srv *dmub, - const struct dmub_window *cw0, - const struct dmub_window *cw1); +extern const struct dmub_srv_common_regs dmub_srv_dcn21_regs; -void dmub_dcn21_setup_windows(struct dmub_srv *dmub, - const struct dmub_window *cw2, - const struct dmub_window *cw3, - const struct dmub_window *cw4, - const struct dmub_window *cw5, - const struct dmub_window *cw6); +/* Hardware functions. 
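+ * Only the DCN21-specific checks below remain; backdoor load and
+ * window programming now reuse the DCN20 implementations, with the
+ * per-ASIC differences carried by dmub_srv_dcn21_regs above.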
*/ bool dmub_dcn21_is_auto_load_done(struct dmub_srv *dmub); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h index bac4ee8f745f393c43f2b3eddc099a9ab542289e..c1f4030929a4bd0419b176cc2038b3f87e0d6a06 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h @@ -34,11 +34,15 @@ struct dmub_srv; #define BASE(seg) BASE_INNER(seg) -#define REG_OFFSET(base_index, addr) (BASE(base_index) + addr) +#define REG_OFFSET(reg_name) (BASE(mm##reg_name##_BASE_IDX) + mm##reg_name) -#define REG(reg_name) REG_OFFSET(mm ## reg_name ## _BASE_IDX, mm ## reg_name) +#define FD_SHIFT(reg_name, field) reg_name##__##field##__SHIFT -#define FD(reg_field) reg_field ## __SHIFT, reg_field ## _MASK +#define FD_MASK(reg_name, field) reg_name##__##field##_MASK + +#define REG(reg) (REGS)->offset.reg + +#define FD(reg_field) (REGS)->shift.reg_field, (REGS)->mask.reg_field #define FN(reg_name, field) FD(reg_name##__##field) diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index 5f39166d3c08c2e65ae438cd92e4005c5039a834..85a518bf8a76b155cc75750fc0add87d25263ca4 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -26,7 +26,7 @@ #include "../inc/dmub_srv.h" #include "dmub_dcn20.h" #include "dmub_dcn21.h" -#include "dmub_trace_buffer.h" +#include "dmub_fw_meta.h" #include "os_types.h" /* * Note: the DMUB service is standalone. No additional headers should be @@ -46,6 +46,11 @@ /* Mailbox size */ #define DMUB_MAILBOX_SIZE (DMUB_RB_SIZE) +/* Default state size if meta is absent. */ +#define DMUB_FW_STATE_SIZE (1024) + +/* Default tracebuffer size if meta is absent. */ +#define DMUB_TRACE_BUFFER_SIZE (1024) /* Number of windows in use. */ #define DMUB_NUM_WINDOWS (DMUB_WINDOW_6_FW_STATE + 1) @@ -62,6 +67,47 @@ static inline uint32_t dmub_align(uint32_t val, uint32_t factor) return (val + factor - 1) / factor * factor; } +static void dmub_flush_buffer_mem(const struct dmub_fb *fb) +{ + const uint8_t *base = (const uint8_t *)fb->cpu_addr; + uint8_t buf[64]; + uint32_t pos, end; + + /** + * Read 64-byte chunks since we don't want to store a + * large temporary buffer for this purpose. + */ + end = fb->size / sizeof(buf) * sizeof(buf); + + for (pos = 0; pos < end; pos += sizeof(buf)) + dmub_memcpy(buf, base + pos, sizeof(buf)); + + /* Read anything leftover into the buffer. 
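+ * Only the read's side effect matters; the bounce buffer
+ * contents are discarded.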
*/ + if (end < fb->size) + dmub_memcpy(buf, base + pos, fb->size - end); +} + +static const struct dmub_fw_meta_info * +dmub_get_fw_meta_info(const uint8_t *fw_bss_data, uint32_t fw_bss_data_size) +{ + const union dmub_fw_meta *meta; + + if (fw_bss_data == NULL) + return NULL; + + if (fw_bss_data_size < sizeof(union dmub_fw_meta) + DMUB_FW_META_OFFSET) + return NULL; + + meta = (const union dmub_fw_meta *)(fw_bss_data + fw_bss_data_size - + DMUB_FW_META_OFFSET - + sizeof(union dmub_fw_meta)); + + if (meta->info.magic_value != DMUB_FW_META_MAGIC) + return NULL; + + return &meta->info; +} + static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) { struct dmub_srv_hw_funcs *funcs = &dmub->hw_funcs; @@ -69,6 +115,8 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) switch (asic) { case DMUB_ASIC_DCN20: case DMUB_ASIC_DCN21: + dmub->regs = &dmub_srv_dcn20_regs; + funcs->reset = dmub_dcn20_reset; funcs->reset_release = dmub_dcn20_reset_release; funcs->backdoor_load = dmub_dcn20_backdoor_load; @@ -80,8 +128,8 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) funcs->is_hw_init = dmub_dcn20_is_hw_init; if (asic == DMUB_ASIC_DCN21) { - funcs->backdoor_load = dmub_dcn21_backdoor_load; - funcs->setup_windows = dmub_dcn21_setup_windows; + dmub->regs = &dmub_srv_dcn21_regs; + funcs->is_auto_load_done = dmub_dcn21_is_auto_load_done; funcs->is_phy_init = dmub_dcn21_is_phy_init; } @@ -160,6 +208,9 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub, struct dmub_region *mail = &out->regions[DMUB_WINDOW_4_MAILBOX]; struct dmub_region *trace_buff = &out->regions[DMUB_WINDOW_5_TRACEBUFF]; struct dmub_region *fw_state = &out->regions[DMUB_WINDOW_6_FW_STATE]; + const struct dmub_fw_meta_info *fw_info; + uint32_t fw_state_size = DMUB_FW_STATE_SIZE; + uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE; if (!dmub->sw_init) return DMUB_STATUS_INVALID; @@ -174,6 +225,11 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub, data->base = dmub_align(inst->top, 256); data->top = data->base + params->bss_data_size; + /* + * All cache windows below should be aligned to the size + * of the DMCUB cache line, 64 bytes. + */ + stack->base = dmub_align(data->top, 256); stack->top = stack->base + DMUB_STACK_SIZE + DMUB_CONTEXT_SIZE; @@ -183,14 +239,19 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub, mail->base = dmub_align(bios->top, 256); mail->top = mail->base + DMUB_MAILBOX_SIZE; + fw_info = dmub_get_fw_meta_info(params->fw_bss_data, + params->bss_data_size); + + if (fw_info) { + fw_state_size = fw_info->fw_region_size; + trace_buffer_size = fw_info->trace_buffer_size; + } + trace_buff->base = dmub_align(mail->top, 256); - trace_buff->top = trace_buff->base + TRACE_BUF_SIZE; + trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64); fw_state->base = dmub_align(trace_buff->top, 256); - - /* Align firmware state to size of cache line. 
*/ - fw_state->top = - fw_state->base + dmub_align(sizeof(struct dmub_fw_state), 64); + fw_state->top = fw_state->base + dmub_align(fw_state_size, 64); out->fb_size = dmub_align(fw_state->top, 4096); @@ -251,6 +312,9 @@ enum dmub_status dmub_srv_is_hw_init(struct dmub_srv *dmub, bool *is_hw_init) if (!dmub->sw_init) return DMUB_STATUS_INVALID; + if (!dmub->hw_init) + return DMUB_STATUS_OK; + if (dmub->hw_funcs.is_hw_init) *is_hw_init = dmub->hw_funcs.is_hw_init(dmub); @@ -288,6 +352,13 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, cw1.region.base = DMUB_CW1_BASE; cw1.region.top = cw1.region.base + stack_fb->size - 1; + /** + * Read back all the instruction memory so we don't hang the + * DMCUB when backdoor loading if the write from x86 hasn't been + * flushed yet. This only occurs in backdoor loading. + */ + dmub_flush_buffer_mem(inst_fb); + if (params->load_inst_const && dmub->hw_funcs.backdoor_load) dmub->hw_funcs.backdoor_load(dmub, &cw0, &cw1); } @@ -347,6 +418,22 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, return DMUB_STATUS_OK; } +enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub) +{ + if (!dmub->sw_init) + return DMUB_STATUS_INVALID; + + if (dmub->hw_init == false) + return DMUB_STATUS_OK; + + if (dmub->hw_funcs.reset) + dmub->hw_funcs.reset(dmub); + + dmub->hw_init = false; + + return DMUB_STATUS_OK; +} + enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub, const struct dmub_cmd_header *cmd) { @@ -364,33 +451,17 @@ enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub) if (!dmub->hw_init) return DMUB_STATUS_INVALID; + /** + * Read back all the queued commands to ensure that they've + * been flushed to framebuffer memory. Otherwise DMCUB might + * read back stale, fully invalid or partially invalid data. 
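+ * (The framebuffer is typically mapped write-combined on the CPU
+ * side, so these dummy reads also force any combining buffers to
+ * drain before the write pointer below publishes the commands.)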
+ */ + dmub_rb_flush_pending(&dmub->inbox1_rb); + dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt); return DMUB_STATUS_OK; } -enum dmub_status dmub_srv_cmd_submit(struct dmub_srv *dmub, - const struct dmub_cmd_header *cmd, - uint32_t timeout_us) -{ - uint32_t i = 0; - - if (!dmub->hw_init) - return DMUB_STATUS_INVALID; - - for (i = 0; i <= timeout_us; ++i) { - dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub); - if (dmub_rb_push_front(&dmub->inbox1_rb, cmd)) { - dmub->hw_funcs.set_inbox1_wptr(dmub, - dmub->inbox1_rb.wrpt); - return DMUB_STATUS_OK; - } - - udelay(1); - } - - return DMUB_STATUS_TIMEOUT; -} - enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub, uint32_t timeout_us) { diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h index 72b659c63aeae4eab4ba2c2db0380504fd6796b4..a2903985b9e87495340d82ca6b83791a3c416364 100644 --- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h +++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h @@ -134,17 +134,31 @@ #define PICASSO_A0 0x41 /* DCN1_01 */ #define RAVEN2_A0 0x81 +#define RAVEN2_15D8_REV_94 0x94 +#define RAVEN2_15D8_REV_95 0x95 #define RAVEN2_15D8_REV_E3 0xE3 #define RAVEN2_15D8_REV_E4 0xE4 +#define RAVEN2_15D8_REV_E9 0xE9 +#define RAVEN2_15D8_REV_EA 0xEA +#define RAVEN2_15D8_REV_EB 0xEB #define RAVEN1_F0 0xF0 #define RAVEN_UNKNOWN 0xFF - +#ifndef ASICREV_IS_RAVEN #define ASICREV_IS_RAVEN(eChipRev) ((eChipRev >= RAVEN_A0) && eChipRev < RAVEN_UNKNOWN) +#endif + #define ASICREV_IS_PICASSO(eChipRev) ((eChipRev >= PICASSO_A0) && (eChipRev < RAVEN2_A0)) +#ifndef ASICREV_IS_RAVEN2 #define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < RAVEN1_F0)) +#endif #define ASICREV_IS_RV1_F0(eChipRev) ((eChipRev >= RAVEN1_F0) && (eChipRev < RAVEN_UNKNOWN)) #define ASICREV_IS_DALI(eChipRev) ((eChipRev == RAVEN2_15D8_REV_E3) \ || (eChipRev == RAVEN2_15D8_REV_E4)) +#define ASICREV_IS_POLLOCK(eChipRev) (eChipRev == RAVEN2_15D8_REV_94 \ + || eChipRev == RAVEN2_15D8_REV_95 \ + || eChipRev == RAVEN2_15D8_REV_E9 \ + || eChipRev == RAVEN2_15D8_REV_EA \ + || eChipRev == RAVEN2_15D8_REV_EB) #define FAMILY_RV 142 /* DCN 1*/ diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index b52c4d3796511c3365e26e72da78540cb309097b..cac09d500fda938daa5899f44f0bdc87a2725ea5 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -364,8 +364,10 @@ static struct fixed31_32 translate_from_linear_space( scratch_2 = dc_fixpt_mul(gamma_of_2, pow_buffer[pow_buffer_ptr%16]); - pow_buffer[pow_buffer_ptr%16] = scratch_2; - pow_buffer_ptr++; + if (pow_buffer_ptr != -1) { + pow_buffer[pow_buffer_ptr%16] = scratch_2; + pow_buffer_ptr++; + } scratch_1 = dc_fixpt_mul(scratch_1, scratch_2); scratch_1 = dc_fixpt_sub(scratch_1, args->a2); @@ -1671,129 +1673,6 @@ static bool map_regamma_hw_to_x_user( #define _EXTRA_POINTS 3 -bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, - const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed, - const struct freesync_hdr_tf_params *fs_params) -{ - struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts; - struct dividers dividers; - - struct pwl_float_data *rgb_user = NULL; - struct pwl_float_data_ex *rgb_regamma = NULL; - struct gamma_pixel *axis_x = NULL; - struct pixel_gamma_point *coeff = NULL; - enum dc_transfer_func_predefined tf = 
TRANSFER_FUNCTION_SRGB; - bool ret = false; - - if (output_tf->type == TF_TYPE_BYPASS) - return false; - - /* we can use hardcoded curve for plain SRGB TF */ - if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true && - output_tf->tf == TRANSFER_FUNCTION_SRGB) { - if (ramp == NULL) - return true; - if ((ramp->is_identity && ramp->type != GAMMA_CS_TFM_1D) || - (!mapUserRamp && ramp->type == GAMMA_RGB_256)) - return true; - } - - output_tf->type = TF_TYPE_DISTRIBUTED_POINTS; - - if (ramp && ramp->type != GAMMA_CS_TFM_1D && - (mapUserRamp || ramp->type != GAMMA_RGB_256)) { - rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS, - sizeof(*rgb_user), - GFP_KERNEL); - if (!rgb_user) - goto rgb_user_alloc_fail; - - axis_x = kvcalloc(ramp->num_entries + 3, sizeof(*axis_x), - GFP_KERNEL); - if (!axis_x) - goto axis_x_alloc_fail; - - dividers.divider1 = dc_fixpt_from_fraction(3, 2); - dividers.divider2 = dc_fixpt_from_int(2); - dividers.divider3 = dc_fixpt_from_fraction(5, 2); - - build_evenly_distributed_points( - axis_x, - ramp->num_entries, - dividers); - - if (ramp->type == GAMMA_RGB_256 && mapUserRamp) - scale_gamma(rgb_user, ramp, dividers); - else if (ramp->type == GAMMA_RGB_FLOAT_1024) - scale_gamma_dx(rgb_user, ramp, dividers); - } - - rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, - sizeof(*rgb_regamma), - GFP_KERNEL); - if (!rgb_regamma) - goto rgb_regamma_alloc_fail; - - coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff), - GFP_KERNEL); - if (!coeff) - goto coeff_alloc_fail; - - tf = output_tf->tf; - if (tf == TRANSFER_FUNCTION_PQ) { - tf_pts->end_exponent = 7; - tf_pts->x_point_at_y1_red = 125; - tf_pts->x_point_at_y1_green = 125; - tf_pts->x_point_at_y1_blue = 125; - - build_pq(rgb_regamma, - MAX_HW_POINTS, - coordinates_x, - output_tf->sdr_ref_white_level); - } else if (tf == TRANSFER_FUNCTION_GAMMA22 && - fs_params != NULL && fs_params->skip_tm == 0) { - build_freesync_hdr(rgb_regamma, - MAX_HW_POINTS, - coordinates_x, - fs_params); - } else if (tf == TRANSFER_FUNCTION_HLG) { - build_freesync_hdr(rgb_regamma, - MAX_HW_POINTS, - coordinates_x, - fs_params); - - } else { - tf_pts->end_exponent = 0; - tf_pts->x_point_at_y1_red = 1; - tf_pts->x_point_at_y1_green = 1; - tf_pts->x_point_at_y1_blue = 1; - - build_regamma(rgb_regamma, - MAX_HW_POINTS, - coordinates_x, tf); - } - map_regamma_hw_to_x_user(ramp, coeff, rgb_user, - coordinates_x, axis_x, rgb_regamma, - MAX_HW_POINTS, tf_pts, - (mapUserRamp || (ramp && ramp->type != GAMMA_RGB_256)) && - (ramp && ramp->type != GAMMA_CS_TFM_1D)); - - if (ramp && ramp->type == GAMMA_CS_TFM_1D) - apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts); - - ret = true; - - kvfree(coeff); -coeff_alloc_fail: - kvfree(rgb_regamma); -rgb_regamma_alloc_fail: - kvfree(axis_x); -axis_x_alloc_fail: - kvfree(rgb_user); -rgb_user_alloc_fail: - return ret; -} - bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf, const struct regamma_lut *regamma) { @@ -2041,14 +1920,14 @@ rgb_user_alloc_fail: return ret; } - -bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans, +static bool calculate_curve(enum dc_transfer_func_predefined trans, struct dc_transfer_func_distributed_points *points, + struct pwl_float_data_ex *rgb_regamma, + const struct freesync_hdr_tf_params *fs_params, uint32_t sdr_ref_white_level) { uint32_t i; bool ret = false; - struct pwl_float_data_ex *rgb_regamma = NULL; if (trans == TRANSFER_FUNCTION_UNITY || trans == TRANSFER_FUNCTION_LINEAR) { @@ -2058,68 +1937,33 @@ bool 
mod_color_calculate_curve(enum dc_transfer_func_predefined trans, points->x_point_at_y1_blue = 1; for (i = 0; i <= MAX_HW_POINTS ; i++) { - points->red[i] = coordinates_x[i].x; - points->green[i] = coordinates_x[i].x; - points->blue[i] = coordinates_x[i].x; + rgb_regamma[i].r = coordinates_x[i].x; + rgb_regamma[i].g = coordinates_x[i].x; + rgb_regamma[i].b = coordinates_x[i].x; } + ret = true; } else if (trans == TRANSFER_FUNCTION_PQ) { - rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, - sizeof(*rgb_regamma), - GFP_KERNEL); - if (!rgb_regamma) - goto rgb_regamma_alloc_fail; points->end_exponent = 7; points->x_point_at_y1_red = 125; points->x_point_at_y1_green = 125; points->x_point_at_y1_blue = 125; - build_pq(rgb_regamma, MAX_HW_POINTS, coordinates_x, sdr_ref_white_level); - for (i = 0; i <= MAX_HW_POINTS ; i++) { - points->red[i] = rgb_regamma[i].r; - points->green[i] = rgb_regamma[i].g; - points->blue[i] = rgb_regamma[i].b; - } - ret = true; - kvfree(rgb_regamma); - } else if (trans == TRANSFER_FUNCTION_SRGB || - trans == TRANSFER_FUNCTION_BT709 || - trans == TRANSFER_FUNCTION_GAMMA22 || - trans == TRANSFER_FUNCTION_GAMMA24 || - trans == TRANSFER_FUNCTION_GAMMA26) { - rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, - sizeof(*rgb_regamma), - GFP_KERNEL); - if (!rgb_regamma) - goto rgb_regamma_alloc_fail; - points->end_exponent = 0; - points->x_point_at_y1_red = 1; - points->x_point_at_y1_green = 1; - points->x_point_at_y1_blue = 1; - - build_regamma(rgb_regamma, + ret = true; + } else if (trans == TRANSFER_FUNCTION_GAMMA22 && + fs_params != NULL && fs_params->skip_tm == 0) { + build_freesync_hdr(rgb_regamma, MAX_HW_POINTS, coordinates_x, - trans); - for (i = 0; i <= MAX_HW_POINTS ; i++) { - points->red[i] = rgb_regamma[i].r; - points->green[i] = rgb_regamma[i].g; - points->blue[i] = rgb_regamma[i].b; - } - ret = true; + fs_params); - kvfree(rgb_regamma); + ret = true; } else if (trans == TRANSFER_FUNCTION_HLG) { - rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, - sizeof(*rgb_regamma), - GFP_KERNEL); - if (!rgb_regamma) - goto rgb_regamma_alloc_fail; points->end_exponent = 4; points->x_point_at_y1_red = 12; points->x_point_at_y1_green = 12; @@ -2129,18 +1973,127 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans, MAX_HW_POINTS, coordinates_x, 80, 1000); - for (i = 0; i <= MAX_HW_POINTS ; i++) { - points->red[i] = rgb_regamma[i].r; - points->green[i] = rgb_regamma[i].g; - points->blue[i] = rgb_regamma[i].b; - } + + ret = true; + } else { + // trans == TRANSFER_FUNCTION_SRGB + // trans == TRANSFER_FUNCTION_BT709 + // trans == TRANSFER_FUNCTION_GAMMA22 + // trans == TRANSFER_FUNCTION_GAMMA24 + // trans == TRANSFER_FUNCTION_GAMMA26 + points->end_exponent = 0; + points->x_point_at_y1_red = 1; + points->x_point_at_y1_green = 1; + points->x_point_at_y1_blue = 1; + + build_regamma(rgb_regamma, + MAX_HW_POINTS, + coordinates_x, + trans); + ret = true; - kvfree(rgb_regamma); } -rgb_regamma_alloc_fail: + return ret; } +bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, + const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed, + const struct freesync_hdr_tf_params *fs_params) +{ + struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts; + struct dividers dividers; + + struct pwl_float_data *rgb_user = NULL; + struct pwl_float_data_ex *rgb_regamma = NULL; + struct gamma_pixel *axis_x = NULL; + struct pixel_gamma_point *coeff = NULL; + enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB; + bool ret = 
false; + + if (output_tf->type == TF_TYPE_BYPASS) + return false; + + /* we can use hardcoded curve for plain SRGB TF */ + if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true && + output_tf->tf == TRANSFER_FUNCTION_SRGB) { + if (ramp == NULL) + return true; + if ((ramp->is_identity && ramp->type != GAMMA_CS_TFM_1D) || + (!mapUserRamp && ramp->type == GAMMA_RGB_256)) + return true; + } + + output_tf->type = TF_TYPE_DISTRIBUTED_POINTS; + + if (ramp && ramp->type != GAMMA_CS_TFM_1D && + (mapUserRamp || ramp->type != GAMMA_RGB_256)) { + rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS, + sizeof(*rgb_user), + GFP_KERNEL); + if (!rgb_user) + goto rgb_user_alloc_fail; + + axis_x = kvcalloc(ramp->num_entries + 3, sizeof(*axis_x), + GFP_KERNEL); + if (!axis_x) + goto axis_x_alloc_fail; + + dividers.divider1 = dc_fixpt_from_fraction(3, 2); + dividers.divider2 = dc_fixpt_from_int(2); + dividers.divider3 = dc_fixpt_from_fraction(5, 2); + + build_evenly_distributed_points( + axis_x, + ramp->num_entries, + dividers); + + if (ramp->type == GAMMA_RGB_256 && mapUserRamp) + scale_gamma(rgb_user, ramp, dividers); + else if (ramp->type == GAMMA_RGB_FLOAT_1024) + scale_gamma_dx(rgb_user, ramp, dividers); + } + + rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, + sizeof(*rgb_regamma), + GFP_KERNEL); + if (!rgb_regamma) + goto rgb_regamma_alloc_fail; + + coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff), + GFP_KERNEL); + if (!coeff) + goto coeff_alloc_fail; + + tf = output_tf->tf; + + ret = calculate_curve(tf, + tf_pts, + rgb_regamma, + fs_params, + output_tf->sdr_ref_white_level); + + if (ret) { + map_regamma_hw_to_x_user(ramp, coeff, rgb_user, + coordinates_x, axis_x, rgb_regamma, + MAX_HW_POINTS, tf_pts, + (mapUserRamp || (ramp && ramp->type != GAMMA_RGB_256)) && + (ramp && ramp->type != GAMMA_CS_TFM_1D)); + + if (ramp && ramp->type == GAMMA_CS_TFM_1D) + apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts); + } + + kvfree(coeff); +coeff_alloc_fail: + kvfree(rgb_regamma); +rgb_regamma_alloc_fail: + kvfree(axis_x); +axis_x_alloc_fail: + kvfree(rgb_user); +rgb_user_alloc_fail: + return ret; +} bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans, struct dc_transfer_func_distributed_points *points) diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h index 44ddea58523a8008216e10437a08ddedef5e36c8..9994817a9a03267d79f6efb714c4b883819e88fb 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h @@ -103,10 +103,6 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, bool mod_color_calculate_degamma_params(struct dc_transfer_func *output_tf, const struct dc_gamma *ramp, bool mapUserRamp); -bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans, - struct dc_transfer_func_distributed_points *points, - uint32_t sdr_ref_white_level); - bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans, struct dc_transfer_func_distributed_points *points); diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index a94700940fd66e8ebc981b674fa8b53c38a03e95..b9992ebf77a627c06ae4e7cde30ae6b00cb8152a 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -37,8 +37,8 @@ #define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 
/ 60) * 65)
 /* Number of elements in the render times cache array */
 #define RENDER_TIMES_MAX_COUNT 10
-/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */
-#define BTR_EXIT_MARGIN 2000
+/* Threshold to enter/exit BTR (to avoid frequent enter-exits at the lower limit) */
+#define BTR_MAX_MARGIN 2500
 /* Threshold to change BTR multiplier (to avoid frequent changes) */
 #define BTR_DRIFT_MARGIN 2000
 /*Threshold to exit fixed refresh rate*/
@@ -254,24 +254,22 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
 unsigned int delta_from_mid_point_in_us_1 = 0xFFFFFFFF;
 unsigned int delta_from_mid_point_in_us_2 = 0xFFFFFFFF;
 unsigned int frames_to_insert = 0;
- unsigned int min_frame_duration_in_ns = 0;
- unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us;
 unsigned int delta_from_mid_point_delta_in_us;
-
- min_frame_duration_in_ns = ((unsigned int) (div64_u64(
- (1000000000ULL * 1000000),
- in_out_vrr->max_refresh_in_uhz)));
+ unsigned int max_render_time_in_us =
+ in_out_vrr->max_duration_in_us - in_out_vrr->btr.margin_in_us;
 /* Program BTR */
- if (last_render_time_in_us + BTR_EXIT_MARGIN < max_render_time_in_us) {
+ if ((last_render_time_in_us + in_out_vrr->btr.margin_in_us / 2) < max_render_time_in_us) {
 /* Exit Below the Range */
 if (in_out_vrr->btr.btr_active) {
 in_out_vrr->btr.frame_counter = 0;
 in_out_vrr->btr.btr_active = false;
 }
- } else if (last_render_time_in_us > max_render_time_in_us) {
+ } else if (last_render_time_in_us > (max_render_time_in_us + in_out_vrr->btr.margin_in_us / 2)) {
 /* Enter Below the Range */
- in_out_vrr->btr.btr_active = true;
+ if (!in_out_vrr->btr.btr_active) {
+ in_out_vrr->btr.btr_active = true;
+ }
 }
 /* BTR set to "not active" so disengage */
@@ -327,7 +325,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
 /* Choose number of frames to insert based on how close it
 * can get to the mid point of the variable range.
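 * Concretely, the choice below is between mid_point_frames_ceil and
 * mid_point_frames_floor inserted frames: the ceiling candidate wins
 * when its per-frame duration stays above min_duration_in_us and it
 * sits closer to btr.mid_point_in_us (or fewer than two frames would
 * otherwise be inserted).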
*/ - if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) { + if ((frame_time_in_us / mid_point_frames_ceil) > in_out_vrr->min_duration_in_us && + (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2 || + mid_point_frames_floor < 2)) { frames_to_insert = mid_point_frames_ceil; delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 - delta_from_mid_point_in_us_1; @@ -343,7 +343,7 @@ static void apply_below_the_range(struct core_freesync *core_freesync, if (in_out_vrr->btr.frames_to_insert != 0 && delta_from_mid_point_delta_in_us < BTR_DRIFT_MARGIN) { if (((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) < - in_out_vrr->max_duration_in_us) && + max_render_time_in_us) && ((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) > in_out_vrr->min_duration_in_us)) frames_to_insert = in_out_vrr->btr.frames_to_insert; @@ -381,7 +381,7 @@ static void apply_fixed_refresh(struct core_freesync *core_freesync, bool update = false; unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us; - //Compute the exit refresh rate and exit frame duration + /* Compute the exit refresh rate and exit frame duration */ unsigned int exit_refresh_rate_in_milli_hz = ((1000000000/max_render_time_in_us) + (1000*FIXED_REFRESH_EXIT_MARGIN_IN_HZ)); unsigned int exit_frame_duration_in_us = 1000000000/exit_refresh_rate_in_milli_hz; @@ -796,6 +796,11 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, refresh_range = in_out_vrr->max_refresh_in_uhz - in_out_vrr->min_refresh_in_uhz; + in_out_vrr->btr.margin_in_us = in_out_vrr->max_duration_in_us - + 2 * in_out_vrr->min_duration_in_us; + if (in_out_vrr->btr.margin_in_us > BTR_MAX_MARGIN) + in_out_vrr->btr.margin_in_us = BTR_MAX_MARGIN; + in_out_vrr->supported = true; } @@ -811,6 +816,9 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, in_out_vrr->btr.inserted_duration_in_us = 0; in_out_vrr->btr.frames_to_insert = 0; in_out_vrr->btr.frame_counter = 0; + in_out_vrr->fixed.fixed_active = false; + in_out_vrr->fixed.target_refresh_in_uhz = 0; + in_out_vrr->btr.mid_point_in_us = (in_out_vrr->min_duration_in_us + in_out_vrr->max_duration_in_us) / 2; @@ -826,6 +834,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, in_out_vrr->adjust.v_total_max = stream->timing.v_total; } else if (in_out_vrr->state == VRR_STATE_ACTIVE_VARIABLE && refresh_range >= MIN_REFRESH_RANGE_IN_US) { + in_out_vrr->adjust.v_total_min = calc_v_total_from_refresh(stream, in_out_vrr->max_refresh_in_uhz); diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h index f98d3d9ecb6dfa912f028675d9601b7d438eff60..af78e4f1be686a02730a4c2b289bd5bcb4fcc1bb 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h @@ -63,7 +63,7 @@ struct mod_hdcp_transition_input_hdcp1 { uint8_t hdcp_capable_dp; uint8_t binfo_read_dp; uint8_t r0p_available_dp; - uint8_t link_integiry_check; + uint8_t link_integrity_check; uint8_t reauth_request_check; uint8_t stream_encryption_dp; }; diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c index 04845e43df15c8896439a5e6819a21cd3d6cf7fa..37670db6485562dc5f5f8c8738966c5c47fc20ca 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c @@ -283,8 +283,8 @@ static enum mod_hdcp_status wait_for_ready(struct mod_hdcp 
*hdcp, hdcp, "bstatus_read")) goto out; if (!mod_hdcp_execute_and_set(check_link_integrity_dp, - &input->link_integiry_check, &status, - hdcp, "link_integiry_check")) + &input->link_integrity_check, &status, + hdcp, "link_integrity_check")) goto out; if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp, &input->reauth_request_check, &status, @@ -431,8 +431,8 @@ static enum mod_hdcp_status authenticated_dp(struct mod_hdcp *hdcp, hdcp, "bstatus_read")) goto out; if (!mod_hdcp_execute_and_set(check_link_integrity_dp, - &input->link_integiry_check, &status, - hdcp, "link_integiry_check")) + &input->link_integrity_check, &status, + hdcp, "link_integrity_check")) goto out; if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp, &input->reauth_request_check, &status, diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c index 136b8011ff3fc375439780948402bd18d18c414f..76edcbe51f71c1352d0eae3e97c271942ca7e3a1 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c @@ -67,11 +67,19 @@ enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp, break; case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER: if (input->bcaps_read != PASS || - input->r0p_read != PASS || - input->rx_validation != PASS || - (!conn->is_repeater && input->encryption != PASS)) { + input->r0p_read != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->rx_validation != PASS) { /* 1A-06: consider invalid r0' a failure */ /* 1A-08: consider bksv listed in SRM a failure */ + /* + * some slow RX will fail rx validation when it is + * not ready. give it more time to react before retry. + */ + fail_and_restart_in_ms(1000, &status, output); + break; + } else if (!conn->is_repeater && input->encryption != PASS) { fail_and_restart_in_ms(0, &status, output); break; } @@ -212,7 +220,11 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp, * after 3 attempts. * 1A-08: consider bksv listed in SRM a failure */ - fail_and_restart_in_ms(0, &status, output); + /* + * some slow RX will fail rx validation when it is + * not ready. give it more time to react before retry. 
+ */ + fail_and_restart_in_ms(1000, &status, output); } break; } else if ((!conn->is_repeater && input->encryption != PASS) || @@ -229,7 +241,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp, } break; case D1_A4_AUTHENTICATED: - if (input->link_integiry_check != PASS || + if (input->link_integrity_check != PASS || input->reauth_request_check != PASS) { /* 1A-07: restart hdcp on a link integrity failure */ fail_and_restart_in_ms(0, &status, output); @@ -237,7 +249,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp, } break; case D1_A6_WAIT_FOR_READY: - if (input->link_integiry_check == FAIL || + if (input->link_integrity_check == FAIL || input->reauth_request_check == FAIL) { fail_and_restart_in_ms(0, &status, output); break; diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c index e8043c903a84f67202d4c69bc7d88b146d7c5b75..8cae3e3aacd579617b34b9183730f989a6dfae06 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c @@ -114,7 +114,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp, if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { /* 1A-11-3: consider h' timeout a failure */ - fail_and_restart_in_ms(0, &status, output); + fail_and_restart_in_ms(1000, &status, output); } else { /* continue h' polling */ callback_in_ms(100, output); @@ -166,7 +166,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp, if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { /* 1A-11-2: consider h' timeout a failure */ - fail_and_restart_in_ms(0, &status, output); + fail_and_restart_in_ms(1000, &status, output); } else { /* continue h' polling */ callback_in_ms(20, output); @@ -439,7 +439,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp, if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) /* 1A-10-3: consider h' timeout a failure */ - fail_and_restart_in_ms(0, &status, output); + fail_and_restart_in_ms(1000, &status, output); else increment_stay_counter(hdcp); break; @@ -484,7 +484,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp, if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) /* 1A-10-2: consider h' timeout a failure */ - fail_and_restart_in_ms(0, &status, output); + fail_and_restart_in_ms(1000, &status, output); else increment_stay_counter(hdcp); break; @@ -630,7 +630,10 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp, break; } else if (input->prepare_stream_manage != PASS || input->stream_manage_write != PASS) { - fail_and_restart_in_ms(0, &status, output); + if (event_ctx->event == MOD_HDCP_EVENT_CALLBACK) + fail_and_restart_in_ms(0, &status, output); + else + increment_stay_counter(hdcp); break; } callback_in_ms(100, output); @@ -655,10 +658,12 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp, */ if (hdcp->auth.count.stream_management_retry_count > 10) { fail_and_restart_in_ms(0, &status, output); - } else { + } else if (event_ctx->event == MOD_HDCP_EVENT_CALLBACK) { hdcp->auth.count.stream_management_retry_count++; callback_in_ms(0, output); set_state_id(hdcp, output, D2_A9_SEND_STREAM_MANAGEMENT); + } else { + increment_stay_counter(hdcp); } break; } diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c index 
ef4eb55f44743e6e95d3a06ee0b8dbb43c4c06e6..7911dc157d5a719063b673acd7015b58a134639c 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c @@ -145,10 +145,11 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp) psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + hdcp->auth.id = hdcp_cmd->out_msg.hdcp1_create_session.session_handle; + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) return MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE; - hdcp->auth.id = hdcp_cmd->out_msg.hdcp1_create_session.session_handle; hdcp->auth.msg.hdcp1.ainfo = hdcp_cmd->out_msg.hdcp1_create_session.ainfo_primary; memcpy(hdcp->auth.msg.hdcp1.aksv, hdcp_cmd->out_msg.hdcp1_create_session.aksv_primary, sizeof(hdcp->auth.msg.hdcp1.aksv)); @@ -510,7 +511,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp) psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) - return MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE; + return MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE; if (msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) return MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE; @@ -794,7 +795,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp) hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2; psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); - return (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) && + return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) && (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE; diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h index dc187844d10b1f5e99e4423f15cadb6b422643d5..dbe7835aabcf747c25825e76dac9d2e047bb9025 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h @@ -92,6 +92,7 @@ struct mod_vrr_params_btr { uint32_t inserted_duration_in_us; uint32_t frames_to_insert; uint32_t frame_counter; + uint32_t margin_in_us; }; struct mod_vrr_params_fixed_refresh { diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c index 4e2f615c3566c2ba76ecea1698336b24dc712bb3..e75a4bb94488ec6fa71febd25d23e5e56853da8c 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c @@ -662,7 +662,11 @@ bool dmcu_load_iram(struct dmcu *dmcu, memset(&ram_table, 0, sizeof(ram_table)); - if (dmcu->dmcu_version.abm_version == 0x23) { + if (dmcu->dmcu_version.abm_version == 0x24) { + fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params); + result = dmcu->funcs->load_iram( + dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2); + } else if (dmcu->dmcu_version.abm_version == 0x23) { fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params); result = dmcu->funcs->load_iram( @@ -687,3 +691,4 @@ bool dmcu_load_iram(struct dmcu *dmcu, return result; } + diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_offset.h index cff8f91555d306108c3f60698142c4ed11c2f07c..e9b2bd84cfede9f92eaceddd728369683f6ceb88 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_offset.h +++ 
b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_offset.h @@ -8134,6 +8134,10 @@ #define mmMPC_OUT5_CSC_C33_C34_B 0x1604 #define mmMPC_OUT5_CSC_C33_C34_B_BASE_IDX 2 +#define mmMPC_OCSC_TEST_DEBUG_INDEX 0x163b +#define mmMPC_OCSC_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmMPC_OCSC_TEST_DEBUG_DATA_BASE_IDX 2 +#define mmMPC_OCSC_TEST_DEBUG_DATA 0x163c // addressBlock: dce_dc_mpc_mpc_dcperfmon_dc_perfmon_dispdec // base address: 0x5964 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h index 10c83fecd147754f59aa36b2068aecf22fbc19c8..dc8ce7aaa0cf179124c1b96d6d2c59e35f122f4f 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h @@ -28263,7 +28263,14 @@ #define MPC_OUT5_CSC_C33_C34_B__MPC_OCSC_C34_B__SHIFT 0x10 #define MPC_OUT5_CSC_C33_C34_B__MPC_OCSC_C33_B_MASK 0x0000FFFFL #define MPC_OUT5_CSC_C33_C34_B__MPC_OCSC_C34_B_MASK 0xFFFF0000L - +//MPC_OCSC_TEST_DEBUG_INDEX +#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_INDEX__SHIFT 0x0 +#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_WRITE_EN__SHIFT 0x8 +#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_INDEX_MASK 0x000000FFL +#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_WRITE_EN_MASK 0x00000100L +//MPC_OCSC_TEST_DEBUG_DATA +#define MPC_OCSC_TEST_DEBUG_DATA__MPC_OCSC_TEST_DEBUG_DATA__SHIFT 0x0 +#define MPC_OCSC_TEST_DEBUG_DATA__MPC_OCSC_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL // addressBlock: dce_dc_mpc_mpc_dcperfmon_dc_perfmon_dispdec //DC_PERFMON17_PERFCOUNTER_CNTL diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h index eddf83ec1c39605d1000e1f7c2e3bf01265a9a45..7cd0ee61c03041fb670720f712f75a9f3a7afbb6 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h @@ -7103,7 +7103,10 @@ #define mmMPC_OUT3_CSC_C31_C32_B_BASE_IDX 2 #define mmMPC_OUT3_CSC_C33_C34_B 0x15ea #define mmMPC_OUT3_CSC_C33_C34_B_BASE_IDX 2 - +#define mmMPC_OCSC_TEST_DEBUG_INDEX 0x163b +#define mmMPC_OCSC_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmMPC_OCSC_TEST_DEBUG_DATA_BASE_IDX 2 +#define mmMPC_OCSC_TEST_DEBUG_DATA 0x163c // addressBlock: dce_dc_mpc_mpc_dcperfmon_dc_perfmon_dispdec // base address: 0x5964 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_sh_mask.h index faa0e76e32b4c02810aa4a96ad326e23596dec75..2f780aefc7221aa78a2d829cdd00bc9d6635c99f 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_sh_mask.h @@ -56634,5 +56634,13 @@ #define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L #define AZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L +//MPC_OCSC_TEST_DEBUG_INDEX +#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_INDEX__SHIFT 0x0 +#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_WRITE_EN__SHIFT 0x8 +#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_INDEX_MASK 0x000000FFL +#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_WRITE_EN_MASK 0x00000100L +//MPC_OCSC_TEST_DEBUG_DATA +#define MPC_OCSC_TEST_DEBUG_DATA__MPC_OCSC_TEST_DEBUG_DATA__SHIFT 0x0 +#define MPC_OCSC_TEST_DEBUG_DATA__MPC_OCSC_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL #endif diff --git 
a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h index c2bd25589e845207057b32049ba303ce47506506..bb2c9c7a18dffec143ab7c37fc61d2b20bada8ee 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h @@ -27,6 +27,9 @@ #define mmDF_PIE_AON0_DfGlobalClkGater 0x00fc #define mmDF_PIE_AON0_DfGlobalClkGater_BASE_IDX 0 +#define mmDF_CS_UMC_AON0_DfGlobalCtrl 0x00fe +#define mmDF_CS_UMC_AON0_DfGlobalCtrl_BASE_IDX 0 + #define mmDF_CS_UMC_AON0_DramBaseAddress0 0x0044 #define mmDF_CS_UMC_AON0_DramBaseAddress0_BASE_IDX 0 @@ -38,6 +41,14 @@ #define smnPerfMonCtlHi2 0x01d464UL #define smnPerfMonCtlLo3 0x01d470UL #define smnPerfMonCtlHi3 0x01d474UL +#define smnPerfMonCtlLo4 0x01d880UL +#define smnPerfMonCtlHi4 0x01d884UL +#define smnPerfMonCtlLo5 0x01d888UL +#define smnPerfMonCtlHi5 0x01d88cUL +#define smnPerfMonCtlLo6 0x01d890UL +#define smnPerfMonCtlHi6 0x01d894UL +#define smnPerfMonCtlLo7 0x01d898UL +#define smnPerfMonCtlHi7 0x01d89cUL #define smnPerfMonCtrLo0 0x01d448UL #define smnPerfMonCtrHi0 0x01d44cUL @@ -47,9 +58,20 @@ #define smnPerfMonCtrHi2 0x01d46cUL #define smnPerfMonCtrLo3 0x01d478UL #define smnPerfMonCtrHi3 0x01d47cUL +#define smnPerfMonCtrLo4 0x01d790UL +#define smnPerfMonCtrHi4 0x01d794UL +#define smnPerfMonCtrLo5 0x01d798UL +#define smnPerfMonCtrHi5 0x01d79cUL +#define smnPerfMonCtrLo6 0x01d7a0UL +#define smnPerfMonCtrHi6 0x01d7a4UL +#define smnPerfMonCtrLo7 0x01d7a8UL +#define smnPerfMonCtrHi7 0x01d7acUL #define smnDF_PIE_AON_FabricIndirectConfigAccessAddress3 0x1d05cUL #define smnDF_PIE_AON_FabricIndirectConfigAccessDataLo3 0x1d098UL #define smnDF_PIE_AON_FabricIndirectConfigAccessDataHi3 0x1d09cUL +#define smnDF_CS_UMC_AON0_DramBaseAddress0 0x1c110UL +#define smnDF_CS_UMC_AON0_DramLimitAddress0 0x1c114UL + #endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h index 06fac509e987358fc491f6e2e7b48b2d531f8bde..7afa87c7ff5437d875182a552b9e1a928a38bd92 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h @@ -33,6 +33,14 @@ #define DF_PIE_AON0_DfGlobalClkGater__MGCGMode__SHIFT 0x0 #define DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK 0x0000000FL +/* DF_CS_UMC_AON0_DfGlobalCtrl */ +#define DF_CS_UMC_AON0_DfGlobalCtrl__GlbHashIntlvCtl64K__SHIFT 0x14 +#define DF_CS_UMC_AON0_DfGlobalCtrl__GlbHashIntlvCtl2M__SHIFT 0x15 +#define DF_CS_UMC_AON0_DfGlobalCtrl__GlbHashIntlvCtl1G__SHIFT 0x16 +#define DF_CS_UMC_AON0_DfGlobalCtrl__GlbHashIntlvCtl64K_MASK 0x00100000L +#define DF_CS_UMC_AON0_DfGlobalCtrl__GlbHashIntlvCtl2M_MASK 0x00200000L +#define DF_CS_UMC_AON0_DfGlobalCtrl__GlbHashIntlvCtl1G_MASK 0x00400000L + /* DF_CS_AON0_DramBaseAddress0 */ #define DF_CS_UMC_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0 #define DF_CS_UMC_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1 @@ -45,4 +53,12 @@ #define DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000E00L #define DF_CS_UMC_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L +//DF_CS_UMC_AON0_DramLimitAddress0 +#define DF_CS_UMC_AON0_DramLimitAddress0__DstFabricID__SHIFT 0x0 +#define DF_CS_UMC_AON0_DramLimitAddress0__AllowReqIO__SHIFT 0xa +#define DF_CS_UMC_AON0_DramLimitAddress0__DramLimitAddr__SHIFT 0xc +#define DF_CS_UMC_AON0_DramLimitAddress0__DstFabricID_MASK 0x000003FFL +#define DF_CS_UMC_AON0_DramLimitAddress0__AllowReqIO_MASK 0x00000400L +#define 
DF_CS_UMC_AON0_DramLimitAddress0__DramLimitAddr_MASK 0xFFFFF000L + #endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_offset.h new file mode 100644 index 0000000000000000000000000000000000000000..36ae5b7fe88e61bf619084fd55b6effb711f099c --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_offset.h @@ -0,0 +1,647 @@ +/* + * Copyright (C) 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef _dpcs_2_0_0_OFFSET_HEADER +#define _dpcs_2_0_0_OFFSET_HEADER + + + +// addressBlock: dpcssys_dpcs0_dpcstx0_dispdec +// base address: 0x0 +#define mmDPCSTX0_DPCSTX_TX_CLOCK_CNTL 0x2928 +#define mmDPCSTX0_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2 +#define mmDPCSTX0_DPCSTX_TX_CNTL 0x2929 +#define mmDPCSTX0_DPCSTX_TX_CNTL_BASE_IDX 2 +#define mmDPCSTX0_DPCSTX_CBUS_CNTL 0x292a +#define mmDPCSTX0_DPCSTX_CBUS_CNTL_BASE_IDX 2 +#define mmDPCSTX0_DPCSTX_INTERRUPT_CNTL 0x292b +#define mmDPCSTX0_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2 +#define mmDPCSTX0_DPCSTX_PLL_UPDATE_ADDR 0x292c +#define mmDPCSTX0_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2 +#define mmDPCSTX0_DPCSTX_PLL_UPDATE_DATA 0x292d +#define mmDPCSTX0_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2 +#define mmDPCSTX0_DPCSTX_DEBUG_CONFIG 0x292e +#define mmDPCSTX0_DPCSTX_DEBUG_CONFIG_BASE_IDX 2 + + +// addressBlock: dpcssys_dpcs0_rdpcstx0_dispdec +// base address: 0x0 +#define mmRDPCSTX0_RDPCSTX_CNTL 0x2930 +#define mmRDPCSTX0_RDPCSTX_CNTL_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_CLOCK_CNTL 0x2931 +#define mmRDPCSTX0_RDPCSTX_CLOCK_CNTL_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_INTERRUPT_CONTROL 0x2932 +#define mmRDPCSTX0_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PLL_UPDATE_DATA 0x2933 +#define mmRDPCSTX0_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2 +#define mmRDPCSTX0_RDPCS_TX_CR_ADDR 0x2934 +#define mmRDPCSTX0_RDPCS_TX_CR_ADDR_BASE_IDX 2 +#define mmRDPCSTX0_RDPCS_TX_CR_DATA 0x2935 +#define mmRDPCSTX0_RDPCS_TX_CR_DATA_BASE_IDX 2 +#define mmRDPCSTX0_RDPCS_TX_SRAM_CNTL 0x2936 +#define mmRDPCSTX0_RDPCS_TX_SRAM_CNTL_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_MEM_POWER_CTRL 0x2937 +#define mmRDPCSTX0_RDPCSTX_MEM_POWER_CTRL_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_MEM_POWER_CTRL2 0x2938 +#define mmRDPCSTX0_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_SCRATCH 0x2939 +#define mmRDPCSTX0_RDPCSTX_SCRATCH_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x293c +#define 
mmRDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_DEBUG_CONFIG 0x293d +#define mmRDPCSTX0_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL0 0x2940 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL0_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL1 0x2941 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL1_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL2 0x2942 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL2_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL3 0x2943 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL3_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL4 0x2944 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL4_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL5 0x2945 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL5_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL6 0x2946 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL6_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL7 0x2947 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL7_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL8 0x2948 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL8_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL9 0x2949 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL9_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL10 0x294a +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL10_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL11 0x294b +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL11_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL12 0x294c +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL12_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL13 0x294d +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL13_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL14 0x294e +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL14_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_FUSE0 0x294f +#define mmRDPCSTX0_RDPCSTX_PHY_FUSE0_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_FUSE1 0x2950 +#define mmRDPCSTX0_RDPCSTX_PHY_FUSE1_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_FUSE2 0x2951 +#define mmRDPCSTX0_RDPCSTX_PHY_FUSE2_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_FUSE3 0x2952 +#define mmRDPCSTX0_RDPCSTX_PHY_FUSE3_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_RX_LD_VAL 0x2953 +#define mmRDPCSTX0_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2954 +#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2955 +#define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_DPALT_CONTROL_REG 0x2956 +#define mmRDPCSTX0_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2 + + +// addressBlock: dpcssys_dpcssys_cr0_dispdec +// base address: 0x0 +#define mmDPCSSYS_CR0_DPCSSYS_CR_ADDR 0x2934 +#define mmDPCSSYS_CR0_DPCSSYS_CR_ADDR_BASE_IDX 2 +#define mmDPCSSYS_CR0_DPCSSYS_CR_DATA 0x2935 +#define mmDPCSSYS_CR0_DPCSSYS_CR_DATA_BASE_IDX 2 + + +// addressBlock: dpcssys_dpcs0_dpcstx1_dispdec +// base address: 0x360 +#define mmDPCSTX1_DPCSTX_TX_CLOCK_CNTL 0x2a00 +#define mmDPCSTX1_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2 +#define mmDPCSTX1_DPCSTX_TX_CNTL 0x2a01 +#define mmDPCSTX1_DPCSTX_TX_CNTL_BASE_IDX 2 +#define mmDPCSTX1_DPCSTX_CBUS_CNTL 0x2a02 +#define mmDPCSTX1_DPCSTX_CBUS_CNTL_BASE_IDX 2 +#define mmDPCSTX1_DPCSTX_INTERRUPT_CNTL 0x2a03 +#define mmDPCSTX1_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2 +#define mmDPCSTX1_DPCSTX_PLL_UPDATE_ADDR 0x2a04 +#define mmDPCSTX1_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2 +#define mmDPCSTX1_DPCSTX_PLL_UPDATE_DATA 0x2a05 +#define mmDPCSTX1_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2 +#define mmDPCSTX1_DPCSTX_DEBUG_CONFIG 0x2a06 +#define mmDPCSTX1_DPCSTX_DEBUG_CONFIG_BASE_IDX 2 + + +// addressBlock: dpcssys_dpcs0_rdpcstx1_dispdec +// base address: 0x360 +#define mmRDPCSTX1_RDPCSTX_CNTL 0x2a08 +#define 
mmRDPCSTX1_RDPCSTX_CNTL_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_CLOCK_CNTL 0x2a09 +#define mmRDPCSTX1_RDPCSTX_CLOCK_CNTL_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_INTERRUPT_CONTROL 0x2a0a +#define mmRDPCSTX1_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PLL_UPDATE_DATA 0x2a0b +#define mmRDPCSTX1_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2 +#define mmRDPCSTX1_RDPCS_TX_CR_ADDR 0x2a0c +#define mmRDPCSTX1_RDPCS_TX_CR_ADDR_BASE_IDX 2 +#define mmRDPCSTX1_RDPCS_TX_CR_DATA 0x2a0d +#define mmRDPCSTX1_RDPCS_TX_CR_DATA_BASE_IDX 2 +#define mmRDPCSTX1_RDPCS_TX_SRAM_CNTL 0x2a0e +#define mmRDPCSTX1_RDPCS_TX_SRAM_CNTL_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_MEM_POWER_CTRL 0x2a0f +#define mmRDPCSTX1_RDPCSTX_MEM_POWER_CTRL_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_MEM_POWER_CTRL2 0x2a10 +#define mmRDPCSTX1_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_SCRATCH 0x2a11 +#define mmRDPCSTX1_RDPCSTX_SCRATCH_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2a14 +#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_DEBUG_CONFIG 0x2a15 +#define mmRDPCSTX1_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL0 0x2a18 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL0_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL1 0x2a19 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL1_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL2 0x2a1a +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL2_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL3 0x2a1b +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL3_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL4 0x2a1c +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL4_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL5 0x2a1d +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL5_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL6 0x2a1e +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL6_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL7 0x2a1f +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL7_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL8 0x2a20 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL8_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL9 0x2a21 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL9_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL10 0x2a22 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL10_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL11 0x2a23 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL11_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL12 0x2a24 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL12_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL13 0x2a25 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL13_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL14 0x2a26 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL14_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_FUSE0 0x2a27 +#define mmRDPCSTX1_RDPCSTX_PHY_FUSE0_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_FUSE1 0x2a28 +#define mmRDPCSTX1_RDPCSTX_PHY_FUSE1_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_FUSE2 0x2a29 +#define mmRDPCSTX1_RDPCSTX_PHY_FUSE2_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_FUSE3 0x2a2a +#define mmRDPCSTX1_RDPCSTX_PHY_FUSE3_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_RX_LD_VAL 0x2a2b +#define mmRDPCSTX1_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2a2c +#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2a2d +#define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_DPALT_CONTROL_REG 0x2a2e +#define mmRDPCSTX1_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2 + + +// addressBlock: dpcssys_dpcssys_cr1_dispdec +// base address: 0x360 +#define mmDPCSSYS_CR1_DPCSSYS_CR_ADDR 0x2a0c +#define 
mmDPCSSYS_CR1_DPCSSYS_CR_ADDR_BASE_IDX 2 +#define mmDPCSSYS_CR1_DPCSSYS_CR_DATA 0x2a0d +#define mmDPCSSYS_CR1_DPCSSYS_CR_DATA_BASE_IDX 2 + + +// addressBlock: dpcssys_dpcs0_dpcstx2_dispdec +// base address: 0x6c0 +#define mmDPCSTX2_DPCSTX_TX_CLOCK_CNTL 0x2ad8 +#define mmDPCSTX2_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2 +#define mmDPCSTX2_DPCSTX_TX_CNTL 0x2ad9 +#define mmDPCSTX2_DPCSTX_TX_CNTL_BASE_IDX 2 +#define mmDPCSTX2_DPCSTX_CBUS_CNTL 0x2ada +#define mmDPCSTX2_DPCSTX_CBUS_CNTL_BASE_IDX 2 +#define mmDPCSTX2_DPCSTX_INTERRUPT_CNTL 0x2adb +#define mmDPCSTX2_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2 +#define mmDPCSTX2_DPCSTX_PLL_UPDATE_ADDR 0x2adc +#define mmDPCSTX2_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2 +#define mmDPCSTX2_DPCSTX_PLL_UPDATE_DATA 0x2add +#define mmDPCSTX2_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2 +#define mmDPCSTX2_DPCSTX_DEBUG_CONFIG 0x2ade +#define mmDPCSTX2_DPCSTX_DEBUG_CONFIG_BASE_IDX 2 + + +// addressBlock: dpcssys_dpcs0_rdpcstx2_dispdec +// base address: 0x6c0 +#define mmRDPCSTX2_RDPCSTX_CNTL 0x2ae0 +#define mmRDPCSTX2_RDPCSTX_CNTL_BASE_IDX 2 +#define mmRDPCSTX2_RDPCSTX_CLOCK_CNTL 0x2ae1 +#define mmRDPCSTX2_RDPCSTX_CLOCK_CNTL_BASE_IDX 2 +#define mmRDPCSTX2_RDPCSTX_INTERRUPT_CONTROL 0x2ae2 +#define mmRDPCSTX2_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2 +#define mmRDPCSTX2_RDPCSTX_PLL_UPDATE_DATA 0x2ae3 +#define mmRDPCSTX2_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2 +#define mmRDPCSTX2_RDPCS_TX_CR_ADDR 0x2ae4 +#define mmRDPCSTX2_RDPCS_TX_CR_ADDR_BASE_IDX 2 +#define mmRDPCSTX2_RDPCS_TX_CR_DATA 0x2ae5 +#define mmRDPCSTX2_RDPCS_TX_CR_DATA_BASE_IDX 2 +#define mmRDPCSTX2_RDPCS_TX_SRAM_CNTL 0x2ae6 +#define mmRDPCSTX2_RDPCS_TX_SRAM_CNTL_BASE_IDX 2 +#define mmRDPCSTX2_RDPCSTX_MEM_POWER_CTRL 0x2ae7 +#define mmRDPCSTX2_RDPCSTX_MEM_POWER_CTRL_BASE_IDX 2 +#define mmRDPCSTX2_RDPCSTX_MEM_POWER_CTRL2 0x2ae8 +#define mmRDPCSTX2_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX 2 +#define mmRDPCSTX2_RDPCSTX_SCRATCH 0x2ae9 +#define mmRDPCSTX2_RDPCSTX_SCRATCH_BASE_IDX 2 +#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2aec +#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2 +#define mmRDPCSTX2_RDPCSTX_DEBUG_CONFIG 0x2aed +#define mmRDPCSTX2_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2 +#define mmRDPCSTX2_RDPCSTX_PHY_CNTL0 0x2af0 +#define mmRDPCSTX2_RDPCSTX_PHY_CNTL0_BASE_IDX 2 +#define mmRDPCSTX2_RDPCSTX_PHY_CNTL1 0x2af1 +#define mmRDPCSTX2_RDPCSTX_PHY_CNTL1_BASE_IDX 2 +#define mmRDPCSTX2_RDPCSTX_PHY_CNTL2 0x2af2 +#define mmRDPCSTX2_RDPCSTX_PHY_CNTL2_BASE_IDX 2 +#define mmRDPCSTX2_RDPCSTX_PHY_CNTL3 0x2af3 +#define mmRDPCSTX2_RDPCSTX_PHY_CNTL3_BASE_IDX 2 +#define mmRDPCSTX2_RDPCSTX_PHY_CNTL4 0x2af4 +#define mmRDPCSTX2_RDPCSTX_PHY_CNTL4_BASE_IDX 2 +#define mmRDPCSTX2_RDPCSTX_PHY_CNTL5 0x2af5 +#define mmRDPCSTX2_RDPCSTX_PHY_CNTL5_BASE_IDX 2 +#define mmRDPCSTX2_RDPCSTX_PHY_CNTL6 0x2af6 +#define mmRDPCSTX2_RDPCSTX_PHY_CNTL6_BASE_IDX 2 +#define mmRDPCSTX2_RDPCSTX_PHY_CNTL7 0x2af7 +#define mmRDPCSTX2_RDPCSTX_PHY_CNTL7_BASE_IDX 2 +#define mmRDPCSTX2_RDPCSTX_PHY_CNTL8 0x2af8 +#define mmRDPCSTX2_RDPCSTX_PHY_CNTL8_BASE_IDX 2 +#define mmRDPCSTX2_RDPCSTX_PHY_CNTL9 0x2af9 +#define mmRDPCSTX2_RDPCSTX_PHY_CNTL9_BASE_IDX 2 +#define mmRDPCSTX2_RDPCSTX_PHY_CNTL10 0x2afa +#define mmRDPCSTX2_RDPCSTX_PHY_CNTL10_BASE_IDX 2 +#define mmRDPCSTX2_RDPCSTX_PHY_CNTL11 0x2afb +#define mmRDPCSTX2_RDPCSTX_PHY_CNTL11_BASE_IDX 2 +#define mmRDPCSTX2_RDPCSTX_PHY_CNTL12 0x2afc +#define mmRDPCSTX2_RDPCSTX_PHY_CNTL12_BASE_IDX 2 +#define mmRDPCSTX2_RDPCSTX_PHY_CNTL13 0x2afd +#define mmRDPCSTX2_RDPCSTX_PHY_CNTL13_BASE_IDX 2 +#define mmRDPCSTX2_RDPCSTX_PHY_CNTL14 0x2afe +#define 
mmRDPCSTX2_RDPCSTX_PHY_CNTL14_BASE_IDX 2 +#define mmRDPCSTX2_RDPCSTX_PHY_FUSE0 0x2aff +#define mmRDPCSTX2_RDPCSTX_PHY_FUSE0_BASE_IDX 2 +#define mmRDPCSTX2_RDPCSTX_PHY_FUSE1 0x2b00 +#define mmRDPCSTX2_RDPCSTX_PHY_FUSE1_BASE_IDX 2 +#define mmRDPCSTX2_RDPCSTX_PHY_FUSE2 0x2b01 +#define mmRDPCSTX2_RDPCSTX_PHY_FUSE2_BASE_IDX 2 +#define mmRDPCSTX2_RDPCSTX_PHY_FUSE3 0x2b02 +#define mmRDPCSTX2_RDPCSTX_PHY_FUSE3_BASE_IDX 2 +#define mmRDPCSTX2_RDPCSTX_PHY_RX_LD_VAL 0x2b03 +#define mmRDPCSTX2_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2 +#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2b04 +#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2 +#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2b05 +#define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2 +#define mmRDPCSTX2_RDPCSTX_DPALT_CONTROL_REG 0x2b06 +#define mmRDPCSTX2_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2 + + +// addressBlock: dpcssys_dpcssys_cr2_dispdec +// base address: 0x6c0 +#define mmDPCSSYS_CR2_DPCSSYS_CR_ADDR 0x2ae4 +#define mmDPCSSYS_CR2_DPCSSYS_CR_ADDR_BASE_IDX 2 +#define mmDPCSSYS_CR2_DPCSSYS_CR_DATA 0x2ae5 +#define mmDPCSSYS_CR2_DPCSSYS_CR_DATA_BASE_IDX 2 + + +// addressBlock: dpcssys_dpcs0_dpcstx3_dispdec +// base address: 0xa20 +#define mmDPCSTX3_DPCSTX_TX_CLOCK_CNTL 0x2bb0 +#define mmDPCSTX3_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2 +#define mmDPCSTX3_DPCSTX_TX_CNTL 0x2bb1 +#define mmDPCSTX3_DPCSTX_TX_CNTL_BASE_IDX 2 +#define mmDPCSTX3_DPCSTX_CBUS_CNTL 0x2bb2 +#define mmDPCSTX3_DPCSTX_CBUS_CNTL_BASE_IDX 2 +#define mmDPCSTX3_DPCSTX_INTERRUPT_CNTL 0x2bb3 +#define mmDPCSTX3_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2 +#define mmDPCSTX3_DPCSTX_PLL_UPDATE_ADDR 0x2bb4 +#define mmDPCSTX3_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2 +#define mmDPCSTX3_DPCSTX_PLL_UPDATE_DATA 0x2bb5 +#define mmDPCSTX3_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2 +#define mmDPCSTX3_DPCSTX_DEBUG_CONFIG 0x2bb6 +#define mmDPCSTX3_DPCSTX_DEBUG_CONFIG_BASE_IDX 2 + + +// addressBlock: dpcssys_dpcs0_rdpcstx3_dispdec +// base address: 0xa20 +#define mmRDPCSTX3_RDPCSTX_CNTL 0x2bb8 +#define mmRDPCSTX3_RDPCSTX_CNTL_BASE_IDX 2 +#define mmRDPCSTX3_RDPCSTX_CLOCK_CNTL 0x2bb9 +#define mmRDPCSTX3_RDPCSTX_CLOCK_CNTL_BASE_IDX 2 +#define mmRDPCSTX3_RDPCSTX_INTERRUPT_CONTROL 0x2bba +#define mmRDPCSTX3_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2 +#define mmRDPCSTX3_RDPCSTX_PLL_UPDATE_DATA 0x2bbb +#define mmRDPCSTX3_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2 +#define mmRDPCSTX3_RDPCS_TX_CR_ADDR 0x2bbc +#define mmRDPCSTX3_RDPCS_TX_CR_ADDR_BASE_IDX 2 +#define mmRDPCSTX3_RDPCS_TX_CR_DATA 0x2bbd +#define mmRDPCSTX3_RDPCS_TX_CR_DATA_BASE_IDX 2 +#define mmRDPCSTX3_RDPCS_TX_SRAM_CNTL 0x2bbe +#define mmRDPCSTX3_RDPCS_TX_SRAM_CNTL_BASE_IDX 2 +#define mmRDPCSTX3_RDPCSTX_MEM_POWER_CTRL 0x2bbf +#define mmRDPCSTX3_RDPCSTX_MEM_POWER_CTRL_BASE_IDX 2 +#define mmRDPCSTX3_RDPCSTX_MEM_POWER_CTRL2 0x2bc0 +#define mmRDPCSTX3_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX 2 +#define mmRDPCSTX3_RDPCSTX_SCRATCH 0x2bc1 +#define mmRDPCSTX3_RDPCSTX_SCRATCH_BASE_IDX 2 +#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2bc4 +#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2 +#define mmRDPCSTX3_RDPCSTX_DEBUG_CONFIG 0x2bc5 +#define mmRDPCSTX3_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2 +#define mmRDPCSTX3_RDPCSTX_PHY_CNTL0 0x2bc8 +#define mmRDPCSTX3_RDPCSTX_PHY_CNTL0_BASE_IDX 2 +#define mmRDPCSTX3_RDPCSTX_PHY_CNTL1 0x2bc9 +#define mmRDPCSTX3_RDPCSTX_PHY_CNTL1_BASE_IDX 2 +#define mmRDPCSTX3_RDPCSTX_PHY_CNTL2 0x2bca +#define mmRDPCSTX3_RDPCSTX_PHY_CNTL2_BASE_IDX 2 +#define mmRDPCSTX3_RDPCSTX_PHY_CNTL3 0x2bcb +#define mmRDPCSTX3_RDPCSTX_PHY_CNTL3_BASE_IDX 2 +#define 
mmRDPCSTX3_RDPCSTX_PHY_CNTL4 0x2bcc +#define mmRDPCSTX3_RDPCSTX_PHY_CNTL4_BASE_IDX 2 +#define mmRDPCSTX3_RDPCSTX_PHY_CNTL5 0x2bcd +#define mmRDPCSTX3_RDPCSTX_PHY_CNTL5_BASE_IDX 2 +#define mmRDPCSTX3_RDPCSTX_PHY_CNTL6 0x2bce +#define mmRDPCSTX3_RDPCSTX_PHY_CNTL6_BASE_IDX 2 +#define mmRDPCSTX3_RDPCSTX_PHY_CNTL7 0x2bcf +#define mmRDPCSTX3_RDPCSTX_PHY_CNTL7_BASE_IDX 2 +#define mmRDPCSTX3_RDPCSTX_PHY_CNTL8 0x2bd0 +#define mmRDPCSTX3_RDPCSTX_PHY_CNTL8_BASE_IDX 2 +#define mmRDPCSTX3_RDPCSTX_PHY_CNTL9 0x2bd1 +#define mmRDPCSTX3_RDPCSTX_PHY_CNTL9_BASE_IDX 2 +#define mmRDPCSTX3_RDPCSTX_PHY_CNTL10 0x2bd2 +#define mmRDPCSTX3_RDPCSTX_PHY_CNTL10_BASE_IDX 2 +#define mmRDPCSTX3_RDPCSTX_PHY_CNTL11 0x2bd3 +#define mmRDPCSTX3_RDPCSTX_PHY_CNTL11_BASE_IDX 2 +#define mmRDPCSTX3_RDPCSTX_PHY_CNTL12 0x2bd4 +#define mmRDPCSTX3_RDPCSTX_PHY_CNTL12_BASE_IDX 2 +#define mmRDPCSTX3_RDPCSTX_PHY_CNTL13 0x2bd5 +#define mmRDPCSTX3_RDPCSTX_PHY_CNTL13_BASE_IDX 2 +#define mmRDPCSTX3_RDPCSTX_PHY_CNTL14 0x2bd6 +#define mmRDPCSTX3_RDPCSTX_PHY_CNTL14_BASE_IDX 2 +#define mmRDPCSTX3_RDPCSTX_PHY_FUSE0 0x2bd7 +#define mmRDPCSTX3_RDPCSTX_PHY_FUSE0_BASE_IDX 2 +#define mmRDPCSTX3_RDPCSTX_PHY_FUSE1 0x2bd8 +#define mmRDPCSTX3_RDPCSTX_PHY_FUSE1_BASE_IDX 2 +#define mmRDPCSTX3_RDPCSTX_PHY_FUSE2 0x2bd9 +#define mmRDPCSTX3_RDPCSTX_PHY_FUSE2_BASE_IDX 2 +#define mmRDPCSTX3_RDPCSTX_PHY_FUSE3 0x2bda +#define mmRDPCSTX3_RDPCSTX_PHY_FUSE3_BASE_IDX 2 +#define mmRDPCSTX3_RDPCSTX_PHY_RX_LD_VAL 0x2bdb +#define mmRDPCSTX3_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2 +#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2bdc +#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2 +#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2bdd +#define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2 +#define mmRDPCSTX3_RDPCSTX_DPALT_CONTROL_REG 0x2bde +#define mmRDPCSTX3_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2 + + +// addressBlock: dpcssys_dpcssys_cr3_dispdec +// base address: 0xa20 +#define mmDPCSSYS_CR3_DPCSSYS_CR_ADDR 0x2bbc +#define mmDPCSSYS_CR3_DPCSSYS_CR_ADDR_BASE_IDX 2 +#define mmDPCSSYS_CR3_DPCSSYS_CR_DATA 0x2bbd +#define mmDPCSSYS_CR3_DPCSSYS_CR_DATA_BASE_IDX 2 + + +// addressBlock: dpcssys_dpcs0_dpcsrx_dispdec +// base address: 0x0 +#define mmDPCSRX_PHY_CNTL 0x2c76 +#define mmDPCSRX_PHY_CNTL_BASE_IDX 2 +#define mmDPCSRX_RX_CLOCK_CNTL 0x2c78 +#define mmDPCSRX_RX_CLOCK_CNTL_BASE_IDX 2 +#define mmDPCSRX_RX_CNTL 0x2c7a +#define mmDPCSRX_RX_CNTL_BASE_IDX 2 +#define mmDPCSRX_CBUS_CNTL 0x2c7b +#define mmDPCSRX_CBUS_CNTL_BASE_IDX 2 +#define mmDPCSRX_REG_ERROR_STATUS 0x2c7c +#define mmDPCSRX_REG_ERROR_STATUS_BASE_IDX 2 +#define mmDPCSRX_RX_ERROR_STATUS 0x2c7d +#define mmDPCSRX_RX_ERROR_STATUS_BASE_IDX 2 +#define mmDPCSRX_INDEX_MODE_ADDR 0x2c80 +#define mmDPCSRX_INDEX_MODE_ADDR_BASE_IDX 2 +#define mmDPCSRX_INDEX_MODE_DATA 0x2c81 +#define mmDPCSRX_INDEX_MODE_DATA_BASE_IDX 2 +#define mmDPCSRX_DEBUG_CONFIG 0x2c82 +#define mmDPCSRX_DEBUG_CONFIG_BASE_IDX 2 + + +// addressBlock: dpcssys_dpcs0_dpcstx4_dispdec +// base address: 0xd80 +#define mmDPCSTX4_DPCSTX_TX_CLOCK_CNTL 0x2c88 +#define mmDPCSTX4_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2 +#define mmDPCSTX4_DPCSTX_TX_CNTL 0x2c89 +#define mmDPCSTX4_DPCSTX_TX_CNTL_BASE_IDX 2 +#define mmDPCSTX4_DPCSTX_CBUS_CNTL 0x2c8a +#define mmDPCSTX4_DPCSTX_CBUS_CNTL_BASE_IDX 2 +#define mmDPCSTX4_DPCSTX_INTERRUPT_CNTL 0x2c8b +#define mmDPCSTX4_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2 +#define mmDPCSTX4_DPCSTX_PLL_UPDATE_ADDR 0x2c8c +#define mmDPCSTX4_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2 +#define mmDPCSTX4_DPCSTX_PLL_UPDATE_DATA 0x2c8d +#define 
mmDPCSTX4_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2 +#define mmDPCSTX4_DPCSTX_DEBUG_CONFIG 0x2c8e +#define mmDPCSTX4_DPCSTX_DEBUG_CONFIG_BASE_IDX 2 + + +// addressBlock: dpcssys_dpcs0_rdpcstx4_dispdec +// base address: 0xd80 +#define mmRDPCSTX4_RDPCSTX_CNTL 0x2c90 +#define mmRDPCSTX4_RDPCSTX_CNTL_BASE_IDX 2 +#define mmRDPCSTX4_RDPCSTX_CLOCK_CNTL 0x2c91 +#define mmRDPCSTX4_RDPCSTX_CLOCK_CNTL_BASE_IDX 2 +#define mmRDPCSTX4_RDPCSTX_INTERRUPT_CONTROL 0x2c92 +#define mmRDPCSTX4_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2 +#define mmRDPCSTX4_RDPCSTX_PLL_UPDATE_DATA 0x2c93 +#define mmRDPCSTX4_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2 +#define mmRDPCSTX4_RDPCS_TX_CR_ADDR 0x2c94 +#define mmRDPCSTX4_RDPCS_TX_CR_ADDR_BASE_IDX 2 +#define mmRDPCSTX4_RDPCS_TX_CR_DATA 0x2c95 +#define mmRDPCSTX4_RDPCS_TX_CR_DATA_BASE_IDX 2 +#define mmRDPCSTX4_RDPCS_TX_SRAM_CNTL 0x2c96 +#define mmRDPCSTX4_RDPCS_TX_SRAM_CNTL_BASE_IDX 2 +#define mmRDPCSTX4_RDPCSTX_MEM_POWER_CTRL 0x2c97 +#define mmRDPCSTX4_RDPCSTX_MEM_POWER_CTRL_BASE_IDX 2 +#define mmRDPCSTX4_RDPCSTX_MEM_POWER_CTRL2 0x2c98 +#define mmRDPCSTX4_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX 2 +#define mmRDPCSTX4_RDPCSTX_SCRATCH 0x2c99 +#define mmRDPCSTX4_RDPCSTX_SCRATCH_BASE_IDX 2 +#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2c9c +#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2 +#define mmRDPCSTX4_RDPCSTX_DEBUG_CONFIG 0x2c9d +#define mmRDPCSTX4_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2 +#define mmRDPCSTX4_RDPCSTX_PHY_CNTL0 0x2ca0 +#define mmRDPCSTX4_RDPCSTX_PHY_CNTL0_BASE_IDX 2 +#define mmRDPCSTX4_RDPCSTX_PHY_CNTL1 0x2ca1 +#define mmRDPCSTX4_RDPCSTX_PHY_CNTL1_BASE_IDX 2 +#define mmRDPCSTX4_RDPCSTX_PHY_CNTL2 0x2ca2 +#define mmRDPCSTX4_RDPCSTX_PHY_CNTL2_BASE_IDX 2 +#define mmRDPCSTX4_RDPCSTX_PHY_CNTL3 0x2ca3 +#define mmRDPCSTX4_RDPCSTX_PHY_CNTL3_BASE_IDX 2 +#define mmRDPCSTX4_RDPCSTX_PHY_CNTL4 0x2ca4 +#define mmRDPCSTX4_RDPCSTX_PHY_CNTL4_BASE_IDX 2 +#define mmRDPCSTX4_RDPCSTX_PHY_CNTL5 0x2ca5 +#define mmRDPCSTX4_RDPCSTX_PHY_CNTL5_BASE_IDX 2 +#define mmRDPCSTX4_RDPCSTX_PHY_CNTL6 0x2ca6 +#define mmRDPCSTX4_RDPCSTX_PHY_CNTL6_BASE_IDX 2 +#define mmRDPCSTX4_RDPCSTX_PHY_CNTL7 0x2ca7 +#define mmRDPCSTX4_RDPCSTX_PHY_CNTL7_BASE_IDX 2 +#define mmRDPCSTX4_RDPCSTX_PHY_CNTL8 0x2ca8 +#define mmRDPCSTX4_RDPCSTX_PHY_CNTL8_BASE_IDX 2 +#define mmRDPCSTX4_RDPCSTX_PHY_CNTL9 0x2ca9 +#define mmRDPCSTX4_RDPCSTX_PHY_CNTL9_BASE_IDX 2 +#define mmRDPCSTX4_RDPCSTX_PHY_CNTL10 0x2caa +#define mmRDPCSTX4_RDPCSTX_PHY_CNTL10_BASE_IDX 2 +#define mmRDPCSTX4_RDPCSTX_PHY_CNTL11 0x2cab +#define mmRDPCSTX4_RDPCSTX_PHY_CNTL11_BASE_IDX 2 +#define mmRDPCSTX4_RDPCSTX_PHY_CNTL12 0x2cac +#define mmRDPCSTX4_RDPCSTX_PHY_CNTL12_BASE_IDX 2 +#define mmRDPCSTX4_RDPCSTX_PHY_CNTL13 0x2cad +#define mmRDPCSTX4_RDPCSTX_PHY_CNTL13_BASE_IDX 2 +#define mmRDPCSTX4_RDPCSTX_PHY_CNTL14 0x2cae +#define mmRDPCSTX4_RDPCSTX_PHY_CNTL14_BASE_IDX 2 +#define mmRDPCSTX4_RDPCSTX_PHY_FUSE0 0x2caf +#define mmRDPCSTX4_RDPCSTX_PHY_FUSE0_BASE_IDX 2 +#define mmRDPCSTX4_RDPCSTX_PHY_FUSE1 0x2cb0 +#define mmRDPCSTX4_RDPCSTX_PHY_FUSE1_BASE_IDX 2 +#define mmRDPCSTX4_RDPCSTX_PHY_FUSE2 0x2cb1 +#define mmRDPCSTX4_RDPCSTX_PHY_FUSE2_BASE_IDX 2 +#define mmRDPCSTX4_RDPCSTX_PHY_FUSE3 0x2cb2 +#define mmRDPCSTX4_RDPCSTX_PHY_FUSE3_BASE_IDX 2 +#define mmRDPCSTX4_RDPCSTX_PHY_RX_LD_VAL 0x2cb3 +#define mmRDPCSTX4_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2 +#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2cb4 +#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2 +#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2cb5 +#define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2 
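+// DPALT_CONTROL_REG: USB Type-C DP alternate-mode control for this PHY instance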
+#define mmRDPCSTX4_RDPCSTX_DPALT_CONTROL_REG 0x2cb6 +#define mmRDPCSTX4_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2 + + +// addressBlock: dpcssys_dpcssys_cr4_dispdec +// base address: 0xd80 +#define mmDPCSSYS_CR4_DPCSSYS_CR_ADDR 0x2c94 +#define mmDPCSSYS_CR4_DPCSSYS_CR_ADDR_BASE_IDX 2 +#define mmDPCSSYS_CR4_DPCSSYS_CR_DATA 0x2c95 +#define mmDPCSSYS_CR4_DPCSSYS_CR_DATA_BASE_IDX 2 + + +// addressBlock: dpcssys_dpcs0_dpcstx5_dispdec +// base address: 0x10e0 +#define mmDPCSTX5_DPCSTX_TX_CLOCK_CNTL 0x2d60 +#define mmDPCSTX5_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2 +#define mmDPCSTX5_DPCSTX_TX_CNTL 0x2d61 +#define mmDPCSTX5_DPCSTX_TX_CNTL_BASE_IDX 2 +#define mmDPCSTX5_DPCSTX_CBUS_CNTL 0x2d62 +#define mmDPCSTX5_DPCSTX_CBUS_CNTL_BASE_IDX 2 +#define mmDPCSTX5_DPCSTX_INTERRUPT_CNTL 0x2d63 +#define mmDPCSTX5_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2 +#define mmDPCSTX5_DPCSTX_PLL_UPDATE_ADDR 0x2d64 +#define mmDPCSTX5_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2 +#define mmDPCSTX5_DPCSTX_PLL_UPDATE_DATA 0x2d65 +#define mmDPCSTX5_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2 +#define mmDPCSTX5_DPCSTX_DEBUG_CONFIG 0x2d66 +#define mmDPCSTX5_DPCSTX_DEBUG_CONFIG_BASE_IDX 2 + + +// addressBlock: dpcssys_dpcs0_rdpcstx5_dispdec +// base address: 0x10e0 +#define mmRDPCSTX5_RDPCSTX_CNTL 0x2d68 +#define mmRDPCSTX5_RDPCSTX_CNTL_BASE_IDX 2 +#define mmRDPCSTX5_RDPCSTX_CLOCK_CNTL 0x2d69 +#define mmRDPCSTX5_RDPCSTX_CLOCK_CNTL_BASE_IDX 2 +#define mmRDPCSTX5_RDPCSTX_INTERRUPT_CONTROL 0x2d6a +#define mmRDPCSTX5_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2 +#define mmRDPCSTX5_RDPCSTX_PLL_UPDATE_DATA 0x2d6b +#define mmRDPCSTX5_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2 +#define mmRDPCSTX5_RDPCS_TX_CR_ADDR 0x2d6c +#define mmRDPCSTX5_RDPCS_TX_CR_ADDR_BASE_IDX 2 +#define mmRDPCSTX5_RDPCS_TX_CR_DATA 0x2d6d +#define mmRDPCSTX5_RDPCS_TX_CR_DATA_BASE_IDX 2 +#define mmRDPCSTX5_RDPCS_TX_SRAM_CNTL 0x2d6e +#define mmRDPCSTX5_RDPCS_TX_SRAM_CNTL_BASE_IDX 2 +#define mmRDPCSTX5_RDPCSTX_MEM_POWER_CTRL 0x2d6f +#define mmRDPCSTX5_RDPCSTX_MEM_POWER_CTRL_BASE_IDX 2 +#define mmRDPCSTX5_RDPCSTX_MEM_POWER_CTRL2 0x2d70 +#define mmRDPCSTX5_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX 2 +#define mmRDPCSTX5_RDPCSTX_SCRATCH 0x2d71 +#define mmRDPCSTX5_RDPCSTX_SCRATCH_BASE_IDX 2 +#define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2d74 +#define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2 +#define mmRDPCSTX5_RDPCSTX_DEBUG_CONFIG 0x2d75 +#define mmRDPCSTX5_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2 +#define mmRDPCSTX5_RDPCSTX_PHY_CNTL0 0x2d78 +#define mmRDPCSTX5_RDPCSTX_PHY_CNTL0_BASE_IDX 2 +#define mmRDPCSTX5_RDPCSTX_PHY_CNTL1 0x2d79 +#define mmRDPCSTX5_RDPCSTX_PHY_CNTL1_BASE_IDX 2 +#define mmRDPCSTX5_RDPCSTX_PHY_CNTL2 0x2d7a +#define mmRDPCSTX5_RDPCSTX_PHY_CNTL2_BASE_IDX 2 +#define mmRDPCSTX5_RDPCSTX_PHY_CNTL3 0x2d7b +#define mmRDPCSTX5_RDPCSTX_PHY_CNTL3_BASE_IDX 2 +#define mmRDPCSTX5_RDPCSTX_PHY_CNTL4 0x2d7c +#define mmRDPCSTX5_RDPCSTX_PHY_CNTL4_BASE_IDX 2 +#define mmRDPCSTX5_RDPCSTX_PHY_CNTL5 0x2d7d +#define mmRDPCSTX5_RDPCSTX_PHY_CNTL5_BASE_IDX 2 +#define mmRDPCSTX5_RDPCSTX_PHY_CNTL6 0x2d7e +#define mmRDPCSTX5_RDPCSTX_PHY_CNTL6_BASE_IDX 2 +#define mmRDPCSTX5_RDPCSTX_PHY_CNTL7 0x2d7f +#define mmRDPCSTX5_RDPCSTX_PHY_CNTL7_BASE_IDX 2 +#define mmRDPCSTX5_RDPCSTX_PHY_CNTL8 0x2d80 +#define mmRDPCSTX5_RDPCSTX_PHY_CNTL8_BASE_IDX 2 +#define mmRDPCSTX5_RDPCSTX_PHY_CNTL9 0x2d81 +#define mmRDPCSTX5_RDPCSTX_PHY_CNTL9_BASE_IDX 2 +#define mmRDPCSTX5_RDPCSTX_PHY_CNTL10 0x2d82 +#define mmRDPCSTX5_RDPCSTX_PHY_CNTL10_BASE_IDX 2 +#define mmRDPCSTX5_RDPCSTX_PHY_CNTL11 0x2d83 +#define mmRDPCSTX5_RDPCSTX_PHY_CNTL11_BASE_IDX 2 +#define 
mmRDPCSTX5_RDPCSTX_PHY_CNTL12 0x2d84 +#define mmRDPCSTX5_RDPCSTX_PHY_CNTL12_BASE_IDX 2 +#define mmRDPCSTX5_RDPCSTX_PHY_CNTL13 0x2d85 +#define mmRDPCSTX5_RDPCSTX_PHY_CNTL13_BASE_IDX 2 +#define mmRDPCSTX5_RDPCSTX_PHY_CNTL14 0x2d86 +#define mmRDPCSTX5_RDPCSTX_PHY_CNTL14_BASE_IDX 2 +#define mmRDPCSTX5_RDPCSTX_PHY_FUSE0 0x2d87 +#define mmRDPCSTX5_RDPCSTX_PHY_FUSE0_BASE_IDX 2 +#define mmRDPCSTX5_RDPCSTX_PHY_FUSE1 0x2d88 +#define mmRDPCSTX5_RDPCSTX_PHY_FUSE1_BASE_IDX 2 +#define mmRDPCSTX5_RDPCSTX_PHY_FUSE2 0x2d89 +#define mmRDPCSTX5_RDPCSTX_PHY_FUSE2_BASE_IDX 2 +#define mmRDPCSTX5_RDPCSTX_PHY_FUSE3 0x2d8a +#define mmRDPCSTX5_RDPCSTX_PHY_FUSE3_BASE_IDX 2 +#define mmRDPCSTX5_RDPCSTX_PHY_RX_LD_VAL 0x2d8b +#define mmRDPCSTX5_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2 +#define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2d8c +#define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2 +#define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2d8d +#define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2 +#define mmRDPCSTX5_RDPCSTX_DPALT_CONTROL_REG 0x2d8e +#define mmRDPCSTX5_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2 + + +// addressBlock: dpcssys_dpcssys_cr5_dispdec +// base address: 0x10e0 +#define mmDPCSSYS_CR5_DPCSSYS_CR_ADDR 0x2d6c +#define mmDPCSSYS_CR5_DPCSSYS_CR_ADDR_BASE_IDX 2 +#define mmDPCSSYS_CR5_DPCSSYS_CR_DATA 0x2d6d +#define mmDPCSSYS_CR5_DPCSSYS_CR_DATA_BASE_IDX 2 + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_sh_mask.h new file mode 100644 index 0000000000000000000000000000000000000000..25e0569e05c849ce903fc4132fd6dd1139e233c8 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_sh_mask.h @@ -0,0 +1,3912 @@ +/* + * Copyright (C) 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _dpcs_2_0_0_SH_MASK_HEADER +#define _dpcs_2_0_0_SH_MASK_HEADER + + +// addressBlock: dpcssys_dpcs0_dpcstx0_dispdec +//DPCSTX0_DPCSTX_TX_CLOCK_CNTL +#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0 +#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1 +#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2 +#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3 +#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L +#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L +#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L +#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L +//DPCSTX0_DPCSTX_TX_CNTL +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10 +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11 +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14 +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L +//DPCSTX0_DPCSTX_CBUS_CNTL +#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0 +#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f +#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL +#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L +//DPCSTX0_DPCSTX_INTERRUPT_CNTL +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0 +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1 +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4 +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8 +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9 +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10 +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14 +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L +#define 
DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L
+//DPCSTX0_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX0_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0
+#define DPCSTX0_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL
+//DPCSTX0_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX0_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define DPCSTX0_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
+//DPCSTX0_DPCSTX_DEBUG_CONFIG
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx0_dispdec
+//RDPCSTX0_RDPCSTX_CNTL
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x02000000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L
+//RDPCSTX0_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L
+//RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L
+//RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L
+//RDPCSTX0_RDPCS_TX_CR_ADDR
+#define RDPCSTX0_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define RDPCSTX0_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//RDPCSTX0_RDPCS_TX_CR_DATA
+#define RDPCSTX0_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define RDPCSTX0_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+//RDPCSTX0_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L
+#define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L
+//RDPCSTX0_RDPCSTX_MEM_POWER_CTRL
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT 0x1a
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT 0x1b
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT 0x1d
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT 0x1e
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK 0x00000FFFL
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK 0x03FFF000L
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK 0x04000000L
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK 0x08000000L
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK 0x20000000L
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK 0x40000000L
+//RDPCSTX0_RDPCSTX_MEM_POWER_CTRL2
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK 0x00000003L
+#define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK 0x00000004L
+//RDPCSTX0_RDPCSTX_SCRATCH
+#define RDPCSTX0_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL
+//RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+//RDPCSTX0_RDPCSTX_DEBUG_CONFIG
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L
+#define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L
+//RDPCSTX0_RDPCSTX_PHY_CNTL2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L
+//RDPCSTX0_RDPCSTX_PHY_CNTL3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000030L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL
+#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL
+//RDPCSTX0_RDPCSTX_PHY_CNTL9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL
+#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL
+//RDPCSTX0_RDPCSTX_PHY_CNTL11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L
+//RDPCSTX0_RDPCSTX_PHY_CNTL13
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L
+//RDPCSTX0_RDPCSTX_PHY_FUSE0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L
+//RDPCSTX0_RDPCSTX_PHY_FUSE1
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L
+//RDPCSTX0_RDPCSTX_PHY_FUSE2
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L
+//RDPCSTX0_RDPCSTX_PHY_FUSE3
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L
+//RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL
+#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L
+//RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L
+//RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L
+//RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
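Every register in these generated blocks follows the same pattern: each field gets a `<REG>__<FIELD>__SHIFT` bit-position macro paired with a `<REG>__<FIELD>_MASK` bit-mask macro (fields whose hardware name itself ends in `MASK`, such as `DPCS_INTERRUPT_MASK`, therefore end up with a `_MASK_MASK` macro). As a minimal stand-alone sketch of how such pairs are consumed, the snippet below does a read-modify-write of one field; the `FIELD_GET`/`FIELD_SET` helpers are illustrative placeholders and not part of this patch (the amdgpu driver has its own equivalents), while the two shift/mask pairs are copied verbatim from the header added above.

```c
#include <stdint.h>
#include <stdio.h>

/* Shift/mask pairs copied from the generated header above. */
#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10
#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L
#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L

/* Hypothetical helpers: extract or replace one field of a 32-bit register
 * value by token-pasting the generated __SHIFT/_MASK names. */
#define FIELD_GET(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
#define FIELD_SET(val, reg, field, fval) \
	(((val) & ~(uint32_t)reg##__##field##_MASK) | \
	 (((uint32_t)(fval) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

int main(void)
{
	uint32_t cntl = 0;

	/* Enable the TX FIFO and program a read-start delay of 5. */
	cntl = FIELD_SET(cntl, RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_EN, 1);
	cntl = FIELD_SET(cntl, RDPCSTX0_RDPCSTX_CNTL, RDPCS_TX_FIFO_RD_START_DELAY, 5);

	/* Prints CNTL=0x00510000 delay=5 */
	printf("CNTL=0x%08x delay=%lu\n", (unsigned)cntl,
	       (unsigned long)FIELD_GET(cntl, RDPCSTX0_RDPCSTX_CNTL,
					RDPCS_TX_FIFO_RD_START_DELAY));
	return 0;
}
```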
+
+
+// addressBlock: dpcssys_dpcssys_cr0_dispdec
+//DPCSSYS_CR0_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR0_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define DPCSSYS_CR0_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//DPCSSYS_CR0_DPCSSYS_CR_DATA
+#define DPCSSYS_CR0_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define DPCSSYS_CR0_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+
+
+// addressBlock: dpcssys_dpcs0_dpcstx1_dispdec
+//DPCSTX1_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L
+//DPCSTX1_DPCSTX_TX_CNTL
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L
+//DPCSTX1_DPCSTX_CBUS_CNTL
+#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0
+#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL
+#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L
+//DPCSTX1_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L
+//DPCSTX1_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX1_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0
+#define DPCSTX1_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL
+//DPCSTX1_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX1_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define DPCSTX1_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
+//DPCSTX1_DPCSTX_DEBUG_CONFIG
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
+#define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L
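The `DPCSSYS_CR0_DPCSSYS_CR_ADDR`/`DPCSSYS_CR0_DPCSSYS_CR_DATA` pair above (and the per-PHY `RDPCS_TX_CR_ADDR`/`RDPCS_TX_CR_DATA` copies) puts a 16-bit address field next to a 16-bit data field, the usual shape of an indirect address/data window into PHY-internal registers. That reading is an inference from the names, not something this patch states, and everything in the sketch below is a placeholder: `read_reg`/`write_reg` stand in for the device's MMIO accessors and the `mm*` symbols for the real register offsets.

```c
#include <stdint.h>

/* Field definitions copied from the generated header above. */
#define DPCSSYS_CR0_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
#define DPCSSYS_CR0_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
#define DPCSSYS_CR0_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL

/* Placeholders: MMIO accessors and register offsets supplied elsewhere. */
extern uint32_t read_reg(uint32_t offset);
extern void write_reg(uint32_t offset, uint32_t value);
extern uint32_t mmDPCSSYS_CR0_DPCSSYS_CR_ADDR;
extern uint32_t mmDPCSSYS_CR0_DPCSSYS_CR_DATA;

/* Assumed indirect read: select an internal address via CR_ADDR, then
 * fetch the selected register's contents back through CR_DATA. */
uint16_t dpcs_cr_read(uint16_t cr_addr)
{
	write_reg(mmDPCSSYS_CR0_DPCSSYS_CR_ADDR,
		  ((uint32_t)cr_addr << DPCSSYS_CR0_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT) &
		  DPCSSYS_CR0_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK);
	return (uint16_t)(read_reg(mmDPCSSYS_CR0_DPCSSYS_CR_DATA) &
			  DPCSSYS_CR0_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK);
}
```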
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx1_dispdec
+//RDPCSTX1_RDPCSTX_CNTL
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x19
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x01000000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x02000000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L
+//RDPCSTX1_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L
+//RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L
+//RDPCSTX1_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX1_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L
+//RDPCSTX1_RDPCS_TX_CR_ADDR
+#define RDPCSTX1_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define RDPCSTX1_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//RDPCSTX1_RDPCS_TX_CR_DATA
+#define RDPCSTX1_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define RDPCSTX1_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+//RDPCSTX1_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L
+//RDPCSTX1_RDPCSTX_MEM_POWER_CTRL
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT 0x1a
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT 0x1b
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT 0x1d
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT 0x1e
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK 0x00000FFFL
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK 0x03FFF000L
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK 0x04000000L
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK 0x08000000L
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK 0x20000000L
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK 0x40000000L
+//RDPCSTX1_RDPCSTX_MEM_POWER_CTRL2
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK 0x00000003L
+#define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK 0x00000004L
+//RDPCSTX1_RDPCSTX_SCRATCH
+#define RDPCSTX1_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL
+//RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+//RDPCSTX1_RDPCSTX_DEBUG_CONFIG
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L
+#define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L
+//RDPCSTX1_RDPCSTX_PHY_CNTL2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L
+//RDPCSTX1_RDPCSTX_PHY_CNTL3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000030L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL6
+#define
RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0 +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2 +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4 +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6 +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8 +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10 +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11 +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12 +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13 +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14 +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L +//RDPCSTX1_RDPCSTX_PHY_CNTL7 +#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0 +#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10 +#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL +#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L +//RDPCSTX1_RDPCSTX_PHY_CNTL8 +#define RDPCSTX1_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0 +#define RDPCSTX1_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL +//RDPCSTX1_RDPCSTX_PHY_CNTL9 +#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0 +#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18 +#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL +#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L +//RDPCSTX1_RDPCSTX_PHY_CNTL10 +#define RDPCSTX1_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0 +#define RDPCSTX1_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL +//RDPCSTX1_RDPCSTX_PHY_CNTL11 +#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4 +#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10 +#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14 +#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18 +#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L +#define 
RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L +//RDPCSTX1_RDPCSTX_PHY_CNTL12 +#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0 +#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2 +#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4 +#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7 +#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8 +#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L +#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L +#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L +#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L +#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L +//RDPCSTX1_RDPCSTX_PHY_CNTL13 +#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14 +#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c +#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d +#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e +#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L +//RDPCSTX1_RDPCSTX_PHY_CNTL14 +#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0 +#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18 +#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c +#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L +#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L +//RDPCSTX1_RDPCSTX_PHY_FUSE0 +#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0 +#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6 +#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc +#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12 +#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14 +#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL +#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L +#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L +#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L +#define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L +//RDPCSTX1_RDPCSTX_PHY_FUSE1 +#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0 +#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6 +#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc +#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12 +#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19 +#define 
RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL +#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L +#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L +#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L +#define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L +//RDPCSTX1_RDPCSTX_PHY_FUSE2 +#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0 +#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6 +#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc +#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL +#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L +#define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L +//RDPCSTX1_RDPCSTX_PHY_FUSE3 +#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0 +#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6 +#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc +#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12 +#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18 +#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL +#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L +#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L +#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L +#define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L +//RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL +#define RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0 +#define RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8 +#define RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL +#define RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L +//RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3 +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0 +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1 +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2 +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3 +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4 +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5 +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8 +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9 +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10 +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11 +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12 +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13 +#define 
RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14 +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15 +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18 +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19 +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L +//RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6 +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0 +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2 +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4 +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6 +#define 
RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8 +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10 +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11 +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12 +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13 +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14 +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L +#define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L +//RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG +#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0 +#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4 +#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8 +#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L +#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L +#define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L + + +// addressBlock: dpcssys_dpcssys_cr1_dispdec +//DPCSSYS_CR1_DPCSSYS_CR_ADDR +#define DPCSSYS_CR1_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0 +#define DPCSSYS_CR1_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL +//DPCSSYS_CR1_DPCSSYS_CR_DATA +#define DPCSSYS_CR1_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0 +#define DPCSSYS_CR1_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL + + +// addressBlock: dpcssys_dpcs0_dpcstx2_dispdec +//DPCSTX2_DPCSTX_TX_CLOCK_CNTL +#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0 +#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1 +#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2 +#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3 +#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L +#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 
0x00000002L +#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L +#define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L +//DPCSTX2_DPCSTX_TX_CNTL +#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc +#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd +#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe +#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf +#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10 +#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11 +#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14 +#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f +#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L +#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L +#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L +#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L +#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L +#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L +#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L +#define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L +//DPCSTX2_DPCSTX_CBUS_CNTL +#define DPCSTX2_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0 +#define DPCSTX2_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f +#define DPCSTX2_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL +#define DPCSTX2_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L +//DPCSTX2_DPCSTX_INTERRUPT_CNTL +#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0 +#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1 +#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4 +#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8 +#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9 +#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa +#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb +#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc +#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10 +#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14 +#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L +#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L +#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L +#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L +#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L +#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L +#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L +#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L +#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L +#define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L +//DPCSTX2_DPCSTX_PLL_UPDATE_ADDR +#define DPCSTX2_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0 +#define DPCSTX2_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL +//DPCSTX2_DPCSTX_PLL_UPDATE_DATA +#define DPCSTX2_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0 +#define DPCSTX2_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL +//DPCSTX2_DPCSTX_DEBUG_CONFIG 
+#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0 +#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1 +#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4 +#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8 +#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe +#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10 +#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L +#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL +#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L +#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L +#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L +#define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L + + +// addressBlock: dpcssys_dpcs0_rdpcstx2_dispdec +//RDPCSTX2_RDPCSTX_CNTL +#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0 +#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4 +#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc +#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd +#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe +#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf +#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10 +#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11 +#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14 +#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x18 +#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x19 +#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a +#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f +#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L +#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L +#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L +#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L +#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L +#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L +#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L +#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L +#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L +#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x01000000L +#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x02000000L +#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L +#define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L +//RDPCSTX2_RDPCSTX_CLOCK_CNTL +#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0 +#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4 +#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5 +#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6 +#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7 +#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8 +#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9 +#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa +#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc +#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd +#define 
RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe +#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10 +#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L +#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L +#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L +#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L +#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L +#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L +#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L +#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L +#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L +#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L +#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L +#define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L +//RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL +#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0 +#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1 +#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2 +#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4 +#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5 +#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6 +#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7 +#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8 +#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9 +#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa +#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc +#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10 +#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11 +#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12 +#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14 +#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L +#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L +#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L +#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L +#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L +#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L +#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L +#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L +#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L +#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L +#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L +#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L +#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L +#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L +#define 
RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L +//RDPCSTX2_RDPCSTX_PLL_UPDATE_DATA +#define RDPCSTX2_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0 +#define RDPCSTX2_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L +//RDPCSTX2_RDPCS_TX_CR_ADDR +#define RDPCSTX2_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0 +#define RDPCSTX2_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL +//RDPCSTX2_RDPCS_TX_CR_DATA +#define RDPCSTX2_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0 +#define RDPCSTX2_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL +//RDPCSTX2_RDPCS_TX_SRAM_CNTL +#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14 +#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18 +#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c +#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L +#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L +#define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L +//RDPCSTX2_RDPCSTX_MEM_POWER_CTRL +#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT 0x0 +#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT 0xc +#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT 0x1a +#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT 0x1b +#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT 0x1c +#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT 0x1d +#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT 0x1e +#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK 0x00000FFFL +#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK 0x03FFF000L +#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK 0x04000000L +#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK 0x08000000L +#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK 0x10000000L +#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK 0x20000000L +#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK 0x40000000L +//RDPCSTX2_RDPCSTX_MEM_POWER_CTRL2 +#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT 0x0 +#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT 0x2 +#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK 0x00000003L +#define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK 0x00000004L +//RDPCSTX2_RDPCSTX_SCRATCH +#define RDPCSTX2_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0 +#define RDPCSTX2_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL +//RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG +#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0 +#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4 +#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8 +#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L +#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L +#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L +//RDPCSTX2_RDPCSTX_DEBUG_CONFIG +#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0 +#define 
RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4 +#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7 +#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8 +#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf +#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10 +#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18 +#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L +#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L +#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L +#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L +#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L +#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L +#define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L +//RDPCSTX2_RDPCSTX_PHY_CNTL0 +#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0 +#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1 +#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2 +#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3 +#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4 +#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8 +#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9 +#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe +#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11 +#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12 +#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14 +#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15 +#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18 +#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19 +#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c +#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d +#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f +#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L +#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L +#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L +#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L +#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L +#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L +#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L +#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L +//RDPCSTX2_RDPCSTX_PHY_CNTL1 
+#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0 +#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1 +#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2 +#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3 +#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4 +#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5 +#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6 +#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7 +#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L +#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L +#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L +#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L +#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L +#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L +#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L +#define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L +//RDPCSTX2_RDPCSTX_PHY_CNTL2 +#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3 +#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4 +#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5 +#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6 +#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7 +#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8 +#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9 +#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa +#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb +#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L +#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L +#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L +#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L +#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L +#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L +#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L +#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L +#define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L +//RDPCSTX2_RDPCSTX_PHY_CNTL3 +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0 +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1 +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2 +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3 +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4 +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5 +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8 +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9 +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc +#define 
RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10 +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11 +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12 +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13 +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14 +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15 +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18 +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19 +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L +//RDPCSTX2_RDPCSTX_PHY_CNTL4 +#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0 +#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4 +#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6 +#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7 +#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8 +#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc +#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe +#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf +#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10 +#define 
RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14 +#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16 +#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17 +#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18 +#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c +#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e +#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f +#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L +#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L +#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L +#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L +#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L +#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L +//RDPCSTX2_RDPCSTX_PHY_CNTL5 +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0 +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1 +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4 +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6 +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7 +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8 +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9 +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10 +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11 +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14 +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16 +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17 +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18 +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19 +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 
0x00000030L +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L +//RDPCSTX2_RDPCSTX_PHY_CNTL6 +#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0 +#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2 +#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4 +#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6 +#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8 +#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa +#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc +#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe +#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10 +#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11 +#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12 +#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13 +#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14 +#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L +#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L +#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L +#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L +#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L +#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L +#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L +#define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L +//RDPCSTX2_RDPCSTX_PHY_CNTL7 +#define RDPCSTX2_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0 +#define RDPCSTX2_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10 +#define 
+//RDPCSTX2_RDPCSTX_PHY_CNTL7
+#define RDPCSTX2_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL
+#define RDPCSTX2_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL
+//RDPCSTX2_RDPCSTX_PHY_CNTL9
+#define RDPCSTX2_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL
+#define RDPCSTX2_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL
+//RDPCSTX2_RDPCSTX_PHY_CNTL11
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL12
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L
+//RDPCSTX2_RDPCSTX_PHY_CNTL13
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L
+//RDPCSTX2_RDPCSTX_PHY_CNTL14
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L
+#define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L
+//RDPCSTX2_RDPCSTX_PHY_FUSE0
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L
+//RDPCSTX2_RDPCSTX_PHY_FUSE1
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L
+//RDPCSTX2_RDPCSTX_PHY_FUSE2
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L
+//RDPCSTX2_RDPCSTX_PHY_FUSE3
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L
+#define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L
+//RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL
+#define RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L
+//RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L
+//RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L
+//RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L
+#define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+
+
+// addressBlock: dpcssys_dpcssys_cr2_dispdec
+//DPCSSYS_CR2_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR2_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define DPCSSYS_CR2_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//DPCSSYS_CR2_DPCSSYS_CR_DATA
+#define DPCSSYS_CR2_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define DPCSSYS_CR2_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+
+
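DPCSSYS_CR2_DPCSSYS_CR_ADDR and DPCSSYS_CR2_DPCSSYS_CR_DATA above form the usual address/data window pair for indirect access to the PHY's internal CR register space: software latches a 16-bit internal address, then reads or writes the data window. A hedged sketch of an indirect read, reusing the same stand-in accessors and hypothetical mm* offset names as above:

/* Indirect CR read: latch the internal address, then read the data window. */
static uint16_t dpcssys_cr2_read(uint16_t cr_addr)	/* hypothetical helper */
{
	write_reg(mmDPCSSYS_CR2_DPCSSYS_CR_ADDR,
		  cr_addr & DPCSSYS_CR2_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK);
	return read_reg(mmDPCSSYS_CR2_DPCSSYS_CR_DATA) &
	       DPCSSYS_CR2_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK;
}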
+// addressBlock: dpcssys_dpcs0_dpcstx3_dispdec
+//DPCSTX3_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L
+#define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L
+//DPCSTX3_DPCSTX_TX_CNTL
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L
+//DPCSTX3_DPCSTX_CBUS_CNTL
+#define DPCSTX3_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0
+#define DPCSTX3_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX3_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL
+#define DPCSTX3_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L
+//DPCSTX3_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L
+#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L
+//DPCSTX3_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX3_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0
+#define DPCSTX3_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL
+//DPCSTX3_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX3_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define DPCSTX3_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
+//DPCSTX3_DPCSTX_DEBUG_CONFIG
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
+#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L
+
+
+// addressBlock: dpcssys_dpcs0_rdpcstx3_dispdec
+//RDPCSTX3_RDPCSTX_CNTL
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x19
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x01000000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x02000000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L
+#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L
+//RDPCSTX3_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L
+#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L
+//RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L
+#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L
+//RDPCSTX3_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX3_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L
+//RDPCSTX3_RDPCS_TX_CR_ADDR
+#define RDPCSTX3_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define RDPCSTX3_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//RDPCSTX3_RDPCS_TX_CR_DATA
+#define RDPCSTX3_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define RDPCSTX3_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+//RDPCSTX3_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L
+#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L
+//RDPCSTX3_RDPCSTX_MEM_POWER_CTRL
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT 0x1a
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT 0x1b
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT 0x1d
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT 0x1e
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK 0x00000FFFL
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK 0x03FFF000L
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK 0x04000000L
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK 0x08000000L
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK 0x10000000L
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK 0x20000000L
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK 0x40000000L
+//RDPCSTX3_RDPCSTX_MEM_POWER_CTRL2
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK 0x00000003L
+#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK 0x00000004L
+//RDPCSTX3_RDPCSTX_SCRATCH
+#define RDPCSTX3_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL
+//RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+//RDPCSTX3_RDPCSTX_DEBUG_CONFIG
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L
+#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL1
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L
+//RDPCSTX3_RDPCSTX_PHY_CNTL2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L
+//RDPCSTX3_RDPCSTX_PHY_CNTL3
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL5
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000030L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL
+#define RDPCSTX3_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL
+//RDPCSTX3_RDPCSTX_PHY_CNTL9
+#define RDPCSTX3_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL
+#define RDPCSTX3_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL
+//RDPCSTX3_RDPCSTX_PHY_CNTL11
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL12
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L
+//RDPCSTX3_RDPCSTX_PHY_CNTL13
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L
+//RDPCSTX3_RDPCSTX_PHY_CNTL14
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L
+#define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L
+//RDPCSTX3_RDPCSTX_PHY_FUSE0
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L
+//RDPCSTX3_RDPCSTX_PHY_FUSE1
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L
+//RDPCSTX3_RDPCSTX_PHY_FUSE2
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L
+//RDPCSTX3_RDPCSTX_PHY_FUSE3
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L
+#define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L
+//RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL
+#define RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L
+//RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L
+//RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L
+//RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L
+#define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+
+
+// addressBlock: dpcssys_dpcssys_cr3_dispdec
+//DPCSSYS_CR3_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR3_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define DPCSSYS_CR3_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//DPCSSYS_CR3_DPCSSYS_CR_DATA
+#define DPCSSYS_CR3_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define DPCSSYS_CR3_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+
+
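The DPCSRX block that follows keeps the same conventions; its per-lane enables in DPCSRX_RX_CNTL are single-bit fields, so bringing up the receive path is a read-modify-write over the OR of the relevant masks. A sketch under the same stand-in helpers and hypothetical mm* offset as before:

/* Enable RX lanes 0/1 and the RX FIFO in one read-modify-write. */
uint32_t rx = read_reg(mmDPCSRX_RX_CNTL);	/* stand-in accessor/offset */
rx |= DPCSRX_RX_CNTL__DPCS_RX_LANE0_EN_MASK |
      DPCSRX_RX_CNTL__DPCS_RX_LANE1_EN_MASK |
      DPCSRX_RX_CNTL__DPCS_RX_FIFO_EN_MASK;
write_reg(mmDPCSRX_RX_CNTL, rx);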
DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_EN_MASK 0x00000002L +#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_SEL_MASK 0x0000000CL +#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_CLOCK_ON_MASK 0x00000010L +#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX0_GATE_DIS_MASK 0x00010000L +#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX0_EN_MASK 0x00020000L +#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX0_CLOCK_ON_MASK 0x00040000L +#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX1_GATE_DIS_MASK 0x00100000L +#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX1_EN_MASK 0x00200000L +#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX1_CLOCK_ON_MASK 0x00400000L +#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX2_GATE_DIS_MASK 0x01000000L +#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX2_EN_MASK 0x02000000L +#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX2_CLOCK_ON_MASK 0x04000000L +#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX3_GATE_DIS_MASK 0x10000000L +#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX3_EN_MASK 0x20000000L +#define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX3_CLOCK_ON_MASK 0x40000000L +//DPCSRX_RX_CNTL +#define DPCSRX_RX_CNTL__DPCS_RX_LANE0_EN__SHIFT 0x0 +#define DPCSRX_RX_CNTL__DPCS_RX_LANE1_EN__SHIFT 0x1 +#define DPCSRX_RX_CNTL__DPCS_RX_LANE2_EN__SHIFT 0x2 +#define DPCSRX_RX_CNTL__DPCS_RX_LANE3_EN__SHIFT 0x3 +#define DPCSRX_RX_CNTL__DPCS_RX_FIFO_EN__SHIFT 0x4 +#define DPCSRX_RX_CNTL__DPCS_RX_FIFO_START__SHIFT 0x5 +#define DPCSRX_RX_CNTL__DPCS_RX_FIFO_RD_START_DELAY__SHIFT 0x8 +#define DPCSRX_RX_CNTL__DPCS_RX_SOFT_RESET__SHIFT 0x1f +#define DPCSRX_RX_CNTL__DPCS_RX_LANE0_EN_MASK 0x00000001L +#define DPCSRX_RX_CNTL__DPCS_RX_LANE1_EN_MASK 0x00000002L +#define DPCSRX_RX_CNTL__DPCS_RX_LANE2_EN_MASK 0x00000004L +#define DPCSRX_RX_CNTL__DPCS_RX_LANE3_EN_MASK 0x00000008L +#define DPCSRX_RX_CNTL__DPCS_RX_FIFO_EN_MASK 0x00000010L +#define DPCSRX_RX_CNTL__DPCS_RX_FIFO_START_MASK 0x00000020L +#define DPCSRX_RX_CNTL__DPCS_RX_FIFO_RD_START_DELAY_MASK 0x00000F00L +#define DPCSRX_RX_CNTL__DPCS_RX_SOFT_RESET_MASK 0x80000000L +//DPCSRX_CBUS_CNTL +#define DPCSRX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0 +#define DPCSRX_CBUS_CNTL__DPCS_PHY_MASTER_REQ_DELAY__SHIFT 0x8 +#define DPCSRX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f +#define DPCSRX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x0000000FL +#define DPCSRX_CBUS_CNTL__DPCS_PHY_MASTER_REQ_DELAY_MASK 0x0000FF00L +#define DPCSRX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L +//DPCSRX_REG_ERROR_STATUS +#define DPCSRX_REG_ERROR_STATUS__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0 +#define DPCSRX_REG_ERROR_STATUS__DPCS_REG_ERROR_CLR__SHIFT 0x1 +#define DPCSRX_REG_ERROR_STATUS__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4 +#define DPCSRX_REG_ERROR_STATUS__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L +#define DPCSRX_REG_ERROR_STATUS__DPCS_REG_ERROR_CLR_MASK 0x00000002L +#define DPCSRX_REG_ERROR_STATUS__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L +//DPCSRX_RX_ERROR_STATUS +#define DPCSRX_RX_ERROR_STATUS__DPCS_RX0_FIFO_ERROR__SHIFT 0x0 +#define DPCSRX_RX_ERROR_STATUS__DPCS_RX1_FIFO_ERROR__SHIFT 0x1 +#define DPCSRX_RX_ERROR_STATUS__DPCS_RX2_FIFO_ERROR__SHIFT 0x2 +#define DPCSRX_RX_ERROR_STATUS__DPCS_RX3_FIFO_ERROR__SHIFT 0x3 +#define DPCSRX_RX_ERROR_STATUS__DPCS_RX_ERROR_CLR__SHIFT 0x8 +#define DPCSRX_RX_ERROR_STATUS__DPCS_RX_FIFO_ERROR_MASK__SHIFT 0xc +#define DPCSRX_RX_ERROR_STATUS__DPCS_RX0_FIFO_ERROR_MASK 0x00000001L +#define DPCSRX_RX_ERROR_STATUS__DPCS_RX1_FIFO_ERROR_MASK 0x00000002L +#define DPCSRX_RX_ERROR_STATUS__DPCS_RX2_FIFO_ERROR_MASK 0x00000004L +#define DPCSRX_RX_ERROR_STATUS__DPCS_RX3_FIFO_ERROR_MASK 0x00000008L +#define 
DPCSRX_RX_ERROR_STATUS__DPCS_RX_ERROR_CLR_MASK 0x00000100L +#define DPCSRX_RX_ERROR_STATUS__DPCS_RX_FIFO_ERROR_MASK_MASK 0x00001000L +//DPCSRX_INDEX_MODE_ADDR +#define DPCSRX_INDEX_MODE_ADDR__DPCS_INDEX_MODE_ADDR__SHIFT 0x0 +#define DPCSRX_INDEX_MODE_ADDR__DPCS_INDEX_MODE_ADDR_MASK 0x0003FFFFL +//DPCSRX_INDEX_MODE_DATA +#define DPCSRX_INDEX_MODE_DATA__DPCS_INDEX_MODE_DATA__SHIFT 0x0 +#define DPCSRX_INDEX_MODE_DATA__DPCS_INDEX_MODE_DATA_MASK 0xFFFFFFFFL +//DPCSRX_DEBUG_CONFIG +#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0 +#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1 +#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_RX_SYMCLK_SEL__SHIFT 0x6 +#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_BLOCK_SEL__SHIFT 0xb +#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe +#define DPCSRX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10 +#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L +#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL +#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_RX_SYMCLK_SEL_MASK 0x000000C0L +#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_BLOCK_SEL_MASK 0x00003800L +#define DPCSRX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L +#define DPCSRX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L + + +// addressBlock: dpcssys_dpcs0_dpcstx4_dispdec +//DPCSTX4_DPCSTX_TX_CLOCK_CNTL +#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0 +#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1 +#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2 +#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3 +#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L +#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L +#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L +#define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L +//DPCSTX4_DPCSTX_TX_CNTL +#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc +#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd +#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe +#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf +#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10 +#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11 +#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14 +#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f +#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L +#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L +#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L +#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L +#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L +#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L +#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L +#define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L +//DPCSTX4_DPCSTX_CBUS_CNTL +#define DPCSTX4_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0 +#define DPCSTX4_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f +#define DPCSTX4_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL +#define DPCSTX4_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L +//DPCSTX4_DPCSTX_INTERRUPT_CNTL +#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0 +#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1 +#define 
DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4 +#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8 +#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9 +#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa +#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb +#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc +#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10 +#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14 +#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L +#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L +#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L +#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L +#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L +#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L +#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L +#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L +#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L +#define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L +//DPCSTX4_DPCSTX_PLL_UPDATE_ADDR +#define DPCSTX4_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0 +#define DPCSTX4_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL +//DPCSTX4_DPCSTX_PLL_UPDATE_DATA +#define DPCSTX4_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0 +#define DPCSTX4_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL +//DPCSTX4_DPCSTX_DEBUG_CONFIG +#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0 +#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1 +#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4 +#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8 +#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe +#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10 +#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L +#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL +#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L +#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L +#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L +#define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L + + +// addressBlock: dpcssys_dpcs0_rdpcstx4_dispdec +//RDPCSTX4_RDPCSTX_CNTL +#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0 +#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4 +#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc +#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd +#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe +#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf +#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10 +#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11 +#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14 +#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x18 +#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x19 +#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a +#define 
RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f +#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L +#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L +#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L +#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L +#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L +#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L +#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L +#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L +#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L +#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x01000000L +#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x02000000L +#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L +#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L +//RDPCSTX4_RDPCSTX_CLOCK_CNTL +#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0 +#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4 +#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5 +#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6 +#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7 +#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8 +#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9 +#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa +#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc +#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd +#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe +#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10 +#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L +#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L +#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L +#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L +#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L +#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L +#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L +#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L +#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L +#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L +#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L +#define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L +//RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL +#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0 +#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1 +#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2 +#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4 +#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5 +#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6 +#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7 +#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8 +#define 
RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9 +#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa +#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc +#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10 +#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11 +#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12 +#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14 +#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L +#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L +#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L +#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L +#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L +#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L +#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L +#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L +#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L +#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L +#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L +#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L +#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L +#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L +#define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L +//RDPCSTX4_RDPCSTX_PLL_UPDATE_DATA +#define RDPCSTX4_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0 +#define RDPCSTX4_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L +//RDPCSTX4_RDPCS_TX_CR_ADDR +#define RDPCSTX4_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0 +#define RDPCSTX4_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL +//RDPCSTX4_RDPCS_TX_CR_DATA +#define RDPCSTX4_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0 +#define RDPCSTX4_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL +//RDPCSTX4_RDPCS_TX_SRAM_CNTL +#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14 +#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18 +#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c +#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L +#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L +#define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L +//RDPCSTX4_RDPCSTX_MEM_POWER_CTRL +#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT 0x0 +#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT 0xc +#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT 0x1a +#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT 0x1b +#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT 0x1c +#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT 0x1d +#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT 0x1e +#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK 0x00000FFFL +#define 
RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK 0x03FFF000L +#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK 0x04000000L +#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK 0x08000000L +#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK 0x10000000L +#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK 0x20000000L +#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK 0x40000000L +//RDPCSTX4_RDPCSTX_MEM_POWER_CTRL2 +#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT 0x0 +#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT 0x2 +#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK 0x00000003L +#define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK 0x00000004L +//RDPCSTX4_RDPCSTX_SCRATCH +#define RDPCSTX4_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0 +#define RDPCSTX4_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL +//RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0 +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4 +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8 +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L +//RDPCSTX4_RDPCSTX_DEBUG_CONFIG +#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0 +#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4 +#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7 +#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8 +#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf +#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10 +#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18 +#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L +#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L +#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L +#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L +#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L +#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L +#define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L +//RDPCSTX4_RDPCSTX_PHY_CNTL0 +#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0 +#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1 +#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2 +#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3 +#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4 +#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8 +#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9 +#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe +#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11 +#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12 +#define 
RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14 +#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15 +#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18 +#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19 +#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c +#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d +#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f +#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L +#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L +#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L +#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L +#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L +#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L +#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L +#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L +//RDPCSTX4_RDPCSTX_PHY_CNTL1 +#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0 +#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1 +#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2 +#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3 +#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4 +#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5 +#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6 +#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7 +#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L +#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L +#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L +#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L +#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L +#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L +#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L +#define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L +//RDPCSTX4_RDPCSTX_PHY_CNTL2 +#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3 +#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4 +#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5 +#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6 +#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7 +#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8 +#define 
RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9 +#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa +#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb +#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L +#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L +#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L +#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L +#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L +#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L +#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L +#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L +#define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L +//RDPCSTX4_RDPCSTX_PHY_CNTL3 +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0 +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1 +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2 +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3 +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4 +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5 +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8 +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9 +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10 +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11 +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12 +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13 +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14 +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15 +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18 +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19 +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 
0x00000800L +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L +//RDPCSTX4_RDPCSTX_PHY_CNTL4 +#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0 +#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4 +#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6 +#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7 +#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8 +#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc +#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe +#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf +#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10 +#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14 +#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16 +#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17 +#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18 +#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c +#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e +#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f +#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L +#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L +#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L +#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L +#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L +#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 
0x40000000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L +//RDPCSTX4_RDPCSTX_PHY_CNTL5 +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0 +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1 +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4 +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6 +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7 +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8 +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9 +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10 +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11 +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14 +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16 +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17 +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18 +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19 +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000030L +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L +//RDPCSTX4_RDPCSTX_PHY_CNTL6 +#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0 +#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2 +#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4 +#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6 +#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8 
+#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa +#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc +#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe +#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10 +#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11 +#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12 +#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13 +#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14 +#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L +#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L +#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L +#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L +#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L +#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L +#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L +//RDPCSTX4_RDPCSTX_PHY_CNTL7 +#define RDPCSTX4_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0 +#define RDPCSTX4_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10 +#define RDPCSTX4_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL +#define RDPCSTX4_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L +//RDPCSTX4_RDPCSTX_PHY_CNTL8 +#define RDPCSTX4_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0 +#define RDPCSTX4_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL +//RDPCSTX4_RDPCSTX_PHY_CNTL9 +#define RDPCSTX4_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0 +#define RDPCSTX4_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18 +#define RDPCSTX4_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL +#define RDPCSTX4_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L +//RDPCSTX4_RDPCSTX_PHY_CNTL10 +#define RDPCSTX4_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0 +#define RDPCSTX4_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL +//RDPCSTX4_RDPCSTX_PHY_CNTL11 +#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4 +#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10 +#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14 +#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18 +#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L +#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L +//RDPCSTX4_RDPCSTX_PHY_CNTL12 +#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0 +#define 
RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2 +#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4 +#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7 +#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8 +#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L +#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L +#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L +#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L +#define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L +//RDPCSTX4_RDPCSTX_PHY_CNTL13 +#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14 +#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c +#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d +#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e +#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L +//RDPCSTX4_RDPCSTX_PHY_CNTL14 +#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0 +#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18 +#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c +#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L +#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L +#define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L +//RDPCSTX4_RDPCSTX_PHY_FUSE0 +#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0 +#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6 +#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc +#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12 +#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14 +#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL +#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L +#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L +#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L +#define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L +//RDPCSTX4_RDPCSTX_PHY_FUSE1 +#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0 +#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6 +#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc +#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12 +#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19 +#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL +#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L +#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L +#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L +#define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L 
+//RDPCSTX4_RDPCSTX_PHY_FUSE2 +#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0 +#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6 +#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc +#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL +#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L +#define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L +//RDPCSTX4_RDPCSTX_PHY_FUSE3 +#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0 +#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6 +#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc +#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12 +#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18 +#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL +#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L +#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L +#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L +#define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L +//RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL +#define RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0 +#define RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8 +#define RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL +#define RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L +//RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3 +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0 +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1 +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2 +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3 +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4 +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5 +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8 +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9 +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10 +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11 +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12 +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13 +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14 +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15 +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18 +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19 +#define 
RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L +//RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6 +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0 +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2 +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4 +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6 +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8 +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe +#define 
RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10 +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11 +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12 +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13 +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14 +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L +#define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L +//RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG +#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0 +#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4 +#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8 +#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L +#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L +#define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L + + +// addressBlock: dpcssys_dpcssys_cr4_dispdec +//DPCSSYS_CR4_DPCSSYS_CR_ADDR +#define DPCSSYS_CR4_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0 +#define DPCSSYS_CR4_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL +//DPCSSYS_CR4_DPCSSYS_CR_DATA +#define DPCSSYS_CR4_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0 +#define DPCSSYS_CR4_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL + + +// addressBlock: dpcssys_dpcs0_dpcstx5_dispdec +//DPCSTX5_DPCSTX_TX_CLOCK_CNTL +#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0 +#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1 +#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2 +#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3 +#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L +#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L +#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L +#define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L +//DPCSTX5_DPCSTX_TX_CNTL +#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc +#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd +#define 
DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe +#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf +#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10 +#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11 +#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14 +#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f +#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L +#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L +#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L +#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L +#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L +#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L +#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L +#define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L +//DPCSTX5_DPCSTX_CBUS_CNTL +#define DPCSTX5_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0 +#define DPCSTX5_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f +#define DPCSTX5_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL +#define DPCSTX5_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L +//DPCSTX5_DPCSTX_INTERRUPT_CNTL +#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0 +#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1 +#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4 +#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8 +#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9 +#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa +#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb +#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc +#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10 +#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14 +#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L +#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L +#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L +#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L +#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L +#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L +#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L +#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L +#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L +#define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L +//DPCSTX5_DPCSTX_PLL_UPDATE_ADDR +#define DPCSTX5_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0 +#define DPCSTX5_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL +//DPCSTX5_DPCSTX_PLL_UPDATE_DATA +#define DPCSTX5_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0 +#define DPCSTX5_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL +//DPCSTX5_DPCSTX_DEBUG_CONFIG +#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0 +#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1 +#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4 +#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8 +#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 
0xe +#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10 +#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L +#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL +#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L +#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L +#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L +#define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L + + +// addressBlock: dpcssys_dpcs0_rdpcstx5_dispdec +//RDPCSTX5_RDPCSTX_CNTL +#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0 +#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4 +#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc +#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd +#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe +#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf +#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10 +#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11 +#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14 +#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x18 +#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x19 +#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a +#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f +#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L +#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L +#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L +#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L +#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L +#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L +#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L +#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L +#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L +#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x01000000L +#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x02000000L +#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L +#define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L +//RDPCSTX5_RDPCSTX_CLOCK_CNTL +#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0 +#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4 +#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5 +#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6 +#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7 +#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8 +#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9 +#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa +#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc +#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd +#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe +#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10 +#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L +#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L +#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L
+#define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L
+//RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L
+#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L
+//RDPCSTX5_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX5_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L
+//RDPCSTX5_RDPCS_TX_CR_ADDR
+#define RDPCSTX5_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define RDPCSTX5_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//RDPCSTX5_RDPCS_TX_CR_DATA
+#define RDPCSTX5_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define RDPCSTX5_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+//RDPCSTX5_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14
+#define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18
+#define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c
+#define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L
+#define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L
+#define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L
+//RDPCSTX5_RDPCSTX_MEM_POWER_CTRL
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT 0x1a
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT 0x1b
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT 0x1c
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT 0x1d
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT 0x1e
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK 0x00000FFFL
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK 0x03FFF000L
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK 0x04000000L
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK 0x08000000L
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK 0x10000000L
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK 0x20000000L
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK 0x40000000L
+//RDPCSTX5_RDPCSTX_MEM_POWER_CTRL2
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK 0x00000003L
+#define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK 0x00000004L
+//RDPCSTX5_RDPCSTX_SCRATCH
+#define RDPCSTX5_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL
+//RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+//RDPCSTX5_RDPCSTX_DEBUG_CONFIG
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L
+#define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL1
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L
+//RDPCSTX5_RDPCSTX_PHY_CNTL2
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L
+//RDPCSTX5_RDPCSTX_PHY_CNTL3
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL5
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000030L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL6
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL7
+#define RDPCSTX5_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL
+#define RDPCSTX5_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL
+//RDPCSTX5_RDPCSTX_PHY_CNTL9
+#define RDPCSTX5_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL
+#define RDPCSTX5_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL
+//RDPCSTX5_RDPCSTX_PHY_CNTL11
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL12
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L
+//RDPCSTX5_RDPCSTX_PHY_CNTL13
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L
+//RDPCSTX5_RDPCSTX_PHY_CNTL14
+#define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c
+#define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L
+#define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L
+//RDPCSTX5_RDPCSTX_PHY_FUSE0
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L
+//RDPCSTX5_RDPCSTX_PHY_FUSE1
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L
+//RDPCSTX5_RDPCSTX_PHY_FUSE2
+#define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L
+//RDPCSTX5_RDPCSTX_PHY_FUSE3
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L
+#define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L
+//RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL
+#define RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L
+//RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L
+//RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L
+#define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L
+//RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG
+#define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0
+#define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4
+#define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8
+#define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L
+#define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L
+#define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
+
+
+// addressBlock: dpcssys_dpcssys_cr5_dispdec
+//DPCSSYS_CR5_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR5_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define DPCSSYS_CR5_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//DPCSSYS_CR5_DPCSSYS_CR_DATA
+#define DPCSSYS_CR5_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define DPCSSYS_CR5_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_2_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_1_0_offset.h
similarity index 100%
rename from drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_2_1_0_offset.h
rename to drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_1_0_offset.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_2_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_1_0_sh_mask.h
similarity index 100%
rename from drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_2_1_0_sh_mask.h
rename to drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_1_0_sh_mask.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
index 2bfaaa8157d083625b2ce1221cc9bd09ef02c555..d984c916df808f84863d262a905b0a367dd22a01 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
@@ -2224,6 +2224,14 @@
 #define mmCOMPUTE_STATIC_THREAD_MGMT_SE2_BASE_IDX 0
 #define mmCOMPUTE_STATIC_THREAD_MGMT_SE3 0x0e1a
 #define mmCOMPUTE_STATIC_THREAD_MGMT_SE3_BASE_IDX 0
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE4 0x0e25
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE4_BASE_IDX 0
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE5 0x0e26
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE5_BASE_IDX 0
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE6 0x0e27
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE6_BASE_IDX 0
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE7 0x0e28
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE7_BASE_IDX 0
 #define mmCOMPUTE_RESTART_X 0x0e1b
 #define mmCOMPUTE_RESTART_X_BASE_IDX 0
 #define mmCOMPUTE_RESTART_Y 0x0e1c
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
index d4c613a8535278b7275be83cc4fee332ae77292d..ea316d8dcb37f9f95a6993a7e3258e6a76fb9cf9 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
@@ -2060,7 +2060,8 @@
 
 // addressBlock: gc_sqdec
 //SQ_CONFIG
-#define SQ_CONFIG__UNUSED__SHIFT 0x0
+#define SQ_CONFIG__DISABLE_BARRIER_WAITCNT__SHIFT 0x0
+#define SQ_CONFIG__UNUSED__SHIFT 0x1
 #define SQ_CONFIG__OVERRIDE_ALU_BUSY__SHIFT 0x7
 #define SQ_CONFIG__DEBUG_EN__SHIFT 0x8
 #define SQ_CONFIG__DEBUG_SINGLE_MEMOP__SHIFT 0x9
@@ -2079,7 +2080,8 @@
 #define SQ_CONFIG__DISABLE_SP_REDUNDANT_THREAD_GATING__SHIFT 0x1d
 #define SQ_CONFIG__DISABLE_FLAT_SOFT_CLAUSE__SHIFT 0x1e
 #define SQ_CONFIG__DISABLE_MIMG_SOFT_CLAUSE__SHIFT 0x1f
-#define SQ_CONFIG__UNUSED_MASK 0x0000007FL
+#define SQ_CONFIG__DISABLE_BARRIER_WAITCNT_MASK 0x00000001L
+#define SQ_CONFIG__UNUSED_MASK 0x0000007EL
 #define SQ_CONFIG__OVERRIDE_ALU_BUSY_MASK 0x00000080L
 #define SQ_CONFIG__DEBUG_EN_MASK 0x00000100L
 #define SQ_CONFIG__DEBUG_SINGLE_MEMOP_MASK 0x00000200L
@@ -8739,10 +8741,16 @@
 #define TCP_ADDR_CONFIG__NUM_BANKS__SHIFT 0x4
 #define TCP_ADDR_CONFIG__COLHI_WIDTH__SHIFT 0x6
 #define TCP_ADDR_CONFIG__RB_SPLIT_COLHI__SHIFT 0x9
+#define TCP_ADDR_CONFIG__ENABLE64KHASH__SHIFT 0xb
+#define TCP_ADDR_CONFIG__ENABLE2MHASH__SHIFT 0xc
+#define TCP_ADDR_CONFIG__ENABLE1GHASH__SHIFT 0xd
 #define TCP_ADDR_CONFIG__NUM_TCC_BANKS_MASK 0x0000000FL
 #define TCP_ADDR_CONFIG__NUM_BANKS_MASK 0x00000030L
 #define TCP_ADDR_CONFIG__COLHI_WIDTH_MASK 0x000001C0L
 #define TCP_ADDR_CONFIG__RB_SPLIT_COLHI_MASK 0x00000200L
+#define TCP_ADDR_CONFIG__ENABLE64KHASH_MASK 0x00000800L
+#define TCP_ADDR_CONFIG__ENABLE2MHASH_MASK 0x00001000L
+#define TCP_ADDR_CONFIG__ENABLE1GHASH_MASK 0x00002000L
 //TCP_CREDIT
 #define TCP_CREDIT__LFIFO_CREDIT__SHIFT 0x0
 #define TCP_CREDIT__REQ_FIFO_CREDIT__SHIFT 0x10
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_offset.h
new file mode 100644
index 0000000000000000000000000000000000000000..f41556abfbbc8c43a76fc0246248c913f0446a23
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_offset.h
@@ -0,0 +1,264 @@
+/*
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _gc_9_4_1_OFFSET_HEADER
+#define _gc_9_4_1_OFFSET_HEADER
+
+// addressBlock: gc_grbmdec
+// base address: 0x8000
+#define mmGRBM_CNTL 0x0000
+#define mmGRBM_CNTL_BASE_IDX 0
+#define mmGRBM_SKEW_CNTL 0x0001
+#define mmGRBM_SKEW_CNTL_BASE_IDX 0
+#define mmGRBM_STATUS2 0x0002
+#define mmGRBM_STATUS2_BASE_IDX 0
+#define mmGRBM_PWR_CNTL 0x0003
+#define mmGRBM_PWR_CNTL_BASE_IDX 0
+#define mmGRBM_STATUS 0x0004
+#define mmGRBM_STATUS_BASE_IDX 0
+#define mmGRBM_STATUS_SE0 0x0005
+#define mmGRBM_STATUS_SE0_BASE_IDX 0
+#define mmGRBM_STATUS_SE1 0x0006
+#define mmGRBM_STATUS_SE1_BASE_IDX 0
+#define mmGRBM_SOFT_RESET 0x0008
+#define mmGRBM_SOFT_RESET_BASE_IDX 0
+#define mmGRBM_GFX_CLKEN_CNTL 0x000c
+#define mmGRBM_GFX_CLKEN_CNTL_BASE_IDX 0
+#define mmGRBM_WAIT_IDLE_CLOCKS 0x000d
+#define mmGRBM_WAIT_IDLE_CLOCKS_BASE_IDX 0
+#define mmGRBM_STATUS_SE2 0x000e
+#define mmGRBM_STATUS_SE2_BASE_IDX 0
+#define mmGRBM_STATUS_SE3 0x000f
+#define mmGRBM_STATUS_SE3_BASE_IDX 0
+#define mmGRBM_READ_ERROR 0x0016
+#define mmGRBM_READ_ERROR_BASE_IDX 0
+#define mmGRBM_READ_ERROR2 0x0017
+#define mmGRBM_READ_ERROR2_BASE_IDX 0
+#define mmGRBM_INT_CNTL 0x0018
+#define mmGRBM_INT_CNTL_BASE_IDX 0
+#define mmGRBM_TRAP_OP 0x0019
+#define mmGRBM_TRAP_OP_BASE_IDX 0
+#define mmGRBM_TRAP_ADDR 0x001a
+#define mmGRBM_TRAP_ADDR_BASE_IDX 0
+#define mmGRBM_TRAP_ADDR_MSK 0x001b
+#define mmGRBM_TRAP_ADDR_MSK_BASE_IDX 0
+#define mmGRBM_TRAP_WD 0x001c
+#define mmGRBM_TRAP_WD_BASE_IDX 0
+#define mmGRBM_TRAP_WD_MSK 0x001d
+#define mmGRBM_TRAP_WD_MSK_BASE_IDX 0
+#define mmGRBM_DSM_BYPASS 0x001e
+#define mmGRBM_DSM_BYPASS_BASE_IDX 0
+#define mmGRBM_WRITE_ERROR 0x001f
+#define mmGRBM_WRITE_ERROR_BASE_IDX 0
+#define mmGRBM_IOV_ERROR 0x0020
+#define mmGRBM_IOV_ERROR_BASE_IDX 0
+#define mmGRBM_CHIP_REVISION 0x0021
+#define mmGRBM_CHIP_REVISION_BASE_IDX 0
+#define mmGRBM_GFX_CNTL 0x0022
+#define mmGRBM_GFX_CNTL_BASE_IDX 0
+#define mmGRBM_RSMU_CFG 0x0023
+#define mmGRBM_RSMU_CFG_BASE_IDX 0
+#define mmGRBM_IH_CREDIT 0x0024
+#define mmGRBM_IH_CREDIT_BASE_IDX 0
+#define mmGRBM_PWR_CNTL2 0x0025
+#define mmGRBM_PWR_CNTL2_BASE_IDX 0
+#define mmGRBM_UTCL2_INVAL_RANGE_START 0x0026
+#define mmGRBM_UTCL2_INVAL_RANGE_START_BASE_IDX 0
+#define mmGRBM_UTCL2_INVAL_RANGE_END 0x0027
+#define mmGRBM_UTCL2_INVAL_RANGE_END_BASE_IDX 0
+#define mmGRBM_RSMU_READ_ERROR 0x0028
+#define mmGRBM_RSMU_READ_ERROR_BASE_IDX 0
+#define mmGRBM_CHICKEN_BITS 0x0029
+#define mmGRBM_CHICKEN_BITS_BASE_IDX 0
+#define mmGRBM_FENCE_RANGE0 0x002a
+#define mmGRBM_FENCE_RANGE0_BASE_IDX 0
+#define mmGRBM_FENCE_RANGE1 0x002b
+#define mmGRBM_FENCE_RANGE1_BASE_IDX 0
+#define mmGRBM_NOWHERE 0x003f
+#define mmGRBM_NOWHERE_BASE_IDX 0
+#define mmGRBM_SCRATCH_REG0 0x0040
+#define mmGRBM_SCRATCH_REG0_BASE_IDX 0
+#define mmGRBM_SCRATCH_REG1 0x0041
+#define mmGRBM_SCRATCH_REG1_BASE_IDX 0
+#define mmGRBM_SCRATCH_REG2 0x0042
+#define mmGRBM_SCRATCH_REG2_BASE_IDX 0
+#define mmGRBM_SCRATCH_REG3 0x0043
+#define mmGRBM_SCRATCH_REG3_BASE_IDX 0
+#define mmGRBM_SCRATCH_REG4 0x0044
+#define mmGRBM_SCRATCH_REG4_BASE_IDX 0
+#define mmGRBM_SCRATCH_REG5 0x0045
+#define mmGRBM_SCRATCH_REG5_BASE_IDX 0
+#define mmGRBM_SCRATCH_REG6 0x0046
+#define mmGRBM_SCRATCH_REG6_BASE_IDX 0
+#define mmGRBM_SCRATCH_REG7 0x0047
+#define mmGRBM_SCRATCH_REG7_BASE_IDX 0
+
+// addressBlock: gc_cppdec2
+// base address: 0xc600
+#define mmCPF_EDC_TAG_CNT 0x1189
+#define mmCPF_EDC_TAG_CNT_BASE_IDX 0
+#define mmCPF_EDC_ROQ_CNT 0x118a
+#define mmCPF_EDC_ROQ_CNT_BASE_IDX 0
+#define mmCPG_EDC_TAG_CNT 0x118b
+#define mmCPG_EDC_TAG_CNT_BASE_IDX 0
+#define mmCPG_EDC_DMA_CNT 0x118d
+#define mmCPG_EDC_DMA_CNT_BASE_IDX 0
+#define mmCPC_EDC_SCRATCH_CNT 0x118e
+#define mmCPC_EDC_SCRATCH_CNT_BASE_IDX 0
+#define mmCPC_EDC_UCODE_CNT 0x118f
+#define mmCPC_EDC_UCODE_CNT_BASE_IDX 0
+#define mmDC_EDC_STATE_CNT 0x1191
+#define mmDC_EDC_STATE_CNT_BASE_IDX 0
+#define mmDC_EDC_CSINVOC_CNT 0x1192
+#define mmDC_EDC_CSINVOC_CNT_BASE_IDX 0
+#define mmDC_EDC_RESTORE_CNT 0x1193
+#define mmDC_EDC_RESTORE_CNT_BASE_IDX 0
+
+// addressBlock: gc_gdsdec
+// base address: 0x9700
+#define mmGDS_EDC_CNT 0x05c5
+#define mmGDS_EDC_CNT_BASE_IDX 0
+#define mmGDS_EDC_GRBM_CNT 0x05c6
+#define mmGDS_EDC_GRBM_CNT_BASE_IDX 0
+#define mmGDS_EDC_OA_DED 0x05c7
+#define mmGDS_EDC_OA_DED_BASE_IDX 0
+#define mmGDS_EDC_OA_PHY_CNT 0x05cb
+#define mmGDS_EDC_OA_PHY_CNT_BASE_IDX 0
+#define mmGDS_EDC_OA_PIPE_CNT 0x05cc
+#define mmGDS_EDC_OA_PIPE_CNT_BASE_IDX 0
+
+// addressBlock: gc_shsdec
+// base address: 0x9000
+#define mmSPI_EDC_CNT 0x0445
+#define mmSPI_EDC_CNT_BASE_IDX 0
+
+// addressBlock: gc_sqdec
+// base address: 0x8c00
+#define mmSQC_EDC_CNT2 0x032c
+#define mmSQC_EDC_CNT2_BASE_IDX 0
+#define mmSQC_EDC_CNT3 0x032d
+#define mmSQC_EDC_CNT3_BASE_IDX 0
+#define mmSQC_EDC_PARITY_CNT3 0x032e
+#define mmSQC_EDC_PARITY_CNT3_BASE_IDX 0
+#define mmSQC_EDC_CNT 0x03a2
+#define mmSQC_EDC_CNT_BASE_IDX 0
+#define mmSQ_EDC_SEC_CNT 0x03a3
+#define mmSQ_EDC_SEC_CNT_BASE_IDX 0
+#define mmSQ_EDC_DED_CNT 0x03a4
+#define mmSQ_EDC_DED_CNT_BASE_IDX 0
+#define mmSQ_EDC_INFO 0x03a5
+#define mmSQ_EDC_INFO_BASE_IDX 0
+#define mmSQ_EDC_CNT 0x03a6
+#define mmSQ_EDC_CNT_BASE_IDX 0
+
+// addressBlock: gc_tpdec
+// base address: 0x9400
+#define mmTA_EDC_CNT 0x0586
+#define mmTA_EDC_CNT_BASE_IDX 0
+
+// addressBlock: gc_tcdec
+// base address: 0xac00
+#define mmTCP_EDC_CNT 0x0b17
+#define mmTCP_EDC_CNT_BASE_IDX 0
+#define mmTCP_EDC_CNT_NEW 0x0b18
+#define mmTCP_EDC_CNT_NEW_BASE_IDX 0
+#define mmTCP_ATC_EDC_GATCL1_CNT 0x12b1
+#define mmTCP_ATC_EDC_GATCL1_CNT_BASE_IDX 0
+#define mmTCI_EDC_CNT 0x0b60
+#define mmTCI_EDC_CNT_BASE_IDX 0
+#define mmTCC_EDC_CNT 0x0b82
+#define mmTCC_EDC_CNT_BASE_IDX 0
+#define mmTCC_EDC_CNT2 0x0b83
+#define mmTCC_EDC_CNT2_BASE_IDX 0
+#define mmTCA_EDC_CNT 0x0bc5
+#define mmTCA_EDC_CNT_BASE_IDX 0
+
+// addressBlock: gc_tpdec
+// base address: 0x9400
+#define mmTD_EDC_CNT 0x052e
+#define mmTD_EDC_CNT_BASE_IDX 0
+#define mmTA_EDC_CNT 0x0586
+#define mmTA_EDC_CNT_BASE_IDX 0
+
+// addressBlock: gc_ea_gceadec2
+// base address: 0x9c00
+#define mmGCEA_EDC_CNT 0x0706
+#define mmGCEA_EDC_CNT_BASE_IDX 0
+#define mmGCEA_EDC_CNT2 0x0707
+#define mmGCEA_EDC_CNT2_BASE_IDX 0
+#define mmGCEA_EDC_CNT3 0x071b
+#define mmGCEA_EDC_CNT3_BASE_IDX 0
+
+// addressBlock: gc_gfxudec
+// base address: 0x30000
+#define mmSCRATCH_REG0 0x2040
+#define mmSCRATCH_REG0_BASE_IDX 1
+#define mmSCRATCH_REG1 0x2041
+#define mmSCRATCH_REG1_BASE_IDX 1
+#define mmSCRATCH_REG2 0x2042
+#define mmSCRATCH_REG2_BASE_IDX 1
+#define mmSCRATCH_REG3 0x2043
+#define mmSCRATCH_REG3_BASE_IDX 1
+#define mmSCRATCH_REG4 0x2044
+#define mmSCRATCH_REG4_BASE_IDX 1
+#define mmSCRATCH_REG5 0x2045
+#define mmSCRATCH_REG5_BASE_IDX 1
+#define mmSCRATCH_REG6 0x2046
+#define mmSCRATCH_REG6_BASE_IDX 1
+#define mmSCRATCH_REG7 0x2047
+#define mmSCRATCH_REG7_BASE_IDX 1
+#define mmGRBM_GFX_INDEX 0x2200
+#define mmGRBM_GFX_INDEX_BASE_IDX 1
+
+// addressBlock: gc_utcl2_atcl2dec
+// base address: 0xa000
+#define mmATC_L2_CACHE_4K_DSM_INDEX 0x080e
+#define mmATC_L2_CACHE_4K_DSM_INDEX_BASE_IDX 0
+#define mmATC_L2_CACHE_2M_DSM_INDEX 0x080f
+#define mmATC_L2_CACHE_2M_DSM_INDEX_BASE_IDX 0
+#define mmATC_L2_CACHE_4K_DSM_CNTL 0x0810
+#define mmATC_L2_CACHE_4K_DSM_CNTL_BASE_IDX 0
+#define mmATC_L2_CACHE_2M_DSM_CNTL 0x0811
+#define mmATC_L2_CACHE_2M_DSM_CNTL_BASE_IDX 0
+
+// addressBlock: gc_utcl2_vml2pfdec
+// base address: 0xa100
+#define mmVML2_MEM_ECC_INDEX 0x0860
+#define mmVML2_MEM_ECC_INDEX_BASE_IDX 0
+#define mmVML2_WALKER_MEM_ECC_INDEX 0x0861
+#define mmVML2_WALKER_MEM_ECC_INDEX_BASE_IDX 0
+#define mmUTCL2_MEM_ECC_INDEX 0x0862
+#define mmUTCL2_MEM_ECC_INDEX_BASE_IDX 0
+
+#define mmVML2_MEM_ECC_CNTL 0x0863
+#define mmVML2_MEM_ECC_CNTL_BASE_IDX 0
+#define mmVML2_WALKER_MEM_ECC_CNTL 0x0864
+#define mmVML2_WALKER_MEM_ECC_CNTL_BASE_IDX 0
+#define mmUTCL2_MEM_ECC_CNTL 0x0865
+#define mmUTCL2_MEM_ECC_CNTL_BASE_IDX 0
+
+// addressBlock: gc_rlcpdec
+// base address: 0x3b000
+#define mmRLC_EDC_CNT 0x4d40
+#define mmRLC_EDC_CNT_BASE_IDX 1
+#define mmRLC_EDC_CNT2 0x4d41
+#define mmRLC_EDC_CNT2_BASE_IDX 1
+
+#endif
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h
new file mode 100644
index 0000000000000000000000000000000000000000..f26246a600c65f8b38e3f30d6c1e4b3a1ea23e28
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h
@@ -0,0 +1,748 @@
+/*
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _gc_9_4_1_SH_MASK_HEADER
+#define _gc_9_4_1_SH_MASK_HEADER
+
+// addressBlock: gc_cppdec2
+//CPF_EDC_TAG_CNT
+#define CPF_EDC_TAG_CNT__DED_COUNT__SHIFT 0x0
+#define CPF_EDC_TAG_CNT__SEC_COUNT__SHIFT 0x2
+#define CPF_EDC_TAG_CNT__DED_COUNT_MASK 0x00000003L
+#define CPF_EDC_TAG_CNT__SEC_COUNT_MASK 0x0000000CL
+//CPF_EDC_ROQ_CNT
+#define CPF_EDC_ROQ_CNT__DED_COUNT_ME1__SHIFT 0x0
+#define CPF_EDC_ROQ_CNT__SEC_COUNT_ME1__SHIFT 0x2
+#define CPF_EDC_ROQ_CNT__DED_COUNT_ME2__SHIFT 0x4
+#define CPF_EDC_ROQ_CNT__SEC_COUNT_ME2__SHIFT 0x6
+#define CPF_EDC_ROQ_CNT__DED_COUNT_ME1_MASK 0x00000003L
+#define CPF_EDC_ROQ_CNT__SEC_COUNT_ME1_MASK 0x0000000CL
+#define CPF_EDC_ROQ_CNT__DED_COUNT_ME2_MASK 0x00000030L
+#define CPF_EDC_ROQ_CNT__SEC_COUNT_ME2_MASK 0x000000C0L
+//CPG_EDC_TAG_CNT
+#define CPG_EDC_TAG_CNT__DED_COUNT__SHIFT 0x0
+#define CPG_EDC_TAG_CNT__SEC_COUNT__SHIFT 0x2
+#define CPG_EDC_TAG_CNT__DED_COUNT_MASK 0x00000003L
+#define CPG_EDC_TAG_CNT__SEC_COUNT_MASK 0x0000000CL
+//CPG_EDC_DMA_CNT
+#define CPG_EDC_DMA_CNT__ROQ_DED_COUNT__SHIFT 0x0
+#define CPG_EDC_DMA_CNT__ROQ_SEC_COUNT__SHIFT 0x2
+#define CPG_EDC_DMA_CNT__TAG_DED_COUNT__SHIFT 0x4
+#define CPG_EDC_DMA_CNT__TAG_SEC_COUNT__SHIFT 0x6
+#define CPG_EDC_DMA_CNT__ROQ_DED_COUNT_MASK 0x00000003L
+#define CPG_EDC_DMA_CNT__ROQ_SEC_COUNT_MASK 0x0000000CL
+#define CPG_EDC_DMA_CNT__TAG_DED_COUNT_MASK 0x00000030L
+#define CPG_EDC_DMA_CNT__TAG_SEC_COUNT_MASK 0x000000C0L
+//CPC_EDC_SCRATCH_CNT
+#define CPC_EDC_SCRATCH_CNT__DED_COUNT__SHIFT 0x0
+#define CPC_EDC_SCRATCH_CNT__SEC_COUNT__SHIFT 0x2
+#define CPC_EDC_SCRATCH_CNT__DED_COUNT_MASK 0x00000003L
+#define CPC_EDC_SCRATCH_CNT__SEC_COUNT_MASK 0x0000000CL
+//CPC_EDC_UCODE_CNT
+#define CPC_EDC_UCODE_CNT__DED_COUNT__SHIFT 0x0
+#define CPC_EDC_UCODE_CNT__SEC_COUNT__SHIFT 0x2
+#define CPC_EDC_UCODE_CNT__DED_COUNT_MASK 0x00000003L
+#define CPC_EDC_UCODE_CNT__SEC_COUNT_MASK 0x0000000CL
+//DC_EDC_STATE_CNT
+#define DC_EDC_STATE_CNT__DED_COUNT_ME1__SHIFT 0x0
+#define DC_EDC_STATE_CNT__SEC_COUNT_ME1__SHIFT 0x2
+#define DC_EDC_STATE_CNT__DED_COUNT_ME1_MASK 0x00000003L
+#define DC_EDC_STATE_CNT__SEC_COUNT_ME1_MASK 0x0000000CL
+//DC_EDC_CSINVOC_CNT
+#define DC_EDC_CSINVOC_CNT__DED_COUNT_ME1__SHIFT 0x0
+#define DC_EDC_CSINVOC_CNT__SEC_COUNT_ME1__SHIFT 0x2
+#define DC_EDC_CSINVOC_CNT__DED_COUNT1_ME1__SHIFT 0x4
+#define DC_EDC_CSINVOC_CNT__SEC_COUNT1_ME1__SHIFT 0x6
+#define DC_EDC_CSINVOC_CNT__DED_COUNT_ME1_MASK 0x00000003L
+#define DC_EDC_CSINVOC_CNT__SEC_COUNT_ME1_MASK 0x0000000CL
+#define DC_EDC_CSINVOC_CNT__DED_COUNT1_ME1_MASK 0x00000030L
+#define DC_EDC_CSINVOC_CNT__SEC_COUNT1_ME1_MASK 0x000000C0L
+//DC_EDC_RESTORE_CNT
+#define DC_EDC_RESTORE_CNT__DED_COUNT_ME1__SHIFT 0x0
+#define DC_EDC_RESTORE_CNT__SEC_COUNT_ME1__SHIFT 0x2
+#define DC_EDC_RESTORE_CNT__DED_COUNT1_ME1__SHIFT 0x4
+#define DC_EDC_RESTORE_CNT__SEC_COUNT1_ME1__SHIFT 0x6
+#define DC_EDC_RESTORE_CNT__DED_COUNT_ME1_MASK 0x00000003L
+#define DC_EDC_RESTORE_CNT__SEC_COUNT_ME1_MASK 0x0000000CL
+#define DC_EDC_RESTORE_CNT__DED_COUNT1_ME1_MASK 0x00000030L
+#define DC_EDC_RESTORE_CNT__SEC_COUNT1_ME1_MASK 0x000000C0L
+
+// addressBlock: gc_gdsdec
+//GDS_EDC_CNT
+#define GDS_EDC_CNT__GDS_MEM_DED__SHIFT 0x0
+#define GDS_EDC_CNT__GDS_MEM_SEC__SHIFT 0x4
+#define GDS_EDC_CNT__UNUSED__SHIFT 0x6
+#define GDS_EDC_CNT__GDS_MEM_DED_MASK 0x00000003L
+#define GDS_EDC_CNT__GDS_MEM_SEC_MASK 0x00000030L
+#define GDS_EDC_CNT__UNUSED_MASK 0xFFFFFFC0L
+//GDS_EDC_GRBM_CNT
+#define GDS_EDC_GRBM_CNT__DED__SHIFT 0x0
+#define GDS_EDC_GRBM_CNT__SEC__SHIFT 0x2
+#define GDS_EDC_GRBM_CNT__UNUSED__SHIFT 0x4
GDS_EDC_GRBM_CNT__UNUSED__SHIFT 0x4 +#define GDS_EDC_GRBM_CNT__DED_MASK 0x00000003L +#define GDS_EDC_GRBM_CNT__SEC_MASK 0x0000000CL +#define GDS_EDC_GRBM_CNT__UNUSED_MASK 0xFFFFFFF0L +//GDS_EDC_OA_DED +#define GDS_EDC_OA_DED__ME0_GFXHP3D_PIX_DED__SHIFT 0x0 +#define GDS_EDC_OA_DED__ME0_GFXHP3D_VTX_DED__SHIFT 0x1 +#define GDS_EDC_OA_DED__ME0_CS_DED__SHIFT 0x2 +#define GDS_EDC_OA_DED__ME0_GFXHP3D_GS_DED__SHIFT 0x3 +#define GDS_EDC_OA_DED__ME1_PIPE0_DED__SHIFT 0x4 +#define GDS_EDC_OA_DED__ME1_PIPE1_DED__SHIFT 0x5 +#define GDS_EDC_OA_DED__ME1_PIPE2_DED__SHIFT 0x6 +#define GDS_EDC_OA_DED__ME1_PIPE3_DED__SHIFT 0x7 +#define GDS_EDC_OA_DED__ME2_PIPE0_DED__SHIFT 0x8 +#define GDS_EDC_OA_DED__ME2_PIPE1_DED__SHIFT 0x9 +#define GDS_EDC_OA_DED__ME2_PIPE2_DED__SHIFT 0xa +#define GDS_EDC_OA_DED__ME2_PIPE3_DED__SHIFT 0xb +#define GDS_EDC_OA_DED__UNUSED1__SHIFT 0xc +#define GDS_EDC_OA_DED__ME0_GFXHP3D_PIX_DED_MASK 0x00000001L +#define GDS_EDC_OA_DED__ME0_GFXHP3D_VTX_DED_MASK 0x00000002L +#define GDS_EDC_OA_DED__ME0_CS_DED_MASK 0x00000004L +#define GDS_EDC_OA_DED__ME0_GFXHP3D_GS_DED_MASK 0x00000008L +#define GDS_EDC_OA_DED__ME1_PIPE0_DED_MASK 0x00000010L +#define GDS_EDC_OA_DED__ME1_PIPE1_DED_MASK 0x00000020L +#define GDS_EDC_OA_DED__ME1_PIPE2_DED_MASK 0x00000040L +#define GDS_EDC_OA_DED__ME1_PIPE3_DED_MASK 0x00000080L +#define GDS_EDC_OA_DED__ME2_PIPE0_DED_MASK 0x00000100L +#define GDS_EDC_OA_DED__ME2_PIPE1_DED_MASK 0x00000200L +#define GDS_EDC_OA_DED__ME2_PIPE2_DED_MASK 0x00000400L +#define GDS_EDC_OA_DED__ME2_PIPE3_DED_MASK 0x00000800L +#define GDS_EDC_OA_DED__UNUSED1_MASK 0xFFFFF000L +//GDS_EDC_OA_PHY_CNT +#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_SEC__SHIFT 0x0 +#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_DED__SHIFT 0x2 +#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_SEC__SHIFT 0x4 +#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_DED__SHIFT 0x6 +#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_SEC__SHIFT 0x8 +#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_DED__SHIFT 0xa +#define GDS_EDC_OA_PHY_CNT__UNUSED1__SHIFT 0xc +#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_SEC_MASK 0x00000003L +#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_DED_MASK 0x0000000CL +#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_SEC_MASK 0x00000030L +#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_DED_MASK 0x000000C0L +#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_SEC_MASK 0x00000300L +#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_DED_MASK 0x00000C00L +#define GDS_EDC_OA_PHY_CNT__UNUSED1_MASK 0xFFFFF000L +//GDS_EDC_OA_PIPE_CNT +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_SEC__SHIFT 0x0 +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_DED__SHIFT 0x2 +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_SEC__SHIFT 0x4 +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_DED__SHIFT 0x6 +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_SEC__SHIFT 0x8 +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_DED__SHIFT 0xa +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_SEC__SHIFT 0xc +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_DED__SHIFT 0xe +#define GDS_EDC_OA_PIPE_CNT__UNUSED__SHIFT 0x10 +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_SEC_MASK 0x00000003L +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_DED_MASK 0x0000000CL +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_SEC_MASK 0x00000030L +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_DED_MASK 0x000000C0L +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_SEC_MASK 0x00000300L +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_DED_MASK 0x00000C00L +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_SEC_MASK 0x00003000L 
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_DED_MASK 0x0000C000L +#define GDS_EDC_OA_PIPE_CNT__UNUSED_MASK 0xFFFF0000L + +// addressBlock: gc_shsdec +//SPI_EDC_CNT +#define SPI_EDC_CNT__SPI_SR_MEM_SEC_COUNT__SHIFT 0x0 +#define SPI_EDC_CNT__SPI_SR_MEM_DED_COUNT__SHIFT 0x2 +#define SPI_EDC_CNT__SPI_GDS_EXPREQ_SEC_COUNT__SHIFT 0x4 +#define SPI_EDC_CNT__SPI_GDS_EXPREQ_DED_COUNT__SHIFT 0x6 +#define SPI_EDC_CNT__SPI_WB_GRANT_30_SEC_COUNT__SHIFT 0x8 +#define SPI_EDC_CNT__SPI_WB_GRANT_30_DED_COUNT__SHIFT 0xa +#define SPI_EDC_CNT__SPI_WB_GRANT_61_SEC_COUNT__SHIFT 0xc +#define SPI_EDC_CNT__SPI_WB_GRANT_61_DED_COUNT__SHIFT 0xe +#define SPI_EDC_CNT__SPI_LIFE_CNT_SEC_COUNT__SHIFT 0x10 +#define SPI_EDC_CNT__SPI_LIFE_CNT_DED_COUNT__SHIFT 0x12 +#define SPI_EDC_CNT__SPI_SR_MEM_SEC_COUNT_MASK 0x00000003L +#define SPI_EDC_CNT__SPI_SR_MEM_DED_COUNT_MASK 0x0000000CL +#define SPI_EDC_CNT__SPI_GDS_EXPREQ_SEC_COUNT_MASK 0x00000030L +#define SPI_EDC_CNT__SPI_GDS_EXPREQ_DED_COUNT_MASK 0x000000C0L +#define SPI_EDC_CNT__SPI_WB_GRANT_30_SEC_COUNT_MASK 0x00000300L +#define SPI_EDC_CNT__SPI_WB_GRANT_30_DED_COUNT_MASK 0x00000C00L +#define SPI_EDC_CNT__SPI_WB_GRANT_61_SEC_COUNT_MASK 0x00003000L +#define SPI_EDC_CNT__SPI_WB_GRANT_61_DED_COUNT_MASK 0x0000C000L +#define SPI_EDC_CNT__SPI_LIFE_CNT_SEC_COUNT_MASK 0x00030000L +#define SPI_EDC_CNT__SPI_LIFE_CNT_DED_COUNT_MASK 0x000C0000L + +// addressBlock: gc_sqdec +//SQC_EDC_CNT2 +#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_SEC_COUNT__SHIFT 0x0 +#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_DED_COUNT__SHIFT 0x2 +#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_SEC_COUNT__SHIFT 0x4 +#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_DED_COUNT__SHIFT 0x6 +#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_SEC_COUNT__SHIFT 0x8 +#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_DED_COUNT__SHIFT 0xa +#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_SEC_COUNT__SHIFT 0xc +#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_DED_COUNT__SHIFT 0xe +#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x10 +#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_DED_COUNT__SHIFT 0x12 +#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_SEC_COUNT_MASK 0x00000003L +#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_DED_COUNT_MASK 0x0000000CL +#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_SEC_COUNT_MASK 0x00000030L +#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_DED_COUNT_MASK 0x000000C0L +#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_SEC_COUNT_MASK 0x00000300L +#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_DED_COUNT_MASK 0x00000C00L +#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_SEC_COUNT_MASK 0x00003000L +#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_DED_COUNT_MASK 0x0000C000L +#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_SEC_COUNT_MASK 0x00030000L +#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_DED_COUNT_MASK 0x000C0000L +//SQC_EDC_CNT3 +#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_SEC_COUNT__SHIFT 0x0 +#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_DED_COUNT__SHIFT 0x2 +#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_SEC_COUNT__SHIFT 0x4 +#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_DED_COUNT__SHIFT 0x6 +#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_SEC_COUNT__SHIFT 0x8 +#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_DED_COUNT__SHIFT 0xa +#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_SEC_COUNT__SHIFT 0xc +#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_DED_COUNT__SHIFT 0xe +#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_SEC_COUNT_MASK 0x00000003L +#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_DED_COUNT_MASK 0x0000000CL +#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_SEC_COUNT_MASK 0x00000030L +#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_DED_COUNT_MASK 0x000000C0L +#define 
SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_SEC_COUNT_MASK 0x00000300L +#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_DED_COUNT_MASK 0x00000C00L +#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_SEC_COUNT_MASK 0x00003000L +#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_DED_COUNT_MASK 0x0000C000L +//SQC_EDC_PARITY_CNT3 +#define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_SEC_COUNT__SHIFT 0x0 +#define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_DED_COUNT__SHIFT 0x2 +#define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_SEC_COUNT__SHIFT 0x4 +#define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_DED_COUNT__SHIFT 0x6 +#define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_SEC_COUNT__SHIFT 0x8 +#define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_DED_COUNT__SHIFT 0xa +#define SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_SEC_COUNT__SHIFT 0xc +#define SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_DED_COUNT__SHIFT 0xe +#define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_SEC_COUNT__SHIFT 0x10 +#define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_DED_COUNT__SHIFT 0x12 +#define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_SEC_COUNT__SHIFT 0x14 +#define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_DED_COUNT__SHIFT 0x16 +#define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_SEC_COUNT__SHIFT 0x18 +#define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_DED_COUNT__SHIFT 0x1a +#define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_SEC_COUNT__SHIFT 0x1c +#define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_DED_COUNT__SHIFT 0x1e +#define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_SEC_COUNT_MASK 0x00000003L +#define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_DED_COUNT_MASK 0x0000000CL +#define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_SEC_COUNT_MASK 0x00000030L +#define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_DED_COUNT_MASK 0x000000C0L +#define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_SEC_COUNT_MASK 0x00000300L +#define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_DED_COUNT_MASK 0x00000C00L +#define SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_SEC_COUNT_MASK 0x00003000L +#define SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_DED_COUNT_MASK 0x0000C000L +#define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_SEC_COUNT_MASK 0x00030000L +#define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_DED_COUNT_MASK 0x000C0000L +#define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_SEC_COUNT_MASK 0x00300000L +#define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_DED_COUNT_MASK 0x00C00000L +#define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_SEC_COUNT_MASK 0x03000000L +#define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_DED_COUNT_MASK 0x0C000000L +#define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_SEC_COUNT_MASK 0x30000000L +#define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_DED_COUNT_MASK 0xC0000000L +//SQC_EDC_CNT +#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x0 +#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_DED_COUNT__SHIFT 0x2 +#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x4 +#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_DED_COUNT__SHIFT 0x6 +#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x8 +#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_DED_COUNT__SHIFT 0xa +#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_SEC_COUNT__SHIFT 0xc +#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_DED_COUNT__SHIFT 0xe +#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x10 +#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_DED_COUNT__SHIFT 0x12 +#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x14 +#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_DED_COUNT__SHIFT 0x16 +#define 
SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x18 +#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_DED_COUNT__SHIFT 0x1a +#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x1c +#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_DED_COUNT__SHIFT 0x1e +#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_SEC_COUNT_MASK 0x00000003L +#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_DED_COUNT_MASK 0x0000000CL +#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_SEC_COUNT_MASK 0x00000030L +#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_DED_COUNT_MASK 0x000000C0L +#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_SEC_COUNT_MASK 0x00000300L +#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_DED_COUNT_MASK 0x00000C00L +#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_SEC_COUNT_MASK 0x00003000L +#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_DED_COUNT_MASK 0x0000C000L +#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_SEC_COUNT_MASK 0x00030000L +#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_DED_COUNT_MASK 0x000C0000L +#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_SEC_COUNT_MASK 0x00300000L +#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_DED_COUNT_MASK 0x00C00000L +#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_SEC_COUNT_MASK 0x03000000L +#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_DED_COUNT_MASK 0x0C000000L +#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_SEC_COUNT_MASK 0x30000000L +#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_DED_COUNT_MASK 0xC0000000L +//SQ_EDC_SEC_CNT +#define SQ_EDC_SEC_CNT__LDS_SEC__SHIFT 0x0 +#define SQ_EDC_SEC_CNT__SGPR_SEC__SHIFT 0x8 +#define SQ_EDC_SEC_CNT__VGPR_SEC__SHIFT 0x10 +#define SQ_EDC_SEC_CNT__LDS_SEC_MASK 0x000000FFL +#define SQ_EDC_SEC_CNT__SGPR_SEC_MASK 0x0000FF00L +#define SQ_EDC_SEC_CNT__VGPR_SEC_MASK 0x00FF0000L +//SQ_EDC_DED_CNT +#define SQ_EDC_DED_CNT__LDS_DED__SHIFT 0x0 +#define SQ_EDC_DED_CNT__SGPR_DED__SHIFT 0x8 +#define SQ_EDC_DED_CNT__VGPR_DED__SHIFT 0x10 +#define SQ_EDC_DED_CNT__LDS_DED_MASK 0x000000FFL +#define SQ_EDC_DED_CNT__SGPR_DED_MASK 0x0000FF00L +#define SQ_EDC_DED_CNT__VGPR_DED_MASK 0x00FF0000L +//SQ_EDC_INFO +#define SQ_EDC_INFO__WAVE_ID__SHIFT 0x0 +#define SQ_EDC_INFO__SIMD_ID__SHIFT 0x4 +#define SQ_EDC_INFO__SOURCE__SHIFT 0x6 +#define SQ_EDC_INFO__VM_ID__SHIFT 0x9 +#define SQ_EDC_INFO__WAVE_ID_MASK 0x0000000FL +#define SQ_EDC_INFO__SIMD_ID_MASK 0x00000030L +#define SQ_EDC_INFO__SOURCE_MASK 0x000001C0L +#define SQ_EDC_INFO__VM_ID_MASK 0x00001E00L +//SQ_EDC_CNT +#define SQ_EDC_CNT__LDS_D_SEC_COUNT__SHIFT 0x0 +#define SQ_EDC_CNT__LDS_D_DED_COUNT__SHIFT 0x2 +#define SQ_EDC_CNT__LDS_I_SEC_COUNT__SHIFT 0x4 +#define SQ_EDC_CNT__LDS_I_DED_COUNT__SHIFT 0x6 +#define SQ_EDC_CNT__SGPR_SEC_COUNT__SHIFT 0x8 +#define SQ_EDC_CNT__SGPR_DED_COUNT__SHIFT 0xa +#define SQ_EDC_CNT__VGPR0_SEC_COUNT__SHIFT 0xc +#define SQ_EDC_CNT__VGPR0_DED_COUNT__SHIFT 0xe +#define SQ_EDC_CNT__VGPR1_SEC_COUNT__SHIFT 0x10 +#define SQ_EDC_CNT__VGPR1_DED_COUNT__SHIFT 0x12 +#define SQ_EDC_CNT__VGPR2_SEC_COUNT__SHIFT 0x14 +#define SQ_EDC_CNT__VGPR2_DED_COUNT__SHIFT 0x16 +#define SQ_EDC_CNT__VGPR3_SEC_COUNT__SHIFT 0x18 +#define SQ_EDC_CNT__VGPR3_DED_COUNT__SHIFT 0x1a +#define SQ_EDC_CNT__LDS_D_SEC_COUNT_MASK 0x00000003L +#define SQ_EDC_CNT__LDS_D_DED_COUNT_MASK 0x0000000CL +#define SQ_EDC_CNT__LDS_I_SEC_COUNT_MASK 0x00000030L +#define SQ_EDC_CNT__LDS_I_DED_COUNT_MASK 0x000000C0L +#define SQ_EDC_CNT__SGPR_SEC_COUNT_MASK 0x00000300L +#define SQ_EDC_CNT__SGPR_DED_COUNT_MASK 0x00000C00L +#define SQ_EDC_CNT__VGPR0_SEC_COUNT_MASK 0x00003000L +#define SQ_EDC_CNT__VGPR0_DED_COUNT_MASK 0x0000C000L +#define SQ_EDC_CNT__VGPR1_SEC_COUNT_MASK 0x00030000L +#define 
SQ_EDC_CNT__VGPR1_DED_COUNT_MASK 0x000C0000L +#define SQ_EDC_CNT__VGPR2_SEC_COUNT_MASK 0x00300000L +#define SQ_EDC_CNT__VGPR2_DED_COUNT_MASK 0x00C00000L +#define SQ_EDC_CNT__VGPR3_SEC_COUNT_MASK 0x03000000L +#define SQ_EDC_CNT__VGPR3_DED_COUNT_MASK 0x0C000000L + +// addressBlock: gc_tpdec +//TA_EDC_CNT +#define TA_EDC_CNT__TA_FS_DFIFO_SEC_COUNT__SHIFT 0x0 +#define TA_EDC_CNT__TA_FS_DFIFO_DED_COUNT__SHIFT 0x2 +#define TA_EDC_CNT__TA_FS_AFIFO_SEC_COUNT__SHIFT 0x4 +#define TA_EDC_CNT__TA_FS_AFIFO_DED_COUNT__SHIFT 0x6 +#define TA_EDC_CNT__TA_FL_LFIFO_SEC_COUNT__SHIFT 0x8 +#define TA_EDC_CNT__TA_FL_LFIFO_DED_COUNT__SHIFT 0xa +#define TA_EDC_CNT__TA_FX_LFIFO_SEC_COUNT__SHIFT 0xc +#define TA_EDC_CNT__TA_FX_LFIFO_DED_COUNT__SHIFT 0xe +#define TA_EDC_CNT__TA_FS_CFIFO_SEC_COUNT__SHIFT 0x10 +#define TA_EDC_CNT__TA_FS_CFIFO_DED_COUNT__SHIFT 0x12 +#define TA_EDC_CNT__TA_FS_DFIFO_SEC_COUNT_MASK 0x00000003L +#define TA_EDC_CNT__TA_FS_DFIFO_DED_COUNT_MASK 0x0000000CL +#define TA_EDC_CNT__TA_FS_AFIFO_SEC_COUNT_MASK 0x00000030L +#define TA_EDC_CNT__TA_FS_AFIFO_DED_COUNT_MASK 0x000000C0L +#define TA_EDC_CNT__TA_FL_LFIFO_SEC_COUNT_MASK 0x00000300L +#define TA_EDC_CNT__TA_FL_LFIFO_DED_COUNT_MASK 0x00000C00L +#define TA_EDC_CNT__TA_FX_LFIFO_SEC_COUNT_MASK 0x00003000L +#define TA_EDC_CNT__TA_FX_LFIFO_DED_COUNT_MASK 0x0000C000L +#define TA_EDC_CNT__TA_FS_CFIFO_SEC_COUNT_MASK 0x00030000L +#define TA_EDC_CNT__TA_FS_CFIFO_DED_COUNT_MASK 0x000C0000L + +// addressBlock: gc_tcdec +//TCP_EDC_CNT +#define TCP_EDC_CNT__SEC_COUNT__SHIFT 0x0 +#define TCP_EDC_CNT__LFIFO_SED_COUNT__SHIFT 0x8 +#define TCP_EDC_CNT__DED_COUNT__SHIFT 0x10 +#define TCP_EDC_CNT__SEC_COUNT_MASK 0x000000FFL +#define TCP_EDC_CNT__LFIFO_SED_COUNT_MASK 0x0000FF00L +#define TCP_EDC_CNT__DED_COUNT_MASK 0x00FF0000L +//TCP_EDC_CNT_NEW +#define TCP_EDC_CNT_NEW__CACHE_RAM_SEC_COUNT__SHIFT 0x0 +#define TCP_EDC_CNT_NEW__CACHE_RAM_DED_COUNT__SHIFT 0x2 +#define TCP_EDC_CNT_NEW__LFIFO_RAM_SEC_COUNT__SHIFT 0x4 +#define TCP_EDC_CNT_NEW__LFIFO_RAM_DED_COUNT__SHIFT 0x6 +#define TCP_EDC_CNT_NEW__CMD_FIFO_SEC_COUNT__SHIFT 0x8 +#define TCP_EDC_CNT_NEW__CMD_FIFO_DED_COUNT__SHIFT 0xa +#define TCP_EDC_CNT_NEW__VM_FIFO_SEC_COUNT__SHIFT 0xc +#define TCP_EDC_CNT_NEW__VM_FIFO_DED_COUNT__SHIFT 0xe +#define TCP_EDC_CNT_NEW__DB_RAM_SED_COUNT__SHIFT 0x10 +#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_SEC_COUNT__SHIFT 0x12 +#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_DED_COUNT__SHIFT 0x14 +#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_SEC_COUNT__SHIFT 0x16 +#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_DED_COUNT__SHIFT 0x18 +#define TCP_EDC_CNT_NEW__CACHE_RAM_SEC_COUNT_MASK 0x00000003L +#define TCP_EDC_CNT_NEW__CACHE_RAM_DED_COUNT_MASK 0x0000000CL +#define TCP_EDC_CNT_NEW__LFIFO_RAM_SEC_COUNT_MASK 0x00000030L +#define TCP_EDC_CNT_NEW__LFIFO_RAM_DED_COUNT_MASK 0x000000C0L +#define TCP_EDC_CNT_NEW__CMD_FIFO_SEC_COUNT_MASK 0x00000300L +#define TCP_EDC_CNT_NEW__CMD_FIFO_DED_COUNT_MASK 0x00000C00L +#define TCP_EDC_CNT_NEW__VM_FIFO_SEC_COUNT_MASK 0x00003000L +#define TCP_EDC_CNT_NEW__VM_FIFO_DED_COUNT_MASK 0x0000C000L +#define TCP_EDC_CNT_NEW__DB_RAM_SED_COUNT_MASK 0x00030000L +#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_SEC_COUNT_MASK 0x000C0000L +#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_DED_COUNT_MASK 0x00300000L +#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_SEC_COUNT_MASK 0x00C00000L +#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_DED_COUNT_MASK 0x03000000L +//TCP_ATC_EDC_GATCL1_CNT +#define TCP_ATC_EDC_GATCL1_CNT__DATA_SEC__SHIFT 0x0 +#define TCP_ATC_EDC_GATCL1_CNT__DATA_SEC_MASK 0x000000FFL +//TCI_EDC_CNT +#define 
TCI_EDC_CNT__WRITE_RAM_SEC_COUNT__SHIFT 0x0 +#define TCI_EDC_CNT__WRITE_RAM_DED_COUNT__SHIFT 0x2 +#define TCI_EDC_CNT__WRITE_RAM_SEC_COUNT_MASK 0x00000003L +#define TCI_EDC_CNT__WRITE_RAM_DED_COUNT_MASK 0x0000000CL +//TCA_EDC_CNT +#define TCA_EDC_CNT__HOLE_FIFO_SEC_COUNT__SHIFT 0x0 +#define TCA_EDC_CNT__HOLE_FIFO_DED_COUNT__SHIFT 0x2 +#define TCA_EDC_CNT__REQ_FIFO_SEC_COUNT__SHIFT 0x4 +#define TCA_EDC_CNT__REQ_FIFO_DED_COUNT__SHIFT 0x6 +#define TCA_EDC_CNT__HOLE_FIFO_SEC_COUNT_MASK 0x00000003L +#define TCA_EDC_CNT__HOLE_FIFO_DED_COUNT_MASK 0x0000000CL +#define TCA_EDC_CNT__REQ_FIFO_SEC_COUNT_MASK 0x00000030L +#define TCA_EDC_CNT__REQ_FIFO_DED_COUNT_MASK 0x000000C0L +//TCC_EDC_CNT +#define TCC_EDC_CNT__CACHE_DATA_SEC_COUNT__SHIFT 0x0 +#define TCC_EDC_CNT__CACHE_DATA_DED_COUNT__SHIFT 0x2 +#define TCC_EDC_CNT__CACHE_DIRTY_SEC_COUNT__SHIFT 0x4 +#define TCC_EDC_CNT__CACHE_DIRTY_DED_COUNT__SHIFT 0x6 +#define TCC_EDC_CNT__HIGH_RATE_TAG_SEC_COUNT__SHIFT 0x8 +#define TCC_EDC_CNT__HIGH_RATE_TAG_DED_COUNT__SHIFT 0xa +#define TCC_EDC_CNT__LOW_RATE_TAG_SEC_COUNT__SHIFT 0xc +#define TCC_EDC_CNT__LOW_RATE_TAG_DED_COUNT__SHIFT 0xe +#define TCC_EDC_CNT__SRC_FIFO_SEC_COUNT__SHIFT 0x10 +#define TCC_EDC_CNT__SRC_FIFO_DED_COUNT__SHIFT 0x12 +#define TCC_EDC_CNT__LATENCY_FIFO_SEC_COUNT__SHIFT 0x14 +#define TCC_EDC_CNT__LATENCY_FIFO_DED_COUNT__SHIFT 0x16 +#define TCC_EDC_CNT__LATENCY_FIFO_NEXT_RAM_SEC_COUNT__SHIFT 0x18 +#define TCC_EDC_CNT__LATENCY_FIFO_NEXT_RAM_DED_COUNT__SHIFT 0x1a +#define TCC_EDC_CNT__CACHE_DATA_SEC_COUNT_MASK 0x00000003L +#define TCC_EDC_CNT__CACHE_DATA_DED_COUNT_MASK 0x0000000CL +#define TCC_EDC_CNT__CACHE_DIRTY_SEC_COUNT_MASK 0x00000030L +#define TCC_EDC_CNT__CACHE_DIRTY_DED_COUNT_MASK 0x000000C0L +#define TCC_EDC_CNT__HIGH_RATE_TAG_SEC_COUNT_MASK 0x00000300L +#define TCC_EDC_CNT__HIGH_RATE_TAG_DED_COUNT_MASK 0x00000C00L +#define TCC_EDC_CNT__LOW_RATE_TAG_SEC_COUNT_MASK 0x00003000L +#define TCC_EDC_CNT__LOW_RATE_TAG_DED_COUNT_MASK 0x0000C000L +#define TCC_EDC_CNT__SRC_FIFO_SEC_COUNT_MASK 0x00030000L +#define TCC_EDC_CNT__SRC_FIFO_DED_COUNT_MASK 0x000C0000L +#define TCC_EDC_CNT__LATENCY_FIFO_SEC_COUNT_MASK 0x00300000L +#define TCC_EDC_CNT__LATENCY_FIFO_DED_COUNT_MASK 0x00C00000L +#define TCC_EDC_CNT__LATENCY_FIFO_NEXT_RAM_SEC_COUNT_MASK 0x03000000L +#define TCC_EDC_CNT__LATENCY_FIFO_NEXT_RAM_DED_COUNT_MASK 0x0C000000L +//TCC_EDC_CNT2 +#define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_SEC_COUNT__SHIFT 0x0 +#define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_DED_COUNT__SHIFT 0x2 +#define TCC_EDC_CNT2__UC_ATOMIC_FIFO_SEC_COUNT__SHIFT 0x4 +#define TCC_EDC_CNT2__UC_ATOMIC_FIFO_DED_COUNT__SHIFT 0x6 +#define TCC_EDC_CNT2__WRITE_CACHE_READ_SEC_COUNT__SHIFT 0x8 +#define TCC_EDC_CNT2__WRITE_CACHE_READ_DED_COUNT__SHIFT 0xa +#define TCC_EDC_CNT2__RETURN_CONTROL_SEC_COUNT__SHIFT 0xc +#define TCC_EDC_CNT2__RETURN_CONTROL_DED_COUNT__SHIFT 0xe +#define TCC_EDC_CNT2__IN_USE_TRANSFER_SEC_COUNT__SHIFT 0x10 +#define TCC_EDC_CNT2__IN_USE_TRANSFER_DED_COUNT__SHIFT 0x12 +#define TCC_EDC_CNT2__IN_USE_DEC_SEC_COUNT__SHIFT 0x14 +#define TCC_EDC_CNT2__IN_USE_DEC_DED_COUNT__SHIFT 0x16 +#define TCC_EDC_CNT2__WRITE_RETURN_SEC_COUNT__SHIFT 0x18 +#define TCC_EDC_CNT2__WRITE_RETURN_DED_COUNT__SHIFT 0x1a +#define TCC_EDC_CNT2__RETURN_DATA_SEC_COUNT__SHIFT 0x1c +#define TCC_EDC_CNT2__RETURN_DATA_DED_COUNT__SHIFT 0x1e +#define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_SEC_COUNT_MASK 0x00000003L +#define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_DED_COUNT_MASK 0x0000000CL +#define TCC_EDC_CNT2__UC_ATOMIC_FIFO_SEC_COUNT_MASK 0x00000030L +#define 
TCC_EDC_CNT2__UC_ATOMIC_FIFO_DED_COUNT_MASK 0x000000C0L +#define TCC_EDC_CNT2__WRITE_CACHE_READ_SEC_COUNT_MASK 0x00000300L +#define TCC_EDC_CNT2__WRITE_CACHE_READ_DED_COUNT_MASK 0x00000C00L +#define TCC_EDC_CNT2__RETURN_CONTROL_SEC_COUNT_MASK 0x00003000L +#define TCC_EDC_CNT2__RETURN_CONTROL_DED_COUNT_MASK 0x0000C000L +#define TCC_EDC_CNT2__IN_USE_TRANSFER_SEC_COUNT_MASK 0x00030000L +#define TCC_EDC_CNT2__IN_USE_TRANSFER_DED_COUNT_MASK 0x000C0000L +#define TCC_EDC_CNT2__IN_USE_DEC_SEC_COUNT_MASK 0x00300000L +#define TCC_EDC_CNT2__IN_USE_DEC_DED_COUNT_MASK 0x00C00000L +#define TCC_EDC_CNT2__WRITE_RETURN_SEC_COUNT_MASK 0x03000000L +#define TCC_EDC_CNT2__WRITE_RETURN_DED_COUNT_MASK 0x0C000000L +#define TCC_EDC_CNT2__RETURN_DATA_SEC_COUNT_MASK 0x30000000L +#define TCC_EDC_CNT2__RETURN_DATA_DED_COUNT_MASK 0xC0000000L + +// addressBlock: gc_tpdec +//TD_EDC_CNT +#define TD_EDC_CNT__SS_FIFO_LO_SEC_COUNT__SHIFT 0x0 +#define TD_EDC_CNT__SS_FIFO_LO_DED_COUNT__SHIFT 0x2 +#define TD_EDC_CNT__SS_FIFO_HI_SEC_COUNT__SHIFT 0x4 +#define TD_EDC_CNT__SS_FIFO_HI_DED_COUNT__SHIFT 0x6 +#define TD_EDC_CNT__CS_FIFO_SEC_COUNT__SHIFT 0x8 +#define TD_EDC_CNT__CS_FIFO_DED_COUNT__SHIFT 0xa +#define TD_EDC_CNT__SS_FIFO_LO_SEC_COUNT_MASK 0x00000003L +#define TD_EDC_CNT__SS_FIFO_LO_DED_COUNT_MASK 0x0000000CL +#define TD_EDC_CNT__SS_FIFO_HI_SEC_COUNT_MASK 0x00000030L +#define TD_EDC_CNT__SS_FIFO_HI_DED_COUNT_MASK 0x000000C0L +#define TD_EDC_CNT__CS_FIFO_SEC_COUNT_MASK 0x00000300L +#define TD_EDC_CNT__CS_FIFO_DED_COUNT_MASK 0x00000C00L +//TA_EDC_CNT +#define TA_EDC_CNT__TA_FS_DFIFO_SEC_COUNT__SHIFT 0x0 +#define TA_EDC_CNT__TA_FS_DFIFO_DED_COUNT__SHIFT 0x2 +#define TA_EDC_CNT__TA_FS_AFIFO_SEC_COUNT__SHIFT 0x4 +#define TA_EDC_CNT__TA_FS_AFIFO_DED_COUNT__SHIFT 0x6 +#define TA_EDC_CNT__TA_FL_LFIFO_SEC_COUNT__SHIFT 0x8 +#define TA_EDC_CNT__TA_FL_LFIFO_DED_COUNT__SHIFT 0xa +#define TA_EDC_CNT__TA_FX_LFIFO_SEC_COUNT__SHIFT 0xc +#define TA_EDC_CNT__TA_FX_LFIFO_DED_COUNT__SHIFT 0xe +#define TA_EDC_CNT__TA_FS_CFIFO_SEC_COUNT__SHIFT 0x10 +#define TA_EDC_CNT__TA_FS_CFIFO_DED_COUNT__SHIFT 0x12 +#define TA_EDC_CNT__TA_FS_DFIFO_SEC_COUNT_MASK 0x00000003L +#define TA_EDC_CNT__TA_FS_DFIFO_DED_COUNT_MASK 0x0000000CL +#define TA_EDC_CNT__TA_FS_AFIFO_SEC_COUNT_MASK 0x00000030L +#define TA_EDC_CNT__TA_FS_AFIFO_DED_COUNT_MASK 0x000000C0L +#define TA_EDC_CNT__TA_FL_LFIFO_SEC_COUNT_MASK 0x00000300L +#define TA_EDC_CNT__TA_FL_LFIFO_DED_COUNT_MASK 0x00000C00L +#define TA_EDC_CNT__TA_FX_LFIFO_SEC_COUNT_MASK 0x00003000L +#define TA_EDC_CNT__TA_FX_LFIFO_DED_COUNT_MASK 0x0000C000L +#define TA_EDC_CNT__TA_FS_CFIFO_SEC_COUNT_MASK 0x00030000L +#define TA_EDC_CNT__TA_FS_CFIFO_DED_COUNT_MASK 0x000C0000L + +// addressBlock: gc_ea_gceadec2 +//GCEA_EDC_CNT +#define GCEA_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0 +#define GCEA_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2 +#define GCEA_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4 +#define GCEA_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define GCEA_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8 +#define GCEA_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa +#define GCEA_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc +#define GCEA_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe +#define GCEA_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10 +#define GCEA_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12 +#define GCEA_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14 +#define GCEA_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16 +#define GCEA_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x18 +#define GCEA_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a +#define 
GCEA_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c +#define GCEA_EDC_CNT__MAM_AFMEM_SEC_COUNT__SHIFT 0x1e +#define GCEA_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L +#define GCEA_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL +#define GCEA_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L +#define GCEA_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define GCEA_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L +#define GCEA_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L +#define GCEA_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L +#define GCEA_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L +#define GCEA_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L +#define GCEA_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L +#define GCEA_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L +#define GCEA_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L +#define GCEA_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L +#define GCEA_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L +#define GCEA_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L +#define GCEA_EDC_CNT__MAM_AFMEM_SEC_COUNT_MASK 0xC0000000L +//GCEA_EDC_CNT2 +#define GCEA_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0 +#define GCEA_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2 +#define GCEA_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4 +#define GCEA_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define GCEA_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8 +#define GCEA_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa +#define GCEA_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc +#define GCEA_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define GCEA_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 +#define GCEA_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 +#define GCEA_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 +#define GCEA_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 +#define GCEA_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 +#define GCEA_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a +#define GCEA_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c +#define GCEA_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e +#define GCEA_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L +#define GCEA_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL +#define GCEA_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L +#define GCEA_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define GCEA_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L +#define GCEA_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L +#define GCEA_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L +#define GCEA_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +#define GCEA_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L +#define GCEA_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L +#define GCEA_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L +#define GCEA_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L +#define GCEA_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L +#define GCEA_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L +#define GCEA_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L +#define GCEA_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L +//GCEA_EDC_CNT3 +#define GCEA_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT__SHIFT 0x0 +#define GCEA_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT__SHIFT 0x2 +#define GCEA_EDC_CNT3__IORD_CMDMEM_DED_COUNT__SHIFT 0x4 +#define GCEA_EDC_CNT3__IOWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define GCEA_EDC_CNT3__IOWR_DATAMEM_DED_COUNT__SHIFT 0x8 +#define GCEA_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT__SHIFT 0xa +#define GCEA_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT__SHIFT 0xc +#define GCEA_EDC_CNT3__MAM_AFMEM_DED_COUNT__SHIFT 0xe +#define 
GCEA_EDC_CNT3__MAM_A0MEM_SEC_COUNT__SHIFT 0x10 +#define GCEA_EDC_CNT3__MAM_A0MEM_DED_COUNT__SHIFT 0x12 +#define GCEA_EDC_CNT3__MAM_A1MEM_SEC_COUNT__SHIFT 0x14 +#define GCEA_EDC_CNT3__MAM_A1MEM_DED_COUNT__SHIFT 0x16 +#define GCEA_EDC_CNT3__MAM_A2MEM_SEC_COUNT__SHIFT 0x18 +#define GCEA_EDC_CNT3__MAM_A2MEM_DED_COUNT__SHIFT 0x1a +#define GCEA_EDC_CNT3__MAM_A3MEM_SEC_COUNT__SHIFT 0x1c +#define GCEA_EDC_CNT3__MAM_A3MEM_DED_COUNT__SHIFT 0x1e +#define GCEA_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT_MASK 0x00000003L +#define GCEA_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT_MASK 0x0000000CL +#define GCEA_EDC_CNT3__IORD_CMDMEM_DED_COUNT_MASK 0x00000030L +#define GCEA_EDC_CNT3__IOWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define GCEA_EDC_CNT3__IOWR_DATAMEM_DED_COUNT_MASK 0x00000300L +#define GCEA_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT_MASK 0x00000C00L +#define GCEA_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT_MASK 0x00003000L +#define GCEA_EDC_CNT3__MAM_AFMEM_DED_COUNT_MASK 0x0000C000L +#define GCEA_EDC_CNT3__MAM_A0MEM_SEC_COUNT_MASK 0x00030000L +#define GCEA_EDC_CNT3__MAM_A0MEM_DED_COUNT_MASK 0x000C0000L +#define GCEA_EDC_CNT3__MAM_A1MEM_SEC_COUNT_MASK 0x00300000L +#define GCEA_EDC_CNT3__MAM_A1MEM_DED_COUNT_MASK 0x00C00000L +#define GCEA_EDC_CNT3__MAM_A2MEM_SEC_COUNT_MASK 0x03000000L +#define GCEA_EDC_CNT3__MAM_A2MEM_DED_COUNT_MASK 0x0C000000L +#define GCEA_EDC_CNT3__MAM_A3MEM_SEC_COUNT_MASK 0x30000000L +#define GCEA_EDC_CNT3__MAM_A3MEM_DED_COUNT_MASK 0xC0000000L + +// addressBlock: gc_gfxudec +//GRBM_GFX_INDEX +#define GRBM_GFX_INDEX__INSTANCE_INDEX__SHIFT 0x0 +#define GRBM_GFX_INDEX__SH_INDEX__SHIFT 0x8 +#define GRBM_GFX_INDEX__SE_INDEX__SHIFT 0x10 +#define GRBM_GFX_INDEX__SH_BROADCAST_WRITES__SHIFT 0x1d +#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES__SHIFT 0x1e +#define GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT 0x1f +#define GRBM_GFX_INDEX__INSTANCE_INDEX_MASK 0x000000FFL +#define GRBM_GFX_INDEX__SH_INDEX_MASK 0x0000FF00L +#define GRBM_GFX_INDEX__SE_INDEX_MASK 0x00FF0000L +#define GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK 0x20000000L +#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK 0x40000000L +#define GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK 0x80000000L + +// addressBlock: gc_utcl2_atcl2dec +//ATC_L2_CNTL +//ATC_L2_CACHE_4K_DSM_INDEX +#define ATC_L2_CACHE_4K_DSM_INDEX__INDEX__SHIFT 0x0 +#define ATC_L2_CACHE_4K_DSM_INDEX__INDEX_MASK 0x000000FFL +//ATC_L2_CACHE_2M_DSM_INDEX +#define ATC_L2_CACHE_2M_DSM_INDEX__INDEX__SHIFT 0x0 +#define ATC_L2_CACHE_2M_DSM_INDEX__INDEX_MASK 0x000000FFL +//ATC_L2_CACHE_4K_DSM_CNTL +#define ATC_L2_CACHE_4K_DSM_CNTL__SEC_COUNT__SHIFT 0xd +#define ATC_L2_CACHE_4K_DSM_CNTL__DED_COUNT__SHIFT 0xf +#define ATC_L2_CACHE_4K_DSM_CNTL__SEC_COUNT_MASK 0x00006000L +#define ATC_L2_CACHE_4K_DSM_CNTL__DED_COUNT_MASK 0x00018000L +//ATC_L2_CACHE_2M_DSM_CNTL +#define ATC_L2_CACHE_2M_DSM_CNTL__SEC_COUNT__SHIFT 0xd +#define ATC_L2_CACHE_2M_DSM_CNTL__DED_COUNT__SHIFT 0xf +#define ATC_L2_CACHE_2M_DSM_CNTL__SEC_COUNT_MASK 0x00006000L +#define ATC_L2_CACHE_2M_DSM_CNTL__DED_COUNT_MASK 0x00018000L + +// addressBlock: gc_utcl2_vml2pfdec +//VML2_MEM_ECC_INDEX +#define VML2_MEM_ECC_INDEX__INDEX__SHIFT 0x0 +#define VML2_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL +//VML2_WALKER_MEM_ECC_INDEX +#define VML2_WALKER_MEM_ECC_INDEX__INDEX__SHIFT 0x0 +#define VML2_WALKER_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL +//UTCL2_MEM_ECC_INDEX +#define UTCL2_MEM_ECC_INDEX__INDEX__SHIFT 0x0 +#define UTCL2_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL +//VML2_MEM_ECC_CNTL +#define VML2_MEM_ECC_CNTL__SEC_COUNT__SHIFT 0xc +#define 
VML2_MEM_ECC_CNTL__DED_COUNT__SHIFT 0xe +#define VML2_MEM_ECC_CNTL__SEC_COUNT_MASK 0x00003000L +#define VML2_MEM_ECC_CNTL__DED_COUNT_MASK 0x0000C000L +//VML2_WALKER_MEM_ECC_CNTL +#define VML2_WALKER_MEM_ECC_CNTL__SEC_COUNT__SHIFT 0xc +#define VML2_WALKER_MEM_ECC_CNTL__DED_COUNT__SHIFT 0xe +#define VML2_WALKER_MEM_ECC_CNTL__SEC_COUNT_MASK 0x00003000L +#define VML2_WALKER_MEM_ECC_CNTL__DED_COUNT_MASK 0x0000C000L +//UTCL2_MEM_ECC_CNTL +#define UTCL2_MEM_ECC_CNTL__SEC_COUNT__SHIFT 0xc +#define UTCL2_MEM_ECC_CNTL__DED_COUNT__SHIFT 0xe +#define UTCL2_MEM_ECC_CNTL__SEC_COUNT_MASK 0x00003000L +#define UTCL2_MEM_ECC_CNTL__DED_COUNT_MASK 0x0000C000L + +// addressBlock: gc_rlcpdec +//RLC_EDC_CNT +#define RLC_EDC_CNT__RLCG_INSTR_RAM_SEC_COUNT__SHIFT 0x0 +#define RLC_EDC_CNT__RLCG_INSTR_RAM_DED_COUNT__SHIFT 0x2 +#define RLC_EDC_CNT__RLCG_SCRATCH_RAM_SEC_COUNT__SHIFT 0x4 +#define RLC_EDC_CNT__RLCG_SCRATCH_RAM_DED_COUNT__SHIFT 0x6 +#define RLC_EDC_CNT__RLCV_INSTR_RAM_SEC_COUNT__SHIFT 0x8 +#define RLC_EDC_CNT__RLCV_INSTR_RAM_DED_COUNT__SHIFT 0xa +#define RLC_EDC_CNT__RLCV_SCRATCH_RAM_SEC_COUNT__SHIFT 0xc +#define RLC_EDC_CNT__RLCV_SCRATCH_RAM_DED_COUNT__SHIFT 0xe +#define RLC_EDC_CNT__RLC_TCTAG_RAM_SEC_COUNT__SHIFT 0x10 +#define RLC_EDC_CNT__RLC_TCTAG_RAM_DED_COUNT__SHIFT 0x12 +#define RLC_EDC_CNT__RLC_SPM_SCRATCH_RAM_SEC_COUNT__SHIFT 0x14 +#define RLC_EDC_CNT__RLC_SPM_SCRATCH_RAM_DED_COUNT__SHIFT 0x16 +#define RLC_EDC_CNT__RLC_SRM_DATA_RAM_SEC_COUNT__SHIFT 0x18 +#define RLC_EDC_CNT__RLC_SRM_DATA_RAM_DED_COUNT__SHIFT 0x1a +#define RLC_EDC_CNT__RLC_SRM_ADDR_RAM_SEC_COUNT__SHIFT 0x1c +#define RLC_EDC_CNT__RLC_SRM_ADDR_RAM_DED_COUNT__SHIFT 0x1e +#define RLC_EDC_CNT__RLCG_INSTR_RAM_SEC_COUNT_MASK 0x00000003L +#define RLC_EDC_CNT__RLCG_INSTR_RAM_DED_COUNT_MASK 0x0000000CL +#define RLC_EDC_CNT__RLCG_SCRATCH_RAM_SEC_COUNT_MASK 0x00000030L +#define RLC_EDC_CNT__RLCG_SCRATCH_RAM_DED_COUNT_MASK 0x000000C0L +#define RLC_EDC_CNT__RLCV_INSTR_RAM_SEC_COUNT_MASK 0x00000300L +#define RLC_EDC_CNT__RLCV_INSTR_RAM_DED_COUNT_MASK 0x00000C00L +#define RLC_EDC_CNT__RLCV_SCRATCH_RAM_SEC_COUNT_MASK 0x00003000L +#define RLC_EDC_CNT__RLCV_SCRATCH_RAM_DED_COUNT_MASK 0x0000C000L +#define RLC_EDC_CNT__RLC_TCTAG_RAM_SEC_COUNT_MASK 0x00030000L +#define RLC_EDC_CNT__RLC_TCTAG_RAM_DED_COUNT_MASK 0x000C0000L +#define RLC_EDC_CNT__RLC_SPM_SCRATCH_RAM_SEC_COUNT_MASK 0x00300000L +#define RLC_EDC_CNT__RLC_SPM_SCRATCH_RAM_DED_COUNT_MASK 0x00C00000L +#define RLC_EDC_CNT__RLC_SRM_DATA_RAM_SEC_COUNT_MASK 0x03000000L +#define RLC_EDC_CNT__RLC_SRM_DATA_RAM_DED_COUNT_MASK 0x0C000000L +#define RLC_EDC_CNT__RLC_SRM_ADDR_RAM_SEC_COUNT_MASK 0x30000000L +#define RLC_EDC_CNT__RLC_SRM_ADDR_RAM_DED_COUNT_MASK 0xC0000000L +//RLC_EDC_CNT2 +#define RLC_EDC_CNT2__RLC_SPM_SE0_SCRATCH_RAM_SEC_COUNT__SHIFT 0x0 +#define RLC_EDC_CNT2__RLC_SPM_SE0_SCRATCH_RAM_DED_COUNT__SHIFT 0x2 +#define RLC_EDC_CNT2__RLC_SPM_SE1_SCRATCH_RAM_SEC_COUNT__SHIFT 0x4 +#define RLC_EDC_CNT2__RLC_SPM_SE1_SCRATCH_RAM_DED_COUNT__SHIFT 0x6 +#define RLC_EDC_CNT2__RLC_SPM_SE2_SCRATCH_RAM_SEC_COUNT__SHIFT 0x8 +#define RLC_EDC_CNT2__RLC_SPM_SE2_SCRATCH_RAM_DED_COUNT__SHIFT 0xa +#define RLC_EDC_CNT2__RLC_SPM_SE3_SCRATCH_RAM_SEC_COUNT__SHIFT 0xc +#define RLC_EDC_CNT2__RLC_SPM_SE3_SCRATCH_RAM_DED_COUNT__SHIFT 0xe +#define RLC_EDC_CNT2__RLC_SPM_SE4_SCRATCH_RAM_SEC_COUNT__SHIFT 0x10 +#define RLC_EDC_CNT2__RLC_SPM_SE4_SCRATCH_RAM_DED_COUNT__SHIFT 0x12 +#define RLC_EDC_CNT2__RLC_SPM_SE5_SCRATCH_RAM_SEC_COUNT__SHIFT 0x14 +#define RLC_EDC_CNT2__RLC_SPM_SE5_SCRATCH_RAM_DED_COUNT__SHIFT 0x16 +#define 
RLC_EDC_CNT2__RLC_SPM_SE6_SCRATCH_RAM_SEC_COUNT__SHIFT 0x18 +#define RLC_EDC_CNT2__RLC_SPM_SE6_SCRATCH_RAM_DED_COUNT__SHIFT 0x1a +#define RLC_EDC_CNT2__RLC_SPM_SE7_SCRATCH_RAM_SEC_COUNT__SHIFT 0x1c +#define RLC_EDC_CNT2__RLC_SPM_SE7_SCRATCH_RAM_DED_COUNT__SHIFT 0x1e +#define RLC_EDC_CNT2__RLC_SPM_SE0_SCRATCH_RAM_SEC_COUNT_MASK 0x00000003L +#define RLC_EDC_CNT2__RLC_SPM_SE0_SCRATCH_RAM_DED_COUNT_MASK 0x0000000CL +#define RLC_EDC_CNT2__RLC_SPM_SE1_SCRATCH_RAM_SEC_COUNT_MASK 0x00000030L +#define RLC_EDC_CNT2__RLC_SPM_SE1_SCRATCH_RAM_DED_COUNT_MASK 0x000000C0L +#define RLC_EDC_CNT2__RLC_SPM_SE2_SCRATCH_RAM_SEC_COUNT_MASK 0x00000300L +#define RLC_EDC_CNT2__RLC_SPM_SE2_SCRATCH_RAM_DED_COUNT_MASK 0x00000C00L +#define RLC_EDC_CNT2__RLC_SPM_SE3_SCRATCH_RAM_SEC_COUNT_MASK 0x00003000L +#define RLC_EDC_CNT2__RLC_SPM_SE3_SCRATCH_RAM_DED_COUNT_MASK 0x0000C000L +#define RLC_EDC_CNT2__RLC_SPM_SE4_SCRATCH_RAM_SEC_COUNT_MASK 0x00030000L +#define RLC_EDC_CNT2__RLC_SPM_SE4_SCRATCH_RAM_DED_COUNT_MASK 0x000C0000L +#define RLC_EDC_CNT2__RLC_SPM_SE5_SCRATCH_RAM_SEC_COUNT_MASK 0x00300000L +#define RLC_EDC_CNT2__RLC_SPM_SE5_SCRATCH_RAM_DED_COUNT_MASK 0x00C00000L +#define RLC_EDC_CNT2__RLC_SPM_SE6_SCRATCH_RAM_SEC_COUNT_MASK 0x03000000L +#define RLC_EDC_CNT2__RLC_SPM_SE6_SCRATCH_RAM_DED_COUNT_MASK 0x0C000000L +#define RLC_EDC_CNT2__RLC_SPM_SE7_SCRATCH_RAM_SEC_COUNT_MASK 0x30000000L +#define RLC_EDC_CNT2__RLC_SPM_SE7_SCRATCH_RAM_DED_COUNT_MASK 0xC0000000L + +#endif \ No newline at end of file diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h index 40dfbf16bd34de684a48258b099b26b30cb5ad8a..111a71b434e233185616f6822c262ae899732122 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h @@ -11185,6 +11185,14 @@ #define MMEA0_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa #define MMEA0_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc #define MMEA0_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA0_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 +#define MMEA0_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 +#define MMEA0_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 +#define MMEA0_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 +#define MMEA0_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 +#define MMEA0_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a +#define MMEA0_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c +#define MMEA0_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e #define MMEA0_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L #define MMEA0_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL #define MMEA0_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L @@ -11193,6 +11201,14 @@ #define MMEA0_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L #define MMEA0_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L #define MMEA0_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +#define MMEA0_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L +#define MMEA0_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L +#define MMEA0_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L +#define MMEA0_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L +#define MMEA0_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L +#define MMEA0_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L +#define MMEA0_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L +#define MMEA0_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L //MMEA0_DSM_CNTL #define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 #define 
MMEA0_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 @@ -14197,6 +14213,14 @@ #define MMEA1_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa #define MMEA1_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc #define MMEA1_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA1_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 +#define MMEA1_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 +#define MMEA1_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 +#define MMEA1_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 +#define MMEA1_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 +#define MMEA1_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a +#define MMEA1_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c +#define MMEA1_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e #define MMEA1_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L #define MMEA1_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL #define MMEA1_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L @@ -14205,6 +14229,14 @@ #define MMEA1_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L #define MMEA1_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L #define MMEA1_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +#define MMEA1_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L +#define MMEA1_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L +#define MMEA1_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L +#define MMEA1_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L +#define MMEA1_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L +#define MMEA1_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L +#define MMEA1_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L +#define MMEA1_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L //MMEA1_DSM_CNTL #define MMEA1_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 #define MMEA1_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 @@ -17209,6 +17241,14 @@ #define MMEA2_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa #define MMEA2_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc #define MMEA2_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA2_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 +#define MMEA2_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 +#define MMEA2_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 +#define MMEA2_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 +#define MMEA2_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 +#define MMEA2_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a +#define MMEA2_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c +#define MMEA2_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e #define MMEA2_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L #define MMEA2_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL #define MMEA2_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L @@ -17217,6 +17257,14 @@ #define MMEA2_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L #define MMEA2_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L #define MMEA2_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +#define MMEA2_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L +#define MMEA2_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L +#define MMEA2_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L +#define MMEA2_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L +#define MMEA2_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L +#define MMEA2_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L +#define MMEA2_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L +#define MMEA2_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L //MMEA2_DSM_CNTL #define MMEA2_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 #define MMEA2_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 @@ -20221,6 +20269,14 @@ #define 
MMEA3_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa #define MMEA3_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc #define MMEA3_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA3_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 +#define MMEA3_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 +#define MMEA3_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 +#define MMEA3_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 +#define MMEA3_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 +#define MMEA3_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a +#define MMEA3_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c +#define MMEA3_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e #define MMEA3_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L #define MMEA3_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL #define MMEA3_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L @@ -20229,6 +20285,14 @@ #define MMEA3_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L #define MMEA3_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L #define MMEA3_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +#define MMEA3_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L +#define MMEA3_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L +#define MMEA3_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L +#define MMEA3_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L +#define MMEA3_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L +#define MMEA3_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L +#define MMEA3_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L +#define MMEA3_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L //MMEA3_DSM_CNTL #define MMEA3_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 #define MMEA3_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 @@ -23233,6 +23297,14 @@ #define MMEA4_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa #define MMEA4_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc #define MMEA4_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA4_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 +#define MMEA4_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 +#define MMEA4_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 +#define MMEA4_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 +#define MMEA4_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 +#define MMEA4_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a +#define MMEA4_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c +#define MMEA4_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e #define MMEA4_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L #define MMEA4_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL #define MMEA4_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L @@ -23241,6 +23313,14 @@ #define MMEA4_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L #define MMEA4_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L #define MMEA4_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +#define MMEA4_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L +#define MMEA4_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L +#define MMEA4_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L +#define MMEA4_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L +#define MMEA4_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L +#define MMEA4_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L +#define MMEA4_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L +#define MMEA4_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L //MMEA4_DSM_CNTL #define MMEA4_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 #define MMEA4_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 @@ -34952,6 +35032,14 @@ #define MMEA5_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa #define MMEA5_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc 
#define MMEA5_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA5_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 +#define MMEA5_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 +#define MMEA5_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 +#define MMEA5_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 +#define MMEA5_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 +#define MMEA5_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a +#define MMEA5_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c +#define MMEA5_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e #define MMEA5_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L #define MMEA5_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL #define MMEA5_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L @@ -34960,6 +35048,14 @@ #define MMEA5_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L #define MMEA5_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L #define MMEA5_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +#define MMEA5_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L +#define MMEA5_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L +#define MMEA5_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L +#define MMEA5_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L +#define MMEA5_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L +#define MMEA5_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L +#define MMEA5_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L +#define MMEA5_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L //MMEA5_DSM_CNTL #define MMEA5_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 #define MMEA5_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 @@ -37964,6 +38060,14 @@ #define MMEA6_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa #define MMEA6_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc #define MMEA6_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA6_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 +#define MMEA6_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 +#define MMEA6_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 +#define MMEA6_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 +#define MMEA6_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 +#define MMEA6_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a +#define MMEA6_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c +#define MMEA6_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e #define MMEA6_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L #define MMEA6_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL #define MMEA6_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L @@ -37972,6 +38076,14 @@ #define MMEA6_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L #define MMEA6_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L #define MMEA6_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +#define MMEA6_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L +#define MMEA6_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L +#define MMEA6_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L +#define MMEA6_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L +#define MMEA6_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L +#define MMEA6_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L +#define MMEA6_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L +#define MMEA6_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L //MMEA6_DSM_CNTL #define MMEA6_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 #define MMEA6_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 @@ -40976,6 +41088,14 @@ #define MMEA7_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa #define MMEA7_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc #define MMEA7_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA7_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 
0x10 +#define MMEA7_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 +#define MMEA7_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 +#define MMEA7_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 +#define MMEA7_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 +#define MMEA7_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a +#define MMEA7_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c +#define MMEA7_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e #define MMEA7_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L #define MMEA7_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL #define MMEA7_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L @@ -40984,6 +41104,14 @@ #define MMEA7_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L #define MMEA7_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L #define MMEA7_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +#define MMEA7_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L +#define MMEA7_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L +#define MMEA7_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L +#define MMEA7_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L +#define MMEA7_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L +#define MMEA7_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L +#define MMEA7_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L +#define MMEA7_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L //MMEA7_DSM_CNTL #define MMEA7_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 #define MMEA7_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_offset.h index 043aa695d63fce3223d7031cb5823ded4652aff9..0d6b594be775de1493186dc5bf94f43800c4e53e 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_offset.h @@ -27,5 +27,7 @@ #define mmUMCCH0_0_EccErrCnt_BASE_IDX 0 #define mmMCA_UMC_UMC0_MCUMC_STATUST0 0x03c2 #define mmMCA_UMC_UMC0_MCUMC_STATUST0_BASE_IDX 0 +#define mmMCA_UMC_UMC0_MCUMC_ADDRT0 0x03c4 +#define mmMCA_UMC_UMC0_MCUMC_ADDRT0_BASE_IDX 0 #endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_offset.h new file mode 100644 index 0000000000000000000000000000000000000000..ce005c674a18c1397f61c8d36c22e1a4a79421a8 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_offset.h @@ -0,0 +1,33 @@ +/* + * Copyright (C) 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef _umc_6_1_2_OFFSET_HEADER +#define _umc_6_1_2_OFFSET_HEADER + +#define mmUMCCH0_0_EccErrCntSel_ARCT 0x0360 +#define mmUMCCH0_0_EccErrCntSel_ARCT_BASE_IDX 1 +#define mmUMCCH0_0_EccErrCnt_ARCT 0x0361 +#define mmUMCCH0_0_EccErrCnt_ARCT_BASE_IDX 1 +#define mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT 0x03c2 +#define mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT_BASE_IDX 1 +#define mmMCA_UMC_UMC0_MCUMC_ADDRT0_ARCT 0x03c4 +#define mmMCA_UMC_UMC0_MCUMC_ADDRT0_ARCT_BASE_IDX 1 + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_sh_mask.h new file mode 100644 index 0000000000000000000000000000000000000000..a5a8c993ec3a8bca03074936670bd444cbfcd2a1 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_sh_mask.h @@ -0,0 +1,91 @@ +/* + * Copyright (C) 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _umc_6_1_2_SH_MASK_HEADER +#define _umc_6_1_2_SH_MASK_HEADER + +//UMCCH0_0_EccErrCntSel_ARCT +#define UMCCH0_0_EccErrCntSel_ARCT__EccErrCntCsSel__SHIFT 0x0 +#define UMCCH0_0_EccErrCntSel_ARCT__EccErrInt__SHIFT 0xc +#define UMCCH0_0_EccErrCntSel_ARCT__EccErrCntEn__SHIFT 0xf +#define UMCCH0_0_EccErrCntSel_ARCT__EccErrCntCsSel_MASK 0x0000000FL +#define UMCCH0_0_EccErrCntSel_ARCT__EccErrInt_MASK 0x00003000L +#define UMCCH0_0_EccErrCntSel_ARCT__EccErrCntEn_MASK 0x00008000L +//UMCCH0_0_EccErrCnt_ARCT +#define UMCCH0_0_EccErrCnt_ARCT__EccErrCnt__SHIFT 0x0 +#define UMCCH0_0_EccErrCnt_ARCT__EccErrCnt_MASK 0x0000FFFFL +//MCA_UMC_UMC0_MCUMC_STATUST0_ARCT +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrorCode__SHIFT 0x0 +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrorCodeExt__SHIFT 0x10 +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV0__SHIFT 0x16 +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrCoreId__SHIFT 0x20 +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV1__SHIFT 0x26 +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Scrub__SHIFT 0x28 +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV2__SHIFT 0x29 +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Poison__SHIFT 0x2b +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Deferred__SHIFT 0x2c +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__UECC__SHIFT 0x2d +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__CECC__SHIFT 0x2e +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV3__SHIFT 0x2f +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Transparent__SHIFT 0x34 +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__SyndV__SHIFT 0x35 +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV4__SHIFT 0x36 +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__TCC__SHIFT 0x37 +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrCoreIdVal__SHIFT 0x38 +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__PCC__SHIFT 0x39 +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__AddrV__SHIFT 0x3a +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__MiscV__SHIFT 0x3b +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__En__SHIFT 0x3c +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__UC__SHIFT 0x3d +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Overflow__SHIFT 0x3e +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Val__SHIFT 0x3f +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrorCode_MASK 0x000000000000FFFFL +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrorCodeExt_MASK 0x00000000003F0000L +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV0_MASK 0x00000000FFC00000L +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrCoreId_MASK 0x0000003F00000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV1_MASK 0x000000C000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Scrub_MASK 0x0000010000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV2_MASK 0x0000060000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Poison_MASK 0x0000080000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Deferred_MASK 0x0000100000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__UECC_MASK 0x0000200000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__CECC_MASK 0x0000400000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV3_MASK 0x000F800000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Transparent_MASK 0x0010000000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__SyndV_MASK 0x0020000000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__RESERV4_MASK 0x0040000000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__TCC_MASK 0x0080000000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__ErrCoreIdVal_MASK 0x0100000000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__PCC_MASK 0x0200000000000000L +#define 
MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__AddrV_MASK 0x0400000000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__MiscV_MASK 0x0800000000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__En_MASK 0x1000000000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__UC_MASK 0x2000000000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Overflow_MASK 0x4000000000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0_ARCT__Val_MASK 0x8000000000000000L +//MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT +#define MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT__ErrorAddr__SHIFT 0x0 +#define MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT__LSB__SHIFT 0x38 +#define MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT__Reserved__SHIFT 0x3e +#define MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT__ErrorAddr_MASK 0x00FFFFFFFFFFFFFFL +#define MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT__LSB_MASK 0x3F00000000000000L +#define MCA_UMC_UMC0_MCUMC_ADDRT0_ARCT__Reserved_MASK 0xC000000000000000L + +#endif diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index dd7cbc00a0aa2ac3a61d1f16fe5cb04825e16656..70146518174cd865acfd23fbbe6ec813ddb579e9 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -672,20 +672,6 @@ struct vram_usagebyfirmware_v2_1 uint16_t used_by_driver_in_kb; }; -/* This is part of vram_usagebyfirmware_v2_1 */ -struct vram_reserve_block -{ - uint32_t start_address_in_kb; - uint16_t used_by_firmware_in_kb; - uint16_t used_by_driver_in_kb; -}; - -/* Definitions for constance */ -enum atomfirmware_internal_constants -{ - ONE_KiB = 0x400, - ONE_MiB = 0x100000, -}; /* *************************************************************************** diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 2cd217e60125d2993b149f1d45454869ec48be88..a607b1034962902534a7ae6affbdab372eab3547 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -256,6 +256,10 @@ struct kfd2kgd_calls { uint32_t wptr_shift, uint32_t wptr_mask, struct mm_struct *mm); + int (*hiq_mqd_load)(struct kgd_dev *kgd, void *mqd, + uint32_t pipe_id, uint32_t queue_id, + uint32_t doorbell_off); + int (*hqd_sdma_load)(struct kgd_dev *kgd, void *mqd, uint32_t __user *wptr, struct mm_struct *mm); @@ -307,8 +311,6 @@ struct kfd2kgd_calls { void (*set_vm_context_page_table_base)(struct kgd_dev *kgd, uint32_t vmid, uint64_t page_table_base); - int (*invalidate_tlbs)(struct kgd_dev *kgd, uint16_t pasid); - int (*invalidate_tlbs_vmid)(struct kgd_dev *kgd, uint16_t vmid); uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd); uint64_t (*get_hive_id)(struct kgd_dev *kgd); diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 5087d6bdba6095b0bbb7e23d7c979eabb9d9167b..c195575366a3b2e4ea48c0748cd520dc2d2fbd26 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -275,9 +275,6 @@ static int pp_dpm_load_fw(void *handle) { struct pp_hwmgr *hwmgr = handle; - if (!hwmgr->not_vf) - return 0; - if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu) return -EINVAL; @@ -930,9 +927,12 @@ static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state) { struct pp_hwmgr *hwmgr = handle; - if (!hwmgr || !hwmgr->pm_en) + if (!hwmgr) return -EINVAL; + if (!hwmgr->pm_en) + return 0; + if (hwmgr->hwmgr_func->set_mp1_state) return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state); diff --git 
a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 6dddd7818558282796f7489d15cac0c3cd8ae93a..99ad4ddbe12f01c89e36460eac27cc194f66bd72 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -21,6 +21,7 @@ */ #include +#include #include "pp_debug.h" #include "amdgpu.h" @@ -356,6 +357,35 @@ int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type, return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value); } +int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t *min_value, uint32_t *max_value) +{ + int ret = 0; + uint32_t level_count = 0; + + if (!min_value && !max_value) + return -EINVAL; + + if (min_value) { + /* by default, level 0 clock value as min value */ + ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, min_value); + if (ret) + return ret; + } + + if (max_value) { + ret = smu_get_dpm_level_count(smu, clk_type, &level_count); + if (ret) + return ret; + + ret = smu_get_dpm_freq_by_index(smu, clk_type, level_count - 1, max_value); + if (ret) + return ret; + } + + return ret; +} + bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type) { enum smu_feature_mask feature_id = 0; @@ -404,10 +434,10 @@ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type, switch (block_type) { case AMD_IP_BLOCK_TYPE_UVD: - ret = smu_dpm_set_uvd_enable(smu, gate); + ret = smu_dpm_set_uvd_enable(smu, !gate); break; case AMD_IP_BLOCK_TYPE_VCE: - ret = smu_dpm_set_vce_enable(smu, gate); + ret = smu_dpm_set_vce_enable(smu, !gate); break; case AMD_IP_BLOCK_TYPE_GFX: ret = smu_gfx_off_control(smu, gate); @@ -416,7 +446,7 @@ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type, ret = smu_powergate_sdma(smu, gate); break; case AMD_IP_BLOCK_TYPE_JPEG: - ret = smu_dpm_set_jpeg_enable(smu, gate); + ret = smu_dpm_set_jpeg_enable(smu, !gate); break; default: break; @@ -490,26 +520,25 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int { struct smu_table_context *smu_table = &smu->smu_table; struct amdgpu_device *adev = smu->adev; - struct smu_table *table = NULL; - int ret = 0; + struct smu_table *table = &smu_table->driver_table; int table_id = smu_table_get_index(smu, table_index); + uint32_t table_size; + int ret = 0; if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0) return -EINVAL; - table = &smu_table->tables[table_index]; + table_size = smu_table->tables[table_index].size; - if (drv2smu) - memcpy(table->cpu_addr, table_data, table->size); + if (drv2smu) { + memcpy(table->cpu_addr, table_data, table_size); + /* + * Flush the HDP cache to guarantee that the content + * seen by the GPU is consistent with the CPU. + */ + amdgpu_asic_flush_hdp(adev, NULL); + } - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh, - upper_32_bits(table->mc_address)); - if (ret) - return ret; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow, - lower_32_bits(table->mc_address)); - if (ret) - return ret; ret = smu_send_smc_msg_with_param(smu, drv2smu ?
SMU_MSG_TransferTableDram2Smu : SMU_MSG_TransferTableSmu2Dram, @@ -517,11 +546,10 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int if (ret) return ret; - /* flush hdp cache */ - adev->nbio.funcs->hdp_flush(adev, NULL); - - if (!drv2smu) - memcpy(table_data, table->cpu_addr, table->size); + if (!drv2smu) { + amdgpu_asic_flush_hdp(adev, NULL); + memcpy(table_data, table->cpu_addr, table_size); + } return ret; } @@ -531,7 +559,7 @@ bool is_support_sw_smu(struct amdgpu_device *adev) if (adev->asic_type == CHIP_VEGA20) return (amdgpu_dpm == 2) ? true : false; else if (adev->asic_type >= CHIP_ARCTURUS) { - if (amdgpu_sriov_vf(adev)) + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) return false; else return true; @@ -643,12 +671,11 @@ int smu_feature_init_dpm(struct smu_context *smu) int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask) { - struct amdgpu_device *adev = smu->adev; struct smu_feature *feature = &smu->smu_feature; int feature_id; int ret = 0; - if (adev->flags & AMD_IS_APU) + if (smu->is_apu) return 1; feature_id = smu_feature_get_index(smu, mask); @@ -872,6 +899,7 @@ static int smu_sw_init(void *handle) smu->smu_baco.platform_support = false; mutex_init(&smu->sensor_lock); + mutex_init(&smu->metrics_lock); smu->watermarks_bitmap = 0; smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; @@ -947,32 +975,56 @@ static int smu_init_fb_allocations(struct smu_context *smu) struct amdgpu_device *adev = smu->adev; struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *tables = smu_table->tables; + struct smu_table *driver_table = &(smu_table->driver_table); + uint32_t max_table_size = 0; int ret, i; - for (i = 0; i < SMU_TABLE_COUNT; i++) { - if (tables[i].size == 0) - continue; + /* VRAM allocation for tool table */ + if (tables[SMU_TABLE_PMSTATUSLOG].size) { ret = amdgpu_bo_create_kernel(adev, - tables[i].size, - tables[i].align, - tables[i].domain, - &tables[i].bo, - &tables[i].mc_address, - &tables[i].cpu_addr); - if (ret) - goto failed; + tables[SMU_TABLE_PMSTATUSLOG].size, + tables[SMU_TABLE_PMSTATUSLOG].align, + tables[SMU_TABLE_PMSTATUSLOG].domain, + &tables[SMU_TABLE_PMSTATUSLOG].bo, + &tables[SMU_TABLE_PMSTATUSLOG].mc_address, + &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr); + if (ret) { + pr_err("VRAM allocation for tool table failed!\n"); + return ret; + } } - return 0; -failed: - while (--i >= 0) { + /* VRAM allocation for driver table */ + for (i = 0; i < SMU_TABLE_COUNT; i++) { if (tables[i].size == 0) continue; - amdgpu_bo_free_kernel(&tables[i].bo, - &tables[i].mc_address, - &tables[i].cpu_addr); + if (i == SMU_TABLE_PMSTATUSLOG) + continue; + + if (max_table_size < tables[i].size) + max_table_size = tables[i].size; } + + driver_table->size = max_table_size; + driver_table->align = PAGE_SIZE; + driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM; + + ret = amdgpu_bo_create_kernel(adev, + driver_table->size, + driver_table->align, + driver_table->domain, + &driver_table->bo, + &driver_table->mc_address, + &driver_table->cpu_addr); + if (ret) { + pr_err("VRAM allocation for driver table failed!\n"); + if (tables[SMU_TABLE_PMSTATUSLOG].mc_address) + amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo, + &tables[SMU_TABLE_PMSTATUSLOG].mc_address, + &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr); + } + return ret; } @@ -980,18 +1032,19 @@ static int smu_fini_fb_allocations(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *tables =
smu_table->tables; - uint32_t i = 0; + struct smu_table *driver_table = &(smu_table->driver_table); if (!tables) return 0; - for (i = 0; i < SMU_TABLE_COUNT; i++) { - if (tables[i].size == 0) - continue; - amdgpu_bo_free_kernel(&tables[i].bo, - &tables[i].mc_address, - &tables[i].cpu_addr); - } + if (tables[SMU_TABLE_PMSTATUSLOG].mc_address) + amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo, + &tables[SMU_TABLE_PMSTATUSLOG].mc_address, + &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr); + + amdgpu_bo_free_kernel(&driver_table->bo, + &driver_table->mc_address, + &driver_table->cpu_addr); return 0; } @@ -1061,28 +1114,48 @@ static int smu_smc_table_hw_init(struct smu_context *smu, } /* smu_dump_pptable(smu); */ + if (!amdgpu_sriov_vf(adev)) { + ret = smu_set_driver_table_location(smu); + if (ret) + return ret; - /* - * Copy pptable bo in the vram to smc with SMU MSGs such as - * SetDriverDramAddr and TransferTableDram2Smu. - */ - ret = smu_write_pptable(smu); - if (ret) - return ret; - - /* issue Run*Btc msg */ - ret = smu_run_btc(smu); - if (ret) - return ret; + /* + * Copy pptable bo in the vram to smc with SMU MSGs such as + * SetDriverDramAddr and TransferTableDram2Smu. + */ + ret = smu_write_pptable(smu); + if (ret) + return ret; - ret = smu_feature_set_allowed_mask(smu); - if (ret) - return ret; + /* issue Run*Btc msg */ + ret = smu_run_btc(smu); + if (ret) + return ret; + ret = smu_feature_set_allowed_mask(smu); + if (ret) + return ret; - ret = smu_system_features_control(smu, true); - if (ret) - return ret; + ret = smu_system_features_control(smu, true); + if (ret) + return ret; + if (adev->asic_type == CHIP_NAVI10) { + if ((adev->pdev->device == 0x731f && (adev->pdev->revision == 0xc2 || + adev->pdev->revision == 0xc3 || + adev->pdev->revision == 0xca || + adev->pdev->revision == 0xcb)) || + (adev->pdev->device == 0x66af && (adev->pdev->revision == 0xf3 || + adev->pdev->revision == 0xf4 || + adev->pdev->revision == 0xf5 || + adev->pdev->revision == 0xf6))) { + ret = smu_disable_umc_cdr_12gbps_workaround(smu); + if (ret) { + pr_err("Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n"); + return ret; + } + } + } + } if (adev->asic_type != CHIP_ARCTURUS) { ret = smu_notify_display_change(smu); if (ret) @@ -1135,8 +1208,9 @@ static int smu_smc_table_hw_init(struct smu_context *smu, /* * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools. 
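+	 * (Note: the call below is now skipped for SRIOV VFs.)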
*/ - ret = smu_set_tool_table_location(smu); - + if (!amdgpu_sriov_vf(adev)) { + ret = smu_set_tool_table_location(smu); + } if (!smu_is_dpm_running(smu)) pr_info("dpm has been disabled\n"); @@ -1241,13 +1315,16 @@ static int smu_hw_init(void *handle) return ret; } - if (adev->flags & AMD_IS_APU) { + if (smu->is_apu) { smu_powergate_sdma(&adev->smu, false); smu_powergate_vcn(&adev->smu, false); smu_powergate_jpeg(&adev->smu, false); smu_set_gfx_cgpg(&adev->smu, true); } + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return 0; + if (!smu->pm_enabled) return 0; @@ -1290,7 +1367,7 @@ failed: static int smu_stop_dpms(struct smu_context *smu) { - return smu_send_smc_msg(smu, SMU_MSG_DisableAllSmuFeatures); + return smu_system_features_control(smu, false); } static int smu_hw_fini(void *handle) @@ -1300,37 +1377,45 @@ static int smu_hw_fini(void *handle) struct smu_table_context *table_context = &smu->smu_table; int ret = 0; - if (adev->flags & AMD_IS_APU) { + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return 0; + + if (smu->is_apu) { smu_powergate_sdma(&adev->smu, true); smu_powergate_vcn(&adev->smu, true); smu_powergate_jpeg(&adev->smu, true); } - ret = smu_stop_thermal_control(smu); - if (ret) { - pr_warn("Fail to stop thermal control!\n"); - return ret; - } + if (!smu->pm_enabled) + return 0; - /* - * For custom pptable uploading, skip the DPM features - * disable process on Navi1x ASICs. - * - As the gfx related features are under control of - * RLC on those ASICs. RLC reinitialization will be - * needed to reenable them. That will cost much more - * efforts. - * - * - SMU firmware can handle the DPM reenablement - * properly. - */ - if (!smu->uploading_custom_pp_table || - !((adev->asic_type >= CHIP_NAVI10) && - (adev->asic_type <= CHIP_NAVI12))) { - ret = smu_stop_dpms(smu); + if (!amdgpu_sriov_vf(adev)) { + ret = smu_stop_thermal_control(smu); if (ret) { - pr_warn("Fail to stop Dpms!\n"); + pr_warn("Failed to stop thermal control!\n"); return ret; } + + /* + * For custom pptable uploading, skip the DPM features + * disable process on Navi1x ASICs. + * - As the gfx related features are under control of + * RLC on those ASICs. RLC reinitialization will be + * needed to reenable them. That will cost much more + * efforts. + * + * - SMU firmware can handle the DPM reenablement + * properly.
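+	 * + * In short: smu_stop_dpms() below runs only when no custom + * pptable upload is pending or when the ASIC falls outside the + * Navi10..Navi12 range; otherwise the DPM features are left + * enabled for the SMU firmware to rearm.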
+ */ + if (!smu->uploading_custom_pp_table || + !((adev->asic_type >= CHIP_NAVI10) && + (adev->asic_type <= CHIP_NAVI12))) { + ret = smu_stop_dpms(smu); + if (ret) { + pr_warn("Failed to stop DPMs!\n"); + return ret; + } + } } kfree(table_context->driver_pptable); @@ -1376,7 +1461,10 @@ static int smu_suspend(void *handle) struct smu_context *smu = &adev->smu; bool baco_feature_is_enabled = false; - if(!(adev->flags & AMD_IS_APU)) + if (!smu->pm_enabled) + return 0; + + if (!smu->is_apu) baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT); ret = smu_system_features_control(smu, false); @@ -1408,6 +1496,12 @@ static int smu_resume(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct smu_context *smu = &adev->smu; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return 0; + + if (!smu->pm_enabled) + return 0; + pr_info("SMU is resuming...\n"); ret = smu_start_smc_engine(smu); @@ -1606,43 +1700,6 @@ static int smu_enable_umd_pstate(void *handle, return 0; } -static int smu_default_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) -{ - int ret = 0; - uint32_t sclk_mask, mclk_mask, soc_mask; - - switch (level) { - case AMD_DPM_FORCED_LEVEL_HIGH: - ret = smu_force_dpm_limit_value(smu, true); - break; - case AMD_DPM_FORCED_LEVEL_LOW: - ret = smu_force_dpm_limit_value(smu, false); - break; - case AMD_DPM_FORCED_LEVEL_AUTO: - case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: - ret = smu_unforce_dpm_levels(smu); - break; - case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: - case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: - case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: - ret = smu_get_profiling_clk_mask(smu, level, - &sclk_mask, - &mclk_mask, - &soc_mask); - if (ret) - return ret; - smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false); - smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false); - smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false); - break; - case AMD_DPM_FORCED_LEVEL_MANUAL: - case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: - default: - break; - } - return ret; -} - int smu_adjust_power_state_dynamic(struct smu_context *smu, enum amd_dpm_forced_level level, bool skip_display_settings) @@ -1670,7 +1727,7 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu, } if (!skip_display_settings) { - ret = smu_notify_smc_dispaly_config(smu); + ret = smu_notify_smc_display_config(smu); if (ret) { pr_err("Failed to notify smc display config!"); return ret; } @@ -1680,11 +1737,8 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu, if (smu_dpm_ctx->dpm_level != level) { ret = smu_asic_set_performance_level(smu, level); if (ret) { - ret = smu_default_set_performance_level(smu, level); - if (ret) { - pr_err("Failed to set performance level!"); - return ret; - } + pr_err("Failed to set performance level!"); + return ret; } /* update the saved copy */ @@ -1926,26 +1980,25 @@ int smu_set_df_cstate(struct smu_context *smu, int smu_write_watermarks_table(struct smu_context *smu) { - int ret = 0; - struct smu_table_context *smu_table = &smu->smu_table; - struct smu_table *table = NULL; - - table = &smu_table->tables[SMU_TABLE_WATERMARKS]; + void *watermarks_table = smu->smu_table.watermarks_table; - if (!table->cpu_addr) + if (!watermarks_table) return -EINVAL; - ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr, + return smu_update_table(smu, + SMU_TABLE_WATERMARKS, + 0, + watermarks_table, true); - - return ret; } int smu_set_watermarks_for_clock_ranges(struct smu_context *smu, struct
dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges) { - struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS]; - void *table = watermarks->cpu_addr; + void *table = smu->smu_table.watermarks_table; + + if (!table) + return -EINVAL; mutex_lock(&smu->mutex); @@ -2284,13 +2337,9 @@ int smu_set_active_display_count(struct smu_context *smu, uint32_t count) { int ret = 0; - mutex_lock(&smu->mutex); - if (smu->ppt_funcs->set_active_display_count) ret = smu->ppt_funcs->set_active_display_count(smu, count); - mutex_unlock(&smu->mutex); - return ret; } @@ -2437,7 +2486,7 @@ bool smu_baco_is_support(struct smu_context *smu) mutex_lock(&smu->mutex); - if (smu->ppt_funcs->baco_is_support) + if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support) ret = smu->ppt_funcs->baco_is_support(smu); mutex_unlock(&smu->mutex); diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index 17eeb546c550bd38a94999dc78661232792d2499..14ba6aa876e2a81aba3bdeb349adb39f6f21f155 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -179,6 +179,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_table_map[SMU_TABLE_COUNT] = { TAB_MAP(DRIVER_SMU_CONFIG), TAB_MAP(OVERDRIVE), TAB_MAP(I2C_COMMANDS), + TAB_MAP(ACTIVITY_MONITOR_COEFF), }; static struct smu_11_0_cmn2aisc_mapping arcturus_pwr_src_map[SMU_POWER_SOURCE_COUNT] = { @@ -302,6 +303,10 @@ static int arcturus_tables_init(struct smu_context *smu, struct smu_table *table SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, + sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM); + smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); if (!smu_table->metrics_table) return -ENOMEM; @@ -867,18 +872,21 @@ static int arcturus_get_metrics_table(struct smu_context *smu, struct smu_table_context *smu_table= &smu->smu_table; int ret = 0; + mutex_lock(&smu->metrics_lock); if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) { ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)smu_table->metrics_table, false); if (ret) { pr_info("Failed to export SMU metrics table!\n"); + mutex_unlock(&smu->metrics_lock); return ret; } smu_table->metrics_time = jiffies; } memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t)); + mutex_unlock(&smu->metrics_lock); return ret; } @@ -1310,6 +1318,8 @@ static int arcturus_get_power_limit(struct smu_context *smu, static int arcturus_get_power_profile_mode(struct smu_context *smu, char *buf) { + struct amdgpu_device *adev = smu->adev; + DpmActivityMonitorCoeffInt_t activity_monitor; static const char *profile_name[] = { "BOOTUP_DEFAULT", "3D_FULL_SCREEN", @@ -1319,14 +1329,35 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu, "COMPUTE", "CUSTOM"}; static const char *title[] = { - "PROFILE_INDEX(NAME)"}; + "PROFILE_INDEX(NAME)", + "CLOCK_TYPE(NAME)", + "FPS", + "UseRlcBusy", + "MinActiveFreqType", + "MinActiveFreq", + "BoosterFreqType", + "BoosterFreq", + "PD_Data_limit_c", + "PD_Data_error_coeff", + "PD_Data_error_rate_coeff"}; uint32_t i, size = 0; int16_t workload_type = 0; + int result = 0; + uint32_t smu_version; - if (!smu->pm_enabled || !buf) + if (!buf) return -EINVAL; - size += sprintf(buf + size, "%16s\n", + result = smu_get_smc_version(smu, NULL, &smu_version); + if (result) + return result; + 
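+	/* + * 0x360d00 appears to decode as SMU firmware 54.13.0 under the + * (major << 16 | minor << 8 | debug) packing used by + * smu_get_smc_version(); only that firmware or newer exports the + * activity monitor coefficient table, and it is not fetched + * under SRIOV. + */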
+ if (smu_version >= 0x360d00 && !amdgpu_sriov_vf(adev)) + size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n", + title[0], title[1], title[2], title[3], title[4], title[5], + title[6], title[7], title[8], title[9], title[10]); + else + size += sprintf(buf + size, "%16s\n", title[0]); for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { @@ -1338,8 +1369,50 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu, if (workload_type < 0) continue; + if (smu_version >= 0x360d00 && !amdgpu_sriov_vf(adev)) { + result = smu_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + workload_type, + (void *)(&activity_monitor), + false); + if (result) { + pr_err("[%s] Failed to get activity monitor!", __func__); + return result; + } + } + size += sprintf(buf + size, "%2d %14s%s\n", i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); + + if (smu_version >= 0x360d00 && !amdgpu_sriov_vf(adev)) { + size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", + " ", + 0, + "GFXCLK", + activity_monitor.Gfx_FPS, + activity_monitor.Gfx_UseRlcBusy, + activity_monitor.Gfx_MinActiveFreqType, + activity_monitor.Gfx_MinActiveFreq, + activity_monitor.Gfx_BoosterFreqType, + activity_monitor.Gfx_BoosterFreq, + activity_monitor.Gfx_PD_Data_limit_c, + activity_monitor.Gfx_PD_Data_error_coeff, + activity_monitor.Gfx_PD_Data_error_rate_coeff); + + size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", + " ", + 1, + "UCLK", + activity_monitor.Mem_FPS, + activity_monitor.Mem_UseRlcBusy, + activity_monitor.Mem_MinActiveFreqType, + activity_monitor.Mem_MinActiveFreq, + activity_monitor.Mem_BoosterFreqType, + activity_monitor.Mem_BoosterFreq, + activity_monitor.Mem_PD_Data_limit_c, + activity_monitor.Mem_PD_Data_error_coeff, + activity_monitor.Mem_PD_Data_error_rate_coeff); + } } return size; @@ -1349,18 +1422,69 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) { + DpmActivityMonitorCoeffInt_t activity_monitor; int workload_type = 0; uint32_t profile_mode = input[size]; int ret = 0; - - if (!smu->pm_enabled) - return -EINVAL; + uint32_t smu_version; if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { pr_err("Invalid power profile mode %d\n", profile_mode); return -EINVAL; } + ret = smu_get_smc_version(smu, NULL, &smu_version); + if (ret) + return ret; + + if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) && + (smu_version >= 0x360d00)) { + ret = smu_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor), + false); + if (ret) { + pr_err("[%s] Failed to get activity monitor!", __func__); + return ret; + } + + switch (input[0]) { + case 0: /* Gfxclk */ + activity_monitor.Gfx_FPS = input[1]; + activity_monitor.Gfx_UseRlcBusy = input[2]; + activity_monitor.Gfx_MinActiveFreqType = input[3]; + activity_monitor.Gfx_MinActiveFreq = input[4]; + activity_monitor.Gfx_BoosterFreqType = input[5]; + activity_monitor.Gfx_BoosterFreq = input[6]; + activity_monitor.Gfx_PD_Data_limit_c = input[7]; + activity_monitor.Gfx_PD_Data_error_coeff = input[8]; + activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9]; + break; + case 1: /* Uclk */ + activity_monitor.Mem_FPS = input[1]; + activity_monitor.Mem_UseRlcBusy = input[2]; + activity_monitor.Mem_MinActiveFreqType = input[3]; + activity_monitor.Mem_MinActiveFreq = input[4]; + activity_monitor.Mem_BoosterFreqType = input[5]; + activity_monitor.Mem_BoosterFreq = input[6]; +
activity_monitor.Mem_PD_Data_limit_c = input[7]; + activity_monitor.Mem_PD_Data_error_coeff = input[8]; + activity_monitor.Mem_PD_Data_error_rate_coeff = input[9]; + break; + } + + ret = smu_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor), + true); + if (ret) { + pr_err("[%s] Failed to set activity monitor!", __func__); + return ret; + } + } + /* * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT * Not all profile modes are supported on arcturus. @@ -1899,7 +2023,7 @@ static int arcturus_i2c_eeprom_read_data(struct i2c_adapter *control, SwI2cRequest_t req; struct amdgpu_device *adev = to_amdgpu_device(control); struct smu_table_context *smu_table = &adev->smu.smu_table; - struct smu_table *table = &smu_table->tables[SMU_TABLE_I2C_COMMANDS]; + struct smu_table *table = &smu_table->driver_table; memset(&req, 0, sizeof(req)); arcturus_fill_eeprom_i2c_req(&req, false, address, numbytes, data); @@ -2053,8 +2177,12 @@ static const struct i2c_algorithm arcturus_i2c_eeprom_i2c_algo = { static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control) { struct amdgpu_device *adev = to_amdgpu_device(control); + struct smu_context *smu = &adev->smu; int res; + if (!smu->pm_enabled) + return -EOPNOTSUPP; + control->owner = THIS_MODULE; control->class = I2C_CLASS_SPD; control->dev.parent = &adev->pdev->dev; @@ -2070,6 +2198,12 @@ static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control) static void arcturus_i2c_eeprom_control_fini(struct i2c_adapter *control) { + struct amdgpu_device *adev = to_amdgpu_device(control); + struct smu_context *smu = &adev->smu; + + if (!smu->pm_enabled) + return; + i2c_del_adapter(control); } @@ -2114,6 +2248,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .get_profiling_clk_mask = arcturus_get_profiling_clk_mask, .get_power_profile_mode = arcturus_get_power_profile_mode, .set_power_profile_mode = arcturus_set_power_profile_mode, + .set_performance_level = smu_v11_0_set_performance_level, /* debug (internal used) */ .dump_pptable = arcturus_dump_pptable, .get_power_limit = arcturus_get_power_limit, @@ -2137,6 +2272,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .check_fw_version = smu_v11_0_check_fw_version, .write_pptable = smu_v11_0_write_pptable, .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep, + .set_driver_table_location = smu_v11_0_set_driver_table_location, .set_tool_table_location = smu_v11_0_set_tool_table_location, .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, .system_features_control = smu_v11_0_system_features_control, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c index 253860d30b20bd9a1973aa1a8f8628adc4edeceb..9454ab50f9a12d9bd937ad3907af081b6c481ac7 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c @@ -99,6 +99,9 @@ int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr) PHM_FUNC_CHECK(hwmgr); + if (!hwmgr->not_vf) + return 0; + if (!smum_is_dpm_running(hwmgr)) { pr_info("dpm has been disabled\n"); return 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index e2b82c9029489af5f9950258c182965d30422e72..f48fdc7f0382ea99b897d6a885e0a41b62e17749 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -282,10 +282,7 @@ err: int 
hwmgr_hw_fini(struct pp_hwmgr *hwmgr) { - if (!hwmgr->not_vf) - return 0; - - if (!hwmgr || !hwmgr->pm_en) + if (!hwmgr || !hwmgr->pm_en || !hwmgr->not_vf) return 0; phm_stop_thermal_controller(hwmgr); @@ -305,10 +302,7 @@ int hwmgr_suspend(struct pp_hwmgr *hwmgr) { int ret = 0; - if (!hwmgr->not_vf) - return 0; - - if (!hwmgr || !hwmgr->pm_en) + if (!hwmgr || !hwmgr->pm_en || !hwmgr->not_vf) return 0; phm_disable_smc_firmware_ctf(hwmgr); @@ -327,13 +321,10 @@ int hwmgr_resume(struct pp_hwmgr *hwmgr) { int ret = 0; - if (!hwmgr->not_vf) - return 0; - if (!hwmgr) return -EINVAL; - if (!hwmgr->pm_en) + if (!hwmgr->not_vf || !hwmgr->pm_en) return 0; ret = phm_setup_asic(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index 4e8ab139bb3bd2cdcc234c10214d74ffaeba1914..689072a312a7fb52a015eedf4bd9d53d9720d029 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -1026,12 +1026,15 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr, clocks->num_levels = 0; for (i = 0; i < pclk_vol_table->count; i++) { - clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10; - clocks->data[i].latency_in_us = latency_required ? - smu10_get_mem_latency(hwmgr, - pclk_vol_table->entries[i].clk) : - 0; - clocks->num_levels++; + if (pclk_vol_table->entries[i].clk) { + clocks->data[clocks->num_levels].clocks_in_khz = + pclk_vol_table->entries[i].clk * 10; + clocks->data[clocks->num_levels].latency_in_us = latency_required ? + smu10_get_mem_latency(hwmgr, + pclk_vol_table->entries[i].clk) : + 0; + clocks->num_levels++; + } } return 0; @@ -1077,9 +1080,11 @@ static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, clocks->num_levels = 0; for (i = 0; i < pclk_vol_table->count; i++) { - clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10; - clocks->data[i].voltage_in_mv = pclk_vol_table->entries[i].vol; - clocks->num_levels++; + if (pclk_vol_table->entries[i].clk) { + clocks->data[clocks->num_levels].clocks_in_khz = pclk_vol_table->entries[i].clk * 10; + clocks->data[clocks->num_levels].voltage_in_mv = pclk_vol_table->entries[i].vol; + clocks->num_levels++; + } } return 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index d70abada66bf0fadacd0e2d46b85742ffa750896..bf04cfefb2839afeec239c25de6bf85981f42a38 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -720,7 +720,7 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr) data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage; /* param1 is for corresponding std voltage */ - data->dpm_table.vddc_table.dpm_levels[i].enabled = 1; + data->dpm_table.vddc_table.dpm_levels[i].enabled = true; } data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count; @@ -730,7 +730,7 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr) /* Initialize Vddci DPM table based on allow Mclk values */ for (i = 0; i < allowed_vdd_mclk_table->count; i++) { data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; - data->dpm_table.vddci_table.dpm_levels[i].enabled = 1; + data->dpm_table.vddci_table.dpm_levels[i].enabled = true; } data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count; 
} @@ -744,7 +744,7 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr) */ for (i = 0; i < allowed_vdd_mclk_table->count; i++) { data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; - data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1; + data->dpm_table.mvdd_table.dpm_levels[i].enabled = true; } data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 148446570e21362d5f666551f582618e922f54b7..92a65e3daff4bd7405ed6c6e06775c45d99a1968 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -3538,7 +3538,8 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr) if (!data->registry_data.mclk_dpm_key_disabled) { if (data->smc_state_table.mem_boot_level != data->dpm_table.mem_table.dpm_state.soft_min_level) { - if (data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1) { + if ((data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1) + && hwmgr->not_vf) { socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinSocclkByIndex, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index 5bcf0d684151457d72aa5cf0f6073bd944837ec0..3b3ec56660510851791c975a5f5fa5e07ddae30a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -872,7 +872,7 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr) "[OverridePcieParameters] Attempt to override pcie params failed!", return ret); - data->pcie_parameters_override = 1; + data->pcie_parameters_override = true; data->pcie_gen_level1 = pcie_gen; data->pcie_width_level1 = pcie_width; diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index ca3fdc6777cf00cbf8e5611b6b44bdbdb923adac..97b6714e83e6729902bab6e23de04b2400f0928b 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -254,15 +254,26 @@ struct smu_table_context unsigned long metrics_time; void *metrics_table; void *clocks_table; + void *watermarks_table; void *max_sustainable_clocks; struct smu_bios_boot_up_values boot_values; void *driver_pptable; struct smu_table *tables; + /* + * The driver table is just a staging buffer for + * uploading/downloading content from the SMU. + * + * The table_id for SMU_MSG_TransferTableSmu2Dram/ + * SMU_MSG_TransferTableDram2Smu tells the SMU + * which content the driver is interested in.
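+	 * + * (The driver first points the SMU at this buffer with the + * SetDriverDramAddrHigh/Low messages, then issues a transfer + * message carrying the table_id of the logical table to copy; + * see smu_v11_0_set_driver_table_location() and + * smu_update_table().)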
+ */ + struct smu_table driver_table; struct smu_table memory_pool; uint8_t thermal_controller_type; void *overdrive_table; + void *boot_overdrive_table; }; struct smu_dpm_context { @@ -350,6 +361,7 @@ struct smu_context const struct pptable_funcs *ppt_funcs; struct mutex mutex; struct mutex sensor_lock; + struct mutex metrics_lock; uint64_t pool_size; struct smu_table_context smu_table; @@ -443,7 +455,7 @@ struct pptable_funcs { int (*pre_display_config_changed)(struct smu_context *smu); int (*display_config_changed)(struct smu_context *smu); int (*apply_clocks_adjust_rules)(struct smu_context *smu); - int (*notify_smc_dispaly_config)(struct smu_context *smu); + int (*notify_smc_display_config)(struct smu_context *smu); int (*force_dpm_limit_value)(struct smu_context *smu, bool highest); int (*unforce_dpm_levels)(struct smu_context *smu); int (*get_profiling_clk_mask)(struct smu_context *smu, @@ -496,6 +508,7 @@ struct pptable_funcs { int (*set_gfx_cgpg)(struct smu_context *smu, bool enable); int (*write_pptable)(struct smu_context *smu); int (*set_min_dcef_deep_sleep)(struct smu_context *smu); + int (*set_driver_table_location)(struct smu_context *smu); int (*set_tool_table_location)(struct smu_context *smu); int (*notify_memory_pool_location)(struct smu_context *smu); int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu); @@ -553,6 +566,7 @@ struct pptable_funcs { int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max); int (*override_pcie_parameters)(struct smu_context *smu); uint32_t (*get_pptable_power_limit)(struct smu_context *smu); + int (*disable_umc_cdr_12gbps_workaround)(struct smu_context *smu); }; int smu_load_microcode(struct smu_context *smu); @@ -696,6 +710,8 @@ int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max); int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max); +int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t *min_value, uint32_t *max_value); enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu); int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level); int smu_set_display_count(struct smu_context *smu, uint32_t count); diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h index a886f0644d24589acf1fccbb75fcf097da2bf48b..ce5b5011c122ac86f82c3d386bab98e5e9a11921 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h @@ -622,8 +622,14 @@ typedef struct { uint16_t PccThresholdHigh; uint32_t PaddingAPCC[6]; //FIXME pending SPEC + // OOB Settings + uint16_t BasePerformanceCardPower; + uint16_t MaxPerformanceCardPower; + uint16_t BasePerformanceFrequencyCap; // In MHz + uint16_t MaxPerformanceFrequencyCap; // In MHz + // SECTION: Reserved - uint32_t Reserved[11]; + uint32_t Reserved[9]; // SECTION: BOARD PARAMETERS @@ -823,7 +829,6 @@ typedef struct { uint32_t MmHubPadding[8]; // SMU internal use } AvfsFuseOverride_t; -/* NOT CURRENTLY USED typedef struct { uint8_t Gfx_ActiveHystLimit; uint8_t Gfx_IdleHystLimit; @@ -866,7 +871,6 @@ typedef struct { uint32_t MmHubPadding[8]; // SMU internal use } DpmActivityMonitorCoeffInt_t; -*/ // These defines are used with the following messages: // SMC_MSG_TransferTableDram2Smu @@ -878,11
+882,11 @@ typedef struct { #define TABLE_PMSTATUSLOG 4 #define TABLE_SMU_METRICS 5 #define TABLE_DRIVER_SMU_CONFIG 6 -//#define TABLE_ACTIVITY_MONITOR_COEFF 7 #define TABLE_OVERDRIVE 7 #define TABLE_WAFL_XGMI_TOPOLOGY 8 #define TABLE_I2C_COMMANDS 9 -#define TABLE_COUNT 10 +#define TABLE_ACTIVITY_MONITOR_COEFF 10 +#define TABLE_COUNT 11 // These defines are used with the SMC_MSG_SetUclkFastSwitch message. typedef enum { diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h index c27c82851468f4a7bbdfbc38d67764cff9c28418..2f85a34c0591ad87585f0bdd6105ba94eef9a740 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h @@ -27,7 +27,7 @@ // *** IMPORTANT *** // SMU TEAM: Always increment the interface version if // any structure is changed in this file -#define SMU12_DRIVER_IF_VERSION 10 +#define SMU12_DRIVER_IF_VERSION 11 typedef struct { int32_t value; @@ -192,6 +192,11 @@ typedef struct { uint16_t SocTemperature; //[centi-Celsius] uint16_t ThrottlerStatus; uint16_t spare; + + uint16_t StapmOriginalLimit; //[mW] + uint16_t StapmCurrentLimit; //[mW] + uint16_t ApuPower; //[mW] + uint16_t dGpuPower; //[mW] } SmuMetrics_t; diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h index d8c9b7f91fcc2825459eabc99ed7388ee9fa00b1..a5b4df1467130f47432ee6e3a15f503b3f9d2fc7 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h @@ -170,6 +170,8 @@ __SMU_DUMMY_MAP(SetSoftMinJpeg), \ __SMU_DUMMY_MAP(SetHardMinFclkByFreq), \ __SMU_DUMMY_MAP(DFCstateControl), \ + __SMU_DUMMY_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE), \ + __SMU_DUMMY_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE), \ #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(type) SMU_MSG_##type diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h index 786de774199043c0a1f34f003dc72adb5e2f9269..d5314d12628aef3a84a69d0cb5c9071344768432 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h @@ -27,7 +27,7 @@ #define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF #define SMU11_DRIVER_IF_VERSION_VG20 0x13 -#define SMU11_DRIVER_IF_VERSION_ARCT 0x10 +#define SMU11_DRIVER_IF_VERSION_ARCT 0x12 #define SMU11_DRIVER_IF_VERSION_NV10 0x33 #define SMU11_DRIVER_IF_VERSION_NV14 0x34 @@ -170,6 +170,8 @@ int smu_v11_0_write_pptable(struct smu_context *smu); int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu); +int smu_v11_0_set_driver_table_location(struct smu_context *smu); + int smu_v11_0_set_tool_table_location(struct smu_context *smu); int smu_v11_0_notify_memory_pool_location(struct smu_context *smu); @@ -262,4 +264,7 @@ int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize, uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu); +int smu_v11_0_set_performance_level(struct smu_context *smu, + enum amd_dpm_forced_level level); + #endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h index 373861ddccd0eaa3d74667c78030242b8eaa5c2f..406bfd187ce864f08de97c1ddb34fab2e0852f2d 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h @@ -120,7 +120,10 @@ #define PPSMC_MSG_GetVoltageByDpmOverdrive 0x45 #define PPSMC_MSG_BacoAudioD3PME 0x48 -#define PPSMC_Message_Count 0x49 
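+/* Dummy p-state toggling for DAL; see navi10_dummy_pstate_control() */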
+#define PPSMC_MSG_DALDisableDummyPstateChange 0x49 +#define PPSMC_MSG_DALEnableDummyPstateChange 0x4A + +#define PPSMC_Message_Count 0x4B typedef uint32_t PPSMC_Result; typedef uint32_t PPSMC_Msg; diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h index 3f1cd06e273c338ad4f501a2368bb12d895a9fd9..d79e54b5ebf6065a1d37ebd84dcd6f32c51c0765 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h @@ -90,4 +90,6 @@ int smu_v12_0_mode2_reset(struct smu_context *smu); int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max); +int smu_v12_0_set_driver_table_location(struct smu_context *smu); + #endif diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 15403b7979d6d4d32d43f94a09e0cb4ae6e8b2b5..19a9846b730e1b5e05769b2d4b3a3283620c0189 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -119,6 +119,10 @@ static struct smu_11_0_cmn2aisc_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg), MSG_MAP(BacoAudioD3PME, PPSMC_MSG_BacoAudioD3PME), MSG_MAP(ArmD3, PPSMC_MSG_ArmD3), + MSG_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE, PPSMC_MSG_DALDisableDummyPstateChange), + MSG_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE, PPSMC_MSG_DALEnableDummyPstateChange), + MSG_MAP(GetVoltageByDpm, PPSMC_MSG_GetVoltageByDpm), + MSG_MAP(GetVoltageByDpmOverdrive, PPSMC_MSG_GetVoltageByDpmOverdrive), }; static struct smu_11_0_cmn2aisc_mapping navi10_clk_map[SMU_CLK_COUNT] = { @@ -555,6 +559,10 @@ static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables) return -ENOMEM; smu_table->metrics_time = 0; + smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL); + if (!smu_table->watermarks_table) + return -ENOMEM; + return 0; } @@ -564,17 +572,20 @@ static int navi10_get_metrics_table(struct smu_context *smu, struct smu_table_context *smu_table= &smu->smu_table; int ret = 0; + mutex_lock(&smu->metrics_lock); if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) { ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)smu_table->metrics_table, false); if (ret) { pr_info("Failed to export SMU metrics table!\n"); + mutex_unlock(&smu->metrics_lock); return ret; } smu_table->metrics_time = jiffies; } memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t)); + mutex_unlock(&smu->metrics_lock); return ret; } @@ -730,6 +741,15 @@ static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_tabl return od_table->cap[feature]; } +static void navi10_od_setting_get_range(struct smu_11_0_overdrive_table *od_table, + enum SMU_11_0_ODSETTING_ID setting, + uint32_t *min, uint32_t *max) +{ + if (min) + *min = od_table->min[setting]; + if (max) + *max = od_table->max[setting]; +} static int navi10_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) @@ -748,6 +768,7 @@ static int navi10_print_clk_levels(struct smu_context *smu, OverDriveTable_t *od_table = (OverDriveTable_t *)table_context->overdrive_table; struct smu_11_0_overdrive_table *od_settings = smu->od_settings; + uint32_t min_value, max_value; switch (clk_type) { case SMU_GFXCLK: @@ -836,7 +857,7 @@ static int navi10_print_clk_levels(struct smu_context *smu, if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX))
break; size += sprintf(buf + size, "OD_MCLK:\n"); - size += sprintf(buf + size, "0: %uMHz\n", od_table->UclkFmax); + size += sprintf(buf + size, "1: %uMHz\n", od_table->UclkFmax); break; case SMU_OD_VDDC_CURVE: if (!smu->od_enabled || !od_table || !od_settings) break; @@ -860,6 +881,55 @@ static int navi10_print_clk_levels(struct smu_context *smu, } size += sprintf(buf + size, "%d: %uMHz @ %umV\n", i, curve_settings[0], curve_settings[1] / NAVI10_VOLTAGE_SCALE); } + break; + case SMU_OD_RANGE: + if (!smu->od_enabled || !od_table || !od_settings) + break; + size = sprintf(buf, "%s:\n", "OD_RANGE"); + + if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS)) { + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN, + &min_value, NULL); + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMAX, + NULL, &max_value); + size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n", + min_value, max_value); + } + + if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX)) { + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX, + &min_value, &max_value); + size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n", + min_value, max_value); + } + + if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE)) { + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1, + &min_value, &max_value); + size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMHz %10uMHz\n", + min_value, max_value); + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1, + &min_value, &max_value); + size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n", + min_value, max_value); + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2, + &min_value, &max_value); + size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMHz %10uMHz\n", + min_value, max_value); + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2, + &min_value, &max_value); + size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n", + min_value, max_value); + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3, + &min_value, &max_value); + size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMHz %10uMHz\n", + min_value, max_value); + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3, + &min_value, &max_value); + size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n", + min_value, max_value); + } + break; default: break; @@ -942,6 +1012,8 @@ static int navi10_get_clock_by_type_with_latency(struct smu_context *smu, case SMU_GFXCLK: case SMU_DCEFCLK: case SMU_SOCCLK: + case SMU_MCLK: + case SMU_UCLK: ret = smu_get_dpm_level_count(smu, clk_type, &level_count); if (ret) return ret; @@ -1374,7 +1446,7 @@ static int navi10_get_profiling_clk_mask(struct smu_context *smu, return ret; } -static int navi10_notify_smc_dispaly_config(struct smu_context *smu) +static int navi10_notify_smc_display_config(struct smu_context *smu) { struct smu_clocks min_clocks = {0}; struct pp_display_clock_request clock_req; @@ -1579,12 +1651,44 @@ static int navi10_get_uclk_dpm_states(struct smu_context *smu, uint32_t *clocks_ return 0; } -static int navi10_set_peak_clock_by_device(struct smu_context *smu) +static int navi10_set_performance_level(struct smu_context *smu, + enum amd_dpm_forced_level level); + +static int navi10_set_standard_performance_level(struct smu_context *smu) +{ + struct amdgpu_device
*adev = smu->adev; + int ret = 0; + uint32_t sclk_freq = 0, uclk_freq = 0; + + switch (adev->asic_type) { + case CHIP_NAVI10: + sclk_freq = NAVI10_UMD_PSTATE_PROFILING_GFXCLK; + uclk_freq = NAVI10_UMD_PSTATE_PROFILING_MEMCLK; + break; + case CHIP_NAVI14: + sclk_freq = NAVI14_UMD_PSTATE_PROFILING_GFXCLK; + uclk_freq = NAVI14_UMD_PSTATE_PROFILING_MEMCLK; + break; + default: + /* by default, this is the same as the auto performance level */ + return navi10_set_performance_level(smu, AMD_DPM_FORCED_LEVEL_AUTO); + } + + ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq); + if (ret) + return ret; + ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq); + if (ret) + return ret; + + return ret; +} + +static int navi10_set_peak_performance_level(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; int ret = 0; uint32_t sclk_freq = 0, uclk_freq = 0; - uint32_t uclk_level = 0; switch (adev->asic_type) { case CHIP_NAVI10: @@ -1625,14 +1729,16 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu) break; } break; + case CHIP_NAVI12: + sclk_freq = NAVI12_UMD_PSTATE_PEAK_GFXCLK; + break; default: - return -EINVAL; + ret = smu_get_dpm_level_range(smu, SMU_SCLK, NULL, &sclk_freq); + if (ret) + return ret; } - ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_level); - if (ret) - return ret; - ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, uclk_level - 1, &uclk_freq); + ret = smu_get_dpm_level_range(smu, SMU_UCLK, NULL, &uclk_freq); if (ret) return ret; @@ -1646,19 +1752,45 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu) return ret; } -static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) +static int navi10_set_performance_level(struct smu_context *smu, + enum amd_dpm_forced_level level) { int ret = 0; + uint32_t sclk_mask, mclk_mask, soc_mask; switch (level) { + case AMD_DPM_FORCED_LEVEL_HIGH: + ret = smu_force_dpm_limit_value(smu, true); + break; + case AMD_DPM_FORCED_LEVEL_LOW: + ret = smu_force_dpm_limit_value(smu, false); + break; + case AMD_DPM_FORCED_LEVEL_AUTO: + ret = smu_unforce_dpm_levels(smu); + break; + case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: + ret = navi10_set_standard_performance_level(smu); + break; + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: + ret = smu_get_profiling_clk_mask(smu, level, + &sclk_mask, + &mclk_mask, + &soc_mask); + if (ret) + return ret; + smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false); + smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false); + smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false); + break; case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: - ret = navi10_set_peak_clock_by_device(smu); + ret = navi10_set_peak_performance_level(smu); break; + case AMD_DPM_FORCED_LEVEL_MANUAL: + case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: default: - ret = -EINVAL; break; } - return ret; } @@ -1804,6 +1936,28 @@ static int navi10_od_setting_check_range(struct smu_11_0_overdrive_table *od_tab return 0; } +static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu, + uint16_t *voltage, + uint32_t freq) +{ + uint32_t param = (freq & 0xFFFF) | (PPCLK_GFXCLK << 16); + uint32_t value = 0; + int ret; + + ret = smu_send_smc_msg_with_param(smu, + SMU_MSG_GetVoltageByDpm, + param); + if (ret) { + pr_err("[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!"); + return ret; + } + + smu_read_smc_arg(smu, &value); + *voltage = (uint16_t)value; + + return 0; +} + static int
navi10_setup_od_limits(struct smu_context *smu) { struct smu_11_0_overdrive_table *overdrive_table = NULL; struct smu_11_0_powerplay_table *powerplay_table = NULL; @@ -1823,23 +1977,54 @@ static int navi10_setup_od_limits(struct smu_context *smu) { } static int navi10_set_default_od_settings(struct smu_context *smu, bool initialize) { - OverDriveTable_t *od_table; + OverDriveTable_t *od_table, *boot_od_table; int ret = 0; ret = smu_v11_0_set_default_od_settings(smu, initialize, sizeof(OverDriveTable_t)); if (ret) return ret; + od_table = (OverDriveTable_t *)smu->smu_table.overdrive_table; + boot_od_table = (OverDriveTable_t *)smu->smu_table.boot_overdrive_table; if (initialize) { ret = navi10_setup_od_limits(smu); if (ret) { pr_err("Failed to retrieve board OD limits\n"); return ret; } + if (od_table) { + if (!od_table->GfxclkVolt1) { + ret = navi10_overdrive_get_gfx_clk_base_voltage(smu, + &od_table->GfxclkVolt1, + od_table->GfxclkFreq1); + if (ret) + od_table->GfxclkVolt1 = 0; + if (boot_od_table) + boot_od_table->GfxclkVolt1 = od_table->GfxclkVolt1; + } + + if (!od_table->GfxclkVolt2) { + ret = navi10_overdrive_get_gfx_clk_base_voltage(smu, + &od_table->GfxclkVolt2, + od_table->GfxclkFreq2); + if (ret) + od_table->GfxclkVolt2 = 0; + if (boot_od_table) + boot_od_table->GfxclkVolt2 = od_table->GfxclkVolt2; + } + if (!od_table->GfxclkVolt3) { + ret = navi10_overdrive_get_gfx_clk_base_voltage(smu, + &od_table->GfxclkVolt3, + od_table->GfxclkFreq3); + if (ret) + od_table->GfxclkVolt3 = 0; + if (boot_od_table) + boot_od_table->GfxclkVolt3 = od_table->GfxclkVolt3; + } + } } - od_table = (OverDriveTable_t *)smu->smu_table.overdrive_table; if (od_table) { navi10_dump_od_table(od_table); } @@ -1935,6 +2120,13 @@ static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABL return ret; od_table->UclkFmax = input[1]; break; + case PP_OD_RESTORE_DEFAULT_TABLE: + if (!(table_context->overdrive_table && table_context->boot_overdrive_table)) { + pr_err("Overdrive table was not initialized!\n"); + return -EINVAL; + } + memcpy(table_context->overdrive_table, table_context->boot_overdrive_table, sizeof(OverDriveTable_t)); + break; case PP_OD_COMMIT_DPM_TABLE: navi10_dump_od_table(od_table); ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, true); @@ -2024,6 +2216,61 @@ static int navi10_run_btc(struct smu_context *smu) return ret; } +static int navi10_dummy_pstate_control(struct smu_context *smu, bool enable) +{ + int result = 0; + + if (!enable) + result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE); + else + result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE); + + return result; +} + +static int navi10_disable_umc_cdr_12gbps_workaround(struct smu_context *smu) +{ + uint32_t uclk_count, uclk_min, uclk_max; + uint32_t smu_version; + int ret = 0; + + ret = smu_get_smc_version(smu, NULL, &smu_version); + if (ret) + return ret; + + /* This workaround is available only for 42.50 or later SMC firmware */ + if (smu_version < 0x2A3200) + return 0; + + ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_count); + if (ret) + return ret; + + ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)0, &uclk_min); + if (ret) + return ret; + + ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)(uclk_count - 1), &uclk_max); + if (ret) + return ret; + + /* Force UCLK out of the highest DPM */ + ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, uclk_min); + if (ret) + return ret; + + /* Revert the UCLK Hardmax */ + ret =
smu_set_hard_freq_range(smu, SMU_UCLK, 0, uclk_max); + if (ret) + return ret; + + /* + * In this case, the SMU already disabled the dummy pstate during + * enablement of UCLK DPM, so we have to re-enable it. + */ + return navi10_dummy_pstate_control(smu, true); +} + static const struct pptable_funcs navi10_ppt_funcs = { .tables_init = navi10_tables_init, .alloc_dpm_context = navi10_allocate_dpm_context, @@ -2047,7 +2294,7 @@ static const struct pptable_funcs navi10_ppt_funcs = { .get_clock_by_type_with_latency = navi10_get_clock_by_type_with_latency, .pre_display_config_changed = navi10_pre_display_config_changed, .display_config_changed = navi10_display_config_changed, - .notify_smc_dispaly_config = navi10_notify_smc_dispaly_config, + .notify_smc_display_config = navi10_notify_smc_display_config, .force_dpm_limit_value = navi10_force_dpm_limit_value, .unforce_dpm_levels = navi10_unforce_dpm_levels, .is_dpm_running = navi10_is_dpm_running, @@ -2080,6 +2327,7 @@ static const struct pptable_funcs navi10_ppt_funcs = { .check_fw_version = smu_v11_0_check_fw_version, .write_pptable = smu_v11_0_write_pptable, .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep, + .set_driver_table_location = smu_v11_0_set_driver_table_location, .set_tool_table_location = smu_v11_0_set_tool_table_location, .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, .system_features_control = smu_v11_0_system_features_control, @@ -2117,6 +2365,7 @@ static const struct pptable_funcs navi10_ppt_funcs = { .od_edit_dpm_table = navi10_od_edit_dpm_table, .get_pptable_power_limit = navi10_get_pptable_power_limit, .run_btc = navi10_run_btc, + .disable_umc_cdr_12gbps_workaround = navi10_disable_umc_cdr_12gbps_workaround, }; void navi10_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h index ec03c7992f6db2b987638248b065ef5af23fe617..2abb4ba01db1a499c328506f75d78c084661b818 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h @@ -27,12 +27,26 @@ #define NAVI10_PEAK_SCLK_XT (1755) #define NAVI10_PEAK_SCLK_XL (1625) +#define NAVI10_UMD_PSTATE_PROFILING_GFXCLK (1300) +#define NAVI10_UMD_PSTATE_PROFILING_SOCCLK (980) +#define NAVI10_UMD_PSTATE_PROFILING_MEMCLK (625) +#define NAVI10_UMD_PSTATE_PROFILING_VCLK (980) +#define NAVI10_UMD_PSTATE_PROFILING_DCLK (850) + #define NAVI14_UMD_PSTATE_PEAK_XT_GFXCLK (1670) #define NAVI14_UMD_PSTATE_PEAK_XTM_GFXCLK (1448) #define NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK (1181) #define NAVI14_UMD_PSTATE_PEAK_XTX_GFXCLK (1717) #define NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK (1448) +#define NAVI14_UMD_PSTATE_PROFILING_GFXCLK (1200) +#define NAVI14_UMD_PSTATE_PROFILING_SOCCLK (900) +#define NAVI14_UMD_PSTATE_PROFILING_MEMCLK (600) +#define NAVI14_UMD_PSTATE_PROFILING_VCLK (900) +#define NAVI14_UMD_PSTATE_PROFILING_DCLK (800) + +#define NAVI12_UMD_PSTATE_PEAK_GFXCLK (1100) + #define NAVI10_VOLTAGE_SCALE (4) #define smnPCIE_LC_SPEED_CNTL 0x11140290 diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c index 89a54f8e08d36e856934910587679754ceeb7c2c..861e6410363bc764b68d6f62721854b78436f25f 100644 --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c @@ -171,17 +171,20 @@ static int renoir_get_metrics_table(struct smu_context *smu, struct smu_table_context *smu_table= &smu->smu_table; int ret = 0; + mutex_lock(&smu->metrics_lock); if (!smu_table->metrics_time ||
time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) { ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)smu_table->metrics_table, false); if (ret) { pr_info("Failed to export SMU metrics table!\n"); + mutex_unlock(&smu->metrics_lock); return ret; } smu_table->metrics_time = jiffies; } memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t)); + mutex_unlock(&smu->metrics_lock); return ret; } @@ -206,6 +209,10 @@ static int renoir_tables_init(struct smu_context *smu, struct smu_table *tables) return -ENOMEM; smu_table->metrics_time = 0; + smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL); + if (!smu_table->watermarks_table) + return -ENOMEM; + return 0; } @@ -239,8 +246,7 @@ static int renoir_print_clk_levels(struct smu_context *smu, memset(&metrics, 0, sizeof(metrics)); - ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, - (void *)&metrics, false); + ret = renoir_get_metrics_table(smu, &metrics); if (ret) return ret; @@ -706,19 +712,43 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu) return ret; } -static int renoir_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) +static int renoir_set_performance_level(struct smu_context *smu, + enum amd_dpm_forced_level level) { int ret = 0; + uint32_t sclk_mask, mclk_mask, soc_mask; switch (level) { + case AMD_DPM_FORCED_LEVEL_HIGH: + ret = smu_force_dpm_limit_value(smu, true); + break; + case AMD_DPM_FORCED_LEVEL_LOW: + ret = smu_force_dpm_limit_value(smu, false); + break; + case AMD_DPM_FORCED_LEVEL_AUTO: + case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: + ret = smu_unforce_dpm_levels(smu); + break; + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: + ret = smu_get_profiling_clk_mask(smu, level, + &sclk_mask, + &mclk_mask, + &soc_mask); + if (ret) + return ret; + smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false); + smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false); + smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false); + break; case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: ret = renoir_set_peak_clock_by_device(smu); break; + case AMD_DPM_FORCED_LEVEL_MANUAL: + case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: default: - ret = -EINVAL; break; } - return ret; } @@ -777,9 +807,17 @@ static int renoir_set_watermarks_table( } /* pass data to smu controller */ - ret = smu_write_watermarks_table(smu); + if ((smu->watermarks_bitmap & WATERMARKS_EXIST) && + !(smu->watermarks_bitmap & WATERMARKS_LOADED)) { + ret = smu_write_watermarks_table(smu); + if (ret) { + pr_err("Failed to update WMTABLE!"); + return ret; + } + smu->watermarks_bitmap |= WATERMARKS_LOADED; + } - return ret; + return 0; } static int renoir_get_power_profile_mode(struct smu_context *smu, @@ -882,6 +920,7 @@ static const struct pptable_funcs renoir_ppt_funcs = { .get_dpm_ultimate_freq = smu_v12_0_get_dpm_ultimate_freq, .mode2_reset = smu_v12_0_mode2_reset, .set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range, + .set_driver_table_location = smu_v12_0_set_driver_table_location, }; void renoir_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h index 60ce1fccaeb51020444ad212c8de734c501ec128..7bd200ffcda8b873fcd2a76d23440abca4ae0b49 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_internal.h +++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h @@ -61,6 +61,8 @@ ((smu)->ppt_funcs->write_pptable ? 
(smu)->ppt_funcs->write_pptable((smu)) : 0) #define smu_set_min_dcef_deep_sleep(smu) \ ((smu)->ppt_funcs->set_min_dcef_deep_sleep ? (smu)->ppt_funcs->set_min_dcef_deep_sleep((smu)) : 0) +#define smu_set_driver_table_location(smu) \ + ((smu)->ppt_funcs->set_driver_table_location ? (smu)->ppt_funcs->set_driver_table_location((smu)) : 0) #define smu_set_tool_table_location(smu) \ ((smu)->ppt_funcs->set_tool_table_location ? (smu)->ppt_funcs->set_tool_table_location((smu)) : 0) #define smu_notify_memory_pool_location(smu) \ @@ -129,8 +131,8 @@ int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg); ((smu)->ppt_funcs->display_config_changed ? (smu)->ppt_funcs->display_config_changed((smu)) : 0) #define smu_apply_clocks_adjust_rules(smu) \ ((smu)->ppt_funcs->apply_clocks_adjust_rules ? (smu)->ppt_funcs->apply_clocks_adjust_rules((smu)) : 0) -#define smu_notify_smc_dispaly_config(smu) \ - ((smu)->ppt_funcs->notify_smc_dispaly_config ? (smu)->ppt_funcs->notify_smc_dispaly_config((smu)) : 0) +#define smu_notify_smc_display_config(smu) \ + ((smu)->ppt_funcs->notify_smc_display_config ? (smu)->ppt_funcs->notify_smc_display_config((smu)) : 0) #define smu_force_dpm_limit_value(smu, highest) \ ((smu)->ppt_funcs->force_dpm_limit_value ? (smu)->ppt_funcs->force_dpm_limit_value((smu), (highest)) : 0) #define smu_unforce_dpm_levels(smu) \ @@ -205,4 +207,7 @@ int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg); #define smu_update_pcie_parameters(smu, pcie_gen_cap, pcie_width_cap) \ ((smu)->ppt_funcs->update_pcie_parameters ? (smu)->ppt_funcs->update_pcie_parameters((smu), (pcie_gen_cap), (pcie_width_cap)) : 0) +#define smu_disable_umc_cdr_12gbps_workaround(smu) \ + ((smu)->ppt_funcs->disable_umc_cdr_12gbps_workaround ? (smu)->ppt_funcs->disable_umc_cdr_12gbps_workaround((smu)) : 0) + #endif diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index 7781d245f8ef1d4137ad8811a1e54540f9cafb6e..0dc49479a7ebdabe0650d06b29db032c91843845 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -450,8 +450,10 @@ int smu_v11_0_fini_smc_tables(struct smu_context *smu) kfree(smu_table->tables); kfree(smu_table->metrics_table); + kfree(smu_table->watermarks_table); smu_table->tables = NULL; smu_table->metrics_table = NULL; + smu_table->watermarks_table = NULL; smu_table->metrics_time = 0; ret = smu_v11_0_fini_dpm_context(smu); @@ -774,6 +776,24 @@ int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu) return smu_v11_0_set_deep_sleep_dcefclk(smu, table_context->boot_values.dcefclk / 100); } +int smu_v11_0_set_driver_table_location(struct smu_context *smu) +{ + struct smu_table *driver_table = &smu->smu_table.driver_table; + int ret = 0; + + if (driver_table->mc_address) { + ret = smu_send_smc_msg_with_param(smu, + SMU_MSG_SetDriverDramAddrHigh, + upper_32_bits(driver_table->mc_address)); + if (!ret) + ret = smu_send_smc_msg_with_param(smu, + SMU_MSG_SetDriverDramAddrLow, + lower_32_bits(driver_table->mc_address)); + } + + return ret; +} + int smu_v11_0_set_tool_table_location(struct smu_context *smu) { int ret = 0; @@ -835,27 +855,33 @@ int smu_v11_0_get_enabled_mask(struct smu_context *smu, uint32_t *feature_mask, uint32_t num) { uint32_t feature_mask_high = 0, feature_mask_low = 0; + struct smu_feature *feature = &smu->smu_feature; int ret = 0; if (!feature_mask || num < 2) return -EINVAL; - ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh); - if (ret) - return ret; - 
ret = smu_read_smc_arg(smu, &feature_mask_high); - if (ret) - return ret; + if (bitmap_empty(feature->enabled, feature->feature_num)) { + ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh); + if (ret) + return ret; + ret = smu_read_smc_arg(smu, &feature_mask_high); + if (ret) + return ret; - ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow); - if (ret) - return ret; - ret = smu_read_smc_arg(smu, &feature_mask_low); - if (ret) - return ret; + ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow); + if (ret) + return ret; + ret = smu_read_smc_arg(smu, &feature_mask_low); + if (ret) + return ret; - feature_mask[0] = feature_mask_low; - feature_mask[1] = feature_mask_high; + feature_mask[0] = feature_mask_low; + feature_mask[1] = feature_mask_high; + } else { + bitmap_copy((unsigned long *)feature_mask, feature->enabled, + feature->feature_num); + } return ret; } @@ -867,21 +893,24 @@ int smu_v11_0_system_features_control(struct smu_context *smu, uint32_t feature_mask[2]; int ret = 0; - if (smu->pm_enabled) { - ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures : - SMU_MSG_DisableAllSmuFeatures)); - if (ret) - return ret; - } - - ret = smu_feature_get_enabled_mask(smu, feature_mask, 2); + ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures : + SMU_MSG_DisableAllSmuFeatures)); if (ret) return ret; - bitmap_copy(feature->enabled, (unsigned long *)&feature_mask, - feature->feature_num); - bitmap_copy(feature->supported, (unsigned long *)&feature_mask, - feature->feature_num); + if (en) { + ret = smu_feature_get_enabled_mask(smu, feature_mask, 2); + if (ret) + return ret; + + bitmap_copy(feature->enabled, (unsigned long *)&feature_mask, + feature->feature_num); + bitmap_copy(feature->supported, (unsigned long *)&feature_mask, + feature->feature_num); + } else { + bitmap_zero(feature->enabled, feature->feature_num); + bitmap_zero(feature->supported, feature->feature_num); + } return ret; } @@ -1125,11 +1154,12 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu, int low = SMU_THERMAL_MINIMUM_ALERT_TEMP; int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP; uint32_t val; + struct smu_table_context *table_context = &smu->smu_table; + struct smu_11_0_powerplay_table *powerplay_table = table_context->power_play_table; low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP, range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES); - high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP, - range.max / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES); + high = min((uint16_t)SMU_THERMAL_MAXIMUM_ALERT_TEMP, powerplay_table->software_shutdown_temp); if (low > high) return -EINVAL; @@ -1852,6 +1882,12 @@ int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize, pr_err("Failed to export overdrive table!\n"); return ret; } + if (!table_context->boot_overdrive_table) { + table_context->boot_overdrive_table = kmemdup(table_context->overdrive_table, overdrive_table_size, GFP_KERNEL); + if (!table_context->boot_overdrive_table) { + return -ENOMEM; + } + } } ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, true); if (ret) { @@ -1860,3 +1896,42 @@ int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize, } return ret; } + +int smu_v11_0_set_performance_level(struct smu_context *smu, + enum amd_dpm_forced_level level) +{ + int ret = 0; + uint32_t sclk_mask, mclk_mask, soc_mask; + + switch (level) { + case AMD_DPM_FORCED_LEVEL_HIGH: + ret = smu_force_dpm_limit_value(smu, true); + break; + case AMD_DPM_FORCED_LEVEL_LOW: + ret 
= smu_force_dpm_limit_value(smu, false); + break; + case AMD_DPM_FORCED_LEVEL_AUTO: + case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: + ret = smu_unforce_dpm_levels(smu); + break; + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: + case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: + ret = smu_get_profiling_clk_mask(smu, level, + &sclk_mask, + &mclk_mask, + &soc_mask); + if (ret) + return ret; + smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false); + smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false); + smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false); + break; + case AMD_DPM_FORCED_LEVEL_MANUAL: + case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: + default: + break; + } + return ret; +} + diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c index 2ac7f2f231b6ac1d9dc63e8caf4f4dfb1a8a9397..870e6db2907eb67e43d75ee26850c173d2985960 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c @@ -159,7 +159,7 @@ int smu_v12_0_check_fw_version(struct smu_context *smu) int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate) { - if (!(smu->adev->flags & AMD_IS_APU)) + if (!smu->is_apu) return 0; if (gate) @@ -170,7 +170,7 @@ int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate) int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate) { - if (!(smu->adev->flags & AMD_IS_APU)) + if (!smu->is_apu) return 0; if (gate) @@ -181,7 +181,7 @@ int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate) int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate) { - if (!(smu->adev->flags & AMD_IS_APU)) + if (!smu->is_apu) return 0; if (gate) @@ -318,14 +318,6 @@ int smu_v12_0_fini_smc_tables(struct smu_context *smu) int smu_v12_0_populate_smc_tables(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; - struct smu_table *table = NULL; - - table = &smu_table->tables[SMU_TABLE_DPMCLOCKS]; - if (!table) - return -EINVAL; - - if (!table->cpu_addr) - return -EINVAL; return smu_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false); } @@ -514,3 +506,21 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_ return ret; } + +int smu_v12_0_set_driver_table_location(struct smu_context *smu) +{ + struct smu_table *driver_table = &smu->smu_table.driver_table; + int ret = 0; + + if (driver_table->mc_address) { + ret = smu_send_smc_msg_with_param(smu, + SMU_MSG_SetDriverDramAddrHigh, + upper_32_bits(driver_table->mc_address)); + if (!ret) + ret = smu_send_smc_msg_with_param(smu, + SMU_MSG_SetDriverDramAddrLow, + lower_32_bits(driver_table->mc_address)); + } + + return ret; +} diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c index aa0ee2b46135421477deb36d6f65a810b33ebc37..2319400a3fcb60d046323d422e7c9484ccfeaa32 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c @@ -137,7 +137,7 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr, priv->smu_tables.entry[table_id].table_id); /* flush hdp cache */ - adev->nbio.funcs->hdp_flush(adev, NULL); + amdgpu_asic_flush_hdp(adev, NULL); memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table, priv->smu_tables.entry[table_id].size); @@ -150,6 +150,7 @@ static int smu10_copy_table_to_smc(struct pp_hwmgr *hwmgr, { struct smu10_smumgr *priv = (struct smu10_smumgr *)(hwmgr->smu_backend); + 
struct amdgpu_device *adev = hwmgr->adev; PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE, "Invalid SMU Table ID!", return -EINVAL;); @@ -161,6 +162,8 @@ static int smu10_copy_table_to_smc(struct pp_hwmgr *hwmgr, memcpy(priv->smu_tables.entry[table_id].table, table, priv->smu_tables.entry[table_id].size); + amdgpu_asic_flush_hdp(adev, NULL); + smu10_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c index 39427ca32a150dc5f9edf17e84177198967bf757..715564009089615e6ce9b0dbc86ec2fe78307a9f 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c @@ -58,7 +58,7 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr, priv->smu_tables.entry[table_id].table_id); /* flush hdp cache */ - adev->nbio.funcs->hdp_flush(adev, NULL); + amdgpu_asic_flush_hdp(adev, NULL); memcpy(table, priv->smu_tables.entry[table_id].table, priv->smu_tables.entry[table_id].size); @@ -70,6 +70,7 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr, uint8_t *table, int16_t table_id) { struct vega10_smumgr *priv = hwmgr->smu_backend; + struct amdgpu_device *adev = hwmgr->adev; /* under sriov, vbios or hypervisor driver * has already copy table to smc so here only skip it @@ -87,6 +88,8 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr, memcpy(priv->smu_tables.entry[table_id].table, table, priv->smu_tables.entry[table_id].size); + amdgpu_asic_flush_hdp(adev, NULL); + smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c index 90c782c132d255a96426cf303f8eb4c09eef3f0b..275dbf65f1a0c5428e21096948806448db7fbf5e 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c @@ -66,7 +66,7 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr, return -EINVAL); /* flush hdp cache */ - adev->nbio.funcs->hdp_flush(adev, NULL); + amdgpu_asic_flush_hdp(adev, NULL); memcpy(table, priv->smu_tables.entry[table_id].table, priv->smu_tables.entry[table_id].size); @@ -84,6 +84,7 @@ static int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr, { struct vega12_smumgr *priv = (struct vega12_smumgr *)(hwmgr->smu_backend); + struct amdgpu_device *adev = hwmgr->adev; PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT, "Invalid SMU Table ID!", return -EINVAL); @@ -95,6 +96,8 @@ static int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr, memcpy(priv->smu_tables.entry[table_id].table, table, priv->smu_tables.entry[table_id].size); + amdgpu_asic_flush_hdp(adev, NULL); + PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0, @@ -125,20 +128,20 @@ int vega12_enable_smc_features(struct pp_hwmgr *hwmgr, if (enable) { PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low) == 0, - "[EnableDisableSMCFeatures] Attemp to enable SMU features Low failed!", + "[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!", return -EINVAL); PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high) 
== 0, - "[EnableDisableSMCFeatures] Attemp to enable SMU features High failed!", + "[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!", return -EINVAL); } else { PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low) == 0, - "[EnableDisableSMCFeatures] Attemp to disable SMU features Low failed!", + "[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!", return -EINVAL); PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high) == 0, - "[EnableDisableSMCFeatures] Attemp to disable SMU features High failed!", + "[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!", return -EINVAL); } @@ -155,13 +158,13 @@ int vega12_get_enabled_smc_features(struct pp_hwmgr *hwmgr, PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeaturesLow) == 0, - "[GetEnabledSMCFeatures] Attemp to get SMU features Low failed!", + "[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!", return -EINVAL); smc_features_low = smu9_get_argument(hwmgr); PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeaturesHigh) == 0, - "[GetEnabledSMCFeatures] Attemp to get SMU features High failed!", + "[GetEnabledSMCFeatures] Attempt to get SMU features High failed!", return -EINVAL); smc_features_high = smu9_get_argument(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c index f604612f411f3a1f2c2e52eebc4ef3698a0a55f7..49e5ef3e3876cb53de5d3b2990a2470f62abbe8f 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c @@ -189,7 +189,7 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr, return ret); /* flush hdp cache */ - adev->nbio.funcs->hdp_flush(adev, NULL); + amdgpu_asic_flush_hdp(adev, NULL); memcpy(table, priv->smu_tables.entry[table_id].table, priv->smu_tables.entry[table_id].size); @@ -207,6 +207,7 @@ static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr, { struct vega20_smumgr *priv = (struct vega20_smumgr *)(hwmgr->smu_backend); + struct amdgpu_device *adev = hwmgr->adev; int ret = 0; PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT, @@ -219,6 +220,8 @@ static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr, memcpy(priv->smu_tables.entry[table_id].table, table, priv->smu_tables.entry[table_id].size); + amdgpu_asic_flush_hdp(adev, NULL); + PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, upper_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0, @@ -242,11 +245,14 @@ int vega20_set_activity_monitor_coeff(struct pp_hwmgr *hwmgr, { struct vega20_smumgr *priv = (struct vega20_smumgr *)(hwmgr->smu_backend); + struct amdgpu_device *adev = hwmgr->adev; int ret = 0; memcpy(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size); + amdgpu_asic_flush_hdp(adev, NULL); + PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0, @@ -290,7 +296,7 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr, return ret); /* flush hdp cache */ - adev->nbio.funcs->hdp_flush(adev, NULL); + amdgpu_asic_flush_hdp(adev, NULL); memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, 
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size); @@ -310,20 +316,20 @@ int vega20_enable_smc_features(struct pp_hwmgr *hwmgr, if (enable) { PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low)) == 0, - "[EnableDisableSMCFeatures] Attemp to enable SMU features Low failed!", + "[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!", return ret); PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high)) == 0, - "[EnableDisableSMCFeatures] Attemp to enable SMU features High failed!", + "[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!", return ret); } else { PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low)) == 0, - "[EnableDisableSMCFeatures] Attemp to disable SMU features Low failed!", + "[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!", return ret); PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high)) == 0, - "[EnableDisableSMCFeatures] Attemp to disable SMU features High failed!", + "[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!", return ret); } @@ -341,12 +347,12 @@ int vega20_get_enabled_smc_features(struct pp_hwmgr *hwmgr, PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeaturesLow)) == 0, - "[GetEnabledSMCFeatures] Attemp to get SMU features Low failed!", + "[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!", return ret); smc_features_low = vega20_get_argument(hwmgr); PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeaturesHigh)) == 0, - "[GetEnabledSMCFeatures] Attemp to get SMU features High failed!", + "[GetEnabledSMCFeatures] Attempt to get SMU features High failed!", return ret); smc_features_high = vega20_get_argument(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c index 12bcc3e3ba99058cd9eb78d5f020b9760d52e45c..4ad8d6c14ee5e6a323dec8d129d0ae6dad4af184 100644 --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c @@ -338,6 +338,10 @@ static int vega20_tables_init(struct smu_context *smu, struct smu_table *tables) return -ENOMEM; smu_table->metrics_time = 0; + smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL); + if (!smu_table->watermarks_table) + return -ENOMEM; + return 0; } @@ -1678,17 +1682,20 @@ static int vega20_get_metrics_table(struct smu_context *smu, struct smu_table_context *smu_table= &smu->smu_table; int ret = 0; + mutex_lock(&smu->metrics_lock); if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) { ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)smu_table->metrics_table, false); if (ret) { pr_info("Failed to export SMU metrics table!\n"); + mutex_unlock(&smu->metrics_lock); return ret; } smu_table->metrics_time = jiffies; } memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t)); + mutex_unlock(&smu->metrics_lock); return ret; } @@ -1699,22 +1706,11 @@ static int vega20_set_default_od_settings(struct smu_context *smu, struct smu_table_context *table_context = &smu->smu_table; int ret; - if (initialize) { - if (table_context->overdrive_table) - return -EINVAL; - - table_context->overdrive_table = kzalloc(sizeof(OverDriveTable_t), 
GFP_KERNEL); - - if (!table_context->overdrive_table) - return -ENOMEM; - - ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, - table_context->overdrive_table, false); - if (ret) { - pr_err("Failed to export over drive table!\n"); - return ret; - } + ret = smu_v11_0_set_default_od_settings(smu, initialize, sizeof(OverDriveTable_t)); + if (ret) + return ret; + if (initialize) { ret = vega20_set_default_od8_setttings(smu); if (ret) return ret; @@ -2232,7 +2228,7 @@ static int vega20_apply_clocks_adjust_rules(struct smu_context *smu) } static int -vega20_notify_smc_dispaly_config(struct smu_context *smu) +vega20_notify_smc_display_config(struct smu_context *smu) { struct vega20_dpm_table *dpm_table = smu->smu_dpm.dpm_context; struct vega20_single_dpm_table *memtable = &dpm_table->mem_table; @@ -2771,12 +2767,11 @@ static int vega20_odn_edit_dpm_table(struct smu_context *smu, break; case PP_OD_RESTORE_DEFAULT_TABLE: - ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, false); - if (ret) { - pr_err("Failed to export over drive table!\n"); - return ret; + if (!(table_context->overdrive_table && table_context->boot_overdrive_table)) { + pr_err("Overdrive table was not initialized!\n"); + return -EINVAL; } - + memcpy(table_context->overdrive_table, table_context->boot_overdrive_table, sizeof(OverDriveTable_t)); break; case PP_OD_COMMIT_DPM_TABLE: @@ -3191,6 +3186,7 @@ static const struct pptable_funcs vega20_ppt_funcs = { .get_od_percentage = vega20_get_od_percentage, .get_power_profile_mode = vega20_get_power_profile_mode, .set_power_profile_mode = vega20_set_power_profile_mode, + .set_performance_level = smu_v11_0_set_performance_level, .set_od_percentage = vega20_set_od_percentage, .set_default_od_settings = vega20_set_default_od_settings, .od_edit_dpm_table = vega20_odn_edit_dpm_table, @@ -3200,7 +3196,7 @@ static const struct pptable_funcs vega20_ppt_funcs = { .pre_display_config_changed = vega20_pre_display_config_changed, .display_config_changed = vega20_display_config_changed, .apply_clocks_adjust_rules = vega20_apply_clocks_adjust_rules, - .notify_smc_dispaly_config = vega20_notify_smc_dispaly_config, + .notify_smc_display_config = vega20_notify_smc_display_config, .force_dpm_limit_value = vega20_force_dpm_limit_value, .unforce_dpm_levels = vega20_unforce_dpm_levels, .get_profiling_clk_mask = vega20_get_profiling_clk_mask, @@ -3228,6 +3224,7 @@ static const struct pptable_funcs vega20_ppt_funcs = { .check_fw_version = smu_v11_0_check_fw_version, .write_pptable = smu_v11_0_write_pptable, .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep, + .set_driver_table_location = smu_v11_0_set_driver_table_location, .set_tool_table_location = smu_v11_0_set_tool_table_location, .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, .system_features_control = smu_v11_0_system_features_control, diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c index 875a3a9eabfa1e08068dd265c5dbfc7cf3f36411..7d0e7b031e447f24a83e9197d92879754eab4169 100644 --- a/drivers/gpu/drm/arm/malidp_mw.c +++ b/drivers/gpu/drm/arm/malidp_mw.c @@ -56,7 +56,7 @@ malidp_mw_connector_mode_valid(struct drm_connector *connector, return MODE_OK; } -const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = { +static const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = { .get_modes = malidp_mw_connector_get_modes, .mode_valid = malidp_mw_connector_mode_valid, }; diff --git 
a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index 6fab71985cd49eeb000431172b315399c26ed504..6effe532f82005805c93b1ac98b059011a91e54d 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -1289,21 +1289,19 @@ struct drm_crtc *analogix_dp_get_new_crtc(struct analogix_dp_device *dp, return conn_state->crtc; } -static void -analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge, - struct drm_bridge_state *old_bridge_state) +static void analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) { - struct drm_atomic_state *old_state = old_bridge_state->base.state; struct analogix_dp_device *dp = bridge->driver_private; struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state; int ret; - crtc = analogix_dp_get_new_crtc(dp, old_state); + crtc = analogix_dp_get_new_crtc(dp, state); if (!crtc) return; - old_crtc_state = drm_atomic_get_old_crtc_state(old_state, crtc); + old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc); /* Don't touch the panel if we're coming back from PSR */ if (old_crtc_state && old_crtc_state->self_refresh_active) return; @@ -1368,22 +1366,20 @@ out_dp_clk_pre: return ret; } -static void -analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge, - struct drm_bridge_state *old_bridge_state) +static void analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) { - struct drm_atomic_state *old_state = old_bridge_state->base.state; struct analogix_dp_device *dp = bridge->driver_private; struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state; int timeout_loop = 0; int ret; - crtc = analogix_dp_get_new_crtc(dp, old_state); + crtc = analogix_dp_get_new_crtc(dp, state); if (!crtc) return; - old_crtc_state = drm_atomic_get_old_crtc_state(old_state, crtc); + old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc); /* Not a full enable, just disable PSR and continue */ if (old_crtc_state && old_crtc_state->self_refresh_active) { ret = analogix_dp_disable_psr(dp); @@ -1444,20 +1440,18 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge) dp->dpms_mode = DRM_MODE_DPMS_OFF; } -static void -analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge, - struct drm_bridge_state *old_bridge_state) +static void analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state) { - struct drm_atomic_state *old_state = old_bridge_state->base.state; struct analogix_dp_device *dp = bridge->driver_private; struct drm_crtc *crtc; struct drm_crtc_state *new_crtc_state = NULL; - crtc = analogix_dp_get_new_crtc(dp, old_state); + crtc = analogix_dp_get_new_crtc(dp, state); if (!crtc) goto out; - new_crtc_state = drm_atomic_get_new_crtc_state(old_state, crtc); + new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); if (!new_crtc_state) goto out; @@ -1469,21 +1463,20 @@ out: analogix_dp_bridge_disable(bridge); } -static void -analogix_dp_bridge_atomic_post_disable(struct drm_bridge *bridge, - struct drm_bridge_state *old_bridge_state) +static +void analogix_dp_bridge_atomic_post_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state) { - struct drm_atomic_state *old_state = old_bridge_state->base.state; struct analogix_dp_device *dp = bridge->driver_private; struct drm_crtc *crtc; struct drm_crtc_state *new_crtc_state; int ret; - crtc = analogix_dp_get_new_crtc(dp, old_state); + crtc = 
analogix_dp_get_new_crtc(dp, state); if (!crtc) return; - new_crtc_state = drm_atomic_get_new_crtc_state(old_state, crtc); + new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); if (!new_crtc_state || !new_crtc_state->self_refresh_active) return; diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index bf1b9c37d5152cfeff714ba581022b4002b1c96f..d33691512a8ea885f0d030f12968bd1acc836759 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -30,7 +30,6 @@ #include #include -#include #include #include #include @@ -1018,44 +1017,6 @@ static void drm_atomic_connector_print_state(struct drm_printer *p, connector->funcs->atomic_print_state(p, state); } -/** - * drm_atomic_add_encoder_bridges - add bridges attached to an encoder - * @state: atomic state - * @encoder: DRM encoder - * - * This function adds all bridges attached to @encoder. This is needed to add - * bridge states to @state and make them available when - * &bridge_funcs.atomic_{check,pre_enable,enable,disable_post_disable}() are - * called - * - * Returns: - * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK - * then the w/w mutex code has detected a deadlock and the entire atomic - * sequence must be restarted. All other errors are fatal. - */ -int -drm_atomic_add_encoder_bridges(struct drm_atomic_state *state, - struct drm_encoder *encoder) -{ - struct drm_bridge_state *bridge_state; - struct drm_bridge *bridge; - - if (!encoder) - return 0; - - DRM_DEBUG_ATOMIC("Adding all bridges for [encoder:%d:%s] to %p\n", - encoder->base.id, encoder->name, state); - - drm_for_each_bridge_in_chain(encoder, bridge) { - bridge_state = drm_atomic_get_bridge_state(state, bridge); - if (IS_ERR(bridge_state)) - return PTR_ERR(bridge_state); - } - - return 0; -} -EXPORT_SYMBOL(drm_atomic_add_encoder_bridges); - /** * drm_atomic_add_affected_connectors - add connectors for CRTC * @state: atomic state diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index afe14f72a8242ad2c729466ed895d01f2a56bed5..4511c2e07bb9d60528b2d3143880c63e1300f38d 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -437,12 +437,12 @@ mode_fixup(struct drm_atomic_state *state) funcs = encoder->helper_private; bridge = drm_bridge_chain_get_first_bridge(encoder); - ret = drm_atomic_bridge_chain_check(bridge, - new_crtc_state, - new_conn_state); - if (ret) { - DRM_DEBUG_ATOMIC("Bridge atomic check failed\n"); - return ret; + ret = drm_bridge_chain_mode_fixup(bridge, + &new_crtc_state->mode, + &new_crtc_state->adjusted_mode); + if (!ret) { + DRM_DEBUG_ATOMIC("Bridge fixup failed\n"); + return -EINVAL; } if (funcs && funcs->atomic_check) { @@ -730,26 +730,6 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, return ret; } - /* - * Iterate over all connectors again, and add all affected bridges to - * the state. 
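With this revert, the atomic check path above goes back to drm_bridge_chain_mode_fixup(), which walks the encoder's bridge chain and calls each bridge's legacy &drm_bridge_funcs.mode_fixup hook. For illustration only, a minimal sketch of such a hook for a hypothetical bridge that tops out at a 148.5 MHz dot clock (my_bridge_mode_fixup is not part of this patch):

static bool my_bridge_mode_fixup(struct drm_bridge *bridge,
				 const struct drm_display_mode *mode,
				 struct drm_display_mode *adjusted_mode)
{
	/* Returning false makes the mode_fixup() caller above fail with -EINVAL. */
	if (mode->clock > 148500)
		return false;

	/* A bridge may also constrain the adjusted mode the CRTC is programmed with. */
	adjusted_mode->clock = mode->clock;
	return true;
}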
- */ - for_each_oldnew_connector_in_state(state, connector, - old_connector_state, - new_connector_state, i) { - struct drm_encoder *encoder; - - encoder = old_connector_state->best_encoder; - ret = drm_atomic_add_encoder_bridges(state, encoder); - if (ret) - return ret; - - encoder = new_connector_state->best_encoder; - ret = drm_atomic_add_encoder_bridges(state, encoder); - if (ret) - return ret; - } - ret = mode_valid(state); if (ret) return ret; diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index 37400607e9b739d302d0bb18edc0808487b4963b..c2cf0c90fa265d5a369194b75bd161dbe272b378 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c @@ -25,7 +25,6 @@ #include #include -#include #include #include @@ -90,74 +89,6 @@ void drm_bridge_remove(struct drm_bridge *bridge) } EXPORT_SYMBOL(drm_bridge_remove); -static struct drm_bridge_state * -drm_atomic_default_bridge_duplicate_state(struct drm_bridge *bridge) -{ - struct drm_bridge_state *new; - - if (WARN_ON(!bridge->base.state)) - return NULL; - - new = kzalloc(sizeof(*new), GFP_KERNEL); - if (new) - __drm_atomic_helper_bridge_duplicate_state(bridge, new); - - return new; -} - -static struct drm_private_state * -drm_bridge_atomic_duplicate_priv_state(struct drm_private_obj *obj) -{ - struct drm_bridge *bridge = drm_priv_to_bridge(obj); - struct drm_bridge_state *state; - - if (bridge->funcs->atomic_duplicate_state) - state = bridge->funcs->atomic_duplicate_state(bridge); - else - state = drm_atomic_default_bridge_duplicate_state(bridge); - - return state ? &state->base : NULL; -} - -static void -drm_atomic_default_bridge_destroy_state(struct drm_bridge *bridge, - struct drm_bridge_state *state) -{ - /* Just a simple kfree() for now */ - kfree(state); -} - -static void -drm_bridge_atomic_destroy_priv_state(struct drm_private_obj *obj, - struct drm_private_state *s) -{ - struct drm_bridge_state *state = drm_priv_to_bridge_state(s); - struct drm_bridge *bridge = drm_priv_to_bridge(obj); - - if (bridge->funcs->atomic_destroy_state) - bridge->funcs->atomic_destroy_state(bridge, state); - else - drm_atomic_default_bridge_destroy_state(bridge, state); -} - -static const struct drm_private_state_funcs drm_bridge_priv_state_funcs = { - .atomic_duplicate_state = drm_bridge_atomic_duplicate_priv_state, - .atomic_destroy_state = drm_bridge_atomic_destroy_priv_state, -}; - -static struct drm_bridge_state * -drm_atomic_default_bridge_reset(struct drm_bridge *bridge) -{ - struct drm_bridge_state *bridge_state; - - bridge_state = kzalloc(sizeof(*bridge_state), GFP_KERNEL); - if (!bridge_state) - return ERR_PTR(-ENOMEM); - - __drm_atomic_helper_bridge_reset(bridge, bridge_state); - return bridge_state; -} - /** * drm_bridge_attach - attach the bridge to an encoder's chain * @@ -183,7 +114,6 @@ drm_atomic_default_bridge_reset(struct drm_bridge *bridge) int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge, struct drm_bridge *previous) { - struct drm_bridge_state *state; int ret; if (!encoder || !bridge) @@ -205,35 +135,15 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge, if (bridge->funcs->attach) { ret = bridge->funcs->attach(bridge); - if (ret < 0) - goto err_reset_bridge; - } - - if (bridge->funcs->atomic_reset) - state = bridge->funcs->atomic_reset(bridge); - else - state = drm_atomic_default_bridge_reset(bridge); - - if (IS_ERR(state)) { - ret = PTR_ERR(state); - goto err_detach_bridge; + if (ret < 0) { + list_del(&bridge->chain_node); + bridge->dev = 
NULL; + bridge->encoder = NULL; + return ret; + } } - drm_atomic_private_obj_init(bridge->dev, &bridge->base, - &state->base, - &drm_bridge_priv_state_funcs); - return 0; - -err_detach_bridge: - if (bridge->funcs->detach) - bridge->funcs->detach(bridge); - -err_reset_bridge: - bridge->dev = NULL; - bridge->encoder = NULL; - list_del(&bridge->chain_node); - return ret; } EXPORT_SYMBOL(drm_bridge_attach); @@ -245,8 +155,6 @@ void drm_bridge_detach(struct drm_bridge *bridge) if (WARN_ON(!bridge->dev)) return; - drm_atomic_private_obj_fini(&bridge->base); - if (bridge->funcs->detach) bridge->funcs->detach(bridge); @@ -501,19 +409,10 @@ void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge, encoder = bridge->encoder; list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) { - if (iter->funcs->atomic_disable) { - struct drm_bridge_state *old_bridge_state; - - old_bridge_state = - drm_atomic_get_old_bridge_state(old_state, - iter); - if (WARN_ON(!old_bridge_state)) - return; - - iter->funcs->atomic_disable(iter, old_bridge_state); - } else if (iter->funcs->disable) { + if (iter->funcs->atomic_disable) + iter->funcs->atomic_disable(iter, old_state); + else if (iter->funcs->disable) iter->funcs->disable(iter); - } if (iter == bridge) break; @@ -544,20 +443,10 @@ void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge, encoder = bridge->encoder; list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) { - if (bridge->funcs->atomic_post_disable) { - struct drm_bridge_state *old_bridge_state; - - old_bridge_state = - drm_atomic_get_old_bridge_state(old_state, - bridge); - if (WARN_ON(!old_bridge_state)) - return; - - bridge->funcs->atomic_post_disable(bridge, - old_bridge_state); - } else if (bridge->funcs->post_disable) { + if (bridge->funcs->atomic_post_disable) + bridge->funcs->atomic_post_disable(bridge, old_state); + else if (bridge->funcs->post_disable) bridge->funcs->post_disable(bridge); - } } } EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable); @@ -586,19 +475,10 @@ void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge, encoder = bridge->encoder; list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) { - if (iter->funcs->atomic_pre_enable) { - struct drm_bridge_state *old_bridge_state; - - old_bridge_state = - drm_atomic_get_old_bridge_state(old_state, - iter); - if (WARN_ON(!old_bridge_state)) - return; - - iter->funcs->atomic_pre_enable(iter, old_bridge_state); - } else if (iter->funcs->pre_enable) { + if (iter->funcs->atomic_pre_enable) + iter->funcs->atomic_pre_enable(iter, old_state); + else if (iter->funcs->pre_enable) iter->funcs->pre_enable(iter); - } if (iter == bridge) break; @@ -628,385 +508,14 @@ void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge, encoder = bridge->encoder; list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) { - if (bridge->funcs->atomic_enable) { - struct drm_bridge_state *old_bridge_state; - - old_bridge_state = - drm_atomic_get_old_bridge_state(old_state, - bridge); - if (WARN_ON(!old_bridge_state)) - return; - - bridge->funcs->atomic_enable(bridge, old_bridge_state); - } else if (bridge->funcs->enable) { + if (bridge->funcs->atomic_enable) + bridge->funcs->atomic_enable(bridge, old_state); + else if (bridge->funcs->enable) bridge->funcs->enable(bridge); - } } } EXPORT_SYMBOL(drm_atomic_bridge_chain_enable); -static int drm_atomic_bridge_check(struct drm_bridge *bridge, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state) -{ - if 
(bridge->funcs->atomic_check) { - struct drm_bridge_state *bridge_state; - int ret; - - bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state, - bridge); - if (WARN_ON(!bridge_state)) - return -EINVAL; - - ret = bridge->funcs->atomic_check(bridge, bridge_state, - crtc_state, conn_state); - if (ret) - return ret; - } else if (bridge->funcs->mode_fixup) { - if (!bridge->funcs->mode_fixup(bridge, &crtc_state->mode, - &crtc_state->adjusted_mode)) - return -EINVAL; - } - - return 0; -} - -/** - * drm_atomic_helper_bridge_propagate_bus_fmt() - Propagate output format to - * the input end of a bridge - * @bridge: bridge control structure - * @bridge_state: new bridge state - * @crtc_state: new CRTC state - * @conn_state: new connector state - * @output_fmt: tested output bus format - * @num_input_fmts: will contain the size of the returned array - * - * This helper is a pluggable implementation of the - * &drm_bridge_funcs.atomic_get_input_bus_fmts operation for bridges that don't - * modify the bus configuration between their input and their output. It - * returns an array of input formats with a single element set to @output_fmt. - * - * RETURNS: - * a valid format array of size @num_input_fmts, or NULL if the allocation - * failed - */ -u32 * -drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge, - struct drm_bridge_state *bridge_state, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state, - u32 output_fmt, - unsigned int *num_input_fmts) -{ - u32 *input_fmts; - - input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL); - if (!input_fmts) { - *num_input_fmts = 0; - return NULL; - } - - *num_input_fmts = 1; - input_fmts[0] = output_fmt; - return input_fmts; -} -EXPORT_SYMBOL(drm_atomic_helper_bridge_propagate_bus_fmt); - -static int select_bus_fmt_recursive(struct drm_bridge *first_bridge, - struct drm_bridge *cur_bridge, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state, - u32 out_bus_fmt) -{ - struct drm_bridge_state *cur_state; - unsigned int num_in_bus_fmts, i; - struct drm_bridge *prev_bridge; - u32 *in_bus_fmts; - int ret; - - prev_bridge = drm_bridge_get_prev_bridge(cur_bridge); - cur_state = drm_atomic_get_new_bridge_state(crtc_state->state, - cur_bridge); - if (WARN_ON(!cur_state)) - return -EINVAL; - - /* - * If bus format negotiation is not supported by this bridge, let's - * pass MEDIA_BUS_FMT_FIXED to the previous bridge in the chain and - * hope that it can handle this situation gracefully (by providing - * appropriate default values). 
- */ - if (!cur_bridge->funcs->atomic_get_input_bus_fmts) { - if (cur_bridge != first_bridge) { - ret = select_bus_fmt_recursive(first_bridge, - prev_bridge, crtc_state, - conn_state, - MEDIA_BUS_FMT_FIXED); - if (ret) - return ret; - } - - cur_state->input_bus_cfg.format = MEDIA_BUS_FMT_FIXED; - cur_state->output_bus_cfg.format = out_bus_fmt; - return 0; - } - - in_bus_fmts = cur_bridge->funcs->atomic_get_input_bus_fmts(cur_bridge, - cur_state, - crtc_state, - conn_state, - out_bus_fmt, - &num_in_bus_fmts); - if (!num_in_bus_fmts) - return -ENOTSUPP; - else if (!in_bus_fmts) - return -ENOMEM; - - if (first_bridge == cur_bridge) { - cur_state->input_bus_cfg.format = in_bus_fmts[0]; - cur_state->output_bus_cfg.format = out_bus_fmt; - kfree(in_bus_fmts); - return 0; - } - - for (i = 0; i < num_in_bus_fmts; i++) { - ret = select_bus_fmt_recursive(first_bridge, prev_bridge, - crtc_state, conn_state, - in_bus_fmts[i]); - if (ret != -ENOTSUPP) - break; - } - - if (!ret) { - cur_state->input_bus_cfg.format = in_bus_fmts[i]; - cur_state->output_bus_cfg.format = out_bus_fmt; - } - - kfree(in_bus_fmts); - return ret; -} - -/* - * This function is called by &drm_atomic_bridge_chain_check() just before - * calling &drm_bridge_funcs.atomic_check() on all elements of the chain. - * It performs bus format negotiation between bridge elements. The negotiation - * happens in reverse order, starting from the last element in the chain up to - * @bridge. - * - * Negotiation starts by retrieving supported output bus formats on the last - * bridge element and testing them one by one. The test is recursive, meaning - * that for each tested output format, the whole chain will be walked backward, - * and each element will have to choose an input bus format that can be - * transcoded to the requested output format. When a bridge element does not - * support transcoding into a specific output format -ENOTSUPP is returned and - * the next bridge element will have to try a different format. If none of the - * combinations worked, -ENOTSUPP is returned and the atomic modeset will fail. - * - * This implementation is relying on - * &drm_bridge_funcs.atomic_get_output_bus_fmts() and - * &drm_bridge_funcs.atomic_get_input_bus_fmts() to gather supported - * input/output formats. - * - * When &drm_bridge_funcs.atomic_get_output_bus_fmts() is not implemented by - * the last element of the chain, &drm_atomic_bridge_chain_select_bus_fmts() - * tries a single format: &drm_connector.display_info.bus_formats[0] if - * available, MEDIA_BUS_FMT_FIXED otherwise. - * - * When &drm_bridge_funcs.atomic_get_input_bus_fmts() is not implemented, - * &drm_atomic_bridge_chain_select_bus_fmts() skips the negotiation on the - * bridge element that lacks this hook and asks the previous element in the - * chain to try MEDIA_BUS_FMT_FIXED. It's up to bridge drivers to decide what - * to do in that case (fail if they want to enforce bus format negotiation, or - * provide a reasonable default if they need to support pipelines where not - * all elements support bus format negotiation). 
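To make the negotiation walk described above concrete: the recursion consumed per-bridge &drm_bridge_funcs.atomic_get_input_bus_fmts hooks (removed along with the rest of this code by the revert). A sketch of such a hook for a hypothetical parallel-RGB-to-LVDS bridge, assuming the single-input/single-output case:

static u32 *
my_bridge_get_input_bus_fmts(struct drm_bridge *bridge,
			     struct drm_bridge_state *bridge_state,
			     struct drm_crtc_state *crtc_state,
			     struct drm_connector_state *conn_state,
			     u32 output_fmt,
			     unsigned int *num_input_fmts)
{
	u32 *input_fmts;

	*num_input_fmts = 0;

	/* Rejecting an output format made the caller try the next candidate. */
	if (output_fmt != MEDIA_BUS_FMT_RGB888_1X7X4_SPWG)
		return NULL;

	input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
	if (!input_fmts)
		return NULL;

	/* The only input this bridge can transcode to its LVDS output. */
	input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
	*num_input_fmts = 1;
	return input_fmts;
}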
- */ -static int -drm_atomic_bridge_chain_select_bus_fmts(struct drm_bridge *bridge, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state) -{ - struct drm_connector *conn = conn_state->connector; - struct drm_encoder *encoder = bridge->encoder; - struct drm_bridge_state *last_bridge_state; - unsigned int i, num_out_bus_fmts; - struct drm_bridge *last_bridge; - u32 *out_bus_fmts; - int ret = 0; - - last_bridge = list_last_entry(&encoder->bridge_chain, - struct drm_bridge, chain_node); - last_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state, - last_bridge); - if (WARN_ON(!last_bridge_state)) - return -EINVAL; - - if (last_bridge->funcs->atomic_get_output_bus_fmts) { - const struct drm_bridge_funcs *funcs = last_bridge->funcs; - - out_bus_fmts = funcs->atomic_get_output_bus_fmts(last_bridge, - last_bridge_state, - crtc_state, - conn_state, - &num_out_bus_fmts); - if (!num_out_bus_fmts) - return -ENOTSUPP; - else if (!out_bus_fmts) - return -ENOMEM; - } else { - num_out_bus_fmts = 1; - out_bus_fmts = kmalloc(sizeof(*out_bus_fmts), GFP_KERNEL); - if (!out_bus_fmts) - return -ENOMEM; - - if (conn->display_info.num_bus_formats && - conn->display_info.bus_formats) - out_bus_fmts[0] = conn->display_info.bus_formats[0]; - else - out_bus_fmts[0] = MEDIA_BUS_FMT_FIXED; - } - - for (i = 0; i < num_out_bus_fmts; i++) { - ret = select_bus_fmt_recursive(bridge, last_bridge, crtc_state, - conn_state, out_bus_fmts[i]); - if (ret != -ENOTSUPP) - break; - } - - kfree(out_bus_fmts); - - return ret; -} - -static void -drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge, - struct drm_connector *conn, - struct drm_atomic_state *state) -{ - struct drm_bridge_state *bridge_state, *next_bridge_state; - struct drm_bridge *next_bridge; - u32 output_flags; - - bridge_state = drm_atomic_get_new_bridge_state(state, bridge); - next_bridge = drm_bridge_get_next_bridge(bridge); - - /* - * Let's try to apply the most common case here, that is, propagate - * display_info flags for the last bridge, and propagate the input - * flags of the next bridge element to the output end of the current - * bridge when the bridge is not the last one. - * There are exceptions to this rule, like when signal inversion is - * happening at the board level, but that's something drivers can deal - * with from their &drm_bridge_funcs.atomic_check() implementation by - * simply overriding the flags value we've set here. - */ - if (!next_bridge) { - output_flags = conn->display_info.bus_flags; - } else { - next_bridge_state = drm_atomic_get_new_bridge_state(state, - next_bridge); - output_flags = next_bridge_state->input_bus_cfg.flags; - } - - bridge_state->output_bus_cfg.flags = output_flags; - - /* - * Propage the output flags to the input end of the bridge. Again, it's - * not necessarily what all bridges want, but that's what most of them - * do, and by doing that by default we avoid forcing drivers to - * duplicate the "dummy propagation" logic. - */ - bridge_state->input_bus_cfg.flags = output_flags; -} - -/** - * drm_atomic_bridge_chain_check() - Do an atomic check on the bridge chain - * @bridge: bridge control structure - * @crtc_state: new CRTC state - * @conn_state: new connector state - * - * First trigger a bus format negotiation before calling - * &drm_bridge_funcs.atomic_check() (falls back on - * &drm_bridge_funcs.mode_fixup()) op for all the bridges in the encoder chain, - * starting from the last bridge to the first. 
These are called before calling - * &drm_encoder_helper_funcs.atomic_check() - * - * RETURNS: - * 0 on success, a negative error code on failure - */ -int drm_atomic_bridge_chain_check(struct drm_bridge *bridge, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state) -{ - struct drm_connector *conn = conn_state->connector; - struct drm_encoder *encoder = bridge->encoder; - struct drm_bridge *iter; - int ret; - - ret = drm_atomic_bridge_chain_select_bus_fmts(bridge, crtc_state, - conn_state); - if (ret) - return ret; - - list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) { - int ret; - - /* - * Bus flags are propagated by default. If a bridge needs to - * tweak the input bus flags for any reason, it should happen - * in its &drm_bridge_funcs.atomic_check() implementation such - * that preceding bridges in the chain can propagate the new - * bus flags. - */ - drm_atomic_bridge_propagate_bus_flags(iter, conn, - crtc_state->state); - - ret = drm_atomic_bridge_check(iter, crtc_state, conn_state); - if (ret) - return ret; - - if (iter == bridge) - break; - } - - return 0; -} -EXPORT_SYMBOL(drm_atomic_bridge_chain_check); - -/** - * __drm_atomic_helper_bridge_reset() - Initialize a bridge state to its - * default - * @bridge: the bridge this state is refers to - * @state: bridge state to initialize - * - * Initialize the bridge state to default values. This is meant to be* called - * by the bridge &drm_plane_funcs.reset hook for bridges that subclass the - * bridge state. - */ -void __drm_atomic_helper_bridge_reset(struct drm_bridge *bridge, - struct drm_bridge_state *state) -{ - memset(state, 0, sizeof(*state)); - state->bridge = bridge; -} -EXPORT_SYMBOL(__drm_atomic_helper_bridge_reset); - -/** - * __drm_atomic_helper_bridge_duplicate_state() - Copy atomic bridge state - * @bridge: bridge object - * @state: atomic bridge state - * - * Copies atomic state from a bridge's current state and resets inferred values. - * This is useful for drivers that subclass the bridge state. 
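Concretely, "subclassing" meant embedding struct drm_bridge_state as the first member of a driver state and copying the common part with the helper documented above; a sketch with a hypothetical private field (these helpers disappear with this revert):

struct my_bridge_state {
	struct drm_bridge_state base;
	u32 pixel_fmt;		/* driver-private, re-derived on commit */
};

static struct drm_bridge_state *
my_bridge_duplicate_state(struct drm_bridge *bridge)
{
	struct my_bridge_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (!state)
		return NULL;

	/* Copy the common fields; inferred values start from scratch. */
	__drm_atomic_helper_bridge_duplicate_state(bridge, &state->base);
	return &state->base;
}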
- */ -void __drm_atomic_helper_bridge_duplicate_state(struct drm_bridge *bridge, - struct drm_bridge_state *state) -{ - __drm_atomic_helper_private_obj_duplicate_state(&bridge->base, - &state->base); - state->bridge = bridge; -} -EXPORT_SYMBOL(__drm_atomic_helper_bridge_duplicate_state); - #ifdef CONFIG_OF /** * of_drm_find_bridge - find the bridge corresponding to the device node in diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c index ca3c55c6b81550270c4953ecac2d3005f7741ad7..e22b812c4b802c0e6610498b36aa30bf99efb54b 100644 --- a/drivers/gpu/drm/drm_debugfs_crc.c +++ b/drivers/gpu/drm/drm_debugfs_crc.c @@ -140,8 +140,8 @@ static ssize_t crc_control_write(struct file *file, const char __user *ubuf, if (IS_ERR(source)) return PTR_ERR(source); - if (source[len] == '\n') - source[len] = '\0'; + if (source[len - 1] == '\n') + source[len - 1] = '\0'; ret = crtc->funcs->verify_crc_source(crtc, source, &values_cnt); if (ret) @@ -258,6 +258,11 @@ static int crtc_crc_release(struct inode *inode, struct file *filep) struct drm_crtc *crtc = filep->f_inode->i_private; struct drm_crtc_crc *crc = &crtc->crc; + /* terminate the infinite while loop if 'drm_dp_aux_crc_work' is running */ + spin_lock_irq(&crc->lock); + crc->opened = false; + spin_unlock_irq(&crc->lock); + crtc->funcs->set_crc_source(crtc, NULL); spin_lock_irq(&crc->lock); diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c index 0cfb386754c373325146ba7bd410c51fa5cf2231..2510717d5a08fa148af971cd23d66c6baa4def80 100644 --- a/drivers/gpu/drm/drm_dp_aux_dev.c +++ b/drivers/gpu/drm/drm_dp_aux_dev.c @@ -163,11 +163,7 @@ static ssize_t auxdev_read_iter(struct kiocb *iocb, struct iov_iter *to) break; } - if (aux_dev->aux->is_remote) - res = drm_dp_mst_dpcd_read(aux_dev->aux, pos, buf, - todo); - else - res = drm_dp_dpcd_read(aux_dev->aux, pos, buf, todo); + res = drm_dp_dpcd_read(aux_dev->aux, pos, buf, todo); if (res <= 0) break; @@ -215,11 +211,7 @@ static ssize_t auxdev_write_iter(struct kiocb *iocb, struct iov_iter *from) break; } - if (aux_dev->aux->is_remote) - res = drm_dp_mst_dpcd_write(aux_dev->aux, pos, buf, - todo); - else - res = drm_dp_dpcd_write(aux_dev->aux, pos, buf, todo); + res = drm_dp_dpcd_write(aux_dev->aux, pos, buf, todo); if (res <= 0) break; diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 2c7870aef4695fe4f4eca51572faf88474d9ddcc..a5364b5192b8366907bf39438bd059ce474c27ce 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -32,6 +32,7 @@ #include #include #include +#include #include "drm_crtc_helper_internal.h" @@ -266,7 +267,7 @@ unlock: /** * drm_dp_dpcd_read() - read a series of bytes from the DPCD - * @aux: DisplayPort AUX channel + * @aux: DisplayPort AUX channel (SST or MST) * @offset: address of the (first) register to read * @buffer: buffer to store the register values * @size: number of bytes in @buffer @@ -295,13 +296,18 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, * We just have to do it before any DPCD access and hope that the * monitor doesn't power down exactly after the throw away read.
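After this change a single entry point serves both connector types; a caller-side sketch, where port is a hypothetical struct drm_dp_mst_port with aux.is_remote set:

u8 dpcd[DP_RECEIVER_CAP_SIZE];
ssize_t ret;

/* Routed to drm_dp_mst_dpcd_read() internally because aux->is_remote is
 * set; a native AUX transfer (preceded by the wake-up read) otherwise. */
ret = drm_dp_dpcd_read(&port->aux, DP_DPCD_REV, dpcd, sizeof(dpcd));
if (ret < 0)
	return ret;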
*/ - ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, DP_DPCD_REV, buffer, - 1); - if (ret != 1) - goto out; + if (!aux->is_remote) { + ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, DP_DPCD_REV, + buffer, 1); + if (ret != 1) + goto out; + } - ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, buffer, - size); + if (aux->is_remote) + ret = drm_dp_mst_dpcd_read(aux, offset, buffer, size); + else + ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, + buffer, size); out: drm_dp_dump_access(aux, DP_AUX_NATIVE_READ, offset, buffer, ret); @@ -311,7 +317,7 @@ EXPORT_SYMBOL(drm_dp_dpcd_read); /** * drm_dp_dpcd_write() - write a series of bytes to the DPCD - * @aux: DisplayPort AUX channel + * @aux: DisplayPort AUX channel (SST or MST) * @offset: address of the (first) register to write * @buffer: buffer containing the values to write * @size: number of bytes in @buffer @@ -328,8 +334,12 @@ ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset, { int ret; - ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer, - size); + if (aux->is_remote) + ret = drm_dp_mst_dpcd_write(aux, offset, buffer, size); + else + ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_WRITE, offset, + buffer, size); + drm_dp_dump_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer, ret); return ret; } @@ -968,6 +978,19 @@ static void drm_dp_aux_crc_work(struct work_struct *work) } } +/** + * drm_dp_remote_aux_init() - minimally initialise a remote aux channel + * @aux: DisplayPort AUX channel + * + * Used for remote aux channel in general. Merely initialize the crc work + * struct. + */ +void drm_dp_remote_aux_init(struct drm_dp_aux *aux) +{ + INIT_WORK(&aux->crc_work, drm_dp_aux_crc_work); +} +EXPORT_SYMBOL(drm_dp_remote_aux_init); + /** * drm_dp_aux_init() - minimally initialise an aux channel * @aux: DisplayPort AUX channel @@ -1155,6 +1178,8 @@ static const struct dpcd_quirk dpcd_quirk_list[] = { { OUI(0x00, 0x10, 0xfa), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_NO_PSR) }, /* CH7511 seems to leave SINK_COUNT zeroed */ { OUI(0x00, 0x00, 0x00), DEVICE_ID('C', 'H', '7', '5', '1', '1'), false, BIT(DP_DPCD_QUIRK_NO_SINK_COUNT) }, + /* Synaptics DP1.4 MST hubs can support DSC without virtual DPCD */ + { OUI(0x90, 0xCC, 0x24), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) }, }; #undef OUI diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index e68d23043973ccd7fe3dd51f15b53cdd70e7face..38bf111e5f9b21dfe96c8126e8bb3e9fc7c2a941 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -398,7 +398,7 @@ drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req, memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes); idx += req->u.i2c_read.transactions[i].num_bytes; - buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5; + buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4; buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf); idx++; } @@ -853,6 +853,7 @@ static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband { int idx = 1; repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf; + repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1; idx++; if (idx > raw->curlen) goto fail_len; @@ -1208,6 +1209,8 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb, txmsg->state == DRM_DP_SIDEBAND_TX_SENT) { mstb->tx_slots[txmsg->seqno] = NULL; } + 
mgr->is_waiting_for_dwn_reply = false; + } out: if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) { @@ -1217,6 +1220,7 @@ out: } mutex_unlock(&mgr->qlock); + drm_dp_mst_kick_tx(mgr); return ret; } @@ -2174,6 +2178,7 @@ drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) { mutex_lock(&mgr->lock); + port->parent->num_ports--; list_del(&port->next); mutex_unlock(&mgr->lock); drm_dp_mst_topology_put_port(port); @@ -2198,6 +2203,9 @@ drm_dp_mst_add_port(struct drm_device *dev, port->aux.dev = dev->dev; port->aux.is_remote = true; + /* initialize the MST downstream port's AUX crc work queue */ + drm_dp_remote_aux_init(&port->aux); + /* * Make sure the memory allocation for our parent branch stays * around until our own memory allocation is released @@ -2273,6 +2281,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, mutex_lock(&mgr->lock); drm_dp_mst_topology_get_port(port); list_add(&port->next, &mstb->ports); + mstb->num_ports++; mutex_unlock(&mgr->lock); } @@ -2336,7 +2345,7 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, { struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; struct drm_dp_mst_port *port; - int old_ddps, ret; + int old_ddps, old_input, ret, i; u8 new_pdt; bool dowork = false, create_connector = false; @@ -2367,6 +2376,7 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, } old_ddps = port->ddps; + old_input = port->input; port->input = conn_stat->input_port; port->mcs = conn_stat->message_capability_status; port->ldps = conn_stat->legacy_device_plug_status; @@ -2391,6 +2401,28 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, dowork = false; } + if (!old_input && old_ddps != port->ddps && !port->ddps) { + for (i = 0; i < mgr->max_payloads; i++) { + struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i]; + struct drm_dp_mst_port *port_validated; + + if (!vcpi) + continue; + + port_validated = + container_of(vcpi, struct drm_dp_mst_port, vcpi); + port_validated = + drm_dp_mst_topology_get_port_validated(mgr, port_validated); + if (!port_validated) { + mutex_lock(&mgr->payload_lock); + vcpi->num_slots = 0; + mutex_unlock(&mgr->payload_lock); + } else { + drm_dp_mst_topology_put_port(port_validated); + } + } + } + if (port->connector) drm_modeset_unlock(&mgr->base.lock); else if (create_connector) @@ -2753,9 +2785,11 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) ret = process_single_tx_qlock(mgr, txmsg, false); if (ret == 1) { /* txmsg is sent; it should be in the slots now */ + mgr->is_waiting_for_dwn_reply = true; list_del(&txmsg->next); } else if (ret) { DRM_DEBUG_KMS("failed to send msg in q %d\n", ret); + mgr->is_waiting_for_dwn_reply = false; list_del(&txmsg->next); if (txmsg->seqno != -1) txmsg->dst->tx_slots[txmsg->seqno] = NULL; @@ -2795,7 +2829,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, drm_dp_mst_dump_sideband_msg_tx(&p, txmsg); } - if (list_is_singular(&mgr->tx_msg_downq)) + if (list_is_singular(&mgr->tx_msg_downq) && + !mgr->is_waiting_for_dwn_reply) process_single_down_tx_qlock(mgr); mutex_unlock(&mgr->qlock); } @@ -2951,6 +2986,7 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, path_res->avail_payload_bw_number); port->available_pbn = path_res->avail_payload_bw_number; + port->fec_capable = path_res->fec_capable; } } @@ -3749,6 +3785,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) mutex_lock(&mgr->qlock); txmsg->state =
DRM_DP_SIDEBAND_TX_RX; mstb->tx_slots[slot] = NULL; + mgr->is_waiting_for_dwn_reply = false; mutex_unlock(&mgr->qlock); wake_up_all(&mgr->tx_waitq); @@ -3758,6 +3795,9 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) no_msg: drm_dp_mst_topology_put_mstb(mstb); clear_down_rep_recv: + mutex_lock(&mgr->qlock); + mgr->is_waiting_for_dwn_reply = false; + mutex_unlock(&mgr->qlock); memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); return 0; @@ -4089,6 +4129,7 @@ static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr, * @mgr: MST topology manager for the port * @port: port to find vcpi slots for * @pbn: bandwidth required for the mode in PBN + * @pbn_div: divider for DSC mode that takes FEC into account * * Allocates VCPI slots to @port, replacing any previous VCPI allocations it * may have had. Any atomic drivers which support MST must call this function @@ -4115,11 +4156,12 @@ static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr, */ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_port *port, int pbn) + struct drm_dp_mst_port *port, int pbn, + int pbn_div) { struct drm_dp_mst_topology_state *topology_state; struct drm_dp_vcpi_allocation *pos, *vcpi = NULL; - int prev_slots, req_slots; + int prev_slots, prev_bw, req_slots; topology_state = drm_atomic_get_mst_topology_state(state, mgr); if (IS_ERR(topology_state)) @@ -4130,6 +4172,7 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, if (pos->port == port) { vcpi = pos; prev_slots = vcpi->vcpi; + prev_bw = vcpi->pbn; /* * This should never happen, unless the driver tries @@ -4145,14 +4188,22 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, break; } } - if (!vcpi) + if (!vcpi) { prev_slots = 0; + prev_bw = 0; + } - req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div); + if (pbn_div <= 0) + pbn_div = mgr->pbn_div; + + req_slots = DIV_ROUND_UP(pbn, pbn_div); DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n", port->connector->base.id, port->connector->name, port, prev_slots, req_slots); + DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n", + port->connector->base.id, port->connector->name, + port, prev_bw, pbn); /* Add the new allocation to the state */ if (!vcpi) { @@ -4165,6 +4216,7 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, list_add(&vcpi->next, &topology_state->vcpis); } vcpi->vcpi = req_slots; + vcpi->pbn = pbn; return req_slots; } @@ -4415,10 +4467,11 @@ EXPORT_SYMBOL(drm_dp_check_act_status); * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode. * @clock: dot clock for the mode * @bpp: bpp for the mode. + * @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel * * This uses the formula in the spec to calculate the PBN value for a mode. */ -int drm_dp_calc_pbn_mode(int clock, int bpp) +int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc) { /* * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006 @@ -4429,7 +4482,16 @@ int drm_dp_calc_pbn_mode(int clock, int bpp) * peak_kbps *= (1006/1000) * peak_kbps *= (64/54) * peak_kbps *= 8 convert to bytes + * + * If the bpp is in units of 1/16, further divide by 16. 
Put this + factor in the numerator rather than the denominator to avoid + integer overflow */ + + if (dsc) + return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006), + 8 * 54 * 1000 * 1000); + return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006), 8 * 54 * 1000 * 1000); } @@ -4568,7 +4630,7 @@ static void drm_dp_tx_work(struct work_struct *work) { struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work); mutex_lock(&mgr->qlock); - if (!list_empty(&mgr->tx_msg_downq)) + if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply) process_single_down_tx_qlock(mgr); mutex_unlock(&mgr->qlock); } @@ -4731,9 +4793,61 @@ static void drm_dp_mst_destroy_state(struct drm_private_obj *obj, kfree(mst_state); } +static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port, + struct drm_dp_mst_branch *branch) +{ + while (port->parent) { + if (port->parent == branch) + return true; + + if (port->parent->port_parent) + port = port->parent->port_parent; + else + break; + } + return false; +} + +static inline +int drm_dp_mst_atomic_check_bw_limit(struct drm_dp_mst_branch *branch, + struct drm_dp_mst_topology_state *mst_state) +{ + struct drm_dp_mst_port *port; + struct drm_dp_vcpi_allocation *vcpi; + int pbn_limit = 0, pbn_used = 0; + + list_for_each_entry(port, &branch->ports, next) { + if (port->mstb) + if (drm_dp_mst_atomic_check_bw_limit(port->mstb, mst_state)) + return -ENOSPC; + + if (port->available_pbn > 0) + pbn_limit = port->available_pbn; + } + DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch has %d PBN available\n", + branch, pbn_limit); + + list_for_each_entry(vcpi, &mst_state->vcpis, next) { + if (!vcpi->pbn) + continue; + + if (drm_dp_mst_port_downstream_of_branch(vcpi->port, branch)) + pbn_used += vcpi->pbn; + } + DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch used %d PBN\n", + branch, pbn_used); + + if (pbn_used > pbn_limit) { + DRM_DEBUG_ATOMIC("[MST BRANCH:%p] No available bandwidth\n", + branch); + return -ENOSPC; + } + return 0; +} + static inline int -drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_topology_state *mst_state) +drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_topology_state *mst_state) { struct drm_dp_vcpi_allocation *vcpi; int avail_slots = 63, payload_count = 0; @@ -4770,6 +4884,128 @@ drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr, return 0; } +/** + * drm_dp_mst_add_affected_dsc_crtcs + * @state: Pointer to the new struct drm_dp_mst_topology_state + * @mgr: MST topology manager + * + * Whenever there is a change in the MST topology, the + * DSC configuration has to be recalculated; therefore we + * need to trigger a modeset on all affected + * CRTCs in that topology + * + * See also: + * drm_dp_mst_atomic_enable_dsc() + */ +int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr) +{ + struct drm_dp_mst_topology_state *mst_state; + struct drm_dp_vcpi_allocation *pos; + struct drm_connector *connector; + struct drm_connector_state *conn_state; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + + mst_state = drm_atomic_get_mst_topology_state(state, mgr); + + if (IS_ERR(mst_state)) + return -EINVAL; + + list_for_each_entry(pos, &mst_state->vcpis, next) { + + connector = pos->port->connector; + + if (!connector) + return -EINVAL; + + conn_state = drm_atomic_get_connector_state(state, connector); + + if
(IS_ERR(conn_state)) + return PTR_ERR(conn_state); + + crtc = conn_state->crtc; + + if (WARN_ON(!crtc)) + return -EINVAL; + + if (!drm_dp_mst_dsc_aux_for_port(pos->port)) + continue; + + crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc); + + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + DRM_DEBUG_ATOMIC("[MST MGR:%p] Setting mode_changed flag on CRTC %p\n", + mgr, crtc); + + crtc_state->mode_changed = true; + } + return 0; +} +EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs); + +/** + * drm_dp_mst_atomic_enable_dsc - Set DSC Enable Flag to On/Off + * @state: Pointer to the new drm_atomic_state + * @port: Pointer to the affected MST Port + * @pbn: Newly recalculated bw required for link with DSC enabled + * @pbn_div: Divider to calculate correct number of pbn per slot + * @enable: Boolean flag to enable or disable DSC on the port + * + * This function enables DSC on the given Port + * by recalculating its VCPI from the PBN provided, + * and sets the dsc_enabled flag to keep track of which + * ports have DSC enabled + * + */ +int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state, + struct drm_dp_mst_port *port, + int pbn, int pbn_div, + bool enable) +{ + struct drm_dp_mst_topology_state *mst_state; + struct drm_dp_vcpi_allocation *pos; + bool found = false; + int vcpi = 0; + + mst_state = drm_atomic_get_mst_topology_state(state, port->mgr); + + if (IS_ERR(mst_state)) + return PTR_ERR(mst_state); + + list_for_each_entry(pos, &mst_state->vcpis, next) { + if (pos->port == port) { + found = true; + break; + } + } + + if (!found) { + DRM_DEBUG_ATOMIC("[MST PORT:%p] Couldn't find VCPI allocation in mst state %p\n", + port, mst_state); + return -EINVAL; + } + + if (pos->dsc_enabled == enable) { + DRM_DEBUG_ATOMIC("[MST PORT:%p] DSC flag is already set to %d, returning %d VCPI slots\n", + port, enable, pos->vcpi); + vcpi = pos->vcpi; + } + + if (enable) { + vcpi = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port, pbn, pbn_div); + DRM_DEBUG_ATOMIC("[MST PORT:%p] Enabling DSC flag, reallocating %d VCPI slots on the port\n", + port, vcpi); + if (vcpi < 0) + return -EINVAL; + } + + pos->dsc_enabled = enable; + + return vcpi; +} +EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc); /** * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an * atomic update is valid @@ -4798,7 +5034,13 @@ int drm_dp_mst_atomic_check(struct drm_atomic_state *state) int i, ret = 0; for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) { - ret = drm_dp_mst_atomic_check_topology_state(mgr, mst_state); + if (!mgr->mst_state) + continue; + + ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state); + if (ret) + break; + ret = drm_dp_mst_atomic_check_bw_limit(mgr->mst_primary, mst_state); if (ret) break; } @@ -5062,3 +5304,173 @@ static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux) { i2c_del_adapter(&aux->ddc); } + +/** + * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device + * @port: The port to check + * + * A single physical MST hub object can be represented in the topology + * by multiple branches, with virtual ports between those branches. + * + * As of DP1.4, an MST hub with internal (virtual) ports must expose + * certain DPCD registers over those ports. See sections 2.6.1.1.1 + * and 2.6.1.1.2 of the DisplayPort specification v1.4 for details.
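+ * + * Concretely, the checks below treat three cases as virtual DP peer + * devices: a Virtual DP Sink (port number 8 or above), a DP-to-HDMI + * protocol converter, and a DP-to-DP branch with exactly two ports + * whose non-input peer is a single SST sink.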
+ * + * May acquire mgr->lock + * + * Returns: + * true if the port is a virtual DP peer device, false otherwise + */ +static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port) +{ + struct drm_dp_mst_port *downstream_port; + + if (!port || port->dpcd_rev < DP_DPCD_REV_14) + return false; + + /* Virtual DP Sink (Internal Display Panel) */ + if (port->port_num >= 8) + return true; + + /* DP-to-HDMI Protocol Converter */ + if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV && + !port->mcs && + port->ldps) + return true; + + /* DP-to-DP */ + mutex_lock(&port->mgr->lock); + if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING && + port->mstb && + port->mstb->num_ports == 2) { + list_for_each_entry(downstream_port, &port->mstb->ports, next) { + if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK && + !downstream_port->input) { + mutex_unlock(&port->mgr->lock); + return true; + } + } + } + mutex_unlock(&port->mgr->lock); + + return false; +} + +/** + * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC + * @port: The port to check. A leaf of the MST tree with an attached display. + * + * Depending on the situation, DSC may be enabled via the endpoint aux, + * the immediately upstream aux, or the connector's physical aux. + * + * This is both the correct aux to read DSC_CAPABILITY and the + * correct aux to write DSC_ENABLED. + * + * This operation can be expensive (up to four aux reads), so + * the caller should cache the return. + * + * Returns: + * NULL if DSC cannot be enabled on this port, otherwise the aux device + */ +struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port) +{ + struct drm_dp_mst_port *immediate_upstream_port; + struct drm_dp_mst_port *fec_port; + struct drm_dp_desc desc = { 0 }; + u8 endpoint_fec; + u8 endpoint_dsc; + + if (!port) + return NULL; + + if (port->parent->port_parent) + immediate_upstream_port = port->parent->port_parent; + else + immediate_upstream_port = NULL; + + fec_port = immediate_upstream_port; + while (fec_port) { + /* + * Each physical link (i.e. 
not a virtual port) between the + * output and the primary device must support FEC + */ + if (!drm_dp_mst_is_virtual_dpcd(fec_port) && + !fec_port->fec_capable) + return NULL; + + fec_port = fec_port->parent->port_parent; + } + + /* DP-to-DP peer device */ + if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) { + u8 upstream_dsc; + + if (drm_dp_dpcd_read(&port->aux, + DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1) + return NULL; + if (drm_dp_dpcd_read(&port->aux, + DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1) + return NULL; + if (drm_dp_dpcd_read(&immediate_upstream_port->aux, + DP_DSC_SUPPORT, &upstream_dsc, 1) != 1) + return NULL; + + /* Endpoint decompression with DP-to-DP peer device */ + if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) && + (endpoint_fec & DP_FEC_CAPABLE) && + (upstream_dsc & 0x2) /* DSC passthrough */) + return &port->aux; + + /* Virtual DPCD decompression with DP-to-DP peer device */ + return &immediate_upstream_port->aux; + } + + /* Virtual DPCD decompression with DP-to-HDMI or Virtual DP Sink */ + if (drm_dp_mst_is_virtual_dpcd(port)) + return &port->aux; + + /* + * Synaptics quirk + * Applies to ports for which: + * - Physical aux has Synaptics OUI + * - DPv1.4 or higher + * - Port is on primary branch device + * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG) + */ + if (drm_dp_read_desc(port->mgr->aux, &desc, true)) + return NULL; + + if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) && + port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 && + port->parent == port->mgr->mst_primary) { + u8 downstreamport; + + if (drm_dp_dpcd_read(&port->aux, DP_DOWNSTREAMPORT_PRESENT, + &downstreamport, 1) < 0) + return NULL; + + if ((downstreamport & DP_DWN_STRM_PORT_PRESENT) && + ((downstreamport & DP_DWN_STRM_PORT_TYPE_MASK) + != DP_DWN_STRM_PORT_TYPE_ANALOG)) + return port->mgr->aux; + } + + /* + * The check below verifies whether the MST sink + * connected to the GPU is capable of DSC - + * therefore the endpoint needs to be + * both DSC and FEC capable. + */ + if (drm_dp_dpcd_read(&port->aux, + DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1) + return NULL; + if (drm_dp_dpcd_read(&port->aux, + DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1) + return NULL; + if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) && + (endpoint_fec & DP_FEC_CAPABLE)) + return &port->aux; + + return NULL; +} +EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port); diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c index c0b0f603af633027149f8f4574a8a858752270e5..9801c0333eca29e937a1877525976bd572378547 100644 --- a/drivers/gpu/drm/drm_fb_cma_helper.c +++ b/drivers/gpu/drm/drm_fb_cma_helper.c @@ -9,6 +9,7 @@ * Copyright (C) 2012 Red Hat */ +#include #include #include #include diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 57f510687b85f0717bf70b88293be891e4eee353..4c7cbce7bae73dde0356a08d8f35e38b399c26e2 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1267,7 +1267,7 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var, * Changes to struct fb_var_screeninfo are currently not pushed back * to KMS, hence fail if different settings are requested.
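 * * For example, on an XRGB8888 framebuffer (cpp[0] == 4) a 24 bpp request * now passes the check below and is rounded up to 32 bpp further down, * while anything above 32 bpp still fails.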
*/ - if (var->bits_per_pixel != fb->format->cpp[0] * 8 || + if (var->bits_per_pixel > fb->format->cpp[0] * 8 || var->xres > fb->width || var->yres > fb->height || var->xres_virtual > fb->width || var->yres_virtual > fb->height) { drm_dbg_kms(dev, "fb requested width/height/bpp can't fit in current fb " @@ -1292,6 +1292,11 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var, drm_fb_helper_fill_pixel_fmt(var, fb->format->depth); } + /* + * Likewise, bits_per_pixel should be rounded up to a supported value. + */ + var->bits_per_pixel = fb->format->cpp[0] * 8; + /* * drm fbdev emulation doesn't support changing the pixel format at all, * so reject all pixel format changing requests. diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c index 2e8ce99d0baaedbf9926aff1145736a8144c474e..2c79e8199e3cec4d62e390f4684d792f76f0b155 100644 --- a/drivers/gpu/drm/drm_lock.c +++ b/drivers/gpu/drm/drm_lock.c @@ -360,7 +360,8 @@ void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *m /* * Since the master is disappearing, so is the * possibility to lock. - */ mutex_lock(&dev->struct_mutex); + */ + mutex_lock(&dev->struct_mutex); if (master->lock.hw_lock) { if (dev->sigdata.lock == master->lock.hw_lock) dev->sigdata.lock = NULL; diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 2a4eb619d7ad0ab3a33ca784e14119a78dd77eff..10336b144c722b5aa03bca91d3d28685c009bf72 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -233,7 +233,7 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay, /* 3) Nominal HSync width (% of line period) - default 8 */ #define CVT_HSYNC_PERCENTAGE 8 unsigned int hblank_percentage; - int vsyncandback_porch, vback_porch, hblank; + int vsyncandback_porch, __maybe_unused vback_porch, hblank; /* estimated the horizontal period */ tmp1 = HV_FACTOR * 1000000 - @@ -386,9 +386,10 @@ drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay, int top_margin, bottom_margin; int interlace; unsigned int hfreq_est; - int vsync_plus_bp, vback_porch; - unsigned int vtotal_lines, vfieldrate_est, hperiod; - unsigned int vfield_rate, vframe_rate; + int vsync_plus_bp, __maybe_unused vback_porch; + unsigned int vtotal_lines, __maybe_unused vfieldrate_est; + unsigned int __maybe_unused hperiod; + unsigned int vfield_rate, __maybe_unused vframe_rate; int left_margin, right_margin; unsigned int total_active_pixels, ideal_duty_cycle; unsigned int hblank, total_pixels, pixel_freq; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index 1f9c01be40d779b1620b8c55e8a778a19700a31c..76ecdf8bd31cf53fa887528d1cbf956118e27966 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -65,12 +65,13 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file) for (i = 0; i < ETNA_MAX_PIPES; i++) { struct etnaviv_gpu *gpu = priv->gpu[i]; - struct drm_sched_rq *rq; + struct drm_gpu_scheduler *sched; if (gpu) { - rq = &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; + sched = &gpu->sched; drm_sched_entity_init(&ctx->sched_entity[i], - &rq, 1, NULL); + DRM_SCHED_PRIORITY_NORMAL, &sched, + 1, NULL); } } diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index 6f7d3b3b3628d2229a62f44d59ae6e3e13e645a4..6417f374b923a454a4279753aee7e4fb3a91d64a 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -1,13 +1,13 @@ # SPDX-License-Identifier: GPL-2.0-only 
config DRM_EXYNOS - tristate "DRM Support for Samsung SoC EXYNOS Series" + tristate "DRM Support for Samsung SoC Exynos Series" depends on OF && DRM && (ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST) depends on MMU select DRM_KMS_HELPER select VIDEOMODE_HELPERS select SND_SOC_HDMI_CODEC if SND_SOC help - Choose this option if you have a Samsung SoC EXYNOS chipset. + Choose this option if you have a Samsung SoC Exynos chipset. If M is selected the module will be called exynosdrm. if DRM_EXYNOS @@ -62,7 +62,7 @@ config DRM_EXYNOS_DSI This enables support for Exynos MIPI-DSI device. config DRM_EXYNOS_DP - bool "EXYNOS specific extensions for Analogix DP driver" + bool "Exynos specific extensions for Analogix DP driver" depends on DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON select DRM_ANALOGIX_DP default DRM_EXYNOS diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 2d5cbfda3ca79a7e087a69d742276b511aac6b06..8428ae12dfa5e8c4de938ac46c0689053d3609a5 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -510,7 +510,7 @@ static void decon_swreset(struct decon_context *ctx) ctx->addr + DECON_CRCCTRL); } -static void decon_enable(struct exynos_drm_crtc *crtc) +static void decon_atomic_enable(struct exynos_drm_crtc *crtc) { struct decon_context *ctx = crtc->ctx; @@ -523,7 +523,7 @@ static void decon_enable(struct exynos_drm_crtc *crtc) decon_commit(ctx->crtc); } -static void decon_disable(struct exynos_drm_crtc *crtc) +static void decon_atomic_disable(struct exynos_drm_crtc *crtc) { struct decon_context *ctx = crtc->ctx; int i; @@ -599,8 +599,8 @@ static enum drm_mode_status decon_mode_valid(struct exynos_drm_crtc *crtc, } static const struct exynos_drm_crtc_ops decon_crtc_ops = { - .enable = decon_enable, - .disable = decon_disable, + .atomic_enable = decon_atomic_enable, + .atomic_disable = decon_atomic_disable, .enable_vblank = decon_enable_vblank, .disable_vblank = decon_disable_vblank, .atomic_begin = decon_atomic_begin, @@ -651,7 +651,7 @@ static void decon_unbind(struct device *dev, struct device *master, void *data) { struct decon_context *ctx = dev_get_drvdata(dev); - decon_disable(ctx->crtc); + decon_atomic_disable(ctx->crtc); /* detach this sub driver from iommu mapping if supported. 
*/ exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev); diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index f0640950bd465b385f8e6bea55194c02b8fd7dd8..ff59c641fa809224b96972928c3b6ae6a5fb3c21 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -526,7 +526,7 @@ static void decon_init(struct decon_context *ctx) writel(VIDCON1_VCLK_HOLD, ctx->regs + VIDCON1(0)); } -static void decon_enable(struct exynos_drm_crtc *crtc) +static void decon_atomic_enable(struct exynos_drm_crtc *crtc) { struct decon_context *ctx = crtc->ctx; @@ -546,7 +546,7 @@ static void decon_enable(struct exynos_drm_crtc *crtc) ctx->suspended = false; } -static void decon_disable(struct exynos_drm_crtc *crtc) +static void decon_atomic_disable(struct exynos_drm_crtc *crtc) { struct decon_context *ctx = crtc->ctx; int i; @@ -568,8 +568,8 @@ static void decon_disable(struct exynos_drm_crtc *crtc) } static const struct exynos_drm_crtc_ops decon_crtc_ops = { - .enable = decon_enable, - .disable = decon_disable, + .atomic_enable = decon_atomic_enable, + .atomic_disable = decon_atomic_disable, .enable_vblank = decon_enable_vblank, .disable_vblank = decon_disable_vblank, .atomic_begin = decon_atomic_begin, @@ -653,7 +653,7 @@ static void decon_unbind(struct device *dev, struct device *master, { struct decon_context *ctx = dev_get_drvdata(dev); - decon_disable(ctx->crtc); + decon_atomic_disable(ctx->crtc); if (ctx->encoder) exynos_dpi_remove(ctx->encoder); diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 77ce78986408b95f9d36977cf5ea064300479e1b..1c03485676efa33e04376a42480a4a2e1a9aec88 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -23,8 +23,8 @@ static void exynos_drm_crtc_atomic_enable(struct drm_crtc *crtc, { struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); - if (exynos_crtc->ops->enable) - exynos_crtc->ops->enable(exynos_crtc); + if (exynos_crtc->ops->atomic_enable) + exynos_crtc->ops->atomic_enable(exynos_crtc); drm_crtc_vblank_on(crtc); } @@ -36,8 +36,8 @@ static void exynos_drm_crtc_atomic_disable(struct drm_crtc *crtc, drm_crtc_vblank_off(crtc); - if (exynos_crtc->ops->disable) - exynos_crtc->ops->disable(exynos_crtc); + if (exynos_crtc->ops->atomic_disable) + exynos_crtc->ops->atomic_disable(exynos_crtc); if (crtc->state->event && !crtc->state->active) { spin_lock_irq(&crtc->dev->event_lock); diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index d4014ba592fdc1485e70ead1b5f73f0bde3fc23a..d4d21d8cfb9060169ab86bda8ef49b879eaa5b4a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -118,8 +118,8 @@ struct exynos_drm_plane_config { /* * Exynos drm crtc ops * - * @enable: enable the device - * @disable: disable the device + * @atomic_enable: enable the device + * @atomic_disable: disable the device * @enable_vblank: specific driver callback for enabling vblank interrupt. * @disable_vblank: specific driver callback for disabling vblank interrupt. 
* @mode_valid: specific driver callback for mode validation @@ -133,8 +133,8 @@ struct exynos_drm_plane_config { */ struct exynos_drm_crtc; struct exynos_drm_crtc_ops { - void (*enable)(struct exynos_drm_crtc *crtc); - void (*disable)(struct exynos_drm_crtc *crtc); + void (*atomic_enable)(struct exynos_drm_crtc *crtc); + void (*atomic_disable)(struct exynos_drm_crtc *crtc); int (*enable_vblank)(struct exynos_drm_crtc *crtc); void (*disable_vblank)(struct exynos_drm_crtc *crtc); enum drm_mode_status (*mode_valid)(struct exynos_drm_crtc *crtc, diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 3955f84dc89356b093b0d9749c9fe2a04008347a..33628d85edad9102cf2bf220b7b78fc645752882 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -1378,6 +1378,7 @@ static void exynos_dsi_unregister_te_irq(struct exynos_dsi *dsi) static void exynos_dsi_enable(struct drm_encoder *encoder) { struct exynos_dsi *dsi = encoder_to_dsi(encoder); + struct drm_bridge *iter; int ret; if (dsi->state & DSIM_STATE_ENABLED) @@ -1391,7 +1392,11 @@ static void exynos_dsi_enable(struct drm_encoder *encoder) if (ret < 0) goto err_put_sync; } else { - drm_bridge_chain_pre_enable(dsi->out_bridge); + list_for_each_entry_reverse(iter, &dsi->bridge_chain, + chain_node) { + if (iter->funcs->pre_enable) + iter->funcs->pre_enable(iter); + } } exynos_dsi_set_display_mode(dsi); @@ -1402,7 +1407,10 @@ static void exynos_dsi_enable(struct drm_encoder *encoder) if (ret < 0) goto err_display_disable; } else { - drm_bridge_chain_enable(dsi->out_bridge); + list_for_each_entry(iter, &dsi->bridge_chain, chain_node) { + if (iter->funcs->enable) + iter->funcs->enable(iter); + } } dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE; @@ -1420,6 +1428,7 @@ err_put_sync: static void exynos_dsi_disable(struct drm_encoder *encoder) { struct exynos_dsi *dsi = encoder_to_dsi(encoder); + struct drm_bridge *iter; if (!(dsi->state & DSIM_STATE_ENABLED)) return; @@ -1427,10 +1436,20 @@ static void exynos_dsi_disable(struct drm_encoder *encoder) dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE; drm_panel_disable(dsi->panel); - drm_bridge_chain_disable(dsi->out_bridge); + + list_for_each_entry_reverse(iter, &dsi->bridge_chain, chain_node) { + if (iter->funcs->disable) + iter->funcs->disable(iter); + } + exynos_dsi_set_display_enable(dsi, false); drm_panel_unprepare(dsi->panel); - drm_bridge_chain_post_disable(dsi->out_bridge); + + list_for_each_entry(iter, &dsi->bridge_chain, chain_node) { + if (iter->funcs->post_disable) + iter->funcs->post_disable(iter); + } + dsi->state &= ~DSIM_STATE_ENABLED; pm_runtime_put_sync(dsi->dev); } @@ -1523,7 +1542,7 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host, if (out_bridge) { drm_bridge_attach(encoder, out_bridge, NULL); dsi->out_bridge = out_bridge; - list_splice(&encoder->bridge_chain, &dsi->bridge_chain); + list_splice_init(&encoder->bridge_chain, &dsi->bridge_chain); } else { int ret = exynos_dsi_create_connector(encoder); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 8d0a929104e53b6d3ee810c7c48329f9984f7041..21aec38702fc2b731a1ae4a982ed15fced156175 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -894,7 +894,7 @@ static void fimd_disable_plane(struct exynos_drm_crtc *crtc, fimd_enable_shadow_channel_path(ctx, win, false); } -static void fimd_enable(struct exynos_drm_crtc *crtc) +static void 
fimd_atomic_enable(struct exynos_drm_crtc *crtc) { struct fimd_context *ctx = crtc->ctx; @@ -912,7 +912,7 @@ static void fimd_enable(struct exynos_drm_crtc *crtc) fimd_commit(ctx->crtc); } -static void fimd_disable(struct exynos_drm_crtc *crtc) +static void fimd_atomic_disable(struct exynos_drm_crtc *crtc) { struct fimd_context *ctx = crtc->ctx; int i; @@ -1006,8 +1006,8 @@ static void fimd_dp_clock_enable(struct exynos_drm_clk *clk, bool enable) } static const struct exynos_drm_crtc_ops fimd_crtc_ops = { - .enable = fimd_enable, - .disable = fimd_disable, + .atomic_enable = fimd_atomic_enable, + .atomic_disable = fimd_atomic_disable, .enable_vblank = fimd_enable_vblank, .disable_vblank = fimd_disable_vblank, .atomic_begin = fimd_atomic_begin, @@ -1098,7 +1098,7 @@ static void fimd_unbind(struct device *dev, struct device *master, { struct fimd_context *ctx = dev_get_drvdata(dev); - fimd_disable(ctx->crtc); + fimd_atomic_disable(ctx->crtc); exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev); diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 7ae087b0504df7c7bacc2a4a4be340e017bfaf80..88b6fcaa20be0976d4ff3f3009a75d548a6398f7 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -1313,6 +1313,7 @@ static int gsc_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; + component_del(dev, &gsc_component_ops); pm_runtime_dont_use_autosuspend(dev); pm_runtime_disable(dev); diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index 65b891cb9c50b0f9ef570e8ab14407d9bd02babb..b320b3a21ad4f60e4efd373217861829cbdc363c 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -120,7 +120,7 @@ static void vidi_update_plane(struct exynos_drm_crtc *crtc, DRM_DEV_DEBUG_KMS(ctx->dev, "dma_addr = %pad\n", &addr); } -static void vidi_enable(struct exynos_drm_crtc *crtc) +static void vidi_atomic_enable(struct exynos_drm_crtc *crtc) { struct vidi_context *ctx = crtc->ctx; @@ -133,7 +133,7 @@ static void vidi_enable(struct exynos_drm_crtc *crtc) drm_crtc_vblank_on(&crtc->base); } -static void vidi_disable(struct exynos_drm_crtc *crtc) +static void vidi_atomic_disable(struct exynos_drm_crtc *crtc) { struct vidi_context *ctx = crtc->ctx; @@ -147,8 +147,8 @@ static void vidi_disable(struct exynos_drm_crtc *crtc) } static const struct exynos_drm_crtc_ops vidi_crtc_ops = { - .enable = vidi_enable, - .disable = vidi_disable, + .atomic_enable = vidi_atomic_enable, + .atomic_disable = vidi_atomic_disable, .enable_vblank = vidi_enable_vblank, .disable_vblank = vidi_disable_vblank, .update_plane = vidi_update_plane, diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 6cfdb95fef2fa171ca34930265c416efbccec53c..38ae9c32feef594f398ab246582067604b9c29f7 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -986,7 +986,7 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc) exynos_crtc_handle_event(crtc); } -static void mixer_enable(struct exynos_drm_crtc *crtc) +static void mixer_atomic_enable(struct exynos_drm_crtc *crtc) { struct mixer_context *ctx = crtc->ctx; @@ -1015,7 +1015,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc) set_bit(MXR_BIT_POWERED, &ctx->flags); } -static void mixer_disable(struct exynos_drm_crtc *crtc) +static void mixer_atomic_disable(struct exynos_drm_crtc *crtc) { struct mixer_context *ctx = 
crtc->ctx; int i; @@ -1109,8 +1109,8 @@ static bool mixer_mode_fixup(struct exynos_drm_crtc *crtc, } static const struct exynos_drm_crtc_ops mixer_crtc_ops = { - .enable = mixer_enable, - .disable = mixer_disable, + .atomic_enable = mixer_atomic_enable, + .atomic_disable = mixer_atomic_disable, .enable_vblank = mixer_enable_vblank, .disable_vblank = mixer_disable_vblank, .atomic_begin = mixer_atomic_begin, diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c index 40a37e400b023fcc6d4398ae8a37418a9bb6b1e2..91f90016dba9dcafd07ec0bdbf473a5853ab5fd7 100644 --- a/drivers/gpu/drm/gma500/psb_irq.c +++ b/drivers/gpu/drm/gma500/psb_irq.c @@ -470,12 +470,11 @@ void psb_irq_turn_off_dpst(struct drm_device *dev) { struct drm_psb_private *dev_priv = (struct drm_psb_private *) dev->dev_private; - u32 hist_reg; u32 pwm_reg; if (gma_power_begin(dev, false)) { PSB_WVDC32(0x00000000, HISTOGRAM_INT_CONTROL); - hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL); + PSB_RVDC32(HISTOGRAM_INT_CONTROL); psb_disable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE); diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index ffa2aa2222bfe5cdb218c420af63d4c0a06e9dd7..b8aee506d595ca7ba2cd0ecfd60e100643f800d0 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -61,10 +61,11 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, crtc_state->pipe_bpp = bpp; crtc_state->pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, - crtc_state->pipe_bpp); + crtc_state->pipe_bpp, + false); slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr, - port, crtc_state->pbn); + port, crtc_state->pbn, 0); if (slots == -EDEADLK) return slots; if (slots >= 0) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 9c495a4785ecfef612d14ac3c7448d415deba24b..b5baaea2c5359e573067555c04bf46aa67c36b62 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -3177,6 +3177,14 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */ batch = gen8_emit_flush_coherentl3_wa(engine, batch); + /* WaClearSlmSpaceAtContextSwitch:skl,bxt,kbl,glk,cfl */ + batch = gen8_emit_pipe_control(batch, + PIPE_CONTROL_FLUSH_L3 | + PIPE_CONTROL_STORE_DATA_INDEX | + PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_QW_WRITE, + LRC_PPHWSP_SCRATCH_ADDR); + batch = emit_lri(batch, lri, ARRAY_SIZE(lri)); /* WaMediaPoolStateCmdInWABB:bxt,glk */ diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index e451298d11c32adc8fedad9e86341ee308ccf8d8..2477a1e5a1669cf220ccb85b062885714c100d3d 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -36,13 +36,32 @@ #define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12)) +static int vgpu_pin_dma_address(struct intel_vgpu *vgpu, + unsigned long size, + dma_addr_t dma_addr) +{ + int ret = 0; + + if (intel_gvt_hypervisor_dma_pin_guest_page(vgpu, dma_addr)) + ret = -EINVAL; + + return ret; +} + +static void vgpu_unpin_dma_address(struct intel_vgpu *vgpu, + dma_addr_t dma_addr) +{ + intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, dma_addr); +} + static int vgpu_gem_get_pages( struct drm_i915_gem_object *obj) { struct drm_i915_private *dev_priv = to_i915(obj->base.dev); + struct intel_vgpu *vgpu; struct sg_table *st; struct scatterlist *sg; - int i, ret; + int i, j, ret; 
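+ /* 'i' counts the entries pinned so far; 'j' iterates them in the error unwind */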
gen8_pte_t __iomem *gtt_entries; struct intel_vgpu_fb_info *fb_info; u32 page_num; @@ -51,6 +70,10 @@ static int vgpu_gem_get_pages( if (WARN_ON(!fb_info)) return -ENODEV; + vgpu = fb_info->obj->vgpu; + if (WARN_ON(!vgpu)) + return -ENODEV; + st = kmalloc(sizeof(*st), GFP_KERNEL); if (unlikely(!st)) return -ENOMEM; @@ -64,21 +87,53 @@ static int vgpu_gem_get_pages( gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + (fb_info->start >> PAGE_SHIFT); for_each_sg(st->sgl, sg, page_num, i) { + dma_addr_t dma_addr = + GEN8_DECODE_PTE(readq(&gtt_entries[i])); + if (vgpu_pin_dma_address(vgpu, PAGE_SIZE, dma_addr)) { + ret = -EINVAL; + goto out; + } + sg->offset = 0; sg->length = PAGE_SIZE; - sg_dma_address(sg) = - GEN8_DECODE_PTE(readq(&gtt_entries[i])); sg_dma_len(sg) = PAGE_SIZE; + sg_dma_address(sg) = dma_addr; } __i915_gem_object_set_pages(obj, st, PAGE_SIZE); +out: + if (ret) { + dma_addr_t dma_addr; + + for_each_sg(st->sgl, sg, i, j) { + dma_addr = sg_dma_address(sg); + if (dma_addr) + vgpu_unpin_dma_address(vgpu, dma_addr); + } + sg_free_table(st); + kfree(st); + } + + return ret; - return 0; } static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages) { + struct scatterlist *sg; + + if (obj->base.dma_buf) { + struct intel_vgpu_fb_info *fb_info = obj->gvt_info; + struct intel_vgpu_dmabuf_obj *obj = fb_info->obj; + struct intel_vgpu *vgpu = obj->vgpu; + int i; + + for_each_sg(pages->sgl, sg, fb_info->size, i) + vgpu_unpin_dma_address(vgpu, + sg_dma_address(sg)); + } + sg_free_table(pages); kfree(pages); } @@ -163,6 +218,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev, drm_gem_private_object_init(dev, &obj->base, roundup(info->size, PAGE_SIZE)); i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class); + i915_gem_object_set_readonly(obj); obj->read_domains = I915_GEM_DOMAIN_GTT; obj->write_domain = 0; diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index fa411ee08a9d40220bc85043581cdd462c95943d..ae6700dc9d73e2c61d4aeb21bee6c3b0085868f4 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -341,6 +341,10 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id); engine_mask |= BIT(VCS1); } + if (data & GEN9_GRDOM_GUC) { + gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id); + vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET; + } engine_mask &= INTEL_INFO(vgpu->gvt->dev_priv)->engine_mask; } @@ -1636,6 +1640,16 @@ static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu, return 0; } +static int guc_status_read(struct intel_vgpu *vgpu, + unsigned int offset, void *p_data, + unsigned int bytes) +{ + /* keep MIA_IN_RESET before clearing */ + read_vreg(vgpu, offset, p_data, bytes); + vgpu_vreg(vgpu, offset) &= ~GS_MIA_IN_RESET; + return 0; +} + static int mmio_read_from_hw(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { @@ -2672,6 +2686,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write); MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write); + MMIO_DH(GUC_STATUS, D_ALL, guc_status_read, NULL); + return 0; } diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index 9599c0a762b23b6138f6ed77148e4a49734e8305..b17c4a1599cd57872191084a9fa7d9c3140d326b 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++
b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -66,6 +66,8 @@ struct intel_gvt_mpt { unsigned long size, dma_addr_t *dma_addr); void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr); + int (*dma_pin_guest_page)(unsigned long handle, dma_addr_t dma_addr); + int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn, unsigned long mfn, unsigned int nr, bool map); int (*set_trap_area)(unsigned long handle, u64 start, u64 end, diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 04a5a0d9082393de81200181074b9683ed6bce81..3259a1fa69e105a2c91bca299dcec8d063bdfdc0 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1916,6 +1916,28 @@ err_unlock: return ret; } +static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr) +{ + struct kvmgt_guest_info *info; + struct gvt_dma *entry; + int ret = 0; + + if (!handle_valid(handle)) + return -ENODEV; + + info = (struct kvmgt_guest_info *)handle; + + mutex_lock(&info->vgpu->vdev.cache_lock); + entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr); + if (entry) + kref_get(&entry->ref); + else + ret = -ENOMEM; + mutex_unlock(&info->vgpu->vdev.cache_lock); + + return ret; +} + static void __gvt_dma_release(struct kref *ref) { struct gvt_dma *entry = container_of(ref, typeof(*entry), ref); @@ -2027,6 +2049,7 @@ static struct intel_gvt_mpt kvmgt_mpt = { .gfn_to_mfn = kvmgt_gfn_to_pfn, .dma_map_guest_page = kvmgt_dma_map_guest_page, .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page, + .dma_pin_guest_page = kvmgt_dma_pin_guest_page, .set_opregion = kvmgt_set_opregion, .set_edid = kvmgt_set_edid, .get_vfio_device = kvmgt_get_vfio_device, diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 0f944012812304c052ad035917ffa442bd381a24..9ad224df9c68bb2aedbb58fbc0c4d4387a95eada 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -254,6 +254,21 @@ static inline void intel_gvt_hypervisor_dma_unmap_guest_page( intel_gvt_host.mpt->dma_unmap_guest_page(vgpu->handle, dma_addr); } +/** + * intel_gvt_hypervisor_dma_pin_guest_page - pin guest dma buf + * @vgpu: a vGPU + * @dma_addr: guest dma addr + * + * Returns: + * 0 on success, negative error code if failed. 
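+ * + * The reference taken here is dropped again through + * intel_gvt_hypervisor_dma_unmap_guest_page() when the dmabuf's pages + * are released (see vgpu_gem_put_pages() in dmabuf.c).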
+ */ +static inline int +intel_gvt_hypervisor_dma_pin_guest_page(struct intel_vgpu *vgpu, + dma_addr_t dma_addr) +{ + return intel_gvt_host.mpt->dma_pin_guest_page(vgpu->handle, dma_addr); +} + /** * intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN * @vgpu: a vGPU diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index d5a6e4e3d0fd7a10f6685d5e8b0365c915eb12fa..85bd9bf4f6eee58b3c00e567a7c5286a017dccb9 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -212,9 +212,9 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt) */ void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu) { - mutex_lock(&vgpu->gvt->lock); + mutex_lock(&vgpu->vgpu_lock); vgpu->active = true; - mutex_unlock(&vgpu->gvt->lock); + mutex_unlock(&vgpu->vgpu_lock); } /** diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c index f522c5f997294ae0aaf70e39f2236ce493176dd1..b561dd05bd62df13db2a4396d7ab783ab6f2347f 100644 --- a/drivers/gpu/drm/lima/lima_sched.c +++ b/drivers/gpu/drm/lima/lima_sched.c @@ -159,9 +159,10 @@ int lima_sched_context_init(struct lima_sched_pipe *pipe, struct lima_sched_context *context, atomic_t *guilty) { - struct drm_sched_rq *rq = pipe->base.sched_rq + DRM_SCHED_PRIORITY_NORMAL; + struct drm_gpu_scheduler *sched = &pipe->base; - return drm_sched_entity_init(&context->base, &rq, 1, guilty); + return drm_sched_entity_init(&context->base, DRM_SCHED_PRIORITY_NORMAL, + &sched, 1, guilty); } void lima_sched_context_fini(struct lima_sched_pipe *pipe, @@ -255,13 +256,17 @@ static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job) return task->fence; } -static void lima_sched_handle_error_task(struct lima_sched_pipe *pipe, - struct lima_sched_task *task) +static void lima_sched_timedout_job(struct drm_sched_job *job) { + struct lima_sched_pipe *pipe = to_lima_pipe(job->sched); + struct lima_sched_task *task = to_lima_task(job); + + if (!pipe->error) + DRM_ERROR("lima job timeout\n"); + drm_sched_stop(&pipe->base, &task->base); - if (task) - drm_sched_increase_karma(&task->base); + drm_sched_increase_karma(&task->base); pipe->task_error(pipe); @@ -284,16 +289,6 @@ static void lima_sched_handle_error_task(struct lima_sched_pipe *pipe, drm_sched_start(&pipe->base, true); } -static void lima_sched_timedout_job(struct drm_sched_job *job) -{ - struct lima_sched_pipe *pipe = to_lima_pipe(job->sched); - struct lima_sched_task *task = to_lima_task(job); - - DRM_ERROR("lima job timeout\n"); - - lima_sched_handle_error_task(pipe, task); -} - static void lima_sched_free_job(struct drm_sched_job *job) { struct lima_sched_task *task = to_lima_task(job); @@ -318,15 +313,6 @@ static const struct drm_sched_backend_ops lima_sched_ops = { .free_job = lima_sched_free_job, }; -static void lima_sched_error_work(struct work_struct *work) -{ - struct lima_sched_pipe *pipe = - container_of(work, struct lima_sched_pipe, error_work); - struct lima_sched_task *task = pipe->current_task; - - lima_sched_handle_error_task(pipe, task); -} - int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name) { unsigned int timeout = lima_sched_timeout_ms > 0 ? 
@@ -335,8 +321,6 @@ int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name) pipe->fence_context = dma_fence_context_alloc(1); spin_lock_init(&pipe->fence_lock); - INIT_WORK(&pipe->error_work, lima_sched_error_work); - return drm_sched_init(&pipe->base, &lima_sched_ops, 1, 0, msecs_to_jiffies(timeout), name); } @@ -349,7 +333,7 @@ void lima_sched_pipe_fini(struct lima_sched_pipe *pipe) void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe) { if (pipe->error) - schedule_work(&pipe->error_work); + drm_sched_fault(&pipe->base); else { struct lima_sched_task *task = pipe->current_task; diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h index 928af91c11184982147c046c5550c614d4a08654..1d814fecbcc0f6236ea616fe7a91c031c3708a8e 100644 --- a/drivers/gpu/drm/lima/lima_sched.h +++ b/drivers/gpu/drm/lima/lima_sched.h @@ -68,8 +68,6 @@ struct lima_sched_pipe { void (*task_fini)(struct lima_sched_pipe *pipe); void (*task_error)(struct lima_sched_pipe *pipe); void (*task_mmu_error)(struct lima_sched_pipe *pipe); - - struct work_struct error_work; }; int lima_sched_task_init(struct lima_sched_task *task, diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile index 5044dfb8e3d62609a9b87d82b501cca4f7b60375..b7a82ed5788f7d50786ea3b56582fc75407867f4 100644 --- a/drivers/gpu/drm/mediatek/Makefile +++ b/drivers/gpu/drm/mediatek/Makefile @@ -20,7 +20,7 @@ obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o mediatek-drm-hdmi-objs := mtk_cec.o \ mtk_hdmi.o \ mtk_hdmi_ddc.o \ - mtk_mt2701_hdmi_phy.o \ + mtk_mt2701_hdmi_phy.o \ mtk_mt8173_hdmi_phy.o \ mtk_hdmi_phy.o diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c index 59de2a46aa49a38b6c51f4b5ce3beb68a64ac713..6fb0d6983a4a507ccfb9ce5e8b9085410025f222 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_color.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c @@ -9,6 +9,7 @@ #include #include #include +#include #include "mtk_drm_crtc.h" #include "mtk_drm_ddp_comp.h" @@ -45,12 +46,12 @@ static inline struct mtk_disp_color *comp_to_color(struct mtk_ddp_comp *comp) static void mtk_color_config(struct mtk_ddp_comp *comp, unsigned int w, unsigned int h, unsigned int vrefresh, - unsigned int bpc) + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) { struct mtk_disp_color *color = comp_to_color(comp); - writel(w, comp->regs + DISP_COLOR_WIDTH(color)); - writel(h, comp->regs + DISP_COLOR_HEIGHT(color)); + mtk_ddp_write(cmdq_pkt, w, comp, DISP_COLOR_WIDTH(color)); + mtk_ddp_write(cmdq_pkt, h, comp, DISP_COLOR_HEIGHT(color)); } static void mtk_color_start(struct mtk_ddp_comp *comp) diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index 4a55bb6e221329eb4183b48d7c0720d72f977984..891d80c73e0452f2a39337c8287c76bb77d985d8 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c @@ -11,6 +11,7 @@ #include #include #include +#include #include "mtk_drm_crtc.h" #include "mtk_drm_ddp_comp.h" @@ -124,14 +125,15 @@ static void mtk_ovl_stop(struct mtk_ddp_comp *comp) static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w, unsigned int h, unsigned int vrefresh, - unsigned int bpc) + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) { if (w != 0 && h != 0) - writel_relaxed(h << 16 | w, comp->regs + DISP_REG_OVL_ROI_SIZE); - writel_relaxed(0x0, comp->regs + DISP_REG_OVL_ROI_BGCLR); + mtk_ddp_write_relaxed(cmdq_pkt, h << 16 | w, comp, + DISP_REG_OVL_ROI_SIZE); + 
mtk_ddp_write_relaxed(cmdq_pkt, 0x0, comp, DISP_REG_OVL_ROI_BGCLR); - writel(0x1, comp->regs + DISP_REG_OVL_RST); - writel(0x0, comp->regs + DISP_REG_OVL_RST); + mtk_ddp_write(cmdq_pkt, 0x1, comp, DISP_REG_OVL_RST); + mtk_ddp_write(cmdq_pkt, 0x0, comp, DISP_REG_OVL_RST); } static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp) @@ -175,16 +177,16 @@ static int mtk_ovl_layer_check(struct mtk_ddp_comp *comp, unsigned int idx, return 0; } -static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx) +static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx, + struct cmdq_pkt *cmdq_pkt) { - unsigned int reg; unsigned int gmc_thrshd_l; unsigned int gmc_thrshd_h; unsigned int gmc_value; struct mtk_disp_ovl *ovl = comp_to_ovl(comp); - writel(0x1, comp->regs + DISP_REG_OVL_RDMA_CTRL(idx)); - + mtk_ddp_write(cmdq_pkt, 0x1, comp, + DISP_REG_OVL_RDMA_CTRL(idx)); gmc_thrshd_l = GMC_THRESHOLD_LOW >> (GMC_THRESHOLD_BITS - ovl->data->gmc_bits); gmc_thrshd_h = GMC_THRESHOLD_HIGH >> @@ -194,22 +196,19 @@ static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx) else gmc_value = gmc_thrshd_l | gmc_thrshd_l << 8 | gmc_thrshd_h << 16 | gmc_thrshd_h << 24; - writel(gmc_value, comp->regs + DISP_REG_OVL_RDMA_GMC(idx)); - - reg = readl(comp->regs + DISP_REG_OVL_SRC_CON); - reg = reg | BIT(idx); - writel(reg, comp->regs + DISP_REG_OVL_SRC_CON); + mtk_ddp_write(cmdq_pkt, gmc_value, + comp, DISP_REG_OVL_RDMA_GMC(idx)); + mtk_ddp_write_mask(cmdq_pkt, BIT(idx), comp, + DISP_REG_OVL_SRC_CON, BIT(idx)); } -static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx) +static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx, + struct cmdq_pkt *cmdq_pkt) { - unsigned int reg; - - reg = readl(comp->regs + DISP_REG_OVL_SRC_CON); - reg = reg & ~BIT(idx); - writel(reg, comp->regs + DISP_REG_OVL_SRC_CON); - - writel(0x0, comp->regs + DISP_REG_OVL_RDMA_CTRL(idx)); + mtk_ddp_write_mask(cmdq_pkt, 0, comp, + DISP_REG_OVL_SRC_CON, BIT(idx)); + mtk_ddp_write(cmdq_pkt, 0, comp, + DISP_REG_OVL_RDMA_CTRL(idx)); } static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt) @@ -249,7 +248,8 @@ static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt) } static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, - struct mtk_plane_state *state) + struct mtk_plane_state *state, + struct cmdq_pkt *cmdq_pkt) { struct mtk_disp_ovl *ovl = comp_to_ovl(comp); struct mtk_plane_pending_state *pending = &state->pending; @@ -260,11 +260,13 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, unsigned int src_size = (pending->height << 16) | pending->width; unsigned int con; - if (!pending->enable) - mtk_ovl_layer_off(comp, idx); + if (!pending->enable) { + mtk_ovl_layer_off(comp, idx, cmdq_pkt); + return; + } con = ovl_fmt_convert(ovl, fmt); - if (idx != 0) + if (state->base.fb->format->has_alpha) con |= OVL_CON_AEN | OVL_CON_ALPHA; if (pending->rotation & DRM_MODE_REFLECT_Y) { @@ -277,14 +279,18 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, addr += pending->pitch - 1; } - writel_relaxed(con, comp->regs + DISP_REG_OVL_CON(idx)); - writel_relaxed(pitch, comp->regs + DISP_REG_OVL_PITCH(idx)); - writel_relaxed(src_size, comp->regs + DISP_REG_OVL_SRC_SIZE(idx)); - writel_relaxed(offset, comp->regs + DISP_REG_OVL_OFFSET(idx)); - writel_relaxed(addr, comp->regs + DISP_REG_OVL_ADDR(ovl, idx)); - - if (pending->enable) - mtk_ovl_layer_on(comp, idx); + 
mtk_ddp_write_relaxed(cmdq_pkt, con, comp, + DISP_REG_OVL_CON(idx)); + mtk_ddp_write_relaxed(cmdq_pkt, pitch, comp, + DISP_REG_OVL_PITCH(idx)); + mtk_ddp_write_relaxed(cmdq_pkt, src_size, comp, + DISP_REG_OVL_SRC_SIZE(idx)); + mtk_ddp_write_relaxed(cmdq_pkt, offset, comp, + DISP_REG_OVL_OFFSET(idx)); + mtk_ddp_write_relaxed(cmdq_pkt, addr, comp, + DISP_REG_OVL_ADDR(ovl, idx)); + + mtk_ovl_layer_on(comp, idx, cmdq_pkt); } static void mtk_ovl_bgclr_in_on(struct mtk_ddp_comp *comp) @@ -313,8 +319,6 @@ static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = { .disable_vblank = mtk_ovl_disable_vblank, .supported_rotations = mtk_ovl_supported_rotations, .layer_nr = mtk_ovl_layer_nr, - .layer_on = mtk_ovl_layer_on, - .layer_off = mtk_ovl_layer_off, .layer_check = mtk_ovl_layer_check, .layer_config = mtk_ovl_layer_config, .bgclr_in_on = mtk_ovl_bgclr_in_on, diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c index 405afef31407eebfb1b6e81e91ee827104b14e08..0cb848d642065ec67a18e2eaf8882f7f6fa90d9c 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c @@ -9,6 +9,7 @@ #include #include #include +#include #include "mtk_drm_crtc.h" #include "mtk_drm_ddp_comp.h" @@ -125,14 +126,16 @@ static void mtk_rdma_stop(struct mtk_ddp_comp *comp) static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width, unsigned int height, unsigned int vrefresh, - unsigned int bpc) + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) { unsigned int threshold; unsigned int reg; struct mtk_disp_rdma *rdma = comp_to_rdma(comp); - rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, 0xfff, width); - rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_1, 0xfffff, height); + mtk_ddp_write_mask(cmdq_pkt, width, comp, + DISP_REG_RDMA_SIZE_CON_0, 0xfff); + mtk_ddp_write_mask(cmdq_pkt, height, comp, + DISP_REG_RDMA_SIZE_CON_1, 0xfffff); /* * Enable FIFO underflow since DSI and DPI can't be blocked. 
@@ -144,7 +147,7 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width, reg = RDMA_FIFO_UNDERFLOW_EN | RDMA_FIFO_PSEUDO_SIZE(RDMA_FIFO_SIZE(rdma)) | RDMA_OUTPUT_VALID_FIFO_THRESHOLD(threshold); - writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON); + mtk_ddp_write(cmdq_pkt, reg, comp, DISP_REG_RDMA_FIFO_CON); } static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma, @@ -190,7 +193,8 @@ static unsigned int mtk_rdma_layer_nr(struct mtk_ddp_comp *comp) } static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, - struct mtk_plane_state *state) + struct mtk_plane_state *state, + struct cmdq_pkt *cmdq_pkt) { struct mtk_disp_rdma *rdma = comp_to_rdma(comp); struct mtk_plane_pending_state *pending = &state->pending; @@ -200,24 +204,27 @@ static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, unsigned int con; con = rdma_fmt_convert(rdma, fmt); - writel_relaxed(con, comp->regs + DISP_RDMA_MEM_CON); + mtk_ddp_write_relaxed(cmdq_pkt, con, comp, DISP_RDMA_MEM_CON); if (fmt == DRM_FORMAT_UYVY || fmt == DRM_FORMAT_YUYV) { - rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, - RDMA_MATRIX_ENABLE, RDMA_MATRIX_ENABLE); - rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, - RDMA_MATRIX_INT_MTX_SEL, - RDMA_MATRIX_INT_MTX_BT601_to_RGB); + mtk_ddp_write_mask(cmdq_pkt, RDMA_MATRIX_ENABLE, comp, + DISP_REG_RDMA_SIZE_CON_0, + RDMA_MATRIX_ENABLE); + mtk_ddp_write_mask(cmdq_pkt, RDMA_MATRIX_INT_MTX_BT601_to_RGB, + comp, DISP_REG_RDMA_SIZE_CON_0, + RDMA_MATRIX_INT_MTX_SEL); } else { - rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, - RDMA_MATRIX_ENABLE, 0); + mtk_ddp_write_mask(cmdq_pkt, 0, comp, + DISP_REG_RDMA_SIZE_CON_0, + RDMA_MATRIX_ENABLE); } + mtk_ddp_write_relaxed(cmdq_pkt, addr, comp, DISP_RDMA_MEM_START_ADDR); + mtk_ddp_write_relaxed(cmdq_pkt, pitch, comp, DISP_RDMA_MEM_SRC_PITCH); + mtk_ddp_write(cmdq_pkt, RDMA_MEM_GMC, comp, + DISP_RDMA_MEM_GMC_SETTING_0); + mtk_ddp_write_mask(cmdq_pkt, RDMA_MODE_MEMORY, comp, + DISP_REG_RDMA_GLOBAL_CON, RDMA_MODE_MEMORY); - writel_relaxed(addr, comp->regs + DISP_RDMA_MEM_START_ADDR); - writel_relaxed(pitch, comp->regs + DISP_RDMA_MEM_SRC_PITCH); - writel(RDMA_MEM_GMC, comp->regs + DISP_RDMA_MEM_GMC_SETTING_0); - rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON, - RDMA_MODE_MEMORY, RDMA_MODE_MEMORY); } static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = { diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index f80a8ba759770f82fa435cc97f5b63649477c52e..0dfcd1787e6519a3c6371938cb5ba8a761be1a9c 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -5,6 +5,7 @@ #include #include +#include #include #include @@ -42,11 +43,20 @@ struct mtk_drm_crtc { struct drm_plane *planes; unsigned int layer_nr; bool pending_planes; + bool pending_async_planes; + +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + struct cmdq_client *cmdq_client; + u32 cmdq_event; +#endif void __iomem *config_regs; struct mtk_disp_mutex *mutex; unsigned int ddp_comp_nr; struct mtk_ddp_comp **ddp_comp; + + /* lock for display hardware access */ + struct mutex hw_lock; }; struct mtk_crtc_state { @@ -215,11 +225,12 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc, struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); struct mtk_ddp_comp *comp; int i, count = 0; + unsigned int local_index = plane - mtk_crtc->planes; for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { comp = mtk_crtc->ddp_comp[i]; - if (plane->index < (count + 
mtk_ddp_comp_layer_nr(comp))) { - *local_layer = plane->index - count; + if (local_index < (count + mtk_ddp_comp_layer_nr(comp))) { + *local_layer = local_index - count; return comp; } count += mtk_ddp_comp_layer_nr(comp); @@ -229,6 +240,13 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc, return NULL; } +#if IS_REACHABLE(CONFIG_MTK_CMDQ) +static void ddp_cmdq_cb(struct cmdq_cb_data data) +{ + cmdq_pkt_destroy(data.data); +} +#endif + static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) { struct drm_crtc *crtc = &mtk_crtc->base; @@ -297,7 +315,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) if (i == 1) mtk_ddp_comp_bgclr_in_on(comp); - mtk_ddp_comp_config(comp, width, height, vrefresh, bpc); + mtk_ddp_comp_config(comp, width, height, vrefresh, bpc, NULL); mtk_ddp_comp_start(comp); } @@ -310,7 +328,9 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) plane_state = to_mtk_plane_state(plane->state); comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer); - mtk_ddp_comp_layer_config(comp, local_layer, plane_state); + if (comp) + mtk_ddp_comp_layer_config(comp, local_layer, + plane_state, NULL); } return 0; @@ -325,6 +345,7 @@ err_pm_runtime_put: static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc) { struct drm_device *drm = mtk_crtc->base.dev; + struct drm_crtc *crtc = &mtk_crtc->base; int i; DRM_DEBUG_DRIVER("%s\n", __func__); @@ -350,9 +371,17 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc) mtk_disp_mutex_unprepare(mtk_crtc->mutex); pm_runtime_put(drm->dev); + + if (crtc->state->event && !crtc->state->active) { + spin_lock_irq(&crtc->dev->event_lock); + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + spin_unlock_irq(&crtc->dev->event_lock); + } } -static void mtk_crtc_ddp_config(struct drm_crtc *crtc) +static void mtk_crtc_ddp_config(struct drm_crtc *crtc, + struct cmdq_pkt *cmdq_handle) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state); @@ -368,7 +397,8 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) if (state->pending_config) { mtk_ddp_comp_config(comp, state->pending_width, state->pending_height, - state->pending_vrefresh, 0); + state->pending_vrefresh, 0, + cmdq_handle); state->pending_config = false; } @@ -386,12 +416,84 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer); - mtk_ddp_comp_layer_config(comp, local_layer, - plane_state); + if (comp) + mtk_ddp_comp_layer_config(comp, local_layer, + plane_state, + cmdq_handle); plane_state->pending.config = false; } mtk_crtc->pending_planes = false; } + + if (mtk_crtc->pending_async_planes) { + for (i = 0; i < mtk_crtc->layer_nr; i++) { + struct drm_plane *plane = &mtk_crtc->planes[i]; + struct mtk_plane_state *plane_state; + + plane_state = to_mtk_plane_state(plane->state); + + if (!plane_state->pending.async_config) + continue; + + comp = mtk_drm_ddp_comp_for_plane(crtc, plane, + &local_layer); + + if (comp) + mtk_ddp_comp_layer_config(comp, local_layer, + plane_state, + cmdq_handle); + plane_state->pending.async_config = false; + } + mtk_crtc->pending_async_planes = false; + } +} + +static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc) +{ +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + struct cmdq_pkt *cmdq_handle; +#endif + struct drm_crtc *crtc = &mtk_crtc->base; + struct mtk_drm_private *priv = crtc->dev->dev_private; + unsigned int pending_planes 
= 0, pending_async_planes = 0; + int i; + + mutex_lock(&mtk_crtc->hw_lock); + for (i = 0; i < mtk_crtc->layer_nr; i++) { + struct drm_plane *plane = &mtk_crtc->planes[i]; + struct mtk_plane_state *plane_state; + + plane_state = to_mtk_plane_state(plane->state); + if (plane_state->pending.dirty) { + plane_state->pending.config = true; + plane_state->pending.dirty = false; + pending_planes |= BIT(i); + } else if (plane_state->pending.async_dirty) { + plane_state->pending.async_config = true; + plane_state->pending.async_dirty = false; + pending_async_planes |= BIT(i); + } + } + if (pending_planes) + mtk_crtc->pending_planes = true; + if (pending_async_planes) + mtk_crtc->pending_async_planes = true; + + if (priv->data->shadow_register) { + mtk_disp_mutex_acquire(mtk_crtc->mutex); + mtk_crtc_ddp_config(crtc, NULL); + mtk_disp_mutex_release(mtk_crtc->mutex); + } +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + if (mtk_crtc->cmdq_client) { + cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE); + cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event); + cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event); + mtk_crtc_ddp_config(crtc, cmdq_handle); + cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle); + } +#endif + mutex_unlock(&mtk_crtc->hw_lock); } int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane, @@ -401,7 +503,23 @@ int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane, struct mtk_ddp_comp *comp; comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer); - return mtk_ddp_comp_layer_check(comp, local_layer, state); + if (comp) + return mtk_ddp_comp_layer_check(comp, local_layer, state); + return 0; +} + +void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane, + struct drm_plane_state *new_state) +{ + struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); + const struct drm_plane_helper_funcs *plane_helper_funcs = + plane->helper_private; + + if (!mtk_crtc->enabled) + return; + + plane_helper_funcs->atomic_update(plane, new_state); + mtk_drm_crtc_hw_config(mtk_crtc); } static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc, @@ -451,6 +569,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, } mtk_crtc->pending_planes = true; + mtk_drm_crtc_hw_config(mtk_crtc); /* Wait for planes to be disabled */ drm_crtc_wait_one_vblank(crtc); @@ -482,34 +601,16 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); - struct mtk_drm_private *priv = crtc->dev->dev_private; - unsigned int pending_planes = 0; int i; if (mtk_crtc->event) mtk_crtc->pending_needs_vblank = true; - for (i = 0; i < mtk_crtc->layer_nr; i++) { - struct drm_plane *plane = &mtk_crtc->planes[i]; - struct mtk_plane_state *plane_state; - - plane_state = to_mtk_plane_state(plane->state); - if (plane_state->pending.dirty) { - plane_state->pending.config = true; - plane_state->pending.dirty = false; - pending_planes |= BIT(i); - } - } - if (pending_planes) - mtk_crtc->pending_planes = true; if (crtc->state->color_mgmt_changed) - for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state); - - if (priv->data->shadow_register) { - mtk_disp_mutex_acquire(mtk_crtc->mutex); - mtk_crtc_ddp_config(crtc); - mtk_disp_mutex_release(mtk_crtc->mutex); - } + mtk_ddp_ctm_set(mtk_crtc->ddp_comp[i], crtc->state); + } + mtk_drm_crtc_hw_config(mtk_crtc); } static const struct 
drm_crtc_funcs mtk_crtc_funcs = { @@ -559,8 +660,12 @@ void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp) struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); struct mtk_drm_private *priv = crtc->dev->dev_private; +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + if (!priv->data->shadow_register && !mtk_crtc->cmdq_client) +#else if (!priv->data->shadow_register) - mtk_crtc_ddp_config(crtc); +#endif + mtk_crtc_ddp_config(crtc, NULL); mtk_drm_finish_page_flip(mtk_crtc); } @@ -627,6 +732,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, int pipe = priv->num_pipes; int ret; int i; + bool has_ctm = false; + uint gamma_lut_size = 0; if (!path) return 0; @@ -677,6 +784,14 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, } mtk_crtc->ddp_comp[i] = comp; + + if (comp->funcs) { + if (comp->funcs->gamma_set) + gamma_lut_size = MTK_LUT_SIZE; + + if (comp->funcs->ctm_set) + has_ctm = true; + } } for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) @@ -697,9 +812,28 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, NULL, pipe); if (ret < 0) return ret; - drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE); - drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, false, MTK_LUT_SIZE); - priv->num_pipes++; + if (gamma_lut_size) + drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size); + drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, has_ctm, gamma_lut_size); + priv->num_pipes++; + mutex_init(&mtk_crtc->hw_lock); + +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + mtk_crtc->cmdq_client = + cmdq_mbox_create(dev, drm_crtc_index(&mtk_crtc->base), + 2000); + if (IS_ERR(mtk_crtc->cmdq_client)) { + dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n", + drm_crtc_index(&mtk_crtc->base)); + mtk_crtc->cmdq_client = NULL; + } + ret = of_property_read_u32_index(dev->of_node, "mediatek,gce-events", + drm_crtc_index(&mtk_crtc->base), + &mtk_crtc->cmdq_event); + if (ret) + dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n", + drm_crtc_index(&mtk_crtc->base)); +#endif return 0; } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h index 6afe1c19557a4de4a18851fb20059a69dd87c07d..a2b4677a451cf42d7cffd8904eabef0b1e404534 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h @@ -21,5 +21,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, unsigned int path_len); int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane, struct mtk_plane_state *state); +void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane, + struct drm_plane_state *plane_state); #endif /* MTK_DRM_CRTC_H */ diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c index 7f21307cda75b4d31b834efd261f0e734f86b10b..1f5a112bb034d76ed145f09863b003bd6cb31a4a 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c @@ -12,7 +12,7 @@ #include #include #include - +#include #include "mtk_drm_drv.h" #include "mtk_drm_plane.h" #include "mtk_drm_ddp_comp.h" @@ -37,7 +37,15 @@ #define CCORR_EN BIT(0) #define DISP_CCORR_CFG 0x0020 #define CCORR_RELAY_MODE BIT(0) +#define CCORR_ENGINE_EN BIT(1) +#define CCORR_GAMMA_OFF BIT(2) +#define CCORR_WGAMUT_SRC_CLIP BIT(3) #define DISP_CCORR_SIZE 0x0030 +#define DISP_CCORR_COEF_0 0x0080 +#define DISP_CCORR_COEF_1 0x0084 +#define DISP_CCORR_COEF_2 0x0088 +#define DISP_CCORR_COEF_3 0x008C +#define DISP_CCORR_COEF_4 0x0090 #define DISP_DITHER_EN 
0x0000 #define DITHER_EN BIT(0) @@ -76,36 +84,84 @@ #define DITHER_ADD_LSHIFT_G(x) (((x) & 0x7) << 4) #define DITHER_ADD_RSHIFT_G(x) (((x) & 0x7) << 0) +void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value, + struct mtk_ddp_comp *comp, unsigned int offset) +{ +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + if (cmdq_pkt) + cmdq_pkt_write(cmdq_pkt, comp->subsys, + comp->regs_pa + offset, value); + else +#endif + writel(value, comp->regs + offset); +} + +void mtk_ddp_write_relaxed(struct cmdq_pkt *cmdq_pkt, unsigned int value, + struct mtk_ddp_comp *comp, + unsigned int offset) +{ +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + if (cmdq_pkt) + cmdq_pkt_write(cmdq_pkt, comp->subsys, + comp->regs_pa + offset, value); + else +#endif + writel_relaxed(value, comp->regs + offset); +} + +void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt, + unsigned int value, + struct mtk_ddp_comp *comp, + unsigned int offset, + unsigned int mask) +{ +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + if (cmdq_pkt) { + cmdq_pkt_write_mask(cmdq_pkt, comp->subsys, + comp->regs_pa + offset, value, mask); + } else { +#endif + u32 tmp = readl(comp->regs + offset); + + tmp = (tmp & ~mask) | (value & mask); + writel(tmp, comp->regs + offset); +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + } +#endif +} + void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc, - unsigned int CFG) + unsigned int CFG, struct cmdq_pkt *cmdq_pkt) { /* If bpc is 0, the dithering function is not enabled */ if (bpc == 0) return; if (bpc >= MTK_MIN_BPC) { - writel(0, comp->regs + DISP_DITHER_5); - writel(0, comp->regs + DISP_DITHER_7); - writel(DITHER_LSB_ERR_SHIFT_R(MTK_MAX_BPC - bpc) | - DITHER_ADD_LSHIFT_R(MTK_MAX_BPC - bpc) | - DITHER_NEW_BIT_MODE, - comp->regs + DISP_DITHER_15); - writel(DITHER_LSB_ERR_SHIFT_B(MTK_MAX_BPC - bpc) | - DITHER_ADD_LSHIFT_B(MTK_MAX_BPC - bpc) | - DITHER_LSB_ERR_SHIFT_G(MTK_MAX_BPC - bpc) | - DITHER_ADD_LSHIFT_G(MTK_MAX_BPC - bpc), - comp->regs + DISP_DITHER_16); - writel(DISP_DITHERING, comp->regs + CFG); + mtk_ddp_write(cmdq_pkt, 0, comp, DISP_DITHER_5); + mtk_ddp_write(cmdq_pkt, 0, comp, DISP_DITHER_7); + mtk_ddp_write(cmdq_pkt, + DITHER_LSB_ERR_SHIFT_R(MTK_MAX_BPC - bpc) | + DITHER_ADD_LSHIFT_R(MTK_MAX_BPC - bpc) | + DITHER_NEW_BIT_MODE, + comp, DISP_DITHER_15); + mtk_ddp_write(cmdq_pkt, + DITHER_LSB_ERR_SHIFT_B(MTK_MAX_BPC - bpc) | + DITHER_ADD_LSHIFT_B(MTK_MAX_BPC - bpc) | + DITHER_LSB_ERR_SHIFT_G(MTK_MAX_BPC - bpc) | + DITHER_ADD_LSHIFT_G(MTK_MAX_BPC - bpc), + comp, DISP_DITHER_16); + mtk_ddp_write(cmdq_pkt, DISP_DITHERING, comp, CFG); } } static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w, unsigned int h, unsigned int vrefresh, - unsigned int bpc) + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) { - writel(w << 16 | h, comp->regs + DISP_OD_SIZE); - writel(OD_RELAYMODE, comp->regs + DISP_OD_CFG); - mtk_dither_set(comp, bpc, DISP_OD_CFG); + mtk_ddp_write(cmdq_pkt, w << 16 | h, comp, DISP_OD_SIZE); + mtk_ddp_write(cmdq_pkt, OD_RELAYMODE, comp, DISP_OD_CFG); + mtk_dither_set(comp, bpc, DISP_OD_CFG, cmdq_pkt); } static void mtk_od_start(struct mtk_ddp_comp *comp) @@ -120,9 +176,9 @@ static void mtk_ufoe_start(struct mtk_ddp_comp *comp) static void mtk_aal_config(struct mtk_ddp_comp *comp, unsigned int w, unsigned int h, unsigned int vrefresh, - unsigned int bpc) + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) { - writel(h << 16 | w, comp->regs + DISP_AAL_SIZE); + mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_AAL_SIZE); } static void mtk_aal_start(struct mtk_ddp_comp *comp) @@ -137,10 +193,10 @@ static void
mtk_aal_stop(struct mtk_ddp_comp *comp) static void mtk_ccorr_config(struct mtk_ddp_comp *comp, unsigned int w, unsigned int h, unsigned int vrefresh, - unsigned int bpc) + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) { - writel(h << 16 | w, comp->regs + DISP_CCORR_SIZE); - writel(CCORR_RELAY_MODE, comp->regs + DISP_CCORR_CFG); + mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_CCORR_SIZE); + mtk_ddp_write(cmdq_pkt, CCORR_ENGINE_EN, comp, DISP_CCORR_CFG); } static void mtk_ccorr_start(struct mtk_ddp_comp *comp) @@ -153,12 +209,63 @@ static void mtk_ccorr_stop(struct mtk_ddp_comp *comp) writel_relaxed(0x0, comp->regs + DISP_CCORR_EN); } +/* Converts a DRM S31.32 value to the HW S1.10 format. */ +static u16 mtk_ctm_s31_32_to_s1_10(u64 in) +{ + u16 r; + + /* Sign bit. */ + r = in & BIT_ULL(63) ? BIT(11) : 0; + + if ((in & GENMASK_ULL(62, 33)) > 0) { + /* identity value 0x100000000 -> 0x400, */ + /* if bigger than this, set it to the max 0x7ff. */ + r |= GENMASK(10, 0); + } else { + /* take the 11 most significant bits. */ + r |= (in >> 22) & GENMASK(10, 0); + } + + return r; +} + +static void mtk_ccorr_ctm_set(struct mtk_ddp_comp *comp, + struct drm_crtc_state *state) +{ + struct drm_property_blob *blob = state->ctm; + struct drm_color_ctm *ctm; + const u64 *input; + uint16_t coeffs[9] = { 0 }; + int i; + struct cmdq_pkt *cmdq_pkt = NULL; + + if (!blob) + return; + + ctm = (struct drm_color_ctm *)blob->data; + input = ctm->matrix; + + for (i = 0; i < ARRAY_SIZE(coeffs); i++) + coeffs[i] = mtk_ctm_s31_32_to_s1_10(input[i]); + + mtk_ddp_write(cmdq_pkt, coeffs[0] << 16 | coeffs[1], + comp, DISP_CCORR_COEF_0); + mtk_ddp_write(cmdq_pkt, coeffs[2] << 16 | coeffs[3], + comp, DISP_CCORR_COEF_1); + mtk_ddp_write(cmdq_pkt, coeffs[4] << 16 | coeffs[5], + comp, DISP_CCORR_COEF_2); + mtk_ddp_write(cmdq_pkt, coeffs[6] << 16 | coeffs[7], + comp, DISP_CCORR_COEF_3); + mtk_ddp_write(cmdq_pkt, coeffs[8] << 16, + comp, DISP_CCORR_COEF_4); +} + static void mtk_dither_config(struct mtk_ddp_comp *comp, unsigned int w, unsigned int h, unsigned int vrefresh, - unsigned int bpc) + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) { - writel(h << 16 | w, comp->regs + DISP_DITHER_SIZE); - writel(DITHER_RELAY_MODE, comp->regs + DISP_DITHER_CFG); + mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_DITHER_SIZE); + mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, comp, DISP_DITHER_CFG); } static void mtk_dither_start(struct mtk_ddp_comp *comp) @@ -173,10 +280,10 @@ static void mtk_dither_stop(struct mtk_ddp_comp *comp) static void mtk_gamma_config(struct mtk_ddp_comp *comp, unsigned int w, unsigned int h, unsigned int vrefresh, - unsigned int bpc) + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) { - writel(h << 16 | w, comp->regs + DISP_GAMMA_SIZE); - mtk_dither_set(comp, bpc, DISP_GAMMA_CFG); + mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_GAMMA_SIZE); + mtk_dither_set(comp, bpc, DISP_GAMMA_CFG, cmdq_pkt); } static void mtk_gamma_start(struct mtk_ddp_comp *comp) @@ -223,6 +330,7 @@ static const struct mtk_ddp_comp_funcs ddp_ccorr = { .config = mtk_ccorr_config, .start = mtk_ccorr_start, .stop = mtk_ccorr_stop, + .ctm_set = mtk_ccorr_ctm_set, }; static const struct mtk_ddp_comp_funcs ddp_dither = { @@ -326,6 +434,11 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node, enum mtk_ddp_comp_type type; struct device_node *larb_node; struct platform_device *larb_pdev; +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + struct resource res; + struct cmdq_client_reg cmdq_reg; + int ret; +#endif if (comp_id < 0 || comp_id >= DDP_COMPONENT_ID_MAX) return
-EINVAL; @@ -379,6 +492,19 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node, comp->larb_dev = &larb_pdev->dev; +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + if (of_address_to_resource(node, 0, &res) != 0) { + dev_err(dev, "Missing reg in %s node\n", node->full_name); + return -EINVAL; + } + comp->regs_pa = res.start; + + ret = cmdq_dev_get_client_reg(dev, &cmdq_reg, 0); + if (ret) + dev_dbg(dev, "get mediatek,gce-client-reg fail!\n"); + else + comp->subsys = cmdq_reg.subsys; +#endif return 0; } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h index 2f1e9e75b8da4699c4e8cc5ae4a228061d3fda8c..debe36395fe721e2a57007daee484ad9b3d4db7e 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h @@ -69,27 +69,29 @@ enum mtk_ddp_comp_id { }; struct mtk_ddp_comp; - +struct cmdq_pkt; struct mtk_ddp_comp_funcs { void (*config)(struct mtk_ddp_comp *comp, unsigned int w, - unsigned int h, unsigned int vrefresh, unsigned int bpc); + unsigned int h, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt); void (*start)(struct mtk_ddp_comp *comp); void (*stop)(struct mtk_ddp_comp *comp); void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc); void (*disable_vblank)(struct mtk_ddp_comp *comp); unsigned int (*supported_rotations)(struct mtk_ddp_comp *comp); unsigned int (*layer_nr)(struct mtk_ddp_comp *comp); - void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx); - void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx); int (*layer_check)(struct mtk_ddp_comp *comp, unsigned int idx, struct mtk_plane_state *state); void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx, - struct mtk_plane_state *state); + struct mtk_plane_state *state, + struct cmdq_pkt *cmdq_pkt); void (*gamma_set)(struct mtk_ddp_comp *comp, struct drm_crtc_state *state); void (*bgclr_in_on)(struct mtk_ddp_comp *comp); void (*bgclr_in_off)(struct mtk_ddp_comp *comp); + void (*ctm_set)(struct mtk_ddp_comp *comp, + struct drm_crtc_state *state); }; struct mtk_ddp_comp { @@ -99,14 +101,17 @@ struct mtk_ddp_comp { struct device *larb_dev; enum mtk_ddp_comp_id id; const struct mtk_ddp_comp_funcs *funcs; + resource_size_t regs_pa; + u8 subsys; }; static inline void mtk_ddp_comp_config(struct mtk_ddp_comp *comp, unsigned int w, unsigned int h, - unsigned int vrefresh, unsigned int bpc) + unsigned int vrefresh, unsigned int bpc, + struct cmdq_pkt *cmdq_pkt) { if (comp->funcs && comp->funcs->config) - comp->funcs->config(comp, w, h, vrefresh, bpc); + comp->funcs->config(comp, w, h, vrefresh, bpc, cmdq_pkt); } static inline void mtk_ddp_comp_start(struct mtk_ddp_comp *comp) @@ -151,20 +156,6 @@ static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp) return 0; } -static inline void mtk_ddp_comp_layer_on(struct mtk_ddp_comp *comp, - unsigned int idx) -{ - if (comp->funcs && comp->funcs->layer_on) - comp->funcs->layer_on(comp, idx); -} - -static inline void mtk_ddp_comp_layer_off(struct mtk_ddp_comp *comp, - unsigned int idx) -{ - if (comp->funcs && comp->funcs->layer_off) - comp->funcs->layer_off(comp, idx); -} - static inline int mtk_ddp_comp_layer_check(struct mtk_ddp_comp *comp, unsigned int idx, struct mtk_plane_state *state) @@ -176,10 +167,11 @@ static inline int mtk_ddp_comp_layer_check(struct mtk_ddp_comp *comp, static inline void mtk_ddp_comp_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, - struct mtk_plane_state *state) + struct 
mtk_plane_state *state, + struct cmdq_pkt *cmdq_pkt) { if (comp->funcs && comp->funcs->layer_config) - comp->funcs->layer_config(comp, idx, state); + comp->funcs->layer_config(comp, idx, state, cmdq_pkt); } static inline void mtk_ddp_gamma_set(struct mtk_ddp_comp *comp, @@ -201,6 +193,13 @@ static inline void mtk_ddp_comp_bgclr_in_off(struct mtk_ddp_comp *comp) comp->funcs->bgclr_in_off(comp); } +static inline void mtk_ddp_ctm_set(struct mtk_ddp_comp *comp, + struct drm_crtc_state *state) +{ + if (comp->funcs && comp->funcs->ctm_set) + comp->funcs->ctm_set(comp, state); +} + int mtk_ddp_comp_get_id(struct device_node *node, enum mtk_ddp_comp_type comp_type); int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node, @@ -209,6 +208,13 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node, int mtk_ddp_comp_register(struct drm_device *drm, struct mtk_ddp_comp *comp); void mtk_ddp_comp_unregister(struct drm_device *drm, struct mtk_ddp_comp *comp); void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc, - unsigned int CFG); - + unsigned int CFG, struct cmdq_pkt *cmdq_pkt); +enum mtk_ddp_comp_type mtk_ddp_comp_get_type(enum mtk_ddp_comp_id comp_id); +void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value, + struct mtk_ddp_comp *comp, unsigned int offset); +void mtk_ddp_write_relaxed(struct cmdq_pkt *cmdq_pkt, unsigned int value, + struct mtk_ddp_comp *comp, unsigned int offset); +void mtk_ddp_write_mask(struct cmdq_pkt *cmdq_pkt, unsigned int value, + struct mtk_ddp_comp *comp, unsigned int offset, + unsigned int mask); #endif /* MTK_DRM_DDP_COMP_H */ diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 2b1c122066eab97fe869ad6bdfb4930c9caa9db3..0563c6813333ecf70fd6264065bf79cbae801b24 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -37,84 +37,9 @@ #define DRIVER_MAJOR 1 #define DRIVER_MINOR 0 -static void mtk_atomic_schedule(struct mtk_drm_private *private, - struct drm_atomic_state *state) -{ - private->commit.state = state; - schedule_work(&private->commit.work); -} - -static void mtk_atomic_complete(struct mtk_drm_private *private, - struct drm_atomic_state *state) -{ - struct drm_device *drm = private->drm; - - drm_atomic_helper_wait_for_fences(drm, state, false); - - /* - * Mediatek drm supports runtime PM, so plane registers cannot be - * written when their crtc is disabled. - * - * The comment for drm_atomic_helper_commit states: - * For drivers supporting runtime PM the recommended sequence is - * - * drm_atomic_helper_commit_modeset_disables(dev, state); - * drm_atomic_helper_commit_modeset_enables(dev, state); - * drm_atomic_helper_commit_planes(dev, state, - * DRM_PLANE_COMMIT_ACTIVE_ONLY); - * - * See the kerneldoc entries for these three functions for more details. 
- */ - drm_atomic_helper_commit_modeset_disables(drm, state); - drm_atomic_helper_commit_modeset_enables(drm, state); - drm_atomic_helper_commit_planes(drm, state, - DRM_PLANE_COMMIT_ACTIVE_ONLY); - - drm_atomic_helper_wait_for_vblanks(drm, state); - - drm_atomic_helper_cleanup_planes(drm, state); - drm_atomic_state_put(state); -} - -static void mtk_atomic_work(struct work_struct *work) -{ - struct mtk_drm_private *private = container_of(work, - struct mtk_drm_private, commit.work); - - mtk_atomic_complete(private, private->commit.state); -} - -static int mtk_atomic_commit(struct drm_device *drm, - struct drm_atomic_state *state, - bool async) -{ - struct mtk_drm_private *private = drm->dev_private; - int ret; - - ret = drm_atomic_helper_prepare_planes(drm, state); - if (ret) - return ret; - - mutex_lock(&private->commit.lock); - flush_work(&private->commit.work); - - ret = drm_atomic_helper_swap_state(state, true); - if (ret) { - mutex_unlock(&private->commit.lock); - drm_atomic_helper_cleanup_planes(drm, state); - return ret; - } - - drm_atomic_state_get(state); - if (async) - mtk_atomic_schedule(private, state); - else - mtk_atomic_complete(private, state); - - mutex_unlock(&private->commit.lock); - - return 0; -} +static const struct drm_mode_config_helper_funcs mtk_drm_mode_config_helpers = { + .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm, +}; static struct drm_framebuffer * mtk_drm_mode_fb_create(struct drm_device *dev, @@ -132,7 +57,7 @@ mtk_drm_mode_fb_create(struct drm_device *dev, static const struct drm_mode_config_funcs mtk_drm_mode_config_funcs = { .fb_create = mtk_drm_mode_fb_create, .atomic_check = drm_atomic_helper_check, - .atomic_commit = mtk_atomic_commit, + .atomic_commit = drm_atomic_helper_commit, }; static const enum mtk_ddp_comp_id mt2701_mtk_ddp_main[] = { @@ -250,6 +175,7 @@ static int mtk_drm_kms_init(struct drm_device *drm) drm->mode_config.max_width = 4096; drm->mode_config.max_height = 4096; drm->mode_config.funcs = &mtk_drm_mode_config_funcs; + drm->mode_config.helper_private = &mtk_drm_mode_config_helpers; ret = component_bind_all(drm->dev, drm); if (ret) @@ -509,8 +435,6 @@ static int mtk_drm_probe(struct platform_device *pdev) if (!private) return -ENOMEM; - mutex_init(&private->commit.lock); - INIT_WORK(&private->commit.work, mtk_atomic_work); private->data = of_device_get_match_data(dev); mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h index e03fea12ff5988de393ee6fc2165dce7189700c9..17bc99b9f5d44417ec7cdeae59fe433acf2ad96d 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h @@ -43,13 +43,6 @@ struct mtk_drm_private { struct device_node *comp_node[DDP_COMPONENT_ID_MAX]; struct mtk_ddp_comp *ddp_comp[DDP_COMPONENT_ID_MAX]; const struct mtk_mmsys_driver_data *data; - - struct { - struct drm_atomic_state *state; - struct work_struct work; - struct mutex lock; - } commit; - struct drm_atomic_state *suspend_state; bool dma_parms_allocated; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c index f0b0325381e0e9d98469278986533bf728f096a5..914cc7619cd7747373577fd1ca67b6236000ec42 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include @@ -75,6 +76,50 @@ static void mtk_drm_plane_destroy_state(struct drm_plane *plane, kfree(to_mtk_plane_state(state)); } 
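The helper sequence spelled out in the removed comment above is what the stock drm_atomic_helper_commit_tail_rpm() provides, which is why the hand-rolled mtk_atomic_* commit machinery can be dropped. A minimal sketch of that commit-tail ordering, assuming only the standard DRM atomic helpers (the function name example_commit_tail_rpm is illustrative, and the upstream helper body may differ in detail):

static void example_commit_tail_rpm(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;

	/* Disable outputs first so plane registers are never written
	 * while their CRTC is powered down, as runtime PM requires. */
	drm_atomic_helper_commit_modeset_disables(dev, old_state);
	drm_atomic_helper_commit_modeset_enables(dev, old_state);

	/* Only commit planes whose CRTC is active. */
	drm_atomic_helper_commit_planes(dev, old_state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);

	drm_atomic_helper_commit_hw_done(old_state);
	drm_atomic_helper_wait_for_vblanks(dev, old_state);
	drm_atomic_helper_cleanup_planes(dev, old_state);
}

With such a commit-tail in place, .atomic_commit can simply point at drm_atomic_helper_commit, which supplies the async worker and fence waiting that the removed mtk_atomic_commit() implemented by hand.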
+static int mtk_plane_atomic_async_check(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct drm_crtc_state *crtc_state; + + if (plane != state->crtc->cursor) + return -EINVAL; + + if (!plane->state) + return -EINVAL; + + if (!plane->state->fb) + return -EINVAL; + + if (state->state) + crtc_state = drm_atomic_get_existing_crtc_state(state->state, + state->crtc); + else /* Special case for asynchronous cursor updates. */ + crtc_state = state->crtc->state; + + return drm_atomic_helper_check_plane_state(plane->state, crtc_state, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + true, true); +} + +static void mtk_plane_atomic_async_update(struct drm_plane *plane, + struct drm_plane_state *new_state) +{ + struct mtk_plane_state *state = to_mtk_plane_state(plane->state); + + plane->state->crtc_x = new_state->crtc_x; + plane->state->crtc_y = new_state->crtc_y; + plane->state->crtc_h = new_state->crtc_h; + plane->state->crtc_w = new_state->crtc_w; + plane->state->src_x = new_state->src_x; + plane->state->src_y = new_state->src_y; + plane->state->src_h = new_state->src_h; + plane->state->src_w = new_state->src_w; + state->pending.async_dirty = true; + + mtk_drm_crtc_async_update(new_state->crtc, plane, new_state); +} + static const struct drm_plane_funcs mtk_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, @@ -163,6 +208,8 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = { .atomic_check = mtk_plane_atomic_check, .atomic_update = mtk_plane_atomic_update, .atomic_disable = mtk_plane_atomic_disable, + .atomic_async_update = mtk_plane_atomic_async_update, + .atomic_async_check = mtk_plane_atomic_async_check, }; int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.h b/drivers/gpu/drm/mediatek/mtk_drm_plane.h index 760885e35b275c36c713bae2d31c02e5d551bd8b..d454bece95356629ca271beecc5330873c8d7865 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.h @@ -22,6 +22,8 @@ struct mtk_plane_pending_state { unsigned int height; unsigned int rotation; bool dirty; + bool async_dirty; + bool async_config; }; struct mtk_plane_state { diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 3b5e016d16c48095b6fb2b0b589d77f045a270c7..5fa1073cf26b9f78f64ffd7f9ba5b7337f1bf0f7 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -230,28 +230,25 @@ static void mtk_dsi_mask(struct mtk_dsi *dsi, u32 offset, u32 mask, u32 data) static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi) { u32 timcon0, timcon1, timcon2, timcon3; - u32 ui, cycle_time; + u32 data_rate_mhz = DIV_ROUND_UP(dsi->data_rate, 1000000); struct mtk_phy_timing *timing = &dsi->phy_timing; - ui = DIV_ROUND_UP(1000000000, dsi->data_rate); - cycle_time = div_u64(8000000000ULL, dsi->data_rate); + timing->lpx = (60 * data_rate_mhz / (8 * 1000)) + 1; + timing->da_hs_prepare = (80 * data_rate_mhz + 4 * 1000) / 8000; + timing->da_hs_zero = (170 * data_rate_mhz + 10 * 1000) / 8000 + 1 - + timing->da_hs_prepare; + timing->da_hs_trail = timing->da_hs_prepare + 1; - timing->lpx = NS_TO_CYCLE(60, cycle_time); - timing->da_hs_prepare = NS_TO_CYCLE(50 + 5 * ui, cycle_time); - timing->da_hs_zero = NS_TO_CYCLE(110 + 6 * ui, cycle_time); - timing->da_hs_trail = NS_TO_CYCLE(77 + 4 * ui, cycle_time); + timing->ta_go = 4 * timing->lpx - 2; + timing->ta_sure = timing->lpx + 2; + 
timing->ta_get = 4 * timing->lpx; + timing->da_hs_exit = 2 * timing->lpx + 1; - timing->ta_go = 4 * timing->lpx; - timing->ta_sure = 3 * timing->lpx / 2; - timing->ta_get = 5 * timing->lpx; - timing->da_hs_exit = 2 * timing->lpx; - - timing->clk_hs_zero = NS_TO_CYCLE(336, cycle_time); - timing->clk_hs_trail = NS_TO_CYCLE(100, cycle_time) + 10; - - timing->clk_hs_prepare = NS_TO_CYCLE(64, cycle_time); - timing->clk_hs_post = NS_TO_CYCLE(80 + 52 * ui, cycle_time); - timing->clk_hs_exit = 2 * timing->lpx; + timing->clk_hs_prepare = 70 * data_rate_mhz / (8 * 1000); + timing->clk_hs_post = timing->clk_hs_prepare + 8; + timing->clk_hs_trail = timing->clk_hs_prepare; + timing->clk_hs_zero = timing->clk_hs_trail * 4; + timing->clk_hs_exit = 2 * timing->clk_hs_trail; timcon0 = timing->lpx | timing->da_hs_prepare << 8 | timing->da_hs_zero << 16 | timing->da_hs_trail << 24; @@ -482,27 +479,39 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi) dsi_tmp_buf_bpp - 10); data_phy_cycles = timing->lpx + timing->da_hs_prepare + - timing->da_hs_zero + timing->da_hs_exit + 2; + timing->da_hs_zero + timing->da_hs_exit + 3; if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) { - if (vm->hfront_porch * dsi_tmp_buf_bpp > + if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp > data_phy_cycles * dsi->lanes + 18) { - horizontal_frontporch_byte = vm->hfront_porch * - dsi_tmp_buf_bpp - - data_phy_cycles * - dsi->lanes - 18; + horizontal_frontporch_byte = + vm->hfront_porch * dsi_tmp_buf_bpp - + (data_phy_cycles * dsi->lanes + 18) * + vm->hfront_porch / + (vm->hfront_porch + vm->hback_porch); + + horizontal_backporch_byte = + horizontal_backporch_byte - + (data_phy_cycles * dsi->lanes + 18) * + vm->hback_porch / + (vm->hfront_porch + vm->hback_porch); } else { DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n"); horizontal_frontporch_byte = vm->hfront_porch * dsi_tmp_buf_bpp; } } else { - if (vm->hfront_porch * dsi_tmp_buf_bpp > + if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp > data_phy_cycles * dsi->lanes + 12) { - horizontal_frontporch_byte = vm->hfront_porch * - dsi_tmp_buf_bpp - - data_phy_cycles * - dsi->lanes - 12; + horizontal_frontporch_byte = + vm->hfront_porch * dsi_tmp_buf_bpp - + (data_phy_cycles * dsi->lanes + 12) * + vm->hfront_porch / + (vm->hfront_porch + vm->hback_porch); + horizontal_backporch_byte = horizontal_backporch_byte - + (data_phy_cycles * dsi->lanes + 12) * + vm->hback_porch / + (vm->hfront_porch + vm->hback_porch); } else { DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n"); horizontal_frontporch_byte = vm->hfront_porch * diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h index f9a0c8e9d4d06cba183659c26a9a321d30f2c912..04fdf3826643276eff31c67d2212c8d1202d561c 100644 --- a/drivers/gpu/drm/meson/meson_drv.h +++ b/drivers/gpu/drm/meson/meson_drv.h @@ -135,7 +135,7 @@ struct meson_drm { } venc; struct { - dma_addr_t addr_phys; + dma_addr_t addr_dma; uint32_t *addr; unsigned int offset; } rdma; diff --git a/drivers/gpu/drm/meson/meson_rdma.c b/drivers/gpu/drm/meson/meson_rdma.c index 25b34b1e72a7abb075f9564f560b41685570a17e..130382178c6310db0ba9e56f1c01f306f4cdeaf5 100644 --- a/drivers/gpu/drm/meson/meson_rdma.c +++ b/drivers/gpu/drm/meson/meson_rdma.c @@ -27,7 +27,7 @@ int meson_rdma_init(struct meson_drm *priv) /* Allocate a PAGE buffer */ priv->rdma.addr = dma_alloc_coherent(priv->dev, SZ_4K, - &priv->rdma.addr_phys, + &priv->rdma.addr_dma, GFP_KERNEL); if (!priv->rdma.addr) return -ENOMEM; @@ -47,16 +47,16 @@ int 
meson_rdma_init(struct meson_drm *priv) void meson_rdma_free(struct meson_drm *priv) { - if (!priv->rdma.addr && !priv->rdma.addr_phys) + if (!priv->rdma.addr && !priv->rdma.addr_dma) return; meson_rdma_stop(priv); dma_free_coherent(priv->dev, SZ_4K, - priv->rdma.addr, priv->rdma.addr_phys); + priv->rdma.addr, priv->rdma.addr_dma); priv->rdma.addr = NULL; - priv->rdma.addr_phys = (dma_addr_t)NULL; + priv->rdma.addr_dma = (dma_addr_t)0; } void meson_rdma_setup(struct meson_drm *priv) @@ -118,11 +118,11 @@ void meson_rdma_flush(struct meson_drm *priv) meson_rdma_stop(priv); /* Start of Channel 1 register writes buffer */ - writel(priv->rdma.addr_phys, + writel(priv->rdma.addr_dma, priv->io_base + _REG(RDMA_AHB_START_ADDR_1)); /* Last byte on Channel 1 register writes buffer */ - writel(priv->rdma.addr_phys + (priv->rdma.offset * RDMA_DESC_SIZE) - 1, + writel(priv->rdma.addr_dma + (priv->rdma.offset * RDMA_DESC_SIZE) - 1, priv->io_base + _REG(RDMA_AHB_END_ADDR_1)); /* Trigger Channel 1 on VSYNC event */ diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index 7ad14937fcdf80f37de996ee7fbaba872331c288..b67f888727264805e3d487f17162b6c28ea970b0 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -506,6 +506,14 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) goto fail; } + /* + * Set the ICC path to maximum speed for now by multiplying the fastest + * frequency by the bus width (8). We'll want to scale this later on to + * improve battery life. + */ + icc_set_bw(gpu->icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8); + icc_set_bw(gpu->ocmem_icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8); + return gpu; fail: diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index b01388a9e89e41ed6e6494ce277382286023cdc0..253d8d85daad651c12672081172fbaf4491925da 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -591,6 +591,14 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev) goto fail; } + /* + * Set the ICC path to maximum speed for now by multiplying the fastest + * frequency by the bus width (8). We'll want to scale this later on to + * improve battery life. + */ + icc_set_bw(gpu->icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8); + icc_set_bw(gpu->ocmem_icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8); + return gpu; fail: diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index b02e2042547f6bcc540a38738121ad0298004475..7d9e63e20dedd73434ab06514b8ba59359410d8c 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -753,11 +753,18 @@ static int a5xx_hw_init(struct msm_gpu *gpu) gpu->funcs->flush(gpu, gpu->rb[0]); if (!a5xx_idle(gpu, gpu->rb[0])) return -EINVAL; - } else { - /* Print a warning so if we die, we know why */ + } else if (ret == -ENODEV) { + /* + * This device does not use zap shader (but print a warning + * just in case someone got their dt wrong.. hopefully they + * have a debug UART to realize the error of their ways... 
+ * if you mess this up you are about to crash horribly) + */ dev_warn_once(gpu->dev->dev, "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n"); gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0); + } else { + return ret; } /* Last step - yield the ringbuffer */ diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h index f44553ec3193577a0ee7f4ff6c8939ea3ff5b63e..ed78fee2a26219a08a11e77cb5824bf2bcb3a655 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx.xml.h +++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h @@ -16,11 +16,11 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13) - /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13) - /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54) -- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54) +- /home/smasetty/playarea/envytools/rnndb/adreno/a6xx.xml ( 161969 bytes, from 2019-11-29 07:18:16) - /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07) - /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13) -Copyright (C) 2013-2018 by the following authors: +Copyright (C) 2013-2019 by the following authors: - Rob Clark (robclark) - Ilia Mirkin (imirkin) @@ -2519,6 +2519,54 @@ static inline uint32_t A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL(uint32_t val) #define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH2 0x0000311a +#define REG_A6XX_GBIF_SCACHE_CNTL1 0x00003c02 + +#define REG_A6XX_GBIF_QSB_SIDE0 0x00003c03 + +#define REG_A6XX_GBIF_QSB_SIDE1 0x00003c04 + +#define REG_A6XX_GBIF_QSB_SIDE2 0x00003c05 + +#define REG_A6XX_GBIF_QSB_SIDE3 0x00003c06 + +#define REG_A6XX_GBIF_HALT 0x00003c45 + +#define REG_A6XX_GBIF_HALT_ACK 0x00003c46 + +#define REG_A6XX_GBIF_PERF_PWR_CNT_EN 0x00003cc0 + +#define REG_A6XX_GBIF_PERF_CNT_SEL 0x00003cc2 + +#define REG_A6XX_GBIF_PERF_PWR_CNT_SEL 0x00003cc3 + +#define REG_A6XX_GBIF_PERF_CNT_LOW0 0x00003cc4 + +#define REG_A6XX_GBIF_PERF_CNT_LOW1 0x00003cc5 + +#define REG_A6XX_GBIF_PERF_CNT_LOW2 0x00003cc6 + +#define REG_A6XX_GBIF_PERF_CNT_LOW3 0x00003cc7 + +#define REG_A6XX_GBIF_PERF_CNT_HIGH0 0x00003cc8 + +#define REG_A6XX_GBIF_PERF_CNT_HIGH1 0x00003cc9 + +#define REG_A6XX_GBIF_PERF_CNT_HIGH2 0x00003cca + +#define REG_A6XX_GBIF_PERF_CNT_HIGH3 0x00003ccb + +#define REG_A6XX_GBIF_PWR_CNT_LOW0 0x00003ccc + +#define REG_A6XX_GBIF_PWR_CNT_LOW1 0x00003ccd + +#define REG_A6XX_GBIF_PWR_CNT_LOW2 0x00003cce + +#define REG_A6XX_GBIF_PWR_CNT_HIGH0 0x00003ccf + +#define REG_A6XX_GBIF_PWR_CNT_HIGH1 0x00003cd0 + +#define REG_A6XX_GBIF_PWR_CNT_HIGH2 0x00003cd1 + #define REG_A6XX_RB_WINDOW_OFFSET2 0x000088d4 #define A6XX_RB_WINDOW_OFFSET2_WINDOW_OFFSET_DISABLE 0x80000000 #define A6XX_RB_WINDOW_OFFSET2_X__MASK 0x00007fff diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index 85f14feafdecd4d383dc78724fc0964e3a55f2f0..983afeaee737ea27593f988ae5eb00a68e0a1c76 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */ +/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. 
*/ #include #include @@ -149,6 +149,8 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq) if (freq == gmu->gpu_freqs[perf_index]) break; + gmu->current_perf_index = perf_index; + __a6xx_gmu_set_freq(gmu, perf_index); } @@ -433,6 +435,8 @@ static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev, static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu) { + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; struct platform_device *pdev = to_platform_device(gmu->dev); void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc"); void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq"); @@ -480,20 +484,34 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu) pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108); pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000); pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0); + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108); - pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080); + if (adreno_is_a618(adreno_gpu)) + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30090); + else + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080); pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0); + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7); pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0); pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0); pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108); pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010); pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2); + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108); pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000); - pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3); + if (adreno_is_a618(adreno_gpu)) + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2); + else + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3); + + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108); - pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080); + if (adreno_is_a618(adreno_gpu)) + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30090); + else + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080); pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3); /* Setup GPU PDC */ @@ -741,8 +759,8 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK); enable_irq(gmu->hfi_irq); - /* Set the GPU to the highest power frequency */ - __a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1); + /* Set the GPU to the current freq */ + __a6xx_gmu_set_freq(gmu, gmu->current_perf_index); /* * "enable" the GX power domain which won't actually do anything but it @@ -1166,6 +1184,8 @@ static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu) gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev, gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs)); + gmu->current_perf_index = gmu->nr_gpu_freqs - 1; + /* Build the list of RPMh votes that we'll send to the GMU */ return a6xx_gmu_rpmh_votes_init(gmu); } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h index 39a26dd6367440d77f3b95bccbeadf61ce717f26..2af91ed7ed0ca498f992453118b565cf4cfb4d39 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h @@ -63,6 +63,9 @@ struct a6xx_gmu { struct clk_bulk_data *clocks; 
struct clk *core_clk; + /* current performance index set externally */ + int current_perf_index; + int nr_gpu_freqs; unsigned long gpu_freqs[16]; u32 gx_arc_votes[16]; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index dc8ec2c94301b67f057f918e6d7bdb36722938d3..daf07800cde02a1ecc4797508dc1a631f38892da 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */ +/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */ #include "msm_gem.h" @@ -378,6 +378,18 @@ static int a6xx_hw_init(struct msm_gpu *gpu) struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); int ret; + /* + * During a previous slumber, GBIF halt is asserted to ensure + * no further transactions can go through the GPU before the GPU + * headswitch is turned off. + * + * This halt is deasserted once the headswitch goes off, but + * in case the headswitch doesn't go off, clear GBIF halt + * here to ensure GPU wake-up doesn't fail because of + * halted GPU transactions. + */ + gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0); + /* Make sure the GMU keeps the GPU on while we set it up */ a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); @@ -406,12 +418,17 @@ static int a6xx_hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1); gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1); - /* enable hardware clockgating */ - a6xx_set_hwcg(gpu, true); + /* + * enable hardware clockgating + * For now enable clock gating only for a630 + */ + if (adreno_is_a630(adreno_gpu)) + a6xx_set_hwcg(gpu, true); - /* VBIF start */ - gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009); + /* VBIF/GBIF start */ gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3); + if (adreno_is_a630(adreno_gpu)) + gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009); /* Make all blocks contribute to the GPU BUSY perf counter */ gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff); @@ -537,12 +554,19 @@ static int a6xx_hw_init(struct msm_gpu *gpu) a6xx_flush(gpu, gpu->rb[0]); if (!a6xx_idle(gpu, gpu->rb[0])) return -EINVAL; - } else { - /* Print a warning so if we die, we know why */ + } else if (ret == -ENODEV) { + /* + * This device does not use zap shader (but print a warning + * just in case someone got their dt wrong.. hopefully they + * have a debug UART to realize the error of their ways...
+ * if you mess this up you are about to crash horribly) + */ dev_warn_once(gpu->dev->dev, "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n"); gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0); ret = 0; + } else { + return ret; } out: @@ -724,6 +748,39 @@ static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = { REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL), }; +#define GBIF_CLIENT_HALT_MASK BIT(0) +#define GBIF_ARB_HALT_MASK BIT(1) + +static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu) +{ + struct msm_gpu *gpu = &adreno_gpu->base; + + if (!a6xx_has_gbif(adreno_gpu)) { + gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf); + spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & + 0xf) == 0xf); + gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0); + + return; + } + + /* Halt new client requests on GBIF */ + gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK); + spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & + (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK); + + /* Halt all AXI requests on GBIF */ + gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK); + spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & + (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK); + + /* + * GMU needs DDR access in slumber path. Deassert GBIF halt now + * to allow the GMU to access system memory. + */ + gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0); +} + static int a6xx_pm_resume(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); @@ -748,6 +805,16 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu) devfreq_suspend_device(gpu->devfreq.devfreq); + /* + * Make sure the GMU is idle before continuing (because some transitions + * may use VBIF) + */ + a6xx_gmu_wait_for_idle(&a6xx_gpu->gmu); + + /* Clear the VBIF pipe before shutting down */ + /* FIXME: This accesses the GPU - do we need to make sure it is on? */ + a6xx_bus_clear_pending_transactions(adreno_gpu); + return a6xx_gmu_stop(a6xx_gpu); } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h index 64399554f2ddf3404e8ed1b46c92700a0dfed33b..7239b8b6093994d2dac5796f207118afaeefe6f8 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2017 The Linux Foundation. All rights reserved. */ +/* Copyright (c) 2017, 2019 The Linux Foundation. All rights reserved. */ #ifndef __A6XX_GPU_H__ #define __A6XX_GPU_H__ @@ -42,6 +42,13 @@ struct a6xx_gpu { #define A6XX_PROTECT_RDONLY(_reg, _len) \ ((((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF)) +static inline bool a6xx_has_gbif(struct adreno_gpu *gpu) +{ + if (adreno_is_a630(gpu)) + return false; + + return true; +} int a6xx_gmu_resume(struct a6xx_gpu *gpu); int a6xx_gmu_stop(struct a6xx_gpu *gpu); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c index 691c1a277d91b7fe2db468786f8ead24dbc61f54..d6023ba8033c053b168edac484f7cc92db71d4ca 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */ +/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/ #include #include "msm_gem.h" @@ -320,6 +320,7 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu, { struct resource *res; void __iomem *cxdbg = NULL; + int nr_debugbus_blocks; /* Set up the GX debug bus */ @@ -374,9 +375,11 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu, cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0); } - a6xx_state->debugbus = state_kcalloc(a6xx_state, - ARRAY_SIZE(a6xx_debugbus_blocks), - sizeof(*a6xx_state->debugbus)); + nr_debugbus_blocks = ARRAY_SIZE(a6xx_debugbus_blocks) + + (a6xx_has_gbif(to_adreno_gpu(gpu)) ? 1 : 0); + + a6xx_state->debugbus = state_kcalloc(a6xx_state, nr_debugbus_blocks, + sizeof(*a6xx_state->debugbus)); if (a6xx_state->debugbus) { int i; @@ -388,15 +391,31 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu, &a6xx_state->debugbus[i]); a6xx_state->nr_debugbus = ARRAY_SIZE(a6xx_debugbus_blocks); + + /* + * GBIF has the same debugbus as the other GPU blocks; fall back + * to the default path if the GPU uses GBIF. Also, GBIF uses + * exactly the same ID as VBIF. + */ + if (a6xx_has_gbif(to_adreno_gpu(gpu))) { + a6xx_get_debugbus_block(gpu, a6xx_state, + &a6xx_gbif_debugbus_block, + &a6xx_state->debugbus[i]); + + a6xx_state->nr_debugbus += 1; + } } - a6xx_state->vbif_debugbus = - state_kcalloc(a6xx_state, 1, - sizeof(*a6xx_state->vbif_debugbus)); + /* Dump the VBIF debugbus on applicable targets */ + if (!a6xx_has_gbif(to_adreno_gpu(gpu))) { + a6xx_state->vbif_debugbus = + state_kcalloc(a6xx_state, 1, + sizeof(*a6xx_state->vbif_debugbus)); - if (a6xx_state->vbif_debugbus) - a6xx_get_vbif_debugbus_block(gpu, a6xx_state, - a6xx_state->vbif_debugbus); + if (a6xx_state->vbif_debugbus) + a6xx_get_vbif_debugbus_block(gpu, a6xx_state, + a6xx_state->vbif_debugbus); + } if (cxdbg) { a6xx_state->cx_debugbus = @@ -770,14 +789,16 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu, &a6xx_state->gmu_registers[1]); } +#define A6XX_GBIF_REGLIST_SIZE 1 static void a6xx_get_registers(struct msm_gpu *gpu, struct a6xx_gpu_state *a6xx_state, struct a6xx_crashdumper *dumper) { int i, count = ARRAY_SIZE(a6xx_ahb_reglist) + ARRAY_SIZE(a6xx_reglist) + - ARRAY_SIZE(a6xx_hlsq_reglist); + ARRAY_SIZE(a6xx_hlsq_reglist) + A6XX_GBIF_REGLIST_SIZE; int index = 0; + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); a6xx_state->registers = state_kcalloc(a6xx_state, count, sizeof(*a6xx_state->registers)); @@ -792,6 +813,15 @@ static void a6xx_get_registers(struct msm_gpu *gpu, a6xx_state, &a6xx_ahb_reglist[i], &a6xx_state->registers[index++]); + if (a6xx_has_gbif(adreno_gpu)) + a6xx_get_ahb_gpu_registers(gpu, + a6xx_state, &a6xx_gbif_reglist, + &a6xx_state->registers[index++]); + else + a6xx_get_ahb_gpu_registers(gpu, + a6xx_state, &a6xx_vbif_reglist, + &a6xx_state->registers[index++]); + for (i = 0; i < ARRAY_SIZE(a6xx_reglist); i++) a6xx_get_crashdumper_registers(gpu, a6xx_state, &a6xx_reglist[i], diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h index 68cccfa2870a364280b13c2d1af4e920a3614c6a..e67c20c415afedc3d94b0d82b3f822074d2291ae 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */ +/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/ #ifndef _A6XX_CRASH_DUMP_H_ #define _A6XX_CRASH_DUMP_H_ @@ -307,11 +307,20 @@ static const u32 a6xx_vbif_registers[] = { 0x3410, 0x3410, 0x3800, 0x3801, }; +static const u32 a6xx_gbif_registers[] = { + 0x3C00, 0X3C0B, 0X3C40, 0X3C47, 0X3CC0, 0X3CD1, 0xE3A, 0xE3A, +}; + static const struct a6xx_registers a6xx_ahb_reglist[] = { REGS(a6xx_ahb_registers, 0, 0), - REGS(a6xx_vbif_registers, 0, 0), }; +static const struct a6xx_registers a6xx_vbif_reglist = + REGS(a6xx_vbif_registers, 0, 0); + +static const struct a6xx_registers a6xx_gbif_reglist = + REGS(a6xx_gbif_registers, 0, 0); + static const u32 a6xx_gmu_gx_registers[] = { /* GMU GX */ 0x0000, 0x0000, 0x0010, 0x0013, 0x0016, 0x0016, 0x0018, 0x001b, @@ -422,6 +431,9 @@ static const struct a6xx_debugbus_block { DEBUGBUS(A6XX_DBGBUS_TPL1_3, 0x100), }; +static const struct a6xx_debugbus_block a6xx_gbif_debugbus_block = + DEBUGBUS(A6XX_DBGBUS_VBIF, 0x100); + static const struct a6xx_debugbus_block a6xx_cx_debugbus_blocks[] = { DEBUGBUS(A6XX_DBGBUS_GMU_CX, 0x100), DEBUGBUS(A6XX_DBGBUS_CX, 0x100), diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index fbbdf86504f53a37686bbf73444ebe4f4b272637..cb3a6e597d7606dee06f2183d5f7006dbe8c90e3 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -166,6 +166,17 @@ static const struct adreno_info gpulist[] = { .quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE, .init = a5xx_gpu_init, .zapfw = "a540_zap.mdt", + }, { + .rev = ADRENO_REV(6, 1, 8, ANY_ID), + .revn = 618, + .name = "A618", + .fw = { + [ADRENO_FW_SQE] = "a630_sqe.fw", + [ADRENO_FW_GMU] = "a630_gmu.bin", + }, + .gmem = SZ_512K, + .inactive_period = DRM_MSM_INACTIVE_PERIOD, + .init = a6xx_gpu_init, }, { .rev = ADRENO_REV(6, 3, 0, ANY_ID), .revn = 630, diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 0783e4b5486a48304b1ea5a59f24e87069bf5c21..7fd29829b2fad47f066d6e6fd250fa4f9b75e4e7 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -26,6 +26,7 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname, { struct device *dev = &gpu->pdev->dev; const struct firmware *fw; + const char *signed_fwname = NULL; struct device_node *np, *mem_np; struct resource r; phys_addr_t mem_phys; @@ -58,8 +59,43 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname, mem_phys = r.start; - /* Request the MDT file for the firmware */ - fw = adreno_request_fw(to_adreno_gpu(gpu), fwname); + /* + * Check for a firmware-name property. This is the new scheme + * to handle firmware that may be signed with device specific + * keys, allowing us to have a different zap fw path for different + * devices. + * + * If the firmware-name property is found, we bypass the + * adreno_request_fw() mechanism, because we don't need to handle + * the /lib/firmware/qcom/... vs /lib/firmware/... case. + * + * If the firmware-name property is not found, for backwards + * compatibility we fall back to the fwname from the gpulist + * table. 
+ */ + of_property_read_string_index(np, "firmware-name", 0, &signed_fwname); + if (signed_fwname) { + fwname = signed_fwname; + ret = request_firmware_direct(&fw, fwname, gpu->dev->dev); + if (ret) + fw = ERR_PTR(ret); + } else if (fwname) { + /* Request the MDT file from the default location: */ + fw = adreno_request_fw(to_adreno_gpu(gpu), fwname); + } else { + /* + * For new targets, we require the firmware-name property + * if a zap-shader is required, rather than falling back + * to a firmware name specified in gpulist. + * + * Because the firmware is signed with a (potentially) + * device specific key, having the name come from gpulist + * was a bad idea, and is only provided for backwards + * compatibility for older targets. + */ + return -ENODEV; + } + if (IS_ERR(fw)) { DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname); return PTR_ERR(fw); @@ -95,7 +131,7 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname, * not. But since we've already gotten through adreno_request_fw() * we know which of the two cases it is: */ - if (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY) { + if (signed_fwname || (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY)) { ret = qcom_mdt_load(dev, fw, fwname, pasid, mem_region, mem_phys, mem_size, NULL); } else { @@ -146,14 +182,6 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid) return -EPROBE_DEFER; } - /* Each GPU has a target specific zap shader firmware name to use */ - if (!adreno_gpu->info->zapfw) { - zap_available = false; - DRM_DEV_ERROR(&pdev->dev, - "Zap shader firmware file not specified for this target\n"); - return -ENODEV; - } - return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid); } @@ -826,7 +854,7 @@ static int adreno_get_legacy_pwrlevels(struct device *dev) node = of_get_compatible_child(dev->of_node, "qcom,gpu-pwrlevels"); if (!node) { - DRM_DEV_ERROR(dev, "Could not find the GPU powerlevels\n"); + DRM_DEV_DEBUG(dev, "Could not find the GPU powerlevels\n"); return -ENXIO; } @@ -887,10 +915,21 @@ static int adreno_get_pwrlevels(struct device *dev, DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate); /* Check for an interconnect path for the bus */ - gpu->icc_path = of_icc_get(dev, NULL); + gpu->icc_path = of_icc_get(dev, "gfx-mem"); + if (!gpu->icc_path) { + /* + * Keep compatibility with device trees that don't have an + * interconnect-names property. + */ + gpu->icc_path = of_icc_get(dev, NULL); + } if (IS_ERR(gpu->icc_path)) gpu->icc_path = NULL; + gpu->ocmem_icc_path = of_icc_get(dev, "ocmem"); + if (IS_ERR(gpu->ocmem_icc_path)) + gpu->ocmem_icc_path = NULL; + return 0; } @@ -977,6 +1016,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu) release_firmware(adreno_gpu->fw[i]); icc_put(gpu->icc_path); + icc_put(gpu->ocmem_icc_path); msm_gpu_cleanup(&adreno_gpu->base); } diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index e71a7570ef7296d30289b6bd2eec07acca770fd1..9ff4e550e7bd92942eae860e6cd626fe28319203 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -3,7 +3,7 @@ * Copyright (C) 2013 Red Hat * Author: Rob Clark * - * Copyright (c) 2014,2017 The Linux Foundation. All rights reserved. + * Copyright (c) 2014,2017, 2019 The Linux Foundation. All rights reserved.
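As a reading aid for the three-way branch above, here is a hedged userspace sketch of the firmware-name precedence (resolve_zap_fwname() and the sample paths are hypothetical, not driver API): a device-specific name from the firmware-name DT property wins, the gpulist name is a legacy fallback, and a new target with neither is treated as -ENODEV.

    #include <stdio.h>
    #include <stddef.h>

    /* hypothetical model of the precedence in zap_shader_load_mdt() */
    static const char *resolve_zap_fwname(const char *dt_fwname,
                                          const char *gpulist_fwname)
    {
            if (dt_fwname)
                    return dt_fwname;       /* signed, device-specific name from DT */
            if (gpulist_fwname)
                    return gpulist_fwname;  /* legacy fallback for older targets */
            return NULL;                    /* caller turns this into -ENODEV */
    }

    int main(void)
    {
            /* the first firmware path below is made up for illustration */
            printf("%s\n", resolve_zap_fwname("qcom/mydevice/a618_zap.mbn", NULL));
            printf("%s\n", resolve_zap_fwname(NULL, "a540_zap.mdt"));
            printf("%s\n", resolve_zap_fwname(NULL, NULL) ? "found" : "-ENODEV");
            return 0;
    }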
*/ #ifndef __ADRENO_GPU_H__ @@ -227,6 +227,16 @@ static inline int adreno_is_a540(struct adreno_gpu *gpu) return gpu->revn == 540; } +static inline int adreno_is_a618(struct adreno_gpu *gpu) +{ + return gpu->revn == 618; +} + +static inline int adreno_is_a630(struct adreno_gpu *gpu) +{ + return gpu->revn == 630; +} + int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value); const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname); @@ -330,10 +340,7 @@ OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt) static inline bool adreno_reg_check(struct adreno_gpu *gpu, enum adreno_regs offset_name) { - if (offset_name >= REG_ADRENO_REGISTER_MAX || - !gpu->reg_offsets[offset_name]) { - BUG(); - } + BUG_ON(offset_name >= REG_ADRENO_REGISTER_MAX || !gpu->reg_offsets[offset_name]); /* * REG_SKIP is a special value that tell us that the register in diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index f197dce545761ae92eb5d2b1ba642c7218bcc9aa..bf513411b243a10b820706056a4352d58fec3b97 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -197,10 +197,6 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc) DPU_DEBUG("%s\n", dpu_crtc->name); for (i = 0; i < cstate->num_mixers; i++) { - if (!mixer[i].hw_lm || !mixer[i].lm_ctl) { - DPU_ERROR("invalid lm or ctl assigned to mixer\n"); - return; - } mixer[i].mixer_op_mode = 0; mixer[i].flush_mask = 0; if (mixer[i].lm_ctl->ops.clear_all_blendstages) @@ -1113,14 +1109,9 @@ static int _dpu_debugfs_status_show(struct seq_file *s, void *data) for (i = 0; i < cstate->num_mixers; ++i) { m = &cstate->mixers[i]; - if (!m->hw_lm) - seq_printf(s, "\tmixer[%d] has no lm\n", i); - else if (!m->lm_ctl) - seq_printf(s, "\tmixer[%d] has no ctl\n", i); - else - seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n", - m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0, - out_width, mode->vdisplay); + seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n", + m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0, + out_width, mode->vdisplay); } seq_puts(s, "\n"); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index f96e142c43611c9a72c7a7bbcaeb8aa333019a64..f8ac3bf60fd6055ce7fb3a719cc44e73aff0dedb 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -58,7 +58,7 @@ #define IDLE_SHORT_TIMEOUT 1 -#define MAX_VDISPLAY_SPLIT 1080 +#define MAX_HDISPLAY_SPLIT 1080 /* timeout in frames waiting for frame done */ #define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5 @@ -233,7 +233,7 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc, u32 irq_status; int ret; - if (!phys_enc || !wait_info || intr_idx >= INTR_IDX_MAX) { + if (!wait_info || intr_idx >= INTR_IDX_MAX) { DPU_ERROR("invalid params\n"); return -EINVAL; } @@ -308,7 +308,7 @@ int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc, struct dpu_encoder_irq *irq; int ret = 0; - if (!phys_enc || intr_idx >= INTR_IDX_MAX) { + if (intr_idx >= INTR_IDX_MAX) { DPU_ERROR("invalid params\n"); return -EINVAL; } @@ -363,10 +363,6 @@ int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc, struct dpu_encoder_irq *irq; int ret; - if (!phys_enc) { - DPU_ERROR("invalid encoder\n"); - return -EINVAL; - } irq = &phys_enc->irq[intr_idx]; /* silently skip irqs that weren't registered */ @@ -415,7 +411,7 @@ void 
dpu_encoder_get_hw_resources(struct drm_encoder *drm_enc, for (i = 0; i < dpu_enc->num_phys_encs; i++) { struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; - if (phys && phys->ops.get_hw_resources) + if (phys->ops.get_hw_resources) phys->ops.get_hw_resources(phys, hw_res); } } @@ -438,7 +434,7 @@ static void dpu_encoder_destroy(struct drm_encoder *drm_enc) for (i = 0; i < dpu_enc->num_phys_encs; i++) { struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; - if (phys && phys->ops.destroy) { + if (phys->ops.destroy) { phys->ops.destroy(phys); --dpu_enc->num_phys_encs; dpu_enc->phys_encs[i] = NULL; @@ -464,7 +460,7 @@ void dpu_encoder_helper_split_config( struct dpu_hw_mdp *hw_mdptop; struct msm_display_info *disp_info; - if (!phys_enc || !phys_enc->hw_mdptop || !phys_enc->parent) { + if (!phys_enc->hw_mdptop || !phys_enc->parent) { DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0); return; } @@ -534,8 +530,23 @@ static struct msm_display_topology dpu_encoder_get_topology( if (dpu_enc->phys_encs[i]) intf_count++; - /* User split topology for width > 1080 */ - topology.num_lm = (mode->vdisplay > MAX_VDISPLAY_SPLIT) ? 2 : 1; + /* Datapath topology selection + * + * Dual display + * 2 LM, 2 INTF ( Split display using 2 interfaces) + * + * Single display + * 1 LM, 1 INTF + * 2 LM, 1 INTF (stream merge to support high resolution interfaces) + * + */ + if (intf_count == 2) + topology.num_lm = 2; + else if (!dpu_kms->catalog->caps->has_3d_merge) + topology.num_lm = 1; + else + topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1; + topology.num_enc = 0; topology.num_intf = intf_count; @@ -583,10 +594,10 @@ static int dpu_encoder_virt_atomic_check( for (i = 0; i < dpu_enc->num_phys_encs; i++) { struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; - if (phys && phys->ops.atomic_check) + if (phys->ops.atomic_check) ret = phys->ops.atomic_check(phys, crtc_state, conn_state); - else if (phys && phys->ops.mode_fixup) + else if (phys->ops.mode_fixup) if (!phys->ops.mode_fixup(phys, mode, adj_mode)) ret = -EINVAL; @@ -682,7 +693,7 @@ static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable) for (i = 0; i < dpu_enc->num_phys_encs; i++) { struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; - if (phys && phys->ops.irq_control) + if (phys->ops.irq_control) phys->ops.irq_control(phys, enable); } @@ -1032,46 +1043,43 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, for (i = 0; i < dpu_enc->num_phys_encs; i++) { struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; - if (phys) { - if (!dpu_enc->hw_pp[i]) { - DPU_ERROR_ENC(dpu_enc, "no pp block assigned" - "at idx: %d\n", i); - goto error; - } + if (!dpu_enc->hw_pp[i]) { + DPU_ERROR_ENC(dpu_enc, + "no pp block assigned at idx: %d\n", i); + goto error; + } - if (!hw_ctl[i]) { - DPU_ERROR_ENC(dpu_enc, "no ctl block assigned" - "at idx: %d\n", i); - goto error; - } + if (!hw_ctl[i]) { + DPU_ERROR_ENC(dpu_enc, + "no ctl block assigned at idx: %d\n", i); + goto error; + } - phys->hw_pp = dpu_enc->hw_pp[i]; - phys->hw_ctl = hw_ctl[i]; + phys->hw_pp = dpu_enc->hw_pp[i]; + phys->hw_ctl = hw_ctl[i]; - dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, - DPU_HW_BLK_INTF); - for (j = 0; j < MAX_CHANNELS_PER_ENC; j++) { - struct dpu_hw_intf *hw_intf; + dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, + DPU_HW_BLK_INTF); + for (j = 0; j < MAX_CHANNELS_PER_ENC; j++) { + struct dpu_hw_intf *hw_intf; - if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter)) - break; + if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter)) + break; - 
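The datapath topology comment earlier in this hunk condenses to three cases; a small standalone sketch (pick_num_lm() is a hypothetical name for logic the driver computes inline) shows how the mixer count falls out for a few representative modes:

    #include <stdio.h>
    #include <stdbool.h>

    #define MAX_HDISPLAY_SPLIT 1080

    static int pick_num_lm(int intf_count, bool has_3d_merge, int hdisplay)
    {
            if (intf_count == 2)
                    return 2;       /* dual display: split across two interfaces */
            if (!has_3d_merge)
                    return 1;       /* no 3D merge: one mixer must carry the mode */
            return hdisplay > MAX_HDISPLAY_SPLIT ? 2 : 1;   /* stream merge */
    }

    int main(void)
    {
            printf("dual display:           %d LM\n", pick_num_lm(2, true, 1080));
            printf("1920 wide, 3D merge:    %d LM\n", pick_num_lm(1, true, 1920));
            printf("1920 wide, no 3D merge: %d LM\n", pick_num_lm(1, false, 1920));
            printf("1080 wide:              %d LM\n", pick_num_lm(1, true, 1080));
            return 0;
    }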
hw_intf = (struct dpu_hw_intf *)hw_iter.hw; - if (hw_intf->idx == phys->intf_idx) - phys->hw_intf = hw_intf; - } + hw_intf = (struct dpu_hw_intf *)hw_iter.hw; + if (hw_intf->idx == phys->intf_idx) + phys->hw_intf = hw_intf; + } - if (!phys->hw_intf) { - DPU_ERROR_ENC(dpu_enc, - "no intf block assigned at idx: %d\n", - i); + if (!phys->hw_intf) { + DPU_ERROR_ENC(dpu_enc, + "no intf block assigned at idx: %d\n", i); goto error; - } - - phys->connector = conn->state->connector; - if (phys->ops.mode_set) - phys->ops.mode_set(phys, mode, adj_mode); } + + phys->connector = conn->state->connector; + if (phys->ops.mode_set) + phys->ops.mode_set(phys, mode, adj_mode); } dpu_enc->mode_set_complete = true; @@ -1203,7 +1211,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) for (i = 0; i < dpu_enc->num_phys_encs; i++) { struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; - if (phys && phys->ops.disable) + if (phys->ops.disable) phys->ops.disable(phys); } @@ -1216,8 +1224,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP); for (i = 0; i < dpu_enc->num_phys_encs; i++) { - if (dpu_enc->phys_encs[i]) - dpu_enc->phys_encs[i]->connector = NULL; + dpu_enc->phys_encs[i]->connector = NULL; } DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n"); @@ -1307,7 +1314,7 @@ void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc, for (i = 0; i < dpu_enc->num_phys_encs; i++) { struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; - if (phys && phys->ops.control_vblank_irq) + if (phys->ops.control_vblank_irq) phys->ops.control_vblank_irq(phys, enable); } } @@ -1419,7 +1426,7 @@ static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc, } ctl = phys->hw_ctl; - if (!ctl || !ctl->ops.trigger_flush) { + if (!ctl->ops.trigger_flush) { DPU_ERROR("missing trigger cb\n"); return; } @@ -1463,13 +1470,8 @@ void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc) { struct dpu_hw_ctl *ctl; - if (!phys_enc) { - DPU_ERROR("invalid encoder\n"); - return; - } - ctl = phys_enc->hw_ctl; - if (ctl && ctl->ops.trigger_start) { + if (ctl->ops.trigger_start) { ctl->ops.trigger_start(ctl); trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx); } @@ -1506,14 +1508,10 @@ static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc) struct dpu_hw_ctl *ctl; int rc; - if (!phys_enc) { - DPU_ERROR("invalid encoder\n"); - return; - } dpu_enc = to_dpu_encoder_virt(phys_enc->parent); ctl = phys_enc->hw_ctl; - if (!ctl || !ctl->ops.reset) + if (!ctl->ops.reset) return; DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(phys_enc->parent), @@ -1550,12 +1548,10 @@ static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc) for (i = 0; i < dpu_enc->num_phys_encs; i++) { struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; - if (!phys || phys->enable_state == DPU_ENC_DISABLED) + if (phys->enable_state == DPU_ENC_DISABLED) continue; ctl = phys->hw_ctl; - if (!ctl) - continue; /* * This is cleared in frame_done worker, which isn't invoked @@ -1603,17 +1599,15 @@ void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc) for (i = 0; i < dpu_enc->num_phys_encs; i++) { phys = dpu_enc->phys_encs[i]; - if (phys && phys->hw_ctl) { - ctl = phys->hw_ctl; - if (ctl->ops.clear_pending_flush) - ctl->ops.clear_pending_flush(ctl); + ctl = phys->hw_ctl; + if (ctl->ops.clear_pending_flush) + ctl->ops.clear_pending_flush(ctl); - /* update only for command mode primary ctl */ - if ((phys == 
dpu_enc->cur_master) && - (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) - && ctl->ops.trigger_pending) - ctl->ops.trigger_pending(ctl); - } + /* update only for command mode primary ctl */ + if ((phys == dpu_enc->cur_master) && + (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) + && ctl->ops.trigger_pending) + ctl->ops.trigger_pending(ctl); } } @@ -1773,12 +1767,10 @@ void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc) DPU_ATRACE_BEGIN("enc_prepare_for_kickoff"); for (i = 0; i < dpu_enc->num_phys_encs; i++) { phys = dpu_enc->phys_encs[i]; - if (phys) { - if (phys->ops.prepare_for_kickoff) - phys->ops.prepare_for_kickoff(phys); - if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET) - needs_hw_reset = true; - } + if (phys->ops.prepare_for_kickoff) + phys->ops.prepare_for_kickoff(phys); + if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET) + needs_hw_reset = true; } DPU_ATRACE_END("enc_prepare_for_kickoff"); @@ -1819,7 +1811,7 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc) /* allow phys encs to handle any post-kickoff business */ for (i = 0; i < dpu_enc->num_phys_encs; i++) { phys = dpu_enc->phys_encs[i]; - if (phys && phys->ops.handle_post_kickoff) + if (phys->ops.handle_post_kickoff) phys->ops.handle_post_kickoff(phys); } @@ -1848,7 +1840,7 @@ void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc) for (i = 0; i < dpu_enc->num_phys_encs; i++) { phys = dpu_enc->phys_encs[i]; - if (phys && phys->ops.prepare_commit) + if (phys->ops.prepare_commit) phys->ops.prepare_commit(phys); } } @@ -1863,9 +1855,6 @@ static int _dpu_encoder_status_show(struct seq_file *s, void *data) for (i = 0; i < dpu_enc->num_phys_encs; i++) { struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; - if (!phys) - continue; - seq_printf(s, "intf:%d vsync:%8d underrun:%8d ", phys->intf_idx - INTF_0, atomic_read(&phys->vsync_cnt), @@ -1924,8 +1913,7 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc) dpu_enc->debugfs_root, dpu_enc, &debugfs_status_fops); for (i = 0; i < dpu_enc->num_phys_encs; i++) - if (dpu_enc->phys_encs[i] && - dpu_enc->phys_encs[i]->ops.late_register) + if (dpu_enc->phys_encs[i]->ops.late_register) dpu_enc->phys_encs[i]->ops.late_register( dpu_enc->phys_encs[i], dpu_enc->debugfs_root); @@ -2094,11 +2082,8 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc, for (i = 0; i < dpu_enc->num_phys_encs; i++) { struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; - - if (phys) { - atomic_set(&phys->vsync_cnt, 0); - atomic_set(&phys->underrun_cnt, 0); - } + atomic_set(&phys->vsync_cnt, 0); + atomic_set(&phys->underrun_cnt, 0); } mutex_unlock(&dpu_enc->enc_lock); @@ -2240,8 +2225,6 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc, for (i = 0; i < dpu_enc->num_phys_encs; i++) { struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; - if (!phys) - continue; switch (event) { case MSM_ENC_COMMIT_DONE: @@ -2257,7 +2240,7 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc, DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n", event); return -EINVAL; - }; + } if (fn_wait) { DPU_ATRACE_BEGIN("wait_for_completion_event"); @@ -2274,7 +2257,6 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc, enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder) { struct dpu_encoder_virt *dpu_enc = NULL; - int i; if (!encoder) { DPU_ERROR("invalid encoder\n"); @@ -2285,12 +2267,8 @@ enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder) if (dpu_enc->cur_master) return 
dpu_enc->cur_master->intf_mode; - for (i = 0; i < dpu_enc->num_phys_encs; i++) { - struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; - - if (phys) - return phys->intf_mode; - } + if (dpu_enc->num_phys_encs) + return dpu_enc->phys_encs[0]->intf_mode; return INTF_MODE_NONE; } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c index 047960949fbb69f3d273a60959f7e110a68e28cb..39e1e280ba4432b37ba6eff5abd432b41ad4f5c2 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c @@ -45,8 +45,7 @@ static bool dpu_encoder_phys_cmd_mode_fixup( const struct drm_display_mode *mode, struct drm_display_mode *adj_mode) { - if (phys_enc) - DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc), "\n"); + DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc), "\n"); return true; } @@ -58,11 +57,8 @@ static void _dpu_encoder_phys_cmd_update_intf_cfg( struct dpu_hw_ctl *ctl; struct dpu_hw_intf_cfg intf_cfg = { 0 }; - if (!phys_enc) - return; - ctl = phys_enc->hw_ctl; - if (!ctl || !ctl->ops.setup_intf_cfg) + if (!ctl->ops.setup_intf_cfg) return; intf_cfg.intf = phys_enc->intf_idx; @@ -79,7 +75,7 @@ static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx) int new_cnt; u32 event = DPU_ENCODER_FRAME_EVENT_DONE; - if (!phys_enc || !phys_enc->hw_pp) + if (!phys_enc->hw_pp) return; DPU_ATRACE_BEGIN("pp_done_irq"); @@ -106,7 +102,7 @@ static void dpu_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx) struct dpu_encoder_phys *phys_enc = arg; struct dpu_encoder_phys_cmd *cmd_enc; - if (!phys_enc || !phys_enc->hw_pp) + if (!phys_enc->hw_pp) return; DPU_ATRACE_BEGIN("rd_ptr_irq"); @@ -125,9 +121,6 @@ static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx) { struct dpu_encoder_phys *phys_enc = arg; - if (!phys_enc || !phys_enc->hw_ctl) - return; - DPU_ATRACE_BEGIN("ctl_start_irq"); atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0); @@ -141,9 +134,6 @@ static void dpu_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx) { struct dpu_encoder_phys *phys_enc = arg; - if (!phys_enc) - return; - if (phys_enc->parent_ops->handle_underrun_virt) phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent, phys_enc); @@ -179,7 +169,7 @@ static void dpu_encoder_phys_cmd_mode_set( struct dpu_encoder_phys_cmd *cmd_enc = to_dpu_encoder_phys_cmd(phys_enc); - if (!phys_enc || !mode || !adj_mode) { + if (!mode || !adj_mode) { DPU_ERROR("invalid args\n"); return; } @@ -198,7 +188,7 @@ static int _dpu_encoder_phys_cmd_handle_ppdone_timeout( u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR; bool do_log = false; - if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_ctl) + if (!phys_enc->hw_pp) return -EINVAL; cmd_enc->pp_timeout_report_cnt++; @@ -247,11 +237,6 @@ static int _dpu_encoder_phys_cmd_wait_for_idle( struct dpu_encoder_wait_info wait_info; int ret; - if (!phys_enc) { - DPU_ERROR("invalid encoder\n"); - return -EINVAL; - } - wait_info.wq = &phys_enc->pending_kickoff_wq; wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt; wait_info.timeout_ms = KICKOFF_TIMEOUT_MS; @@ -273,7 +258,7 @@ static int dpu_encoder_phys_cmd_control_vblank_irq( int ret = 0; int refcount; - if (!phys_enc || !phys_enc->hw_pp) { + if (!phys_enc->hw_pp) { DPU_ERROR("invalid encoder\n"); return -EINVAL; } @@ -314,9 +299,6 @@ end: static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc, bool enable) { - if (!phys_enc) - return; - 
trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0, enable, atomic_read(&phys_enc->vblank_refcount)); @@ -351,7 +333,7 @@ static void dpu_encoder_phys_cmd_tearcheck_config( u32 vsync_hz; struct dpu_kms *dpu_kms; - if (!phys_enc || !phys_enc->hw_pp) { + if (!phys_enc->hw_pp) { DPU_ERROR("invalid encoder\n"); return; } @@ -428,8 +410,7 @@ static void _dpu_encoder_phys_cmd_pingpong_config( struct dpu_encoder_phys_cmd *cmd_enc = to_dpu_encoder_phys_cmd(phys_enc); - if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp - || !phys_enc->hw_ctl->ops.setup_intf_cfg) { + if (!phys_enc->hw_pp || !phys_enc->hw_ctl->ops.setup_intf_cfg) { DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != 0); return; } @@ -458,7 +439,7 @@ static void dpu_encoder_phys_cmd_enable_helper( struct dpu_hw_ctl *ctl; u32 flush_mask = 0; - if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp) { + if (!phys_enc->hw_pp) { DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0); return; } @@ -480,7 +461,7 @@ static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc) struct dpu_encoder_phys_cmd *cmd_enc = to_dpu_encoder_phys_cmd(phys_enc); - if (!phys_enc || !phys_enc->hw_pp) { + if (!phys_enc->hw_pp) { DPU_ERROR("invalid phys encoder\n"); return; } @@ -499,8 +480,7 @@ static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc) static void _dpu_encoder_phys_cmd_connect_te( struct dpu_encoder_phys *phys_enc, bool enable) { - if (!phys_enc || !phys_enc->hw_pp || - !phys_enc->hw_pp->ops.connect_external_te) + if (!phys_enc->hw_pp || !phys_enc->hw_pp->ops.connect_external_te) return; trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable); @@ -518,7 +498,7 @@ static int dpu_encoder_phys_cmd_get_line_count( { struct dpu_hw_pingpong *hw_pp; - if (!phys_enc || !phys_enc->hw_pp) + if (!phys_enc->hw_pp) return -EINVAL; if (!dpu_encoder_phys_cmd_is_master(phys_enc)) @@ -536,7 +516,7 @@ static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc) struct dpu_encoder_phys_cmd *cmd_enc = to_dpu_encoder_phys_cmd(phys_enc); - if (!phys_enc || !phys_enc->hw_pp) { + if (!phys_enc->hw_pp) { DPU_ERROR("invalid encoder\n"); return; } @@ -559,10 +539,6 @@ static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc) struct dpu_encoder_phys_cmd *cmd_enc = to_dpu_encoder_phys_cmd(phys_enc); - if (!phys_enc) { - DPU_ERROR("invalid encoder\n"); - return; - } kfree(cmd_enc); } @@ -580,7 +556,7 @@ static void dpu_encoder_phys_cmd_prepare_for_kickoff( to_dpu_encoder_phys_cmd(phys_enc); int ret; - if (!phys_enc || !phys_enc->hw_pp) { + if (!phys_enc->hw_pp) { DPU_ERROR("invalid encoder\n"); return; } @@ -614,11 +590,6 @@ static int _dpu_encoder_phys_cmd_wait_for_ctl_start( struct dpu_encoder_wait_info wait_info; int ret; - if (!phys_enc || !phys_enc->hw_ctl) { - DPU_ERROR("invalid argument(s)\n"); - return -EINVAL; - } - wait_info.wq = &phys_enc->pending_kickoff_wq; wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt; wait_info.timeout_ms = KICKOFF_TIMEOUT_MS; @@ -639,9 +610,6 @@ static int dpu_encoder_phys_cmd_wait_for_tx_complete( { int rc; - if (!phys_enc) - return -EINVAL; - rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc); if (rc) { DRM_ERROR("failed wait_for_idle: id:%u ret:%d intf:%d\n", @@ -658,9 +626,6 @@ static int dpu_encoder_phys_cmd_wait_for_commit_done( int rc = 0; struct dpu_encoder_phys_cmd *cmd_enc; - if (!phys_enc) - return -EINVAL; - cmd_enc = to_dpu_encoder_phys_cmd(phys_enc); /* only required for master controller */ @@ 
-681,9 +646,6 @@ static int dpu_encoder_phys_cmd_wait_for_vblank( struct dpu_encoder_phys_cmd *cmd_enc; struct dpu_encoder_wait_info wait_info; - if (!phys_enc) - return -EINVAL; - cmd_enc = to_dpu_encoder_phys_cmd(phys_enc); /* only required for master controller */ @@ -715,9 +677,6 @@ static void dpu_encoder_phys_cmd_handle_post_kickoff( static void dpu_encoder_phys_cmd_trigger_start( struct dpu_encoder_phys *phys_enc) { - if (!phys_enc) - return; - dpu_encoder_helper_trigger_start(phys_enc); } @@ -816,6 +775,4 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init( DPU_DEBUG_CMDENC(cmd_enc, "created\n"); return phys_enc; - - return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c index 3123ef873cdfeb0e847da6808c0de3485efbb301..c71c18de5966c7c16889923dcf9d87c299de75bc 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c @@ -220,8 +220,7 @@ static bool dpu_encoder_phys_vid_mode_fixup( const struct drm_display_mode *mode, struct drm_display_mode *adj_mode) { - if (phys_enc) - DPU_DEBUG_VIDENC(phys_enc, "\n"); + DPU_DEBUG_VIDENC(phys_enc, "\n"); /* * Modifying mode has consequences when the mode comes back to us @@ -239,7 +238,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine( unsigned long lock_flags; struct dpu_hw_intf_cfg intf_cfg = { 0 }; - if (!phys_enc || !phys_enc->hw_ctl->ops.setup_intf_cfg) { + if (!phys_enc->hw_ctl->ops.setup_intf_cfg) { DPU_ERROR("invalid encoder %d\n", phys_enc != 0); return; } @@ -280,6 +279,14 @@ static void dpu_encoder_phys_vid_setup_timing_engine( phys_enc->hw_intf->ops.setup_timing_gen(phys_enc->hw_intf, &timing_params, fmt); phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg); + + /* setup which pp blk will connect to this intf */ + if (phys_enc->hw_intf->ops.bind_pingpong_blk) + phys_enc->hw_intf->ops.bind_pingpong_blk( + phys_enc->hw_intf, + true, + phys_enc->hw_pp->idx); + spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags); programmable_fetch_config(phys_enc, &timing_params); @@ -293,12 +300,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx) u32 flush_register = 0; int new_cnt = -1, old_cnt = -1; - if (!phys_enc) - return; - hw_ctl = phys_enc->hw_ctl; - if (!hw_ctl) - return; DPU_ATRACE_BEGIN("vblank_irq"); @@ -314,7 +316,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx) * so we need to double-check with hw that it accepted the flush bits */ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags); - if (hw_ctl && hw_ctl->ops.get_flush_register) + if (hw_ctl->ops.get_flush_register) flush_register = hw_ctl->ops.get_flush_register(hw_ctl); if (!(flush_register & hw_ctl->ops.get_pending_flush(hw_ctl))) @@ -335,9 +337,6 @@ static void dpu_encoder_phys_vid_underrun_irq(void *arg, int irq_idx) { struct dpu_encoder_phys *phys_enc = arg; - if (!phys_enc) - return; - if (phys_enc->parent_ops->handle_underrun_virt) phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent, phys_enc); @@ -374,11 +373,6 @@ static void dpu_encoder_phys_vid_mode_set( struct drm_display_mode *mode, struct drm_display_mode *adj_mode) { - if (!phys_enc) { - DPU_ERROR("invalid encoder/kms\n"); - return; - } - if (adj_mode) { phys_enc->cached_mode = *adj_mode; drm_mode_debug_printmodeline(adj_mode); @@ -395,11 +389,6 @@ static int dpu_encoder_phys_vid_control_vblank_irq( int ret = 0; int refcount; - if (!phys_enc) { - DPU_ERROR("invalid encoder\n"); - return 
-EINVAL; - } - refcount = atomic_read(&phys_enc->vblank_refcount); /* Slave encoders don't report vblank */ @@ -435,6 +424,7 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc) { struct dpu_hw_ctl *ctl; u32 flush_mask = 0; + u32 intf_flush_mask = 0; ctl = phys_enc->hw_ctl; @@ -459,10 +449,18 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc) ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->hw_intf->idx); ctl->ops.update_pending_flush(ctl, flush_mask); + if (ctl->ops.get_bitmask_active_intf) + ctl->ops.get_bitmask_active_intf(ctl, &intf_flush_mask, + phys_enc->hw_intf->idx); + + if (ctl->ops.update_pending_intf_flush) + ctl->ops.update_pending_intf_flush(ctl, intf_flush_mask); + skip_flush: DPU_DEBUG_VIDENC(phys_enc, - "update pending flush ctl %d flush_mask %x\n", - ctl->idx - CTL_0, flush_mask); + "update pending flush ctl %d flush_mask 0x%x intf_mask 0x%x\n", + ctl->idx - CTL_0, flush_mask, intf_flush_mask); + /* ctl_flush & timing engine enable will be triggered by framework */ if (phys_enc->enable_state == DPU_ENC_DISABLED) @@ -471,11 +469,6 @@ skip_flush: static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc) { - if (!phys_enc) { - DPU_ERROR("invalid encoder\n"); - return; - } - DPU_DEBUG_VIDENC(phys_enc, "\n"); kfree(phys_enc); } @@ -493,11 +486,6 @@ static int dpu_encoder_phys_vid_wait_for_vblank( struct dpu_encoder_wait_info wait_info; int ret; - if (!phys_enc) { - pr_err("invalid encoder\n"); - return -EINVAL; - } - wait_info.wq = &phys_enc->pending_kickoff_wq; wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt; wait_info.timeout_ms = KICKOFF_TIMEOUT_MS; @@ -543,13 +531,8 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff( struct dpu_hw_ctl *ctl; int rc; - if (!phys_enc) { - DPU_ERROR("invalid encoder/parameters\n"); - return; - } - ctl = phys_enc->hw_ctl; - if (!ctl || !ctl->ops.wait_reset_status) + if (!ctl->ops.wait_reset_status) return; /* @@ -569,12 +552,12 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc) unsigned long lock_flags; int ret; - if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev) { + if (!phys_enc->parent || !phys_enc->parent->dev) { DPU_ERROR("invalid encoder/device\n"); return; } - if (!phys_enc->hw_intf || !phys_enc->hw_ctl) { + if (!phys_enc->hw_intf) { DPU_ERROR("invalid hw_intf %d hw_ctl %d\n", phys_enc->hw_intf != 0, phys_enc->hw_ctl != 0); return; @@ -639,9 +622,6 @@ static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc, { int ret; - if (!phys_enc) - return; - trace_dpu_enc_phys_vid_irq_ctrl(DRMID(phys_enc->parent), phys_enc->hw_intf->idx - INTF_0, enable, @@ -662,9 +642,6 @@ static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc, static int dpu_encoder_phys_vid_get_line_count( struct dpu_encoder_phys *phys_enc) { - if (!phys_enc) - return -EINVAL; - if (!dpu_encoder_phys_vid_is_master(phys_enc)) return -EINVAL; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c index 24ab6249083a0e23c25a7a1cd58d7a7fcb223a83..528632690f1ef4f6d0ab476f10d3c26e68540d1c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c @@ -489,12 +489,28 @@ static const struct dpu_format dpu_format_map_ubwc[] = { true, 4, DPU_FORMAT_FLAG_COMPRESSED, DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC), + /* ARGB8888 and ABGR8888 purposely have the same color + * ordering. The hardware only supports ABGR8888 UBWC + * natively.
+ */ + INTERLEAVED_RGB_FMT_TILED(ARGB8888, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + true, 4, DPU_FORMAT_FLAG_COMPRESSED, + DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC), + INTERLEAVED_RGB_FMT_TILED(XBGR8888, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, false, 4, DPU_FORMAT_FLAG_COMPRESSED, DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC), + INTERLEAVED_RGB_FMT_TILED(XRGB8888, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + false, 4, DPU_FORMAT_FLAG_COMPRESSED, + DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC), + INTERLEAVED_RGB_FMT_TILED(ABGR2101010, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, @@ -550,7 +566,9 @@ static int _dpu_format_get_media_color_ubwc(const struct dpu_format *fmt) { static const struct dpu_media_color_map dpu_media_ubwc_map[] = { {DRM_FORMAT_ABGR8888, COLOR_FMT_RGBA8888_UBWC}, + {DRM_FORMAT_ARGB8888, COLOR_FMT_RGBA8888_UBWC}, {DRM_FORMAT_XBGR8888, COLOR_FMT_RGBA8888_UBWC}, + {DRM_FORMAT_XRGB8888, COLOR_FMT_RGBA8888_UBWC}, {DRM_FORMAT_ABGR2101010, COLOR_FMT_RGBA1010102_UBWC}, {DRM_FORMAT_XBGR2101010, COLOR_FMT_RGBA1010102_UBWC}, {DRM_FORMAT_BGR565, COLOR_FMT_RGB565_UBWC}, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c index 04c8c44f5b9ccf1a465edbf93f2a126d49c029d8..c567917541e8b596496b44499d334a0724a65199 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c @@ -11,11 +11,17 @@ #include "dpu_hw_catalog_format.h" #include "dpu_kms.h" -#define VIG_SDM845_MASK \ - (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_SCALER_QSEED3) | BIT(DPU_SSPP_QOS) |\ +#define VIG_MASK \ + (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\ BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_QOS_8LVL) |\ BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT)) +#define VIG_SDM845_MASK \ + (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3)) + +#define VIG_SC7180_MASK \ + (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED4)) + #define DMA_SDM845_MASK \ (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\ BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\ @@ -27,6 +33,9 @@ #define MIXER_SDM845_MASK \ (BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER)) +#define MIXER_SC7180_MASK \ + (BIT(DPU_DIM_LAYER)) + #define PINGPONG_SDM845_MASK BIT(DPU_PINGPONG_DITHER) #define PINGPONG_SDM845_SPLIT_MASK \ @@ -58,9 +67,20 @@ static const struct dpu_caps sdm845_dpu_caps = { .has_src_split = true, .has_dim_layer = true, .has_idle_pc = true, + .has_3d_merge = true, +}; + +static const struct dpu_caps sc7180_dpu_caps = { + .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, + .max_mixer_blendstages = 0x9, + .qseed_type = DPU_SSPP_SCALER_QSEED4, + .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, + .ubwc_version = DPU_HW_UBWC_VER_20, + .has_dim_layer = true, + .has_idle_pc = true, }; -static struct dpu_mdp_cfg sdm845_mdp[] = { +static const struct dpu_mdp_cfg sdm845_mdp[] = { { .name = "top_0", .id = MDP_TOP, .base = 0x0, .len = 0x45C, @@ -85,10 +105,27 @@ static struct dpu_mdp_cfg sdm845_mdp[] = { }, }; +static const struct dpu_mdp_cfg sc7180_mdp[] = { + { + .name = "top_0", .id = MDP_TOP, + .base = 0x0, .len = 0x494, + .features = 0, + .highest_bank_bit = 0x3, + .clk_ctrls[DPU_CLK_CTRL_VIG0] = { + .reg_off = 0x2AC, .bit_off = 0}, + .clk_ctrls[DPU_CLK_CTRL_DMA0] = { + .reg_off = 0x2AC, .bit_off = 8}, + .clk_ctrls[DPU_CLK_CTRL_DMA1] = { + .reg_off = 0x2B4, .bit_off = 8}, 
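Two of the new UBWC entries above deliberately alias one hardware layout; this tiny lookup sketch mirrors the dpu_media_ubwc_map additions (userspace stand-ins, with fourccs spelled as strings instead of the kernel's DRM_FORMAT_* macros and the media color as a string instead of an enum):

    #include <stdio.h>
    #include <string.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    /* stand-in for struct dpu_media_color_map: DRM fourcc -> UBWC media color */
    static const struct { const char *fourcc; const char *ubwc; } ubwc_map[] = {
            { "ABGR8888", "COLOR_FMT_RGBA8888_UBWC" },
            { "ARGB8888", "COLOR_FMT_RGBA8888_UBWC" },  /* aliased on purpose */
            { "XBGR8888", "COLOR_FMT_RGBA8888_UBWC" },
            { "XRGB8888", "COLOR_FMT_RGBA8888_UBWC" },  /* aliased on purpose */
            { "BGR565",   "COLOR_FMT_RGB565_UBWC"   },
    };

    static const char *media_color_ubwc(const char *fourcc)
    {
            for (size_t i = 0; i < ARRAY_SIZE(ubwc_map); i++)
                    if (!strcmp(ubwc_map[i].fourcc, fourcc))
                            return ubwc_map[i].ubwc;
            return NULL;    /* the driver reports an error in this case */
    }

    int main(void)
    {
            printf("ARGB8888 -> %s\n", media_color_ubwc("ARGB8888"));
            return 0;
    }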
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = { + .reg_off = 0x2BC, .bit_off = 8}, + }, +}; + /************************************************************* * CTL sub blocks config *************************************************************/ -static struct dpu_ctl_cfg sdm845_ctl[] = { +static const struct dpu_ctl_cfg sdm845_ctl[] = { { .name = "ctl_0", .id = CTL_0, .base = 0x1000, .len = 0xE4, @@ -116,6 +153,24 @@ static struct dpu_ctl_cfg sdm845_ctl[] = { }, }; +static const struct dpu_ctl_cfg sc7180_ctl[] = { + { + .name = "ctl_0", .id = CTL_0, + .base = 0x1000, .len = 0xE4, + .features = BIT(DPU_CTL_ACTIVE_CFG) + }, + { + .name = "ctl_1", .id = CTL_1, + .base = 0x1200, .len = 0xE4, + .features = BIT(DPU_CTL_ACTIVE_CFG) + }, + { + .name = "ctl_2", .id = CTL_2, + .base = 0x1400, .len = 0xE4, + .features = BIT(DPU_CTL_ACTIVE_CFG) + }, +}; + /************************************************************* * SSPP sub blocks config *************************************************************/ @@ -128,7 +183,7 @@ static const struct dpu_sspp_blks_common sdm845_sspp_common = { .maxvdeciexp = MAX_VERT_DECIMATION, }; -#define _VIG_SBLK(num, sdma_pri) \ +#define _VIG_SBLK(num, sdma_pri, qseed_ver) \ { \ .common = &sdm845_sspp_common, \ .maxdwnscale = MAX_DOWNSCALE_RATIO, \ @@ -137,7 +192,7 @@ static const struct dpu_sspp_blks_common sdm845_sspp_common = { .src_blk = {.name = STRCAT("sspp_src_", num), \ .id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \ .scaler_blk = {.name = STRCAT("sspp_scaler", num), \ - .id = DPU_SSPP_SCALER_QSEED3, \ + .id = qseed_ver, \ .base = 0xa00, .len = 0xa0,}, \ .csc_blk = {.name = STRCAT("sspp_csc", num), \ .id = DPU_SSPP_CSC_10BIT, \ @@ -162,10 +217,14 @@ static const struct dpu_sspp_blks_common sdm845_sspp_common = { .virt_num_formats = ARRAY_SIZE(plane_formats), \ } -static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 = _VIG_SBLK("0", 5); -static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 = _VIG_SBLK("1", 6); -static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 = _VIG_SBLK("2", 7); -static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 = _VIG_SBLK("3", 8); +static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 = + _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED3); +static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 = + _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED3); +static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 = + _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED3); +static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 = + _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED3); static const struct dpu_sspp_sub_blks sdm845_dma_sblk_0 = _DMA_SBLK("8", 1); static const struct dpu_sspp_sub_blks sdm845_dma_sblk_1 = _DMA_SBLK("9", 2); @@ -184,7 +243,7 @@ static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK("11", 4); .clk_ctrl = _clkctrl \ } -static struct dpu_sspp_cfg sdm845_sspp[] = { +static const struct dpu_sspp_cfg sdm845_sspp[] = { SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SDM845_MASK, sdm845_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0), SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SDM845_MASK, @@ -203,9 +262,26 @@ static struct dpu_sspp_cfg sdm845_sspp[] = { sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1), }; +static const struct dpu_sspp_sub_blks sc7180_vig_sblk_0 = + _VIG_SBLK("0", 4, DPU_SSPP_SCALER_QSEED4); + +static const struct dpu_sspp_cfg sc7180_sspp[] = { + SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK, + sc7180_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0), + SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK, + 
sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0), + SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_SDM845_MASK, + sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1), + SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK, + sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0), +}; + /************************************************************* * MIXER sub blocks config *************************************************************/ + +/* SDM845 */ + static const struct dpu_lm_sub_blks sdm845_lm_sblk = { .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH, .maxblendstages = 11, /* excluding base layer */ @@ -215,23 +291,46 @@ static const struct dpu_lm_sub_blks sdm845_lm_sblk = { }, }; -#define LM_BLK(_name, _id, _base, _pp, _lmpair) \ +#define LM_BLK(_name, _id, _base, _fmask, _sblk, _pp, _lmpair) \ { \ .name = _name, .id = _id, \ .base = _base, .len = 0x320, \ - .features = MIXER_SDM845_MASK, \ - .sblk = &sdm845_lm_sblk, \ + .features = _fmask, \ + .sblk = _sblk, \ .pingpong = _pp, \ .lm_pair_mask = (1 << _lmpair) \ } -static struct dpu_lm_cfg sdm845_lm[] = { - LM_BLK("lm_0", LM_0, 0x44000, PINGPONG_0, LM_1), - LM_BLK("lm_1", LM_1, 0x45000, PINGPONG_1, LM_0), - LM_BLK("lm_2", LM_2, 0x46000, PINGPONG_2, LM_5), - LM_BLK("lm_3", LM_3, 0x0, PINGPONG_MAX, 0), - LM_BLK("lm_4", LM_4, 0x0, PINGPONG_MAX, 0), - LM_BLK("lm_5", LM_5, 0x49000, PINGPONG_3, LM_2), +static const struct dpu_lm_cfg sdm845_lm[] = { + LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK, + &sdm845_lm_sblk, PINGPONG_0, LM_1), + LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK, + &sdm845_lm_sblk, PINGPONG_1, LM_0), + LM_BLK("lm_2", LM_2, 0x46000, MIXER_SDM845_MASK, + &sdm845_lm_sblk, PINGPONG_2, LM_5), + LM_BLK("lm_3", LM_3, 0x0, MIXER_SDM845_MASK, + &sdm845_lm_sblk, PINGPONG_MAX, 0), + LM_BLK("lm_4", LM_4, 0x0, MIXER_SDM845_MASK, + &sdm845_lm_sblk, PINGPONG_MAX, 0), + LM_BLK("lm_5", LM_5, 0x49000, MIXER_SDM845_MASK, + &sdm845_lm_sblk, PINGPONG_3, LM_2), +}; + +/* SC7180 */ + +static const struct dpu_lm_sub_blks sc7180_lm_sblk = { + .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH, + .maxblendstages = 7, /* excluding base layer */ + .blendstage_base = { /* offsets relative to mixer base */ + 0x20, 0x38, 0x50, 0x68, 0x80, 0x98, 0xb0 + }, +}; + +static const struct dpu_lm_cfg sc7180_lm[] = { + LM_BLK("lm_0", LM_0, 0x44000, MIXER_SC7180_MASK, + &sc7180_lm_sblk, PINGPONG_0, LM_1), + LM_BLK("lm_1", LM_1, 0x45000, MIXER_SC7180_MASK, + &sc7180_lm_sblk, PINGPONG_1, LM_0), }; /************************************************************* @@ -264,13 +363,18 @@ static const struct dpu_pingpong_sub_blks sdm845_pp_sblk = { .sblk = &sdm845_pp_sblk \ } -static struct dpu_pingpong_cfg sdm845_pp[] = { +static const struct dpu_pingpong_cfg sdm845_pp[] = { PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000), PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800), PP_BLK("pingpong_2", PINGPONG_2, 0x71000), PP_BLK("pingpong_3", PINGPONG_3, 0x71800), }; +static struct dpu_pingpong_cfg sc7180_pp[] = { + PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000), + PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800), +}; + /************************************************************* * INTF sub blocks config *************************************************************/ @@ -278,26 +382,32 @@ static struct dpu_pingpong_cfg sdm845_pp[] = { {\ .name = _name, .id = _id, \ .base = _base, .len = 0x280, \ + .features = BIT(DPU_CTL_ACTIVE_CFG), \ .type = _type, \ .controller_id = _ctrl_id, \ .prog_fetch_lines_worst_case = 24 \ } -static struct dpu_intf_cfg sdm845_intf[] = { +static const 
struct dpu_intf_cfg sdm845_intf[] = { INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0), INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0), INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1), INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1), }; +static const struct dpu_intf_cfg sc7180_intf[] = { + INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0), + INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0), +}; + /************************************************************* * VBIF sub blocks config *************************************************************/ /* VBIF QOS remap */ -static u32 sdm845_rt_pri_lvl[] = {3, 3, 4, 4, 5, 5, 6, 6}; -static u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3}; +static const u32 sdm845_rt_pri_lvl[] = {3, 3, 4, 4, 5, 5, 6, 6}; +static const u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3}; -static struct dpu_vbif_cfg sdm845_vbif[] = { +static const struct dpu_vbif_cfg sdm845_vbif[] = { { .name = "vbif_0", .id = VBIF_0, .base = 0, .len = 0x1040, @@ -316,7 +426,7 @@ static struct dpu_vbif_cfg sdm845_vbif[] = { }, }; -static struct dpu_reg_dma_cfg sdm845_regdma = { +static const struct dpu_reg_dma_cfg sdm845_regdma = { .base = 0x0, .version = 0x1, .trigger_sel_off = 0x119c }; @@ -325,7 +435,7 @@ static struct dpu_reg_dma_cfg sdm845_regdma = { *************************************************************/ /* SSPP QOS LUTs */ -static struct dpu_qos_lut_entry sdm845_qos_linear[] = { +static const struct dpu_qos_lut_entry sdm845_qos_linear[] = { {.fl = 4, .lut = 0x357}, {.fl = 5, .lut = 0x3357}, {.fl = 6, .lut = 0x23357}, @@ -340,7 +450,11 @@ static struct dpu_qos_lut_entry sdm845_qos_linear[] = { {.fl = 0, .lut = 0x11222222223357} }; -static struct dpu_qos_lut_entry sdm845_qos_macrotile[] = { +static const struct dpu_qos_lut_entry sc7180_qos_linear[] = { + {.fl = 0, .lut = 0x0011222222335777}, +}; + +static const struct dpu_qos_lut_entry sdm845_qos_macrotile[] = { {.fl = 10, .lut = 0x344556677}, {.fl = 11, .lut = 0x3344556677}, {.fl = 12, .lut = 0x23344556677}, @@ -349,11 +463,19 @@ static struct dpu_qos_lut_entry sdm845_qos_macrotile[] = { {.fl = 0, .lut = 0x112233344556677}, }; -static struct dpu_qos_lut_entry sdm845_qos_nrt[] = { +static const struct dpu_qos_lut_entry sc7180_qos_macrotile[] = { + {.fl = 0, .lut = 0x0011223344556677}, +}; + +static const struct dpu_qos_lut_entry sdm845_qos_nrt[] = { + {.fl = 0, .lut = 0x0}, +}; + +static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = { {.fl = 0, .lut = 0x0}, }; -static struct dpu_perf_cfg sdm845_perf_data = { +static const struct dpu_perf_cfg sdm845_perf_data = { .max_bw_low = 6800000, .max_bw_high = 6800000, .min_core_ib = 2400000, @@ -392,6 +514,30 @@ static struct dpu_perf_cfg sdm845_perf_data = { }, }; +static const struct dpu_perf_cfg sc7180_perf_data = { + .max_bw_low = 3900000, + .max_bw_high = 5500000, + .min_core_ib = 2400000, + .min_llcc_ib = 800000, + .min_dram_ib = 800000, + .danger_lut_tbl = {0xff, 0xffff, 0x0}, + .qos_lut_tbl = { + {.nentry = ARRAY_SIZE(sc7180_qos_linear), + .entries = sc7180_qos_linear + }, + {.nentry = ARRAY_SIZE(sc7180_qos_macrotile), + .entries = sc7180_qos_macrotile + }, + {.nentry = ARRAY_SIZE(sc7180_qos_nrt), + .entries = sc7180_qos_nrt + }, + }, + .cdp_cfg = { + {.rd_enable = 1, .wr_enable = 1}, + {.rd_enable = 1, .wr_enable = 0} + }, +}; + /************************************************************* * Hardware catalog init *************************************************************/ @@ -421,12 +567,43 @@ static void sdm845_cfg_init(struct dpu_mdss_cfg *dpu_cfg) 
.reg_dma_count = 1, .dma_cfg = sdm845_regdma, .perf = sdm845_perf_data, + .mdss_irqs = 0x3ff, + }; +} + +/* + * sc7180_cfg_init(): populate sc7180 dpu sub-blocks reg offsets + * and instance counts. + */ +static void sc7180_cfg_init(struct dpu_mdss_cfg *dpu_cfg) +{ + *dpu_cfg = (struct dpu_mdss_cfg){ + .caps = &sc7180_dpu_caps, + .mdp_count = ARRAY_SIZE(sc7180_mdp), + .mdp = sc7180_mdp, + .ctl_count = ARRAY_SIZE(sc7180_ctl), + .ctl = sc7180_ctl, + .sspp_count = ARRAY_SIZE(sc7180_sspp), + .sspp = sc7180_sspp, + .mixer_count = ARRAY_SIZE(sc7180_lm), + .mixer = sc7180_lm, + .pingpong_count = ARRAY_SIZE(sc7180_pp), + .pingpong = sc7180_pp, + .intf_count = ARRAY_SIZE(sc7180_intf), + .intf = sc7180_intf, + .vbif_count = ARRAY_SIZE(sdm845_vbif), + .vbif = sdm845_vbif, + .reg_dma_count = 1, + .dma_cfg = sdm845_regdma, + .perf = sc7180_perf_data, + .mdss_irqs = 0x3f, }; } -static struct dpu_mdss_hw_cfg_handler cfg_handler[] = { +static const struct dpu_mdss_hw_cfg_handler cfg_handler[] = { { .hw_rev = DPU_HW_VER_400, .cfg_init = sdm845_cfg_init}, { .hw_rev = DPU_HW_VER_401, .cfg_init = sdm845_cfg_init}, + { .hw_rev = DPU_HW_VER_620, .cfg_init = sc7180_cfg_init}, }; void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h index ec76b8687a985ba90f7daf8db2b125e7dbecdf03..09df7d87dd43f3fe56e0b80b0d0ad3a5bc0a74c6 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h @@ -38,6 +38,7 @@ #define DPU_HW_VER_401 DPU_HW_VER(4, 0, 1) /* sdm845 v2.0 */ #define DPU_HW_VER_410 DPU_HW_VER(4, 1, 0) /* sdm670 v1.0 */ #define DPU_HW_VER_500 DPU_HW_VER(5, 0, 0) /* sdm855 v1.0 */ +#define DPU_HW_VER_620 DPU_HW_VER(6, 2, 0) /* sc7180 v1.0 */ #define IS_MSM8996_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_170) @@ -45,6 +46,7 @@ #define IS_SDM845_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_400) #define IS_SDM670_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_410) #define IS_SDM855_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_500) +#define IS_SC7180_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_620) #define DPU_HW_BLK_NAME_LEN 16 @@ -92,6 +94,7 @@ enum { * @DPU_SSPP_SRC Src and fetch part of the pipes, * @DPU_SSPP_SCALER_QSEED2, QSEED2 algorithm support * @DPU_SSPP_SCALER_QSEED3, QSEED3 algorithm support + * @DPU_SSPP_SCALER_QSEED4, QSEED4 algorithm support * @DPU_SSPP_SCALER_RGB, RGB Scaler, supported by RGB pipes * @DPU_SSPP_CSC, Support of Color space conversion * @DPU_SSPP_CSC_10BIT, Support of 10-bit Color space conversion @@ -110,6 +113,7 @@ enum { DPU_SSPP_SRC = 0x1, DPU_SSPP_SCALER_QSEED2, DPU_SSPP_SCALER_QSEED3, + DPU_SSPP_SCALER_QSEED4, DPU_SSPP_SCALER_RGB, DPU_SSPP_CSC, DPU_SSPP_CSC_10BIT, @@ -166,6 +170,7 @@ enum { */ enum { DPU_CTL_SPLIT_DISPLAY = 0x1, + DPU_CTL_ACTIVE_CFG, DPU_CTL_MAX }; @@ -269,7 +274,7 @@ struct dpu_qos_lut_entry { */ struct dpu_qos_lut_tbl { u32 nentry; - struct dpu_qos_lut_entry *entries; + const struct dpu_qos_lut_entry *entries; }; /** @@ -283,6 +288,7 @@ struct dpu_qos_lut_tbl { * @has_src_split source split feature status * @has_dim_layer dim layer feature status * @has_idle_pc indicate if idle power collapse feature is supported + * @has_3d_merge indicate if 3D merge is supported */ struct dpu_caps { u32 max_mixer_width; @@ -293,6 +299,7 @@ struct dpu_caps { bool has_src_split; bool has_dim_layer; bool has_idle_pc; + bool has_3d_merge; }; /** @@ -320,6 +327,7 @@ struct 
dpu_sspp_blks_common { * @maxupscale: maxupscale ratio supported * @smart_dma_priority: hw priority of rect1 of multirect pipe * @max_per_pipe_bw: maximum allowable bandwidth of this pipe in kBps + * @qseed_ver: qseed version * @src_blk: * @scaler_blk: * @csc_blk: @@ -340,6 +348,7 @@ struct dpu_sspp_sub_blks { u32 maxupscale; u32 smart_dma_priority; u32 max_per_pipe_bw; + u32 qseed_ver; struct dpu_src_blk src_blk; struct dpu_scaler_blk scaler_blk; struct dpu_pp_blk csc_blk; @@ -511,7 +520,7 @@ struct dpu_vbif_dynamic_ot_cfg { */ struct dpu_vbif_dynamic_ot_tbl { u32 count; - struct dpu_vbif_dynamic_ot_cfg *cfg; + const struct dpu_vbif_dynamic_ot_cfg *cfg; }; /** @@ -521,7 +530,7 @@ struct dpu_vbif_dynamic_ot_tbl { */ struct dpu_vbif_qos_tbl { u32 npriority_lvl; - u32 *priority_lvl; + const u32 *priority_lvl; }; /** @@ -646,6 +655,7 @@ struct dpu_perf_cfg { * @dma_formats Supported formats for dma pipe * @cursor_formats Supported formats for cursor pipe * @vig_formats Supported formats for vig pipe + * @mdss_irqs: Bitmap with the irqs supported by the target */ struct dpu_mdss_cfg { u32 hwversion; @@ -653,25 +663,25 @@ struct dpu_mdss_cfg { const struct dpu_caps *caps; u32 mdp_count; - struct dpu_mdp_cfg *mdp; + const struct dpu_mdp_cfg *mdp; u32 ctl_count; - struct dpu_ctl_cfg *ctl; + const struct dpu_ctl_cfg *ctl; u32 sspp_count; - struct dpu_sspp_cfg *sspp; + const struct dpu_sspp_cfg *sspp; u32 mixer_count; - struct dpu_lm_cfg *mixer; + const struct dpu_lm_cfg *mixer; u32 pingpong_count; - struct dpu_pingpong_cfg *pingpong; + const struct dpu_pingpong_cfg *pingpong; u32 intf_count; - struct dpu_intf_cfg *intf; + const struct dpu_intf_cfg *intf; u32 vbif_count; - struct dpu_vbif_cfg *vbif; + const struct dpu_vbif_cfg *vbif; u32 reg_dma_count; struct dpu_reg_dma_cfg dma_cfg; @@ -681,9 +691,11 @@ struct dpu_mdss_cfg { /* Add additional block data structures here */ struct dpu_perf_cfg perf; - struct dpu_format_extended *dma_formats; - struct dpu_format_extended *cursor_formats; - struct dpu_format_extended *vig_formats; + const struct dpu_format_extended *dma_formats; + const struct dpu_format_extended *cursor_formats; + const struct dpu_format_extended *vig_formats; + + unsigned long mdss_irqs; }; struct dpu_mdss_hw_cfg_handler { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h index bb6112c949aecf5c09b68f3a2c4586d2268270a8..3766f0fd0bf0830dca3c17d8667814d7f6059c83 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h @@ -6,8 +6,12 @@ static const uint32_t qcom_compressed_supported_formats[] = { DRM_FORMAT_ABGR8888, + DRM_FORMAT_ARGB8888, DRM_FORMAT_XBGR8888, + DRM_FORMAT_XRGB8888, DRM_FORMAT_BGR565, + + DRM_FORMAT_NV12, }; static const uint32_t plane_formats[] = { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c index 179e8d52cadb41346f9089ae59bd90b23adcfd85..831e5f7a9b7f0ed676377aa3dc145af7756e627a 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c @@ -22,14 +22,18 @@ #define CTL_PREPARE 0x0d0 #define CTL_SW_RESET 0x030 #define CTL_LAYER_EXTN_OFFSET 0x40 +#define CTL_INTF_ACTIVE 0x0F4 +#define CTL_INTF_FLUSH 0x110 +#define CTL_INTF_MASTER 0x134 #define CTL_MIXER_BORDER_OUT BIT(24) #define CTL_FLUSH_MASK_CTL BIT(17) #define DPU_REG_RESET_TIMEOUT_US 2000 +#define INTF_IDX 31 -static struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl, - struct 
dpu_mdss_cfg *m, +static const struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl, + const struct dpu_mdss_cfg *m, void __iomem *addr, struct dpu_hw_blk_reg_map *b) { @@ -100,11 +104,27 @@ static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx, ctx->pending_flush_mask |= flushbits; } +static inline void dpu_hw_ctl_update_pending_intf_flush(struct dpu_hw_ctl *ctx, + u32 flushbits) +{ + ctx->pending_intf_flush_mask |= flushbits; +} + static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx) { return ctx->pending_flush_mask; } +static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx) +{ + + if (ctx->pending_flush_mask & BIT(INTF_IDX)) + DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH, + ctx->pending_intf_flush_mask); + + DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask); +} + static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx) { trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask, @@ -222,6 +242,36 @@ static int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx, return 0; } +static int dpu_hw_ctl_get_bitmask_intf_v1(struct dpu_hw_ctl *ctx, + u32 *flushbits, enum dpu_intf intf) +{ + switch (intf) { + case INTF_0: + case INTF_1: + *flushbits |= BIT(31); + break; + default: + return 0; + } + return 0; +} + +static int dpu_hw_ctl_active_get_bitmask_intf(struct dpu_hw_ctl *ctx, + u32 *flushbits, enum dpu_intf intf) +{ + switch (intf) { + case INTF_0: + *flushbits |= BIT(0); + break; + case INTF_1: + *flushbits |= BIT(1); + break; + default: + return 0; + } + return 0; +} + static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us) { struct dpu_hw_blk_reg_map *c = &ctx->hw; @@ -422,6 +472,24 @@ exit: DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3); } + +static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx, + struct dpu_hw_intf_cfg *cfg) +{ + struct dpu_hw_blk_reg_map *c = &ctx->hw; + u32 intf_active = 0; + u32 mode_sel = 0; + + if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD) + mode_sel |= BIT(17); + + intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE); + intf_active |= BIT(cfg->intf - INTF_0); + + DPU_REG_WRITE(c, CTL_TOP, mode_sel); + DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active); +} + static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx, struct dpu_hw_intf_cfg *cfg) { @@ -455,31 +523,41 @@ static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx, static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops, unsigned long cap) { + if (cap & BIT(DPU_CTL_ACTIVE_CFG)) { + ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1; + ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1; + ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf_v1; + ops->get_bitmask_active_intf = + dpu_hw_ctl_active_get_bitmask_intf; + ops->update_pending_intf_flush = + dpu_hw_ctl_update_pending_intf_flush; + } else { + ops->trigger_flush = dpu_hw_ctl_trigger_flush; + ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg; + ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf; + } ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush; ops->update_pending_flush = dpu_hw_ctl_update_pending_flush; ops->get_pending_flush = dpu_hw_ctl_get_pending_flush; - ops->trigger_flush = dpu_hw_ctl_trigger_flush; ops->get_flush_register = dpu_hw_ctl_get_flush_register; ops->trigger_start = dpu_hw_ctl_trigger_start; ops->trigger_pending = dpu_hw_ctl_trigger_pending; - ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg; ops->reset = dpu_hw_ctl_reset_control; ops->wait_reset_status = dpu_hw_ctl_wait_reset_status; ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages; 
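For the active-CTL path selected above by BIT(DPU_CTL_ACTIVE_CFG), the flush becomes a two-stage write; here is a compact sketch with a toy reg_write() standing in for DPU_REG_WRITE(), where the bit positions follow the hunk (INTF_IDX = 31 in CTL_FLUSH gates the extra CTL_INTF_FLUSH write):

    #include <stdio.h>
    #include <stdint.h>

    #define INTF_IDX        31      /* CTL_FLUSH bit meaning "also flush interfaces" */
    #define BIT(n)          (1u << (n))

    static void reg_write(const char *reg, uint32_t val)    /* toy MMIO */
    {
            printf("%-14s <- 0x%08x\n", reg, val);
    }

    /* models dpu_hw_ctl_trigger_flush_v1(): INTF mask first, then main flush */
    static void trigger_flush_v1(uint32_t pending_flush, uint32_t pending_intf_flush)
    {
            if (pending_flush & BIT(INTF_IDX))
                    reg_write("CTL_INTF_FLUSH", pending_intf_flush);
            reg_write("CTL_FLUSH", pending_flush);
    }

    int main(void)
    {
            /* e.g. INTF_1 active: bit 1 in the intf mask, bit 31 in the main mask */
            trigger_flush_v1(BIT(INTF_IDX) | 0x3, BIT(1));
            return 0;
    }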
ops->setup_blendstage = dpu_hw_ctl_setup_blendstage; ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp; ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer; - ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf; }; static struct dpu_hw_blk_ops dpu_hw_ops; struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx, void __iomem *addr, - struct dpu_mdss_cfg *m) + const struct dpu_mdss_cfg *m) { struct dpu_hw_ctl *c; - struct dpu_ctl_cfg *cfg; + const struct dpu_ctl_cfg *cfg; c = kzalloc(sizeof(*c), GFP_KERNEL); if (!c) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h index d3ae939ef9f8bc7f3deb6e08b584ccdc9e6baf64..09e1263c72e241c4e1b035522ae357ece5fe0905 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h @@ -90,6 +90,15 @@ struct dpu_hw_ctl_ops { void (*update_pending_flush)(struct dpu_hw_ctl *ctx, u32 flushbits); + /** + * OR in the given flushbits to the cached pending_intf_flush_mask + * No effect on hardware + * @ctx : ctl path ctx pointer + * @flushbits : module flushmask + */ + void (*update_pending_intf_flush)(struct dpu_hw_ctl *ctx, + u32 flushbits); + /** * Write the value of the pending_flush_mask to hardware * @ctx : ctl path ctx pointer @@ -130,10 +139,23 @@ struct dpu_hw_ctl_ops { uint32_t (*get_bitmask_mixer)(struct dpu_hw_ctl *ctx, enum dpu_lm blk); + /** + * Query the value of the intf flush mask + * No effect on hardware + * @ctx : ctl path ctx pointer + */ int (*get_bitmask_intf)(struct dpu_hw_ctl *ctx, u32 *flushbits, enum dpu_intf blk); + /** + * Query the value of the intf active flush mask + * No effect on hardware + * @ctx : ctl path ctx pointer + */ + int (*get_bitmask_active_intf)(struct dpu_hw_ctl *ctx, + u32 *flushbits, enum dpu_intf blk); + /** * Set all blend stages to disabled * @ctx : ctl path ctx pointer @@ -159,6 +181,7 @@ struct dpu_hw_ctl_ops { * @mixer_count: number of mixers * @mixer_hw_caps: mixer hardware capabilities * @pending_flush_mask: storage for pending ctl_flush managed via ops + * @pending_intf_flush_mask: pending INTF flush * @ops: operation list */ struct dpu_hw_ctl { @@ -171,6 +194,7 @@ struct dpu_hw_ctl { int mixer_count; const struct dpu_lm_cfg *mixer_hw_caps; u32 pending_flush_mask; + u32 pending_intf_flush_mask; /* ops */ struct dpu_hw_ctl_ops ops; @@ -195,7 +219,7 @@ static inline struct dpu_hw_ctl *to_dpu_hw_ctl(struct dpu_hw_blk *hw) */ struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx, void __iomem *addr, - struct dpu_mdss_cfg *m); + const struct dpu_mdss_cfg *m); /** * dpu_hw_ctl_destroy(): Destroys ctl driver context diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c index 8bfa7d0eede6c9a4cc8ae3f7a58664cadc8968e3..d84a84f7fe1a9d8ca37cebc44db7833b0d7dc96f 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c @@ -800,8 +800,8 @@ static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr, start_idx = reg_idx * 32; end_idx = start_idx + 32; - if (start_idx >= ARRAY_SIZE(dpu_irq_map) || - end_idx > ARRAY_SIZE(dpu_irq_map)) + if (!test_bit(reg_idx, &intr->irq_mask) || + start_idx >= ARRAY_SIZE(dpu_irq_map)) continue; /* @@ -955,8 +955,11 @@ static int dpu_hw_intr_clear_irqs(struct dpu_hw_intr *intr) if (!intr) return -EINVAL; - for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) - DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].clr_off, 0xffffffff); + for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) { + if (test_bit(i, 
&intr->irq_mask)) + DPU_REG_WRITE(&intr->hw, + dpu_intr_set[i].clr_off, 0xffffffff); + } /* ensure register writes go through */ wmb(); @@ -971,8 +974,11 @@ static int dpu_hw_intr_disable_irqs(struct dpu_hw_intr *intr) if (!intr) return -EINVAL; - for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) - DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].en_off, 0x00000000); + for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) { + if (test_bit(i, &intr->irq_mask)) + DPU_REG_WRITE(&intr->hw, + dpu_intr_set[i].en_off, 0x00000000); + } /* ensure register writes go through */ wmb(); @@ -991,6 +997,9 @@ static void dpu_hw_intr_get_interrupt_statuses(struct dpu_hw_intr *intr) spin_lock_irqsave(&intr->irq_lock, irq_flags); for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) { + if (!test_bit(i, &intr->irq_mask)) + continue; + /* Read interrupt status */ intr->save_irq_status[i] = DPU_REG_READ(&intr->hw, dpu_intr_set[i].status_off); @@ -1115,6 +1124,7 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr, return ERR_PTR(-ENOMEM); } + intr->irq_mask = m->mdss_irqs; spin_lock_init(&intr->irq_lock); return intr; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h index 4edcf402dc46160f071a8ea28a36f8853102c760..fc9c98617281f17a1a6d8f3d5b20aa2f78b7635e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h @@ -187,6 +187,7 @@ struct dpu_hw_intr { u32 *save_irq_status; u32 irq_idx_tbl_size; spinlock_t irq_lock; + unsigned long irq_mask; }; /** diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c index dcd87cda13fe0b8fa1c226f4097a264fb6f66387..efe9a5719c6b76c55b8a573ebddc8b5e4e0ff5ea 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c @@ -56,8 +56,10 @@ #define INTF_FRAME_COUNT 0x0AC #define INTF_LINE_COUNT 0x0B0 -static struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf, - struct dpu_mdss_cfg *m, +#define INTF_MUX 0x25C + +static const struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf, + const struct dpu_mdss_cfg *m, void __iomem *addr, struct dpu_hw_blk_reg_map *b) { @@ -218,6 +220,30 @@ static void dpu_hw_intf_setup_prg_fetch( DPU_REG_WRITE(c, INTF_CONFIG, fetch_enable); } +static void dpu_hw_intf_bind_pingpong_blk( + struct dpu_hw_intf *intf, + bool enable, + const enum dpu_pingpong pp) +{ + struct dpu_hw_blk_reg_map *c; + u32 mux_cfg; + + if (!intf) + return; + + c = &intf->hw; + + mux_cfg = DPU_REG_READ(c, INTF_MUX); + mux_cfg &= ~0xf; + + if (enable) + mux_cfg |= (pp - PINGPONG_0) & 0x7; + else + mux_cfg |= 0xf; + + DPU_REG_WRITE(c, INTF_MUX, mux_cfg); +} + static void dpu_hw_intf_get_status( struct dpu_hw_intf *intf, struct intf_status *s) @@ -254,16 +280,18 @@ static void _setup_intf_ops(struct dpu_hw_intf_ops *ops, ops->get_status = dpu_hw_intf_get_status; ops->enable_timing = dpu_hw_intf_enable_timing_engine; ops->get_line_count = dpu_hw_intf_get_line_count; + if (cap & BIT(DPU_CTL_ACTIVE_CFG)) + ops->bind_pingpong_blk = dpu_hw_intf_bind_pingpong_blk; } static struct dpu_hw_blk_ops dpu_hw_ops; struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx, void __iomem *addr, - struct dpu_mdss_cfg *m) + const struct dpu_mdss_cfg *m) { struct dpu_hw_intf *c; - struct dpu_intf_cfg *cfg; + const struct dpu_intf_cfg *cfg; c = kzalloc(sizeof(*c), GFP_KERNEL); if (!c) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h index 
b03acc225c9b37e7b37e0682ceb5e36dd8ca55fd..85468981632de7e5dcb7ec78db6e3577138105e8 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h @@ -52,6 +52,8 @@ struct intf_status { * @ enable_timing: enable/disable timing engine * @ get_status: returns if timing engine is enabled or not * @ get_line_count: reads current vertical line counter + * @bind_pingpong_blk: enable/disable the connection with pingpong which will + * feed pixels to this interface */ struct dpu_hw_intf_ops { void (*setup_timing_gen)(struct dpu_hw_intf *intf, @@ -68,6 +70,10 @@ struct dpu_hw_intf_ops { struct intf_status *status); u32 (*get_line_count)(struct dpu_hw_intf *intf); + + void (*bind_pingpong_blk)(struct dpu_hw_intf *intf, + bool enable, + const enum dpu_pingpong pp); }; struct dpu_hw_intf { @@ -92,7 +98,7 @@ struct dpu_hw_intf { */ struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx, void __iomem *addr, - struct dpu_mdss_cfg *m); + const struct dpu_mdss_cfg *m); /** * dpu_hw_intf_destroy(): Destroys INTF driver context diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c index 5bc39baa746a2436e7ceebe8714071596b4bb41e..37becd43bd542fac065d5bb8353026e1f5262190 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c @@ -24,8 +24,8 @@ #define LM_BLEND0_FG_ALPHA 0x04 #define LM_BLEND0_BG_ALPHA 0x08 -static struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer, - struct dpu_mdss_cfg *m, +static const struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer, + const struct dpu_mdss_cfg *m, void __iomem *addr, struct dpu_hw_blk_reg_map *b) { @@ -147,12 +147,13 @@ static void dpu_hw_lm_setup_color3(struct dpu_hw_mixer *ctx, DPU_REG_WRITE(c, LM_OP_MODE, op_mode); } -static void _setup_mixer_ops(struct dpu_mdss_cfg *m, +static void _setup_mixer_ops(const struct dpu_mdss_cfg *m, struct dpu_hw_lm_ops *ops, unsigned long features) { ops->setup_mixer_out = dpu_hw_lm_setup_out; - if (IS_SDM845_TARGET(m->hwversion) || IS_SDM670_TARGET(m->hwversion)) + if (IS_SDM845_TARGET(m->hwversion) || IS_SDM670_TARGET(m->hwversion) + || IS_SC7180_TARGET(m->hwversion)) ops->setup_blend_config = dpu_hw_lm_setup_blend_config_sdm845; else ops->setup_blend_config = dpu_hw_lm_setup_blend_config; @@ -164,10 +165,10 @@ static struct dpu_hw_blk_ops dpu_hw_ops; struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx, void __iomem *addr, - struct dpu_mdss_cfg *m) + const struct dpu_mdss_cfg *m) { struct dpu_hw_mixer *c; - struct dpu_lm_cfg *cfg; + const struct dpu_lm_cfg *cfg; c = kzalloc(sizeof(*c), GFP_KERNEL); if (!c) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h index 147ace31cfc253a0035320a394416d3b1ed263b7..4a6b2de19ef6e771fa12d6605585edff041a5a20 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h @@ -91,7 +91,7 @@ static inline struct dpu_hw_mixer *to_dpu_hw_mixer(struct dpu_hw_blk *hw) */ struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx, void __iomem *addr, - struct dpu_mdss_cfg *m); + const struct dpu_mdss_cfg *m); /** * dpu_hw_lm_destroy(): Destroys layer mixer driver context diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c index 5dbaba9fd1808d6ac43a5705312634901ade0ef5..d110a40f0e7308c99814399a08b1523d13687038 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c @@ -28,8 +28,8 @@ 
#define PP_FBC_BUDGET_CTL 0x038 #define PP_FBC_LOSSY_MODE 0x03C -static struct dpu_pingpong_cfg *_pingpong_offset(enum dpu_pingpong pp, - struct dpu_mdss_cfg *m, +static const struct dpu_pingpong_cfg *_pingpong_offset(enum dpu_pingpong pp, + const struct dpu_mdss_cfg *m, void __iomem *addr, struct dpu_hw_blk_reg_map *b) { @@ -195,10 +195,10 @@ static struct dpu_hw_blk_ops dpu_hw_ops; struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx, void __iomem *addr, - struct dpu_mdss_cfg *m) + const struct dpu_mdss_cfg *m) { struct dpu_hw_pingpong *c; - struct dpu_pingpong_cfg *cfg; + const struct dpu_pingpong_cfg *cfg; c = kzalloc(sizeof(*c), GFP_KERNEL); if (!c) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h index 58bdb9279aa8f9a0b158c1009335f9cf8d125cd4..3d6f46b1db30810aa464ec2df6ace4a625662657 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h @@ -106,7 +106,7 @@ struct dpu_hw_pingpong { */ struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx, void __iomem *addr, - struct dpu_mdss_cfg *m); + const struct dpu_mdss_cfg *m); /** * dpu_hw_pingpong_destroy - destroys pingpong driver context diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c index 4f8b813aab81019d17671cd2d845f11b575aa5c7..82c5dbfdabc7594ac0eaf9cbe43ef438715387d8 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c @@ -132,6 +132,7 @@ /* traffic shaper clock in Hz */ #define TS_CLK 19200000 + static int _sspp_subblk_offset(struct dpu_hw_pipe *ctx, int s_id, u32 *idx) @@ -657,7 +658,8 @@ static void _setup_layer_ops(struct dpu_hw_pipe *c, test_bit(DPU_SSPP_SMART_DMA_V2, &c->cap->features)) c->ops.setup_multirect = dpu_hw_sspp_setup_multirect; - if (test_bit(DPU_SSPP_SCALER_QSEED3, &features)) { + if (test_bit(DPU_SSPP_SCALER_QSEED3, &features) || + test_bit(DPU_SSPP_SCALER_QSEED4, &features)) { c->ops.setup_scaler = _dpu_hw_sspp_setup_scaler3; c->ops.get_scaler_ver = _dpu_hw_sspp_get_scaler3_ver; } @@ -666,7 +668,7 @@ static void _setup_layer_ops(struct dpu_hw_pipe *c, c->ops.setup_cdp = dpu_hw_sspp_setup_cdp; } -static struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp, +static const struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp, void __iomem *addr, struct dpu_mdss_cfg *catalog, struct dpu_hw_blk_reg_map *b) @@ -696,7 +698,7 @@ struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx, bool is_virtual_pipe) { struct dpu_hw_pipe *hw_pipe; - struct dpu_sspp_cfg *cfg; + const struct dpu_sspp_cfg *cfg; if (!addr || !catalog) return ERR_PTR(-EINVAL); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h index a3680b482b41b3b4267db1aa4822d03c3aa8f4ff..85b018a9b03c2edf367791c18eb827bae75f9524 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h @@ -27,7 +27,8 @@ struct dpu_hw_pipe; */ #define DPU_SSPP_SCALER ((1UL << DPU_SSPP_SCALER_RGB) | \ (1UL << DPU_SSPP_SCALER_QSEED2) | \ - (1UL << DPU_SSPP_SCALER_QSEED3)) + (1UL << DPU_SSPP_SCALER_QSEED3) | \ + (1UL << DPU_SSPP_SCALER_QSEED4)) /** * Component indices @@ -373,7 +374,7 @@ struct dpu_hw_pipe { struct dpu_hw_blk base; struct dpu_hw_blk_reg_map hw; struct dpu_mdss_cfg *catalog; - struct dpu_mdp_cfg *mdp; + const struct dpu_mdp_cfg *mdp; /* Pipe */ enum dpu_sspp idx; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c 
b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c index 27fbeb504362b310f517a01974f59893c7d8f2b4..078afc5f58820e81b2f8994f10ff6538719a033a 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c @@ -93,19 +93,12 @@ int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable) DEV_DBG("%pS->%s: enable '%s'\n", __builtin_return_address(0), __func__, clk_arry[i].clk_name); - if (clk_arry[i].clk) { - rc = clk_prepare_enable(clk_arry[i].clk); - if (rc) - DEV_ERR("%pS->%s: %s en fail. rc=%d\n", - __builtin_return_address(0), - __func__, - clk_arry[i].clk_name, rc); - } else { - DEV_ERR("%pS->%s: '%s' is not available\n", - __builtin_return_address(0), __func__, - clk_arry[i].clk_name); - rc = -EPERM; - } + rc = clk_prepare_enable(clk_arry[i].clk); + if (rc) + DEV_ERR("%pS->%s: %s en fail. rc=%d\n", + __builtin_return_address(0), + __func__, + clk_arry[i].clk_name, rc); if (rc && i) { msm_dss_enable_clk(&clk_arry[i - 1], @@ -119,12 +112,7 @@ int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable) __builtin_return_address(0), __func__, clk_arry[i].clk_name); - if (clk_arry[i].clk) - clk_disable_unprepare(clk_arry[i].clk); - else - DEV_ERR("%pS->%s: '%s' is not available\n", - __builtin_return_address(0), __func__, - clk_arry[i].clk_name); + clk_disable_unprepare(clk_arry[i].clk); } } @@ -187,6 +175,7 @@ int msm_dss_parse_clock(struct platform_device *pdev, continue; mp->clk_config[i].rate = rate; mp->clk_config[i].type = DSS_CLK_PCLK; + mp->clk_config[i].max_rate = rate; } mp->num_clk = num_clk; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index 6c92f0fbeac983f013eb6f5a50f27b8cf93a6c3f..cb08fafb1dc144195027be506be679794390ccd4 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -1059,6 +1059,7 @@ static const struct dev_pm_ops dpu_pm_ops = { static const struct of_device_id dpu_dt_match[] = { { .compatible = "qcom,sdm845-dpu", }, + { .compatible = "qcom,sc7180-dpu", }, {} }; MODULE_DEVICE_TABLE(of, dpu_dt_match); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index 58d5acbcfc5c21dccb286488b17f87d4a6906321..3b9c33e694bf4c76f42b4536f3b739fdcc47cb35 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c @@ -53,8 +53,13 @@ enum { R_MAX }; +/* + * Default Preload Values + */ #define DPU_QSEED3_DEFAULT_PRELOAD_H 0x4 #define DPU_QSEED3_DEFAULT_PRELOAD_V 0x3 +#define DPU_QSEED4_DEFAULT_PRELOAD_V 0x2 +#define DPU_QSEED4_DEFAULT_PRELOAD_H 0x4 #define DEFAULT_REFRESH_RATE 60 @@ -477,8 +482,16 @@ static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu, scale_cfg->src_width[i] /= chroma_subsmpl_h; scale_cfg->src_height[i] /= chroma_subsmpl_v; } - scale_cfg->preload_x[i] = DPU_QSEED3_DEFAULT_PRELOAD_H; - scale_cfg->preload_y[i] = DPU_QSEED3_DEFAULT_PRELOAD_V; + + if (pdpu->pipe_hw->cap->features & + BIT(DPU_SSPP_SCALER_QSEED4)) { + scale_cfg->preload_x[i] = DPU_QSEED4_DEFAULT_PRELOAD_H; + scale_cfg->preload_y[i] = DPU_QSEED4_DEFAULT_PRELOAD_V; + } else { + scale_cfg->preload_x[i] = DPU_QSEED3_DEFAULT_PRELOAD_H; + scale_cfg->preload_y[i] = DPU_QSEED3_DEFAULT_PRELOAD_V; + } + pstate->pixel_ext.num_ext_pxls_top[i] = scale_cfg->src_height[i]; pstate->pixel_ext.num_ext_pxls_left[i] = @@ -738,7 +751,7 @@ done: } else { pstate[R0]->multirect_index = DPU_SSPP_RECT_0; pstate[R1]->multirect_index = DPU_SSPP_RECT_1; - }; + } 
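	/* Descriptive note (not in the original patch): at this point both
	 * planes passed multirect validation and share a single SSPP, with
	 * the first plane on hardware rect 0 and the second on rect 1. */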
DPU_DEBUG_PLANE(dpu_plane[R0], "R0: %d - %d\n", pstate[R0]->multirect_mode, pstate[R0]->multirect_index); @@ -858,7 +871,7 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, pdpu->pipe_sblk->maxupscale << 16, true, true); if (ret) { - DPU_ERROR_PLANE(pdpu, "Check plane state failed (%d)\n", ret); + DPU_DEBUG_PLANE(pdpu, "Check plane state failed (%d)\n", ret); return ret; } if (!state->visible) @@ -884,13 +897,13 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, (!(pdpu->features & DPU_SSPP_SCALER) || !(pdpu->features & (BIT(DPU_SSPP_CSC) | BIT(DPU_SSPP_CSC_10BIT))))) { - DPU_ERROR_PLANE(pdpu, + DPU_DEBUG_PLANE(pdpu, "plane doesn't have scaler/csc for yuv\n"); return -EINVAL; /* check src bounds */ } else if (!dpu_plane_validate_src(&src, &fb_rect, min_src_size)) { - DPU_ERROR_PLANE(pdpu, "invalid source " DRM_RECT_FMT "\n", + DPU_DEBUG_PLANE(pdpu, "invalid source " DRM_RECT_FMT "\n", DRM_RECT_ARG(&src)); return -E2BIG; @@ -899,19 +912,19 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, (src.x1 & 0x1 || src.y1 & 0x1 || drm_rect_width(&src) & 0x1 || drm_rect_height(&src) & 0x1)) { - DPU_ERROR_PLANE(pdpu, "invalid yuv source " DRM_RECT_FMT "\n", + DPU_DEBUG_PLANE(pdpu, "invalid yuv source " DRM_RECT_FMT "\n", DRM_RECT_ARG(&src)); return -EINVAL; /* min dst support */ } else if (drm_rect_width(&dst) < 0x1 || drm_rect_height(&dst) < 0x1) { - DPU_ERROR_PLANE(pdpu, "invalid dest rect " DRM_RECT_FMT "\n", + DPU_DEBUG_PLANE(pdpu, "invalid dest rect " DRM_RECT_FMT "\n", DRM_RECT_ARG(&dst)); return -EINVAL; /* check decimated source width */ } else if (drm_rect_width(&src) > max_linewidth) { - DPU_ERROR_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n", + DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n", DRM_RECT_ARG(&src), max_linewidth); return -E2BIG; } @@ -1337,7 +1350,8 @@ static int _dpu_plane_init_debugfs(struct drm_plane *plane) pdpu->debugfs_root, &pdpu->debugfs_src); if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) || - cfg->features & BIT(DPU_SSPP_SCALER_QSEED2)) { + cfg->features & BIT(DPU_SSPP_SCALER_QSEED2) || + cfg->features & BIT(DPU_SSPP_SCALER_QSEED4)) { dpu_debugfs_setup_regset32(&pdpu->debugfs_scaler, sblk->scaler_blk.base + cfg->base, sblk->scaler_blk.len, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c index ddc8412731afa46014dbf35ab0201495a87cfffa..23f5b1433b357d1293e2b0b80505670bb60f6f79 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c @@ -141,11 +141,11 @@ int dpu_rm_destroy(struct dpu_rm *rm) static int _dpu_rm_hw_blk_create( struct dpu_rm *rm, - struct dpu_mdss_cfg *cat, + const struct dpu_mdss_cfg *cat, void __iomem *mmio, enum dpu_hw_blk_type type, uint32_t id, - void *hw_catalog_info) + const void *hw_catalog_info) { struct dpu_rm_hw_blk *blk; void *hw; @@ -215,7 +215,7 @@ int dpu_rm_init(struct dpu_rm *rm, /* Interrogate HW catalog and create tracking items for hw blocks */ for (i = 0; i < cat->mixer_count; i++) { - struct dpu_lm_cfg *lm = &cat->mixer[i]; + const struct dpu_lm_cfg *lm = &cat->mixer[i]; if (lm->pingpong == PINGPONG_MAX) { DPU_DEBUG("skip mixer %d without pingpong\n", lm->id); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c index 991f4c8f8a1278a83bcccb444680aa7c775ae3e6..93ab36bd8df3b46968ca76ea55933e1f39527b96 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c @@ -299,7 +299,7 @@ void dpu_debugfs_vbif_init(struct 
dpu_kms *dpu_kms, struct dentry *debugfs_root) entry = debugfs_create_dir("vbif", debugfs_root); for (i = 0; i < dpu_kms->catalog->vbif_count; i++) { - struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i]; + const struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i]; snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id); @@ -318,7 +318,7 @@ void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root) (u32 *)&vbif->default_ot_wr_limit); for (j = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) { - struct dpu_vbif_dynamic_ot_cfg *cfg = + const struct dpu_vbif_dynamic_ot_cfg *cfg = &vbif->dynamic_ot_rd_tbl.cfg[j]; snprintf(vbif_name, sizeof(vbif_name), @@ -332,7 +332,7 @@ void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root) } for (j = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) { - struct dpu_vbif_dynamic_ot_cfg *cfg = + const struct dpu_vbif_dynamic_ot_cfg *cfg = &vbif->dynamic_ot_wr_tbl.cfg[j]; snprintf(vbif_name, sizeof(vbif_name), diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c index 772f0753ed38fc9a1ece103f51ba5c7f90effb9b..aaf2f26f8505cb1cb6bf9a314071b41cb1d579f6 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c @@ -121,7 +121,7 @@ static void mdp4_dsi_encoder_enable(struct drm_encoder *encoder) if (mdp4_dsi_encoder->enabled) return; - mdp4_crtc_set_config(encoder->crtc, + mdp4_crtc_set_config(encoder->crtc, MDP4_DMA_CONFIG_PACK_ALIGN_MSB | MDP4_DMA_CONFIG_DEFLKR_EN | MDP4_DMA_CONFIG_DITHER_EN | diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c index 1f48f64539a2a54894c37843bfd216a44016e2a1..e3c4c250238b70c63d153e432cfa3c10b8b1ef5b 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c @@ -902,7 +902,7 @@ struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms, major, minor); ret = -ENXIO; goto fail; - }; + } /* only after mdp5_cfg global pointer's init can we access the hw */ for (i = 0; i < num_handlers; i++) { diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h index eff1a4c612584b10f36f9e43b1a0b8d458e7ae97..4de771d6f0be88a394f17f99027ffb36f84dc6a8 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.h +++ b/drivers/gpu/drm/msm/dsi/dsi.h @@ -178,6 +178,8 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host, int msm_dsi_host_init(struct msm_dsi *msm_dsi); int msm_dsi_runtime_suspend(struct device *dev); int msm_dsi_runtime_resume(struct device *dev); +int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host); +int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host); int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host); int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host); void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host); diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c index 86ad3fdf207d6f362576af08880774419459b023..813d69deb5e8d4760510eaa701fe3e502d2b5b30 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c @@ -153,6 +153,10 @@ static const char * const dsi_sdm845_bus_clk_names[] = { "iface", "bus", }; +static const char * const dsi_sc7180_bus_clk_names[] = { + "iface", "bus", +}; + static const struct msm_dsi_config sdm845_dsi_cfg = { .io_offset = DSI_6G_REG_SHIFT, .reg_cfg = { @@ -167,7 +171,22 @@ static const struct msm_dsi_config sdm845_dsi_cfg = { .num_dsi = 2, }; +static const struct 
msm_dsi_config sc7180_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .reg_cfg = { + .num = 1, + .regs = { + {"vdda", 21800, 4 }, /* 1.2 V */ + }, + }, + .bus_clk_names = dsi_sc7180_bus_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_sc7180_bus_clk_names), + .io_start = { 0xae94000 }, + .num_dsi = 1, +}; + static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = { + .link_clk_set_rate = dsi_link_clk_set_rate_v2, .link_clk_enable = dsi_link_clk_enable_v2, .link_clk_disable = dsi_link_clk_disable_v2, .clk_init_ver = dsi_clk_init_v2, @@ -179,6 +198,7 @@ static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = { }; static const struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = { + .link_clk_set_rate = dsi_link_clk_set_rate_6g, .link_clk_enable = dsi_link_clk_enable_6g, .link_clk_disable = dsi_link_clk_disable_6g, .clk_init_ver = NULL, @@ -190,6 +210,7 @@ static const struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = { }; static const struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = { + .link_clk_set_rate = dsi_link_clk_set_rate_6g, .link_clk_enable = dsi_link_clk_enable_6g, .link_clk_disable = dsi_link_clk_disable_6g, .clk_init_ver = dsi_clk_init_6g_v2, @@ -223,6 +244,9 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = { &msm8998_dsi_cfg, &msm_dsi_6g_v2_host_ops}, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1, &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_1, + &sc7180_dsi_cfg, &msm_dsi_6g_v2_host_ops}, + }; const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor) diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h index 50a37ceb6a253119cb185ba8cf6c5aeff1ed4bc7..217e24a651781d5b95d944544566efcd77ee3597 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h @@ -20,6 +20,7 @@ #define MSM_DSI_6G_VER_MINOR_V1_4_2 0x10040002 #define MSM_DSI_6G_VER_MINOR_V2_2_0 0x20000000 #define MSM_DSI_6G_VER_MINOR_V2_2_1 0x20020001 +#define MSM_DSI_6G_VER_MINOR_V2_4_1 0x20040001 #define MSM_DSI_V2_VER_MINOR_8064 0x0 @@ -35,6 +36,7 @@ struct msm_dsi_config { }; struct msm_dsi_host_cfg_ops { + int (*link_clk_set_rate)(struct msm_dsi_host *msm_host); int (*link_clk_enable)(struct msm_dsi_host *msm_host); void (*link_clk_disable)(struct msm_dsi_host *msm_host); int (*clk_init_ver)(struct msm_dsi_host *msm_host); diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 458cec82ae13fbe2d45ff39577ff17aa49d46b41..11ae5b8444c329f17b3193a22631e0df875fe707 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -505,7 +505,7 @@ int msm_dsi_runtime_resume(struct device *dev) return dsi_bus_clk_enable(msm_host); } -int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host) +int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host) { int ret; @@ -515,13 +515,13 @@ int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host) ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate); if (ret) { pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret); - goto error; + return ret; } ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate); if (ret) { pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret); - goto error; + return ret; } if (msm_host->byte_intf_clk) { @@ -530,10 +530,18 @@ int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host) if (ret) { pr_err("%s: Failed to set rate byte intf clk, %d\n", __func__, ret); - goto error; + return ret; } } + return 0; +} + + +int 
dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host) +{ + int ret; + ret = clk_prepare_enable(msm_host->esc_clk); if (ret) { pr_err("%s: Failed to enable dsi esc clk\n", __func__); @@ -573,7 +581,7 @@ error: return ret; } -int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host) +int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host) { int ret; @@ -584,27 +592,34 @@ int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host) ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate); if (ret) { pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret); - goto error; + return ret; } ret = clk_set_rate(msm_host->esc_clk, msm_host->esc_clk_rate); if (ret) { pr_err("%s: Failed to set rate esc clk, %d\n", __func__, ret); - goto error; + return ret; } ret = clk_set_rate(msm_host->src_clk, msm_host->src_clk_rate); if (ret) { pr_err("%s: Failed to set rate src clk, %d\n", __func__, ret); - goto error; + return ret; } ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate); if (ret) { pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret); - goto error; + return ret; } + return 0; +} + +int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host) +{ + int ret; + ret = clk_prepare_enable(msm_host->byte_clk); if (ret) { pr_err("%s: Failed to enable dsi byte clk\n", __func__); @@ -818,7 +833,7 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable, u32 flags = msm_host->mode_flags; enum mipi_dsi_pixel_format mipi_fmt = msm_host->format; const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; - u32 data = 0; + u32 data = 0, lane_ctrl = 0; if (!enable) { dsi_write(msm_host, REG_DSI_CTRL, 0); @@ -906,9 +921,11 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable, dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL, DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(msm_host->dlane_swap)); - if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)) + if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)) { + lane_ctrl = dsi_read(msm_host, REG_DSI_LANE_CTRL); dsi_write(msm_host, REG_DSI_LANE_CTRL, - DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST); + lane_ctrl | DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST); + } data |= DSI_CTRL_ENABLE; @@ -1996,6 +2013,7 @@ int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host, * mdp clock need to be enabled to receive dsi interrupt */ pm_runtime_get_sync(&msm_host->pdev->dev); + cfg_hnd->ops->link_clk_set_rate(msm_host); cfg_hnd->ops->link_clk_enable(msm_host); /* TODO: vote for bus bandwidth */ @@ -2344,7 +2362,9 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host, } pm_runtime_get_sync(&msm_host->pdev->dev); - ret = cfg_hnd->ops->link_clk_enable(msm_host); + ret = cfg_hnd->ops->link_clk_set_rate(msm_host); + if (!ret) + ret = cfg_hnd->ops->link_clk_enable(msm_host); if (ret) { pr_err("%s: failed to enable link clocks. 
ret=%d\n", __func__, ret); diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index 0fc29f1be8cc6ddb4b02aac6be4bc66b8aec3741..104115d112eba6aebc87cb464c9b89a3c513b945 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -432,20 +432,8 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge) } } - if (panel) { - ret = drm_panel_enable(panel); - if (ret) { - pr_err("%s: enable panel %d failed, %d\n", __func__, id, - ret); - goto panel_en_fail; - } - } - return; -panel_en_fail: - if (is_dual_dsi && msm_dsi1) - msm_dsi_host_disable(msm_dsi1->host); host1_en_fail: msm_dsi_host_disable(host); host_en_fail: @@ -464,12 +452,51 @@ phy_en_fail: static void dsi_mgr_bridge_enable(struct drm_bridge *bridge) { - DBG(""); + int id = dsi_mgr_bridge_get_id(bridge); + struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); + struct drm_panel *panel = msm_dsi->panel; + bool is_dual_dsi = IS_DUAL_DSI(); + int ret; + + DBG("id=%d", id); + if (!msm_dsi_device_connected(msm_dsi)) + return; + + /* Do nothing with the host if it is slave-DSI in case of dual DSI */ + if (is_dual_dsi && !IS_MASTER_DSI_LINK(id)) + return; + + if (panel) { + ret = drm_panel_enable(panel); + if (ret) { + pr_err("%s: enable panel %d failed, %d\n", __func__, id, + ret); + } + } } static void dsi_mgr_bridge_disable(struct drm_bridge *bridge) { - DBG(""); + int id = dsi_mgr_bridge_get_id(bridge); + struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); + struct drm_panel *panel = msm_dsi->panel; + bool is_dual_dsi = IS_DUAL_DSI(); + int ret; + + DBG("id=%d", id); + if (!msm_dsi_device_connected(msm_dsi)) + return; + + /* Do nothing with the host if it is slave-DSI in case of dual DSI */ + if (is_dual_dsi && !IS_MASTER_DSI_LINK(id)) + return; + + if (panel) { + ret = drm_panel_disable(panel); + if (ret) + pr_err("%s: Panel %d OFF failed, %d\n", __func__, id, + ret); + } } static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge) @@ -495,13 +522,6 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge) if (is_dual_dsi && !IS_MASTER_DSI_LINK(id)) goto disable_phy; - if (panel) { - ret = drm_panel_disable(panel); - if (ret) - pr_err("%s: Panel %d OFF failed, %d\n", __func__, id, - ret); - } - ret = msm_dsi_host_disable(host); if (ret) pr_err("%s: host %d disable failed, %d\n", __func__, id, ret); diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c index 59702684d576c3a7eb49f7542188705b938b35cb..58707a1f3878f0e5c711d441a646c5255ad3d011 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c @@ -101,7 +101,7 @@ static int gpio_config(struct hdmi *hdmi, bool on) gpiod_set_value_cansleep(gpio.gpiod, value); } - }; + } DBG("gpio off"); } diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index c84f0a8b3f2cebf76f99a70c01e4377fde610d0e..f50fefb87040976699546a686e5e0163f8ba33a4 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -1192,7 +1192,8 @@ static int add_display_components(struct device *dev, * the interfaces to our components list. 
*/ if (of_device_is_compatible(dev->of_node, "qcom,mdss") || - of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss")) { + of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss") || + of_device_is_compatible(dev->of_node, "qcom,sc7180-mdss")) { ret = of_platform_populate(dev->of_node, NULL, NULL, dev); if (ret) { DRM_DEV_ERROR(dev, "failed to populate children devices\n"); @@ -1317,6 +1318,7 @@ static const struct of_device_id dt_match[] = { { .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 }, { .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 }, { .compatible = "qcom,sdm845-mdss", .data = (void *)KMS_DPU }, + { .compatible = "qcom,sc7180-mdss", .data = (void *)KMS_DPU }, {} }; MODULE_DEVICE_TABLE(of, dt_match); diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index ab8f0f9c9dc8823bdf0fb8657b6942d6f41aee67..be5bc2e8425c579de32cd5015e3cc71bf005c1a0 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -111,8 +111,15 @@ struct msm_gpu { struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk; uint32_t fast_rate; + /* The gfx-mem interconnect path that's used by all GPU types. */ struct icc_path *icc_path; + /* + * Second interconnect path for some A3xx and all A4xx GPUs to the + * On Chip MEMory (OCMEM). + */ + struct icc_path *ocmem_icc_path; + /* Hang and Inactivity Detection: */ #define DRM_MSM_INACTIVE_PERIOD 66 /* in ms (roughly four frames) */ diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig index 9c990266e8768674dc18c8fdf0bd0343265a9044..d6e4ae1ef7053a1730345a9821db4da3d836be50 100644 --- a/drivers/gpu/drm/nouveau/Kconfig +++ b/drivers/gpu/drm/nouveau/Kconfig @@ -2,6 +2,7 @@ config DRM_NOUVEAU tristate "Nouveau (NVIDIA) cards" depends on DRM && PCI && MMU + select IOMMU_API select FW_LOADER select DRM_KMS_HELPER select DRM_TTM @@ -16,6 +17,7 @@ config DRM_NOUVEAU select INPUT if ACPI && X86 select THERMAL if ACPI && X86 select ACPI_VIDEO if ACPI && X86 + select SND_HDA_COMPONENT if SND_HDA_CORE help Choose this option for open-source NVIDIA support. 
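A note on the MSM DSI clock refactor above (dsi_host.c): those hunks split clock-rate programming out of the enable helpers into dedicated link_clk_set_rate callbacks, and msm_dsi_host_power_on() now only calls link_clk_enable() once every rate was accepted. Below is a minimal sketch of that two-phase pattern against the generic Linux clk API; the example_* names and the two-clock layout are illustrative assumptions, not the driver's real structures.

#include <linux/clk.h>

struct example_dsi_host {
	struct clk *byte_clk;
	struct clk *pixel_clk;
	unsigned long byte_clk_rate;
	unsigned long pixel_clk_rate;
};

/* Phase 1: program rates only; nothing is enabled yet, so a failure
 * needs no unwinding. */
static int example_link_clk_set_rate(struct example_dsi_host *host)
{
	int ret;

	ret = clk_set_rate(host->byte_clk, host->byte_clk_rate);
	if (ret)
		return ret;

	return clk_set_rate(host->pixel_clk, host->pixel_clk_rate);
}

/* Phase 2: enable the clocks, unwinding on partial failure. */
static int example_link_clk_enable(struct example_dsi_host *host)
{
	int ret;

	ret = clk_prepare_enable(host->byte_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(host->pixel_clk);
	if (ret)
		clk_disable_unprepare(host->byte_clk);

	return ret;
}

Keeping the phases separate means a rejected rate fails cleanly before anything is prepared, which is exactly why the power-on path above checks the set_rate return before calling the enable callback.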
diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c index 362495535e693050f169e0061b70dfea8e349422..9d4a2d97507e03660b432a410497717a497c4a43 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/arb.c +++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c @@ -53,8 +53,8 @@ struct nv_sim_state { static void nv04_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb) { - int pagemiss, cas, width, bpp; - int nvclks, mclks, pclks, crtpagemiss; + int pagemiss, cas, bpp; + int nvclks, mclks, crtpagemiss; int found, mclk_extra, mclk_loop, cbs, m1, p1; int mclk_freq, pclk_freq, nvclk_freq; int us_m, us_n, us_p, crtc_drain_rate; @@ -65,11 +65,9 @@ nv04_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb) nvclk_freq = arb->nvclk_khz; pagemiss = arb->mem_page_miss; cas = arb->mem_latency; - width = arb->memory_width >> 6; bpp = arb->bpp; cbs = 128; - pclks = 2; nvclks = 10; mclks = 13 + cas; mclk_extra = 3; diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c index 03466f04c7419162f91659f91d12cada10458a39..3a9489ed6544f0be5763ad55d0d89d5dc9e88f8c 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c +++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c @@ -644,16 +644,13 @@ static int nv17_tv_create_resources(struct drm_encoder *encoder, int i; if (nouveau_tv_norm) { - for (i = 0; i < num_tv_norms; i++) { - if (!strcmp(nv17_tv_norm_names[i], nouveau_tv_norm)) { - tv_enc->tv_norm = i; - break; - } - } - - if (i == num_tv_norms) + i = match_string(nv17_tv_norm_names, num_tv_norms, + nouveau_tv_norm); + if (i < 0) NV_WARN(drm, "Invalid TV norm setting \"%s\"\n", nouveau_tv_norm); + else + tv_enc->tv_norm = i; } drm_mode_create_tv_properties(dev, num_tv_norms, nv17_tv_norm_names); diff --git a/drivers/gpu/drm/nouveau/dispnv50/base907c.c b/drivers/gpu/drm/nouveau/dispnv50/base907c.c index 5f2de77e0f32e1bcce6a48a41c1546f4ba9b09a8..224a34c340fe72f49ae7900594a9a30b36b1fcc5 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/base907c.c +++ b/drivers/gpu/drm/nouveau/dispnv50/base907c.c @@ -75,12 +75,16 @@ base907c_xlut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) } } -static void -base907c_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) +static bool +base907c_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size) { - asyw->xlut.i.mode = 7; + if (size != 256 && size != 1024) + return false; + + asyw->xlut.i.mode = size == 1024 ? 
4 : 7; asyw->xlut.i.enable = 2; asyw->xlut.i.load = head907d_olut_load; + return true; } static inline u32 @@ -160,6 +164,7 @@ base907c = { .csc_set = base907c_csc_set, .csc_clr = base907c_csc_clr, .olut_core = true, + .ilut_size = 1024, .xlut_set = base907c_xlut_set, .xlut_clr = base907c_xlut_clr, .image_set = base907c_image_set, diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.h b/drivers/gpu/drm/nouveau/dispnv50/core.h index df8336b593f7c4adc02878a171f8abf8d927e721..ff94f3f6f264ef0c01e8c5811e104546f621a8d1 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/core.h +++ b/drivers/gpu/drm/nouveau/dispnv50/core.h @@ -6,6 +6,7 @@ struct nv50_core { const struct nv50_core_func *func; struct nv50_dmac chan; + bool assign_windows; }; int nv50_core_new(struct nouveau_drm *, struct nv50_core **); @@ -18,6 +19,10 @@ struct nv50_core_func { struct nvif_device *); void (*update)(struct nv50_core *, u32 *interlock, bool ntfy); + struct { + void (*owner)(struct nv50_core *); + } wndw; + const struct nv50_head_func *head; const struct nv50_outp_func { void (*ctrl)(struct nv50_core *, int or, u32 ctrl, @@ -48,6 +53,7 @@ int core917d_new(struct nouveau_drm *, s32, struct nv50_core **); int corec37d_new(struct nouveau_drm *, s32, struct nv50_core **); int corec37d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *); void corec37d_update(struct nv50_core *, u32 *, bool); +void corec37d_wndw_owner(struct nv50_core *); extern const struct nv50_outp_func sorc37d; int corec57d_new(struct nouveau_drm *, s32, struct nv50_core **); diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c index 40d9b654ab8c12286b552796cbb5d40390961741..3b36dc8d36b2d6103d6626007805280267574740 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c @@ -24,6 +24,20 @@ #include +void +corec37d_wndw_owner(struct nv50_core *core) +{ + const u32 windows = 8; /*XXX*/ + u32 *push, i; + if ((push = evo_wait(&core->chan, 2 * windows))) { + for (i = 0; i < windows; i++) { + evo_mthd(push, 0x1000 + (i * 0x080), 1); + evo_data(push, i >> 1); + } + evo_kick(push, &core->chan); + } +} + void corec37d_update(struct nv50_core *core, u32 *interlock, bool ntfy) { @@ -76,20 +90,18 @@ corec37d_init(struct nv50_core *core) { const u32 windows = 8; /*XXX*/ u32 *push, i; - if ((push = evo_wait(&core->chan, 2 + 6 * windows + 2))) { + if ((push = evo_wait(&core->chan, 2 + 5 * windows))) { evo_mthd(push, 0x0208, 1); evo_data(push, core->chan.sync.handle); for (i = 0; i < windows; i++) { - evo_mthd(push, 0x1000 + (i * 0x080), 3); - evo_data(push, i >> 1); + evo_mthd(push, 0x1004 + (i * 0x080), 2); evo_data(push, 0x0000001f); evo_data(push, 0x00000000); evo_mthd(push, 0x1010 + (i * 0x080), 1); evo_data(push, 0x00127fff); } - evo_mthd(push, 0x0200, 1); - evo_data(push, 0x00000001); evo_kick(push, &core->chan); + core->assign_windows = true; } } @@ -99,6 +111,7 @@ corec37d = { .ntfy_init = corec37d_ntfy_init, .ntfy_wait_done = corec37d_ntfy_wait_done, .update = corec37d_update, + .wndw.owner = corec37d_wndw_owner, .head = &headc37d, .sor = &sorc37d, }; diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c index b606d68cda10cd48883792c5262f8cfb392f8ab7..147adcd609378829f25dc5a1f4449703ebe9e713 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c @@ -27,20 +27,18 @@ corec57d_init(struct nv50_core *core) { const u32 windows = 8; /*XXX*/ u32 *push, i; - if 
((push = evo_wait(&core->chan, 2 + 6 * windows + 2))) { + if ((push = evo_wait(&core->chan, 2 + 5 * windows))) { evo_mthd(push, 0x0208, 1); evo_data(push, core->chan.sync.handle); for (i = 0; i < windows; i++) { - evo_mthd(push, 0x1000 + (i * 0x080), 3); - evo_data(push, i >> 1); + evo_mthd(push, 0x1004 + (i * 0x080), 2); evo_data(push, 0x0000000f); evo_data(push, 0x00000000); evo_mthd(push, 0x1010 + (i * 0x080), 1); evo_data(push, 0x00117fff); } - evo_mthd(push, 0x0200, 1); - evo_data(push, 0x00000001); evo_kick(push, &core->chan); + core->assign_windows = true; } } @@ -50,6 +48,7 @@ corec57d = { .ntfy_init = corec37d_ntfy_init, .ntfy_wait_done = corec37d_ntfy_wait_done, .update = corec37d_update, + .wndw.owner = corec37d_wndw_owner, .head = &headc57d, .sor = &sorc37d, }; diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 63425e24601896cb31238ddbcba0b9b7fae03e41..a3dc2ba19fb2b92704a280014544defa02e4a8e8 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -29,6 +29,7 @@ #include #include +#include #include #include @@ -476,12 +477,113 @@ nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe) return 0; } +/* + * audio component binding for ELD notification + */ +static void +nv50_audio_component_eld_notify(struct drm_audio_component *acomp, int port) +{ + if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) + acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, + port, -1); +} + +static int +nv50_audio_component_get_eld(struct device *kdev, int port, int pipe, + bool *enabled, unsigned char *buf, int max_bytes) +{ + struct drm_device *drm_dev = dev_get_drvdata(kdev); + struct nouveau_drm *drm = nouveau_drm(drm_dev); + struct drm_encoder *encoder; + struct nouveau_encoder *nv_encoder; + struct nouveau_connector *nv_connector; + struct nouveau_crtc *nv_crtc; + int ret = 0; + + *enabled = false; + drm_for_each_encoder(encoder, drm->dev) { + nv_encoder = nouveau_encoder(encoder); + nv_connector = nouveau_encoder_connector_get(nv_encoder); + nv_crtc = nouveau_crtc(encoder->crtc); + if (!nv_connector || !nv_crtc || nv_crtc->index != port) + continue; + *enabled = drm_detect_monitor_audio(nv_connector->edid); + if (*enabled) { + ret = drm_eld_size(nv_connector->base.eld); + memcpy(buf, nv_connector->base.eld, + min(max_bytes, ret)); + } + break; + } + return ret; +} + +static const struct drm_audio_component_ops nv50_audio_component_ops = { + .get_eld = nv50_audio_component_get_eld, +}; + +static int +nv50_audio_component_bind(struct device *kdev, struct device *hda_kdev, + void *data) +{ + struct drm_device *drm_dev = dev_get_drvdata(kdev); + struct nouveau_drm *drm = nouveau_drm(drm_dev); + struct drm_audio_component *acomp = data; + + if (WARN_ON(!device_link_add(hda_kdev, kdev, DL_FLAG_STATELESS))) + return -ENOMEM; + + drm_modeset_lock_all(drm_dev); + acomp->ops = &nv50_audio_component_ops; + acomp->dev = kdev; + drm->audio.component = acomp; + drm_modeset_unlock_all(drm_dev); + return 0; +} + +static void +nv50_audio_component_unbind(struct device *kdev, struct device *hda_kdev, + void *data) +{ + struct drm_device *drm_dev = dev_get_drvdata(kdev); + struct nouveau_drm *drm = nouveau_drm(drm_dev); + struct drm_audio_component *acomp = data; + + drm_modeset_lock_all(drm_dev); + drm->audio.component = NULL; + acomp->ops = NULL; + acomp->dev = NULL; + drm_modeset_unlock_all(drm_dev); +} + +static const struct component_ops nv50_audio_component_bind_ops = 
{ + .bind = nv50_audio_component_bind, + .unbind = nv50_audio_component_unbind, +}; + +static void +nv50_audio_component_init(struct nouveau_drm *drm) +{ + if (!component_add(drm->dev->dev, &nv50_audio_component_bind_ops)) + drm->audio.component_registered = true; +} + +static void +nv50_audio_component_fini(struct nouveau_drm *drm) +{ + if (drm->audio.component_registered) { + component_del(drm->dev->dev, &nv50_audio_component_bind_ops); + drm->audio.component_registered = false; + } +} + /****************************************************************************** * Audio *****************************************************************************/ static void nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc) { + struct nouveau_drm *drm = nouveau_drm(encoder->dev); struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nv50_disp *disp = nv50_disp(encoder->dev); struct { @@ -496,11 +598,14 @@ nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc) }; nvif_mthd(&disp->disp->object, 0, &args, sizeof(args)); + + nv50_audio_component_eld_notify(drm->audio.component, nv_crtc->index); } static void nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode) { + struct nouveau_drm *drm = nouveau_drm(encoder->dev); struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); struct nouveau_connector *nv_connector; @@ -527,6 +632,8 @@ nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode) nvif_mthd(&disp->disp->object, 0, &args, sizeof(args.base) + drm_eld_size(args.data)); + + nv50_audio_component_eld_notify(drm->audio.component, nv_crtc->index); } /****************************************************************************** @@ -660,7 +767,6 @@ struct nv50_mstm { struct nouveau_encoder *outp; struct drm_dp_mst_topology_mgr mgr; - struct nv50_msto *msto[4]; bool modified; bool disabled; @@ -726,7 +832,6 @@ nv50_msto_cleanup(struct nv50_msto *msto) drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port); msto->mstc = NULL; - msto->head = NULL; msto->disabled = false; } @@ -806,11 +911,11 @@ nv50_msto_atomic_check(struct drm_encoder *encoder, * topology */ asyh->or.bpc = min(connector->display_info.bpc, 8U); - asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3); + asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3, false); } slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, mstc->port, - asyh->dp.pbn); + asyh->dp.pbn, 0); if (slots < 0) return slots; @@ -872,7 +977,6 @@ nv50_msto_enable(struct drm_encoder *encoder) mstm->outp->update(mstm->outp, head->base.index, armh, proto, nv50_dp_bpc_to_depth(armh->or.bpc)); - msto->head = head; msto->mstc = mstc; mstm->modified = true; } @@ -913,45 +1017,40 @@ nv50_msto = { .destroy = nv50_msto_destroy, }; -static int -nv50_msto_new(struct drm_device *dev, u32 heads, const char *name, int id, - struct nv50_msto **pmsto) +static struct nv50_msto * +nv50_msto_new(struct drm_device *dev, struct nv50_head *head, int id) { struct nv50_msto *msto; int ret; - if (!(msto = *pmsto = kzalloc(sizeof(*msto), GFP_KERNEL))) - return -ENOMEM; + msto = kzalloc(sizeof(*msto), GFP_KERNEL); + if (!msto) + return ERR_PTR(-ENOMEM); ret = drm_encoder_init(dev, &msto->encoder, &nv50_msto, - DRM_MODE_ENCODER_DPMST, "%s-mst-%d", name, id); + DRM_MODE_ENCODER_DPMST, "mst-%d", id); if (ret) { - kfree(*pmsto); - *pmsto = NULL; - return ret; + kfree(msto); + return ERR_PTR(ret); } 
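	/* Descriptive note (not in the original patch): each MST stream
	 * encoder is now created per-head, exposes only that head's CRTC
	 * in possible_crtcs and keeps a fixed msto->head back-pointer,
	 * replacing the old per-mstm msto[4] array. */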
drm_encoder_helper_add(&msto->encoder, &nv50_msto_help); - msto->encoder.possible_crtcs = heads; - return 0; + msto->encoder.possible_crtcs = drm_crtc_mask(&head->base.base); + msto->head = head; + return msto; } static struct drm_encoder * nv50_mstc_atomic_best_encoder(struct drm_connector *connector, struct drm_connector_state *connector_state) { - struct nv50_head *head = nv50_head(connector_state->crtc); struct nv50_mstc *mstc = nv50_mstc(connector); + struct drm_crtc *crtc = connector_state->crtc; - return &mstc->mstm->msto[head->base.index]->encoder; -} - -static struct drm_encoder * -nv50_mstc_best_encoder(struct drm_connector *connector) -{ - struct nv50_mstc *mstc = nv50_mstc(connector); + if (!(mstc->mstm->outp->dcb->heads & drm_crtc_mask(crtc))) + return NULL; - return &mstc->mstm->msto[0]->encoder; + return &nv50_head(crtc)->msto->encoder; } static enum drm_mode_status @@ -1038,7 +1137,6 @@ static const struct drm_connector_helper_funcs nv50_mstc_help = { .get_modes = nv50_mstc_get_modes, .mode_valid = nv50_mstc_mode_valid, - .best_encoder = nv50_mstc_best_encoder, .atomic_best_encoder = nv50_mstc_atomic_best_encoder, .atomic_check = nv50_mstc_atomic_check, .detect_ctx = nv50_mstc_detect, @@ -1071,8 +1169,9 @@ nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port, const char *path, struct nv50_mstc **pmstc) { struct drm_device *dev = mstm->outp->base.base.dev; + struct drm_crtc *crtc; struct nv50_mstc *mstc; - int ret, i; + int ret; if (!(mstc = *pmstc = kzalloc(sizeof(*mstc), GFP_KERNEL))) return -ENOMEM; @@ -1092,8 +1191,13 @@ nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port, mstc->connector.funcs->reset(&mstc->connector); nouveau_conn_attach_properties(&mstc->connector); - for (i = 0; i < ARRAY_SIZE(mstm->msto) && mstm->msto[i]; i++) - drm_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder); + drm_for_each_crtc(crtc, dev) { + if (!(mstm->outp->dcb->heads & drm_crtc_mask(crtc))) + continue; + + drm_connector_attach_encoder(&mstc->connector, + &nv50_head(crtc)->msto->encoder); + } drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0); drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0); @@ -1367,7 +1471,7 @@ nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max, const int max_payloads = hweight8(outp->dcb->heads); struct drm_device *dev = outp->base.base.dev; struct nv50_mstm *mstm; - int ret, i; + int ret; u8 dpcd; /* This is a workaround for some monitors not functioning @@ -1390,13 +1494,6 @@ nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max, if (ret) return ret; - for (i = 0; i < max_payloads; i++) { - ret = nv50_msto_new(dev, outp->dcb->heads, outp->base.base.name, - i, &mstm->msto[i]); - if (ret) - return ret; - } - return 0; } @@ -1569,17 +1666,24 @@ nv50_sor_func = { .destroy = nv50_sor_destroy, }; +static bool nv50_has_mst(struct nouveau_drm *drm) +{ + struct nvkm_bios *bios = nvxx_bios(&drm->client.device); + u32 data; + u8 ver, hdr, cnt, len; + + data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len); + return data && ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04); +} + static int nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe) { struct nouveau_connector *nv_connector = nouveau_connector(connector); struct nouveau_drm *drm = nouveau_drm(connector->dev); - struct nvkm_bios *bios = nvxx_bios(&drm->client.device); struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device); struct 
nouveau_encoder *nv_encoder; struct drm_encoder *encoder; - u8 ver, hdr, cnt, len; - u32 data; int type, ret; switch (dcbe->type) { @@ -1624,10 +1728,9 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe) } if (nv_connector->type != DCB_CONNECTOR_eDP && - (data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len)) && - ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04)) { - ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16, - nv_connector->base.base.id, + nv50_has_mst(drm)) { + ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, + 16, nv_connector->base.base.id, &nv_encoder->dp.mstm); if (ret) return ret; @@ -1673,7 +1776,6 @@ nv50_pior_enable(struct drm_encoder *encoder) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); - struct nouveau_connector *nv_connector; struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state); struct nv50_core *core = nv50_disp(encoder->dev)->core; u8 owner = 1 << nv_crtc->index; @@ -1681,7 +1783,6 @@ nv50_pior_enable(struct drm_encoder *encoder) nv50_outp_acquire(nv_encoder); - nv_connector = nouveau_encoder_connector_get(nv_encoder); switch (asyh->or.bpc) { case 10: asyh->or.depth = 0x6; break; case 8: asyh->or.depth = 0x5; break; @@ -1832,6 +1933,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) struct nouveau_drm *drm = nouveau_drm(dev); struct nv50_disp *disp = nv50_disp(dev); struct nv50_atom *atom = nv50_atom(state); + struct nv50_core *core = disp->core; struct nv50_outp_atom *outp, *outt; u32 interlock[NV50_DISP_INTERLOCK__SIZE] = {}; int i; @@ -1950,6 +2052,21 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) } } + /* Update window->head assignment. + * + * This has to happen in an update that's not interlocked with + * any window channels to avoid hitting HW error checks. + * + *TODO: Proper handling of window ownership (Turing apparently + * supports non-fixed mappings). + */ + if (core->assign_windows) { + core->func->wndw.owner(core); + core->func->update(core, interlock, false); + core->assign_windows = false; + interlock[NV50_DISP_INTERLOCK_CORE] = 0; + } + /* Update plane(s). 
*/ for_each_new_plane_in_state(state, plane, new_plane_state, i) { struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state); @@ -2302,6 +2419,8 @@ nv50_display_destroy(struct drm_device *dev) { struct nv50_disp *disp = nv50_disp(dev); + nv50_audio_component_fini(nouveau_drm(dev)); + nv50_core_del(&disp->core); nouveau_bo_unmap(disp->sync); @@ -2323,6 +2442,7 @@ nv50_display_create(struct drm_device *dev) struct nv50_disp *disp; struct dcb_output *dcbe; int crtcs, ret, i; + bool has_mst = nv50_has_mst(drm); disp = kzalloc(sizeof(*disp), GFP_KERNEL); if (!disp) @@ -2371,11 +2491,37 @@ nv50_display_create(struct drm_device *dev) crtcs = 0x3; for (i = 0; i < fls(crtcs); i++) { + struct nv50_head *head; + if (!(crtcs & (1 << i))) continue; - ret = nv50_head_create(dev, i); - if (ret) + + head = nv50_head_create(dev, i); + if (IS_ERR(head)) { + ret = PTR_ERR(head); goto out; + } + + if (has_mst) { + head->msto = nv50_msto_new(dev, head, i); + if (IS_ERR(head->msto)) { + ret = PTR_ERR(head->msto); + head->msto = NULL; + goto out; + } + + /* + * FIXME: This is a hack to work around the following + * issues: + * + * https://gitlab.gnome.org/GNOME/mutter/issues/759 + * https://gitlab.freedesktop.org/xorg/xserver/merge_requests/277 + * + * Once these issues are closed, this should be + * removed + */ + head->msto->encoder.possible_crtcs = crtcs; + } } /* create encoder/connector objects based on VBIOS DCB table */ @@ -2423,6 +2569,8 @@ nv50_display_create(struct drm_device *dev) /* Disable vblank irqs aggressively for power-saving, safe on nv50+ */ dev->vblank_disable_immediate = true; + nv50_audio_component_init(drm); + out: if (ret) nv50_display_destroy(dev); diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h index c0a79531b0873d4242291981ce0ca94840e1258a..d54fe00ac3a3c22e1e0fc06638899c156a4cb5c9 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.h +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h @@ -4,6 +4,8 @@ #include "nouveau_display.h" +struct nv50_msto; + struct nv50_disp { struct nvif_disp *disp; struct nv50_core *core; diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c index c9692df2b76cca51604c0fb008643ff26a972be0..d9d64602947d516032ed658cb979dfbf86b0a440 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head.c +++ b/drivers/gpu/drm/nouveau/dispnv50/head.c @@ -213,6 +213,7 @@ nv50_head_atomic_check_lut(struct nv50_head *head, { struct nv50_disp *disp = nv50_disp(head->base.base.dev); struct drm_property_blob *olut = asyh->state.gamma_lut; + int size; /* Determine whether core output LUT should be enabled.
*/ if (olut) { @@ -229,14 +230,23 @@ nv50_head_atomic_check_lut(struct nv50_head *head, } } - if (!olut && !head->func->olut_identity) { - asyh->olut.handle = 0; - return 0; + if (!olut) { + if (!head->func->olut_identity) { + asyh->olut.handle = 0; + return 0; + } + size = 0; + } else { + size = drm_color_lut_size(olut); } + if (!head->func->olut(head, asyh, size)) { + DRM_DEBUG_KMS("Invalid olut\n"); + return -EINVAL; + } asyh->olut.handle = disp->core->chan.vram.handle; asyh->olut.buffer = !asyh->olut.buffer; - head->func->olut(head, asyh); + return 0; } @@ -473,7 +483,7 @@ nv50_head_func = { .atomic_destroy_state = nv50_head_atomic_destroy_state, }; -int +struct nv50_head * nv50_head_create(struct drm_device *dev, int index) { struct nouveau_drm *drm = nouveau_drm(dev); @@ -485,7 +495,7 @@ nv50_head_create(struct drm_device *dev, int index) head = kzalloc(sizeof(*head), GFP_KERNEL); if (!head) - return -ENOMEM; + return ERR_PTR(-ENOMEM); head->func = disp->core->func->head; head->base.index = index; @@ -503,27 +513,26 @@ nv50_head_create(struct drm_device *dev, int index) ret = nv50_curs_new(drm, head->base.index, &curs); if (ret) { kfree(head); - return ret; + return ERR_PTR(ret); } crtc = &head->base.base; drm_crtc_init_with_planes(dev, crtc, &base->plane, &curs->plane, &nv50_head_func, "head-%d", head->base.index); drm_crtc_helper_add(crtc, &nv50_head_help); + /* Keep the legacy gamma size at 256 to avoid compatibility issues */ drm_mode_crtc_set_gamma_size(crtc, 256); - if (disp->disp->object.oclass >= GF110_DISP) - drm_crtc_enable_color_mgmt(crtc, 256, true, 256); - else - drm_crtc_enable_color_mgmt(crtc, 0, false, 256); + drm_crtc_enable_color_mgmt(crtc, base->func->ilut_size, + disp->disp->object.oclass >= GF110_DISP, + head->func->olut_size); if (head->func->olut_set) { ret = nv50_lut_init(disp, &drm->client.mmu, &head->olut); - if (ret) - goto out; + if (ret) { + nv50_head_destroy(crtc); + return ERR_PTR(ret); + } } -out: - if (ret) - nv50_head_destroy(crtc); - return ret; + return head; } diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.h b/drivers/gpu/drm/nouveau/dispnv50/head.h index d1c002f534d4edaffb48bff227a0b1b81d90d986..c32b27cdaefc95db9902e6087c78dea7e732af48 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head.h +++ b/drivers/gpu/drm/nouveau/dispnv50/head.h @@ -11,17 +11,19 @@ struct nv50_head { const struct nv50_head_func *func; struct nouveau_crtc base; struct nv50_lut olut; + struct nv50_msto *msto; }; -int nv50_head_create(struct drm_device *, int index); +struct nv50_head *nv50_head_create(struct drm_device *, int index); void nv50_head_flush_set(struct nv50_head *, struct nv50_head_atom *); void nv50_head_flush_clr(struct nv50_head *, struct nv50_head_atom *, bool y); struct nv50_head_func { void (*view)(struct nv50_head *, struct nv50_head_atom *); void (*mode)(struct nv50_head *, struct nv50_head_atom *); - void (*olut)(struct nv50_head *, struct nv50_head_atom *); + bool (*olut)(struct nv50_head *, struct nv50_head_atom *, int); bool olut_identity; + int olut_size; void (*olut_set)(struct nv50_head *, struct nv50_head_atom *); void (*olut_clr)(struct nv50_head *); void (*core_calc)(struct nv50_head *, struct nv50_head_atom *); @@ -43,7 +45,7 @@ struct nv50_head_func { extern const struct nv50_head_func head507d; void head507d_view(struct nv50_head *, struct nv50_head_atom *); void head507d_mode(struct nv50_head *, struct nv50_head_atom *); -void head507d_olut(struct nv50_head *, struct nv50_head_atom *); +bool head507d_olut(struct nv50_head *, struct 
nv50_head_atom *, int); void head507d_core_calc(struct nv50_head *, struct nv50_head_atom *); void head507d_core_clr(struct nv50_head *); int head507d_curs_layout(struct nv50_head *, struct nv50_wndw_atom *, @@ -60,7 +62,7 @@ extern const struct nv50_head_func head827d; extern const struct nv50_head_func head907d; void head907d_view(struct nv50_head *, struct nv50_head_atom *); void head907d_mode(struct nv50_head *, struct nv50_head_atom *); -void head907d_olut(struct nv50_head *, struct nv50_head_atom *); +bool head907d_olut(struct nv50_head *, struct nv50_head_atom *, int); void head907d_olut_set(struct nv50_head *, struct nv50_head_atom *); void head907d_olut_clr(struct nv50_head *); void head907d_core_set(struct nv50_head *, struct nv50_head_atom *); diff --git a/drivers/gpu/drm/nouveau/dispnv50/head507d.c b/drivers/gpu/drm/nouveau/dispnv50/head507d.c index 7561be5ca707c5d2447ecb027349e60a004fc6fd..66ccf36b56a2be9179299c9da41e7e0b9d9eb946 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head507d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/head507d.c @@ -271,15 +271,19 @@ head507d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem) writew(readw(mem - 4), mem + 4); } -void -head507d_olut(struct nv50_head *head, struct nv50_head_atom *asyh) +bool +head507d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size) { + if (size != 256) + return false; + if (asyh->base.cpp == 1) asyh->olut.mode = 0; else asyh->olut.mode = 1; asyh->olut.load = head507d_olut_load; + return true; } void @@ -328,6 +332,7 @@ head507d = { .view = head507d_view, .mode = head507d_mode, .olut = head507d_olut, + .olut_size = 256, .olut_set = head507d_olut_set, .olut_clr = head507d_olut_clr, .core_calc = head507d_core_calc, diff --git a/drivers/gpu/drm/nouveau/dispnv50/head827d.c b/drivers/gpu/drm/nouveau/dispnv50/head827d.c index af5e7bd5978bf4ff848e11cc8852de8e16240a30..11877119eea49c316071370efac7cee859b66450 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head827d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/head827d.c @@ -108,6 +108,7 @@ head827d = { .view = head507d_view, .mode = head507d_mode, .olut = head507d_olut, + .olut_size = 256, .olut_set = head827d_olut_set, .olut_clr = head827d_olut_clr, .core_calc = head507d_core_calc, diff --git a/drivers/gpu/drm/nouveau/dispnv50/head907d.c b/drivers/gpu/drm/nouveau/dispnv50/head907d.c index c2d09dd97b1ff1a01c91a6593f7b8f4d18d12ea8..3002ec23d7a6fb91b0547c5f23f086ee8e7f5f9f 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head907d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/head907d.c @@ -230,11 +230,15 @@ head907d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem) writew(readw(mem - 4), mem + 4); } -void -head907d_olut(struct nv50_head *head, struct nv50_head_atom *asyh) +bool +head907d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size) { - asyh->olut.mode = 7; + if (size != 256 && size != 1024) + return false; + + asyh->olut.mode = size == 1024 ? 
4 : 7; asyh->olut.load = head907d_olut_load; + return true; } void @@ -285,6 +289,7 @@ head907d = { .view = head907d_view, .mode = head907d_mode, .olut = head907d_olut, + .olut_size = 1024, .olut_set = head907d_olut_set, .olut_clr = head907d_olut_clr, .core_calc = head507d_core_calc, diff --git a/drivers/gpu/drm/nouveau/dispnv50/head917d.c b/drivers/gpu/drm/nouveau/dispnv50/head917d.c index 303df8459ca83c522807188543a1fae77560da91..76958cedd51fd59185c3583e9dded9a35bb6452d 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head917d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/head917d.c @@ -83,6 +83,7 @@ head917d = { .view = head907d_view, .mode = head907d_mode, .olut = head907d_olut, + .olut_size = 1024, .olut_set = head907d_olut_set, .olut_clr = head907d_olut_clr, .core_calc = head507d_core_calc, diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c index ef6a99d95a9cd009cfe9ad637dc0646e9ddbadad..00011ce109a629da6a1f5f47453e1ecc3f298aac 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c @@ -148,14 +148,18 @@ headc37d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh) } } -static void -headc37d_olut(struct nv50_head *head, struct nv50_head_atom *asyh) +static bool +headc37d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size) { + if (size != 256 && size != 1024) + return false; + asyh->olut.mode = 2; - asyh->olut.size = 0; + asyh->olut.size = size == 1024 ? 2 : 0; asyh->olut.range = 0; asyh->olut.output_mode = 1; asyh->olut.load = head907d_olut_load; + return true; } static void @@ -201,6 +205,7 @@ headc37d = { .view = headc37d_view, .mode = headc37d_mode, .olut = headc37d_olut, + .olut_size = 1024, .olut_set = headc37d_olut_set, .olut_clr = headc37d_olut_clr, .curs_layout = head917d_curs_layout, diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c index 32a7f9e85fb0e529b3451d5930786024c659f8a6..938d910a1b1e4acd1a65900415367141d056d0fd 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c @@ -151,17 +151,20 @@ headc57d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem) writew(readw(mem - 4), mem + 4); } -void -headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh) +bool +headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size) { + if (size != 0 && size != 256 && size != 1024) + return false; + asyh->olut.mode = 2; /* DIRECT10 */ asyh->olut.size = 4 /* VSS header. */ + 1024 + 1 /* Entries. */; asyh->olut.output_mode = 1; /* INTERPOLATE_ENABLE. 
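Output values between LUT entries are interpolated by the hardware rather than snapped to the nearest entry.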
*/ - if (asyh->state.gamma_lut && - asyh->state.gamma_lut->length / sizeof(struct drm_color_lut) == 256) + if (size == 256) asyh->olut.load = headc57d_olut_load_8; else asyh->olut.load = headc57d_olut_load; + return true; } static void @@ -194,6 +197,7 @@ headc57d = { .mode = headc57d_mode, .olut = headc57d_olut, .olut_identity = true, + .olut_size = 1024, .olut_set = headc57d_olut_set, .olut_clr = headc57d_olut_clr, .curs_layout = head917d_curs_layout, diff --git a/drivers/gpu/drm/nouveau/dispnv50/lut.c b/drivers/gpu/drm/nouveau/dispnv50/lut.c index 994def4fd51a8da9deb2f99374ecfe3a88bc08fc..4e95ca5604ab7d1b4b2a783f798001dc8ea4c36d 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/lut.c +++ b/drivers/gpu/drm/nouveau/dispnv50/lut.c @@ -49,7 +49,7 @@ nv50_lut_load(struct nv50_lut *lut, int buffer, struct drm_property_blob *blob, kvfree(in); } } else { - load(in, blob->length / sizeof(*in), mem); + load(in, drm_color_lut_size(blob), mem); } return addr; diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c index 5193b6257061a4614570cafb830e0b8775a35290..890315291b01eff70a13ca6a3232517f970f4e65 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c @@ -318,7 +318,7 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset, return wndw->func->acquire(wndw, asyw, asyh); } -static void +static int nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw, struct nv50_wndw_atom *armw, struct nv50_wndw_atom *asyw, @@ -340,7 +340,7 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw, */ if (!(ilut = asyh->state.gamma_lut)) { asyw->visible = false; - return; + return 0; } if (wndw->func->ilut) @@ -359,7 +359,10 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw, /* Recalculate LUT state. */ memset(&asyw->xlut, 0x00, sizeof(asyw->xlut)); if ((asyw->ilut = wndw->func->ilut ? ilut : NULL)) { - wndw->func->ilut(wndw, asyw); + if (!wndw->func->ilut(wndw, asyw, drm_color_lut_size(ilut))) { + DRM_DEBUG_KMS("Invalid ilut\n"); + return -EINVAL; + } asyw->xlut.handle = wndw->wndw.vram.handle; asyw->xlut.i.buffer = !asyw->xlut.i.buffer; asyw->set.xlut = true; @@ -384,6 +387,7 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw, /* Can't do an immediate flip while changing the LUT. */ asyh->state.async_flip = false; + return 0; } static int @@ -424,8 +428,11 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) (!armw->visible || asyh->state.color_mgmt_changed || asyw->state.fb->format->format != - armw->state.fb->format->format)) - nv50_wndw_atomic_check_lut(wndw, armw, asyw, asyh); + armw->state.fb->format->format)) { + ret = nv50_wndw_atomic_check_lut(wndw, armw, asyw, asyh); + if (ret) + return ret; + } /* Calculate new window state. 
*/ if (asyw->visible) { diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h index c63bd3bdaf0690ea8de8acc5ba1a51bf69b384b1..caf3974759181acc8fc652f19a68ec0675dc2dc5 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h @@ -64,12 +64,13 @@ struct nv50_wndw_func { void (*ntfy_clr)(struct nv50_wndw *); int (*ntfy_wait_begun)(struct nouveau_bo *, u32 offset, struct nvif_device *); - void (*ilut)(struct nv50_wndw *, struct nv50_wndw_atom *); + bool (*ilut)(struct nv50_wndw *, struct nv50_wndw_atom *, int); void (*csc)(struct nv50_wndw *, struct nv50_wndw_atom *, const struct drm_color_ctm *); void (*csc_set)(struct nv50_wndw *, struct nv50_wndw_atom *); void (*csc_clr)(struct nv50_wndw *); bool ilut_identity; + int ilut_size; bool olut_core; void (*xlut_set)(struct nv50_wndw *, struct nv50_wndw_atom *); void (*xlut_clr)(struct nv50_wndw *); diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c index 0f9402162bde92eab82c0b86e0b554606dddd2e3..b92dc3461bbd0577f92c83f15d0cca5847577ffa 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c @@ -71,14 +71,18 @@ wndwc37e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) } } -static void -wndwc37e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) +static bool +wndwc37e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size) { + if (size != 256 && size != 1024) + return false; + asyw->xlut.i.mode = 2; - asyw->xlut.i.size = 0; + asyw->xlut.i.size = size == 1024 ? 2 : 0; asyw->xlut.i.range = 0; asyw->xlut.i.output_mode = 1; asyw->xlut.i.load = head907d_olut_load; + return true; } void @@ -261,6 +265,7 @@ wndwc37e = { .ntfy_reset = corec37d_ntfy_init, .ntfy_wait_begun = base507c_ntfy_wait_begun, .ilut = wndwc37e_ilut, + .ilut_size = 1024, .xlut_set = wndwc37e_ilut_set, .xlut_clr = wndwc37e_ilut_clr, .csc = base907c_csc, diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c index a311c79e52953fc243f09c0924c3dee833381b22..35c9c52fab263ec7e8c1b99114fbe2203da0c86a 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c @@ -156,19 +156,21 @@ wndwc57e_ilut_load(struct drm_color_lut *in, int size, void __iomem *mem) writew(readw(mem - 4), mem + 4); } -static void -wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) +static bool +wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size) { - u16 size = asyw->ilut->length / sizeof(struct drm_color_lut); + if (size = size ? size : 1024, size != 256 && size != 1024) + return false; + if (size == 256) { asyw->xlut.i.mode = 1; /* DIRECT8. */ } else { asyw->xlut.i.mode = 2; /* DIRECT10. */ - size = 1024; } asyw->xlut.i.size = 4 /* VSS header. */ + size + 1 /* Entries. */; asyw->xlut.i.output_mode = 0; /* INTERPOLATE_DISABLE. 
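Each LUT entry is used verbatim, with no blending between neighbouring entries.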
*/ asyw->xlut.i.load = wndwc57e_ilut_load; + return true; } static const struct nv50_wndw_func @@ -183,6 +185,7 @@ wndwc57e = { .ntfy_wait_begun = base507c_ntfy_wait_begun, .ilut = wndwc57e_ilut, .ilut_identity = true, + .ilut_size = 1024, .xlut_set = wndwc57e_ilut_set, .xlut_clr = wndwc57e_ilut_clr, .csc = base907c_csc, diff --git a/drivers/gpu/drm/nouveau/include/nvfw/acr.h b/drivers/gpu/drm/nouveau/include/nvfw/acr.h new file mode 100644 index 0000000000000000000000000000000000000000..e65d6a8db104bd858cb2e77dea5a5b8a15b8a916 --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvfw/acr.h @@ -0,0 +1,152 @@ +#ifndef __NVFW_ACR_H__ +#define __NVFW_ACR_H__ + +struct wpr_header { +#define WPR_HEADER_V0_FALCON_ID_INVALID 0xffffffff + u32 falcon_id; + u32 lsb_offset; + u32 bootstrap_owner; + u32 lazy_bootstrap; +#define WPR_HEADER_V0_STATUS_NONE 0 +#define WPR_HEADER_V0_STATUS_COPY 1 +#define WPR_HEADER_V0_STATUS_VALIDATION_CODE_FAILED 2 +#define WPR_HEADER_V0_STATUS_VALIDATION_DATA_FAILED 3 +#define WPR_HEADER_V0_STATUS_VALIDATION_DONE 4 +#define WPR_HEADER_V0_STATUS_VALIDATION_SKIPPED 5 +#define WPR_HEADER_V0_STATUS_BOOTSTRAP_READY 6 + u32 status; +}; + +void wpr_header_dump(struct nvkm_subdev *, const struct wpr_header *); + +struct wpr_header_v1 { +#define WPR_HEADER_V1_FALCON_ID_INVALID 0xffffffff + u32 falcon_id; + u32 lsb_offset; + u32 bootstrap_owner; + u32 lazy_bootstrap; + u32 bin_version; +#define WPR_HEADER_V1_STATUS_NONE 0 +#define WPR_HEADER_V1_STATUS_COPY 1 +#define WPR_HEADER_V1_STATUS_VALIDATION_CODE_FAILED 2 +#define WPR_HEADER_V1_STATUS_VALIDATION_DATA_FAILED 3 +#define WPR_HEADER_V1_STATUS_VALIDATION_DONE 4 +#define WPR_HEADER_V1_STATUS_VALIDATION_SKIPPED 5 +#define WPR_HEADER_V1_STATUS_BOOTSTRAP_READY 6 +#define WPR_HEADER_V1_STATUS_REVOCATION_CHECK_FAILED 7 + u32 status; +}; + +void wpr_header_v1_dump(struct nvkm_subdev *, const struct wpr_header_v1 *); + +struct lsf_signature { + u8 prd_keys[2][16]; + u8 dbg_keys[2][16]; + u32 b_prd_present; + u32 b_dbg_present; + u32 falcon_id; +}; + +struct lsf_signature_v1 { + u8 prd_keys[2][16]; + u8 dbg_keys[2][16]; + u32 b_prd_present; + u32 b_dbg_present; + u32 falcon_id; + u32 supports_versioning; + u32 version; + u32 depmap_count; + u8 depmap[11/*LSF_LSB_DEPMAP_SIZE*/ * 2 * 4]; + u8 kdf[16]; +}; + +struct lsb_header_tail { + u32 ucode_off; + u32 ucode_size; + u32 data_size; + u32 bl_code_size; + u32 bl_imem_off; + u32 bl_data_off; + u32 bl_data_size; + u32 app_code_off; + u32 app_code_size; + u32 app_data_off; + u32 app_data_size; + u32 flags; +}; + +struct lsb_header { + struct lsf_signature signature; + struct lsb_header_tail tail; +}; + +void lsb_header_dump(struct nvkm_subdev *, struct lsb_header *); + +struct lsb_header_v1 { + struct lsf_signature_v1 signature; + struct lsb_header_tail tail; +}; + +void lsb_header_v1_dump(struct nvkm_subdev *, struct lsb_header_v1 *); + +struct flcn_acr_desc { + union { + u8 reserved_dmem[0x200]; + u32 signatures[4]; + } ucode_reserved_space; + u32 wpr_region_id; + u32 wpr_offset; + u32 mmu_mem_range; + struct { + u32 no_regions; + struct { + u32 start_addr; + u32 end_addr; + u32 region_id; + u32 read_mask; + u32 write_mask; + u32 client_mask; + } region_props[2]; + } regions; + u32 ucode_blob_size; + u64 ucode_blob_base __aligned(8); + struct { + u32 vpr_enabled; + u32 vpr_start; + u32 vpr_end; + u32 hdcp_policies; + } vpr_desc; +}; + +void flcn_acr_desc_dump(struct nvkm_subdev *, struct flcn_acr_desc *); + +struct flcn_acr_desc_v1 { + u8 reserved_dmem[0x200]; + u32 signatures[4]; + u32 
wpr_region_id; + u32 wpr_offset; + u32 mmu_memory_range; + struct { + u32 no_regions; + struct { + u32 start_addr; + u32 end_addr; + u32 region_id; + u32 read_mask; + u32 write_mask; + u32 client_mask; + u32 shadow_mem_start_addr; + } region_props[2]; + } regions; + u32 ucode_blob_size; + u64 ucode_blob_base __aligned(8); + struct { + u32 vpr_enabled; + u32 vpr_start; + u32 vpr_end; + u32 hdcp_policies; + } vpr_desc; +}; + +void flcn_acr_desc_v1_dump(struct nvkm_subdev *, struct flcn_acr_desc_v1 *); +#endif diff --git a/drivers/gpu/drm/nouveau/include/nvfw/flcn.h b/drivers/gpu/drm/nouveau/include/nvfw/flcn.h new file mode 100644 index 0000000000000000000000000000000000000000..e090f347d2204c410e15ee296752a72545f4477e --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvfw/flcn.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __NVFW_FLCN_H__ +#define __NVFW_FLCN_H__ +#include +struct nvkm_subdev; + +struct loader_config { + u32 dma_idx; + u32 code_dma_base; + u32 code_size_total; + u32 code_size_to_load; + u32 code_entry_point; + u32 data_dma_base; + u32 data_size; + u32 overlay_dma_base; + u32 argc; + u32 argv; + u32 code_dma_base1; + u32 data_dma_base1; + u32 overlay_dma_base1; +}; + +void +loader_config_dump(struct nvkm_subdev *, const struct loader_config *); + +struct loader_config_v1 { + u32 reserved; + u32 dma_idx; + u64 code_dma_base; + u32 code_size_total; + u32 code_size_to_load; + u32 code_entry_point; + u64 data_dma_base; + u32 data_size; + u64 overlay_dma_base; + u32 argc; + u32 argv; +} __packed; + +void +loader_config_v1_dump(struct nvkm_subdev *, const struct loader_config_v1 *); + +struct flcn_bl_dmem_desc { + u32 reserved[4]; + u32 signature[4]; + u32 ctx_dma; + u32 code_dma_base; + u32 non_sec_code_off; + u32 non_sec_code_size; + u32 sec_code_off; + u32 sec_code_size; + u32 code_entry_point; + u32 data_dma_base; + u32 data_size; + u32 code_dma_base1; + u32 data_dma_base1; +}; + +void +flcn_bl_dmem_desc_dump(struct nvkm_subdev *, const struct flcn_bl_dmem_desc *); + +struct flcn_bl_dmem_desc_v1 { + u32 reserved[4]; + u32 signature[4]; + u32 ctx_dma; + u64 code_dma_base; + u32 non_sec_code_off; + u32 non_sec_code_size; + u32 sec_code_off; + u32 sec_code_size; + u32 code_entry_point; + u64 data_dma_base; + u32 data_size; +} __packed; + +void flcn_bl_dmem_desc_v1_dump(struct nvkm_subdev *, + const struct flcn_bl_dmem_desc_v1 *); + +struct flcn_bl_dmem_desc_v2 { + u32 reserved[4]; + u32 signature[4]; + u32 ctx_dma; + u64 code_dma_base; + u32 non_sec_code_off; + u32 non_sec_code_size; + u32 sec_code_off; + u32 sec_code_size; + u32 code_entry_point; + u64 data_dma_base; + u32 data_size; + u32 argc; + u32 argv; +} __packed; + +void flcn_bl_dmem_desc_v2_dump(struct nvkm_subdev *, + const struct flcn_bl_dmem_desc_v2 *); +#endif diff --git a/drivers/gpu/drm/nouveau/include/nvfw/fw.h b/drivers/gpu/drm/nouveau/include/nvfw/fw.h new file mode 100644 index 0000000000000000000000000000000000000000..a7cf1188c9d6512f0a10de3f69fd2c9d2522a63c --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvfw/fw.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __NVFW_FW_H__ +#define __NVFW_FW_H__ +#include +struct nvkm_subdev; + +struct nvfw_bin_hdr { + u32 bin_magic; + u32 bin_ver; + u32 bin_size; + u32 header_offset; + u32 data_offset; + u32 data_size; +}; + +const struct nvfw_bin_hdr *nvfw_bin_hdr(struct nvkm_subdev *, const void *); + +struct nvfw_bl_desc { + u32 start_tag; + u32 dmem_load_off; + u32 code_off; + u32 code_size; + u32 data_off; + u32 data_size; +}; + 
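A minimal sketch of how these wrappers are meant to chain together; example_parse_fw() below is a hypothetical helper for illustration only, assuming an image whose bin header offsets locate the bootloader descriptor and payload:

static int
example_parse_fw(struct nvkm_subdev *subdev, const struct firmware *fw)
{
	/* Returns a typed view of the image, dumping the fields for debug. */
	const struct nvfw_bin_hdr *bin = nvfw_bin_hdr(subdev, fw->data);
	const struct nvfw_bl_desc *bld;

	if (bin->bin_size > fw->size)
		return -EINVAL; /* truncated image */

	/* header_offset locates the bootloader descriptor... */
	bld = nvfw_bl_desc(subdev, fw->data + bin->header_offset);

	/* ...while data_offset/data_size bound the code+data payload. */
	nvkm_debug(subdev, "bl code at %08x, %u byte(s)\n",
		   bin->data_offset + bld->code_off, bld->code_size);
	return 0;
}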
+const struct nvfw_bl_desc *nvfw_bl_desc(struct nvkm_subdev *, const void *); +#endif diff --git a/drivers/gpu/drm/nouveau/include/nvfw/hs.h b/drivers/gpu/drm/nouveau/include/nvfw/hs.h new file mode 100644 index 0000000000000000000000000000000000000000..64d0d32200c22597fda3d3f8852f1da945377706 --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvfw/hs.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __NVFW_HS_H__ +#define __NVFW_HS_H__ +#include +struct nvkm_subdev; + +struct nvfw_hs_header { + u32 sig_dbg_offset; + u32 sig_dbg_size; + u32 sig_prod_offset; + u32 sig_prod_size; + u32 patch_loc; + u32 patch_sig; + u32 hdr_offset; + u32 hdr_size; +}; + +const struct nvfw_hs_header *nvfw_hs_header(struct nvkm_subdev *, const void *); + +struct nvfw_hs_load_header { + u32 non_sec_code_off; + u32 non_sec_code_size; + u32 data_dma_base; + u32 data_size; + u32 num_apps; + u32 apps[0]; +}; + +const struct nvfw_hs_load_header * +nvfw_hs_load_header(struct nvkm_subdev *, const void *); +#endif diff --git a/drivers/gpu/drm/nouveau/include/nvfw/ls.h b/drivers/gpu/drm/nouveau/include/nvfw/ls.h new file mode 100644 index 0000000000000000000000000000000000000000..f63692a2a16cfb716bfeec8d6d0159507ae45d1f --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvfw/ls.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __NVFW_LS_H__ +#define __NVFW_LS_H__ +#include +struct nvkm_subdev; + +struct nvfw_ls_desc_head { + u32 descriptor_size; + u32 image_size; + u32 tools_version; + u32 app_version; + char date[64]; + u32 bootloader_start_offset; + u32 bootloader_size; + u32 bootloader_imem_offset; + u32 bootloader_entry_point; + u32 app_start_offset; + u32 app_size; + u32 app_imem_offset; + u32 app_imem_entry; + u32 app_dmem_offset; + u32 app_resident_code_offset; + u32 app_resident_code_size; + u32 app_resident_data_offset; + u32 app_resident_data_size; +}; + +struct nvfw_ls_desc { + struct nvfw_ls_desc_head head; + u32 nb_overlays; + struct { + u32 start; + u32 size; + } load_ovl[64]; + u32 compressed; +}; + +const struct nvfw_ls_desc *nvfw_ls_desc(struct nvkm_subdev *, const void *); + +struct nvfw_ls_desc_v1 { + struct nvfw_ls_desc_head head; + u32 nb_imem_overlays; + u32 nb_dmem_overlays; + struct { + u32 start; + u32 size; + } load_ovl[64]; + u32 compressed; +}; + +const struct nvfw_ls_desc_v1 * +nvfw_ls_desc_v1(struct nvkm_subdev *, const void *); +#endif diff --git a/drivers/gpu/drm/nouveau/include/nvfw/pmu.h b/drivers/gpu/drm/nouveau/include/nvfw/pmu.h new file mode 100644 index 0000000000000000000000000000000000000000..452ed7d03827c03e6b6a52a22e3dfa9acbd72313 --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvfw/pmu.h @@ -0,0 +1,98 @@ +#ifndef __NVFW_PMU_H__ +#define __NVFW_PMU_H__ + +struct nv_pmu_args { + u32 reserved; + u32 freq_hz; + u32 trace_size; + u32 trace_dma_base; + u16 trace_dma_base1; + u8 trace_dma_offset; + u32 trace_dma_idx; + bool secure_mode; + bool raise_priv_sec; + struct { + u32 dma_base; + u16 dma_base1; + u8 dma_offset; + u16 fb_size; + u8 dma_idx; + } gc6_ctx; + u8 pad; +}; + +#define NV_PMU_UNIT_INIT 0x07 +#define NV_PMU_UNIT_ACR 0x0a + +struct nv_pmu_init_msg { + struct nv_falcon_msg hdr; +#define NV_PMU_INIT_MSG_INIT 0x00 + u8 msg_type; + + u8 pad; + u16 os_debug_entry_point; + + struct { + u16 size; + u16 offset; + u8 index; + u8 pad; + } queue_info[5]; + + u16 sw_managed_area_offset; + u16 sw_managed_area_size; +}; + +struct nv_pmu_acr_cmd { + struct nv_falcon_cmd hdr; +#define NV_PMU_ACR_CMD_INIT_WPR_REGION 0x00 +#define 
NV_PMU_ACR_CMD_BOOTSTRAP_FALCON 0x01 +#define NV_PMU_ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS 0x03 + u8 cmd_type; +}; + +struct nv_pmu_acr_msg { + struct nv_falcon_cmd hdr; + u8 msg_type; +}; + +struct nv_pmu_acr_init_wpr_region_cmd { + struct nv_pmu_acr_cmd cmd; + u32 region_id; + u32 wpr_offset; +}; + +struct nv_pmu_acr_init_wpr_region_msg { + struct nv_pmu_acr_msg msg; + u32 error_code; +}; + +struct nv_pmu_acr_bootstrap_falcon_cmd { + struct nv_pmu_acr_cmd cmd; +#define NV_PMU_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES 0x00000000 +#define NV_PMU_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_NO 0x00000001 + u32 flags; + u32 falcon_id; +}; + +struct nv_pmu_acr_bootstrap_falcon_msg { + struct nv_pmu_acr_msg msg; + u32 falcon_id; +}; + +struct nv_pmu_acr_bootstrap_multiple_falcons_cmd { + struct nv_pmu_acr_cmd cmd; +#define NV_PMU_ACR_BOOTSTRAP_MULTIPLE_FALCONS_FLAGS_RESET_YES 0x00000000 +#define NV_PMU_ACR_BOOTSTRAP_MULTIPLE_FALCONS_FLAGS_RESET_NO 0x00000001 + u32 flags; + u32 falcon_mask; + u32 use_va_mask; + u32 wpr_lo; + u32 wpr_hi; +}; + +struct nv_pmu_acr_bootstrap_multiple_falcons_msg { + struct nv_pmu_acr_msg msg; + u32 falcon_mask; +}; +#endif diff --git a/drivers/gpu/drm/nouveau/include/nvfw/sec2.h b/drivers/gpu/drm/nouveau/include/nvfw/sec2.h new file mode 100644 index 0000000000000000000000000000000000000000..03496558b775dddb080462ca1a8e0cdad413de65 --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvfw/sec2.h @@ -0,0 +1,60 @@ +#ifndef __NVFW_SEC2_H__ +#define __NVFW_SEC2_H__ + +struct nv_sec2_args { + u32 freq_hz; + u32 falc_trace_size; + u32 falc_trace_dma_base; + u32 falc_trace_dma_idx; + bool secure_mode; +}; + +#define NV_SEC2_UNIT_INIT 0x01 +#define NV_SEC2_UNIT_ACR 0x08 + +struct nv_sec2_init_msg { + struct nv_falcon_msg hdr; +#define NV_SEC2_INIT_MSG_INIT 0x00 + u8 msg_type; + + u8 num_queues; + u16 os_debug_entry_point; + + struct { + u32 offset; + u16 size; + u8 index; +#define NV_SEC2_INIT_MSG_QUEUE_ID_CMDQ 0x00 +#define NV_SEC2_INIT_MSG_QUEUE_ID_MSGQ 0x01 + u8 id; + } queue_info[2]; + + u32 sw_managed_area_offset; + u16 sw_managed_area_size; +}; + +struct nv_sec2_acr_cmd { + struct nv_falcon_cmd hdr; +#define NV_SEC2_ACR_CMD_BOOTSTRAP_FALCON 0x00 + u8 cmd_type; +}; + +struct nv_sec2_acr_msg { + struct nv_falcon_cmd hdr; + u8 msg_type; +}; + +struct nv_sec2_acr_bootstrap_falcon_cmd { + struct nv_sec2_acr_cmd cmd; +#define NV_SEC2_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES 0x00000000 +#define NV_SEC2_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_NO 0x00000001 + u32 flags; + u32 falcon_id; +}; + +struct nv_sec2_acr_bootstrap_falcon_msg { + struct nv_sec2_acr_msg msg; + u32 error_code; + u32 falcon_id; +}; +#endif diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h index f704ae600e948b2eb708cc3f4f6c841381f354eb..30659747ffe83e2ca130b35fbde938be1e938b8d 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/class.h +++ b/drivers/gpu/drm/nouveau/include/nvif/class.h @@ -166,6 +166,8 @@ #define VOLTA_A /* cl9097.h */ 0x0000c397 +#define TURING_A /* cl9097.h */ 0x0000c597 + #define NV74_BSP 0x000074b0 #define GT212_MSVLD 0x000085b1 @@ -207,6 +209,7 @@ #define PASCAL_COMPUTE_A 0x0000c0c0 #define PASCAL_COMPUTE_B 0x0000c1c0 #define VOLTA_COMPUTE_A 0x0000c3c0 +#define TURING_COMPUTE_A 0x0000c5c0 #define NV74_CIPHER 0x000074c1 #endif diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0008.h b/drivers/gpu/drm/nouveau/include/nvif/if0008.h index 8450127420f550c7ff1d038f1b671b2c544f942c..c21d09f04f1d6db5c0323c7e4622f2f53ffc86aa 100644 --- 
a/drivers/gpu/drm/nouveau/include/nvif/if0008.h +++ b/drivers/gpu/drm/nouveau/include/nvif/if0008.h @@ -35,7 +35,7 @@ struct nvif_mmu_type_v0 { struct nvif_mmu_kind_v0 { __u8 version; - __u8 pad01[1]; + __u8 kind_inv; __u16 count; __u8 data[]; }; diff --git a/drivers/gpu/drm/nouveau/include/nvif/mmu.h b/drivers/gpu/drm/nouveau/include/nvif/mmu.h index 747ecf67e40382e165431323b233ffb2ac13e588..cec1e88a0a05c7eee4cd4fc08d4a6e70f93f1139 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/mmu.h +++ b/drivers/gpu/drm/nouveau/include/nvif/mmu.h @@ -7,6 +7,7 @@ struct nvif_mmu { u8 dmabits; u8 heap_nr; u8 type_nr; + u8 kind_inv; u16 kind_nr; s32 mem; @@ -36,9 +37,8 @@ void nvif_mmu_fini(struct nvif_mmu *); static inline bool nvif_mmu_kind_valid(struct nvif_mmu *mmu, u8 kind) { - const u8 invalid = mmu->kind_nr - 1; if (kind) { - if (kind >= mmu->kind_nr || mmu->kind[kind] == invalid) + if (kind >= mmu->kind_nr || mmu->kind[kind] == mmu->kind_inv) return false; } return true; diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h index 6d55cd0476aaab10a19c2060a46e622ac2c088ab..5c007ce62fc34e1a42f5d2be2d0da98106a76416 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h @@ -23,13 +23,13 @@ enum nvkm_devidx { NVKM_SUBDEV_MMU, NVKM_SUBDEV_BAR, NVKM_SUBDEV_FAULT, + NVKM_SUBDEV_ACR, NVKM_SUBDEV_PMU, NVKM_SUBDEV_VOLT, NVKM_SUBDEV_ICCSENSE, NVKM_SUBDEV_THERM, NVKM_SUBDEV_CLK, NVKM_SUBDEV_GSP, - NVKM_SUBDEV_SECBOOT, NVKM_ENGINE_BSP, @@ -129,6 +129,7 @@ struct nvkm_device { struct notifier_block nb; } acpi; + struct nvkm_acr *acr; struct nvkm_bar *bar; struct nvkm_bios *bios; struct nvkm_bus *bus; @@ -149,7 +150,6 @@ struct nvkm_device { struct nvkm_subdev *mxm; struct nvkm_pci *pci; struct nvkm_pmu *pmu; - struct nvkm_secboot *secboot; struct nvkm_therm *therm; struct nvkm_timer *timer; struct nvkm_top *top; @@ -169,7 +169,7 @@ struct nvkm_device { struct nvkm_engine *mspdec; struct nvkm_engine *msppp; struct nvkm_engine *msvld; - struct nvkm_engine *nvenc[3]; + struct nvkm_nvenc *nvenc[3]; struct nvkm_nvdec *nvdec[3]; struct nvkm_pm *pm; struct nvkm_engine *sec; @@ -202,6 +202,7 @@ struct nvkm_device_quirk { struct nvkm_device_chip { const char *name; + int (*acr )(struct nvkm_device *, int idx, struct nvkm_acr **); int (*bar )(struct nvkm_device *, int idx, struct nvkm_bar **); int (*bios )(struct nvkm_device *, int idx, struct nvkm_bios **); int (*bus )(struct nvkm_device *, int idx, struct nvkm_bus **); @@ -222,7 +223,6 @@ struct nvkm_device_chip { int (*mxm )(struct nvkm_device *, int idx, struct nvkm_subdev **); int (*pci )(struct nvkm_device *, int idx, struct nvkm_pci **); int (*pmu )(struct nvkm_device *, int idx, struct nvkm_pmu **); - int (*secboot )(struct nvkm_device *, int idx, struct nvkm_secboot **); int (*therm )(struct nvkm_device *, int idx, struct nvkm_therm **); int (*timer )(struct nvkm_device *, int idx, struct nvkm_timer **); int (*top )(struct nvkm_device *, int idx, struct nvkm_top **); @@ -242,7 +242,7 @@ struct nvkm_device_chip { int (*mspdec )(struct nvkm_device *, int idx, struct nvkm_engine **); int (*msppp )(struct nvkm_device *, int idx, struct nvkm_engine **); int (*msvld )(struct nvkm_device *, int idx, struct nvkm_engine **); - int (*nvenc[3])(struct nvkm_device *, int idx, struct nvkm_engine **); + int (*nvenc[3])(struct nvkm_device *, int idx, struct nvkm_nvenc **); int (*nvdec[3])(struct nvkm_device *, int idx, struct nvkm_nvdec **); int (*pm 
)(struct nvkm_device *, int idx, struct nvkm_pm **); int (*sec )(struct nvkm_device *, int idx, struct nvkm_engine **); diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h new file mode 100644 index 0000000000000000000000000000000000000000..daa8e4bfb6bfbe19f83ed49c59936a4729449b7f --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h @@ -0,0 +1,77 @@ +#ifndef __NVKM_FALCON_H__ +#define __NVKM_FALCON_H__ +#include + +int nvkm_falcon_ctor(const struct nvkm_falcon_func *, struct nvkm_subdev *owner, + const char *name, u32 addr, struct nvkm_falcon *); +void nvkm_falcon_dtor(struct nvkm_falcon *); + +void nvkm_falcon_v1_load_imem(struct nvkm_falcon *, + void *, u32, u32, u16, u8, bool); +void nvkm_falcon_v1_load_dmem(struct nvkm_falcon *, void *, u32, u32, u8); +void nvkm_falcon_v1_read_dmem(struct nvkm_falcon *, u32, u32, u8, void *); +void nvkm_falcon_v1_bind_context(struct nvkm_falcon *, struct nvkm_memory *); +int nvkm_falcon_v1_wait_for_halt(struct nvkm_falcon *, u32); +int nvkm_falcon_v1_clear_interrupt(struct nvkm_falcon *, u32); +void nvkm_falcon_v1_set_start_addr(struct nvkm_falcon *, u32 start_addr); +void nvkm_falcon_v1_start(struct nvkm_falcon *); +int nvkm_falcon_v1_enable(struct nvkm_falcon *); +void nvkm_falcon_v1_disable(struct nvkm_falcon *); + +void gp102_sec2_flcn_bind_context(struct nvkm_falcon *, struct nvkm_memory *); +int gp102_sec2_flcn_enable(struct nvkm_falcon *); + +#define FLCN_PRINTK(t,f,fmt,a...) do { \ + if (nvkm_subdev_name[(f)->owner->index] != (f)->name) \ + nvkm_##t((f)->owner, "%s: "fmt"\n", (f)->name, ##a); \ + else \ + nvkm_##t((f)->owner, fmt"\n", ##a); \ +} while(0) +#define FLCN_DBG(f,fmt,a...) FLCN_PRINTK(debug, (f), fmt, ##a) +#define FLCN_ERR(f,fmt,a...) 
FLCN_PRINTK(error, (f), fmt, ##a) + +/** + * struct nv_falcon_msg - header for all messages + * + * @unit_id: id of firmware process that sent the message + * @size: total size of message + * @ctrl_flags: control flags + * @seq_id: used to match a message from its corresponding command + */ +struct nv_falcon_msg { + u8 unit_id; + u8 size; + u8 ctrl_flags; + u8 seq_id; +}; + +#define nv_falcon_cmd nv_falcon_msg +#define NV_FALCON_CMD_UNIT_ID_REWIND 0x00 + +struct nvkm_falcon_qmgr; +int nvkm_falcon_qmgr_new(struct nvkm_falcon *, struct nvkm_falcon_qmgr **); +void nvkm_falcon_qmgr_del(struct nvkm_falcon_qmgr **); + +typedef int +(*nvkm_falcon_qmgr_callback)(void *priv, struct nv_falcon_msg *); + +struct nvkm_falcon_cmdq; +int nvkm_falcon_cmdq_new(struct nvkm_falcon_qmgr *, const char *name, + struct nvkm_falcon_cmdq **); +void nvkm_falcon_cmdq_del(struct nvkm_falcon_cmdq **); +void nvkm_falcon_cmdq_init(struct nvkm_falcon_cmdq *, + u32 index, u32 offset, u32 size); +void nvkm_falcon_cmdq_fini(struct nvkm_falcon_cmdq *); +int nvkm_falcon_cmdq_send(struct nvkm_falcon_cmdq *, struct nv_falcon_cmd *, + nvkm_falcon_qmgr_callback, void *priv, + unsigned long timeout_jiffies); + +struct nvkm_falcon_msgq; +int nvkm_falcon_msgq_new(struct nvkm_falcon_qmgr *, const char *name, + struct nvkm_falcon_msgq **); +void nvkm_falcon_msgq_del(struct nvkm_falcon_msgq **); +void nvkm_falcon_msgq_init(struct nvkm_falcon_msgq *, + u32 index, u32 offset, u32 size); +int nvkm_falcon_msgq_recv_initmsg(struct nvkm_falcon_msgq *, void *, u32 size); +void nvkm_falcon_msgq_recv(struct nvkm_falcon_msgq *); +#endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h b/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h index 383370c3242807b154f3ac8d1cf845cb22ae71ae..d14b7fb073682916b7e0569cbf91bc8fba08ce12 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h @@ -1,12 +1,55 @@ /* SPDX-License-Identifier: MIT */ #ifndef __NVKM_FIRMWARE_H__ #define __NVKM_FIRMWARE_H__ +#include #include -int nvkm_firmware_get_version(const struct nvkm_subdev *, const char *fwname, - int min_version, int max_version, - const struct firmware **); -int nvkm_firmware_get(const struct nvkm_subdev *, const char *fwname, +int nvkm_firmware_get(const struct nvkm_subdev *, const char *fwname, int ver, const struct firmware **); void nvkm_firmware_put(const struct firmware *); + +int nvkm_firmware_load_blob(const struct nvkm_subdev *subdev, const char *path, + const char *name, int ver, struct nvkm_blob *); +int nvkm_firmware_load_name(const struct nvkm_subdev *subdev, const char *path, + const char *name, int ver, + const struct firmware **); + +#define nvkm_firmware_load(s,l,o,p...) ({ \ + struct nvkm_subdev *_s = (s); \ + const char *_opts = (o); \ + char _option[32]; \ + typeof(l[0]) *_list = (l), *_next, *_fwif = NULL; \ + int _ver, _fwv, _ret = 0; \ + \ + snprintf(_option, sizeof(_option), "Nv%sFw", _opts); \ + _ver = nvkm_longopt(_s->device->cfgopt, _option, -2); \ + if (_ver >= -1) { \ + for (_next = _list; !_fwif && _next->load; _next++) { \ + if (_next->version == _ver) \ + _fwif = _next; \ + } \ + _ret = _fwif ? 0 : -EINVAL; \ + } \ + \ + if (_ret == 0) { \ + snprintf(_option, sizeof(_option), "Nv%sFwVer", _opts); \ + _fwv = _fwif ? _fwif->version : -1; \ + _ver = nvkm_longopt(_s->device->cfgopt, _option, _fwv); \ + for (_next = _fwif ? _fwif : _list; _next->load; _next++) { \ + _fwv = (_ver >= 0) ? 
_ver : _next->version; \ + _ret = _next->load(p, _fwv, _next); \ + if (_ret == 0 || _ver >= 0) { \ + _fwif = _next; \ + break; \ + } \ + } \ + } \ + \ + if (_ret) { \ + nvkm_error(_s, "failed to load firmware\n"); \ + _fwif = ERR_PTR(_ret); \ + } \ + \ + _fwif; \ +}) #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h index b23bf6109f2d28a634df13382707e1bcb9813492..74d3f1a809d7ca9c4d6364ea470107120583e2be 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h @@ -84,6 +84,22 @@ void nvkm_memory_tags_put(struct nvkm_memory *, struct nvkm_device *, nvkm_wo32((o), __a + 4, upper_32_bits(__d)); \ } while(0) +#define nvkm_robj(o,a,p,s) do { \ + u32 _addr = (a), _size = (s) >> 2, *_data = (void *)(p); \ + while (_size--) { \ + *(_data++) = nvkm_ro32((o), _addr); \ + _addr += 4; \ + } \ +} while(0) + +#define nvkm_wobj(o,a,p,s) do { \ + u32 _addr = (a), _size = (s) >> 2, *_data = (void *)(p); \ + while (_size--) { \ + nvkm_wo32((o), _addr, *(_data++)); \ + _addr += 4; \ + } \ +} while(0) + #define nvkm_fill(t,s,o,a,d,c) do { \ u64 _a = (a), _c = (c), _d = (d), _o = _a >> s, _s = _c << s; \ u##t __iomem *_m = nvkm_kmap(o); \ diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h index 029a416197db3c64cf687448d0e0d8ee95d672bb..d7ba3205207fe28d1c93d16e4bb234e9ad4dd67f 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h @@ -21,4 +21,17 @@ iowrite32_native(lower_32_bits(_v), &_p[0]); \ iowrite32_native(upper_32_bits(_v), &_p[1]); \ } while(0) + +struct nvkm_blob { + void *data; + u32 size; +}; + +static inline void +nvkm_blob_dtor(struct nvkm_blob *blob) +{ + kfree(blob->data); + blob->data = NULL; + blob->size = 0; +} #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h index 23b582d696c619522cbc9e6e41827ddf99dfb559..27c1f868552cdb4380d0904cfda06eab792c725c 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: MIT */ -#ifndef __NVKM_FALCON_H__ -#define __NVKM_FALCON_H__ +#ifndef __NVKM_FLCNEN_H__ +#define __NVKM_FLCNEN_H__ #define nvkm_falcon(p) container_of((p), struct nvkm_falcon, engine) #include struct nvkm_fifo_chan; @@ -23,12 +23,13 @@ struct nvkm_falcon { struct mutex mutex; struct mutex dmem_mutex; + bool oneinit; + const struct nvkm_subdev *user; u8 version; u8 secret; bool debug; - bool has_emem; struct nvkm_memory *core; bool external; @@ -76,9 +77,14 @@ struct nvkm_falcon_func { } data; void (*init)(struct nvkm_falcon *); void (*intr)(struct nvkm_falcon *, struct nvkm_fifo_chan *); + + u32 debug; + u32 fbif; + void (*load_imem)(struct nvkm_falcon *, void *, u32, u32, u16, u8, bool); void (*load_dmem)(struct nvkm_falcon *, void *, u32, u32, u8); void (*read_dmem)(struct nvkm_falcon *, u32, u32, u8, void *); + u32 emem_addr; void (*bind_context)(struct nvkm_falcon *, struct nvkm_memory *); int (*wait_for_halt)(struct nvkm_falcon *, u32); int (*clear_interrupt)(struct nvkm_falcon *, u32); @@ -86,6 +92,13 @@ struct nvkm_falcon_func { void (*start)(struct nvkm_falcon *); int (*enable)(struct nvkm_falcon *falcon); void (*disable)(struct nvkm_falcon *falcon); + int (*reset)(struct nvkm_falcon *); + + struct { + u32 head; + u32 tail; + u32 stride; + } cmdq, 
msgq; struct nvkm_sclass sclass[]; }; @@ -122,5 +135,4 @@ int nvkm_falcon_clear_interrupt(struct nvkm_falcon *, u32); int nvkm_falcon_enable(struct nvkm_falcon *); void nvkm_falcon_disable(struct nvkm_falcon *); int nvkm_falcon_reset(struct nvkm_falcon *); - #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h index 2cde36f3c064ed2f2aef4ae7450a9d280964b6ab..1530c81f86a28548b0db6eea8092faf40b4c7e42 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h @@ -50,6 +50,8 @@ int gp100_gr_new(struct nvkm_device *, int, struct nvkm_gr **); int gp102_gr_new(struct nvkm_device *, int, struct nvkm_gr **); int gp104_gr_new(struct nvkm_device *, int, struct nvkm_gr **); int gp107_gr_new(struct nvkm_device *, int, struct nvkm_gr **); +int gp108_gr_new(struct nvkm_device *, int, struct nvkm_gr **); int gp10b_gr_new(struct nvkm_device *, int, struct nvkm_gr **); int gv100_gr_new(struct nvkm_device *, int, struct nvkm_gr **); +int tu102_gr_new(struct nvkm_device *, int, struct nvkm_gr **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h index 7c7d7f0abfccc991289fe64a87803442070c2530..1b3183e31606823a7d5c35f0926b6e13d1843a8d 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h @@ -3,13 +3,13 @@ #define __NVKM_NVDEC_H__ #define nvkm_nvdec(p) container_of((p), struct nvkm_nvdec, engine) #include +#include struct nvkm_nvdec { + const struct nvkm_nvdec_func *func; struct nvkm_engine engine; - u32 addr; - - struct nvkm_falcon *falcon; + struct nvkm_falcon falcon; }; -int gp102_nvdec_new(struct nvkm_device *, int, struct nvkm_nvdec **); +int gm107_nvdec_new(struct nvkm_device *, int, struct nvkm_nvdec **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h index 21624046d0a1255665c8fea8ff58874dab1e74e0..33e6ba8adc8dc7ab0972349b004657ddf70f207d 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h @@ -1,5 +1,15 @@ /* SPDX-License-Identifier: MIT */ #ifndef __NVKM_NVENC_H__ #define __NVKM_NVENC_H__ +#define nvkm_nvenc(p) container_of((p), struct nvkm_nvenc, engine) #include +#include + +struct nvkm_nvenc { + const struct nvkm_nvenc_func *func; + struct nvkm_engine engine; + struct nvkm_falcon falcon; +}; + +int gm107_nvenc_new(struct nvkm_device *, int, struct nvkm_nvenc **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h index 33078f86c7790eb91efcfee0cb7f7fbc9b4283ab..34dc765648d56f371b024f8ca4448701b85fcbea 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h @@ -1,17 +1,24 @@ /* SPDX-License-Identifier: MIT */ #ifndef __NVKM_SEC2_H__ #define __NVKM_SEC2_H__ +#define nvkm_sec2(p) container_of((p), struct nvkm_sec2, engine) #include +#include struct nvkm_sec2 { + const struct nvkm_sec2_func *func; struct nvkm_engine engine; - u32 addr; + struct nvkm_falcon falcon; + + struct nvkm_falcon_qmgr *qmgr; + struct nvkm_falcon_cmdq *cmdq; + struct nvkm_falcon_msgq *msgq; - struct nvkm_falcon *falcon; - struct nvkm_msgqueue *queue; struct work_struct work; + bool initmsg_received; }; int gp102_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **); +int 
gp108_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **); int tu102_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h new file mode 100644 index 0000000000000000000000000000000000000000..5d9c3a966de64450f631842327b48b74fb2732b8 --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __NVKM_ACR_H__ +#define __NVKM_ACR_H__ +#define nvkm_acr(p) container_of((p), struct nvkm_acr, subdev) +#include +#include + +enum nvkm_acr_lsf_id { + NVKM_ACR_LSF_PMU = 0, + NVKM_ACR_LSF_GSPLITE = 1, + NVKM_ACR_LSF_FECS = 2, + NVKM_ACR_LSF_GPCCS = 3, + NVKM_ACR_LSF_NVDEC = 4, + NVKM_ACR_LSF_SEC2 = 7, + NVKM_ACR_LSF_MINION = 10, + NVKM_ACR_LSF_NUM +}; + +static inline const char * +nvkm_acr_lsf_id(enum nvkm_acr_lsf_id id) +{ + switch (id) { + case NVKM_ACR_LSF_PMU : return "pmu"; + case NVKM_ACR_LSF_GSPLITE: return "gsplite"; + case NVKM_ACR_LSF_FECS : return "fecs"; + case NVKM_ACR_LSF_GPCCS : return "gpccs"; + case NVKM_ACR_LSF_NVDEC : return "nvdec"; + case NVKM_ACR_LSF_SEC2 : return "sec2"; + case NVKM_ACR_LSF_MINION : return "minion"; + default: + return "unknown"; + } +} + +struct nvkm_acr { + const struct nvkm_acr_func *func; + struct nvkm_subdev subdev; + + struct list_head hsfw, hsf; + struct list_head lsfw, lsf; + + struct nvkm_memory *wpr; + u64 wpr_start; + u64 wpr_end; + u64 shadow_start; + + struct nvkm_memory *inst; + struct nvkm_vmm *vmm; + + bool done; + + const struct firmware *wpr_fw; + bool wpr_comp; + u64 wpr_prev; +}; + +bool nvkm_acr_managed_falcon(struct nvkm_device *, enum nvkm_acr_lsf_id); +int nvkm_acr_bootstrap_falcons(struct nvkm_device *, unsigned long mask); + +int gm200_acr_new(struct nvkm_device *, int, struct nvkm_acr **); +int gm20b_acr_new(struct nvkm_device *, int, struct nvkm_acr **); +int gp102_acr_new(struct nvkm_device *, int, struct nvkm_acr **); +int gp108_acr_new(struct nvkm_device *, int, struct nvkm_acr **); +int gp10b_acr_new(struct nvkm_device *, int, struct nvkm_acr **); +int tu102_acr_new(struct nvkm_device *, int, struct nvkm_acr **); + +struct nvkm_acr_lsfw { + const struct nvkm_acr_lsf_func *func; + struct nvkm_falcon *falcon; + enum nvkm_acr_lsf_id id; + + struct list_head head; + + struct nvkm_blob img; + + const struct firmware *sig; + + u32 bootloader_size; + u32 bootloader_imem_offset; + + u32 app_size; + u32 app_start_offset; + u32 app_imem_entry; + u32 app_resident_code_offset; + u32 app_resident_code_size; + u32 app_resident_data_offset; + u32 app_resident_data_size; + + u32 ucode_size; + u32 data_size; + + struct { + u32 lsb; + u32 img; + u32 bld; + } offset; + u32 bl_data_size; +}; + +struct nvkm_acr_lsf_func { +/* These (currently) map directly to LSB header flags.
*/ +#define NVKM_ACR_LSF_LOAD_CODE_AT_0 0x00000001 +#define NVKM_ACR_LSF_DMACTL_REQ_CTX 0x00000004 +#define NVKM_ACR_LSF_FORCE_PRIV_LOAD 0x00000008 + u32 flags; + u32 bld_size; + void (*bld_write)(struct nvkm_acr *, u32 bld, struct nvkm_acr_lsfw *); + void (*bld_patch)(struct nvkm_acr *, u32 bld, s64 adjust); + int (*boot)(struct nvkm_falcon *); + int (*bootstrap_falcon)(struct nvkm_falcon *, enum nvkm_acr_lsf_id); + int (*bootstrap_multiple_falcons)(struct nvkm_falcon *, u32 mask); +}; + +int +nvkm_acr_lsfw_load_sig_image_desc(struct nvkm_subdev *, struct nvkm_falcon *, + enum nvkm_acr_lsf_id, const char *path, + int ver, const struct nvkm_acr_lsf_func *); +int +nvkm_acr_lsfw_load_sig_image_desc_v1(struct nvkm_subdev *, struct nvkm_falcon *, + enum nvkm_acr_lsf_id, const char *path, + int ver, const struct nvkm_acr_lsf_func *); +int +nvkm_acr_lsfw_load_bl_inst_data_sig(struct nvkm_subdev *, struct nvkm_falcon *, + enum nvkm_acr_lsf_id, const char *path, + int ver, const struct nvkm_acr_lsf_func *); +#endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h index 97322f95b3ee6cfb73db7875f6b6281aef401613..a513c16ab10569a595eb573e60ee9687aac26b65 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h @@ -31,6 +31,7 @@ struct nvkm_fault_data { }; int gp100_fault_new(struct nvkm_device *, int, struct nvkm_fault **); +int gp10b_fault_new(struct nvkm_device *, int, struct nvkm_fault **); int gv100_fault_new(struct nvkm_device *, int, struct nvkm_fault **); int tu102_fault_new(struct nvkm_device *, int, struct nvkm_fault **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h index 239ad222b95ab5d0bc5b19081c98163b2926a396..34b56b10218a8f782ef0a581eabbbed9b3bf14ed 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h @@ -33,6 +33,8 @@ struct nvkm_fb { const struct nvkm_fb_func *func; struct nvkm_subdev subdev; + struct nvkm_blob vpr_scrubber; + struct nvkm_ram *ram; struct nvkm_mm tags; diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h index 4c672a5c4cd5bf926a554b3bc553e196b7e97032..06db67610a5002d90fbed4a962d215203b045f99 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h @@ -2,12 +2,11 @@ #define __NVKM_GSP_H__ #define nvkm_gsp(p) container_of((p), struct nvkm_gsp, subdev) #include +#include struct nvkm_gsp { struct nvkm_subdev subdev; - u32 addr; - - struct nvkm_falcon *falcon; + struct nvkm_falcon falcon; }; int gv100_gsp_new(struct nvkm_device *, int, struct nvkm_gsp **); diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h index 644d527c3b969171e54a247a4442bff015ae23b6..d76f60d7d29a6924b32910211d02ca1c46b1c547 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h @@ -40,4 +40,5 @@ int gm107_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); int gm200_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); int gp100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); int gp102_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); +int gp10b_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h 
b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h index 4752006880f39469648551b60f207a28796fc355..da553089d2d86c5ef738a804cef1067b13a33758 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h @@ -2,13 +2,20 @@ #ifndef __NVKM_PMU_H__ #define __NVKM_PMU_H__ #include -#include +#include struct nvkm_pmu { const struct nvkm_pmu_func *func; struct nvkm_subdev subdev; - struct nvkm_falcon *falcon; - struct nvkm_msgqueue *queue; + struct nvkm_falcon falcon; + + struct nvkm_falcon_qmgr *qmgr; + struct nvkm_falcon_cmdq *hpq; + struct nvkm_falcon_cmdq *lpq; + struct nvkm_falcon_msgq *msgq; + bool initmsg_received; + + struct completion wpr_ready; struct { u32 base; @@ -43,6 +50,7 @@ int gm107_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); int gm20b_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); int gp100_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); int gp102_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); +int gp10b_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); /* interface to MEMX process running on PMU */ struct nvkm_memx; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index f8015e0318d71fbd5d8c199a6cee40c15922e279..1b62ccc57aef03685c0f9b4d3095173dd372dd8b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1162,7 +1162,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, void nouveau_bo_move_init(struct nouveau_drm *drm) { - static const struct { + static const struct _method_table { const char *name; int engine; s32 oclass; @@ -1192,7 +1192,8 @@ nouveau_bo_move_init(struct nouveau_drm *drm) { "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init }, {}, { "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init }, - }, *mthd = _methods; + }; + const struct _method_table *mthd = _methods; const char *name = "CPU"; int ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c index 282fd90b65e134373525fb086c9505878c0d1c25..d9381a0531693d779fd1df592c442fa11acd65d3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c @@ -55,6 +55,8 @@ nouveau_channel_killed(struct nvif_notify *ntfy) struct nouveau_cli *cli = (void *)chan->user.client; NV_PRINTK(warn, cli, "channel %d killed!\n", chan->chid); atomic_set(&chan->killed, 1); + if (chan->fence) + nouveau_fence_context_kill(chan->fence, -ENODEV); return NVIF_NOTIFY_DROP; } diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index fa14399415965efc6a607d140ba8e06b60f2c972..0ad5d87b5a8e57d6a093594476772ce8be3a6143 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -635,10 +635,10 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm, unsigned long c, i; int ret = -ENOMEM; - args.src = kcalloc(max, sizeof(args.src), GFP_KERNEL); + args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL); if (!args.src) goto out; - args.dst = kcalloc(max, sizeof(args.dst), GFP_KERNEL); + args.dst = kcalloc(max, sizeof(*args.dst), GFP_KERNEL); if (!args.dst) goto out_free_src; diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 2cd83849600f34b2ff22ca9990af042e4ad8a16b..b65ae817eabf5fde0d178fb96f759b28d2dd2692 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -715,7 +715,6 @@ fail_nvkm: void 
nouveau_drm_device_remove(struct drm_device *dev) { - struct pci_dev *pdev = dev->pdev; struct nouveau_drm *drm = nouveau_drm(dev); struct nvkm_client *client; struct nvkm_device *device; @@ -727,7 +726,6 @@ nouveau_drm_device_remove(struct drm_device *dev) device = nvkm_device_find(client->device); nouveau_drm_device_fini(dev); - pci_disable_device(pdev); drm_dev_put(dev); nvkm_device_del(&device); } @@ -738,6 +736,7 @@ nouveau_drm_remove(struct pci_dev *pdev) struct drm_device *dev = pci_get_drvdata(pdev); nouveau_drm_device_remove(dev); + pci_disable_device(pdev); } static int diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index da8c46e0994316b11612227aef899d8f96d240d1..c2c332fbde9793c6c93be0b53b920c1359d9bf0f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -58,6 +58,8 @@ #include #include +#include + #include "uapi/drm/nouveau_drm.h" struct nouveau_channel; @@ -211,6 +213,11 @@ struct nouveau_drm { struct nouveau_svm *svm; struct nouveau_dmem *dmem; + + struct { + struct drm_audio_component *component; + bool component_registered; + } audio; }; static inline struct nouveau_drm * diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 9118df035b28da6fb00e5c51b12fc86d5f4d2e85..666f2090d92b269b5826d5a5256873cd32620ed1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -87,7 +87,7 @@ nouveau_local_fence(struct dma_fence *fence, struct nouveau_drm *drm) } void -nouveau_fence_context_del(struct nouveau_fence_chan *fctx) +nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error) { struct nouveau_fence *fence; @@ -95,11 +95,19 @@ nouveau_fence_context_del(struct nouveau_fence_chan *fctx) while (!list_empty(&fctx->pending)) { fence = list_entry(fctx->pending.next, typeof(*fence), head); + if (error) + dma_fence_set_error(&fence->base, error); + if (nouveau_fence_signal(fence)) nvif_notify_put(&fctx->notify); } spin_unlock_irq(&fctx->lock); +} +void +nouveau_fence_context_del(struct nouveau_fence_chan *fctx) +{ + nouveau_fence_context_kill(fctx, 0); nvif_notify_fini(&fctx->notify); fctx->dead = 1; @@ -156,7 +164,7 @@ nouveau_fence_wait_uevent_handler(struct nvif_notify *notify) fence = list_entry(fctx->pending.next, typeof(*fence), head); chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock)); - if (nouveau_fence_update(fence->channel, fctx)) + if (nouveau_fence_update(chan, fctx)) ret = NVIF_NOTIFY_DROP; } spin_unlock_irqrestore(&fctx->lock, flags); diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h index c9e24baaaa4f9b9bb39cbb18c82e9fd15e23ca80..4887caa69c6577256a188e34450690c5a759686d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.h +++ b/drivers/gpu/drm/nouveau/nouveau_fence.h @@ -63,6 +63,7 @@ struct nouveau_fence_priv { void nouveau_fence_context_new(struct nouveau_channel *, struct nouveau_fence_chan *); void nouveau_fence_context_del(struct nouveau_fence_chan *); void nouveau_fence_context_free(struct nouveau_fence_chan *); +void nouveau_fence_context_kill(struct nouveau_fence_chan *, int error); int nv04_fence_create(struct nouveau_drm *); int nv04_fence_mthd(struct nouveau_channel *, u32, u32, u32); diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 05ec8edd6a8b787213b3c2b4628c4aad33d360a4..f5ece1f9497348a892ca8a7ddc97354df409d60b 100644 --- 
a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -688,7 +688,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, struct validate_op op; struct nouveau_fence *fence = NULL; int i, j, ret = 0; - bool do_reloc = false; + bool do_reloc = false, sync = false; if (unlikely(!abi16)) return -ENOMEM; @@ -702,6 +702,10 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, if (!chan) return nouveau_abi16_put(abi16, -ENOENT); + if (unlikely(atomic_read(&chan->killed))) + return nouveau_abi16_put(abi16, -ENODEV); + + sync = req->vram_available & NOUVEAU_GEM_PUSHBUF_SYNC; req->vram_available = drm->gem.vram_available; req->gart_available = drm->gem.gart_available; @@ -850,6 +854,13 @@ revalidate: goto out; } + if (sync) { + if (!(ret = nouveau_fence_wait(fence, false, false))) { + if ((ret = dma_fence_get_status(&fence->base)) == 1) + ret = 0; + } + } + out: validate_fini(&op, chan, fence, bo); nouveau_fence_unref(&fence); diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c index d445c6f3fecebb4c6775b413349972ccfc532917..1c3104d205713f78796e66a7744241639b28a572 100644 --- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c +++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c @@ -741,7 +741,7 @@ nouveau_hwmon_init(struct drm_device *dev) special_groups[i++] = &pwm_fan_sensor_group; } - special_groups[i] = 0; + special_groups[i] = NULL; hwmon_dev = hwmon_device_register_with_info(dev->dev, "nouveau", dev, &nouveau_chip_info, special_groups); diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index 77a0c6ad3cef55e12c04fb88499af5d6899ce506..7ca0a24985327ee59823cdf4a754fa7d0e73161c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -63,14 +63,12 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man, { struct nouveau_bo *nvbo = nouveau_bo(bo); struct nouveau_drm *drm = nouveau_bdev(bo->bdev); - struct nouveau_mem *mem; int ret; if (drm->client.device.info.ram_size == 0) return -ENOMEM; ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg); - mem = nouveau_mem(reg); if (ret) return ret; @@ -103,11 +101,9 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man, { struct nouveau_bo *nvbo = nouveau_bo(bo); struct nouveau_drm *drm = nouveau_bdev(bo->bdev); - struct nouveau_mem *mem; int ret; ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg); - mem = nouveau_mem(reg); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.c b/drivers/gpu/drm/nouveau/nouveau_vmm.c index 77061182a1cfc312a011c9b658e33c5e0eae6329..b28c7dc13ad6d083f7cbe0773f8d4a60186a838b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_vmm.c +++ b/drivers/gpu/drm/nouveau/nouveau_vmm.c @@ -69,8 +69,8 @@ nouveau_vma_del(struct nouveau_vma **pvma) } list_del(&vma->head); kfree(*pvma); - *pvma = NULL; } + *pvma = NULL; } int diff --git a/drivers/gpu/drm/nouveau/nvif/mmu.c b/drivers/gpu/drm/nouveau/nvif/mmu.c index 5641bda2046da6efb3d73cca0f5adde90ef13ae2..47efc408efa6a1d75e21c6f48ae65a76b428352c 100644 --- a/drivers/gpu/drm/nouveau/nvif/mmu.c +++ b/drivers/gpu/drm/nouveau/nvif/mmu.c @@ -121,6 +121,7 @@ nvif_mmu_init(struct nvif_object *parent, s32 oclass, struct nvif_mmu *mmu) kind, argc); if (ret == 0) memcpy(mmu->kind, kind->data, kind->count); + mmu->kind_inv = kind->kind_inv; kfree(kind); } diff --git a/drivers/gpu/drm/nouveau/nvkm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/Kbuild index 
b53de9ba8c7398f51d5c8dc033aa408a8c24ab3a..db3ade125fa99564b2aa3186a553dcf78a53c0bf 100644 --- a/drivers/gpu/drm/nouveau/nvkm/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/Kbuild @@ -1,5 +1,6 @@ # SPDX-License-Identifier: MIT include $(src)/nvkm/core/Kbuild +include $(src)/nvkm/nvfw/Kbuild include $(src)/nvkm/falcon/Kbuild include $(src)/nvkm/subdev/Kbuild include $(src)/nvkm/engine/Kbuild diff --git a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c index 092acdec2c39f77af23abfbb3481ac771be92374..8b25367917ca092fff8bc9aa87a3d5b48e44f788 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c @@ -22,6 +22,40 @@ #include #include +int +nvkm_firmware_load_name(const struct nvkm_subdev *subdev, const char *base, + const char *name, int ver, const struct firmware **pfw) +{ + char path[64]; + int ret; + + snprintf(path, sizeof(path), "%s%s", base, name); + ret = nvkm_firmware_get(subdev, path, ver, pfw); + if (ret < 0) + return ret; + + return 0; +} + +int +nvkm_firmware_load_blob(const struct nvkm_subdev *subdev, const char *base, + const char *name, int ver, struct nvkm_blob *blob) +{ + const struct firmware *fw; + int ret; + + ret = nvkm_firmware_load_name(subdev, base, name, ver, &fw); + if (ret == 0) { + blob->data = kmemdup(fw->data, fw->size, GFP_KERNEL); + blob->size = fw->size; + nvkm_firmware_put(fw); + if (!blob->data) + return -ENOMEM; + } + + return ret; +} + /** * nvkm_firmware_get - load firmware from the official nvidia/chip/ directory * @subdev subdevice that will use that firmware @@ -32,9 +66,8 @@ * Firmware files released by NVIDIA will always follow this format. */ int -nvkm_firmware_get_version(const struct nvkm_subdev *subdev, const char *fwname, - int min_version, int max_version, - const struct firmware **fw) +nvkm_firmware_get(const struct nvkm_subdev *subdev, const char *fwname, int ver, + const struct firmware **fw) { struct nvkm_device *device = subdev->device; char f[64]; @@ -50,31 +83,21 @@ nvkm_firmware_get_version(const struct nvkm_subdev *subdev, const char *fwname, cname[i] = tolower(cname[i]); } - for (i = max_version; i >= min_version; i--) { - if (i != 0) - snprintf(f, sizeof(f), "nvidia/%s/%s-%d.bin", cname, fwname, i); - else - snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname); - - if (!firmware_request_nowarn(fw, f, device->dev)) { - nvkm_debug(subdev, "firmware \"%s\" loaded\n", f); - return i; - } + if (ver != 0) + snprintf(f, sizeof(f), "nvidia/%s/%s-%d.bin", cname, fwname, ver); + else + snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname); - nvkm_debug(subdev, "firmware \"%s\" unavailable\n", f); + if (!firmware_request_nowarn(fw, f, device->dev)) { + nvkm_debug(subdev, "firmware \"%s\" loaded - %zu byte(s)\n", + f, (*fw)->size); + return 0; } - nvkm_error(subdev, "failed to load firmware \"%s\"", fwname); + nvkm_debug(subdev, "firmware \"%s\" unavailable\n", f); return -ENOENT; } -int -nvkm_firmware_get(const struct nvkm_subdev *subdev, const char *fwname, - const struct firmware **fw) -{ - return nvkm_firmware_get_version(subdev, fwname, 0, 0, fw); -} - /** * nvkm_firmware_put - release firmware loaded with nvkm_firmware_get */ diff --git a/drivers/gpu/drm/nouveau/nvkm/core/memory.c b/drivers/gpu/drm/nouveau/nvkm/core/memory.c index e85a08ecd9da578d51fd778f171ea07e55bf9bb1..4cc186262d3441fa051b8be0514ee453eb568d7b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/memory.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/memory.c @@ -91,8 +91,8 @@ 
nvkm_memory_tags_get(struct nvkm_memory *memory, struct nvkm_device *device, } refcount_set(&tags->refcount, 1); + *ptags = memory->tags = tags; mutex_unlock(&fb->subdev.mutex); - *ptags = tags; return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c index 245990de1e9031be825bd09a8bfe99d2380d2d3e..79a8f9d305c58d19c69a295cb37cb090ebf3179b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c @@ -30,6 +30,7 @@ static struct lock_class_key nvkm_subdev_lock_class[NVKM_SUBDEV_NR]; const char * nvkm_subdev_name[NVKM_SUBDEV_NR] = { + [NVKM_SUBDEV_ACR ] = "acr", [NVKM_SUBDEV_BAR ] = "bar", [NVKM_SUBDEV_VBIOS ] = "bios", [NVKM_SUBDEV_BUS ] = "bus", @@ -50,7 +51,6 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = { [NVKM_SUBDEV_MXM ] = "mxm", [NVKM_SUBDEV_PCI ] = "pci", [NVKM_SUBDEV_PMU ] = "pmu", - [NVKM_SUBDEV_SECBOOT ] = "secboot", [NVKM_SUBDEV_THERM ] = "therm", [NVKM_SUBDEV_TIMER ] = "tmr", [NVKM_SUBDEV_TOP ] = "top", diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index c3c7159f34112208f8db58f5b3c490c6a06e8cd9..c7d700916eae7356fa8f9e7e501c9a1efe255967 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -1987,6 +1987,8 @@ nv117_chipset = { .dma = gf119_dma_new, .fifo = gm107_fifo_new, .gr = gm107_gr_new, + .nvdec[0] = gm107_nvdec_new, + .nvenc[0] = gm107_nvenc_new, .sw = gf100_sw_new, }; @@ -2027,6 +2029,7 @@ nv118_chipset = { static const struct nvkm_device_chip nv120_chipset = { .name = "GM200", + .acr = gm200_acr_new, .bar = gm107_bar_new, .bios = nvkm_bios_new, .bus = gf100_bus_new, @@ -2045,7 +2048,6 @@ nv120_chipset = { .pci = gk104_pci_new, .pmu = gm107_pmu_new, .therm = gm200_therm_new, - .secboot = gm200_secboot_new, .timer = gk20a_timer_new, .top = gk104_top_new, .volt = gk104_volt_new, @@ -2056,12 +2058,16 @@ nv120_chipset = { .dma = gf119_dma_new, .fifo = gm200_fifo_new, .gr = gm200_gr_new, + .nvdec[0] = gm107_nvdec_new, + .nvenc[0] = gm107_nvenc_new, + .nvenc[1] = gm107_nvenc_new, .sw = gf100_sw_new, }; static const struct nvkm_device_chip nv124_chipset = { .name = "GM204", + .acr = gm200_acr_new, .bar = gm107_bar_new, .bios = nvkm_bios_new, .bus = gf100_bus_new, @@ -2080,7 +2086,6 @@ nv124_chipset = { .pci = gk104_pci_new, .pmu = gm107_pmu_new, .therm = gm200_therm_new, - .secboot = gm200_secboot_new, .timer = gk20a_timer_new, .top = gk104_top_new, .volt = gk104_volt_new, @@ -2091,12 +2096,16 @@ nv124_chipset = { .dma = gf119_dma_new, .fifo = gm200_fifo_new, .gr = gm200_gr_new, + .nvdec[0] = gm107_nvdec_new, + .nvenc[0] = gm107_nvenc_new, + .nvenc[1] = gm107_nvenc_new, .sw = gf100_sw_new, }; static const struct nvkm_device_chip nv126_chipset = { .name = "GM206", + .acr = gm200_acr_new, .bar = gm107_bar_new, .bios = nvkm_bios_new, .bus = gf100_bus_new, @@ -2115,7 +2124,6 @@ nv126_chipset = { .pci = gk104_pci_new, .pmu = gm107_pmu_new, .therm = gm200_therm_new, - .secboot = gm200_secboot_new, .timer = gk20a_timer_new, .top = gk104_top_new, .volt = gk104_volt_new, @@ -2126,12 +2134,15 @@ nv126_chipset = { .dma = gf119_dma_new, .fifo = gm200_fifo_new, .gr = gm200_gr_new, + .nvdec[0] = gm107_nvdec_new, + .nvenc[0] = gm107_nvenc_new, .sw = gf100_sw_new, }; static const struct nvkm_device_chip nv12b_chipset = { .name = "GM20B", + .acr = gm20b_acr_new, .bar = gm20b_bar_new, .bus = gf100_bus_new, .clk = gm20b_clk_new, @@ -2143,7 +2154,6 @@ nv12b_chipset = { .mc 
= gk20a_mc_new, .mmu = gm20b_mmu_new, .pmu = gm20b_pmu_new, - .secboot = gm20b_secboot_new, .timer = gk20a_timer_new, .top = gk104_top_new, .ce[2] = gm200_ce_new, @@ -2157,6 +2167,7 @@ nv12b_chipset = { static const struct nvkm_device_chip nv130_chipset = { .name = "GP100", + .acr = gm200_acr_new, .bar = gm107_bar_new, .bios = nvkm_bios_new, .bus = gf100_bus_new, @@ -2172,7 +2183,6 @@ nv130_chipset = { .mc = gp100_mc_new, .mmu = gp100_mmu_new, .therm = gp100_therm_new, - .secboot = gm200_secboot_new, .pci = gp100_pci_new, .pmu = gp100_pmu_new, .timer = gk20a_timer_new, @@ -2187,12 +2197,17 @@ nv130_chipset = { .disp = gp100_disp_new, .fifo = gp100_fifo_new, .gr = gp100_gr_new, + .nvdec[0] = gm107_nvdec_new, + .nvenc[0] = gm107_nvenc_new, + .nvenc[1] = gm107_nvenc_new, + .nvenc[2] = gm107_nvenc_new, .sw = gf100_sw_new, }; static const struct nvkm_device_chip nv132_chipset = { .name = "GP102", + .acr = gp102_acr_new, .bar = gm107_bar_new, .bios = nvkm_bios_new, .bus = gf100_bus_new, @@ -2208,7 +2223,6 @@ nv132_chipset = { .mc = gp100_mc_new, .mmu = gp100_mmu_new, .therm = gp100_therm_new, - .secboot = gp102_secboot_new, .pci = gp100_pci_new, .pmu = gp102_pmu_new, .timer = gk20a_timer_new, @@ -2221,7 +2235,9 @@ nv132_chipset = { .dma = gf119_dma_new, .fifo = gp100_fifo_new, .gr = gp102_gr_new, - .nvdec[0] = gp102_nvdec_new, + .nvdec[0] = gm107_nvdec_new, + .nvenc[0] = gm107_nvenc_new, + .nvenc[1] = gm107_nvenc_new, .sec2 = gp102_sec2_new, .sw = gf100_sw_new, }; @@ -2229,6 +2245,7 @@ nv132_chipset = { static const struct nvkm_device_chip nv134_chipset = { .name = "GP104", + .acr = gp102_acr_new, .bar = gm107_bar_new, .bios = nvkm_bios_new, .bus = gf100_bus_new, @@ -2244,7 +2261,6 @@ nv134_chipset = { .mc = gp100_mc_new, .mmu = gp100_mmu_new, .therm = gp100_therm_new, - .secboot = gp102_secboot_new, .pci = gp100_pci_new, .pmu = gp102_pmu_new, .timer = gk20a_timer_new, @@ -2257,7 +2273,9 @@ nv134_chipset = { .dma = gf119_dma_new, .fifo = gp100_fifo_new, .gr = gp104_gr_new, - .nvdec[0] = gp102_nvdec_new, + .nvdec[0] = gm107_nvdec_new, + .nvenc[0] = gm107_nvenc_new, + .nvenc[1] = gm107_nvenc_new, .sec2 = gp102_sec2_new, .sw = gf100_sw_new, }; @@ -2265,6 +2283,7 @@ nv134_chipset = { static const struct nvkm_device_chip nv136_chipset = { .name = "GP106", + .acr = gp102_acr_new, .bar = gm107_bar_new, .bios = nvkm_bios_new, .bus = gf100_bus_new, @@ -2280,7 +2299,6 @@ nv136_chipset = { .mc = gp100_mc_new, .mmu = gp100_mmu_new, .therm = gp100_therm_new, - .secboot = gp102_secboot_new, .pci = gp100_pci_new, .pmu = gp102_pmu_new, .timer = gk20a_timer_new, @@ -2293,7 +2311,8 @@ nv136_chipset = { .dma = gf119_dma_new, .fifo = gp100_fifo_new, .gr = gp104_gr_new, - .nvdec[0] = gp102_nvdec_new, + .nvdec[0] = gm107_nvdec_new, + .nvenc[0] = gm107_nvenc_new, .sec2 = gp102_sec2_new, .sw = gf100_sw_new, }; @@ -2301,6 +2320,7 @@ nv136_chipset = { static const struct nvkm_device_chip nv137_chipset = { .name = "GP107", + .acr = gp102_acr_new, .bar = gm107_bar_new, .bios = nvkm_bios_new, .bus = gf100_bus_new, @@ -2316,7 +2336,6 @@ nv137_chipset = { .mc = gp100_mc_new, .mmu = gp100_mmu_new, .therm = gp100_therm_new, - .secboot = gp102_secboot_new, .pci = gp100_pci_new, .pmu = gp102_pmu_new, .timer = gk20a_timer_new, @@ -2329,7 +2348,9 @@ nv137_chipset = { .dma = gf119_dma_new, .fifo = gp100_fifo_new, .gr = gp107_gr_new, - .nvdec[0] = gp102_nvdec_new, + .nvdec[0] = gm107_nvdec_new, + .nvenc[0] = gm107_nvenc_new, + .nvenc[1] = gm107_nvenc_new, .sec2 = gp102_sec2_new, .sw = gf100_sw_new, }; @@ -2337,6 +2358,7 @@ 
nv137_chipset = { static const struct nvkm_device_chip nv138_chipset = { .name = "GP108", + .acr = gp108_acr_new, .bar = gm107_bar_new, .bios = nvkm_bios_new, .bus = gf100_bus_new, @@ -2352,7 +2374,6 @@ nv138_chipset = { .mc = gp100_mc_new, .mmu = gp100_mmu_new, .therm = gp100_therm_new, - .secboot = gp108_secboot_new, .pci = gp100_pci_new, .pmu = gp102_pmu_new, .timer = gk20a_timer_new, @@ -2364,30 +2385,30 @@ nv138_chipset = { .disp = gp102_disp_new, .dma = gf119_dma_new, .fifo = gp100_fifo_new, - .gr = gp107_gr_new, - .nvdec[0] = gp102_nvdec_new, - .sec2 = gp102_sec2_new, + .gr = gp108_gr_new, + .nvdec[0] = gm107_nvdec_new, + .sec2 = gp108_sec2_new, .sw = gf100_sw_new, }; static const struct nvkm_device_chip nv13b_chipset = { .name = "GP10B", + .acr = gp10b_acr_new, .bar = gm20b_bar_new, .bus = gf100_bus_new, - .fault = gp100_fault_new, + .fault = gp10b_fault_new, .fb = gp10b_fb_new, .fuse = gm107_fuse_new, .ibus = gp10b_ibus_new, .imem = gk20a_instmem_new, - .ltc = gp102_ltc_new, + .ltc = gp10b_ltc_new, .mc = gp10b_mc_new, .mmu = gp10b_mmu_new, - .secboot = gp10b_secboot_new, - .pmu = gm20b_pmu_new, + .pmu = gp10b_pmu_new, .timer = gk20a_timer_new, .top = gk104_top_new, - .ce[2] = gp102_ce_new, + .ce[0] = gp100_ce_new, .dma = gf119_dma_new, .fifo = gp10b_fifo_new, .gr = gp10b_gr_new, @@ -2397,6 +2418,7 @@ nv13b_chipset = { static const struct nvkm_device_chip nv140_chipset = { .name = "GV100", + .acr = gp108_acr_new, .bar = gm107_bar_new, .bios = nvkm_bios_new, .bus = gf100_bus_new, @@ -2414,7 +2436,6 @@ nv140_chipset = { .mmu = gv100_mmu_new, .pci = gp100_pci_new, .pmu = gp102_pmu_new, - .secboot = gp108_secboot_new, .therm = gp100_therm_new, .timer = gk20a_timer_new, .top = gk104_top_new, @@ -2431,13 +2452,17 @@ nv140_chipset = { .dma = gv100_dma_new, .fifo = gv100_fifo_new, .gr = gv100_gr_new, - .nvdec[0] = gp102_nvdec_new, - .sec2 = gp102_sec2_new, + .nvdec[0] = gm107_nvdec_new, + .nvenc[0] = gm107_nvenc_new, + .nvenc[1] = gm107_nvenc_new, + .nvenc[2] = gm107_nvenc_new, + .sec2 = gp108_sec2_new, }; static const struct nvkm_device_chip nv162_chipset = { .name = "TU102", + .acr = tu102_acr_new, .bar = tu102_bar_new, .bios = nvkm_bios_new, .bus = gf100_bus_new, @@ -2466,13 +2491,16 @@ nv162_chipset = { .disp = tu102_disp_new, .dma = gv100_dma_new, .fifo = tu102_fifo_new, - .nvdec[0] = gp102_nvdec_new, + .gr = tu102_gr_new, + .nvdec[0] = gm107_nvdec_new, + .nvenc[0] = gm107_nvenc_new, .sec2 = tu102_sec2_new, }; static const struct nvkm_device_chip nv164_chipset = { .name = "TU104", + .acr = tu102_acr_new, .bar = tu102_bar_new, .bios = nvkm_bios_new, .bus = gf100_bus_new, @@ -2501,13 +2529,17 @@ nv164_chipset = { .disp = tu102_disp_new, .dma = gv100_dma_new, .fifo = tu102_fifo_new, - .nvdec[0] = gp102_nvdec_new, + .gr = tu102_gr_new, + .nvdec[0] = gm107_nvdec_new, + .nvdec[1] = gm107_nvdec_new, + .nvenc[0] = gm107_nvenc_new, .sec2 = tu102_sec2_new, }; static const struct nvkm_device_chip nv166_chipset = { .name = "TU106", + .acr = tu102_acr_new, .bar = tu102_bar_new, .bios = nvkm_bios_new, .bus = gf100_bus_new, @@ -2536,7 +2568,11 @@ nv166_chipset = { .disp = tu102_disp_new, .dma = gv100_dma_new, .fifo = tu102_fifo_new, - .nvdec[0] = gp102_nvdec_new, + .gr = tu102_gr_new, + .nvdec[0] = gm107_nvdec_new, + .nvdec[1] = gm107_nvdec_new, + .nvdec[2] = gm107_nvdec_new, + .nvenc[0] = gm107_nvenc_new, .sec2 = tu102_sec2_new, }; @@ -2571,7 +2607,8 @@ nv167_chipset = { .disp = tu102_disp_new, .dma = gv100_dma_new, .fifo = tu102_fifo_new, - .nvdec[0] = gp102_nvdec_new, + .nvdec[0] = 
gm107_nvdec_new, + .nvenc[0] = gm107_nvenc_new, .sec2 = tu102_sec2_new, }; @@ -2606,7 +2643,8 @@ nv168_chipset = { .disp = tu102_disp_new, .dma = gv100_dma_new, .fifo = tu102_fifo_new, - .nvdec[0] = gp102_nvdec_new, + .nvdec[0] = gm107_nvdec_new, + .nvenc[0] = gm107_nvenc_new, .sec2 = tu102_sec2_new, }; @@ -2638,6 +2676,7 @@ nvkm_device_subdev(struct nvkm_device *device, int index) switch (index) { #define _(n,p,m) case NVKM_SUBDEV_##n: if (p) return (m); break + _(ACR , device->acr , &device->acr->subdev); _(BAR , device->bar , &device->bar->subdev); _(VBIOS , device->bios , &device->bios->subdev); _(BUS , device->bus , &device->bus->subdev); @@ -2658,7 +2697,6 @@ nvkm_device_subdev(struct nvkm_device *device, int index) _(MXM , device->mxm , device->mxm); _(PCI , device->pci , &device->pci->subdev); _(PMU , device->pmu , &device->pmu->subdev); - _(SECBOOT , device->secboot , &device->secboot->subdev); _(THERM , device->therm , &device->therm->subdev); _(TIMER , device->timer , &device->timer->subdev); _(TOP , device->top , &device->top->subdev); @@ -2703,9 +2741,9 @@ nvkm_device_engine(struct nvkm_device *device, int index) _(MSPDEC , device->mspdec , device->mspdec); _(MSPPP , device->msppp , device->msppp); _(MSVLD , device->msvld , device->msvld); - _(NVENC0 , device->nvenc[0], device->nvenc[0]); - _(NVENC1 , device->nvenc[1], device->nvenc[1]); - _(NVENC2 , device->nvenc[2], device->nvenc[2]); + _(NVENC0 , device->nvenc[0], &device->nvenc[0]->engine); + _(NVENC1 , device->nvenc[1], &device->nvenc[1]->engine); + _(NVENC2 , device->nvenc[2], &device->nvenc[2]->engine); _(NVDEC0 , device->nvdec[0], &device->nvdec[0]->engine); _(NVDEC1 , device->nvdec[1], &device->nvdec[1]->engine); _(NVDEC2 , device->nvdec[2], &device->nvdec[2]->engine); @@ -3144,6 +3182,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func, } \ break switch (i) { + _(NVKM_SUBDEV_ACR , acr); _(NVKM_SUBDEV_BAR , bar); _(NVKM_SUBDEV_VBIOS , bios); _(NVKM_SUBDEV_BUS , bus); @@ -3164,7 +3203,6 @@ nvkm_device_ctor(const struct nvkm_device_func *func, _(NVKM_SUBDEV_MXM , mxm); _(NVKM_SUBDEV_PCI , pci); _(NVKM_SUBDEV_PMU , pmu); - _(NVKM_SUBDEV_SECBOOT , secboot); _(NVKM_SUBDEV_THERM , therm); _(NVKM_SUBDEV_TIMER , timer); _(NVKM_SUBDEV_TOP , top); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h index d8be2f77ac66338dd8466dcb7ee2ab1d806144a6..54eab5e042307c3b6d8df3af5df668e7d138f866 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h @@ -3,6 +3,7 @@ #define __NVKM_DEVICE_PRIV_H__ #include +#include #include #include #include @@ -27,7 +28,6 @@ #include #include #include -#include #include #include diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c index 0e372a190d3f111fc982aba65a56294b648fa368..d0d52c1d4aee05868a6b2c89f010828c325ff010 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c @@ -52,18 +52,18 @@ nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev) clk_set_rate(tdev->clk_pwr, 204000000); udelay(10); - reset_control_assert(tdev->rst); - udelay(10); - if (!tdev->pdev->dev.pm_domain) { + reset_control_assert(tdev->rst); + udelay(10); + ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D); if (ret) goto err_clamp; udelay(10); - } - reset_control_deassert(tdev->rst); - udelay(10); + reset_control_deassert(tdev->rst); + udelay(10); + } return 0; 
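A note on the nvkm_device_tegra_power_up() hunk just above: the reset pulse and rail unclamping now happen only on the legacy path where no PM domain is attached, presumably because an attached power domain already sequences reset as part of powering the GPU partition, and pulsing reset a second time from the driver is undesirable. A condensed sketch of the resulting flow, not the verbatim driver code (clock and regulator setup are omitted; err_clamp is the function's existing unwind label):

	if (!tdev->pdev->dev.pm_domain) {
		/* hold the GPU in reset while the rail is unclamped */
		reset_control_assert(tdev->rst);
		udelay(10);

		ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
		if (ret)
			goto err_clamp;
		udelay(10);

		/* release reset only once power is stable */
		reset_control_deassert(tdev->rst);
		udelay(10);
	}

	return 0;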
@@ -279,6 +279,7 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func, struct nvkm_device **pdevice) { struct nvkm_device_tegra *tdev; + unsigned long rate; int ret; if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL))) @@ -307,6 +308,17 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func, goto free; } + rate = clk_get_rate(tdev->clk); + if (rate == 0) { + ret = clk_set_rate(tdev->clk, ULONG_MAX); + if (ret < 0) + goto free; + + rate = clk_get_rate(tdev->clk); + + dev_dbg(&pdev->dev, "GPU clock set to %lu\n", rate); + } + if (func->require_ref_clk) tdev->clk_ref = devm_clk_get(&pdev->dev, "ref"); if (IS_ERR(tdev->clk_ref)) { diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c index bcf32d92ee5a9ec28de558715566e5eccc89432a..50e3539f33d22daa631e90a0bbcc8236f9899ff5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c @@ -74,6 +74,8 @@ nv50_disp_chan_mthd(struct nv50_disp_chan *chan, int debug) if (debug > subdev->debug) return; + if (!mthd) + return; for (i = 0; (list = mthd->data[i].mthd) != NULL; i++) { u32 base = chan->head * mthd->addr; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c index 818d21bd28d3122898f11d88c87bc0fec348c5ee..3800aeb507d0167d4e7bb41210c6fb5fd925cdc2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c @@ -365,7 +365,7 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps) * and it's better to have a failed modeset than that. */ for (cfg = nvkm_dp_rates; cfg->rate; cfg++) { - if (cfg->nr <= outp_nr && cfg->nr <= outp_bw) { + if (cfg->nr <= outp_nr && cfg->bw <= outp_bw) { /* Try to respect sink limits too when selecting * lowest link configuration. */ diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c index 892be6c9b76ca9de64238d1e35567cbd13a65a65..c1032527f79111aaa237cfd51e33b01f574eebf4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c @@ -101,15 +101,26 @@ gv100_disp_exception(struct nv50_disp *disp, int chid) u32 stat = nvkm_rd32(device, 0x611020 + (chid * 12)); u32 type = (stat & 0x00007000) >> 12; u32 mthd = (stat & 0x00000fff) << 2; - u32 data = nvkm_rd32(device, 0x611024 + (chid * 12)); - u32 code = nvkm_rd32(device, 0x611028 + (chid * 12)); const struct nvkm_enum *reason = nvkm_enum_find(nv50_disp_intr_error_type, type); - nvkm_error(subdev, "chid %d stat %08x reason %d [%s] mthd %04x " - "data %08x code %08x\n", - chid, stat, type, reason ? reason->name : "", - mthd, data, code); + /*TODO: Suspect 33->41 are for WRBK channel exceptions, but we + * don't support those currently. + * + * CORE+WIN CHIDs map directly to the FE_EXCEPT() slots. + */ + if (chid <= 32) { + u32 data = nvkm_rd32(device, 0x611024 + (chid * 12)); + u32 code = nvkm_rd32(device, 0x611028 + (chid * 12)); + nvkm_error(subdev, "chid %d stat %08x reason %d [%s] " + "mthd %04x data %08x code %08x\n", + chid, stat, type, reason ? reason->name : "", + mthd, data, code); + } else { + nvkm_error(subdev, "chid %d stat %08x reason %d [%s] " + "mthd %04x\n", + chid, stat, type, reason ? 
reason->name : "", mthd); + } if (chid < ARRAY_SIZE(disp->chan) && disp->chan[chid]) { switch (mthd) { @@ -144,6 +155,12 @@ gv100_disp_intr_ctrl_disp(struct nv50_disp *disp) if (stat & 0x00000008) stat &= ~0x00000008; + if (stat & 0x00000080) { + u32 error = nvkm_mask(device, 0x611848, 0x00000000, 0x00000000); + nvkm_warn(subdev, "error %08x\n", error); + stat &= ~0x00000080; + } + if (stat & 0x00000100) { unsigned long wndws = nvkm_rd32(device, 0x611858); unsigned long other = nvkm_rd32(device, 0x61185c); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild index 73724a8cb861c2a00b6e3568e17a42ebc21d19f7..558c86fd8e82e8cbdf11e2cbed5a10d7cc6a4e94 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild @@ -36,8 +36,10 @@ nvkm-y += nvkm/engine/gr/gp100.o nvkm-y += nvkm/engine/gr/gp102.o nvkm-y += nvkm/engine/gr/gp104.o nvkm-y += nvkm/engine/gr/gp107.o +nvkm-y += nvkm/engine/gr/gp108.o nvkm-y += nvkm/engine/gr/gp10b.o nvkm-y += nvkm/engine/gr/gv100.o +nvkm-y += nvkm/engine/gr/tu102.o nvkm-y += nvkm/engine/gr/ctxnv40.o nvkm-y += nvkm/engine/gr/ctxnv50.o @@ -60,3 +62,4 @@ nvkm-y += nvkm/engine/gr/ctxgp102.o nvkm-y += nvkm/engine/gr/ctxgp104.o nvkm-y += nvkm/engine/gr/ctxgp107.o nvkm-y += nvkm/engine/gr/ctxgv100.o +nvkm-y += nvkm/engine/gr/ctxtu102.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c index 85f2d1e950e82dbdff10b588b79bb1246dccf08e..297915719bf26bc6cdd5ce69d5a9c6c1ba43f76d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c @@ -1324,10 +1324,8 @@ gf100_grctx_generate_sm_id(struct gf100_gr *gr, int gpc, int tpc, int sm) void gf100_grctx_generate_floorsweep(struct gf100_gr *gr) { - struct nvkm_device *device = gr->base.engine.subdev.device; const struct gf100_grctx_func *func = gr->func->grctx; - int gpc, sm, i, j; - u32 data; + int sm; for (sm = 0; sm < gr->sm_nr; sm++) { func->sm_id(gr, gr->sm[sm].gpc, gr->sm[sm].tpc, sm); @@ -1335,12 +1333,9 @@ gf100_grctx_generate_floorsweep(struct gf100_gr *gr) func->tpc_nr(gr, gr->sm[sm].gpc); } - for (gpc = 0, i = 0; i < 4; i++) { - for (data = 0, j = 0; j < 8 && gpc < gr->gpc_nr; j++, gpc++) - data |= gr->tpc_nr[gpc] << (j * 4); - nvkm_wr32(device, 0x406028 + (i * 4), data); - nvkm_wr32(device, 0x405870 + (i * 4), data); - } + gf100_gr_init_num_tpc_per_gpc(gr, false, true); + if (!func->skip_pd_num_tpc_per_gpc) + gf100_gr_init_num_tpc_per_gpc(gr, true, false); if (func->r4060a8) func->r4060a8(gr); @@ -1374,7 +1369,7 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) nvkm_mc_unk260(device, 0); - if (!gr->fuc_sw_ctx) { + if (!gr->sw_ctx) { gf100_gr_mmio(gr, grctx->hub); gf100_gr_mmio(gr, grctx->gpc_0); gf100_gr_mmio(gr, grctx->zcull); @@ -1382,7 +1377,7 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) gf100_gr_mmio(gr, grctx->tpc); gf100_gr_mmio(gr, grctx->ppc); } else { - gf100_gr_mmio(gr, gr->fuc_sw_ctx); + gf100_gr_mmio(gr, gr->sw_ctx); } gf100_gr_wait_idle(gr); @@ -1401,8 +1396,8 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) gf100_gr_wait_idle(gr); if (grctx->r400088) grctx->r400088(gr, false); - if (gr->fuc_bundle) - gf100_gr_icmd(gr, gr->fuc_bundle); + if (gr->bundle) + gf100_gr_icmd(gr, gr->bundle); else gf100_gr_icmd(gr, grctx->icmd); if (grctx->sw_veid_bundle_init) @@ -1411,8 +1406,8 @@ gf100_grctx_generate_main(struct 
gf100_gr *gr, struct gf100_grctx *info) nvkm_wr32(device, 0x404154, idle_timeout); - if (gr->fuc_method) - gf100_gr_mthd(gr, gr->fuc_method); + if (gr->method) + gf100_gr_mthd(gr, gr->method); else gf100_gr_mthd(gr, grctx->mthd); nvkm_mc_unk260(device, 1); @@ -1431,6 +1426,8 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) grctx->r419a3c(gr); if (grctx->r408840) grctx->r408840(gr); + if (grctx->r419c0c) + grctx->r419c0c(gr); } #define CB_RESERVED 0x80000 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h index 478b4723d0f9cfdf8a2c53eb37c428383d56a1c5..32bbddc0993e8a455b2583161cb82bf2bd2d16ca 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h @@ -57,6 +57,7 @@ struct gf100_grctx_func { /* floorsweeping */ void (*sm_id)(struct gf100_gr *, int gpc, int tpc, int sm); void (*tpc_nr)(struct gf100_gr *, int gpc); + bool skip_pd_num_tpc_per_gpc; void (*r4060a8)(struct gf100_gr *); void (*rop_mapping)(struct gf100_gr *); void (*alpha_beta_tables)(struct gf100_gr *); @@ -76,6 +77,7 @@ struct gf100_grctx_func { void (*r418e94)(struct gf100_gr *); void (*r419a3c)(struct gf100_gr *); void (*r408840)(struct gf100_gr *); + void (*r419c0c)(struct gf100_gr *); }; extern const struct gf100_grctx_func gf100_grctx; @@ -153,6 +155,14 @@ extern const struct gf100_grctx_func gp107_grctx; extern const struct gf100_grctx_func gv100_grctx; +extern const struct gf100_grctx_func tu102_grctx; +void gv100_grctx_unkn88c(struct gf100_gr *, bool); +void gv100_grctx_generate_unkn(struct gf100_gr *); +extern const struct gf100_gr_init gv100_grctx_init_sw_veid_bundle_init_0[]; +void gv100_grctx_generate_attrib(struct gf100_grctx *); +void gv100_grctx_generate_rop_mapping(struct gf100_gr *); +void gv100_grctx_generate_r400088(struct gf100_gr *, bool); + /* context init value lists */ extern const struct gf100_gr_pack gf100_grctx_pack_icmd[]; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c index 896d473dcc0f5380937efe9057598de14bfcb0f3..c0d36bc601f9b44d94e0116ade25e7772fa8e3cf 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c @@ -32,7 +32,7 @@ gk20a_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) u32 idle_timeout; int i; - gf100_gr_mmio(gr, gr->fuc_sw_ctx); + gf100_gr_mmio(gr, gr->sw_ctx); gf100_gr_wait_idle(gr); @@ -56,10 +56,10 @@ gk20a_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) nvkm_wr32(device, 0x404154, idle_timeout); gf100_gr_wait_idle(gr); - gf100_gr_mthd(gr, gr->fuc_method); + gf100_gr_mthd(gr, gr->method); gf100_gr_wait_idle(gr); - gf100_gr_icmd(gr, gr->fuc_bundle); + gf100_gr_icmd(gr, gr->bundle); grctx->pagepool(info); grctx->bundle(info); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c index a1d9e114ebeb0fa9e922b5f8927b23b06118f86c..6b92f8aa18a3599d660192fe3afc7d78c4412a51 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c @@ -29,7 +29,7 @@ gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) u32 idle_timeout; int i, tmp; - gf100_gr_mmio(gr, gr->fuc_sw_ctx); + gf100_gr_mmio(gr, gr->sw_ctx); gf100_gr_wait_idle(gr); @@ -59,10 +59,10 @@ gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) nvkm_wr32(device, 
0x404154, idle_timeout); gf100_gr_wait_idle(gr); - gf100_gr_mthd(gr, gr->fuc_method); + gf100_gr_mthd(gr, gr->method); gf100_gr_wait_idle(gr); - gf100_gr_icmd(gr, gr->fuc_bundle); + gf100_gr_icmd(gr, gr->bundle); grctx->pagepool(info); grctx->bundle(info); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c index 0990765ef1916f3de1a828ef015b649c677dfc34..39553d55d3f3a325d99d0769472471cf08f757af 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c @@ -25,7 +25,7 @@ * PGRAPH context implementation ******************************************************************************/ -static const struct gf100_gr_init +const struct gf100_gr_init gv100_grctx_init_sw_veid_bundle_init_0[] = { { 0x00001000, 64, 0x00100000, 0x00000008 }, { 0x00000941, 64, 0x00100000, 0x00000000 }, @@ -58,7 +58,7 @@ gv100_grctx_pack_sw_veid_bundle_init[] = { {} }; -static void +void gv100_grctx_generate_attrib(struct gf100_grctx *info) { struct gf100_gr *gr = info->gr; @@ -67,14 +67,14 @@ gv100_grctx_generate_attrib(struct gf100_grctx *info) const u32 attrib = grctx->attrib_nr; const u32 gfxp = grctx->gfxp_nr; const int s = 12; - const int max_batches = 0xffff; u32 size = grctx->alpha_nr_max * gr->tpc_total; u32 ao = 0; u32 bo = ao + size; int gpc, ppc, b, n = 0; - size += grctx->gfxp_nr * gr->tpc_total; - size = ((size * 0x20) + 128) & ~127; + for (gpc = 0; gpc < gr->gpc_nr; gpc++) + size += grctx->gfxp_nr * gr->ppc_nr[gpc] * gr->ppc_tpc_max; + size = ((size * 0x20) + 127) & ~127; b = mmio_vram(info, size, (1 << s), false); mmio_refn(info, 0x418810, 0x80000000, s, b); @@ -84,13 +84,12 @@ gv100_grctx_generate_attrib(struct gf100_grctx *info) mmio_wr32(info, 0x419e04, 0x80000000 | size >> 7); mmio_wr32(info, 0x405830, attrib); mmio_wr32(info, 0x40585c, alpha); - mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches); for (gpc = 0; gpc < gr->gpc_nr; gpc++) { for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) { const u32 as = alpha * gr->ppc_tpc_nr[gpc][ppc]; - const u32 bs = attrib * gr->ppc_tpc_nr[gpc][ppc]; - const u32 gs = gfxp * gr->ppc_tpc_nr[gpc][ppc]; + const u32 bs = attrib * gr->ppc_tpc_max; + const u32 gs = gfxp * gr->ppc_tpc_max; const u32 u = 0x418ea0 + (n * 0x04); const u32 o = PPC_UNIT(gpc, ppc, 0); if (!(gr->ppc_mask[gpc] & (1 << ppc))) @@ -110,7 +109,7 @@ gv100_grctx_generate_attrib(struct gf100_grctx *info) mmio_wr32(info, 0x41befc, 0x00000100); } -static void +void gv100_grctx_generate_rop_mapping(struct gf100_gr *gr) { struct nvkm_device *device = gr->base.engine.subdev.device; @@ -147,7 +146,7 @@ gv100_grctx_generate_rop_mapping(struct gf100_gr *gr) gr->screen_tile_row_offset); } -static void +void gv100_grctx_generate_r400088(struct gf100_gr *gr, bool on) { struct nvkm_device *device = gr->base.engine.subdev.device; @@ -163,7 +162,7 @@ gv100_grctx_generate_sm_id(struct gf100_gr *gr, int gpc, int tpc, int sm) nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), sm); } -static void +void gv100_grctx_generate_unkn(struct gf100_gr *gr) { struct nvkm_device *device = gr->base.engine.subdev.device; @@ -174,7 +173,7 @@ gv100_grctx_generate_unkn(struct gf100_gr *gr) nvkm_mask(device, 0x419c00, 0x00000008, 0x00000008); } -static void +void gv100_grctx_unkn88c(struct gf100_gr *gr, bool on) { struct nvkm_device *device = gr->base.engine.subdev.device; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxtu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxtu102.c new file mode 100644 
index 0000000000000000000000000000000000000000..2299ca07d04ada633a8ded5d1ba335b68e186e51 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxtu102.c @@ -0,0 +1,95 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "ctxgf100.h" + +static void +tu102_grctx_generate_r419c0c(struct gf100_gr *gr) +{ + struct nvkm_device *device = gr->base.engine.subdev.device; + nvkm_mask(device, 0x419c0c, 0x80000000, 0x80000000); + nvkm_mask(device, 0x40584c, 0x00000008, 0x00000000); + nvkm_mask(device, 0x400080, 0x00000000, 0x00000000); +} + +static void +tu102_grctx_generate_sm_id(struct gf100_gr *gr, int gpc, int tpc, int sm) +{ + struct nvkm_device *device = gr->base.engine.subdev.device; + nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x608), sm); + nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), sm); +} + +static const struct gf100_gr_init +tu102_grctx_init_unknown_bundle_init_0[] = { + { 0x00001000, 1, 0x00000001, 0x00000004 }, + { 0x00002020, 64, 0x00000001, 0x00000000 }, + { 0x0001e100, 1, 0x00000001, 0x00000001 }, + {} +}; + +static const struct gf100_gr_pack +tu102_grctx_pack_sw_veid_bundle_init[] = { + { gv100_grctx_init_sw_veid_bundle_init_0 }, + { tu102_grctx_init_unknown_bundle_init_0 }, + {} +}; + +static void +tu102_grctx_generate_attrib(struct gf100_grctx *info) +{ + const u64 size = 0x80000; /*XXX: educated guess */ + const int s = 8; + const int b = mmio_vram(info, size, (1 << s), true); + + gv100_grctx_generate_attrib(info); + + mmio_refn(info, 0x408070, 0x00000000, s, b); + mmio_wr32(info, 0x408074, size >> s); /*XXX: guess */ + mmio_refn(info, 0x419034, 0x00000000, s, b); + mmio_wr32(info, 0x408078, 0x00000000); +} + +const struct gf100_grctx_func +tu102_grctx = { + .unkn88c = gv100_grctx_unkn88c, + .main = gf100_grctx_generate_main, + .unkn = gv100_grctx_generate_unkn, + .sw_veid_bundle_init = tu102_grctx_pack_sw_veid_bundle_init, + .bundle = gm107_grctx_generate_bundle, + .bundle_size = 0x3000, + .bundle_min_gpm_fifo_depth = 0x180, + .bundle_token_limit = 0xa80, + .pagepool = gp100_grctx_generate_pagepool, + .pagepool_size = 0x20000, + .attrib = tu102_grctx_generate_attrib, + .attrib_nr_max = 0x800, + .attrib_nr = 0x700, + .alpha_nr_max = 0xc00, + .alpha_nr = 0x800, + .gfxp_nr = 0xfa8, + .sm_id = tu102_grctx_generate_sm_id, + .skip_pd_num_tpc_per_gpc = true, + .rop_mapping = gv100_grctx_generate_rop_mapping, + .r406500 = gm200_grctx_generate_r406500, + .r400088 = gv100_grctx_generate_r400088, + .r419c0c = 
tu102_grctx_generate_r419c0c, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h index c24f35ad56a6e988a5117ae2e3db4497b4ab5a87..ae2d5b6891cbad7edfc84b1f391b76dde012d6dd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h @@ -441,7 +441,7 @@ static uint32_t gk208_grhub_code[] = { 0x020014fe, 0x12004002, 0xbd0002f6, - 0x05c94104, + 0x05ca4104, 0xbd0010fe, 0x07004024, 0xbd0002f6, @@ -460,423 +460,423 @@ static uint32_t gk208_grhub_code[] = { 0x01039204, 0x03090080, 0xbd0003f6, - 0x87044204, - 0xf6040040, - 0x04bd0002, - 0x00400402, - 0x0002f603, - 0x31f404bd, - 0x96048e10, - 0x00657e40, - 0xc7feb200, - 0x01b590f1, - 0x1ff4f003, - 0x01020fb5, - 0x041fbb01, - 0x800112b6, - 0xf6010300, - 0x04bd0001, - 0x01040080, + 0x87048204, + 0x04004000, + 0xbd0002f6, + 0x40040204, + 0x02f60300, + 0xf404bd00, + 0x048e1031, + 0x657e4096, + 0xfeb20000, + 0xb590f1c7, + 0xf4f00301, + 0x020fb51f, + 0x1fbb0101, + 0x0112b604, + 0x01030080, 0xbd0001f6, - 0x01004104, - 0xac7e020f, - 0xbb7e0006, - 0x100f0006, - 0x0006fd7e, - 0x98000e98, - 0x207e010f, - 0x14950001, - 0xc0008008, - 0x0004f601, - 0x008004bd, - 0x04f601c1, - 0xb704bd00, - 0xbb130030, - 0xf5b6001f, - 0xd3008002, - 0x000ff601, - 0x15b604bd, - 0x0110b608, - 0xb20814b6, - 0x02687e1f, - 0x001fbb00, - 0x84020398, -/* 0x041f: init_gpc */ - 0xb8502000, - 0x0008044e, - 0x8f7e1fb2, + 0x04008004, + 0x0001f601, + 0x004104bd, + 0x7e020f01, + 0x7e0006ad, + 0x0f0006bc, + 0x06fe7e10, + 0x000e9800, + 0x7e010f98, + 0x95000120, + 0x00800814, + 0x04f601c0, + 0x8004bd00, + 0xf601c100, + 0x04bd0004, + 0x130030b7, + 0xb6001fbb, + 0x008002f5, + 0x0ff601d3, + 0xb604bd00, + 0x10b60815, + 0x0814b601, + 0x687e1fb2, + 0x1fbb0002, + 0x02039800, + 0x50200084, +/* 0x0420: init_gpc */ + 0x08044eb8, + 0x7e1fb200, + 0xb800008f, + 0x00010c4e, + 0x8f7ef4bd, 0x4eb80000, - 0xbd00010c, - 0x008f7ef4, - 0x044eb800, - 0x8f7e0001, + 0x7e000104, + 0xb800008f, + 0x0001004e, + 0x8f7e020f, 0x4eb80000, - 0x0f000100, - 0x008f7e02, - 0x004eb800, -/* 0x044e: init_gpc_wait */ +/* 0x044f: init_gpc_wait */ + 0x7e000800, + 0xc8000065, + 0x0bf41fff, + 0x044eb8f9, 0x657e0008, - 0xffc80000, - 0xf90bf41f, - 0x08044eb8, - 0x00657e00, - 0x001fbb00, - 0x800040b7, - 0xf40132b6, - 0x000fb41b, - 0x0006fd7e, - 0xac7e000f, - 0x00800006, - 0x01f60201, - 0xbd04bd00, - 0x1f19f014, - 0x02300080, - 0xbd0001f6, -/* 0x0491: wait */ - 0x0028f404, -/* 0x0497: main */ - 0x0d0031f4, - 0x00377e10, - 0xf401f400, - 0x4001e4b1, - 0x00c71bf5, - 0x99f094bd, - 0x37008004, - 0x0009f602, - 0x008104bd, - 0x11cf02c0, - 0xc1008200, - 0x0022cf02, - 0xf41f13c8, - 0x23c8770b, - 0x550bf41f, - 0x12b220f9, - 0x99f094bd, - 0x37008007, - 0x0009f602, - 0x32f404bd, - 0x0231f401, - 0x0008807e, - 0x99f094bd, - 0x17008007, - 0x0009f602, - 0x20fc04bd, - 0x99f094bd, - 0x37008006, - 0x0009f602, - 0x31f404bd, - 0x08807e01, + 0x1fbb0000, + 0x0040b700, + 0x0132b680, + 0x0fb41bf4, + 0x06fe7e00, + 0x7e000f00, + 0x800006ad, + 0xf6020100, + 0x04bd0001, + 0x19f014bd, + 0x3000801f, + 0x0001f602, +/* 0x0492: wait */ + 0x28f404bd, + 0x0031f400, +/* 0x0498: main */ + 0x377e100d, + 0x01f40000, + 0x01e4b1f4, + 0xc71bf540, 0xf094bd00, - 0x00800699, + 0x00800499, + 0x09f60237, + 0x8104bd00, + 0xcf02c000, + 0x00820011, + 0x22cf02c1, + 0x1f13c800, + 0xc8770bf4, + 0x0bf41f23, + 0xb220f955, + 0xf094bd12, + 0x00800799, + 0x09f60237, + 0xf404bd00, + 0x31f40132, + 0x08817e02, + 0xf094bd00, + 0x00800799, 0x09f60217, 
+ 0xfc04bd00, + 0xf094bd20, + 0x00800699, + 0x09f60237, 0xf404bd00, -/* 0x0522: chsw_prev_no_next */ - 0x20f92f0e, - 0x32f412b2, - 0x0232f401, - 0x0008807e, - 0x008020fc, - 0x02f602c0, + 0x817e0131, + 0x94bd0008, + 0x800699f0, + 0xf6021700, + 0x04bd0009, +/* 0x0523: chsw_prev_no_next */ + 0xf92f0ef4, + 0xf412b220, + 0x32f40132, + 0x08817e02, + 0x8020fc00, + 0xf602c000, + 0x04bd0002, +/* 0x053f: chsw_no_prev */ + 0xc8130ef4, + 0x0bf41f23, + 0x0131f40d, + 0x7e0232f4, +/* 0x054f: chsw_done */ + 0x02000881, + 0xc3008001, + 0x0002f602, + 0x94bd04bd, + 0x800499f0, + 0xf6021700, + 0x04bd0009, + 0xff300ef5, +/* 0x056c: main_not_ctx_switch */ + 0xf401e4b0, + 0xf2b20c1b, + 0x0008217e, +/* 0x057b: main_not_ctx_chan */ + 0xb0400ef4, + 0x1bf402e4, + 0xf094bd2c, + 0x00800799, + 0x09f60237, 0xf404bd00, -/* 0x053e: chsw_no_prev */ - 0x23c8130e, - 0x0d0bf41f, - 0xf40131f4, - 0x807e0232, -/* 0x054e: chsw_done */ - 0x01020008, - 0x02c30080, - 0xbd0002f6, - 0xf094bd04, - 0x00800499, + 0x32f40132, + 0x08817e02, + 0xf094bd00, + 0x00800799, 0x09f60217, - 0xf504bd00, -/* 0x056b: main_not_ctx_switch */ - 0xb0ff300e, - 0x1bf401e4, - 0x7ef2b20c, - 0xf4000820, -/* 0x057a: main_not_ctx_chan */ - 0xe4b0400e, - 0x2c1bf402, - 0x99f094bd, - 0x37008007, - 0x0009f602, - 0x32f404bd, - 0x0232f401, - 0x0008807e, - 0x99f094bd, - 0x17008007, - 0x0009f602, - 0x0ef404bd, -/* 0x05a9: main_not_ctx_save */ - 0x10ef9411, - 0x7e01f5f0, - 0xf50002f8, -/* 0x05b7: main_done */ - 0xbdfee40e, - 0x1f29f024, - 0x02300080, - 0xbd0002f6, - 0xd20ef504, -/* 0x05c9: ih */ - 0xf900f9fe, - 0x0188fe80, - 0x90f980f9, - 0xb0f9a0f9, - 0xe0f9d0f9, - 0x04bdf0f9, - 0xcf02004a, - 0xabc400aa, - 0x230bf404, - 0x004e100d, - 0x00eecf1a, - 0xcf19004f, - 0x047e00ff, - 0xb0b70000, - 0x010e0400, - 0xf61d0040, - 0x04bd000e, -/* 0x060c: ih_no_fifo */ - 0x0100abe4, - 0x0d0c0bf4, - 0x40014e10, - 0x0000047e, -/* 0x061c: ih_no_ctxsw */ - 0x0400abe4, - 0x8e560bf4, - 0x7e400708, + 0xf404bd00, +/* 0x05aa: main_not_ctx_save */ + 0xef94110e, + 0x01f5f010, + 0x0002f87e, + 0xfee40ef5, +/* 0x05b8: main_done */ + 0x29f024bd, + 0x3000801f, + 0x0002f602, + 0x0ef504bd, +/* 0x05ca: ih */ + 0x00f9fed2, + 0x88fe80f9, + 0xf980f901, + 0xf9a0f990, + 0xf9d0f9b0, + 0xbdf0f9e0, + 0x02004a04, + 0xc400aacf, + 0x0bf404ab, + 0x4e100d23, + 0xeecf1a00, + 0x19004f00, + 0x7e00ffcf, + 0xb7000004, + 0x0e0400b0, + 0x1d004001, + 0xbd000ef6, +/* 0x060d: ih_no_fifo */ + 0x00abe404, + 0x0c0bf401, + 0x014e100d, + 0x00047e40, +/* 0x061d: ih_no_ctxsw */ + 0x00abe400, + 0x560bf404, + 0x4007088e, + 0x0000657e, + 0x0080ffb2, + 0x0ff60204, + 0x8e04bd00, + 0x7e400704, 0xb2000065, - 0x040080ff, + 0x030080ff, 0x000ff602, - 0x048e04bd, - 0x657e4007, - 0xffb20000, - 0x02030080, - 0xbd000ff6, - 0x50fec704, - 0x8f02ee94, - 0xbb400700, - 0x657e00ef, - 0x00800000, - 0x0ff60202, + 0xfec704bd, + 0x02ee9450, + 0x4007008f, + 0x7e00efbb, + 0x80000065, + 0xf6020200, + 0x04bd000f, + 0xf87e030f, + 0x004b0002, + 0x8ebfb201, + 0x7e400144, +/* 0x0677: ih_no_fwmthd */ + 0x4b00008f, + 0xb0bd0504, + 0xf4b4abff, + 0x00800c0b, + 0x0bf60307, +/* 0x068b: ih_no_other */ + 0x4004bd00, + 0x0af60100, + 0xfc04bd00, + 0xfce0fcf0, + 0xfcb0fcd0, + 0xfc90fca0, + 0x0088fe80, + 0x00fc80fc, + 0xf80032f4, +/* 0x06ad: ctx_4170s */ + 0x10f5f001, + 0x708effb2, + 0x8f7e4041, + 0x00f80000, +/* 0x06bc: ctx_4170w */ + 0x4041708e, + 0x0000657e, + 0xf4f0ffb2, + 0xf31bf410, +/* 0x06ce: ctx_redswitch */ + 0x004e00f8, + 0x40e5f002, + 0xf020e5f0, + 0x008010e5, + 0x0ef60185, 0x0f04bd00, - 0x02f87e03, - 0x01004b00, - 0x448ebfb2, - 0x8f7e4001, -/* 0x0676: ih_no_fwmthd 
*/ - 0x044b0000, - 0xffb0bd05, - 0x0bf4b4ab, - 0x0700800c, - 0x000bf603, -/* 0x068a: ih_no_other */ - 0x004004bd, - 0x000af601, - 0xf0fc04bd, - 0xd0fce0fc, - 0xa0fcb0fc, - 0x80fc90fc, - 0xfc0088fe, - 0xf400fc80, - 0x01f80032, -/* 0x06ac: ctx_4170s */ - 0xb210f5f0, - 0x41708eff, +/* 0x06e5: ctx_redswitch_delay */ + 0x01f2b608, + 0xf1fd1bf4, + 0xf10400e5, + 0x800100e5, + 0xf6018500, + 0x04bd000e, +/* 0x06fe: ctx_86c */ + 0x008000f8, + 0x0ff60223, + 0xb204bd00, + 0x8a148eff, 0x008f7e40, -/* 0x06bb: ctx_4170w */ - 0x8e00f800, - 0x7e404170, - 0xb2000065, - 0x10f4f0ff, - 0xf8f31bf4, -/* 0x06cd: ctx_redswitch */ - 0x02004e00, - 0xf040e5f0, - 0xe5f020e5, - 0x85008010, - 0x000ef601, - 0x080f04bd, -/* 0x06e4: ctx_redswitch_delay */ - 0xf401f2b6, - 0xe5f1fd1b, - 0xe5f10400, - 0x00800100, - 0x0ef60185, - 0xf804bd00, -/* 0x06fd: ctx_86c */ - 0x23008000, + 0x8effb200, + 0x7e41a88c, + 0xf800008f, +/* 0x071d: ctx_mem */ + 0x84008000, 0x000ff602, - 0xffb204bd, - 0x408a148e, - 0x00008f7e, - 0x8c8effb2, - 0x8f7e41a8, - 0x00f80000, -/* 0x071c: ctx_mem */ - 0x02840080, - 0xbd000ff6, -/* 0x0725: ctx_mem_wait */ - 0x84008f04, - 0x00ffcf02, - 0xf405fffd, - 0x00f8f61b, -/* 0x0734: ctx_load */ - 0x99f094bd, - 0x37008005, - 0x0009f602, - 0x0c0a04bd, - 0x0000b87e, - 0x0080f4bd, - 0x0ff60289, - 0x8004bd00, - 0xf602c100, - 0x04bd0002, - 0x02830080, +/* 0x0726: ctx_mem_wait */ + 0x008f04bd, + 0xffcf0284, + 0x05fffd00, + 0xf8f61bf4, +/* 0x0735: ctx_load */ + 0xf094bd00, + 0x00800599, + 0x09f60237, + 0x0a04bd00, + 0x00b87e0c, + 0x80f4bd00, + 0xf6028900, + 0x04bd000f, + 0x02c10080, 0xbd0002f6, - 0x7e070f04, - 0x8000071c, - 0xf602c000, - 0x04bd0002, - 0xf0000bfe, - 0x24b61f2a, - 0x0220b604, - 0x99f094bd, - 0x37008008, - 0x0009f602, - 0x008004bd, - 0x02f60281, - 0xd204bd00, - 0x80000000, - 0x800225f0, - 0xf6028800, - 0x04bd0002, - 0x00421001, - 0x0223f002, - 0xf80512fa, - 0xf094bd03, + 0x83008004, + 0x0002f602, + 0x070f04bd, + 0x00071d7e, + 0x02c00080, + 0xbd0002f6, + 0x000bfe04, + 0xb61f2af0, + 0x20b60424, + 0xf094bd02, 0x00800899, - 0x09f60217, - 0x9804bd00, - 0x14b68101, - 0x80029818, - 0xfd0825b6, - 0x01b50512, - 0xf094bd16, - 0x00800999, 0x09f60237, 0x8004bd00, 0xf6028100, - 0x04bd0001, - 0x00800102, - 0x02f60288, - 0x4104bd00, - 0x13f00100, - 0x0501fa06, + 0x04bd0002, + 0x000000d2, + 0x0225f080, + 0x02880080, + 0xbd0002f6, + 0x42100104, + 0x23f00200, + 0x0512fa02, 0x94bd03f8, - 0x800999f0, + 0x800899f0, 0xf6021700, 0x04bd0009, - 0x99f094bd, - 0x17008005, - 0x0009f602, - 0x00f804bd, -/* 0x0820: ctx_chan */ - 0x0007347e, - 0xb87e0c0a, - 0x050f0000, - 0x00071c7e, -/* 0x0832: ctx_mmio_exec */ - 0x039800f8, - 0x81008041, - 0x0003f602, - 0x34bd04bd, -/* 0x0840: ctx_mmio_loop */ - 0xf4ff34c4, - 0x00450e1b, - 0x0653f002, - 0xf80535fa, -/* 0x0851: ctx_mmio_pull */ - 0x804e9803, - 0x7e814f98, - 0xb600008f, - 0x12b60830, - 0xdf1bf401, -/* 0x0864: ctx_mmio_done */ - 0x80160398, - 0xf6028100, - 0x04bd0003, - 0x414000b5, - 0x13f00100, - 0x0601fa06, - 0x00f803f8, -/* 0x0880: ctx_xfer */ - 0x0080040e, - 0x0ef60302, -/* 0x088b: ctx_xfer_idle */ - 0x8e04bd00, - 0xcf030000, - 0xe4f100ee, - 0x1bf42000, - 0x0611f4f5, -/* 0x089f: ctx_xfer_pre */ - 0x0f0c02f4, - 0x06fd7e10, - 0x1b11f400, -/* 0x08a8: ctx_xfer_pre_load */ - 0xac7e020f, - 0xbb7e0006, - 0xcd7e0006, - 0xf4bd0006, - 0x0006ac7e, - 0x0007347e, -/* 0x08c0: ctx_xfer_exec */ - 0xbd160198, - 0x05008024, - 0x0002f601, - 0x1fb204bd, - 0x41a5008e, - 0x00008f7e, - 0xf001fcf0, - 0x24b6022c, - 0x05f2fd01, - 0x048effb2, - 0x8f7e41a5, - 0x167e0000, - 0x24bd0002, - 0x0247fc80, - 0xbd0002f6, - 
0x012cf004, - 0x800320b6, - 0xf6024afc, + 0xb6810198, + 0x02981814, + 0x0825b680, + 0xb50512fd, + 0x94bd1601, + 0x800999f0, + 0xf6023700, + 0x04bd0009, + 0x02810080, + 0xbd0001f6, + 0x80010204, + 0xf6028800, 0x04bd0002, - 0xf001acf0, - 0x000b06a5, - 0x98000c98, - 0x000e010d, - 0x00013d7e, - 0xec7e080a, - 0x0a7e0000, - 0x01f40002, - 0x7e0c0a12, + 0xf0010041, + 0x01fa0613, + 0xbd03f805, + 0x0999f094, + 0x02170080, + 0xbd0009f6, + 0xf094bd04, + 0x00800599, + 0x09f60217, + 0xf804bd00, +/* 0x0821: ctx_chan */ + 0x07357e00, + 0x7e0c0a00, 0x0f0000b8, - 0x071c7e05, - 0x2d02f400, -/* 0x093c: ctx_xfer_post */ - 0xac7e020f, - 0xf4bd0006, - 0x0006fd7e, - 0x0002277e, - 0x0006bb7e, - 0xac7ef4bd, + 0x071d7e05, +/* 0x0833: ctx_mmio_exec */ + 0x9800f800, + 0x00804103, + 0x03f60281, + 0xbd04bd00, +/* 0x0841: ctx_mmio_loop */ + 0xff34c434, + 0x450e1bf4, + 0x53f00200, + 0x0535fa06, +/* 0x0852: ctx_mmio_pull */ + 0x4e9803f8, + 0x814f9880, + 0x00008f7e, + 0xb60830b6, + 0x1bf40112, +/* 0x0865: ctx_mmio_done */ + 0x160398df, + 0x02810080, + 0xbd0003f6, + 0x4000b504, + 0xf0010041, + 0x01fa0613, + 0xf803f806, +/* 0x0881: ctx_xfer */ + 0x80040e00, + 0xf6030200, + 0x04bd000e, +/* 0x088c: ctx_xfer_idle */ + 0x0300008e, + 0xf100eecf, + 0xf42000e4, + 0x11f4f51b, + 0x0c02f406, +/* 0x08a0: ctx_xfer_pre */ + 0xfe7e100f, 0x11f40006, - 0x40019810, - 0xf40511fd, - 0x327e070b, -/* 0x0966: ctx_xfer_no_post_mmio */ -/* 0x0966: ctx_xfer_done */ - 0x00f80008, +/* 0x08a9: ctx_xfer_pre_load */ + 0x7e020f1b, + 0x7e0006ad, + 0x7e0006bc, + 0xbd0006ce, + 0x06ad7ef4, + 0x07357e00, +/* 0x08c1: ctx_xfer_exec */ + 0x16019800, + 0x008024bd, + 0x02f60105, + 0xb204bd00, + 0xa5008e1f, + 0x008f7e41, + 0x01fcf000, + 0xb6022cf0, + 0xf2fd0124, + 0x8effb205, + 0x7e41a504, + 0x7e00008f, + 0xbd000216, + 0x47fc8024, + 0x0002f602, + 0x2cf004bd, + 0x0320b601, + 0x024afc80, + 0xbd0002f6, + 0x01acf004, + 0x0b06a5f0, + 0x000c9800, + 0x0e010d98, + 0x013d7e00, + 0x7e080a00, + 0x7e0000ec, + 0xf400020a, + 0x0c0a1201, + 0x0000b87e, + 0x1d7e050f, + 0x02f40007, +/* 0x093d: ctx_xfer_post */ + 0x7e020f2d, + 0xbd0006ad, + 0x06fe7ef4, + 0x02277e00, + 0x06bc7e00, + 0x7ef4bd00, + 0xf40006ad, + 0x01981011, + 0x0511fd40, + 0x7e070bf4, +/* 0x0967: ctx_xfer_no_post_mmio */ +/* 0x0967: ctx_xfer_done */ + 0xf8000833, 0x00000000, 0x00000000, 0x00000000, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h index 649a442b439037bab43c0923603470c864ae475d..449dae75320314e7391cc8c305bd2bd88a49b346 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h @@ -441,7 +441,7 @@ static uint32_t gm107_grhub_code[] = { 0x020014fe, 0x12004002, 0xbd0002f6, - 0x05c94104, + 0x05ca4104, 0xbd0010fe, 0x07004024, 0xbd0002f6, @@ -460,423 +460,423 @@ static uint32_t gm107_grhub_code[] = { 0x01039204, 0x03090080, 0xbd0003f6, - 0x87044204, - 0xf6040040, - 0x04bd0002, - 0x00400402, - 0x0002f603, - 0x31f404bd, - 0x96048e10, - 0x00657e40, - 0xc7feb200, - 0x01b590f1, - 0x1ff4f003, - 0x01020fb5, - 0x041fbb01, - 0x800112b6, - 0xf6010300, - 0x04bd0001, - 0x01040080, + 0x87048204, + 0x04004000, + 0xbd0002f6, + 0x40040204, + 0x02f60300, + 0xf404bd00, + 0x048e1031, + 0x657e4096, + 0xfeb20000, + 0xb590f1c7, + 0xf4f00301, + 0x020fb51f, + 0x1fbb0101, + 0x0112b604, + 0x01030080, 0xbd0001f6, - 0x01004104, - 0xac7e020f, - 0xbb7e0006, - 0x100f0006, - 0x0006fd7e, - 0x98000e98, - 0x207e010f, - 0x14950001, - 0xc0008008, - 0x0004f601, - 0x008004bd, - 0x04f601c1, - 
0xb704bd00, - 0xbb130030, - 0xf5b6001f, - 0xd3008002, - 0x000ff601, - 0x15b604bd, - 0x0110b608, - 0xb20814b6, - 0x02687e1f, - 0x001fbb00, - 0x84020398, -/* 0x041f: init_gpc */ - 0xb8502000, - 0x0008044e, - 0x8f7e1fb2, + 0x04008004, + 0x0001f601, + 0x004104bd, + 0x7e020f01, + 0x7e0006ad, + 0x0f0006bc, + 0x06fe7e10, + 0x000e9800, + 0x7e010f98, + 0x95000120, + 0x00800814, + 0x04f601c0, + 0x8004bd00, + 0xf601c100, + 0x04bd0004, + 0x130030b7, + 0xb6001fbb, + 0x008002f5, + 0x0ff601d3, + 0xb604bd00, + 0x10b60815, + 0x0814b601, + 0x687e1fb2, + 0x1fbb0002, + 0x02039800, + 0x50200084, +/* 0x0420: init_gpc */ + 0x08044eb8, + 0x7e1fb200, + 0xb800008f, + 0x00010c4e, + 0x8f7ef4bd, 0x4eb80000, - 0xbd00010c, - 0x008f7ef4, - 0x044eb800, - 0x8f7e0001, + 0x7e000104, + 0xb800008f, + 0x0001004e, + 0x8f7e020f, 0x4eb80000, - 0x0f000100, - 0x008f7e02, - 0x004eb800, -/* 0x044e: init_gpc_wait */ +/* 0x044f: init_gpc_wait */ + 0x7e000800, + 0xc8000065, + 0x0bf41fff, + 0x044eb8f9, 0x657e0008, - 0xffc80000, - 0xf90bf41f, - 0x08044eb8, - 0x00657e00, - 0x001fbb00, - 0x800040b7, - 0xf40132b6, - 0x000fb41b, - 0x0006fd7e, - 0xac7e000f, - 0x00800006, - 0x01f60201, - 0xbd04bd00, - 0x1f19f014, - 0x02300080, - 0xbd0001f6, -/* 0x0491: wait */ - 0x0028f404, -/* 0x0497: main */ - 0x0d0031f4, - 0x00377e10, - 0xf401f400, - 0x4001e4b1, - 0x00c71bf5, - 0x99f094bd, - 0x37008004, - 0x0009f602, - 0x008104bd, - 0x11cf02c0, - 0xc1008200, - 0x0022cf02, - 0xf41f13c8, - 0x23c8770b, - 0x550bf41f, - 0x12b220f9, - 0x99f094bd, - 0x37008007, - 0x0009f602, - 0x32f404bd, - 0x0231f401, - 0x0008807e, - 0x99f094bd, - 0x17008007, - 0x0009f602, - 0x20fc04bd, - 0x99f094bd, - 0x37008006, - 0x0009f602, - 0x31f404bd, - 0x08807e01, + 0x1fbb0000, + 0x0040b700, + 0x0132b680, + 0x0fb41bf4, + 0x06fe7e00, + 0x7e000f00, + 0x800006ad, + 0xf6020100, + 0x04bd0001, + 0x19f014bd, + 0x3000801f, + 0x0001f602, +/* 0x0492: wait */ + 0x28f404bd, + 0x0031f400, +/* 0x0498: main */ + 0x377e100d, + 0x01f40000, + 0x01e4b1f4, + 0xc71bf540, 0xf094bd00, - 0x00800699, + 0x00800499, + 0x09f60237, + 0x8104bd00, + 0xcf02c000, + 0x00820011, + 0x22cf02c1, + 0x1f13c800, + 0xc8770bf4, + 0x0bf41f23, + 0xb220f955, + 0xf094bd12, + 0x00800799, + 0x09f60237, + 0xf404bd00, + 0x31f40132, + 0x08817e02, + 0xf094bd00, + 0x00800799, 0x09f60217, + 0xfc04bd00, + 0xf094bd20, + 0x00800699, + 0x09f60237, 0xf404bd00, -/* 0x0522: chsw_prev_no_next */ - 0x20f92f0e, - 0x32f412b2, - 0x0232f401, - 0x0008807e, - 0x008020fc, - 0x02f602c0, + 0x817e0131, + 0x94bd0008, + 0x800699f0, + 0xf6021700, + 0x04bd0009, +/* 0x0523: chsw_prev_no_next */ + 0xf92f0ef4, + 0xf412b220, + 0x32f40132, + 0x08817e02, + 0x8020fc00, + 0xf602c000, + 0x04bd0002, +/* 0x053f: chsw_no_prev */ + 0xc8130ef4, + 0x0bf41f23, + 0x0131f40d, + 0x7e0232f4, +/* 0x054f: chsw_done */ + 0x02000881, + 0xc3008001, + 0x0002f602, + 0x94bd04bd, + 0x800499f0, + 0xf6021700, + 0x04bd0009, + 0xff300ef5, +/* 0x056c: main_not_ctx_switch */ + 0xf401e4b0, + 0xf2b20c1b, + 0x0008217e, +/* 0x057b: main_not_ctx_chan */ + 0xb0400ef4, + 0x1bf402e4, + 0xf094bd2c, + 0x00800799, + 0x09f60237, 0xf404bd00, -/* 0x053e: chsw_no_prev */ - 0x23c8130e, - 0x0d0bf41f, - 0xf40131f4, - 0x807e0232, -/* 0x054e: chsw_done */ - 0x01020008, - 0x02c30080, - 0xbd0002f6, - 0xf094bd04, - 0x00800499, + 0x32f40132, + 0x08817e02, + 0xf094bd00, + 0x00800799, 0x09f60217, - 0xf504bd00, -/* 0x056b: main_not_ctx_switch */ - 0xb0ff300e, - 0x1bf401e4, - 0x7ef2b20c, - 0xf4000820, -/* 0x057a: main_not_ctx_chan */ - 0xe4b0400e, - 0x2c1bf402, - 0x99f094bd, - 0x37008007, - 0x0009f602, - 0x32f404bd, - 
0x0232f401, - 0x0008807e, - 0x99f094bd, - 0x17008007, - 0x0009f602, - 0x0ef404bd, -/* 0x05a9: main_not_ctx_save */ - 0x10ef9411, - 0x7e01f5f0, - 0xf50002f8, -/* 0x05b7: main_done */ - 0xbdfee40e, - 0x1f29f024, - 0x02300080, - 0xbd0002f6, - 0xd20ef504, -/* 0x05c9: ih */ - 0xf900f9fe, - 0x0188fe80, - 0x90f980f9, - 0xb0f9a0f9, - 0xe0f9d0f9, - 0x04bdf0f9, - 0xcf02004a, - 0xabc400aa, - 0x230bf404, - 0x004e100d, - 0x00eecf1a, - 0xcf19004f, - 0x047e00ff, - 0xb0b70000, - 0x010e0400, - 0xf61d0040, - 0x04bd000e, -/* 0x060c: ih_no_fifo */ - 0x0100abe4, - 0x0d0c0bf4, - 0x40014e10, - 0x0000047e, -/* 0x061c: ih_no_ctxsw */ - 0x0400abe4, - 0x8e560bf4, - 0x7e400708, + 0xf404bd00, +/* 0x05aa: main_not_ctx_save */ + 0xef94110e, + 0x01f5f010, + 0x0002f87e, + 0xfee40ef5, +/* 0x05b8: main_done */ + 0x29f024bd, + 0x3000801f, + 0x0002f602, + 0x0ef504bd, +/* 0x05ca: ih */ + 0x00f9fed2, + 0x88fe80f9, + 0xf980f901, + 0xf9a0f990, + 0xf9d0f9b0, + 0xbdf0f9e0, + 0x02004a04, + 0xc400aacf, + 0x0bf404ab, + 0x4e100d23, + 0xeecf1a00, + 0x19004f00, + 0x7e00ffcf, + 0xb7000004, + 0x0e0400b0, + 0x1d004001, + 0xbd000ef6, +/* 0x060d: ih_no_fifo */ + 0x00abe404, + 0x0c0bf401, + 0x014e100d, + 0x00047e40, +/* 0x061d: ih_no_ctxsw */ + 0x00abe400, + 0x560bf404, + 0x4007088e, + 0x0000657e, + 0x0080ffb2, + 0x0ff60204, + 0x8e04bd00, + 0x7e400704, 0xb2000065, - 0x040080ff, + 0x030080ff, 0x000ff602, - 0x048e04bd, - 0x657e4007, - 0xffb20000, - 0x02030080, - 0xbd000ff6, - 0x50fec704, - 0x8f02ee94, - 0xbb400700, - 0x657e00ef, - 0x00800000, - 0x0ff60202, + 0xfec704bd, + 0x02ee9450, + 0x4007008f, + 0x7e00efbb, + 0x80000065, + 0xf6020200, + 0x04bd000f, + 0xf87e030f, + 0x004b0002, + 0x8ebfb201, + 0x7e400144, +/* 0x0677: ih_no_fwmthd */ + 0x4b00008f, + 0xb0bd0504, + 0xf4b4abff, + 0x00800c0b, + 0x0bf60307, +/* 0x068b: ih_no_other */ + 0x4004bd00, + 0x0af60100, + 0xfc04bd00, + 0xfce0fcf0, + 0xfcb0fcd0, + 0xfc90fca0, + 0x0088fe80, + 0x00fc80fc, + 0xf80032f4, +/* 0x06ad: ctx_4170s */ + 0x10f5f001, + 0x708effb2, + 0x8f7e4041, + 0x00f80000, +/* 0x06bc: ctx_4170w */ + 0x4041708e, + 0x0000657e, + 0xf4f0ffb2, + 0xf31bf410, +/* 0x06ce: ctx_redswitch */ + 0x004e00f8, + 0x40e5f002, + 0xf020e5f0, + 0x008010e5, + 0x0ef60185, 0x0f04bd00, - 0x02f87e03, - 0x01004b00, - 0x448ebfb2, - 0x8f7e4001, -/* 0x0676: ih_no_fwmthd */ - 0x044b0000, - 0xffb0bd05, - 0x0bf4b4ab, - 0x0700800c, - 0x000bf603, -/* 0x068a: ih_no_other */ - 0x004004bd, - 0x000af601, - 0xf0fc04bd, - 0xd0fce0fc, - 0xa0fcb0fc, - 0x80fc90fc, - 0xfc0088fe, - 0xf400fc80, - 0x01f80032, -/* 0x06ac: ctx_4170s */ - 0xb210f5f0, - 0x41708eff, +/* 0x06e5: ctx_redswitch_delay */ + 0x01f2b608, + 0xf1fd1bf4, + 0xf10400e5, + 0x800100e5, + 0xf6018500, + 0x04bd000e, +/* 0x06fe: ctx_86c */ + 0x008000f8, + 0x0ff60223, + 0xb204bd00, + 0x8a148eff, 0x008f7e40, -/* 0x06bb: ctx_4170w */ - 0x8e00f800, - 0x7e404170, - 0xb2000065, - 0x10f4f0ff, - 0xf8f31bf4, -/* 0x06cd: ctx_redswitch */ - 0x02004e00, - 0xf040e5f0, - 0xe5f020e5, - 0x85008010, - 0x000ef601, - 0x080f04bd, -/* 0x06e4: ctx_redswitch_delay */ - 0xf401f2b6, - 0xe5f1fd1b, - 0xe5f10400, - 0x00800100, - 0x0ef60185, - 0xf804bd00, -/* 0x06fd: ctx_86c */ - 0x23008000, + 0x8effb200, + 0x7e41a88c, + 0xf800008f, +/* 0x071d: ctx_mem */ + 0x84008000, 0x000ff602, - 0xffb204bd, - 0x408a148e, - 0x00008f7e, - 0x8c8effb2, - 0x8f7e41a8, - 0x00f80000, -/* 0x071c: ctx_mem */ - 0x02840080, - 0xbd000ff6, -/* 0x0725: ctx_mem_wait */ - 0x84008f04, - 0x00ffcf02, - 0xf405fffd, - 0x00f8f61b, -/* 0x0734: ctx_load */ - 0x99f094bd, - 0x37008005, - 0x0009f602, - 0x0c0a04bd, - 0x0000b87e, - 
0x0080f4bd, - 0x0ff60289, - 0x8004bd00, - 0xf602c100, - 0x04bd0002, - 0x02830080, +/* 0x0726: ctx_mem_wait */ + 0x008f04bd, + 0xffcf0284, + 0x05fffd00, + 0xf8f61bf4, +/* 0x0735: ctx_load */ + 0xf094bd00, + 0x00800599, + 0x09f60237, + 0x0a04bd00, + 0x00b87e0c, + 0x80f4bd00, + 0xf6028900, + 0x04bd000f, + 0x02c10080, 0xbd0002f6, - 0x7e070f04, - 0x8000071c, - 0xf602c000, - 0x04bd0002, - 0xf0000bfe, - 0x24b61f2a, - 0x0220b604, - 0x99f094bd, - 0x37008008, - 0x0009f602, - 0x008004bd, - 0x02f60281, - 0xd204bd00, - 0x80000000, - 0x800225f0, - 0xf6028800, - 0x04bd0002, - 0x00421001, - 0x0223f002, - 0xf80512fa, - 0xf094bd03, + 0x83008004, + 0x0002f602, + 0x070f04bd, + 0x00071d7e, + 0x02c00080, + 0xbd0002f6, + 0x000bfe04, + 0xb61f2af0, + 0x20b60424, + 0xf094bd02, 0x00800899, - 0x09f60217, - 0x9804bd00, - 0x14b68101, - 0x80029818, - 0xfd0825b6, - 0x01b50512, - 0xf094bd16, - 0x00800999, 0x09f60237, 0x8004bd00, 0xf6028100, - 0x04bd0001, - 0x00800102, - 0x02f60288, - 0x4104bd00, - 0x13f00100, - 0x0501fa06, + 0x04bd0002, + 0x000000d2, + 0x0225f080, + 0x02880080, + 0xbd0002f6, + 0x42100104, + 0x23f00200, + 0x0512fa02, 0x94bd03f8, - 0x800999f0, + 0x800899f0, 0xf6021700, 0x04bd0009, - 0x99f094bd, - 0x17008005, - 0x0009f602, - 0x00f804bd, -/* 0x0820: ctx_chan */ - 0x0007347e, - 0xb87e0c0a, - 0x050f0000, - 0x00071c7e, -/* 0x0832: ctx_mmio_exec */ - 0x039800f8, - 0x81008041, - 0x0003f602, - 0x34bd04bd, -/* 0x0840: ctx_mmio_loop */ - 0xf4ff34c4, - 0x00450e1b, - 0x0653f002, - 0xf80535fa, -/* 0x0851: ctx_mmio_pull */ - 0x804e9803, - 0x7e814f98, - 0xb600008f, - 0x12b60830, - 0xdf1bf401, -/* 0x0864: ctx_mmio_done */ - 0x80160398, - 0xf6028100, - 0x04bd0003, - 0x414000b5, - 0x13f00100, - 0x0601fa06, - 0x00f803f8, -/* 0x0880: ctx_xfer */ - 0x0080040e, - 0x0ef60302, -/* 0x088b: ctx_xfer_idle */ - 0x8e04bd00, - 0xcf030000, - 0xe4f100ee, - 0x1bf42000, - 0x0611f4f5, -/* 0x089f: ctx_xfer_pre */ - 0x0f0c02f4, - 0x06fd7e10, - 0x1b11f400, -/* 0x08a8: ctx_xfer_pre_load */ - 0xac7e020f, - 0xbb7e0006, - 0xcd7e0006, - 0xf4bd0006, - 0x0006ac7e, - 0x0007347e, -/* 0x08c0: ctx_xfer_exec */ - 0xbd160198, - 0x05008024, - 0x0002f601, - 0x1fb204bd, - 0x41a5008e, - 0x00008f7e, - 0xf001fcf0, - 0x24b6022c, - 0x05f2fd01, - 0x048effb2, - 0x8f7e41a5, - 0x167e0000, - 0x24bd0002, - 0x0247fc80, - 0xbd0002f6, - 0x012cf004, - 0x800320b6, - 0xf6024afc, + 0xb6810198, + 0x02981814, + 0x0825b680, + 0xb50512fd, + 0x94bd1601, + 0x800999f0, + 0xf6023700, + 0x04bd0009, + 0x02810080, + 0xbd0001f6, + 0x80010204, + 0xf6028800, 0x04bd0002, - 0xf001acf0, - 0x000b06a5, - 0x98000c98, - 0x000e010d, - 0x00013d7e, - 0xec7e080a, - 0x0a7e0000, - 0x01f40002, - 0x7e0c0a12, + 0xf0010041, + 0x01fa0613, + 0xbd03f805, + 0x0999f094, + 0x02170080, + 0xbd0009f6, + 0xf094bd04, + 0x00800599, + 0x09f60217, + 0xf804bd00, +/* 0x0821: ctx_chan */ + 0x07357e00, + 0x7e0c0a00, 0x0f0000b8, - 0x071c7e05, - 0x2d02f400, -/* 0x093c: ctx_xfer_post */ - 0xac7e020f, - 0xf4bd0006, - 0x0006fd7e, - 0x0002277e, - 0x0006bb7e, - 0xac7ef4bd, + 0x071d7e05, +/* 0x0833: ctx_mmio_exec */ + 0x9800f800, + 0x00804103, + 0x03f60281, + 0xbd04bd00, +/* 0x0841: ctx_mmio_loop */ + 0xff34c434, + 0x450e1bf4, + 0x53f00200, + 0x0535fa06, +/* 0x0852: ctx_mmio_pull */ + 0x4e9803f8, + 0x814f9880, + 0x00008f7e, + 0xb60830b6, + 0x1bf40112, +/* 0x0865: ctx_mmio_done */ + 0x160398df, + 0x02810080, + 0xbd0003f6, + 0x4000b504, + 0xf0010041, + 0x01fa0613, + 0xf803f806, +/* 0x0881: ctx_xfer */ + 0x80040e00, + 0xf6030200, + 0x04bd000e, +/* 0x088c: ctx_xfer_idle */ + 0x0300008e, + 0xf100eecf, + 0xf42000e4, + 0x11f4f51b, + 
0x0c02f406, +/* 0x08a0: ctx_xfer_pre */ + 0xfe7e100f, 0x11f40006, - 0x40019810, - 0xf40511fd, - 0x327e070b, -/* 0x0966: ctx_xfer_no_post_mmio */ -/* 0x0966: ctx_xfer_done */ - 0x00f80008, +/* 0x08a9: ctx_xfer_pre_load */ + 0x7e020f1b, + 0x7e0006ad, + 0x7e0006bc, + 0xbd0006ce, + 0x06ad7ef4, + 0x07357e00, +/* 0x08c1: ctx_xfer_exec */ + 0x16019800, + 0x008024bd, + 0x02f60105, + 0xb204bd00, + 0xa5008e1f, + 0x008f7e41, + 0x01fcf000, + 0xb6022cf0, + 0xf2fd0124, + 0x8effb205, + 0x7e41a504, + 0x7e00008f, + 0xbd000216, + 0x47fc8024, + 0x0002f602, + 0x2cf004bd, + 0x0320b601, + 0x024afc80, + 0xbd0002f6, + 0x01acf004, + 0x0b06a5f0, + 0x000c9800, + 0x0e010d98, + 0x013d7e00, + 0x7e080a00, + 0x7e0000ec, + 0xf400020a, + 0x0c0a1201, + 0x0000b87e, + 0x1d7e050f, + 0x02f40007, +/* 0x093d: ctx_xfer_post */ + 0x7e020f2d, + 0xbd0006ad, + 0x06fe7ef4, + 0x02277e00, + 0x06bc7e00, + 0x7ef4bd00, + 0xf40006ad, + 0x01981011, + 0x0511fd40, + 0x7e070bf4, +/* 0x0967: ctx_xfer_no_post_mmio */ +/* 0x0967: ctx_xfer_done */ + 0xf8000833, 0x00000000, 0x00000000, 0x00000000, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c index c578deb5867a8d0cd2495a5f9bf55d6330bfe86a..dd8f85b8b3a7e49089d9368e80d73547bded8a63 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c @@ -26,9 +26,9 @@ #include "fuc/os.h" #include -#include #include -#include +#include +#include #include #include #include @@ -1636,7 +1636,7 @@ gf100_gr_intr(struct nvkm_gr *base) static void gf100_gr_init_fw(struct nvkm_falcon *falcon, - struct gf100_gr_fuc *code, struct gf100_gr_fuc *data) + struct nvkm_blob *code, struct nvkm_blob *data) { nvkm_falcon_load_dmem(falcon, data->data, 0x0, data->size, 0); nvkm_falcon_load_imem(falcon, code->data, 0x0, code->size, 0, 0, false); @@ -1690,26 +1690,30 @@ gf100_gr_init_ctxctl_ext(struct gf100_gr *gr) { struct nvkm_subdev *subdev = &gr->base.engine.subdev; struct nvkm_device *device = subdev->device; - struct nvkm_secboot *sb = device->secboot; - u32 secboot_mask = 0; + u32 lsf_mask = 0; int ret; /* load fuc microcode */ nvkm_mc_unk260(device, 0); /* securely-managed falcons must be reset using secure boot */ - if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS)) - secboot_mask |= BIT(NVKM_SECBOOT_FALCON_FECS); - else - gf100_gr_init_fw(gr->fecs.falcon, &gr->fuc409c, &gr->fuc409d); - if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS)) - secboot_mask |= BIT(NVKM_SECBOOT_FALCON_GPCCS); - else - gf100_gr_init_fw(gr->gpccs.falcon, &gr->fuc41ac, &gr->fuc41ad); + if (!nvkm_acr_managed_falcon(device, NVKM_ACR_LSF_FECS)) { + gf100_gr_init_fw(&gr->fecs.falcon, &gr->fecs.inst, + &gr->fecs.data); + } else { + lsf_mask |= BIT(NVKM_ACR_LSF_FECS); + } - if (secboot_mask != 0) { - int ret = nvkm_secboot_reset(sb, secboot_mask); + if (!nvkm_acr_managed_falcon(device, NVKM_ACR_LSF_GPCCS)) { + gf100_gr_init_fw(&gr->gpccs.falcon, &gr->gpccs.inst, + &gr->gpccs.data); + } else { + lsf_mask |= BIT(NVKM_ACR_LSF_GPCCS); + } + + if (lsf_mask) { + ret = nvkm_acr_bootstrap_falcons(device, lsf_mask); if (ret) return ret; } @@ -1721,8 +1725,8 @@ gf100_gr_init_ctxctl_ext(struct gf100_gr *gr) nvkm_wr32(device, 0x41a10c, 0x00000000); nvkm_wr32(device, 0x40910c, 0x00000000); - nvkm_falcon_start(gr->gpccs.falcon); - nvkm_falcon_start(gr->fecs.falcon); + nvkm_falcon_start(&gr->gpccs.falcon); + nvkm_falcon_start(&gr->fecs.falcon); if (nvkm_msec(device, 2000, if (nvkm_rd32(device, 0x409800) & 0x00000001) @@ -1784,18 +1788,18 @@ 
gf100_gr_init_ctxctl_int(struct gf100_gr *gr) /* load HUB microcode */ nvkm_mc_unk260(device, 0); - nvkm_falcon_load_dmem(gr->fecs.falcon, + nvkm_falcon_load_dmem(&gr->fecs.falcon, gr->func->fecs.ucode->data.data, 0x0, gr->func->fecs.ucode->data.size, 0); - nvkm_falcon_load_imem(gr->fecs.falcon, + nvkm_falcon_load_imem(&gr->fecs.falcon, gr->func->fecs.ucode->code.data, 0x0, gr->func->fecs.ucode->code.size, 0, 0, false); /* load GPC microcode */ - nvkm_falcon_load_dmem(gr->gpccs.falcon, + nvkm_falcon_load_dmem(&gr->gpccs.falcon, gr->func->gpccs.ucode->data.data, 0x0, gr->func->gpccs.ucode->data.size, 0); - nvkm_falcon_load_imem(gr->gpccs.falcon, + nvkm_falcon_load_imem(&gr->gpccs.falcon, gr->func->gpccs.ucode->code.data, 0x0, gr->func->gpccs.ucode->code.size, 0, 0, false); nvkm_mc_unk260(device, 1); @@ -1941,17 +1945,6 @@ gf100_gr_oneinit(struct nvkm_gr *base) struct nvkm_subdev *subdev = &gr->base.engine.subdev; struct nvkm_device *device = subdev->device; int i, j; - int ret; - - ret = nvkm_falcon_v1_new(subdev, "FECS", 0x409000, &gr->fecs.falcon); - if (ret) - return ret; - - mutex_init(&gr->fecs.mutex); - - ret = nvkm_falcon_v1_new(subdev, "GPCCS", 0x41a000, &gr->gpccs.falcon); - if (ret) - return ret; nvkm_pmu_pgob(device->pmu, false); @@ -1992,11 +1985,11 @@ gf100_gr_init_(struct nvkm_gr *base) nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false); - ret = nvkm_falcon_get(gr->fecs.falcon, subdev); + ret = nvkm_falcon_get(&gr->fecs.falcon, subdev); if (ret) return ret; - ret = nvkm_falcon_get(gr->gpccs.falcon, subdev); + ret = nvkm_falcon_get(&gr->gpccs.falcon, subdev); if (ret) return ret; @@ -2004,49 +1997,34 @@ gf100_gr_init_(struct nvkm_gr *base) } static int -gf100_gr_fini_(struct nvkm_gr *base, bool suspend) +gf100_gr_fini(struct nvkm_gr *base, bool suspend) { struct gf100_gr *gr = gf100_gr(base); struct nvkm_subdev *subdev = &gr->base.engine.subdev; - nvkm_falcon_put(gr->gpccs.falcon, subdev); - nvkm_falcon_put(gr->fecs.falcon, subdev); + nvkm_falcon_put(&gr->gpccs.falcon, subdev); + nvkm_falcon_put(&gr->fecs.falcon, subdev); return 0; } -void -gf100_gr_dtor_fw(struct gf100_gr_fuc *fuc) -{ - kfree(fuc->data); - fuc->data = NULL; -} - -static void -gf100_gr_dtor_init(struct gf100_gr_pack *pack) -{ - vfree(pack); -} - void * gf100_gr_dtor(struct nvkm_gr *base) { struct gf100_gr *gr = gf100_gr(base); - if (gr->func->dtor) - gr->func->dtor(gr); kfree(gr->data); - nvkm_falcon_del(&gr->gpccs.falcon); - nvkm_falcon_del(&gr->fecs.falcon); + nvkm_falcon_dtor(&gr->gpccs.falcon); + nvkm_falcon_dtor(&gr->fecs.falcon); - gf100_gr_dtor_fw(&gr->fuc409c); - gf100_gr_dtor_fw(&gr->fuc409d); - gf100_gr_dtor_fw(&gr->fuc41ac); - gf100_gr_dtor_fw(&gr->fuc41ad); + nvkm_blob_dtor(&gr->fecs.inst); + nvkm_blob_dtor(&gr->fecs.data); + nvkm_blob_dtor(&gr->gpccs.inst); + nvkm_blob_dtor(&gr->gpccs.data); - gf100_gr_dtor_init(gr->fuc_bundle); - gf100_gr_dtor_init(gr->fuc_method); - gf100_gr_dtor_init(gr->fuc_sw_ctx); - gf100_gr_dtor_init(gr->fuc_sw_nonctx); + vfree(gr->bundle); + vfree(gr->method); + vfree(gr->sw_ctx); + vfree(gr->sw_nonctx); return gr; } @@ -2056,7 +2034,7 @@ gf100_gr_ = { .dtor = gf100_gr_dtor, .oneinit = gf100_gr_oneinit, .init = gf100_gr_init_, - .fini = gf100_gr_fini_, + .fini = gf100_gr_fini, .intr = gf100_gr_intr, .units = gf100_gr_units, .chan_new = gf100_gr_chan_new, @@ -2067,87 +2045,24 @@ gf100_gr_ = { .ctxsw.inst = gf100_gr_ctxsw_inst, }; -int -gf100_gr_ctor_fw_legacy(struct gf100_gr *gr, const char *fwname, - struct gf100_gr_fuc *fuc, int ret) -{ - struct nvkm_subdev *subdev = 
&gr->base.engine.subdev; - struct nvkm_device *device = subdev->device; - const struct firmware *fw; - char f[32]; - - /* see if this firmware has a legacy path */ - if (!strcmp(fwname, "fecs_inst")) - fwname = "fuc409c"; - else if (!strcmp(fwname, "fecs_data")) - fwname = "fuc409d"; - else if (!strcmp(fwname, "gpccs_inst")) - fwname = "fuc41ac"; - else if (!strcmp(fwname, "gpccs_data")) - fwname = "fuc41ad"; - else { - /* nope, let's just return the error we got */ - nvkm_error(subdev, "failed to load %s\n", fwname); - return ret; - } - - /* yes, try to load from the legacy path */ - nvkm_debug(subdev, "%s: falling back to legacy path\n", fwname); - - snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, fwname); - ret = request_firmware(&fw, f, device->dev); - if (ret) { - snprintf(f, sizeof(f), "nouveau/%s", fwname); - ret = request_firmware(&fw, f, device->dev); - if (ret) { - nvkm_error(subdev, "failed to load %s\n", fwname); - return ret; - } - } - - fuc->size = fw->size; - fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL); - release_firmware(fw); - return (fuc->data != NULL) ? 0 : -ENOMEM; -} - -int -gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname, - struct gf100_gr_fuc *fuc) -{ - const struct firmware *fw; - int ret; - - ret = nvkm_firmware_get(&gr->base.engine.subdev, fwname, &fw); - if (ret) { - ret = gf100_gr_ctor_fw_legacy(gr, fwname, fuc, ret); - if (ret) - return -ENODEV; - return 0; - } - - fuc->size = fw->size; - fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL); - nvkm_firmware_put(fw); - return (fuc->data != NULL) ? 0 : -ENOMEM; -} - -int -gf100_gr_ctor(const struct gf100_gr_func *func, struct nvkm_device *device, - int index, struct gf100_gr *gr) -{ - gr->func = func; - gr->firmware = nvkm_boolopt(device->cfgopt, "NvGrUseFW", - func->fecs.ucode == NULL); - - return nvkm_gr_ctor(&gf100_gr_, device, index, - gr->firmware || func->fecs.ucode != NULL, - &gr->base); -} +static const struct nvkm_falcon_func +gf100_gr_flcn = { + .fbif = 0x600, + .load_imem = nvkm_falcon_v1_load_imem, + .load_dmem = nvkm_falcon_v1_load_dmem, + .read_dmem = nvkm_falcon_v1_read_dmem, + .bind_context = nvkm_falcon_v1_bind_context, + .wait_for_halt = nvkm_falcon_v1_wait_for_halt, + .clear_interrupt = nvkm_falcon_v1_clear_interrupt, + .set_start_addr = nvkm_falcon_v1_set_start_addr, + .start = nvkm_falcon_v1_start, + .enable = nvkm_falcon_v1_enable, + .disable = nvkm_falcon_v1_disable, +}; int -gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device, - int index, struct nvkm_gr **pgr) +gf100_gr_new_(const struct gf100_gr_fwif *fwif, + struct nvkm_device *device, int index, struct nvkm_gr **pgr) { struct gf100_gr *gr; int ret; @@ -2156,21 +2071,48 @@ gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device, return -ENOMEM; *pgr = &gr->base; - ret = gf100_gr_ctor(func, device, index, gr); + ret = nvkm_gr_ctor(&gf100_gr_, device, index, true, &gr->base); if (ret) return ret; - if (gr->firmware) { - if (gf100_gr_ctor_fw(gr, "fecs_inst", &gr->fuc409c) || - gf100_gr_ctor_fw(gr, "fecs_data", &gr->fuc409d) || - gf100_gr_ctor_fw(gr, "gpccs_inst", &gr->fuc41ac) || - gf100_gr_ctor_fw(gr, "gpccs_data", &gr->fuc41ad)) - return -ENODEV; - } + fwif = nvkm_firmware_load(&gr->base.engine.subdev, fwif, "Gr", gr); + if (IS_ERR(fwif)) + return -ENODEV; + + gr->func = fwif->func; + + ret = nvkm_falcon_ctor(&gf100_gr_flcn, &gr->base.engine.subdev, + "fecs", 0x409000, &gr->fecs.falcon); + if (ret) + return ret; + + mutex_init(&gr->fecs.mutex); + + ret = 
nvkm_falcon_ctor(&gf100_gr_flcn, &gr->base.engine.subdev, + "gpccs", 0x41a000, &gr->gpccs.falcon); + if (ret) + return ret; return 0; } +void +gf100_gr_init_num_tpc_per_gpc(struct gf100_gr *gr, bool pd, bool ds) +{ + struct nvkm_device *device = gr->base.engine.subdev.device; + int gpc, i, j; + u32 data; + + for (gpc = 0, i = 0; i < 4; i++) { + for (data = 0, j = 0; j < 8 && gpc < gr->gpc_nr; j++, gpc++) + data |= gr->tpc_nr[gpc] << (j * 4); + if (pd) + nvkm_wr32(device, 0x406028 + (i * 4), data); + if (ds) + nvkm_wr32(device, 0x405870 + (i * 4), data); + } +} + void gf100_gr_init_400054(struct gf100_gr *gr) { @@ -2295,8 +2237,8 @@ gf100_gr_init(struct gf100_gr *gr) gr->func->init_gpc_mmu(gr); - if (gr->fuc_sw_nonctx) - gf100_gr_mmio(gr, gr->fuc_sw_nonctx); + if (gr->sw_nonctx) + gf100_gr_mmio(gr, gr->sw_nonctx); else gf100_gr_mmio(gr, gr->func->mmio); @@ -2320,6 +2262,8 @@ gf100_gr_init(struct gf100_gr *gr) gr->func->init_bios_2(gr); if (gr->func->init_swdx_pes_mask) gr->func->init_swdx_pes_mask(gr); + if (gr->func->init_fs) + gr->func->init_fs(gr); nvkm_wr32(device, 0x400500, 0x00010001); @@ -2338,8 +2282,8 @@ gf100_gr_init(struct gf100_gr *gr) if (gr->func->init_40601c) gr->func->init_40601c(gr); - nvkm_wr32(device, 0x404490, 0xc0000000); nvkm_wr32(device, 0x406018, 0xc0000000); + nvkm_wr32(device, 0x404490, 0xc0000000); if (gr->func->init_sked_hww_esr) gr->func->init_sked_hww_esr(gr); @@ -2453,8 +2397,67 @@ gf100_gr = { } }; +int +gf100_gr_nofw(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif) +{ + gr->firmware = false; + return 0; +} + +static int +gf100_gr_load_fw(struct gf100_gr *gr, const char *name, + struct nvkm_blob *blob) +{ + struct nvkm_subdev *subdev = &gr->base.engine.subdev; + struct nvkm_device *device = subdev->device; + const struct firmware *fw; + char f[32]; + int ret; + + snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, name); + ret = request_firmware(&fw, f, device->dev); + if (ret) { + snprintf(f, sizeof(f), "nouveau/%s", name); + ret = request_firmware(&fw, f, device->dev); + if (ret) { + nvkm_error(subdev, "failed to load %s\n", name); + return ret; + } + } + + blob->size = fw->size; + blob->data = kmemdup(fw->data, blob->size, GFP_KERNEL); + release_firmware(fw); + return (blob->data != NULL) ? 
0 : -ENOMEM; +} + +int +gf100_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif) +{ + struct nvkm_device *device = gr->base.engine.subdev.device; + + if (!nvkm_boolopt(device->cfgopt, "NvGrUseFW", false)) + return -EINVAL; + + if (gf100_gr_load_fw(gr, "fuc409c", &gr->fecs.inst) || + gf100_gr_load_fw(gr, "fuc409d", &gr->fecs.data) || + gf100_gr_load_fw(gr, "fuc41ac", &gr->gpccs.inst) || + gf100_gr_load_fw(gr, "fuc41ad", &gr->gpccs.data)) + return -ENOENT; + + gr->firmware = true; + return 0; +} + +static const struct gf100_gr_fwif +gf100_gr_fwif[] = { + { -1, gf100_gr_load, &gf100_gr }, + { -1, gf100_gr_nofw, &gf100_gr }, + {} +}; + int gf100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gf100_gr_new_(&gf100_gr, device, index, pgr); + return gf100_gr_new_(gf100_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h index fafdd0bbea9bdb705e3205f2302c6d8027ae7c57..88bcb57c2e0755cc2970ba8541ac1f9ffdabdf54 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h @@ -31,6 +31,8 @@ #include #include +struct nvkm_acr_lsfw; + #define GPC_MAX 32 #define TPC_MAX_PER_GPC 8 #define TPC_MAX (GPC_MAX * TPC_MAX_PER_GPC) @@ -55,11 +57,6 @@ struct gf100_gr_mmio { int buffer; }; -struct gf100_gr_fuc { - u32 *data; - u32 size; -}; - struct gf100_gr_zbc_color { u32 format; u32 ds[4]; @@ -83,29 +80,30 @@ struct gf100_gr { struct nvkm_gr base; struct { - struct nvkm_falcon *falcon; + struct nvkm_falcon falcon; + struct nvkm_blob inst; + struct nvkm_blob data; + struct mutex mutex; u32 disable; } fecs; struct { - struct nvkm_falcon *falcon; + struct nvkm_falcon falcon; + struct nvkm_blob inst; + struct nvkm_blob data; } gpccs; - struct gf100_gr_fuc fuc409c; - struct gf100_gr_fuc fuc409d; - struct gf100_gr_fuc fuc41ac; - struct gf100_gr_fuc fuc41ad; bool firmware; /* * Used if the register packs are loaded from NVIDIA fw instead of * using hardcoded arrays. To be allocated with vzalloc(). 
*/ - struct gf100_gr_pack *fuc_sw_nonctx; - struct gf100_gr_pack *fuc_sw_ctx; - struct gf100_gr_pack *fuc_bundle; - struct gf100_gr_pack *fuc_method; + struct gf100_gr_pack *sw_nonctx; + struct gf100_gr_pack *sw_ctx; + struct gf100_gr_pack *bundle; + struct gf100_gr_pack *method; struct gf100_gr_zbc_color zbc_color[NVKM_LTC_MAX_ZBC_CNT]; struct gf100_gr_zbc_depth zbc_depth[NVKM_LTC_MAX_ZBC_CNT]; @@ -140,12 +138,6 @@ struct gf100_gr { u32 size_pm; }; -int gf100_gr_ctor(const struct gf100_gr_func *, struct nvkm_device *, - int, struct gf100_gr *); -int gf100_gr_new_(const struct gf100_gr_func *, struct nvkm_device *, - int, struct nvkm_gr **); -void *gf100_gr_dtor(struct nvkm_gr *); - int gf100_gr_fecs_bind_pointer(struct gf100_gr *, u32 inst); struct gf100_gr_func_zbc { @@ -157,7 +149,6 @@ struct gf100_gr_func_zbc { }; struct gf100_gr_func { - void (*dtor)(struct gf100_gr *); void (*oneinit_tiles)(struct gf100_gr *); void (*oneinit_sm_id)(struct gf100_gr *); int (*init)(struct gf100_gr *); @@ -171,6 +162,7 @@ struct gf100_gr_func { void (*init_rop_active_fbps)(struct gf100_gr *); void (*init_bios_2)(struct gf100_gr *); void (*init_swdx_pes_mask)(struct gf100_gr *); + void (*init_fs)(struct gf100_gr *); void (*init_fecs_exceptions)(struct gf100_gr *); void (*init_ds_hww_esr_2)(struct gf100_gr *); void (*init_40601c)(struct gf100_gr *); @@ -217,6 +209,7 @@ void gf100_gr_init_419eb4(struct gf100_gr *); void gf100_gr_init_tex_hww_esr(struct gf100_gr *, int, int); void gf100_gr_init_shader_exceptions(struct gf100_gr *, int, int); void gf100_gr_init_400054(struct gf100_gr *); +void gf100_gr_init_num_tpc_per_gpc(struct gf100_gr *, bool, bool); extern const struct gf100_gr_func_zbc gf100_gr_zbc; void gf117_gr_init_zcull(struct gf100_gr *); @@ -245,10 +238,18 @@ void gp100_gr_init_fecs_exceptions(struct gf100_gr *); void gp100_gr_init_shader_exceptions(struct gf100_gr *, int, int); void gp100_gr_zbc_clear_color(struct gf100_gr *, int); void gp100_gr_zbc_clear_depth(struct gf100_gr *, int); +extern const struct gf100_gr_func_zbc gp100_gr_zbc; void gp102_gr_init_swdx_pes_mask(struct gf100_gr *); extern const struct gf100_gr_func_zbc gp102_gr_zbc; +extern const struct gf100_gr_func gp107_gr; + +void gv100_gr_init_419bd8(struct gf100_gr *); +void gv100_gr_init_504430(struct gf100_gr *, int, int); +void gv100_gr_init_shader_exceptions(struct gf100_gr *, int, int); +void gv100_gr_trap_mp(struct gf100_gr *, int, int); + #define gf100_gr_chan(p) container_of((p), struct gf100_gr_chan, object) #include @@ -269,9 +270,6 @@ struct gf100_gr_chan { void gf100_gr_ctxctl_debug(struct gf100_gr *); -void gf100_gr_dtor_fw(struct gf100_gr_fuc *); -int gf100_gr_ctor_fw(struct gf100_gr *, const char *, - struct gf100_gr_fuc *); u64 gf100_gr_units(struct nvkm_gr *); void gf100_gr_zbc_init(struct gf100_gr *); @@ -294,8 +292,8 @@ struct gf100_gr_pack { for (init = pack->init; init && init->count; init++) struct gf100_gr_ucode { - struct gf100_gr_fuc code; - struct gf100_gr_fuc data; + struct nvkm_blob code; + struct nvkm_blob data; }; extern struct gf100_gr_ucode gf100_gr_fecs_ucode; @@ -310,17 +308,6 @@ void gf100_gr_icmd(struct gf100_gr *, const struct gf100_gr_pack *); void gf100_gr_mthd(struct gf100_gr *, const struct gf100_gr_pack *); int gf100_gr_init_ctxctl(struct gf100_gr *); -/* external bundles loading functions */ -int gk20a_gr_av_to_init(struct gf100_gr *, const char *, - struct gf100_gr_pack **); -int gk20a_gr_aiv_to_init(struct gf100_gr *, const char *, - struct gf100_gr_pack **); -int 
gk20a_gr_av_to_method(struct gf100_gr *, const char *, - struct gf100_gr_pack **); - -int gm200_gr_new_(const struct gf100_gr_func *, struct nvkm_device *, int, - struct nvkm_gr **); - /* register init value lists */ extern const struct gf100_gr_init gf100_gr_init_main_0[]; @@ -403,4 +390,31 @@ extern const struct gf100_gr_init gm107_gr_init_cbm_0[]; void gm107_gr_init_bios(struct gf100_gr *); void gm200_gr_init_gpc_mmu(struct gf100_gr *); + +struct gf100_gr_fwif { + int version; + int (*load)(struct gf100_gr *, int ver, const struct gf100_gr_fwif *); + const struct gf100_gr_func *func; + const struct nvkm_acr_lsf_func *fecs; + const struct nvkm_acr_lsf_func *gpccs; +}; + +int gf100_gr_load(struct gf100_gr *, int, const struct gf100_gr_fwif *); +int gf100_gr_nofw(struct gf100_gr *, int, const struct gf100_gr_fwif *); + +int gk20a_gr_load_sw(struct gf100_gr *, const char *path, int ver); + +int gm200_gr_load(struct gf100_gr *, int, const struct gf100_gr_fwif *); +extern const struct nvkm_acr_lsf_func gm200_gr_gpccs_acr; +extern const struct nvkm_acr_lsf_func gm200_gr_fecs_acr; + +extern const struct nvkm_acr_lsf_func gm20b_gr_fecs_acr; +void gm20b_gr_acr_bld_write(struct nvkm_acr *, u32, struct nvkm_acr_lsfw *); +void gm20b_gr_acr_bld_patch(struct nvkm_acr *, u32, s64); + +extern const struct nvkm_acr_lsf_func gp108_gr_gpccs_acr; +extern const struct nvkm_acr_lsf_func gp108_gr_fecs_acr; + +int gf100_gr_new_(const struct gf100_gr_fwif *, struct nvkm_device *, int, + struct nvkm_gr **); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c index 42c2fd9fc04e67230ea94ed1a2b65f16f496e531..0536fe8b2b9258b0f32b049cadb6723ae57552d4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c @@ -144,8 +144,15 @@ gf104_gr = { } }; +static const struct gf100_gr_fwif +gf104_gr_fwif[] = { + { -1, gf100_gr_load, &gf104_gr }, + { -1, gf100_gr_nofw, &gf104_gr }, + {} +}; + int gf104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gf100_gr_new_(&gf104_gr, device, index, pgr); + return gf100_gr_new_(gf104_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c index 4731a460adc7b29645c1b7a14ececc0c864e3ee3..14284b06112f7a2926a8a0ac46738d9d4ef22978 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c @@ -143,8 +143,15 @@ gf108_gr = { } }; +const struct gf100_gr_fwif +gf108_gr_fwif[] = { + { -1, gf100_gr_load, &gf108_gr }, + { -1, gf100_gr_nofw, &gf108_gr }, + {} +}; + int gf108_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gf100_gr_new_(&gf108_gr, device, index, pgr); + return gf100_gr_new_(gf108_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c index cdf759c8cd7fbf46595fab2e0c5d8a01e5ec4029..280752551a3ad1853be03f1ba7e4cfb5bf0f1eef 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c @@ -119,8 +119,15 @@ gf110_gr = { } }; +static const struct gf100_gr_fwif +gf110_gr_fwif[] = { + { -1, gf100_gr_load, &gf110_gr }, + { -1, gf100_gr_nofw, &gf110_gr }, + {} +}; + int gf110_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gf100_gr_new_(&gf110_gr, device, index, pgr); + return gf100_gr_new_(gf110_gr_fwif, device, 
index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c index a4158f84c64998e4435a0c4dea9dbc7f73963623..235c3fbe4b957b56ab41a2715ee0a4d9e4f60a1c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c @@ -184,8 +184,15 @@ gf117_gr = { } }; +static const struct gf100_gr_fwif +gf117_gr_fwif[] = { + { -1, gf100_gr_load, &gf117_gr }, + { -1, gf100_gr_nofw, &gf117_gr }, + {} +}; + int gf117_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gf100_gr_new_(&gf117_gr, device, index, pgr); + return gf100_gr_new_(gf117_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c index 4197844870b3761aff488e9ebe77ecdb88dc539b..7eac385ece972c04c63d93859b0d1a4e6d1600e0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c @@ -210,8 +210,15 @@ gf119_gr = { } }; +static const struct gf100_gr_fwif +gf119_gr_fwif[] = { + { -1, gf100_gr_load, &gf119_gr }, + { -1, gf100_gr_nofw, &gf119_gr }, + {} +}; + int gf119_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gf100_gr_new_(&gf119_gr, device, index, pgr); + return gf100_gr_new_(gf119_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c index 477fee3e37153c87d7e2275ff233759406714ad3..89f51d76082bcc5e7b3a8bafca16e3edc7fa4130 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c @@ -489,8 +489,15 @@ gk104_gr = { } }; +static const struct gf100_gr_fwif +gk104_gr_fwif[] = { + { -1, gf100_gr_load, &gk104_gr }, + { -1, gf100_gr_nofw, &gk104_gr }, + {} +}; + int gk104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gf100_gr_new_(&gk104_gr, device, index, pgr); + return gf100_gr_new_(gk104_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c index 7cd628c84e075fb81cf4ca5dfca369848623eb1b..735f05e54d62af1714e820c0294ed6601dcc961a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c @@ -385,8 +385,15 @@ gk110_gr = { } }; +static const struct gf100_gr_fwif +gk110_gr_fwif[] = { + { -1, gf100_gr_load, &gk110_gr }, + { -1, gf100_gr_nofw, &gk110_gr }, + {} +}; + int gk110_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gf100_gr_new_(&gk110_gr, device, index, pgr); + return gf100_gr_new_(gk110_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c index a38faa2156352fe5a2a262c44322ef85ef18cc0d..adc971be8f3b5c641670897ea80d50149b0a8096 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c @@ -136,8 +136,15 @@ gk110b_gr = { } }; +static const struct gf100_gr_fwif +gk110b_gr_fwif[] = { + { -1, gf100_gr_load, &gk110b_gr }, + { -1, gf100_gr_nofw, &gk110b_gr }, + {} +}; + int gk110b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gf100_gr_new_(&gk110b_gr, device, index, pgr); + return gf100_gr_new_(gk110b_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c 
b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c index 58456660e603d820f6060d2df29879722dd145f0..aa0eff6795ac76b5d847421fbacd255b44d4df50 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c @@ -194,8 +194,15 @@ gk208_gr = { } }; +static const struct gf100_gr_fwif +gk208_gr_fwif[] = { + { -1, gf100_gr_load, &gk208_gr }, + { -1, gf100_gr_nofw, &gk208_gr }, + {} +}; + int gk208_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gf100_gr_new_(&gk208_gr, device, index, pgr); + return gf100_gr_new_(gk208_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c index 500cb08dd608011005f178b4b469987d336242ca..4209b24a46d703ad4c0c24821a768121edb81e4e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c @@ -22,6 +22,7 @@ #include "gf100.h" #include "ctxgf100.h" +#include #include #include @@ -33,21 +34,22 @@ struct gk20a_fw_av }; int -gk20a_gr_av_to_init(struct gf100_gr *gr, const char *fw_name, - struct gf100_gr_pack **ppack) +gk20a_gr_av_to_init(struct gf100_gr *gr, const char *path, const char *name, + int ver, struct gf100_gr_pack **ppack) { - struct gf100_gr_fuc fuc; + struct nvkm_subdev *subdev = &gr->base.engine.subdev; + struct nvkm_blob blob; struct gf100_gr_init *init; struct gf100_gr_pack *pack; int nent; int ret; int i; - ret = gf100_gr_ctor_fw(gr, fw_name, &fuc); + ret = nvkm_firmware_load_blob(subdev, path, name, ver, &blob); if (ret) return ret; - nent = (fuc.size / sizeof(struct gk20a_fw_av)); + nent = (blob.size / sizeof(struct gk20a_fw_av)); pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1))); if (!pack) { ret = -ENOMEM; @@ -59,7 +61,7 @@ gk20a_gr_av_to_init(struct gf100_gr *gr, const char *fw_name, for (i = 0; i < nent; i++) { struct gf100_gr_init *ent = &init[i]; - struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc.data)[i]; + struct gk20a_fw_av *av = &((struct gk20a_fw_av *)blob.data)[i]; ent->addr = av->addr; ent->data = av->data; @@ -70,7 +72,7 @@ gk20a_gr_av_to_init(struct gf100_gr *gr, const char *fw_name, *ppack = pack; end: - gf100_gr_dtor_fw(&fuc); + nvkm_blob_dtor(&blob); return ret; } @@ -82,21 +84,22 @@ struct gk20a_fw_aiv }; int -gk20a_gr_aiv_to_init(struct gf100_gr *gr, const char *fw_name, - struct gf100_gr_pack **ppack) +gk20a_gr_aiv_to_init(struct gf100_gr *gr, const char *path, const char *name, + int ver, struct gf100_gr_pack **ppack) { - struct gf100_gr_fuc fuc; + struct nvkm_subdev *subdev = &gr->base.engine.subdev; + struct nvkm_blob blob; struct gf100_gr_init *init; struct gf100_gr_pack *pack; int nent; int ret; int i; - ret = gf100_gr_ctor_fw(gr, fw_name, &fuc); + ret = nvkm_firmware_load_blob(subdev, path, name, ver, &blob); if (ret) return ret; - nent = (fuc.size / sizeof(struct gk20a_fw_aiv)); + nent = (blob.size / sizeof(struct gk20a_fw_aiv)); pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1))); if (!pack) { ret = -ENOMEM; @@ -108,7 +111,7 @@ gk20a_gr_aiv_to_init(struct gf100_gr *gr, const char *fw_name, for (i = 0; i < nent; i++) { struct gf100_gr_init *ent = &init[i]; - struct gk20a_fw_aiv *av = &((struct gk20a_fw_aiv *)fuc.data)[i]; + struct gk20a_fw_aiv *av = &((struct gk20a_fw_aiv *)blob.data)[i]; ent->addr = av->addr; ent->data = av->data; @@ -119,15 +122,16 @@ gk20a_gr_aiv_to_init(struct gf100_gr *gr, const char *fw_name, *ppack = pack; end: - gf100_gr_dtor_fw(&fuc); + 
nvkm_blob_dtor(&blob); return ret; } int -gk20a_gr_av_to_method(struct gf100_gr *gr, const char *fw_name, - struct gf100_gr_pack **ppack) +gk20a_gr_av_to_method(struct gf100_gr *gr, const char *path, const char *name, + int ver, struct gf100_gr_pack **ppack) { - struct gf100_gr_fuc fuc; + struct nvkm_subdev *subdev = &gr->base.engine.subdev; + struct nvkm_blob blob; struct gf100_gr_init *init; struct gf100_gr_pack *pack; /* We don't suppose we will initialize more than 16 classes here... */ @@ -137,29 +141,30 @@ gk20a_gr_av_to_method(struct gf100_gr *gr, const char *fw_name, int ret; int i; - ret = gf100_gr_ctor_fw(gr, fw_name, &fuc); + ret = nvkm_firmware_load_blob(subdev, path, name, ver, &blob); if (ret) return ret; - nent = (fuc.size / sizeof(struct gk20a_fw_av)); + nent = (blob.size / sizeof(struct gk20a_fw_av)); - pack = vzalloc((sizeof(*pack) * max_classes) + - (sizeof(*init) * (nent + 1))); + pack = vzalloc((sizeof(*pack) * (max_classes + 1)) + + (sizeof(*init) * (nent + max_classes + 1))); if (!pack) { ret = -ENOMEM; goto end; } - init = (void *)(pack + max_classes); + init = (void *)(pack + max_classes + 1); - for (i = 0; i < nent; i++) { - struct gf100_gr_init *ent = &init[i]; - struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc.data)[i]; + for (i = 0; i < nent; i++, init++) { + struct gk20a_fw_av *av = &((struct gk20a_fw_av *)blob.data)[i]; u32 class = av->addr & 0xffff; u32 addr = (av->addr & 0xffff0000) >> 14; if (prevclass != class) { - pack[classidx].init = ent; + if (prevclass) /* Add terminator to the method list. */ + init++; + pack[classidx].init = init; pack[classidx].type = class; prevclass = class; if (++classidx >= max_classes) { @@ -169,16 +174,16 @@ gk20a_gr_av_to_method(struct gf100_gr *gr, const char *fw_name, } } - ent->addr = addr; - ent->data = av->data; - ent->count = 1; - ent->pitch = 1; + init->addr = addr; + init->data = av->data; + init->count = 1; + init->pitch = 1; } *ppack = pack; end: - gf100_gr_dtor_fw(&fuc); + nvkm_blob_dtor(&blob); return ret; } @@ -224,7 +229,7 @@ gk20a_gr_init(struct gf100_gr *gr) /* Clear SCC RAM */ nvkm_wr32(device, 0x40802c, 0x1); - gf100_gr_mmio(gr, gr->fuc_sw_nonctx); + gf100_gr_mmio(gr, gr->sw_nonctx); ret = gk20a_gr_wait_mem_scrubbing(gr); if (ret) @@ -303,40 +308,45 @@ gk20a_gr = { }; int -gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +gk20a_gr_load_sw(struct gf100_gr *gr, const char *path, int ver) { - struct gf100_gr *gr; - int ret; + if (gk20a_gr_av_to_init(gr, path, "sw_nonctx", ver, &gr->sw_nonctx) || + gk20a_gr_aiv_to_init(gr, path, "sw_ctx", ver, &gr->sw_ctx) || + gk20a_gr_av_to_init(gr, path, "sw_bundle_init", ver, &gr->bundle) || + gk20a_gr_av_to_method(gr, path, "sw_method_init", ver, &gr->method)) + return -ENOENT; - if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL))) - return -ENOMEM; - *pgr = &gr->base; - - ret = gf100_gr_ctor(&gk20a_gr, device, index, gr); - if (ret) - return ret; + return 0; +} - if (gf100_gr_ctor_fw(gr, "fecs_inst", &gr->fuc409c) || - gf100_gr_ctor_fw(gr, "fecs_data", &gr->fuc409d) || - gf100_gr_ctor_fw(gr, "gpccs_inst", &gr->fuc41ac) || - gf100_gr_ctor_fw(gr, "gpccs_data", &gr->fuc41ad)) - return -ENODEV; +static int +gk20a_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif) +{ + struct nvkm_subdev *subdev = &gr->base.engine.subdev; - ret = gk20a_gr_av_to_init(gr, "sw_nonctx", &gr->fuc_sw_nonctx); - if (ret) - return ret; + if (nvkm_firmware_load_blob(subdev, "", "fecs_inst", ver, + &gr->fecs.inst) || + nvkm_firmware_load_blob(subdev, "", 
"fecs_data", ver, + &gr->fecs.data) || + nvkm_firmware_load_blob(subdev, "", "gpccs_inst", ver, + &gr->gpccs.inst) || + nvkm_firmware_load_blob(subdev, "", "gpccs_data", ver, + &gr->gpccs.data)) + return -ENOENT; - ret = gk20a_gr_aiv_to_init(gr, "sw_ctx", &gr->fuc_sw_ctx); - if (ret) - return ret; + gr->firmware = true; - ret = gk20a_gr_av_to_init(gr, "sw_bundle_init", &gr->fuc_bundle); - if (ret) - return ret; + return gk20a_gr_load_sw(gr, "", ver); +} - ret = gk20a_gr_av_to_method(gr, "sw_method_init", &gr->fuc_method); - if (ret) - return ret; +static const struct gf100_gr_fwif +gk20a_gr_fwif[] = { + { -1, gk20a_gr_load, &gk20a_gr }, + {} +}; - return 0; +int +gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +{ + return gf100_gr_new_(gk20a_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c index 92e31d397207561e844cd15f37dd01082ab886c5..09bb78ba9d0005c2ef5eb7d0a863e19b11094dbe 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c @@ -429,8 +429,15 @@ gm107_gr = { } }; +static const struct gf100_gr_fwif +gm107_gr_fwif[] = { + { -1, gf100_gr_load, &gm107_gr }, + { -1, gf100_gr_nofw, &gm107_gr }, + {} +}; + int gm107_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gf100_gr_new_(&gm107_gr, device, index, pgr); + return gf100_gr_new_(gm107_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c index eff30662b9841300f473621c138f7d04a599add7..3d67cfb08395714e5b8843b2e93c1663968bb6b4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c @@ -24,14 +24,64 @@ #include "gf100.h" #include "ctxgf100.h" +#include +#include #include +#include + #include /******************************************************************************* * PGRAPH engine/subdev functions ******************************************************************************/ +static void +gm200_gr_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust) +{ + struct flcn_bl_dmem_desc_v1 hdr; + nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr)); + hdr.code_dma_base = hdr.code_dma_base + adjust; + hdr.data_dma_base = hdr.data_dma_base + adjust; + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); + flcn_bl_dmem_desc_v1_dump(&acr->subdev, &hdr); +} + +static void +gm200_gr_acr_bld_write(struct nvkm_acr *acr, u32 bld, + struct nvkm_acr_lsfw *lsfw) +{ + const u64 base = lsfw->offset.img + lsfw->app_start_offset; + const u64 code = base + lsfw->app_resident_code_offset; + const u64 data = base + lsfw->app_resident_data_offset; + const struct flcn_bl_dmem_desc_v1 hdr = { + .ctx_dma = FALCON_DMAIDX_UCODE, + .code_dma_base = code, + .non_sec_code_off = lsfw->app_resident_code_offset, + .non_sec_code_size = lsfw->app_resident_code_size, + .code_entry_point = lsfw->app_imem_entry, + .data_dma_base = data, + .data_size = lsfw->app_resident_data_size, + }; + + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); +} + +const struct nvkm_acr_lsf_func +gm200_gr_gpccs_acr = { + .flags = NVKM_ACR_LSF_FORCE_PRIV_LOAD, + .bld_size = sizeof(struct flcn_bl_dmem_desc_v1), + .bld_write = gm200_gr_acr_bld_write, + .bld_patch = gm200_gr_acr_bld_patch, +}; + +const struct nvkm_acr_lsf_func +gm200_gr_fecs_acr = { + .bld_size = sizeof(struct flcn_bl_dmem_desc_v1), + .bld_write = gm200_gr_acr_bld_write, + .bld_patch = gm200_gr_acr_bld_patch, 
+}; + int gm200_gr_rops(struct gf100_gr *gr) { @@ -124,44 +174,6 @@ gm200_gr_oneinit_tiles(struct gf100_gr *gr) } } -int -gm200_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device, - int index, struct nvkm_gr **pgr) -{ - struct gf100_gr *gr; - int ret; - - if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL))) - return -ENOMEM; - *pgr = &gr->base; - - ret = gf100_gr_ctor(func, device, index, gr); - if (ret) - return ret; - - /* Load firmwares for non-secure falcons */ - if (!nvkm_secboot_is_managed(device->secboot, - NVKM_SECBOOT_FALCON_FECS)) { - if ((ret = gf100_gr_ctor_fw(gr, "gr/fecs_inst", &gr->fuc409c)) || - (ret = gf100_gr_ctor_fw(gr, "gr/fecs_data", &gr->fuc409d))) - return ret; - } - if (!nvkm_secboot_is_managed(device->secboot, - NVKM_SECBOOT_FALCON_GPCCS)) { - if ((ret = gf100_gr_ctor_fw(gr, "gr/gpccs_inst", &gr->fuc41ac)) || - (ret = gf100_gr_ctor_fw(gr, "gr/gpccs_data", &gr->fuc41ad))) - return ret; - } - - if ((ret = gk20a_gr_av_to_init(gr, "gr/sw_nonctx", &gr->fuc_sw_nonctx)) || - (ret = gk20a_gr_aiv_to_init(gr, "gr/sw_ctx", &gr->fuc_sw_ctx)) || - (ret = gk20a_gr_av_to_init(gr, "gr/sw_bundle_init", &gr->fuc_bundle)) || - (ret = gk20a_gr_av_to_method(gr, "gr/sw_method_init", &gr->fuc_method))) - return ret; - - return 0; -} - static const struct gf100_gr_func gm200_gr = { .oneinit_tiles = gm200_gr_oneinit_tiles, @@ -197,8 +209,78 @@ gm200_gr = { } }; +int +gm200_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif) +{ + int ret; + + ret = nvkm_acr_lsfw_load_bl_inst_data_sig(&gr->base.engine.subdev, + &gr->fecs.falcon, + NVKM_ACR_LSF_FECS, + "gr/fecs_", ver, fwif->fecs); + if (ret) + return ret; + + ret = nvkm_acr_lsfw_load_bl_inst_data_sig(&gr->base.engine.subdev, + &gr->gpccs.falcon, + NVKM_ACR_LSF_GPCCS, + "gr/gpccs_", ver, + fwif->gpccs); + if (ret) + return ret; + + gr->firmware = true; + + return gk20a_gr_load_sw(gr, "gr/", ver); +} + +MODULE_FIRMWARE("nvidia/gm200/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/gm200/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/gm200/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/gm200/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_bl.bin"); +MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_data.bin"); +MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_sig.bin"); +MODULE_FIRMWARE("nvidia/gm200/gr/sw_ctx.bin"); +MODULE_FIRMWARE("nvidia/gm200/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/gm200/gr/sw_bundle_init.bin"); +MODULE_FIRMWARE("nvidia/gm200/gr/sw_method_init.bin"); + +MODULE_FIRMWARE("nvidia/gm204/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/gm204/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/gm204/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/gm204/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_bl.bin"); +MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_data.bin"); +MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_sig.bin"); +MODULE_FIRMWARE("nvidia/gm204/gr/sw_ctx.bin"); +MODULE_FIRMWARE("nvidia/gm204/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/gm204/gr/sw_bundle_init.bin"); +MODULE_FIRMWARE("nvidia/gm204/gr/sw_method_init.bin"); + +MODULE_FIRMWARE("nvidia/gm206/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/gm206/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/gm206/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/gm206/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_bl.bin"); +MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_data.bin"); 
+MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_sig.bin"); +MODULE_FIRMWARE("nvidia/gm206/gr/sw_ctx.bin"); +MODULE_FIRMWARE("nvidia/gm206/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/gm206/gr/sw_bundle_init.bin"); +MODULE_FIRMWARE("nvidia/gm206/gr/sw_method_init.bin"); + +static const struct gf100_gr_fwif +gm200_gr_fwif[] = { + { 0, gm200_gr_load, &gm200_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr }, + {} +}; + int gm200_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gm200_gr_new_(&gm200_gr, device, index, pgr); + return gf100_gr_new_(gm200_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c index a667770ce3cbe379640805946674a23cd169bb85..09d8c5d5b000c9937c4d82850556e1544ab2e54d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c @@ -22,10 +22,61 @@ #include "gf100.h" #include "ctxgf100.h" +#include +#include #include +#include + #include +void +gm20b_gr_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust) +{ + struct flcn_bl_dmem_desc hdr; + u64 addr; + + nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr)); + addr = ((u64)hdr.code_dma_base1 << 40 | hdr.code_dma_base << 8); + hdr.code_dma_base = lower_32_bits((addr + adjust) >> 8); + hdr.code_dma_base1 = upper_32_bits((addr + adjust) >> 8); + addr = ((u64)hdr.data_dma_base1 << 40 | hdr.data_dma_base << 8); + hdr.data_dma_base = lower_32_bits((addr + adjust) >> 8); + hdr.data_dma_base1 = upper_32_bits((addr + adjust) >> 8); + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); + + flcn_bl_dmem_desc_dump(&acr->subdev, &hdr); +} + +void +gm20b_gr_acr_bld_write(struct nvkm_acr *acr, u32 bld, + struct nvkm_acr_lsfw *lsfw) +{ + const u64 base = lsfw->offset.img + lsfw->app_start_offset; + const u64 code = (base + lsfw->app_resident_code_offset) >> 8; + const u64 data = (base + lsfw->app_resident_data_offset) >> 8; + const struct flcn_bl_dmem_desc hdr = { + .ctx_dma = FALCON_DMAIDX_UCODE, + .code_dma_base = lower_32_bits(code), + .non_sec_code_off = lsfw->app_resident_code_offset, + .non_sec_code_size = lsfw->app_resident_code_size, + .code_entry_point = lsfw->app_imem_entry, + .data_dma_base = lower_32_bits(data), + .data_size = lsfw->app_resident_data_size, + .code_dma_base1 = upper_32_bits(code), + .data_dma_base1 = upper_32_bits(data), + }; + + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); +} + +const struct nvkm_acr_lsf_func +gm20b_gr_fecs_acr = { + .bld_size = sizeof(struct flcn_bl_dmem_desc), + .bld_write = gm20b_gr_acr_bld_write, + .bld_patch = gm20b_gr_acr_bld_patch, +}; + static void gm20b_gr_init_gpc_mmu(struct gf100_gr *gr) { @@ -33,7 +84,7 @@ gm20b_gr_init_gpc_mmu(struct gf100_gr *gr) u32 val; /* Bypass MMU check for non-secure boot */ - if (!device->secboot) { + if (!device->acr) { nvkm_wr32(device, 0x100ce4, 0xffffffff); if (nvkm_rd32(device, 0x100ce4) != 0xffffffff) @@ -85,8 +136,51 @@ gm20b_gr = { } }; +static int +gm20b_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif) +{ + struct nvkm_subdev *subdev = &gr->base.engine.subdev; + int ret; + + ret = nvkm_acr_lsfw_load_bl_inst_data_sig(subdev, &gr->fecs.falcon, + NVKM_ACR_LSF_FECS, + "gr/fecs_", ver, fwif->fecs); + if (ret) + return ret; + + + if (nvkm_firmware_load_blob(subdev, "gr/", "gpccs_inst", ver, + &gr->gpccs.inst) || + nvkm_firmware_load_blob(subdev, "gr/", "gpccs_data", ver, + &gr->gpccs.data)) + return -ENOENT; + + gr->firmware = true; + + return gk20a_gr_load_sw(gr, "gr/", ver); +} + +#if 
IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) +MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_data.bin"); +MODULE_FIRMWARE("nvidia/gm20b/gr/sw_ctx.bin"); +MODULE_FIRMWARE("nvidia/gm20b/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/gm20b/gr/sw_bundle_init.bin"); +MODULE_FIRMWARE("nvidia/gm20b/gr/sw_method_init.bin"); +#endif + +static const struct gf100_gr_fwif +gm20b_gr_fwif[] = { + { 0, gm20b_gr_load, &gm20b_gr, &gm20b_gr_fecs_acr }, + {} +}; + int gm20b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gm200_gr_new_(&gm20b_gr, device, index, pgr); + return gf100_gr_new_(gm20b_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c index 9d0521ce309a22e079a93bde870f245f5b7542f2..33c8634ae56741c6542786dd276787ba81aaac44 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c @@ -62,7 +62,7 @@ gp100_gr_zbc_clear_depth(struct gf100_gr *gr, int zbc) gr->zbc_depth[zbc].format << ((znum % 4) * 7)); } -static const struct gf100_gr_func_zbc +const struct gf100_gr_func_zbc gp100_gr_zbc = { .clear_color = gp100_gr_zbc_clear_color, .clear_depth = gp100_gr_zbc_clear_depth, @@ -135,8 +135,27 @@ gp100_gr = { } }; +MODULE_FIRMWARE("nvidia/gp100/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_bl.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_data.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_sig.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/sw_ctx.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/sw_bundle_init.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/sw_method_init.bin"); + +static const struct gf100_gr_fwif +gp100_gr_fwif[] = { + { 0, gm200_gr_load, &gp100_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr }, + {} +}; + int gp100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gm200_gr_new_(&gp100_gr, device, index, pgr); + return gf100_gr_new_(gp100_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c index 37f7d739bf80dcbc8a40b5d542e856946b394928..7baf67f743f4dd7c75b632d3715cf78fed460a87 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c @@ -131,8 +131,27 @@ gp102_gr = { } }; +MODULE_FIRMWARE("nvidia/gp102/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/gp102/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/gp102/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/gp102/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_bl.bin"); +MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_data.bin"); +MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_sig.bin"); +MODULE_FIRMWARE("nvidia/gp102/gr/sw_ctx.bin"); +MODULE_FIRMWARE("nvidia/gp102/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/gp102/gr/sw_bundle_init.bin"); +MODULE_FIRMWARE("nvidia/gp102/gr/sw_method_init.bin"); + +static const struct gf100_gr_fwif +gp102_gr_fwif[] = { + { 0, gm200_gr_load, &gp102_gr, 
&gm200_gr_fecs_acr, &gm200_gr_gpccs_acr }, + {} +}; + int gp102_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gm200_gr_new_(&gp102_gr, device, index, pgr); + return gf100_gr_new_(gp102_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c index 4573c914c021e2206eb6117f6e6f4e14c3ad1d48..d9b8ef875f8d9066362f960ce9402ee3ce5d09a6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c @@ -59,8 +59,40 @@ gp104_gr = { } }; +MODULE_FIRMWARE("nvidia/gp104/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/gp104/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/gp104/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/gp104/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_bl.bin"); +MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_data.bin"); +MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_sig.bin"); +MODULE_FIRMWARE("nvidia/gp104/gr/sw_ctx.bin"); +MODULE_FIRMWARE("nvidia/gp104/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/gp104/gr/sw_bundle_init.bin"); +MODULE_FIRMWARE("nvidia/gp104/gr/sw_method_init.bin"); + +MODULE_FIRMWARE("nvidia/gp106/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/gp106/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/gp106/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/gp106/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_bl.bin"); +MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_data.bin"); +MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_sig.bin"); +MODULE_FIRMWARE("nvidia/gp106/gr/sw_ctx.bin"); +MODULE_FIRMWARE("nvidia/gp106/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/gp106/gr/sw_bundle_init.bin"); +MODULE_FIRMWARE("nvidia/gp106/gr/sw_method_init.bin"); + +static const struct gf100_gr_fwif +gp104_gr_fwif[] = { + { 0, gm200_gr_load, &gp104_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr }, + {} +}; + int gp104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gm200_gr_new_(&gp104_gr, device, index, pgr); + return gf100_gr_new_(gp104_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c index 812aba91653fad7f66f1e761fa4167e3dc1e7165..2b1ad5522184d6771a3c01b199a0b758887ca68f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c @@ -26,7 +26,7 @@ #include -static const struct gf100_gr_func +const struct gf100_gr_func gp107_gr = { .oneinit_tiles = gm200_gr_oneinit_tiles, .oneinit_sm_id = gm200_gr_oneinit_sm_id, @@ -61,8 +61,27 @@ gp107_gr = { } }; +MODULE_FIRMWARE("nvidia/gp107/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/gp107/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/gp107/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/gp107/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_bl.bin"); +MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_data.bin"); +MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_sig.bin"); +MODULE_FIRMWARE("nvidia/gp107/gr/sw_ctx.bin"); +MODULE_FIRMWARE("nvidia/gp107/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/gp107/gr/sw_bundle_init.bin"); +MODULE_FIRMWARE("nvidia/gp107/gr/sw_method_init.bin"); + +static const struct gf100_gr_fwif +gp107_gr_fwif[] = { + { 0, gm200_gr_load, &gp107_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr }, + {} +}; + int gp107_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - 
return gm200_gr_new_(&gp107_gr, device, index, pgr); + return gf100_gr_new_(gp107_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c new file mode 100644 index 0000000000000000000000000000000000000000..113e4c1ba9e88b0137e3b709d14788cfa141b5f6 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c @@ -0,0 +1,97 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "gf100.h" + +#include + +#include + +static void +gp108_gr_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust) +{ + struct flcn_bl_dmem_desc_v2 hdr; + nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr)); + hdr.code_dma_base = hdr.code_dma_base + adjust; + hdr.data_dma_base = hdr.data_dma_base + adjust; + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); + flcn_bl_dmem_desc_v2_dump(&acr->subdev, &hdr); +} + +static void +gp108_gr_acr_bld_write(struct nvkm_acr *acr, u32 bld, + struct nvkm_acr_lsfw *lsfw) +{ + const u64 base = lsfw->offset.img + lsfw->app_start_offset; + const u64 code = base + lsfw->app_resident_code_offset; + const u64 data = base + lsfw->app_resident_data_offset; + const struct flcn_bl_dmem_desc_v2 hdr = { + .ctx_dma = FALCON_DMAIDX_UCODE, + .code_dma_base = code, + .non_sec_code_off = lsfw->app_resident_code_offset, + .non_sec_code_size = lsfw->app_resident_code_size, + .code_entry_point = lsfw->app_imem_entry, + .data_dma_base = data, + .data_size = lsfw->app_resident_data_size, + }; + + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); +} + +const struct nvkm_acr_lsf_func +gp108_gr_gpccs_acr = { + .flags = NVKM_ACR_LSF_FORCE_PRIV_LOAD, + .bld_size = sizeof(struct flcn_bl_dmem_desc_v2), + .bld_write = gp108_gr_acr_bld_write, + .bld_patch = gp108_gr_acr_bld_patch, +}; + +const struct nvkm_acr_lsf_func +gp108_gr_fecs_acr = { + .bld_size = sizeof(struct flcn_bl_dmem_desc_v2), + .bld_write = gp108_gr_acr_bld_write, + .bld_patch = gp108_gr_acr_bld_patch, +}; + +MODULE_FIRMWARE("nvidia/gp108/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/gp108/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/gp108/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/gp108/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_bl.bin"); +MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_data.bin"); +MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_sig.bin"); +MODULE_FIRMWARE("nvidia/gp108/gr/sw_ctx.bin"); 
+MODULE_FIRMWARE("nvidia/gp108/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/gp108/gr/sw_bundle_init.bin"); +MODULE_FIRMWARE("nvidia/gp108/gr/sw_method_init.bin"); + +static const struct gf100_gr_fwif +gp108_gr_fwif[] = { + { 0, gm200_gr_load, &gp107_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr }, + {} +}; + +int +gp108_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +{ + return gf100_gr_new_(gp108_gr_fwif, device, index, pgr); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c index 303dceddd4a858b1c4583f85e66eca7db38a534a..eaf913eb5aa3e6e81986c94257b6322addf77f9f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c @@ -23,8 +23,20 @@ #include "gf100.h" #include "ctxgf100.h" +#include + #include +#include + +static const struct nvkm_acr_lsf_func +gp10b_gr_gpccs_acr = { + .flags = NVKM_ACR_LSF_FORCE_PRIV_LOAD, + .bld_size = sizeof(struct flcn_bl_dmem_desc), + .bld_write = gm20b_gr_acr_bld_write, + .bld_patch = gm20b_gr_acr_bld_patch, +}; + static const struct gf100_gr_func gp10b_gr = { .oneinit_tiles = gm200_gr_oneinit_tiles, @@ -48,8 +60,8 @@ gp10b_gr = { .gpc_nr = 1, .tpc_nr = 2, .ppc_nr = 1, - .grctx = &gp102_grctx, - .zbc = &gp102_gr_zbc, + .grctx = &gp100_grctx, + .zbc = &gp100_gr_zbc, .sclass = { { -1, -1, FERMI_TWOD_A }, { -1, -1, KEPLER_INLINE_TO_MEMORY_B }, @@ -59,8 +71,29 @@ gp10b_gr = { } }; +#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) +MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_bl.bin"); +MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_data.bin"); +MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_sig.bin"); +MODULE_FIRMWARE("nvidia/gp10b/gr/sw_ctx.bin"); +MODULE_FIRMWARE("nvidia/gp10b/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/gp10b/gr/sw_bundle_init.bin"); +MODULE_FIRMWARE("nvidia/gp10b/gr/sw_method_init.bin"); +#endif + +static const struct gf100_gr_fwif +gp10b_gr_fwif[] = { + { 0, gm200_gr_load, &gp10b_gr, &gm20b_gr_fecs_acr, &gp10b_gr_gpccs_acr }, + {} +}; + int gp10b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gm200_gr_new_(&gp10b_gr, device, index, pgr); + return gf100_gr_new_(gp10b_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c index 3b3327789ae78a39f7ea824dadd50b3854f33e9d..70639d88b8e6b419c1b2c8ec62b12a47ba7fa693 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c @@ -45,7 +45,7 @@ gv100_gr_trap_sm(struct gf100_gr *gr, int gpc, int tpc, int sm) nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x734 + sm * 0x80), gerr); } -static void +void gv100_gr_trap_mp(struct gf100_gr *gr, int gpc, int tpc) { gv100_gr_trap_sm(gr, gpc, tpc, 0); @@ -59,7 +59,7 @@ gv100_gr_init_4188a4(struct gf100_gr *gr) nvkm_mask(device, 0x4188a4, 0x03000000, 0x03000000); } -static void +void gv100_gr_init_shader_exceptions(struct gf100_gr *gr, int gpc, int tpc) { struct nvkm_device *device = gr->base.engine.subdev.device; @@ -71,14 +71,14 @@ gv100_gr_init_shader_exceptions(struct gf100_gr *gr, int gpc, int tpc) } } -static void +void gv100_gr_init_504430(struct gf100_gr *gr, int gpc, int tpc) { struct nvkm_device *device = gr->base.engine.subdev.device; 
nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0x403f0000); } -static void +void gv100_gr_init_419bd8(struct gf100_gr *gr) { struct nvkm_device *device = gr->base.engine.subdev.device; @@ -120,8 +120,27 @@ gv100_gr = { } }; +MODULE_FIRMWARE("nvidia/gv100/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/gv100/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/gv100/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/gv100/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_bl.bin"); +MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_data.bin"); +MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_sig.bin"); +MODULE_FIRMWARE("nvidia/gv100/gr/sw_ctx.bin"); +MODULE_FIRMWARE("nvidia/gv100/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/gv100/gr/sw_bundle_init.bin"); +MODULE_FIRMWARE("nvidia/gv100/gr/sw_method_init.bin"); + +static const struct gf100_gr_fwif +gv100_gr_fwif[] = { + { 0, gm200_gr_load, &gv100_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr }, + {} +}; + int gv100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) { - return gm200_gr_new_(&gv100_gr, device, index, pgr); + return gf100_gr_new_(gv100_gr_fwif, device, index, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c new file mode 100644 index 0000000000000000000000000000000000000000..454668b1cf54d5975f59a6694c5fd474845fb0f5 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c @@ -0,0 +1,177 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include "gf100.h" +#include "ctxgf100.h" + +#include + +static void +tu102_gr_init_fecs_exceptions(struct gf100_gr *gr) +{ + nvkm_wr32(gr->base.engine.subdev.device, 0x409c24, 0x006f0002); +} + +static void +tu102_gr_init_fs(struct gf100_gr *gr) +{ + struct nvkm_device *device = gr->base.engine.subdev.device; + int sm; + + gp100_grctx_generate_smid_config(gr); + gk104_grctx_generate_gpc_tpc_nr(gr); + + for (sm = 0; sm < gr->sm_nr; sm++) { + nvkm_wr32(device, GPC_UNIT(gr->sm[sm].gpc, 0x0c10 + + gr->sm[sm].tpc * 4), sm); + } + + gm200_grctx_generate_dist_skip_table(gr); + gf100_gr_init_num_tpc_per_gpc(gr, true, true); +} + +static void +tu102_gr_init_zcull(struct gf100_gr *gr) +{ + struct nvkm_device *device = gr->base.engine.subdev.device; + const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total); + const u8 tile_nr = ALIGN(gr->tpc_total, 64); + u8 bank[GPC_MAX] = {}, gpc, i, j; + u32 data; + + for (i = 0; i < tile_nr; i += 8) { + for (data = 0, j = 0; j < 8 && i + j < gr->tpc_total; j++) { + data |= bank[gr->tile[i + j]] << (j * 4); + bank[gr->tile[i + j]]++; + } + nvkm_wr32(device, GPC_BCAST(0x0980 + ((i / 8) * 4)), data); + } + + for (gpc = 0; gpc < gr->gpc_nr; gpc++) { + nvkm_wr32(device, GPC_UNIT(gpc, 0x0914), + gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]); + nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 | + gr->tpc_total); + nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918); + } + + nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918); +} + +static void +tu102_gr_init_gpc_mmu(struct gf100_gr *gr) +{ + struct nvkm_device *device = gr->base.engine.subdev.device; + + nvkm_wr32(device, 0x418880, nvkm_rd32(device, 0x100c80) & 0xf8001fff); + nvkm_wr32(device, 0x418890, 0x00000000); + nvkm_wr32(device, 0x418894, 0x00000000); + + nvkm_wr32(device, 0x4188b4, nvkm_rd32(device, 0x100cc8)); + nvkm_wr32(device, 0x4188b8, nvkm_rd32(device, 0x100ccc)); + nvkm_wr32(device, 0x4188b0, nvkm_rd32(device, 0x100cc4)); +} + +static const struct gf100_gr_func +tu102_gr = { + .oneinit_tiles = gm200_gr_oneinit_tiles, + .oneinit_sm_id = gm200_gr_oneinit_sm_id, + .init = gf100_gr_init, + .init_419bd8 = gv100_gr_init_419bd8, + .init_gpc_mmu = tu102_gr_init_gpc_mmu, + .init_vsc_stream_master = gk104_gr_init_vsc_stream_master, + .init_zcull = tu102_gr_init_zcull, + .init_num_active_ltcs = gf100_gr_init_num_active_ltcs, + .init_rop_active_fbps = gp100_gr_init_rop_active_fbps, + .init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask, + .init_fs = tu102_gr_init_fs, + .init_fecs_exceptions = tu102_gr_init_fecs_exceptions, + .init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2, + .init_sked_hww_esr = gk104_gr_init_sked_hww_esr, + .init_ppc_exceptions = gk104_gr_init_ppc_exceptions, + .init_504430 = gv100_gr_init_504430, + .init_shader_exceptions = gv100_gr_init_shader_exceptions, + .trap_mp = gv100_gr_trap_mp, + .rops = gm200_gr_rops, + .gpc_nr = 6, + .tpc_nr = 5, + .ppc_nr = 3, + .grctx = &tu102_grctx, + .zbc = &gp102_gr_zbc, + .sclass = { + { -1, -1, FERMI_TWOD_A }, + { -1, -1, KEPLER_INLINE_TO_MEMORY_B }, + { -1, -1, TURING_A, &gf100_fermi }, + { -1, -1, TURING_COMPUTE_A }, + {} + } +}; + +MODULE_FIRMWARE("nvidia/tu102/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/tu102/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/tu102/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/tu102/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/tu102/gr/gpccs_bl.bin"); +MODULE_FIRMWARE("nvidia/tu102/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/tu102/gr/gpccs_data.bin"); +MODULE_FIRMWARE("nvidia/tu102/gr/gpccs_sig.bin"); 
+MODULE_FIRMWARE("nvidia/tu102/gr/sw_ctx.bin"); +MODULE_FIRMWARE("nvidia/tu102/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/tu102/gr/sw_bundle_init.bin"); +MODULE_FIRMWARE("nvidia/tu102/gr/sw_method_init.bin"); + +MODULE_FIRMWARE("nvidia/tu104/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/tu104/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/tu104/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/tu104/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/tu104/gr/gpccs_bl.bin"); +MODULE_FIRMWARE("nvidia/tu104/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/tu104/gr/gpccs_data.bin"); +MODULE_FIRMWARE("nvidia/tu104/gr/gpccs_sig.bin"); +MODULE_FIRMWARE("nvidia/tu104/gr/sw_ctx.bin"); +MODULE_FIRMWARE("nvidia/tu104/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/tu104/gr/sw_bundle_init.bin"); +MODULE_FIRMWARE("nvidia/tu104/gr/sw_method_init.bin"); + +MODULE_FIRMWARE("nvidia/tu106/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/tu106/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/tu106/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/tu106/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/tu106/gr/gpccs_bl.bin"); +MODULE_FIRMWARE("nvidia/tu106/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/tu106/gr/gpccs_data.bin"); +MODULE_FIRMWARE("nvidia/tu106/gr/gpccs_sig.bin"); +MODULE_FIRMWARE("nvidia/tu106/gr/sw_ctx.bin"); +MODULE_FIRMWARE("nvidia/tu106/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/tu106/gr/sw_bundle_init.bin"); +MODULE_FIRMWARE("nvidia/tu106/gr/sw_method_init.bin"); + +static const struct gf100_gr_fwif +tu102_gr_fwif[] = { + { 0, gm200_gr_load, &tu102_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr }, + {} +}; + +int +tu102_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +{ + return gf100_gr_new_(tu102_gr_fwif, device, index, pgr); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild index cdf631822282f180231507fbbb67933b338ffeea..9a0fd9812750dd2224de1316f7a2351322ed3566 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild @@ -1,3 +1,3 @@ # SPDX-License-Identifier: MIT nvkm-y += nvkm/engine/nvdec/base.o -nvkm-y += nvkm/engine/nvdec/gp102.o +nvkm-y += nvkm/engine/nvdec/gm107.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c index 4a63581bdd5e174a575982a48335197bc1c1561c..9b23c1b70ebfe07c9820e9976045b435b2cf33d8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c @@ -20,48 +20,42 @@ * DEALINGS IN THE SOFTWARE. 
*/ #include "priv.h" - -#include -#include - -static int -nvkm_nvdec_oneinit(struct nvkm_engine *engine) -{ - struct nvkm_nvdec *nvdec = nvkm_nvdec(engine); - struct nvkm_subdev *subdev = &nvdec->engine.subdev; - - nvdec->addr = nvkm_top_addr(subdev->device, subdev->index); - if (!nvdec->addr) - return -EINVAL; - - /*XXX: fix naming of this when adding support for multiple-NVDEC */ - return nvkm_falcon_v1_new(subdev, "NVDEC", nvdec->addr, - &nvdec->falcon); -} +#include static void * nvkm_nvdec_dtor(struct nvkm_engine *engine) { struct nvkm_nvdec *nvdec = nvkm_nvdec(engine); - nvkm_falcon_del(&nvdec->falcon); + nvkm_falcon_dtor(&nvdec->falcon); return nvdec; } static const struct nvkm_engine_func nvkm_nvdec = { .dtor = nvkm_nvdec_dtor, - .oneinit = nvkm_nvdec_oneinit, }; int -nvkm_nvdec_new_(struct nvkm_device *device, int index, - struct nvkm_nvdec **pnvdec) +nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *device, + int index, struct nvkm_nvdec **pnvdec) { struct nvkm_nvdec *nvdec; + int ret; if (!(nvdec = *pnvdec = kzalloc(sizeof(*nvdec), GFP_KERNEL))) return -ENOMEM; - return nvkm_engine_ctor(&nvkm_nvdec, device, index, true, - &nvdec->engine); + ret = nvkm_engine_ctor(&nvkm_nvdec, device, index, true, + &nvdec->engine); + if (ret) + return ret; + + fwif = nvkm_firmware_load(&nvdec->engine.subdev, fwif, "Nvdec", nvdec); + if (IS_ERR(fwif)) + return -ENODEV; + + nvdec->func = fwif->func; + + return nvkm_falcon_ctor(nvdec->func->flcn, &nvdec->engine.subdev, + nvkm_subdev_name[index], 0, &nvdec->falcon); }; diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c similarity index 53% rename from drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h rename to drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c index bf3e532665fbf93b91d87611c1dc0a623d73c9bf..0ab27ab4d8ee6d94e3541bc9cfec475d1a5c792c 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c @@ -19,25 +19,45 @@ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ +#include "priv.h" -#ifndef __NVKM_CORE_MSGQUEUE_H -#define __NVKM_CORE_MSGQUEUE_H -#include -struct nvkm_msgqueue; +static const struct nvkm_falcon_func +gm107_nvdec_flcn = { + .debug = 0xd00, + .fbif = 0x600, + .load_imem = nvkm_falcon_v1_load_imem, + .load_dmem = nvkm_falcon_v1_load_dmem, + .read_dmem = nvkm_falcon_v1_read_dmem, + .bind_context = nvkm_falcon_v1_bind_context, + .wait_for_halt = nvkm_falcon_v1_wait_for_halt, + .clear_interrupt = nvkm_falcon_v1_clear_interrupt, + .set_start_addr = nvkm_falcon_v1_set_start_addr, + .start = nvkm_falcon_v1_start, + .enable = nvkm_falcon_v1_enable, + .disable = nvkm_falcon_v1_disable, +}; -/* Hopefully we will never have firmware arguments larger than that... 
*/ -#define NVKM_MSGQUEUE_CMDLINE_SIZE 0x100 +static const struct nvkm_nvdec_func +gm107_nvdec = { + .flcn = &gm107_nvdec_flcn, +}; -int nvkm_msgqueue_new(u32, struct nvkm_falcon *, const struct nvkm_secboot *, - struct nvkm_msgqueue **); -void nvkm_msgqueue_del(struct nvkm_msgqueue **); -void nvkm_msgqueue_recv(struct nvkm_msgqueue *); -int nvkm_msgqueue_reinit(struct nvkm_msgqueue *); +static int +gm107_nvdec_nofw(struct nvkm_nvdec *nvdec, int ver, + const struct nvkm_nvdec_fwif *fwif) +{ + return 0; +} -/* useful if we run a NVIDIA-signed firmware */ -void nvkm_msgqueue_write_cmdline(struct nvkm_msgqueue *, void *); +static const struct nvkm_nvdec_fwif +gm107_nvdec_fwif[] = { + { -1, gm107_nvdec_nofw, &gm107_nvdec }, + {} +}; -/* interface to ACR unit running on falcon (NVIDIA signed firmware) */ -int nvkm_msgqueue_acr_boot_falcons(struct nvkm_msgqueue *, unsigned long); - -#endif +int +gm107_nvdec_new(struct nvkm_device *device, int index, + struct nvkm_nvdec **pnvdec) +{ + return nvkm_nvdec_new_(gm107_nvdec_fwif, device, index, pnvdec); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h index 57bfa3aa1835ef84fb8dc1f05a18064f8cad6ab0..e14da8b000d02f815b6356756b982b1a06d4e4fd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h @@ -3,5 +3,17 @@ #define __NVKM_NVDEC_PRIV_H__ #include -int nvkm_nvdec_new_(struct nvkm_device *, int, struct nvkm_nvdec **); +struct nvkm_nvdec_func { + const struct nvkm_falcon_func *flcn; +}; + +struct nvkm_nvdec_fwif { + int version; + int (*load)(struct nvkm_nvdec *, int ver, + const struct nvkm_nvdec_fwif *); + const struct nvkm_nvdec_func *func; +}; + +int nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, + struct nvkm_device *, int, struct nvkm_nvdec **); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild index f316de8d45a88109dba3941222b640af21ac1f16..75bf4436bf3fe1fc039c43e0064dc82262ac2bc0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild @@ -1,2 +1,3 @@ # SPDX-License-Identifier: MIT -#nvkm-y += nvkm/engine/nvenc/base.o +nvkm-y += nvkm/engine/nvenc/base.o +nvkm-y += nvkm/engine/nvenc/gm107.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c new file mode 100644 index 0000000000000000000000000000000000000000..484100e156680fcf0c05413d370e3077d810ba26 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c @@ -0,0 +1,63 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#include + +static void * +nvkm_nvenc_dtor(struct nvkm_engine *engine) +{ + struct nvkm_nvenc *nvenc = nvkm_nvenc(engine); + nvkm_falcon_dtor(&nvenc->falcon); + return nvenc; +} + +static const struct nvkm_engine_func +nvkm_nvenc = { + .dtor = nvkm_nvenc_dtor, +}; + +int +nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *fwif, struct nvkm_device *device, + int index, struct nvkm_nvenc **pnvenc) +{ + struct nvkm_nvenc *nvenc; + int ret; + + if (!(nvenc = *pnvenc = kzalloc(sizeof(*nvenc), GFP_KERNEL))) + return -ENOMEM; + + ret = nvkm_engine_ctor(&nvkm_nvenc, device, index, true, + &nvenc->engine); + if (ret) + return ret; + + fwif = nvkm_firmware_load(&nvenc->engine.subdev, fwif, "Nvenc", nvenc); + if (IS_ERR(fwif)) + return -ENODEV; + + nvenc->func = fwif->func; + + return nvkm_falcon_ctor(nvenc->func->flcn, &nvenc->engine.subdev, + nvkm_subdev_name[index], 0, &nvenc->falcon); +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c new file mode 100644 index 0000000000000000000000000000000000000000..d249c8ffb2d5c3b9374b1b1eb98a2ef32f0ad795 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c @@ -0,0 +1,63 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE.
+ */ + +#include "priv.h" + +static const struct nvkm_falcon_func +gm107_nvenc_flcn = { + .fbif = 0x800, + .load_imem = nvkm_falcon_v1_load_imem, + .load_dmem = nvkm_falcon_v1_load_dmem, + .read_dmem = nvkm_falcon_v1_read_dmem, + .bind_context = nvkm_falcon_v1_bind_context, + .wait_for_halt = nvkm_falcon_v1_wait_for_halt, + .clear_interrupt = nvkm_falcon_v1_clear_interrupt, + .set_start_addr = nvkm_falcon_v1_set_start_addr, + .start = nvkm_falcon_v1_start, + .enable = nvkm_falcon_v1_enable, + .disable = nvkm_falcon_v1_disable, +}; + +static const struct nvkm_nvenc_func +gm107_nvenc = { + .flcn = &gm107_nvenc_flcn, +}; + +static int +gm107_nvenc_nofw(struct nvkm_nvenc *nvenc, int ver, + const struct nvkm_nvenc_fwif *fwif) +{ + return 0; +} + +static const struct nvkm_nvenc_fwif +gm107_nvenc_fwif[] = { + { -1, gm107_nvenc_nofw, &gm107_nvenc }, + {} +}; + +int +gm107_nvenc_new(struct nvkm_device *device, int index, + struct nvkm_nvenc **pnvenc) +{ + return nvkm_nvenc_new_(gm107_nvenc_fwif, device, index, pnvenc); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h new file mode 100644 index 0000000000000000000000000000000000000000..100fa5ebbeefad4e93c6d70f899d4bda92dc3521 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __NVKM_NVENC_PRIV_H__ +#define __NVKM_NVENC_PRIV_H__ +#include + +struct nvkm_nvenc_func { + const struct nvkm_falcon_func *flcn; +}; + +struct nvkm_nvenc_fwif { + int version; + int (*load)(struct nvkm_nvenc *, int ver, + const struct nvkm_nvenc_fwif *); + const struct nvkm_nvenc_func *func; +}; + +int nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *, struct nvkm_device *, + int, struct nvkm_nvenc **pnvenc); +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild index 97c4696171f0ff20cc0988c6c8b87bff07bfaac7..63cd2be3de08fa810bb9affa51876996b0ac0c7e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild @@ -1,4 +1,5 @@ # SPDX-License-Identifier: MIT nvkm-y += nvkm/engine/sec2/base.o nvkm-y += nvkm/engine/sec2/gp102.o +nvkm-y += nvkm/engine/sec2/gp108.o nvkm-y += nvkm/engine/sec2/tu102.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c index 1b49e5b6717fd0145dbbdc2cd05ae2e65fc5743c..41318aa0d481db2ab31a7e481dd1ddeede400824 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c @@ -21,97 +21,98 @@ */ #include "priv.h" -#include +#include #include -#include - -static void * -nvkm_sec2_dtor(struct nvkm_engine *engine) -{ - struct nvkm_sec2 *sec2 = nvkm_sec2(engine); - nvkm_msgqueue_del(&sec2->queue); - nvkm_falcon_del(&sec2->falcon); - return sec2; -} static void -nvkm_sec2_intr(struct nvkm_engine *engine) +nvkm_sec2_recv(struct work_struct *work) { - struct nvkm_sec2 *sec2 = nvkm_sec2(engine); - struct nvkm_subdev *subdev = &engine->subdev; - struct nvkm_device *device = subdev->device; - u32 disp = nvkm_rd32(device, sec2->addr + 0x01c); - u32 intr = nvkm_rd32(device, sec2->addr + 0x008) & disp & ~(disp >> 16); - - if (intr & 0x00000040) { - schedule_work(&sec2->work); - nvkm_wr32(device, sec2->addr + 0x004, 0x00000040); - intr &= ~0x00000040; - } + struct nvkm_sec2 *sec2 = container_of(work, typeof(*sec2), work); - if (intr) { - nvkm_error(subdev, "unhandled intr %08x\n", intr); - 
nvkm_wr32(device, sec2->addr + 0x004, intr); + if (!sec2->initmsg_received) { + int ret = sec2->func->initmsg(sec2); + if (ret) { + nvkm_error(&sec2->engine.subdev, + "error parsing init message: %d\n", ret); + return; + } + sec2->initmsg_received = true; } + + nvkm_falcon_msgq_recv(sec2->msgq); } static void -nvkm_sec2_recv(struct work_struct *work) +nvkm_sec2_intr(struct nvkm_engine *engine) { - struct nvkm_sec2 *sec2 = container_of(work, typeof(*sec2), work); - - if (!sec2->queue) { - nvkm_warn(&sec2->engine.subdev, - "recv function called while no firmware set!\n"); - return; - } - - nvkm_msgqueue_recv(sec2->queue); + struct nvkm_sec2 *sec2 = nvkm_sec2(engine); + sec2->func->intr(sec2); } - static int -nvkm_sec2_oneinit(struct nvkm_engine *engine) +nvkm_sec2_fini(struct nvkm_engine *engine, bool suspend) { struct nvkm_sec2 *sec2 = nvkm_sec2(engine); - struct nvkm_subdev *subdev = &sec2->engine.subdev; - if (!sec2->addr) { - sec2->addr = nvkm_top_addr(subdev->device, subdev->index); - if (WARN_ON(!sec2->addr)) - return -EINVAL; + flush_work(&sec2->work); + + if (suspend) { + nvkm_falcon_cmdq_fini(sec2->cmdq); + sec2->initmsg_received = false; } - return nvkm_falcon_v1_new(subdev, "SEC2", sec2->addr, &sec2->falcon); + return 0; } -static int -nvkm_sec2_fini(struct nvkm_engine *engine, bool suspend) +static void * +nvkm_sec2_dtor(struct nvkm_engine *engine) { struct nvkm_sec2 *sec2 = nvkm_sec2(engine); - flush_work(&sec2->work); - return 0; + nvkm_falcon_msgq_del(&sec2->msgq); + nvkm_falcon_cmdq_del(&sec2->cmdq); + nvkm_falcon_qmgr_del(&sec2->qmgr); + nvkm_falcon_dtor(&sec2->falcon); + return sec2; } static const struct nvkm_engine_func nvkm_sec2 = { .dtor = nvkm_sec2_dtor, - .oneinit = nvkm_sec2_oneinit, .fini = nvkm_sec2_fini, .intr = nvkm_sec2_intr, }; int -nvkm_sec2_new_(struct nvkm_device *device, int index, u32 addr, - struct nvkm_sec2 **psec2) +nvkm_sec2_new_(const struct nvkm_sec2_fwif *fwif, struct nvkm_device *device, + int index, u32 addr, struct nvkm_sec2 **psec2) { struct nvkm_sec2 *sec2; + int ret; if (!(sec2 = *psec2 = kzalloc(sizeof(*sec2), GFP_KERNEL))) return -ENOMEM; - sec2->addr = addr; - INIT_WORK(&sec2->work, nvkm_sec2_recv); - return nvkm_engine_ctor(&nvkm_sec2, device, index, true, &sec2->engine); + ret = nvkm_engine_ctor(&nvkm_sec2, device, index, true, &sec2->engine); + if (ret) + return ret; + + fwif = nvkm_firmware_load(&sec2->engine.subdev, fwif, "Sec2", sec2); + if (IS_ERR(fwif)) + return PTR_ERR(fwif); + + sec2->func = fwif->func; + + ret = nvkm_falcon_ctor(sec2->func->flcn, &sec2->engine.subdev, + nvkm_subdev_name[index], addr, &sec2->falcon); + if (ret) + return ret; + + if ((ret = nvkm_falcon_qmgr_new(&sec2->falcon, &sec2->qmgr)) || + (ret = nvkm_falcon_cmdq_new(sec2->qmgr, "cmdq", &sec2->cmdq)) || + (ret = nvkm_falcon_msgq_new(sec2->qmgr, "msgq", &sec2->msgq))) + return ret; + + INIT_WORK(&sec2->work, nvkm_sec2_recv); + return 0; }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c index 858cf27fa0103e3951cb5672c69e74e4e37fa14e..368f2a0042ffdcded99738696e66bb188c800945 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c @@ -19,12 +19,316 @@ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ - #include "priv.h" +#include +#include +#include + +#include +#include + +static int +gp102_sec2_acr_bootstrap_falcon_callback(void *priv, struct nv_falcon_msg *hdr) +{ + struct nv_sec2_acr_bootstrap_falcon_msg *msg = + container_of(hdr, typeof(*msg), msg.hdr); + struct nvkm_subdev *subdev = priv; + const char *name = nvkm_acr_lsf_id(msg->falcon_id); + + if (msg->error_code) { + nvkm_error(subdev, "ACR_BOOTSTRAP_FALCON failed for " + "falcon %d [%s]: %08x\n", + msg->falcon_id, name, msg->error_code); + return -EINVAL; + } + + nvkm_debug(subdev, "%s booted\n", name); + return 0; +} + +static int +gp102_sec2_acr_bootstrap_falcon(struct nvkm_falcon *falcon, + enum nvkm_acr_lsf_id id) +{ + struct nvkm_sec2 *sec2 = container_of(falcon, typeof(*sec2), falcon); + struct nv_sec2_acr_bootstrap_falcon_cmd cmd = { + .cmd.hdr.unit_id = sec2->func->unit_acr, + .cmd.hdr.size = sizeof(cmd), + .cmd.cmd_type = NV_SEC2_ACR_CMD_BOOTSTRAP_FALCON, + .flags = NV_SEC2_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES, + .falcon_id = id, + }; + + return nvkm_falcon_cmdq_send(sec2->cmdq, &cmd.cmd.hdr, + gp102_sec2_acr_bootstrap_falcon_callback, + &sec2->engine.subdev, + msecs_to_jiffies(1000)); +} + +static int +gp102_sec2_acr_boot(struct nvkm_falcon *falcon) +{ + struct nv_sec2_args args = {}; + nvkm_falcon_load_dmem(falcon, &args, + falcon->func->emem_addr, sizeof(args), 0); + nvkm_falcon_start(falcon); + return 0; +} + +static void +gp102_sec2_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust) +{ + struct loader_config_v1 hdr; + nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr)); + hdr.code_dma_base = hdr.code_dma_base + adjust; + hdr.data_dma_base = hdr.data_dma_base + adjust; + hdr.overlay_dma_base = hdr.overlay_dma_base + adjust; + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); + loader_config_v1_dump(&acr->subdev, &hdr); +} + +static void +gp102_sec2_acr_bld_write(struct nvkm_acr *acr, u32 bld, + struct nvkm_acr_lsfw *lsfw) +{ + const struct loader_config_v1 hdr = { + .dma_idx = FALCON_SEC2_DMAIDX_UCODE, + .code_dma_base = lsfw->offset.img + lsfw->app_start_offset, + .code_size_total = lsfw->app_size, + .code_size_to_load = lsfw->app_resident_code_size, + .code_entry_point = lsfw->app_imem_entry, + .data_dma_base = lsfw->offset.img + lsfw->app_start_offset + + lsfw->app_resident_data_offset, + .data_size = lsfw->app_resident_data_size, + .overlay_dma_base = lsfw->offset.img + lsfw->app_start_offset, + .argc = 1, + .argv = lsfw->falcon->func->emem_addr, + }; + + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); +} + +static const struct nvkm_acr_lsf_func +gp102_sec2_acr_0 = { + .bld_size = sizeof(struct loader_config_v1), + .bld_write = gp102_sec2_acr_bld_write, + .bld_patch = gp102_sec2_acr_bld_patch, + .boot = gp102_sec2_acr_boot, + .bootstrap_falcon = gp102_sec2_acr_bootstrap_falcon, +}; + +int +gp102_sec2_initmsg(struct nvkm_sec2 *sec2) +{ + struct nv_sec2_init_msg msg; + int ret, i; + + ret = nvkm_falcon_msgq_recv_initmsg(sec2->msgq, &msg, sizeof(msg)); + if (ret) + return ret; + + if (msg.hdr.unit_id != NV_SEC2_UNIT_INIT || + msg.msg_type != NV_SEC2_INIT_MSG_INIT) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(msg.queue_info); i++) { + if (msg.queue_info[i].id == NV_SEC2_INIT_MSG_QUEUE_ID_MSGQ) { + nvkm_falcon_msgq_init(sec2->msgq, + msg.queue_info[i].index, + msg.queue_info[i].offset, + msg.queue_info[i].size); + } else { + nvkm_falcon_cmdq_init(sec2->cmdq, + msg.queue_info[i].index, + msg.queue_info[i].offset, + msg.queue_info[i].size); + } + } + + return 0; +} + +void +gp102_sec2_intr(struct nvkm_sec2 *sec2) +{ + 
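/* 0x01c masks which interrupts are routed to this handler and 0x008
+	 * holds the raw status; bit 6 (0x40) is the message-queue interrupt,
+	 * which is acked immediately and deferred to the workqueue so queue
+	 * parsing runs outside interrupt context.
+	 */
+	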
struct nvkm_subdev *subdev = &sec2->engine.subdev; + struct nvkm_falcon *falcon = &sec2->falcon; + u32 disp = nvkm_falcon_rd32(falcon, 0x01c); + u32 intr = nvkm_falcon_rd32(falcon, 0x008) & disp & ~(disp >> 16); + + if (intr & 0x00000040) { + schedule_work(&sec2->work); + nvkm_falcon_wr32(falcon, 0x004, 0x00000040); + intr &= ~0x00000040; + } + + if (intr) { + nvkm_error(subdev, "unhandled intr %08x\n", intr); + nvkm_falcon_wr32(falcon, 0x004, intr); + } +} + +int +gp102_sec2_flcn_enable(struct nvkm_falcon *falcon) +{ + nvkm_falcon_mask(falcon, 0x3c0, 0x00000001, 0x00000001); + udelay(10); + nvkm_falcon_mask(falcon, 0x3c0, 0x00000001, 0x00000000); + return nvkm_falcon_v1_enable(falcon); +} + +void +gp102_sec2_flcn_bind_context(struct nvkm_falcon *falcon, + struct nvkm_memory *ctx) +{ + struct nvkm_device *device = falcon->owner->device; + + nvkm_falcon_v1_bind_context(falcon, ctx); + if (!ctx) + return; + + /* Not sure if this is a WAR for a HW issue, or some additional + * programming sequence that's needed to properly complete the + * context switch we trigger above. + * + * Fixes unreliability of booting the SEC2 RTOS on Quadro P620, + * particularly when resuming from suspend. + * + * Also removes the need for an odd workaround where we needed + * to program SEC2's FALCON_CPUCTL_ALIAS_STARTCPU twice before + * the SEC2 RTOS would begin executing. + */ + nvkm_msec(device, 10, + u32 irqstat = nvkm_falcon_rd32(falcon, 0x008); + u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc); + if ((irqstat & 0x00000008) && + (flcn0dc & 0x00007000) == 0x00005000) + break; + ); + + nvkm_falcon_mask(falcon, 0x004, 0x00000008, 0x00000008); + nvkm_falcon_mask(falcon, 0x058, 0x00000002, 0x00000002); + + nvkm_msec(device, 10, + u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc); + if ((flcn0dc & 0x00007000) == 0x00000000) + break; + ); +} + +static const struct nvkm_falcon_func +gp102_sec2_flcn = { + .debug = 0x408, + .fbif = 0x600, + .load_imem = nvkm_falcon_v1_load_imem, + .load_dmem = nvkm_falcon_v1_load_dmem, + .read_dmem = nvkm_falcon_v1_read_dmem, + .emem_addr = 0x01000000, + .bind_context = gp102_sec2_flcn_bind_context, + .wait_for_halt = nvkm_falcon_v1_wait_for_halt, + .clear_interrupt = nvkm_falcon_v1_clear_interrupt, + .set_start_addr = nvkm_falcon_v1_set_start_addr, + .start = nvkm_falcon_v1_start, + .enable = gp102_sec2_flcn_enable, + .disable = nvkm_falcon_v1_disable, + .cmdq = { 0xa00, 0xa04, 8 }, + .msgq = { 0xa30, 0xa34, 8 }, +}; + +const struct nvkm_sec2_func +gp102_sec2 = { + .flcn = &gp102_sec2_flcn, + .unit_acr = NV_SEC2_UNIT_ACR, + .intr = gp102_sec2_intr, + .initmsg = gp102_sec2_initmsg, +}; + +MODULE_FIRMWARE("nvidia/gp102/sec2/desc.bin"); +MODULE_FIRMWARE("nvidia/gp102/sec2/image.bin"); +MODULE_FIRMWARE("nvidia/gp102/sec2/sig.bin"); +MODULE_FIRMWARE("nvidia/gp104/sec2/desc.bin"); +MODULE_FIRMWARE("nvidia/gp104/sec2/image.bin"); +MODULE_FIRMWARE("nvidia/gp104/sec2/sig.bin"); +MODULE_FIRMWARE("nvidia/gp106/sec2/desc.bin"); +MODULE_FIRMWARE("nvidia/gp106/sec2/image.bin"); +MODULE_FIRMWARE("nvidia/gp106/sec2/sig.bin"); +MODULE_FIRMWARE("nvidia/gp107/sec2/desc.bin"); +MODULE_FIRMWARE("nvidia/gp107/sec2/image.bin"); +MODULE_FIRMWARE("nvidia/gp107/sec2/sig.bin"); + +static void +gp102_sec2_acr_bld_patch_1(struct nvkm_acr *acr, u32 bld, s64 adjust) +{ + struct flcn_bl_dmem_desc_v2 hdr; + nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr)); + hdr.code_dma_base = hdr.code_dma_base + adjust; + hdr.data_dma_base = hdr.data_dma_base + adjust; + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); + 
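/* echo the relocated descriptor to the debug log */
+	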
flcn_bl_dmem_desc_v2_dump(&acr->subdev, &hdr); +} + +static void +gp102_sec2_acr_bld_write_1(struct nvkm_acr *acr, u32 bld, + struct nvkm_acr_lsfw *lsfw) +{ + const struct flcn_bl_dmem_desc_v2 hdr = { + .ctx_dma = FALCON_SEC2_DMAIDX_UCODE, + .code_dma_base = lsfw->offset.img + lsfw->app_start_offset, + .non_sec_code_off = lsfw->app_resident_code_offset, + .non_sec_code_size = lsfw->app_resident_code_size, + .code_entry_point = lsfw->app_imem_entry, + .data_dma_base = lsfw->offset.img + lsfw->app_start_offset + + lsfw->app_resident_data_offset, + .data_size = lsfw->app_resident_data_size, + .argc = 1, + .argv = lsfw->falcon->func->emem_addr, + }; + + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); +} + +const struct nvkm_acr_lsf_func +gp102_sec2_acr_1 = { + .bld_size = sizeof(struct flcn_bl_dmem_desc_v2), + .bld_write = gp102_sec2_acr_bld_write_1, + .bld_patch = gp102_sec2_acr_bld_patch_1, + .boot = gp102_sec2_acr_boot, + .bootstrap_falcon = gp102_sec2_acr_bootstrap_falcon, +}; + +int +gp102_sec2_load(struct nvkm_sec2 *sec2, int ver, + const struct nvkm_sec2_fwif *fwif) +{ + return nvkm_acr_lsfw_load_sig_image_desc_v1(&sec2->engine.subdev, + &sec2->falcon, + NVKM_ACR_LSF_SEC2, "sec2/", + ver, fwif->acr); +} + +MODULE_FIRMWARE("nvidia/gp102/sec2/desc-1.bin"); +MODULE_FIRMWARE("nvidia/gp102/sec2/image-1.bin"); +MODULE_FIRMWARE("nvidia/gp102/sec2/sig-1.bin"); +MODULE_FIRMWARE("nvidia/gp104/sec2/desc-1.bin"); +MODULE_FIRMWARE("nvidia/gp104/sec2/image-1.bin"); +MODULE_FIRMWARE("nvidia/gp104/sec2/sig-1.bin"); +MODULE_FIRMWARE("nvidia/gp106/sec2/desc-1.bin"); +MODULE_FIRMWARE("nvidia/gp106/sec2/image-1.bin"); +MODULE_FIRMWARE("nvidia/gp106/sec2/sig-1.bin"); +MODULE_FIRMWARE("nvidia/gp107/sec2/desc-1.bin"); +MODULE_FIRMWARE("nvidia/gp107/sec2/image-1.bin"); +MODULE_FIRMWARE("nvidia/gp107/sec2/sig-1.bin"); + +static const struct nvkm_sec2_fwif +gp102_sec2_fwif[] = { + { 1, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_1 }, + { 0, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_0 }, + {} +}; + int -gp102_sec2_new(struct nvkm_device *device, int index, - struct nvkm_sec2 **psec2) +gp102_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2) { - return nvkm_sec2_new_(device, index, 0, psec2); + return nvkm_sec2_new_(gp102_sec2_fwif, device, index, 0, psec2); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c similarity index 50% rename from drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h rename to drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c index 8bdfb3e5cd1cf888f4d14cc9597755b4611c35cc..232a9d7c51e5d20d02277441cee77c4ecfb328a1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * Copyright 2019 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -14,23 +14,26 @@ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. */ +#include "priv.h" +#include -#ifndef __NVKM_SECBOOT_ACR_R367_H__ -#define __NVKM_SECBOOT_ACR_R367_H__ +MODULE_FIRMWARE("nvidia/gp108/sec2/desc.bin"); +MODULE_FIRMWARE("nvidia/gp108/sec2/image.bin"); +MODULE_FIRMWARE("nvidia/gp108/sec2/sig.bin"); -#include "acr_r352.h" +static const struct nvkm_sec2_fwif +gp108_sec2_fwif[] = { + { 0, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_1 }, + {} +}; -void acr_r367_fixup_hs_desc(struct acr_r352 *, struct nvkm_secboot *, void *); - -struct ls_ucode_img *acr_r367_ls_ucode_img_load(const struct acr_r352 *, - const struct nvkm_secboot *, - enum nvkm_secboot_falcon); -int acr_r367_ls_fill_headers(struct acr_r352 *, struct list_head *); -int acr_r367_ls_write_wpr(struct acr_r352 *, struct list_head *, - struct nvkm_gpuobj *, u64); -#endif +int +gp108_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2) +{ + return nvkm_sec2_new_(gp108_sec2_fwif, device, index, 0, psec2); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h index b331b00517e68629cd672022dd489430222f024f..bb88117e018a9b23af4b1f760fce703511c98ce2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h @@ -3,7 +3,27 @@ #define __NVKM_SEC2_PRIV_H__ #include -#define nvkm_sec2(p) container_of((p), struct nvkm_sec2, engine) +struct nvkm_sec2_func { + const struct nvkm_falcon_func *flcn; + u8 unit_acr; + void (*intr)(struct nvkm_sec2 *); + int (*initmsg)(struct nvkm_sec2 *); +}; -int nvkm_sec2_new_(struct nvkm_device *, int, u32 addr, struct nvkm_sec2 **); +void gp102_sec2_intr(struct nvkm_sec2 *); +int gp102_sec2_initmsg(struct nvkm_sec2 *); + +struct nvkm_sec2_fwif { + int version; + int (*load)(struct nvkm_sec2 *, int ver, const struct nvkm_sec2_fwif *); + const struct nvkm_sec2_func *func; + const struct nvkm_acr_lsf_func *acr; +}; + +int gp102_sec2_load(struct nvkm_sec2 *, int, const struct nvkm_sec2_fwif *); +extern const struct nvkm_sec2_func gp102_sec2; +extern const struct nvkm_acr_lsf_func gp102_sec2_acr_1; + +int nvkm_sec2_new_(const struct nvkm_sec2_fwif *, struct nvkm_device *, + int, u32 addr, struct nvkm_sec2 **); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c index d655576164b11e65cdec978a5d3d1034bcf5c0c4..b6ebd95c9ba1ee907a526a472d0ce3700b0c8535 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c @@ -19,15 +19,54 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
*/ - #include "priv.h" +#include + +static const struct nvkm_falcon_func +tu102_sec2_flcn = { + .debug = 0x408, + .fbif = 0x600, + .load_imem = nvkm_falcon_v1_load_imem, + .load_dmem = nvkm_falcon_v1_load_dmem, + .read_dmem = nvkm_falcon_v1_read_dmem, + .emem_addr = 0x01000000, + .bind_context = gp102_sec2_flcn_bind_context, + .wait_for_halt = nvkm_falcon_v1_wait_for_halt, + .clear_interrupt = nvkm_falcon_v1_clear_interrupt, + .set_start_addr = nvkm_falcon_v1_set_start_addr, + .start = nvkm_falcon_v1_start, + .enable = nvkm_falcon_v1_enable, + .disable = nvkm_falcon_v1_disable, + .cmdq = { 0xc00, 0xc04, 8 }, + .msgq = { 0xc80, 0xc84, 8 }, +}; + +static const struct nvkm_sec2_func +tu102_sec2 = { + .flcn = &tu102_sec2_flcn, + .unit_acr = 0x07, + .intr = gp102_sec2_intr, + .initmsg = gp102_sec2_initmsg, +}; + +static int +tu102_sec2_nofw(struct nvkm_sec2 *sec2, int ver, + const struct nvkm_sec2_fwif *fwif) +{ + return 0; +} + +static const struct nvkm_sec2_fwif +tu102_sec2_fwif[] = { + { 0, gp102_sec2_load, &tu102_sec2, &gp102_sec2_acr_1 }, + { -1, tu102_sec2_nofw, &tu102_sec2 } +}; int -tu102_sec2_new(struct nvkm_device *device, int index, - struct nvkm_sec2 **psec2) +tu102_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2) { /* TOP info wasn't updated on Turing to reflect the PRI * address change for some reason. We override it here. */ - return nvkm_sec2_new_(device, index, 0x840000, psec2); + return nvkm_sec2_new_(tu102_sec2_fwif, device, index, 0x840000, psec2); } diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild index b5665ada850a76168bbb1c4b68c2a8c64951af1e..d79d783904eecabb9f587f34c88129d0b9c426ee 100644 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild @@ -1,6 +1,6 @@ # SPDX-License-Identifier: MIT nvkm-y += nvkm/falcon/base.o +nvkm-y += nvkm/falcon/cmdq.o +nvkm-y += nvkm/falcon/msgq.o +nvkm-y += nvkm/falcon/qmgr.o nvkm-y += nvkm/falcon/v1.o -nvkm-y += nvkm/falcon/msgqueue.o -nvkm-y += nvkm/falcon/msgqueue_0137c63d.o -nvkm-y += nvkm/falcon/msgqueue_0148cdec.o diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c index 366c87de6e721e15ce25242e57377699ac03158e..c6a3448180d6fdb9102026bbbd17bffad1ca1775 100644 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c @@ -22,6 +22,7 @@ #include "priv.h" #include +#include void nvkm_falcon_load_imem(struct nvkm_falcon *falcon, void *data, u32 start, @@ -134,6 +135,37 @@ nvkm_falcon_clear_interrupt(struct nvkm_falcon *falcon, u32 mask) return falcon->func->clear_interrupt(falcon, mask); } +static int +nvkm_falcon_oneinit(struct nvkm_falcon *falcon) +{ + const struct nvkm_falcon_func *func = falcon->func; + const struct nvkm_subdev *subdev = falcon->owner; + u32 reg; + + if (!falcon->addr) { + falcon->addr = nvkm_top_addr(subdev->device, subdev->index); + if (WARN_ON(!falcon->addr)) + return -ENODEV; + } + + reg = nvkm_falcon_rd32(falcon, 0x12c); + falcon->version = reg & 0xf; + falcon->secret = (reg >> 4) & 0x3; + falcon->code.ports = (reg >> 8) & 0xf; + falcon->data.ports = (reg >> 12) & 0xf; + + reg = nvkm_falcon_rd32(falcon, 0x108); + falcon->code.limit = (reg & 0x1ff) << 8; + falcon->data.limit = (reg & 0x3fe00) >> 1; + + if (func->debug) { + u32 val = nvkm_falcon_rd32(falcon, func->debug); + falcon->debug = (val >> 20) & 0x1; + } + + return 0; +} + void nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user) { 
@@ -151,6 +183,8 @@ nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user) int nvkm_falcon_get(struct nvkm_falcon *falcon, const struct nvkm_subdev *user) { + int ret = 0; + mutex_lock(&falcon->mutex); if (falcon->user) { nvkm_error(user, "%s falcon already acquired by %s!\n", @@ -160,70 +194,37 @@ nvkm_falcon_get(struct nvkm_falcon *falcon, const struct nvkm_subdev *user) } nvkm_debug(user, "acquired %s falcon\n", falcon->name); + if (!falcon->oneinit) + ret = nvkm_falcon_oneinit(falcon); falcon->user = user; mutex_unlock(&falcon->mutex); - return 0; + return ret; } void +nvkm_falcon_dtor(struct nvkm_falcon *falcon) +{ +} + +int nvkm_falcon_ctor(const struct nvkm_falcon_func *func, struct nvkm_subdev *subdev, const char *name, u32 addr, struct nvkm_falcon *falcon) { - u32 debug_reg; - u32 reg; - falcon->func = func; falcon->owner = subdev; falcon->name = name; falcon->addr = addr; mutex_init(&falcon->mutex); mutex_init(&falcon->dmem_mutex); - - reg = nvkm_falcon_rd32(falcon, 0x12c); - falcon->version = reg & 0xf; - falcon->secret = (reg >> 4) & 0x3; - falcon->code.ports = (reg >> 8) & 0xf; - falcon->data.ports = (reg >> 12) & 0xf; - - reg = nvkm_falcon_rd32(falcon, 0x108); - falcon->code.limit = (reg & 0x1ff) << 8; - falcon->data.limit = (reg & 0x3fe00) >> 1; - - switch (subdev->index) { - case NVKM_ENGINE_GR: - debug_reg = 0x0; - break; - case NVKM_SUBDEV_PMU: - debug_reg = 0xc08; - break; - case NVKM_ENGINE_NVDEC0: - debug_reg = 0xd00; - break; - case NVKM_ENGINE_SEC2: - debug_reg = 0x408; - falcon->has_emem = true; - break; - case NVKM_SUBDEV_GSP: - debug_reg = 0x0; /*XXX*/ - break; - default: - nvkm_warn(subdev, "unsupported falcon %s!\n", - nvkm_subdev_name[subdev->index]); - debug_reg = 0; - break; - } - - if (debug_reg) { - u32 val = nvkm_falcon_rd32(falcon, debug_reg); - falcon->debug = (val >> 20) & 0x1; - } + return 0; } void nvkm_falcon_del(struct nvkm_falcon **pfalcon) { if (*pfalcon) { + nvkm_falcon_dtor(*pfalcon); kfree(*pfalcon); *pfalcon = NULL; } diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c b/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c new file mode 100644 index 0000000000000000000000000000000000000000..40e3f3fc83ef68413d0e001b9e5c3d48c3325920 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c @@ -0,0 +1,214 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "qmgr.h" + +static bool +nvkm_falcon_cmdq_has_room(struct nvkm_falcon_cmdq *cmdq, u32 size, bool *rewind) +{ + u32 head = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->head_reg); + u32 tail = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->tail_reg); + u32 free; + + size = ALIGN(size, QUEUE_ALIGNMENT); + + if (head >= tail) { + free = cmdq->offset + cmdq->size - head; + free -= HDR_SIZE; + + if (size > free) { + *rewind = true; + head = cmdq->offset; + } + } + + if (head < tail) + free = tail - head - 1; + + return size <= free; +} + +static void +nvkm_falcon_cmdq_push(struct nvkm_falcon_cmdq *cmdq, void *data, u32 size) +{ + struct nvkm_falcon *falcon = cmdq->qmgr->falcon; + nvkm_falcon_load_dmem(falcon, data, cmdq->position, size, 0); + cmdq->position += ALIGN(size, QUEUE_ALIGNMENT); +} + +static void +nvkm_falcon_cmdq_rewind(struct nvkm_falcon_cmdq *cmdq) +{ + struct nv_falcon_cmd cmd; + + cmd.unit_id = NV_FALCON_CMD_UNIT_ID_REWIND; + cmd.size = sizeof(cmd); + nvkm_falcon_cmdq_push(cmdq, &cmd, cmd.size); + + cmdq->position = cmdq->offset; +} + +static int +nvkm_falcon_cmdq_open(struct nvkm_falcon_cmdq *cmdq, u32 size) +{ + struct nvkm_falcon *falcon = cmdq->qmgr->falcon; + bool rewind = false; + + mutex_lock(&cmdq->mutex); + + if (!nvkm_falcon_cmdq_has_room(cmdq, size, &rewind)) { + FLCNQ_DBG(cmdq, "queue full"); + mutex_unlock(&cmdq->mutex); + return -EAGAIN; + } + + cmdq->position = nvkm_falcon_rd32(falcon, cmdq->head_reg); + + if (rewind) + nvkm_falcon_cmdq_rewind(cmdq); + + return 0; +} + +static void +nvkm_falcon_cmdq_close(struct nvkm_falcon_cmdq *cmdq) +{ + nvkm_falcon_wr32(cmdq->qmgr->falcon, cmdq->head_reg, cmdq->position); + mutex_unlock(&cmdq->mutex); +} + +static int +nvkm_falcon_cmdq_write(struct nvkm_falcon_cmdq *cmdq, struct nv_falcon_cmd *cmd) +{ + static unsigned timeout = 2000; + unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout); + int ret = -EAGAIN; + + while (ret == -EAGAIN && time_before(jiffies, end_jiffies)) + ret = nvkm_falcon_cmdq_open(cmdq, cmd->size); + if (ret) { + FLCNQ_ERR(cmdq, "timeout waiting for queue space"); + return ret; + } + + nvkm_falcon_cmdq_push(cmdq, cmd, cmd->size); + nvkm_falcon_cmdq_close(cmdq); + return ret; +} + +/* specifies that we want to know the command status in the answer message */ +#define CMD_FLAGS_STATUS BIT(0) +/* specifies that we want an interrupt when the answer message is queued */ +#define CMD_FLAGS_INTR BIT(1) + +int +nvkm_falcon_cmdq_send(struct nvkm_falcon_cmdq *cmdq, struct nv_falcon_cmd *cmd, + nvkm_falcon_qmgr_callback cb, void *priv, + unsigned long timeout) +{ + struct nvkm_falcon_qmgr_seq *seq; + int ret; + + if (!wait_for_completion_timeout(&cmdq->ready, + msecs_to_jiffies(1000))) { + FLCNQ_ERR(cmdq, "timeout waiting for queue ready"); + return -ETIMEDOUT; + } + + seq = nvkm_falcon_qmgr_seq_acquire(cmdq->qmgr); + if (IS_ERR(seq)) + return PTR_ERR(seq); + + cmd->seq_id = seq->id; + cmd->ctrl_flags = CMD_FLAGS_STATUS | CMD_FLAGS_INTR; + + seq->state = SEQ_STATE_USED; + seq->async = !timeout; + seq->callback = cb; + seq->priv = priv; + + ret = nvkm_falcon_cmdq_write(cmdq, cmd); + if (ret) { + seq->state = SEQ_STATE_PENDING; + nvkm_falcon_qmgr_seq_release(cmdq->qmgr, seq); + return ret; + } + + if (!seq->async) { + if (!wait_for_completion_timeout(&seq->done, timeout)) { + FLCNQ_ERR(cmdq, "timeout waiting for reply"); + return -ETIMEDOUT; + } + ret = seq->result; + nvkm_falcon_qmgr_seq_release(cmdq->qmgr, seq); + } + + return ret; +} + +void +nvkm_falcon_cmdq_fini(struct nvkm_falcon_cmdq 
*cmdq) +{ + reinit_completion(&cmdq->ready); +} + +void +nvkm_falcon_cmdq_init(struct nvkm_falcon_cmdq *cmdq, + u32 index, u32 offset, u32 size) +{ + const struct nvkm_falcon_func *func = cmdq->qmgr->falcon->func; + + cmdq->head_reg = func->cmdq.head + index * func->cmdq.stride; + cmdq->tail_reg = func->cmdq.tail + index * func->cmdq.stride; + cmdq->offset = offset; + cmdq->size = size; + complete_all(&cmdq->ready); + + FLCNQ_DBG(cmdq, "initialised @ index %d offset 0x%08x size 0x%08x", + index, cmdq->offset, cmdq->size); +} + +void +nvkm_falcon_cmdq_del(struct nvkm_falcon_cmdq **pcmdq) +{ + struct nvkm_falcon_cmdq *cmdq = *pcmdq; + if (cmdq) { + kfree(*pcmdq); + *pcmdq = NULL; + } +} + +int +nvkm_falcon_cmdq_new(struct nvkm_falcon_qmgr *qmgr, const char *name, + struct nvkm_falcon_cmdq **pcmdq) +{ + struct nvkm_falcon_cmdq *cmdq = *pcmdq; + + if (!(cmdq = *pcmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL))) + return -ENOMEM; + + cmdq->qmgr = qmgr; + cmdq->name = name; + mutex_init(&cmdq->mutex); + init_completion(&cmdq->ready); + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c new file mode 100644 index 0000000000000000000000000000000000000000..cbfe09a561a1efb43bc4b3fe7fd1c2e58225e262 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c @@ -0,0 +1,213 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "qmgr.h" + +static void +nvkm_falcon_msgq_open(struct nvkm_falcon_msgq *msgq) +{ + mutex_lock(&msgq->mutex); + msgq->position = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->tail_reg); +} + +static void +nvkm_falcon_msgq_close(struct nvkm_falcon_msgq *msgq, bool commit) +{ + struct nvkm_falcon *falcon = msgq->qmgr->falcon; + + if (commit) + nvkm_falcon_wr32(falcon, msgq->tail_reg, msgq->position); + + mutex_unlock(&msgq->mutex); +} + +static bool +nvkm_falcon_msgq_empty(struct nvkm_falcon_msgq *msgq) +{ + u32 head = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->head_reg); + u32 tail = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->tail_reg); + return head == tail; +} + +static int +nvkm_falcon_msgq_pop(struct nvkm_falcon_msgq *msgq, void *data, u32 size) +{ + struct nvkm_falcon *falcon = msgq->qmgr->falcon; + u32 head, tail, available; + + head = nvkm_falcon_rd32(falcon, msgq->head_reg); + /* has the buffer looped? 
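If head is behind our cached read position, the producer has wrapped
+	 * past the end of the queue, so reading resumes from its start.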
*/ + if (head < msgq->position) + msgq->position = msgq->offset; + + tail = msgq->position; + + available = head - tail; + if (size > available) { + FLCNQ_ERR(msgq, "requested %d bytes, but only %d available", + size, available); + return -EINVAL; + } + + nvkm_falcon_read_dmem(falcon, tail, size, 0, data); + msgq->position += ALIGN(size, QUEUE_ALIGNMENT); + return 0; +} + +static int +nvkm_falcon_msgq_read(struct nvkm_falcon_msgq *msgq, struct nv_falcon_msg *hdr) +{ + int ret = 0; + + nvkm_falcon_msgq_open(msgq); + + if (nvkm_falcon_msgq_empty(msgq)) + goto close; + + ret = nvkm_falcon_msgq_pop(msgq, hdr, HDR_SIZE); + if (ret) { + FLCNQ_ERR(msgq, "failed to read message header"); + goto close; + } + + if (hdr->size > MSG_BUF_SIZE) { + FLCNQ_ERR(msgq, "message too big, %d bytes", hdr->size); + ret = -ENOSPC; + goto close; + } + + if (hdr->size > HDR_SIZE) { + u32 read_size = hdr->size - HDR_SIZE; + + ret = nvkm_falcon_msgq_pop(msgq, (hdr + 1), read_size); + if (ret) { + FLCNQ_ERR(msgq, "failed to read message data"); + goto close; + } + } + + ret = 1; +close: + nvkm_falcon_msgq_close(msgq, (ret >= 0)); + return ret; +} + +static int +nvkm_falcon_msgq_exec(struct nvkm_falcon_msgq *msgq, struct nv_falcon_msg *hdr) +{ + struct nvkm_falcon_qmgr_seq *seq; + + seq = &msgq->qmgr->seq.id[hdr->seq_id]; + if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) { + FLCNQ_ERR(msgq, "message for unknown sequence %08x", seq->id); + return -EINVAL; + } + + if (seq->state == SEQ_STATE_USED) { + if (seq->callback) + seq->result = seq->callback(seq->priv, hdr); + } + + if (seq->async) { + nvkm_falcon_qmgr_seq_release(msgq->qmgr, seq); + return 0; + } + + complete_all(&seq->done); + return 0; +} + +void +nvkm_falcon_msgq_recv(struct nvkm_falcon_msgq *msgq) +{ + /* + * We are invoked from a worker thread, so normally we have plenty of + * stack space to work with. 
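+	 * Oversized messages cannot overflow it: nvkm_falcon_msgq_read()
+	 * rejects anything larger than MSG_BUF_SIZE with -ENOSPC.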
+ */ + u8 msg_buffer[MSG_BUF_SIZE]; + struct nv_falcon_msg *hdr = (void *)msg_buffer; + + while (nvkm_falcon_msgq_read(msgq, hdr) > 0) + nvkm_falcon_msgq_exec(msgq, hdr); +} + +int +nvkm_falcon_msgq_recv_initmsg(struct nvkm_falcon_msgq *msgq, + void *data, u32 size) +{ + struct nvkm_falcon *falcon = msgq->qmgr->falcon; + struct nv_falcon_msg *hdr = data; + int ret; + + msgq->head_reg = falcon->func->msgq.head; + msgq->tail_reg = falcon->func->msgq.tail; + msgq->offset = nvkm_falcon_rd32(falcon, falcon->func->msgq.tail); + + nvkm_falcon_msgq_open(msgq); + ret = nvkm_falcon_msgq_pop(msgq, data, size); + if (ret == 0 && hdr->size != size) { + FLCN_ERR(falcon, "unexpected init message size %d vs %d", + hdr->size, size); + ret = -EINVAL; + } + nvkm_falcon_msgq_close(msgq, ret == 0); + return ret; +} + +void +nvkm_falcon_msgq_init(struct nvkm_falcon_msgq *msgq, + u32 index, u32 offset, u32 size) +{ + const struct nvkm_falcon_func *func = msgq->qmgr->falcon->func; + + msgq->head_reg = func->msgq.head + index * func->msgq.stride; + msgq->tail_reg = func->msgq.tail + index * func->msgq.stride; + msgq->offset = offset; + + FLCNQ_DBG(msgq, "initialised @ index %d offset 0x%08x size 0x%08x", + index, msgq->offset, size); +} + +void +nvkm_falcon_msgq_del(struct nvkm_falcon_msgq **pmsgq) +{ + struct nvkm_falcon_msgq *msgq = *pmsgq; + if (msgq) { + kfree(*pmsgq); + *pmsgq = NULL; + } +} + +int +nvkm_falcon_msgq_new(struct nvkm_falcon_qmgr *qmgr, const char *name, + struct nvkm_falcon_msgq **pmsgq) +{ + struct nvkm_falcon_msgq *msgq = *pmsgq; + + if (!(msgq = *pmsgq = kzalloc(sizeof(*msgq), GFP_KERNEL))) + return -ENOMEM; + + msgq->qmgr = qmgr; + msgq->name = name; + mutex_init(&msgq->mutex); + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c deleted file mode 100644 index a8bee1e046aaae61ddfa6a404e14b44ad0395335..0000000000000000000000000000000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c +++ /dev/null @@ -1,577 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#include "msgqueue.h" -#include - -#include - - -#define HDR_SIZE sizeof(struct nvkm_msgqueue_hdr) -#define QUEUE_ALIGNMENT 4 -/* max size of the messages we can receive */ -#define MSG_BUF_SIZE 128 - -static int -msg_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue) -{ - struct nvkm_falcon *falcon = priv->falcon; - - mutex_lock(&queue->mutex); - - queue->position = nvkm_falcon_rd32(falcon, queue->tail_reg); - - return 0; -} - -static void -msg_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue, - bool commit) -{ - struct nvkm_falcon *falcon = priv->falcon; - - if (commit) - nvkm_falcon_wr32(falcon, queue->tail_reg, queue->position); - - mutex_unlock(&queue->mutex); -} - -static bool -msg_queue_empty(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue) -{ - struct nvkm_falcon *falcon = priv->falcon; - u32 head, tail; - - head = nvkm_falcon_rd32(falcon, queue->head_reg); - tail = nvkm_falcon_rd32(falcon, queue->tail_reg); - - return head == tail; -} - -static int -msg_queue_pop(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue, - void *data, u32 size) -{ - struct nvkm_falcon *falcon = priv->falcon; - const struct nvkm_subdev *subdev = priv->falcon->owner; - u32 head, tail, available; - - head = nvkm_falcon_rd32(falcon, queue->head_reg); - /* has the buffer looped? */ - if (head < queue->position) - queue->position = queue->offset; - - tail = queue->position; - - available = head - tail; - - if (available == 0) { - nvkm_warn(subdev, "no message data available\n"); - return 0; - } - - if (size > available) { - nvkm_warn(subdev, "message data smaller than read request\n"); - size = available; - } - - nvkm_falcon_read_dmem(priv->falcon, tail, size, 0, data); - queue->position += ALIGN(size, QUEUE_ALIGNMENT); - - return size; -} - -static int -msg_queue_read(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue, - struct nvkm_msgqueue_hdr *hdr) -{ - const struct nvkm_subdev *subdev = priv->falcon->owner; - int err; - - err = msg_queue_open(priv, queue); - if (err) { - nvkm_error(subdev, "fail to open queue %d\n", queue->index); - return err; - } - - if (msg_queue_empty(priv, queue)) { - err = 0; - goto close; - } - - err = msg_queue_pop(priv, queue, hdr, HDR_SIZE); - if (err >= 0 && err != HDR_SIZE) - err = -EINVAL; - if (err < 0) { - nvkm_error(subdev, "failed to read message header: %d\n", err); - goto close; - } - - if (hdr->size > MSG_BUF_SIZE) { - nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size); - err = -ENOSPC; - goto close; - } - - if (hdr->size > HDR_SIZE) { - u32 read_size = hdr->size - HDR_SIZE; - - err = msg_queue_pop(priv, queue, (hdr + 1), read_size); - if (err >= 0 && err != read_size) - err = -EINVAL; - if (err < 0) { - nvkm_error(subdev, "failed to read message: %d\n", err); - goto close; - } - } - -close: - msg_queue_close(priv, queue, (err >= 0)); - - return err; -} - -static bool -cmd_queue_has_room(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue, - u32 size, bool *rewind) -{ - struct nvkm_falcon *falcon = priv->falcon; - u32 head, tail, free; - - size = ALIGN(size, QUEUE_ALIGNMENT); - - head = nvkm_falcon_rd32(falcon, queue->head_reg); - tail = nvkm_falcon_rd32(falcon, queue->tail_reg); - - if (head >= tail) { - free = queue->offset + queue->size - head; - free -= HDR_SIZE; - - if (size > free) { - *rewind = true; - head = queue->offset; - } - } - - if (head < tail) - free = tail - head - 1; - - return size <= free; -} - -static int -cmd_queue_push(struct 
nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue, - void *data, u32 size) -{ - nvkm_falcon_load_dmem(priv->falcon, data, queue->position, size, 0); - queue->position += ALIGN(size, QUEUE_ALIGNMENT); - - return 0; -} - -/* REWIND unit is always 0x00 */ -#define MSGQUEUE_UNIT_REWIND 0x00 - -static void -cmd_queue_rewind(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue) -{ - const struct nvkm_subdev *subdev = priv->falcon->owner; - struct nvkm_msgqueue_hdr cmd; - int err; - - cmd.unit_id = MSGQUEUE_UNIT_REWIND; - cmd.size = sizeof(cmd); - err = cmd_queue_push(priv, queue, &cmd, cmd.size); - if (err) - nvkm_error(subdev, "queue %d rewind failed\n", queue->index); - else - nvkm_error(subdev, "queue %d rewinded\n", queue->index); - - queue->position = queue->offset; -} - -static int -cmd_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue, - u32 size) -{ - struct nvkm_falcon *falcon = priv->falcon; - const struct nvkm_subdev *subdev = priv->falcon->owner; - bool rewind = false; - - mutex_lock(&queue->mutex); - - if (!cmd_queue_has_room(priv, queue, size, &rewind)) { - nvkm_error(subdev, "queue full\n"); - mutex_unlock(&queue->mutex); - return -EAGAIN; - } - - queue->position = nvkm_falcon_rd32(falcon, queue->head_reg); - - if (rewind) - cmd_queue_rewind(priv, queue); - - return 0; -} - -static void -cmd_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue, - bool commit) -{ - struct nvkm_falcon *falcon = priv->falcon; - - if (commit) - nvkm_falcon_wr32(falcon, queue->head_reg, queue->position); - - mutex_unlock(&queue->mutex); -} - -static int -cmd_write(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *cmd, - struct nvkm_msgqueue_queue *queue) -{ - const struct nvkm_subdev *subdev = priv->falcon->owner; - static unsigned timeout = 2000; - unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout); - int ret = -EAGAIN; - bool commit = true; - - while (ret == -EAGAIN && time_before(jiffies, end_jiffies)) - ret = cmd_queue_open(priv, queue, cmd->size); - if (ret) { - nvkm_error(subdev, "pmu_queue_open_write failed\n"); - return ret; - } - - ret = cmd_queue_push(priv, queue, cmd, cmd->size); - if (ret) { - nvkm_error(subdev, "pmu_queue_push failed\n"); - commit = false; - } - - cmd_queue_close(priv, queue, commit); - - return ret; -} - -static struct nvkm_msgqueue_seq * -msgqueue_seq_acquire(struct nvkm_msgqueue *priv) -{ - const struct nvkm_subdev *subdev = priv->falcon->owner; - struct nvkm_msgqueue_seq *seq; - u32 index; - - mutex_lock(&priv->seq_lock); - - index = find_first_zero_bit(priv->seq_tbl, NVKM_MSGQUEUE_NUM_SEQUENCES); - - if (index >= NVKM_MSGQUEUE_NUM_SEQUENCES) { - nvkm_error(subdev, "no free sequence available\n"); - mutex_unlock(&priv->seq_lock); - return ERR_PTR(-EAGAIN); - } - - set_bit(index, priv->seq_tbl); - - mutex_unlock(&priv->seq_lock); - - seq = &priv->seq[index]; - seq->state = SEQ_STATE_PENDING; - - return seq; -} - -static void -msgqueue_seq_release(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_seq *seq) -{ - /* no need to acquire seq_lock since clear_bit is atomic */ - seq->state = SEQ_STATE_FREE; - seq->callback = NULL; - seq->completion = NULL; - clear_bit(seq->id, priv->seq_tbl); -} - -/* specifies that we want to know the command status in the answer message */ -#define CMD_FLAGS_STATUS BIT(0) -/* specifies that we want an interrupt when the answer message is queued */ -#define CMD_FLAGS_INTR BIT(1) - -int -nvkm_msgqueue_post(struct nvkm_msgqueue *priv, enum msgqueue_msg_priority prio, - 
struct nvkm_msgqueue_hdr *cmd, nvkm_msgqueue_callback cb, - struct completion *completion, bool wait_init) -{ - struct nvkm_msgqueue_seq *seq; - struct nvkm_msgqueue_queue *queue; - int ret; - - if (wait_init && !wait_for_completion_timeout(&priv->init_done, - msecs_to_jiffies(1000))) - return -ETIMEDOUT; - - queue = priv->func->cmd_queue(priv, prio); - if (IS_ERR(queue)) - return PTR_ERR(queue); - - seq = msgqueue_seq_acquire(priv); - if (IS_ERR(seq)) - return PTR_ERR(seq); - - cmd->seq_id = seq->id; - cmd->ctrl_flags = CMD_FLAGS_STATUS | CMD_FLAGS_INTR; - - seq->callback = cb; - seq->state = SEQ_STATE_USED; - seq->completion = completion; - - ret = cmd_write(priv, cmd, queue); - if (ret) { - seq->state = SEQ_STATE_PENDING; - msgqueue_seq_release(priv, seq); - } - - return ret; -} - -static int -msgqueue_msg_handle(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *hdr) -{ - const struct nvkm_subdev *subdev = priv->falcon->owner; - struct nvkm_msgqueue_seq *seq; - - seq = &priv->seq[hdr->seq_id]; - if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) { - nvkm_error(subdev, "msg for unknown sequence %d", seq->id); - return -EINVAL; - } - - if (seq->state == SEQ_STATE_USED) { - if (seq->callback) - seq->callback(priv, hdr); - } - - if (seq->completion) - complete(seq->completion); - - msgqueue_seq_release(priv, seq); - - return 0; -} - -static int -msgqueue_handle_init_msg(struct nvkm_msgqueue *priv, - struct nvkm_msgqueue_hdr *hdr) -{ - struct nvkm_falcon *falcon = priv->falcon; - const struct nvkm_subdev *subdev = falcon->owner; - u32 tail; - u32 tail_reg; - int ret; - - /* - * Of course the message queue registers vary depending on the falcon - * used... - */ - switch (falcon->owner->index) { - case NVKM_SUBDEV_PMU: - tail_reg = 0x4cc; - break; - case NVKM_ENGINE_SEC2: - tail_reg = 0xa34; - break; - default: - nvkm_error(subdev, "falcon %s unsupported for msgqueue!\n", - nvkm_subdev_name[falcon->owner->index]); - return -EINVAL; - } - - /* - * Read the message - queues are not initialized yet so we cannot rely - * on msg_queue_read() - */ - tail = nvkm_falcon_rd32(falcon, tail_reg); - nvkm_falcon_read_dmem(falcon, tail, HDR_SIZE, 0, hdr); - - if (hdr->size > MSG_BUF_SIZE) { - nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size); - return -ENOSPC; - } - - nvkm_falcon_read_dmem(falcon, tail + HDR_SIZE, hdr->size - HDR_SIZE, 0, - (hdr + 1)); - - tail += ALIGN(hdr->size, QUEUE_ALIGNMENT); - nvkm_falcon_wr32(falcon, tail_reg, tail); - - ret = priv->func->init_func->init_callback(priv, hdr); - if (ret) - return ret; - - return 0; -} - -void -nvkm_msgqueue_process_msgs(struct nvkm_msgqueue *priv, - struct nvkm_msgqueue_queue *queue) -{ - /* - * We are invoked from a worker thread, so normally we have plenty of - * stack space to work with. 
- */ - u8 msg_buffer[MSG_BUF_SIZE]; - struct nvkm_msgqueue_hdr *hdr = (void *)msg_buffer; - int ret; - - /* the first message we receive must be the init message */ - if ((!priv->init_msg_received)) { - ret = msgqueue_handle_init_msg(priv, hdr); - if (!ret) - priv->init_msg_received = true; - } else { - while (msg_queue_read(priv, queue, hdr) > 0) - msgqueue_msg_handle(priv, hdr); - } -} - -void -nvkm_msgqueue_write_cmdline(struct nvkm_msgqueue *queue, void *buf) -{ - if (!queue || !queue->func || !queue->func->init_func) - return; - - queue->func->init_func->gen_cmdline(queue, buf); -} - -int -nvkm_msgqueue_acr_boot_falcons(struct nvkm_msgqueue *queue, - unsigned long falcon_mask) -{ - unsigned long falcon; - - if (!queue || !queue->func->acr_func) - return -ENODEV; - - /* Does the firmware support booting multiple falcons? */ - if (queue->func->acr_func->boot_multiple_falcons) - return queue->func->acr_func->boot_multiple_falcons(queue, - falcon_mask); - - /* Else boot all requested falcons individually */ - if (!queue->func->acr_func->boot_falcon) - return -ENODEV; - - for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) { - int ret = queue->func->acr_func->boot_falcon(queue, falcon); - - if (ret) - return ret; - } - - return 0; -} - -int -nvkm_msgqueue_new(u32 version, struct nvkm_falcon *falcon, - const struct nvkm_secboot *sb, struct nvkm_msgqueue **queue) -{ - const struct nvkm_subdev *subdev = falcon->owner; - int ret = -EINVAL; - - switch (version) { - case 0x0137c63d: - ret = msgqueue_0137c63d_new(falcon, sb, queue); - break; - case 0x0137bca5: - ret = msgqueue_0137bca5_new(falcon, sb, queue); - break; - case 0x0148cdec: - case 0x015ccf3e: - case 0x0167d263: - ret = msgqueue_0148cdec_new(falcon, sb, queue); - break; - default: - nvkm_error(subdev, "unhandled firmware version 0x%08x\n", - version); - break; - } - - if (ret == 0) { - nvkm_debug(subdev, "firmware version: 0x%08x\n", version); - (*queue)->fw_version = version; - } - - return ret; -} - -void -nvkm_msgqueue_del(struct nvkm_msgqueue **queue) -{ - if (*queue) { - (*queue)->func->dtor(*queue); - *queue = NULL; - } -} - -void -nvkm_msgqueue_recv(struct nvkm_msgqueue *queue) -{ - if (!queue->func || !queue->func->recv) { - const struct nvkm_subdev *subdev = queue->falcon->owner; - - nvkm_warn(subdev, "missing msgqueue recv function\n"); - return; - } - - queue->func->recv(queue); -} - -int -nvkm_msgqueue_reinit(struct nvkm_msgqueue *queue) -{ - /* firmware not set yet... */ - if (!queue) - return 0; - - queue->init_msg_received = false; - reinit_completion(&queue->init_done); - - return 0; -} - -void -nvkm_msgqueue_ctor(const struct nvkm_msgqueue_func *func, - struct nvkm_falcon *falcon, - struct nvkm_msgqueue *queue) -{ - int i; - - queue->func = func; - queue->falcon = falcon; - mutex_init(&queue->seq_lock); - for (i = 0; i < NVKM_MSGQUEUE_NUM_SEQUENCES; i++) - queue->seq[i].id = i; - - init_completion(&queue->init_done); - - -} diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h deleted file mode 100644 index 13b54f8d8e049e5edfd052519306f3a17cc2d32a..0000000000000000000000000000000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h +++ /dev/null @@ -1,213 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef __NVKM_CORE_FALCON_MSGQUEUE_H -#define __NVKM_CORE_FALCON_MSGQUEUE_H - -#include - -/* - * The struct nvkm_msgqueue (named so for lack of better candidate) manages - * a firmware (typically, NVIDIA signed firmware) running under a given falcon. - * - * Such firmwares expect to receive commands (through one or several command - * queues) and will reply to such command by sending messages (using one - * message queue). - * - * Each firmware can support one or several units - ACR for managing secure - * falcons, PMU for power management, etc. A unit can be seen as a class to - * which command can be sent. - * - * One usage example would be to send a command to the SEC falcon to ask it to - * reset a secure falcon. The SEC falcon will receive the command, process it, - * and send a message to signal success or failure. Only when the corresponding - * message is received can the requester assume the request has been processed. - * - * Since we expect many variations between the firmwares NVIDIA will release - * across GPU generations, this library is built in a very modular way. Message - * formats and queues details (such as number of usage) are left to - * specializations of struct nvkm_msgqueue, while the functions in msgqueue.c - * take care of posting commands and processing messages in a fashion that is - * universal. - * - */ - -enum msgqueue_msg_priority { - MSGQUEUE_MSG_PRIORITY_HIGH, - MSGQUEUE_MSG_PRIORITY_LOW, -}; - -/** - * struct nvkm_msgqueue_hdr - header for all commands/messages - * @unit_id: id of firmware using receiving the command/sending the message - * @size: total size of command/message - * @ctrl_flags: type of command/message - * @seq_id: used to match a message from its corresponding command - */ -struct nvkm_msgqueue_hdr { - u8 unit_id; - u8 size; - u8 ctrl_flags; - u8 seq_id; -}; - -/** - * struct nvkm_msgqueue_msg - base message. - * - * This is just a header and a message (or command) type. Useful when - * building command-specific structures. 
- */ -struct nvkm_msgqueue_msg { - struct nvkm_msgqueue_hdr hdr; - u8 msg_type; -}; - -struct nvkm_msgqueue; -typedef void -(*nvkm_msgqueue_callback)(struct nvkm_msgqueue *, struct nvkm_msgqueue_hdr *); - -/** - * struct nvkm_msgqueue_init_func - msgqueue functions related to initialization - * - * @gen_cmdline: build the commandline into a pre-allocated buffer - * @init_callback: called to process the init message - */ -struct nvkm_msgqueue_init_func { - void (*gen_cmdline)(struct nvkm_msgqueue *, void *); - int (*init_callback)(struct nvkm_msgqueue *, struct nvkm_msgqueue_hdr *); -}; - -/** - * struct nvkm_msgqueue_acr_func - msgqueue functions related to ACR - * - * @boot_falcon: build and send the command to reset a given falcon - * @boot_multiple_falcons: build and send the command to reset several falcons - */ -struct nvkm_msgqueue_acr_func { - int (*boot_falcon)(struct nvkm_msgqueue *, enum nvkm_secboot_falcon); - int (*boot_multiple_falcons)(struct nvkm_msgqueue *, unsigned long); -}; - -struct nvkm_msgqueue_func { - const struct nvkm_msgqueue_init_func *init_func; - const struct nvkm_msgqueue_acr_func *acr_func; - void (*dtor)(struct nvkm_msgqueue *); - struct nvkm_msgqueue_queue *(*cmd_queue)(struct nvkm_msgqueue *, - enum msgqueue_msg_priority); - void (*recv)(struct nvkm_msgqueue *queue); -}; - -/** - * struct nvkm_msgqueue_queue - information about a command or message queue - * - * The number of queues is firmware-dependent. All queues must have their - * information filled by the init message handler. - * - * @mutex_lock: to be acquired when the queue is being used - * @index: physical queue index - * @offset: DMEM offset where this queue begins - * @size: size allocated to this queue in DMEM (in bytes) - * @position: current write position - * @head_reg: address of the HEAD register for this queue - * @tail_reg: address of the TAIL register for this queue - */ -struct nvkm_msgqueue_queue { - struct mutex mutex; - u32 index; - u32 offset; - u32 size; - u32 position; - - u32 head_reg; - u32 tail_reg; -}; - -/** - * struct nvkm_msgqueue_seq - keep track of ongoing commands - * - * Every time a command is sent, a sequence is assigned to it so the - * corresponding message can be matched. Upon receiving the message, a callback - * can be called and/or a completion signaled. - * - * @id: sequence ID - * @state: current state - * @callback: callback to call upon receiving matching message - * @completion: completion to signal after callback is called - */ -struct nvkm_msgqueue_seq { - u16 id; - enum { - SEQ_STATE_FREE = 0, - SEQ_STATE_PENDING, - SEQ_STATE_USED, - SEQ_STATE_CANCELLED - } state; - nvkm_msgqueue_callback callback; - struct completion *completion; -}; - -/* - * We can have an arbitrary number of sequences, but realistically we will - * probably not use that much simultaneously. 
- */ -#define NVKM_MSGQUEUE_NUM_SEQUENCES 16 - -/** - * struct nvkm_msgqueue - manage a command/message based FW on a falcon - * - * @falcon: falcon to be managed - * @func: implementation of the firmware to use - * @init_msg_received: whether the init message has already been received - * @init_done: whether all init is complete and commands can be processed - * @seq_lock: protects seq and seq_tbl - * @seq: sequences to match commands and messages - * @seq_tbl: bitmap of sequences currently in use - */ -struct nvkm_msgqueue { - struct nvkm_falcon *falcon; - const struct nvkm_msgqueue_func *func; - u32 fw_version; - bool init_msg_received; - struct completion init_done; - - struct mutex seq_lock; - struct nvkm_msgqueue_seq seq[NVKM_MSGQUEUE_NUM_SEQUENCES]; - unsigned long seq_tbl[BITS_TO_LONGS(NVKM_MSGQUEUE_NUM_SEQUENCES)]; -}; - -void nvkm_msgqueue_ctor(const struct nvkm_msgqueue_func *, struct nvkm_falcon *, - struct nvkm_msgqueue *); -int nvkm_msgqueue_post(struct nvkm_msgqueue *, enum msgqueue_msg_priority, - struct nvkm_msgqueue_hdr *, nvkm_msgqueue_callback, - struct completion *, bool); -void nvkm_msgqueue_process_msgs(struct nvkm_msgqueue *, - struct nvkm_msgqueue_queue *); - -int msgqueue_0137c63d_new(struct nvkm_falcon *, const struct nvkm_secboot *, - struct nvkm_msgqueue **); -int msgqueue_0137bca5_new(struct nvkm_falcon *, const struct nvkm_secboot *, - struct nvkm_msgqueue **); -int msgqueue_0148cdec_new(struct nvkm_falcon *, const struct nvkm_secboot *, - struct nvkm_msgqueue **); - -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c deleted file mode 100644 index fec0273158f6a7f99499c09a7073ec450e019ee3..0000000000000000000000000000000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c +++ /dev/null @@ -1,436 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -#include "msgqueue.h" -#include -#include - -/* Queues identifiers */ -enum { - /* High Priority Command Queue for Host -> PMU communication */ - MSGQUEUE_0137C63D_COMMAND_QUEUE_HPQ = 0, - /* Low Priority Command Queue for Host -> PMU communication */ - MSGQUEUE_0137C63D_COMMAND_QUEUE_LPQ = 1, - /* Message queue for PMU -> Host communication */ - MSGQUEUE_0137C63D_MESSAGE_QUEUE = 4, - MSGQUEUE_0137C63D_NUM_QUEUES = 5, -}; - -struct msgqueue_0137c63d { - struct nvkm_msgqueue base; - - struct nvkm_msgqueue_queue queue[MSGQUEUE_0137C63D_NUM_QUEUES]; -}; -#define msgqueue_0137c63d(q) \ - container_of(q, struct msgqueue_0137c63d, base) - -struct msgqueue_0137bca5 { - struct msgqueue_0137c63d base; - - u64 wpr_addr; -}; -#define msgqueue_0137bca5(q) \ - container_of(container_of(q, struct msgqueue_0137c63d, base), \ - struct msgqueue_0137bca5, base); - -static struct nvkm_msgqueue_queue * -msgqueue_0137c63d_cmd_queue(struct nvkm_msgqueue *queue, - enum msgqueue_msg_priority priority) -{ - struct msgqueue_0137c63d *priv = msgqueue_0137c63d(queue); - const struct nvkm_subdev *subdev = priv->base.falcon->owner; - - switch (priority) { - case MSGQUEUE_MSG_PRIORITY_HIGH: - return &priv->queue[MSGQUEUE_0137C63D_COMMAND_QUEUE_HPQ]; - case MSGQUEUE_MSG_PRIORITY_LOW: - return &priv->queue[MSGQUEUE_0137C63D_COMMAND_QUEUE_LPQ]; - default: - nvkm_error(subdev, "invalid command queue!\n"); - return ERR_PTR(-EINVAL); - } -} - -static void -msgqueue_0137c63d_process_msgs(struct nvkm_msgqueue *queue) -{ - struct msgqueue_0137c63d *priv = msgqueue_0137c63d(queue); - struct nvkm_msgqueue_queue *q_queue = - &priv->queue[MSGQUEUE_0137C63D_MESSAGE_QUEUE]; - - nvkm_msgqueue_process_msgs(&priv->base, q_queue); -} - -/* Init unit */ -#define MSGQUEUE_0137C63D_UNIT_INIT 0x07 - -enum { - INIT_MSG_INIT = 0x0, -}; - -static void -init_gen_cmdline(struct nvkm_msgqueue *queue, void *buf) -{ - struct { - u32 reserved; - u32 freq_hz; - u32 trace_size; - u32 trace_dma_base; - u16 trace_dma_base1; - u8 trace_dma_offset; - u32 trace_dma_idx; - bool secure_mode; - bool raise_priv_sec; - struct { - u32 dma_base; - u16 dma_base1; - u8 dma_offset; - u16 fb_size; - u8 dma_idx; - } gc6_ctx; - u8 pad; - } *args = buf; - - args->secure_mode = 1; -} - -/* forward declaration */ -static int acr_init_wpr(struct nvkm_msgqueue *queue); - -static int -init_callback(struct nvkm_msgqueue *_queue, struct nvkm_msgqueue_hdr *hdr) -{ - struct msgqueue_0137c63d *priv = msgqueue_0137c63d(_queue); - struct { - struct nvkm_msgqueue_msg base; - - u8 pad; - u16 os_debug_entry_point; - - struct { - u16 size; - u16 offset; - u8 index; - u8 pad; - } queue_info[MSGQUEUE_0137C63D_NUM_QUEUES]; - - u16 sw_managed_area_offset; - u16 sw_managed_area_size; - } *init = (void *)hdr; - const struct nvkm_subdev *subdev = _queue->falcon->owner; - int i; - - if (init->base.hdr.unit_id != MSGQUEUE_0137C63D_UNIT_INIT) { - nvkm_error(subdev, "expected message from init unit\n"); - return -EINVAL; - } - - if (init->base.msg_type != INIT_MSG_INIT) { - nvkm_error(subdev, "expected PMU init msg\n"); - return -EINVAL; - } - - for (i = 0; i < MSGQUEUE_0137C63D_NUM_QUEUES; i++) { - struct nvkm_msgqueue_queue *queue = &priv->queue[i]; - - mutex_init(&queue->mutex); - - queue->index = init->queue_info[i].index; - queue->offset = init->queue_info[i].offset; - queue->size = init->queue_info[i].size; - - if (i != MSGQUEUE_0137C63D_MESSAGE_QUEUE) { - queue->head_reg = 0x4a0 + (queue->index * 4); - queue->tail_reg = 0x4b0 + (queue->index * 4); - } else { - queue->head_reg = 
0x4c8; - queue->tail_reg = 0x4cc; - } - - nvkm_debug(subdev, - "queue %d: index %d, offset 0x%08x, size 0x%08x\n", - i, queue->index, queue->offset, queue->size); - } - - /* Complete initialization by initializing WPR region */ - return acr_init_wpr(&priv->base); -} - -static const struct nvkm_msgqueue_init_func -msgqueue_0137c63d_init_func = { - .gen_cmdline = init_gen_cmdline, - .init_callback = init_callback, -}; - - - -/* ACR unit */ -#define MSGQUEUE_0137C63D_UNIT_ACR 0x0a - -enum { - ACR_CMD_INIT_WPR_REGION = 0x00, - ACR_CMD_BOOTSTRAP_FALCON = 0x01, - ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS = 0x03, -}; - -static void -acr_init_wpr_callback(struct nvkm_msgqueue *queue, - struct nvkm_msgqueue_hdr *hdr) -{ - struct { - struct nvkm_msgqueue_msg base; - u32 error_code; - } *msg = (void *)hdr; - const struct nvkm_subdev *subdev = queue->falcon->owner; - - if (msg->error_code) { - nvkm_error(subdev, "ACR WPR init failure: %d\n", - msg->error_code); - return; - } - - nvkm_debug(subdev, "ACR WPR init complete\n"); - complete_all(&queue->init_done); -} - -static int -acr_init_wpr(struct nvkm_msgqueue *queue) -{ - /* - * region_id: region ID in WPR region - * wpr_offset: offset in WPR region - */ - struct { - struct nvkm_msgqueue_hdr hdr; - u8 cmd_type; - u32 region_id; - u32 wpr_offset; - } cmd; - memset(&cmd, 0, sizeof(cmd)); - - cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR; - cmd.hdr.size = sizeof(cmd); - cmd.cmd_type = ACR_CMD_INIT_WPR_REGION; - cmd.region_id = 0x01; - cmd.wpr_offset = 0x00; - - nvkm_msgqueue_post(queue, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr, - acr_init_wpr_callback, NULL, false); - - return 0; -} - - -static void -acr_boot_falcon_callback(struct nvkm_msgqueue *priv, - struct nvkm_msgqueue_hdr *hdr) -{ - struct acr_bootstrap_falcon_msg { - struct nvkm_msgqueue_msg base; - - u32 falcon_id; - } *msg = (void *)hdr; - const struct nvkm_subdev *subdev = priv->falcon->owner; - u32 falcon_id = msg->falcon_id; - - if (falcon_id >= NVKM_SECBOOT_FALCON_END) { - nvkm_error(subdev, "in bootstrap falcon callback:\n"); - nvkm_error(subdev, "invalid falcon ID 0x%x\n", falcon_id); - return; - } - nvkm_debug(subdev, "%s booted\n", nvkm_secboot_falcon_name[falcon_id]); -} - -enum { - ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES = 0, - ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_NO = 1, -}; - -static int -acr_boot_falcon(struct nvkm_msgqueue *priv, enum nvkm_secboot_falcon falcon) -{ - DECLARE_COMPLETION_ONSTACK(completed); - /* - * flags - Flag specifying RESET or no RESET. - * falcon id - Falcon id specifying falcon to bootstrap. 
- */ - struct { - struct nvkm_msgqueue_hdr hdr; - u8 cmd_type; - u32 flags; - u32 falcon_id; - } cmd; - - memset(&cmd, 0, sizeof(cmd)); - - cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR; - cmd.hdr.size = sizeof(cmd); - cmd.cmd_type = ACR_CMD_BOOTSTRAP_FALCON; - cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES; - cmd.falcon_id = falcon; - nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr, - acr_boot_falcon_callback, &completed, true); - - if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000))) - return -ETIMEDOUT; - - return 0; -} - -static void -acr_boot_multiple_falcons_callback(struct nvkm_msgqueue *priv, - struct nvkm_msgqueue_hdr *hdr) -{ - struct acr_bootstrap_falcon_msg { - struct nvkm_msgqueue_msg base; - - u32 falcon_mask; - } *msg = (void *)hdr; - const struct nvkm_subdev *subdev = priv->falcon->owner; - unsigned long falcon_mask = msg->falcon_mask; - u32 falcon_id, falcon_treated = 0; - - for_each_set_bit(falcon_id, &falcon_mask, NVKM_SECBOOT_FALCON_END) { - nvkm_debug(subdev, "%s booted\n", - nvkm_secboot_falcon_name[falcon_id]); - falcon_treated |= BIT(falcon_id); - } - - if (falcon_treated != msg->falcon_mask) { - nvkm_error(subdev, "in bootstrap falcon callback:\n"); - nvkm_error(subdev, "invalid falcon mask 0x%x\n", - msg->falcon_mask); - return; - } -} - -static int -acr_boot_multiple_falcons(struct nvkm_msgqueue *priv, unsigned long falcon_mask) -{ - DECLARE_COMPLETION_ONSTACK(completed); - /* - * flags - Flag specifying RESET or no RESET. - * falcon id - Falcon id specifying falcon to bootstrap. - */ - struct { - struct nvkm_msgqueue_hdr hdr; - u8 cmd_type; - u32 flags; - u32 falcon_mask; - u32 use_va_mask; - u32 wpr_lo; - u32 wpr_hi; - } cmd; - struct msgqueue_0137bca5 *queue = msgqueue_0137bca5(priv); - - memset(&cmd, 0, sizeof(cmd)); - - cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR; - cmd.hdr.size = sizeof(cmd); - cmd.cmd_type = ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS; - cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES; - cmd.falcon_mask = falcon_mask; - cmd.wpr_lo = lower_32_bits(queue->wpr_addr); - cmd.wpr_hi = upper_32_bits(queue->wpr_addr); - nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr, - acr_boot_multiple_falcons_callback, &completed, true); - - if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000))) - return -ETIMEDOUT; - - return 0; -} - -static const struct nvkm_msgqueue_acr_func -msgqueue_0137c63d_acr_func = { - .boot_falcon = acr_boot_falcon, -}; - -static const struct nvkm_msgqueue_acr_func -msgqueue_0137bca5_acr_func = { - .boot_falcon = acr_boot_falcon, - .boot_multiple_falcons = acr_boot_multiple_falcons, -}; - -static void -msgqueue_0137c63d_dtor(struct nvkm_msgqueue *queue) -{ - kfree(msgqueue_0137c63d(queue)); -} - -static const struct nvkm_msgqueue_func -msgqueue_0137c63d_func = { - .init_func = &msgqueue_0137c63d_init_func, - .acr_func = &msgqueue_0137c63d_acr_func, - .cmd_queue = msgqueue_0137c63d_cmd_queue, - .recv = msgqueue_0137c63d_process_msgs, - .dtor = msgqueue_0137c63d_dtor, -}; - -int -msgqueue_0137c63d_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb, - struct nvkm_msgqueue **queue) -{ - struct msgqueue_0137c63d *ret; - - ret = kzalloc(sizeof(*ret), GFP_KERNEL); - if (!ret) - return -ENOMEM; - - *queue = &ret->base; - - nvkm_msgqueue_ctor(&msgqueue_0137c63d_func, falcon, &ret->base); - - return 0; -} - -static const struct nvkm_msgqueue_func -msgqueue_0137bca5_func = { - .init_func = &msgqueue_0137c63d_init_func, - .acr_func = &msgqueue_0137bca5_acr_func, - .cmd_queue 
= msgqueue_0137c63d_cmd_queue, - .recv = msgqueue_0137c63d_process_msgs, - .dtor = msgqueue_0137c63d_dtor, -}; - -int -msgqueue_0137bca5_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb, - struct nvkm_msgqueue **queue) -{ - struct msgqueue_0137bca5 *ret; - - ret = kzalloc(sizeof(*ret), GFP_KERNEL); - if (!ret) - return -ENOMEM; - - *queue = &ret->base.base; - - /* - * FIXME this must be set to the address of a *GPU* mapping within the - * ACR address space! - */ - /* ret->wpr_addr = sb->wpr_addr; */ - - nvkm_msgqueue_ctor(&msgqueue_0137bca5_func, falcon, &ret->base.base); - - return 0; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c deleted file mode 100644 index 9424803b9ef499d9c15b4102e6789f277863319d..0000000000000000000000000000000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c +++ /dev/null @@ -1,264 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include "msgqueue.h" -#include -#include - -/* - * This firmware runs on the SEC falcon. It only has one command and one - * message queue, and uses a different command line and init message. 
- */ - -enum { - MSGQUEUE_0148CDEC_COMMAND_QUEUE = 0, - MSGQUEUE_0148CDEC_MESSAGE_QUEUE = 1, - MSGQUEUE_0148CDEC_NUM_QUEUES, -}; - -struct msgqueue_0148cdec { - struct nvkm_msgqueue base; - - struct nvkm_msgqueue_queue queue[MSGQUEUE_0148CDEC_NUM_QUEUES]; -}; -#define msgqueue_0148cdec(q) \ - container_of(q, struct msgqueue_0148cdec, base) - -static struct nvkm_msgqueue_queue * -msgqueue_0148cdec_cmd_queue(struct nvkm_msgqueue *queue, - enum msgqueue_msg_priority priority) -{ - struct msgqueue_0148cdec *priv = msgqueue_0148cdec(queue); - - return &priv->queue[MSGQUEUE_0148CDEC_COMMAND_QUEUE]; -} - -static void -msgqueue_0148cdec_process_msgs(struct nvkm_msgqueue *queue) -{ - struct msgqueue_0148cdec *priv = msgqueue_0148cdec(queue); - struct nvkm_msgqueue_queue *q_queue = - &priv->queue[MSGQUEUE_0148CDEC_MESSAGE_QUEUE]; - - nvkm_msgqueue_process_msgs(&priv->base, q_queue); -} - - -/* Init unit */ -#define MSGQUEUE_0148CDEC_UNIT_INIT 0x01 - -enum { - INIT_MSG_INIT = 0x0, -}; - -static void -init_gen_cmdline(struct nvkm_msgqueue *queue, void *buf) -{ - struct { - u32 freq_hz; - u32 falc_trace_size; - u32 falc_trace_dma_base; - u32 falc_trace_dma_idx; - bool secure_mode; - } *args = buf; - - args->secure_mode = false; -} - -static int -init_callback(struct nvkm_msgqueue *_queue, struct nvkm_msgqueue_hdr *hdr) -{ - struct msgqueue_0148cdec *priv = msgqueue_0148cdec(_queue); - struct { - struct nvkm_msgqueue_msg base; - - u8 num_queues; - u16 os_debug_entry_point; - - struct { - u32 offset; - u16 size; - u8 index; - u8 id; - } queue_info[MSGQUEUE_0148CDEC_NUM_QUEUES]; - - u16 sw_managed_area_offset; - u16 sw_managed_area_size; - } *init = (void *)hdr; - const struct nvkm_subdev *subdev = _queue->falcon->owner; - int i; - - if (init->base.hdr.unit_id != MSGQUEUE_0148CDEC_UNIT_INIT) { - nvkm_error(subdev, "expected message from init unit\n"); - return -EINVAL; - } - - if (init->base.msg_type != INIT_MSG_INIT) { - nvkm_error(subdev, "expected SEC init msg\n"); - return -EINVAL; - } - - for (i = 0; i < MSGQUEUE_0148CDEC_NUM_QUEUES; i++) { - u8 id = init->queue_info[i].id; - struct nvkm_msgqueue_queue *queue = &priv->queue[id]; - - mutex_init(&queue->mutex); - - queue->index = init->queue_info[i].index; - queue->offset = init->queue_info[i].offset; - queue->size = init->queue_info[i].size; - - if (id == MSGQUEUE_0148CDEC_MESSAGE_QUEUE) { - queue->head_reg = 0xa30 + (queue->index * 8); - queue->tail_reg = 0xa34 + (queue->index * 8); - } else { - queue->head_reg = 0xa00 + (queue->index * 8); - queue->tail_reg = 0xa04 + (queue->index * 8); - } - - nvkm_debug(subdev, - "queue %d: index %d, offset 0x%08x, size 0x%08x\n", - id, queue->index, queue->offset, queue->size); - } - - complete_all(&_queue->init_done); - - return 0; -} - -static const struct nvkm_msgqueue_init_func -msgqueue_0148cdec_init_func = { - .gen_cmdline = init_gen_cmdline, - .init_callback = init_callback, -}; - - - -/* ACR unit */ -#define MSGQUEUE_0148CDEC_UNIT_ACR 0x08 - -enum { - ACR_CMD_BOOTSTRAP_FALCON = 0x00, -}; - -static void -acr_boot_falcon_callback(struct nvkm_msgqueue *priv, - struct nvkm_msgqueue_hdr *hdr) -{ - struct acr_bootstrap_falcon_msg { - struct nvkm_msgqueue_msg base; - - u32 error_code; - u32 falcon_id; - } *msg = (void *)hdr; - const struct nvkm_subdev *subdev = priv->falcon->owner; - u32 falcon_id = msg->falcon_id; - - if (msg->error_code) { - nvkm_error(subdev, "in bootstrap falcon callback:\n"); - nvkm_error(subdev, "expected error code 0x%x\n", - msg->error_code); - return; - } - - if (falcon_id >= 
NVKM_SECBOOT_FALCON_END) {
-		nvkm_error(subdev, "in bootstrap falcon callback:\n");
-		nvkm_error(subdev, "invalid falcon ID 0x%x\n", falcon_id);
-		return;
-	}
-
-	nvkm_debug(subdev, "%s booted\n", nvkm_secboot_falcon_name[falcon_id]);
-}
-
-enum {
-	ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES = 0,
-	ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_NO = 1,
-};
-
-static int
-acr_boot_falcon(struct nvkm_msgqueue *priv, enum nvkm_secboot_falcon falcon)
-{
-	DECLARE_COMPLETION_ONSTACK(completed);
-	/*
-	 * flags - Flag specifying RESET or no RESET.
-	 * falcon id - Falcon id specifying falcon to bootstrap.
-	 */
-	struct {
-		struct nvkm_msgqueue_hdr hdr;
-		u8 cmd_type;
-		u32 flags;
-		u32 falcon_id;
-	} cmd;
-
-	memset(&cmd, 0, sizeof(cmd));
-
-	cmd.hdr.unit_id = MSGQUEUE_0148CDEC_UNIT_ACR;
-	cmd.hdr.size = sizeof(cmd);
-	cmd.cmd_type = ACR_CMD_BOOTSTRAP_FALCON;
-	cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
-	cmd.falcon_id = falcon;
-	nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
-			   acr_boot_falcon_callback, &completed, true);
-
-	if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000)))
-		return -ETIMEDOUT;
-
-	return 0;
-}
-
-const struct nvkm_msgqueue_acr_func
-msgqueue_0148cdec_acr_func = {
-	.boot_falcon = acr_boot_falcon,
-};
-
-static void
-msgqueue_0148cdec_dtor(struct nvkm_msgqueue *queue)
-{
-	kfree(msgqueue_0148cdec(queue));
-}
-
-const struct nvkm_msgqueue_func
-msgqueue_0148cdec_func = {
-	.init_func = &msgqueue_0148cdec_init_func,
-	.acr_func = &msgqueue_0148cdec_acr_func,
-	.cmd_queue = msgqueue_0148cdec_cmd_queue,
-	.recv = msgqueue_0148cdec_process_msgs,
-	.dtor = msgqueue_0148cdec_dtor,
-};
-
-int
-msgqueue_0148cdec_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb,
-		      struct nvkm_msgqueue **queue)
-{
-	struct msgqueue_0148cdec *ret;
-
-	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
-	if (!ret)
-		return -ENOMEM;
-
-	*queue = &ret->base;
-
-	nvkm_msgqueue_ctor(&msgqueue_0148cdec_func, falcon, &ret->base);
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h b/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h
index 900fe1d37b4d774bb5a991d8f40e73d9e7d73a94..466188752eb08f49afaa6dc1218dd44a68223e0e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h
@@ -1,9 +1,5 @@
 /* SPDX-License-Identifier: MIT */
 #ifndef __NVKM_FALCON_PRIV_H__
 #define __NVKM_FALCON_PRIV_H__
-#include <engine/falcon.h>
-
-void
-nvkm_falcon_ctor(const struct nvkm_falcon_func *, struct nvkm_subdev *,
-		 const char *, u32, struct nvkm_falcon *);
+#include <core/falcon.h>
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c
new file mode 100644
index 0000000000000000000000000000000000000000..a453de341a75de72b7f6ab9a13b2744c2de02986
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "qmgr.h"
+
+struct nvkm_falcon_qmgr_seq *
+nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr *qmgr)
+{
+	const struct nvkm_subdev *subdev = qmgr->falcon->owner;
+	struct nvkm_falcon_qmgr_seq *seq;
+	u32 index;
+
+	mutex_lock(&qmgr->seq.mutex);
+	index = find_first_zero_bit(qmgr->seq.tbl, NVKM_FALCON_QMGR_SEQ_NUM);
+	if (index >= NVKM_FALCON_QMGR_SEQ_NUM) {
+		nvkm_error(subdev, "no free sequence available\n");
+		mutex_unlock(&qmgr->seq.mutex);
+		return ERR_PTR(-EAGAIN);
+	}
+
+	set_bit(index, qmgr->seq.tbl);
+	mutex_unlock(&qmgr->seq.mutex);
+
+	seq = &qmgr->seq.id[index];
+	seq->state = SEQ_STATE_PENDING;
+	return seq;
+}
+
+void
+nvkm_falcon_qmgr_seq_release(struct nvkm_falcon_qmgr *qmgr,
+			     struct nvkm_falcon_qmgr_seq *seq)
+{
+	/* no need to acquire seq.mutex since clear_bit is atomic */
+	seq->state = SEQ_STATE_FREE;
+	seq->callback = NULL;
+	reinit_completion(&seq->done);
+	clear_bit(seq->id, qmgr->seq.tbl);
+}
+
+void
+nvkm_falcon_qmgr_del(struct nvkm_falcon_qmgr **pqmgr)
+{
+	struct nvkm_falcon_qmgr *qmgr = *pqmgr;
+	if (qmgr) {
+		kfree(*pqmgr);
+		*pqmgr = NULL;
+	}
+}
+
+int
+nvkm_falcon_qmgr_new(struct nvkm_falcon *falcon,
+		     struct nvkm_falcon_qmgr **pqmgr)
+{
+	struct nvkm_falcon_qmgr *qmgr;
+	int i;
+
+	if (!(qmgr = *pqmgr = kzalloc(sizeof(*qmgr), GFP_KERNEL)))
+		return -ENOMEM;
+
+	qmgr->falcon = falcon;
+	mutex_init(&qmgr->seq.mutex);
+	for (i = 0; i < NVKM_FALCON_QMGR_SEQ_NUM; i++) {
+		qmgr->seq.id[i].id = i;
+		init_completion(&qmgr->seq.id[i].done);
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h
new file mode 100644
index 0000000000000000000000000000000000000000..a45cd705e4f763794fb6834e86958c59d47e7735
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_FALCON_QMGR_H__
+#define __NVKM_FALCON_QMGR_H__
+#include <core/falcon.h>
+
+#define HDR_SIZE sizeof(struct nv_falcon_msg)
+#define QUEUE_ALIGNMENT 4
+/* max size of the messages we can receive */
+#define MSG_BUF_SIZE 128
+
+/**
+ * struct nvkm_falcon_qmgr_seq - keep track of ongoing commands
+ *
+ * Every time a command is sent, a sequence is assigned to it so the
+ * corresponding message can be matched. Upon receiving the message, a callback
+ * can be called and/or a completion signaled.
+ *
+ * @id: sequence ID
+ * @state: current state
+ * @async: if set, the sequence is released as soon as the callback has run,
+ *         without signaling @done
+ * @callback: callback to call upon receiving matching message
+ * @priv: private data passed to @callback
+ * @done: completion to signal once the matching message has been handled
+ * @result: return value of @callback, valid once @done is signaled
+ */
+struct nvkm_falcon_qmgr_seq {
+	u16 id;
+	enum {
+		SEQ_STATE_FREE = 0,
+		SEQ_STATE_PENDING,
+		SEQ_STATE_USED,
+		SEQ_STATE_CANCELLED
+	} state;
+	bool async;
+	nvkm_falcon_qmgr_callback callback;
+	void *priv;
+	struct completion done;
+	int result;
+};
+
+/*
+ * We can have an arbitrary number of sequences, but realistically we will
+ * probably not use that much simultaneously.
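+ *
+ * As a rough, illustrative sketch of the round-trip a sequence mediates
+ * (the sending side lives in the cmdq code, so the order below is
+ * indicative rather than exact):
+ *
+ *	seq = nvkm_falcon_qmgr_seq_acquire(qmgr); /* -EAGAIN when exhausted */
+ *	cmd->seq_id = seq->id;              /* stamp the outgoing command */
+ *	/* ...firmware replies with a message carrying the same seq_id... */
+ *	seq->result = seq->callback(seq->priv, hdr); /* msgq dispatch */
+ *	wait_for_completion(&seq->done);    /* skipped when seq->async */
+ *	nvkm_falcon_qmgr_seq_release(qmgr, seq);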
+ */ +#define NVKM_FALCON_QMGR_SEQ_NUM 16 + +struct nvkm_falcon_qmgr { + struct nvkm_falcon *falcon; + + struct { + struct mutex mutex; + struct nvkm_falcon_qmgr_seq id[NVKM_FALCON_QMGR_SEQ_NUM]; + unsigned long tbl[BITS_TO_LONGS(NVKM_FALCON_QMGR_SEQ_NUM)]; + } seq; +}; + +struct nvkm_falcon_qmgr_seq * +nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr *); +void nvkm_falcon_qmgr_seq_release(struct nvkm_falcon_qmgr *, + struct nvkm_falcon_qmgr_seq *); + +struct nvkm_falcon_cmdq { + struct nvkm_falcon_qmgr *qmgr; + const char *name; + struct mutex mutex; + struct completion ready; + + u32 head_reg; + u32 tail_reg; + u32 offset; + u32 size; + + u32 position; +}; + +struct nvkm_falcon_msgq { + struct nvkm_falcon_qmgr *qmgr; + const char *name; + struct mutex mutex; + + u32 head_reg; + u32 tail_reg; + u32 offset; + + u32 position; +}; + +#define FLCNQ_PRINTK(t,q,f,a...) \ + FLCN_PRINTK(t, (q)->qmgr->falcon, "%s: "f, (q)->name, ##a) +#define FLCNQ_DBG(q,f,a...) FLCNQ_PRINTK(debug, (q), f, ##a) +#define FLCNQ_ERR(q,f,a...) FLCNQ_PRINTK(error, (q), f, ##a) +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c index 6d978feebbd7637f79d83487f7e5ac6d502f3a98..1ff9b9c2e651ff8614d3d7718806258b0b892878 100644 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c @@ -25,7 +25,7 @@ #include #include -static void +void nvkm_falcon_v1_load_imem(struct nvkm_falcon *falcon, void *data, u32 start, u32 size, u16 tag, u8 port, bool secure) { @@ -89,18 +89,17 @@ nvkm_falcon_v1_load_emem(struct nvkm_falcon *falcon, void *data, u32 start, } } -static const u32 EMEM_START_ADDR = 0x1000000; - -static void +void nvkm_falcon_v1_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start, - u32 size, u8 port) + u32 size, u8 port) { + const struct nvkm_falcon_func *func = falcon->func; u8 rem = size % 4; int i; - if (start >= EMEM_START_ADDR && falcon->has_emem) + if (func->emem_addr && start >= func->emem_addr) return nvkm_falcon_v1_load_emem(falcon, data, - start - EMEM_START_ADDR, size, + start - func->emem_addr, size, port); size -= rem; @@ -148,15 +147,16 @@ nvkm_falcon_v1_read_emem(struct nvkm_falcon *falcon, u32 start, u32 size, } } -static void +void nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size, u8 port, void *data) { + const struct nvkm_falcon_func *func = falcon->func; u8 rem = size % 4; int i; - if (start >= EMEM_START_ADDR && falcon->has_emem) - return nvkm_falcon_v1_read_emem(falcon, start - EMEM_START_ADDR, + if (func->emem_addr && start >= func->emem_addr) + return nvkm_falcon_v1_read_emem(falcon, start - func->emem_addr, size, port, data); size -= rem; @@ -179,12 +179,11 @@ nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size, } } -static void +void nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *ctx) { - struct nvkm_device *device = falcon->owner->device; + const u32 fbif = falcon->func->fbif; u32 inst_loc; - u32 fbif; /* disable instance block binding */ if (ctx == NULL) { @@ -192,20 +191,6 @@ nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *ctx) return; } - switch (falcon->owner->index) { - case NVKM_ENGINE_NVENC0: - case NVKM_ENGINE_NVENC1: - case NVKM_ENGINE_NVENC2: - fbif = 0x800; - break; - case NVKM_SUBDEV_PMU: - fbif = 0xe00; - break; - default: - fbif = 0x600; - break; - } - nvkm_falcon_wr32(falcon, 0x10c, 0x1); /* setup apertures - virtual */ @@ -234,50 +219,15 @@ nvkm_falcon_v1_bind_context(struct 
nvkm_falcon *falcon, struct nvkm_memory *ctx) nvkm_falcon_mask(falcon, 0x090, 0x10000, 0x10000); nvkm_falcon_mask(falcon, 0x0a4, 0x8, 0x8); - - /* Not sure if this is a WAR for a HW issue, or some additional - * programming sequence that's needed to properly complete the - * context switch we trigger above. - * - * Fixes unreliability of booting the SEC2 RTOS on Quadro P620, - * particularly when resuming from suspend. - * - * Also removes the need for an odd workaround where we needed - * to program SEC2's FALCON_CPUCTL_ALIAS_STARTCPU twice before - * the SEC2 RTOS would begin executing. - */ - switch (falcon->owner->index) { - case NVKM_SUBDEV_GSP: - case NVKM_ENGINE_SEC2: - nvkm_msec(device, 10, - u32 irqstat = nvkm_falcon_rd32(falcon, 0x008); - u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc); - if ((irqstat & 0x00000008) && - (flcn0dc & 0x00007000) == 0x00005000) - break; - ); - - nvkm_falcon_mask(falcon, 0x004, 0x00000008, 0x00000008); - nvkm_falcon_mask(falcon, 0x058, 0x00000002, 0x00000002); - - nvkm_msec(device, 10, - u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc); - if ((flcn0dc & 0x00007000) == 0x00000000) - break; - ); - break; - default: - break; - } } -static void +void nvkm_falcon_v1_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr) { nvkm_falcon_wr32(falcon, 0x104, start_addr); } -static void +void nvkm_falcon_v1_start(struct nvkm_falcon *falcon) { u32 reg = nvkm_falcon_rd32(falcon, 0x100); @@ -288,7 +238,7 @@ nvkm_falcon_v1_start(struct nvkm_falcon *falcon) nvkm_falcon_wr32(falcon, 0x100, 0x2); } -static int +int nvkm_falcon_v1_wait_for_halt(struct nvkm_falcon *falcon, u32 ms) { struct nvkm_device *device = falcon->owner->device; @@ -301,7 +251,7 @@ nvkm_falcon_v1_wait_for_halt(struct nvkm_falcon *falcon, u32 ms) return 0; } -static int +int nvkm_falcon_v1_clear_interrupt(struct nvkm_falcon *falcon, u32 mask) { struct nvkm_device *device = falcon->owner->device; @@ -330,7 +280,7 @@ falcon_v1_wait_idle(struct nvkm_falcon *falcon) return 0; } -static int +int nvkm_falcon_v1_enable(struct nvkm_falcon *falcon) { struct nvkm_device *device = falcon->owner->device; @@ -352,7 +302,7 @@ nvkm_falcon_v1_enable(struct nvkm_falcon *falcon) return 0; } -static void +void nvkm_falcon_v1_disable(struct nvkm_falcon *falcon) { /* disable IRQs and wait for any previous code to complete */ diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/Kbuild b/drivers/gpu/drm/nouveau/nvkm/nvfw/Kbuild new file mode 100644 index 0000000000000000000000000000000000000000..41d75f98e60303fc00df4d69e5a7677ce1abd9f9 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/Kbuild @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: MIT +nvkm-y += nvkm/nvfw/fw.o +nvkm-y += nvkm/nvfw/hs.o +nvkm-y += nvkm/nvfw/ls.o + +nvkm-y += nvkm/nvfw/acr.o +nvkm-y += nvkm/nvfw/flcn.o diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c b/drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c new file mode 100644 index 0000000000000000000000000000000000000000..0d063b8317f7248948b7b20a46b5017de8a05a27 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c @@ -0,0 +1,165 @@ +/* + * Copyright 2019 Red Hat Inc. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <core/subdev.h>
+#include <nvfw/acr.h>
+
+void
+wpr_header_dump(struct nvkm_subdev *subdev, const struct wpr_header *hdr)
+{
+	nvkm_debug(subdev, "wprHeader\n");
+	nvkm_debug(subdev, "\tfalconID : %d\n", hdr->falcon_id);
+	nvkm_debug(subdev, "\tlsbOffset : 0x%x\n", hdr->lsb_offset);
+	nvkm_debug(subdev, "\tbootstrapOwner: %d\n", hdr->bootstrap_owner);
+	nvkm_debug(subdev, "\tlazyBootstrap : %d\n", hdr->lazy_bootstrap);
+	nvkm_debug(subdev, "\tstatus : %d\n", hdr->status);
+}
+
+void
+wpr_header_v1_dump(struct nvkm_subdev *subdev, const struct wpr_header_v1 *hdr)
+{
+	nvkm_debug(subdev, "wprHeader\n");
+	nvkm_debug(subdev, "\tfalconID : %d\n", hdr->falcon_id);
+	nvkm_debug(subdev, "\tlsbOffset : 0x%x\n", hdr->lsb_offset);
+	nvkm_debug(subdev, "\tbootstrapOwner: %d\n", hdr->bootstrap_owner);
+	nvkm_debug(subdev, "\tlazyBootstrap : %d\n", hdr->lazy_bootstrap);
+	nvkm_debug(subdev, "\tbinVersion : %d\n", hdr->bin_version);
+	nvkm_debug(subdev, "\tstatus : %d\n", hdr->status);
+}
+
+void
+lsb_header_tail_dump(struct nvkm_subdev *subdev,
+		     struct lsb_header_tail *hdr)
+{
+	nvkm_debug(subdev, "lsbHeader\n");
+	nvkm_debug(subdev, "\tucodeOff : 0x%x\n", hdr->ucode_off);
+	nvkm_debug(subdev, "\tucodeSize : 0x%x\n", hdr->ucode_size);
+	nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
+	nvkm_debug(subdev, "\tblCodeSize : 0x%x\n", hdr->bl_code_size);
+	nvkm_debug(subdev, "\tblImemOff : 0x%x\n", hdr->bl_imem_off);
+	nvkm_debug(subdev, "\tblDataOff : 0x%x\n", hdr->bl_data_off);
+	nvkm_debug(subdev, "\tblDataSize : 0x%x\n", hdr->bl_data_size);
+	nvkm_debug(subdev, "\tappCodeOff : 0x%x\n", hdr->app_code_off);
+	nvkm_debug(subdev, "\tappCodeSize : 0x%x\n", hdr->app_code_size);
+	nvkm_debug(subdev, "\tappDataOff : 0x%x\n", hdr->app_data_off);
+	nvkm_debug(subdev, "\tappDataSize : 0x%x\n", hdr->app_data_size);
+	nvkm_debug(subdev, "\tflags : 0x%x\n", hdr->flags);
+}
+
+void
+lsb_header_dump(struct nvkm_subdev *subdev, struct lsb_header *hdr)
+{
+	lsb_header_tail_dump(subdev, &hdr->tail);
+}
+
+void
+lsb_header_v1_dump(struct nvkm_subdev *subdev, struct lsb_header_v1 *hdr)
+{
+	lsb_header_tail_dump(subdev, &hdr->tail);
+}
+
+void
+flcn_acr_desc_dump(struct nvkm_subdev *subdev, struct flcn_acr_desc *hdr)
+{
+	int i;
+
+	nvkm_debug(subdev, "acrDesc\n");
+	nvkm_debug(subdev, "\twprRegionId : %d\n", hdr->wpr_region_id);
+	nvkm_debug(subdev, "\twprOffset : 0x%x\n", hdr->wpr_offset);
+	nvkm_debug(subdev, "\tmmuMemRange : 0x%x\n",
+
hdr->mmu_mem_range); + nvkm_debug(subdev, "\tnoRegions : %d\n", + hdr->regions.no_regions); + + for (i = 0; i < ARRAY_SIZE(hdr->regions.region_props); i++) { + nvkm_debug(subdev, "\tregion[%d] :\n", i); + nvkm_debug(subdev, "\t startAddr : 0x%x\n", + hdr->regions.region_props[i].start_addr); + nvkm_debug(subdev, "\t endAddr : 0x%x\n", + hdr->regions.region_props[i].end_addr); + nvkm_debug(subdev, "\t regionId : %d\n", + hdr->regions.region_props[i].region_id); + nvkm_debug(subdev, "\t readMask : 0x%x\n", + hdr->regions.region_props[i].read_mask); + nvkm_debug(subdev, "\t writeMask : 0x%x\n", + hdr->regions.region_props[i].write_mask); + nvkm_debug(subdev, "\t clientMask : 0x%x\n", + hdr->regions.region_props[i].client_mask); + } + + nvkm_debug(subdev, "\tucodeBlobSize: %d\n", + hdr->ucode_blob_size); + nvkm_debug(subdev, "\tucodeBlobBase: 0x%llx\n", + hdr->ucode_blob_base); + nvkm_debug(subdev, "\tvprEnabled : %d\n", + hdr->vpr_desc.vpr_enabled); + nvkm_debug(subdev, "\tvprStart : 0x%x\n", + hdr->vpr_desc.vpr_start); + nvkm_debug(subdev, "\tvprEnd : 0x%x\n", + hdr->vpr_desc.vpr_end); + nvkm_debug(subdev, "\thdcpPolicies : 0x%x\n", + hdr->vpr_desc.hdcp_policies); +} + +void +flcn_acr_desc_v1_dump(struct nvkm_subdev *subdev, struct flcn_acr_desc_v1 *hdr) +{ + int i; + + nvkm_debug(subdev, "acrDesc\n"); + nvkm_debug(subdev, "\twprRegionId : %d\n", hdr->wpr_region_id); + nvkm_debug(subdev, "\twprOffset : 0x%x\n", hdr->wpr_offset); + nvkm_debug(subdev, "\tmmuMemoryRange : 0x%x\n", + hdr->mmu_memory_range); + nvkm_debug(subdev, "\tnoRegions : %d\n", + hdr->regions.no_regions); + + for (i = 0; i < ARRAY_SIZE(hdr->regions.region_props); i++) { + nvkm_debug(subdev, "\tregion[%d] :\n", i); + nvkm_debug(subdev, "\t startAddr : 0x%x\n", + hdr->regions.region_props[i].start_addr); + nvkm_debug(subdev, "\t endAddr : 0x%x\n", + hdr->regions.region_props[i].end_addr); + nvkm_debug(subdev, "\t regionId : %d\n", + hdr->regions.region_props[i].region_id); + nvkm_debug(subdev, "\t readMask : 0x%x\n", + hdr->regions.region_props[i].read_mask); + nvkm_debug(subdev, "\t writeMask : 0x%x\n", + hdr->regions.region_props[i].write_mask); + nvkm_debug(subdev, "\t clientMask : 0x%x\n", + hdr->regions.region_props[i].client_mask); + nvkm_debug(subdev, "\t shadowMemStartAddr: 0x%x\n", + hdr->regions.region_props[i].shadow_mem_start_addr); + } + + nvkm_debug(subdev, "\tucodeBlobSize : %d\n", + hdr->ucode_blob_size); + nvkm_debug(subdev, "\tucodeBlobBase : 0x%llx\n", + hdr->ucode_blob_base); + nvkm_debug(subdev, "\tvprEnabled : %d\n", + hdr->vpr_desc.vpr_enabled); + nvkm_debug(subdev, "\tvprStart : 0x%x\n", + hdr->vpr_desc.vpr_start); + nvkm_debug(subdev, "\tvprEnd : 0x%x\n", + hdr->vpr_desc.vpr_end); + nvkm_debug(subdev, "\thdcpPolicies : 0x%x\n", + hdr->vpr_desc.hdcp_policies); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/flcn.c b/drivers/gpu/drm/nouveau/nvkm/nvfw/flcn.c new file mode 100644 index 0000000000000000000000000000000000000000..00ec764e1aab1207a3f9c1220cf29fa279549c40 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/flcn.c @@ -0,0 +1,115 @@ +/* + * Copyright 2019 Red Hat Inc. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <core/subdev.h>
+#include <nvfw/flcn.h>
+
+void
+loader_config_dump(struct nvkm_subdev *subdev, const struct loader_config *hdr)
+{
+ nvkm_debug(subdev, "loaderConfig\n");
+ nvkm_debug(subdev, "\tdmaIdx : %d\n", hdr->dma_idx);
+ nvkm_debug(subdev, "\tcodeDmaBase : 0x%x\n", hdr->code_dma_base);
+ nvkm_debug(subdev, "\tcodeSizeTotal : 0x%x\n", hdr->code_size_total);
+ nvkm_debug(subdev, "\tcodeSizeToLoad: 0x%x\n", hdr->code_size_to_load);
+ nvkm_debug(subdev, "\tcodeEntryPoint: 0x%x\n", hdr->code_entry_point);
+ nvkm_debug(subdev, "\tdataDmaBase : 0x%x\n", hdr->data_dma_base);
+ nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
+ nvkm_debug(subdev, "\toverlayDmaBase: 0x%x\n", hdr->overlay_dma_base);
+ nvkm_debug(subdev, "\targc : 0x%08x\n", hdr->argc);
+ nvkm_debug(subdev, "\targv : 0x%08x\n", hdr->argv);
+ nvkm_debug(subdev, "\tcodeDmaBase1 : 0x%x\n", hdr->code_dma_base1);
+ nvkm_debug(subdev, "\tdataDmaBase1 : 0x%x\n", hdr->data_dma_base1);
+ nvkm_debug(subdev, "\tovlyDmaBase1 : 0x%x\n", hdr->overlay_dma_base1);
+}
+
+void
+loader_config_v1_dump(struct nvkm_subdev *subdev,
+ const struct loader_config_v1 *hdr)
+{
+ nvkm_debug(subdev, "loaderConfig\n");
+ nvkm_debug(subdev, "\treserved : 0x%08x\n", hdr->reserved);
+ nvkm_debug(subdev, "\tdmaIdx : %d\n", hdr->dma_idx);
+ nvkm_debug(subdev, "\tcodeDmaBase : 0x%llx\n", hdr->code_dma_base);
+ nvkm_debug(subdev, "\tcodeSizeTotal : 0x%x\n", hdr->code_size_total);
+ nvkm_debug(subdev, "\tcodeSizeToLoad: 0x%x\n", hdr->code_size_to_load);
+ nvkm_debug(subdev, "\tcodeEntryPoint: 0x%x\n", hdr->code_entry_point);
+ nvkm_debug(subdev, "\tdataDmaBase : 0x%llx\n", hdr->data_dma_base);
+ nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
+ nvkm_debug(subdev, "\toverlayDmaBase: 0x%llx\n", hdr->overlay_dma_base);
+ nvkm_debug(subdev, "\targc : 0x%08x\n", hdr->argc);
+ nvkm_debug(subdev, "\targv : 0x%08x\n", hdr->argv);
+}
+
+void
+flcn_bl_dmem_desc_dump(struct nvkm_subdev *subdev,
+ const struct flcn_bl_dmem_desc *hdr)
+{
+ nvkm_debug(subdev, "flcnBlDmemDesc\n");
+ nvkm_debug(subdev, "\treserved : 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ hdr->reserved[0], hdr->reserved[1], hdr->reserved[2],
+ hdr->reserved[3]);
+ nvkm_debug(subdev, "\tsignature : 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ hdr->signature[0], hdr->signature[1], hdr->signature[2],
+ hdr->signature[3]);
+ nvkm_debug(subdev, "\tctxDma : %d\n", hdr->ctx_dma);
+ nvkm_debug(subdev, "\tcodeDmaBase : 0x%x\n", hdr->code_dma_base);
+
nvkm_debug(subdev, "\tnonSecCodeOff : 0x%x\n", hdr->non_sec_code_off); + nvkm_debug(subdev, "\tnonSecCodeSize: 0x%x\n", hdr->non_sec_code_size); + nvkm_debug(subdev, "\tsecCodeOff : 0x%x\n", hdr->sec_code_off); + nvkm_debug(subdev, "\tsecCodeSize : 0x%x\n", hdr->sec_code_size); + nvkm_debug(subdev, "\tcodeEntryPoint: 0x%x\n", hdr->code_entry_point); + nvkm_debug(subdev, "\tdataDmaBase : 0x%x\n", hdr->data_dma_base); + nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size); + nvkm_debug(subdev, "\tcodeDmaBase1 : 0x%x\n", hdr->code_dma_base1); + nvkm_debug(subdev, "\tdataDmaBase1 : 0x%x\n", hdr->data_dma_base1); +} + +void +flcn_bl_dmem_desc_v1_dump(struct nvkm_subdev *subdev, + const struct flcn_bl_dmem_desc_v1 *hdr) +{ + nvkm_debug(subdev, "flcnBlDmemDesc\n"); + nvkm_debug(subdev, "\treserved : 0x%08x 0x%08x 0x%08x 0x%08x\n", + hdr->reserved[0], hdr->reserved[1], hdr->reserved[2], + hdr->reserved[3]); + nvkm_debug(subdev, "\tsignature : 0x%08x 0x%08x 0x%08x 0x%08x\n", + hdr->signature[0], hdr->signature[1], hdr->signature[2], + hdr->signature[3]); + nvkm_debug(subdev, "\tctxDma : %d\n", hdr->ctx_dma); + nvkm_debug(subdev, "\tcodeDmaBase : 0x%llx\n", hdr->code_dma_base); + nvkm_debug(subdev, "\tnonSecCodeOff : 0x%x\n", hdr->non_sec_code_off); + nvkm_debug(subdev, "\tnonSecCodeSize: 0x%x\n", hdr->non_sec_code_size); + nvkm_debug(subdev, "\tsecCodeOff : 0x%x\n", hdr->sec_code_off); + nvkm_debug(subdev, "\tsecCodeSize : 0x%x\n", hdr->sec_code_size); + nvkm_debug(subdev, "\tcodeEntryPoint: 0x%x\n", hdr->code_entry_point); + nvkm_debug(subdev, "\tdataDmaBase : 0x%llx\n", hdr->data_dma_base); + nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size); +} + +void +flcn_bl_dmem_desc_v2_dump(struct nvkm_subdev *subdev, + const struct flcn_bl_dmem_desc_v2 *hdr) +{ + flcn_bl_dmem_desc_v1_dump(subdev, (void *)hdr); + nvkm_debug(subdev, "\targc : 0x%08x\n", hdr->argc); + nvkm_debug(subdev, "\targv : 0x%08x\n", hdr->argv); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/fw.c b/drivers/gpu/drm/nouveau/nvkm/nvfw/fw.c new file mode 100644 index 0000000000000000000000000000000000000000..746803bd53181fefd35561ba7f9718ad82b91e89 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/fw.c @@ -0,0 +1,51 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include +#include + +const struct nvfw_bin_hdr * +nvfw_bin_hdr(struct nvkm_subdev *subdev, const void *data) +{ + const struct nvfw_bin_hdr *hdr = data; + nvkm_debug(subdev, "binHdr:\n"); + nvkm_debug(subdev, "\tbinMagic : 0x%08x\n", hdr->bin_magic); + nvkm_debug(subdev, "\tbinVer : %d\n", hdr->bin_ver); + nvkm_debug(subdev, "\tbinSize : %d\n", hdr->bin_size); + nvkm_debug(subdev, "\theaderOffset : 0x%x\n", hdr->header_offset); + nvkm_debug(subdev, "\tdataOffset : 0x%x\n", hdr->data_offset); + nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size); + return hdr; +} + +const struct nvfw_bl_desc * +nvfw_bl_desc(struct nvkm_subdev *subdev, const void *data) +{ + const struct nvfw_bl_desc *hdr = data; + nvkm_debug(subdev, "blDesc\n"); + nvkm_debug(subdev, "\tstartTag : 0x%x\n", hdr->start_tag); + nvkm_debug(subdev, "\tdmemLoadOff : 0x%x\n", hdr->dmem_load_off); + nvkm_debug(subdev, "\tcodeOff : 0x%x\n", hdr->code_off); + nvkm_debug(subdev, "\tcodeSize : 0x%x\n", hdr->code_size); + nvkm_debug(subdev, "\tdataOff : 0x%x\n", hdr->data_off); + nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size); + return hdr; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/hs.c b/drivers/gpu/drm/nouveau/nvkm/nvfw/hs.c new file mode 100644 index 0000000000000000000000000000000000000000..04ed77cb2ebabee2ff1ba55167e054f6294a23d2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/hs.c @@ -0,0 +1,62 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include +#include + +const struct nvfw_hs_header * +nvfw_hs_header(struct nvkm_subdev *subdev, const void *data) +{ + const struct nvfw_hs_header *hdr = data; + nvkm_debug(subdev, "hsHeader:\n"); + nvkm_debug(subdev, "\tsigDbgOffset : 0x%x\n", hdr->sig_dbg_offset); + nvkm_debug(subdev, "\tsigDbgSize : 0x%x\n", hdr->sig_dbg_size); + nvkm_debug(subdev, "\tsigProdOffset : 0x%x\n", hdr->sig_prod_offset); + nvkm_debug(subdev, "\tsigProdSize : 0x%x\n", hdr->sig_prod_size); + nvkm_debug(subdev, "\tpatchLoc : 0x%x\n", hdr->patch_loc); + nvkm_debug(subdev, "\tpatchSig : 0x%x\n", hdr->patch_sig); + nvkm_debug(subdev, "\thdrOffset : 0x%x\n", hdr->hdr_offset); + nvkm_debug(subdev, "\thdrSize : 0x%x\n", hdr->hdr_size); + return hdr; +} + +const struct nvfw_hs_load_header * +nvfw_hs_load_header(struct nvkm_subdev *subdev, const void *data) +{ + const struct nvfw_hs_load_header *hdr = data; + int i; + + nvkm_debug(subdev, "hsLoadHeader:\n"); + nvkm_debug(subdev, "\tnonSecCodeOff : 0x%x\n", + hdr->non_sec_code_off); + nvkm_debug(subdev, "\tnonSecCodeSize : 0x%x\n", + hdr->non_sec_code_size); + nvkm_debug(subdev, "\tdataDmaBase : 0x%x\n", hdr->data_dma_base); + nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size); + nvkm_debug(subdev, "\tnumApps : 0x%x\n", hdr->num_apps); + for (i = 0; i < hdr->num_apps; i++) { + nvkm_debug(subdev, + "\tApp[%d] : offset 0x%x size 0x%x\n", i, + hdr->apps[(i * 2) + 0], hdr->apps[(i * 2) + 1]); + } + + return hdr; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/ls.c b/drivers/gpu/drm/nouveau/nvkm/nvfw/ls.c new file mode 100644 index 0000000000000000000000000000000000000000..b847f281ce975cb4348c1412d02bf8596306bbc2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/ls.c @@ -0,0 +1,108 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */
+#include <core/subdev.h>
+#include <nvfw/ls.h>
+
+static void
+nvfw_ls_desc_head(struct nvkm_subdev *subdev,
+ const struct nvfw_ls_desc_head *hdr)
+{
+ char *date;
+
+ nvkm_debug(subdev, "lsUcodeImgDesc:\n");
+ nvkm_debug(subdev, "\tdescriptorSize : %d\n",
+ hdr->descriptor_size);
+ nvkm_debug(subdev, "\timageSize : %d\n", hdr->image_size);
+ nvkm_debug(subdev, "\ttoolsVersion : 0x%x\n",
+ hdr->tools_version);
+ nvkm_debug(subdev, "\tappVersion : 0x%x\n", hdr->app_version);
+
+ /* The descriptor's date field isn't guaranteed to be NUL-terminated,
+ * so take a bounded copy before printing it.
+ */
+ date = kstrndup(hdr->date, sizeof(hdr->date), GFP_KERNEL);
+ nvkm_debug(subdev, "\tdate : %s\n", date);
+ kfree(date);
+
+ nvkm_debug(subdev, "\tbootloaderStartOffset: 0x%x\n",
+ hdr->bootloader_start_offset);
+ nvkm_debug(subdev, "\tbootloaderSize : 0x%x\n",
+ hdr->bootloader_size);
+ nvkm_debug(subdev, "\tbootloaderImemOffset : 0x%x\n",
+ hdr->bootloader_imem_offset);
+ nvkm_debug(subdev, "\tbootloaderEntryPoint : 0x%x\n",
+ hdr->bootloader_entry_point);
+
+ nvkm_debug(subdev, "\tappStartOffset : 0x%x\n",
+ hdr->app_start_offset);
+ nvkm_debug(subdev, "\tappSize : 0x%x\n", hdr->app_size);
+ nvkm_debug(subdev, "\tappImemOffset : 0x%x\n",
+ hdr->app_imem_offset);
+ nvkm_debug(subdev, "\tappImemEntry : 0x%x\n",
+ hdr->app_imem_entry);
+ nvkm_debug(subdev, "\tappDmemOffset : 0x%x\n",
+ hdr->app_dmem_offset);
+ nvkm_debug(subdev, "\tappResidentCodeOffset: 0x%x\n",
+ hdr->app_resident_code_offset);
+ nvkm_debug(subdev, "\tappResidentCodeSize : 0x%x\n",
+ hdr->app_resident_code_size);
+ nvkm_debug(subdev, "\tappResidentDataOffset: 0x%x\n",
+ hdr->app_resident_data_offset);
+ nvkm_debug(subdev, "\tappResidentDataSize : 0x%x\n",
+ hdr->app_resident_data_size);
+}
+
+const struct nvfw_ls_desc *
+nvfw_ls_desc(struct nvkm_subdev *subdev, const void *data)
+{
+ const struct nvfw_ls_desc *hdr = data;
+ int i;
+
+ nvfw_ls_desc_head(subdev, &hdr->head);
+
+ nvkm_debug(subdev, "\tnbOverlays : %d\n", hdr->nb_overlays);
+ for (i = 0; i < ARRAY_SIZE(hdr->load_ovl); i++) {
+ nvkm_debug(subdev, "\tloadOvl[%d] : 0x%x %d\n", i,
+ hdr->load_ovl[i].start, hdr->load_ovl[i].size);
+ }
+ nvkm_debug(subdev, "\tcompressed : %d\n", hdr->compressed);
+
+ return hdr;
+}
+
+const struct nvfw_ls_desc_v1 *
+nvfw_ls_desc_v1(struct nvkm_subdev *subdev, const void *data)
+{
+ const struct nvfw_ls_desc_v1 *hdr = data;
+ int i;
+
+ nvfw_ls_desc_head(subdev, &hdr->head);
+
+ nvkm_debug(subdev, "\tnbImemOverlays : %d\n",
+ hdr->nb_imem_overlays);
+ nvkm_debug(subdev, "\tnbDmemOverlays : %d\n",
+ hdr->nb_dmem_overlays);
+ for (i = 0; i < ARRAY_SIZE(hdr->load_ovl); i++) {
+ nvkm_debug(subdev, "\tloadOvl[%2d] : 0x%x %d\n", i,
+ hdr->load_ovl[i].start, hdr->load_ovl[i].size);
+ }
+ nvkm_debug(subdev, "\tcompressed : %d\n", hdr->compressed);
+
+ return hdr;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
index 4e136f3d7c28f0bd9068aa7a3be4160d8144f361..fb4fff1222afea261c23ade9b13dd95f44f7ddb4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: MIT
+include $(src)/nvkm/subdev/acr/Kbuild
 include $(src)/nvkm/subdev/bar/Kbuild
 include $(src)/nvkm/subdev/bios/Kbuild
 include $(src)/nvkm/subdev/bus/Kbuild
@@ -19,7 +20,6 @@ include $(src)/nvkm/subdev/mmu/Kbuild
 include $(src)/nvkm/subdev/mxm/Kbuild
 include $(src)/nvkm/subdev/pci/Kbuild
 include $(src)/nvkm/subdev/pmu/Kbuild
-include $(src)/nvkm/subdev/secboot/Kbuild
 include $(src)/nvkm/subdev/therm/Kbuild
 include $(src)/nvkm/subdev/timer/Kbuild
 include $(src)/nvkm/subdev/top/Kbuild
diff --git 
a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/Kbuild new file mode 100644 index 0000000000000000000000000000000000000000..5b9f64a8957f8f7b210f561511e74a316e0526b1 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/Kbuild @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: MIT +nvkm-y += nvkm/subdev/acr/base.o +nvkm-y += nvkm/subdev/acr/hsfw.o +nvkm-y += nvkm/subdev/acr/lsfw.o +nvkm-y += nvkm/subdev/acr/gm200.o +nvkm-y += nvkm/subdev/acr/gm20b.o +nvkm-y += nvkm/subdev/acr/gp102.o +nvkm-y += nvkm/subdev/acr/gp108.o +nvkm-y += nvkm/subdev/acr/gp10b.o +nvkm-y += nvkm/subdev/acr/tu102.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c new file mode 100644 index 0000000000000000000000000000000000000000..8eb2a930a9b5e33b857cb4af43f399ff7eaf747f --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c @@ -0,0 +1,411 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include "priv.h" + +#include +#include +#include + +static struct nvkm_acr_hsf * +nvkm_acr_hsf_find(struct nvkm_acr *acr, const char *name) +{ + struct nvkm_acr_hsf *hsf; + list_for_each_entry(hsf, &acr->hsf, head) { + if (!strcmp(hsf->name, name)) + return hsf; + } + return NULL; +} + +int +nvkm_acr_hsf_boot(struct nvkm_acr *acr, const char *name) +{ + struct nvkm_subdev *subdev = &acr->subdev; + struct nvkm_acr_hsf *hsf; + int ret; + + hsf = nvkm_acr_hsf_find(acr, name); + if (!hsf) + return -EINVAL; + + nvkm_debug(subdev, "executing %s binary\n", hsf->name); + ret = nvkm_falcon_get(hsf->falcon, subdev); + if (ret) + return ret; + + ret = hsf->func->boot(acr, hsf); + nvkm_falcon_put(hsf->falcon, subdev); + if (ret) { + nvkm_error(subdev, "%s binary failed\n", hsf->name); + return ret; + } + + nvkm_debug(subdev, "%s binary completed successfully\n", hsf->name); + return 0; +} + +static void +nvkm_acr_unload(struct nvkm_acr *acr) +{ + if (acr->done) { + nvkm_acr_hsf_boot(acr, "unload"); + acr->done = false; + } +} + +static int +nvkm_acr_load(struct nvkm_acr *acr) +{ + struct nvkm_subdev *subdev = &acr->subdev; + struct nvkm_acr_lsf *lsf; + u64 start, limit; + int ret; + + if (list_empty(&acr->lsf)) { + nvkm_debug(subdev, "No LSF(s) present.\n"); + return 0; + } + + ret = acr->func->init(acr); + if (ret) + return ret; + + acr->func->wpr_check(acr, &start, &limit); + + if (start != acr->wpr_start || limit != acr->wpr_end) { + nvkm_error(subdev, "WPR not configured as expected: " + "%016llx-%016llx vs %016llx-%016llx\n", + acr->wpr_start, acr->wpr_end, start, limit); + return -EIO; + } + + acr->done = true; + + list_for_each_entry(lsf, &acr->lsf, head) { + if (lsf->func->boot) { + ret = lsf->func->boot(lsf->falcon); + if (ret) + break; + } + } + + return ret; +} + +static int +nvkm_acr_reload(struct nvkm_acr *acr) +{ + nvkm_acr_unload(acr); + return nvkm_acr_load(acr); +} + +static struct nvkm_acr_lsf * +nvkm_acr_falcon(struct nvkm_device *device) +{ + struct nvkm_acr *acr = device->acr; + struct nvkm_acr_lsf *lsf; + + if (acr) { + list_for_each_entry(lsf, &acr->lsf, head) { + if (lsf->func->bootstrap_falcon) + return lsf; + } + } + + return NULL; +} + +int +nvkm_acr_bootstrap_falcons(struct nvkm_device *device, unsigned long mask) +{ + struct nvkm_acr_lsf *acrflcn = nvkm_acr_falcon(device); + struct nvkm_acr *acr = device->acr; + unsigned long id; + + if (!acrflcn) { + int ret = nvkm_acr_reload(acr); + if (ret) + return ret; + + return acr->done ? 
0 : -EINVAL; + } + + if (acrflcn->func->bootstrap_multiple_falcons) { + return acrflcn->func-> + bootstrap_multiple_falcons(acrflcn->falcon, mask); + } + + for_each_set_bit(id, &mask, NVKM_ACR_LSF_NUM) { + int ret = acrflcn->func->bootstrap_falcon(acrflcn->falcon, id); + if (ret) + return ret; + } + + return 0; +} + +bool +nvkm_acr_managed_falcon(struct nvkm_device *device, enum nvkm_acr_lsf_id id) +{ + struct nvkm_acr *acr = device->acr; + struct nvkm_acr_lsf *lsf; + + if (acr) { + list_for_each_entry(lsf, &acr->lsf, head) { + if (lsf->id == id) + return true; + } + } + + return false; +} + +static int +nvkm_acr_fini(struct nvkm_subdev *subdev, bool suspend) +{ + nvkm_acr_unload(nvkm_acr(subdev)); + return 0; +} + +static int +nvkm_acr_init(struct nvkm_subdev *subdev) +{ + if (!nvkm_acr_falcon(subdev->device)) + return 0; + + return nvkm_acr_load(nvkm_acr(subdev)); +} + +static void +nvkm_acr_cleanup(struct nvkm_acr *acr) +{ + nvkm_acr_lsfw_del_all(acr); + nvkm_acr_hsfw_del_all(acr); + nvkm_firmware_put(acr->wpr_fw); + acr->wpr_fw = NULL; +} + +static int +nvkm_acr_oneinit(struct nvkm_subdev *subdev) +{ + struct nvkm_device *device = subdev->device; + struct nvkm_acr *acr = nvkm_acr(subdev); + struct nvkm_acr_hsfw *hsfw; + struct nvkm_acr_lsfw *lsfw, *lsft; + struct nvkm_acr_lsf *lsf; + u32 wpr_size = 0; + int ret, i; + + if (list_empty(&acr->hsfw)) { + nvkm_debug(subdev, "No HSFW(s)\n"); + nvkm_acr_cleanup(acr); + return 0; + } + + /* Determine layout/size of WPR image up-front, as we need to know + * it to allocate memory before we begin constructing it. + */ + list_for_each_entry_safe(lsfw, lsft, &acr->lsfw, head) { + /* Cull unknown falcons that are present in WPR image. */ + if (acr->wpr_fw) { + if (!lsfw->func) { + nvkm_acr_lsfw_del(lsfw); + continue; + } + + wpr_size = acr->wpr_fw->size; + } + + /* Ensure we've fetched falcon configuration. */ + ret = nvkm_falcon_get(lsfw->falcon, subdev); + if (ret) + return ret; + + nvkm_falcon_put(lsfw->falcon, subdev); + + if (!(lsf = kmalloc(sizeof(*lsf), GFP_KERNEL))) + return -ENOMEM; + lsf->func = lsfw->func; + lsf->falcon = lsfw->falcon; + lsf->id = lsfw->id; + list_add_tail(&lsf->head, &acr->lsf); + } + + if (!acr->wpr_fw || acr->wpr_comp) + wpr_size = acr->func->wpr_layout(acr); + + /* Allocate/Locate WPR + fill ucode blob pointer. + * + * dGPU: allocate WPR + shadow blob + * Tegra: locate WPR with regs, ensure size is sufficient, + * allocate ucode blob. + */ + ret = acr->func->wpr_alloc(acr, wpr_size); + if (ret) + return ret; + + nvkm_debug(subdev, "WPR region is from 0x%llx-0x%llx (shadow 0x%llx)\n", + acr->wpr_start, acr->wpr_end, acr->shadow_start); + + /* Write WPR to ucode blob. */ + nvkm_kmap(acr->wpr); + if (acr->wpr_fw && !acr->wpr_comp) + nvkm_wobj(acr->wpr, 0, acr->wpr_fw->data, acr->wpr_fw->size); + + if (!acr->wpr_fw || acr->wpr_comp) + acr->func->wpr_build(acr, nvkm_acr_falcon(device)); + acr->func->wpr_patch(acr, (s64)acr->wpr_start - acr->wpr_prev); + + if (acr->wpr_fw && acr->wpr_comp) { + nvkm_kmap(acr->wpr); + for (i = 0; i < acr->wpr_fw->size; i += 4) { + u32 us = nvkm_ro32(acr->wpr, i); + u32 fw = ((u32 *)acr->wpr_fw->data)[i/4]; + if (fw != us) { + nvkm_warn(subdev, "%08x: %08x %08x\n", + i, us, fw); + } + } + return -EINVAL; + } + nvkm_done(acr->wpr); + + /* Allocate instance block for ACR-related stuff. 
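+ * The VMM created below is joined to this instance block; HS firmware
+ * images are mapped into that VMM before execution, and the falcon is
+ * bound to the instance block prior to starting each HS binary.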
*/ + ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, true, + &acr->inst); + if (ret) + return ret; + + ret = nvkm_vmm_new(device, 0, 0, NULL, 0, NULL, "acr", &acr->vmm); + if (ret) + return ret; + + acr->vmm->debug = acr->subdev.debug; + + ret = nvkm_vmm_join(acr->vmm, acr->inst); + if (ret) + return ret; + + /* Load HS firmware blobs into ACR VMM. */ + list_for_each_entry(hsfw, &acr->hsfw, head) { + nvkm_debug(subdev, "loading %s fw\n", hsfw->name); + ret = hsfw->func->load(acr, hsfw); + if (ret) + return ret; + } + + /* Kill temporary data. */ + nvkm_acr_cleanup(acr); + return 0; +} + +static void * +nvkm_acr_dtor(struct nvkm_subdev *subdev) +{ + struct nvkm_acr *acr = nvkm_acr(subdev); + struct nvkm_acr_hsf *hsf, *hst; + struct nvkm_acr_lsf *lsf, *lst; + + list_for_each_entry_safe(hsf, hst, &acr->hsf, head) { + nvkm_vmm_put(acr->vmm, &hsf->vma); + nvkm_memory_unref(&hsf->ucode); + kfree(hsf->imem); + list_del(&hsf->head); + kfree(hsf); + } + + nvkm_vmm_part(acr->vmm, acr->inst); + nvkm_vmm_unref(&acr->vmm); + nvkm_memory_unref(&acr->inst); + + nvkm_memory_unref(&acr->wpr); + + list_for_each_entry_safe(lsf, lst, &acr->lsf, head) { + list_del(&lsf->head); + kfree(lsf); + } + + nvkm_acr_cleanup(acr); + return acr; +} + +static const struct nvkm_subdev_func +nvkm_acr = { + .dtor = nvkm_acr_dtor, + .oneinit = nvkm_acr_oneinit, + .init = nvkm_acr_init, + .fini = nvkm_acr_fini, +}; + +static int +nvkm_acr_ctor_wpr(struct nvkm_acr *acr, int ver) +{ + struct nvkm_subdev *subdev = &acr->subdev; + struct nvkm_device *device = subdev->device; + int ret; + + ret = nvkm_firmware_get(subdev, "acr/wpr", ver, &acr->wpr_fw); + if (ret < 0) + return ret; + + /* Pre-add LSFs in the order they appear in the FW WPR image so that + * we're able to do a binary comparison with our own generator. + */ + ret = acr->func->wpr_parse(acr); + if (ret) + return ret; + + acr->wpr_comp = nvkm_boolopt(device->cfgopt, "NvAcrWprCompare", false); + acr->wpr_prev = nvkm_longopt(device->cfgopt, "NvAcrWprPrevAddr", 0); + return 0; +} + +int +nvkm_acr_new_(const struct nvkm_acr_fwif *fwif, struct nvkm_device *device, + int index, struct nvkm_acr **pacr) +{ + struct nvkm_acr *acr; + long wprfw; + + if (!(acr = *pacr = kzalloc(sizeof(*acr), GFP_KERNEL))) + return -ENOMEM; + nvkm_subdev_ctor(&nvkm_acr, device, index, &acr->subdev); + INIT_LIST_HEAD(&acr->hsfw); + INIT_LIST_HEAD(&acr->lsfw); + INIT_LIST_HEAD(&acr->hsf); + INIT_LIST_HEAD(&acr->lsf); + + fwif = nvkm_firmware_load(&acr->subdev, fwif, "Acr", acr); + if (IS_ERR(fwif)) + return PTR_ERR(fwif); + + acr->func = fwif->func; + + wprfw = nvkm_longopt(device->cfgopt, "NvAcrWpr", -1); + if (wprfw >= 0) { + int ret = nvkm_acr_ctor_wpr(acr, wprfw); + if (ret) + return ret; + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c new file mode 100644 index 0000000000000000000000000000000000000000..9a6394085cf05cee315969bc630cc14749df6bae --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c @@ -0,0 +1,470 @@ +/* + * Copyright 2019 Red Hat Inc. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/falcon.h>
+#include <core/firmware.h>
+#include <core/memory.h>
+#include <subdev/mc.h>
+#include <subdev/mmu.h>
+#include <subdev/pmu.h>
+#include <subdev/timer.h>
+
+#include <nvfw/acr.h>
+#include <nvfw/flcn.h>
+
+int
+gm200_acr_init(struct nvkm_acr *acr)
+{
+ return nvkm_acr_hsf_boot(acr, "load");
+}
+
+void
+gm200_acr_wpr_check(struct nvkm_acr *acr, u64 *start, u64 *limit)
+{
+ struct nvkm_device *device = acr->subdev.device;
+
+ nvkm_wr32(device, 0x100cd4, 2);
+ *start = (u64)(nvkm_rd32(device, 0x100cd4) & 0xffffff00) << 8;
+ nvkm_wr32(device, 0x100cd4, 3);
+ *limit = (u64)(nvkm_rd32(device, 0x100cd4) & 0xffffff00) << 8;
+ *limit = *limit + 0x20000;
+}
+
+void
+gm200_acr_wpr_patch(struct nvkm_acr *acr, s64 adjust)
+{
+ struct nvkm_subdev *subdev = &acr->subdev;
+ struct wpr_header hdr;
+ struct lsb_header lsb;
+ struct nvkm_acr_lsfw *lsfw;
+ u32 offset = 0;
+
+ /* Walk the WPR header list, patching each LSF's bootloader data
+ * to account for the WPR base address having moved.
+ */
+ do {
+ nvkm_robj(acr->wpr, offset, &hdr, sizeof(hdr));
+ wpr_header_dump(subdev, &hdr);
+
+ list_for_each_entry(lsfw, &acr->lsfw, head) {
+ if (lsfw->id != hdr.falcon_id)
+ continue;
+
+ nvkm_robj(acr->wpr, hdr.lsb_offset, &lsb, sizeof(lsb));
+ lsb_header_dump(subdev, &lsb);
+
+ lsfw->func->bld_patch(acr, lsb.tail.bl_data_off, adjust);
+ break;
+ }
+ offset += sizeof(hdr);
+ } while (hdr.falcon_id != WPR_HEADER_V0_FALCON_ID_INVALID);
+}
+
+void
+gm200_acr_wpr_build_lsb_tail(struct nvkm_acr_lsfw *lsfw,
+ struct lsb_header_tail *hdr)
+{
+ hdr->ucode_off = lsfw->offset.img;
+ hdr->ucode_size = lsfw->ucode_size;
+ hdr->data_size = lsfw->data_size;
+ hdr->bl_code_size = lsfw->bootloader_size;
+ hdr->bl_imem_off = lsfw->bootloader_imem_offset;
+ hdr->bl_data_off = lsfw->offset.bld;
+ hdr->bl_data_size = lsfw->bl_data_size;
+ hdr->app_code_off = lsfw->app_start_offset +
+ lsfw->app_resident_code_offset;
+ hdr->app_code_size = lsfw->app_resident_code_size;
+ hdr->app_data_off = lsfw->app_start_offset +
+ lsfw->app_resident_data_offset;
+ hdr->app_data_size = lsfw->app_resident_data_size;
+ hdr->flags = lsfw->func->flags;
+}
+
+static int
+gm200_acr_wpr_build_lsb(struct nvkm_acr *acr, struct nvkm_acr_lsfw *lsfw)
+{
+ struct lsb_header hdr;
+
+ if (WARN_ON(lsfw->sig->size != sizeof(hdr.signature)))
+ return -EINVAL;
+
+ memcpy(&hdr.signature, lsfw->sig->data, lsfw->sig->size);
+ gm200_acr_wpr_build_lsb_tail(lsfw, &hdr.tail);
+
+ nvkm_wobj(acr->wpr, lsfw->offset.lsb, &hdr, sizeof(hdr));
+ return 0;
+}
+
+int
+gm200_acr_wpr_build(struct nvkm_acr *acr, struct nvkm_acr_lsf *rtos)
+{
+ struct nvkm_acr_lsfw *lsfw;
+ u32 offset = 0;
+ int ret;
+
+ /* Fill per-LSF 
structures. */ + list_for_each_entry(lsfw, &acr->lsfw, head) { + struct wpr_header hdr = { + .falcon_id = lsfw->id, + .lsb_offset = lsfw->offset.lsb, + .bootstrap_owner = NVKM_ACR_LSF_PMU, + .lazy_bootstrap = rtos && lsfw->id != rtos->id, + .status = WPR_HEADER_V0_STATUS_COPY, + }; + + /* Write WPR header. */ + nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr)); + offset += sizeof(hdr); + + /* Write LSB header. */ + ret = gm200_acr_wpr_build_lsb(acr, lsfw); + if (ret) + return ret; + + /* Write ucode image. */ + nvkm_wobj(acr->wpr, lsfw->offset.img, + lsfw->img.data, + lsfw->img.size); + + /* Write bootloader data. */ + lsfw->func->bld_write(acr, lsfw->offset.bld, lsfw); + } + + /* Finalise WPR. */ + nvkm_wo32(acr->wpr, offset, WPR_HEADER_V0_FALCON_ID_INVALID); + return 0; +} + +static int +gm200_acr_wpr_alloc(struct nvkm_acr *acr, u32 wpr_size) +{ + int ret = nvkm_memory_new(acr->subdev.device, NVKM_MEM_TARGET_INST, + ALIGN(wpr_size, 0x40000), 0x40000, true, + &acr->wpr); + if (ret) + return ret; + + acr->wpr_start = nvkm_memory_addr(acr->wpr); + acr->wpr_end = acr->wpr_start + nvkm_memory_size(acr->wpr); + return 0; +} + +u32 +gm200_acr_wpr_layout(struct nvkm_acr *acr) +{ + struct nvkm_acr_lsfw *lsfw; + u32 wpr = 0; + + wpr += 11 /* MAX_LSF */ * sizeof(struct wpr_header); + + list_for_each_entry(lsfw, &acr->lsfw, head) { + wpr = ALIGN(wpr, 256); + lsfw->offset.lsb = wpr; + wpr += sizeof(struct lsb_header); + + wpr = ALIGN(wpr, 4096); + lsfw->offset.img = wpr; + wpr += lsfw->img.size; + + wpr = ALIGN(wpr, 256); + lsfw->offset.bld = wpr; + lsfw->bl_data_size = ALIGN(lsfw->func->bld_size, 256); + wpr += lsfw->bl_data_size; + } + + return wpr; +} + +int +gm200_acr_wpr_parse(struct nvkm_acr *acr) +{ + const struct wpr_header *hdr = (void *)acr->wpr_fw->data; + + while (hdr->falcon_id != WPR_HEADER_V0_FALCON_ID_INVALID) { + wpr_header_dump(&acr->subdev, hdr); + if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id)) + return -ENOMEM; + } + + return 0; +} + +void +gm200_acr_hsfw_bld(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf) +{ + struct flcn_bl_dmem_desc_v1 hsdesc = { + .ctx_dma = FALCON_DMAIDX_VIRT, + .code_dma_base = hsf->vma->addr, + .non_sec_code_off = hsf->non_sec_addr, + .non_sec_code_size = hsf->non_sec_size, + .sec_code_off = hsf->sec_addr, + .sec_code_size = hsf->sec_size, + .code_entry_point = 0, + .data_dma_base = hsf->vma->addr + hsf->data_addr, + .data_size = hsf->data_size, + }; + + flcn_bl_dmem_desc_v1_dump(&acr->subdev, &hsdesc); + + nvkm_falcon_load_dmem(hsf->falcon, &hsdesc, 0, sizeof(hsdesc), 0); +} + +int +gm200_acr_hsfw_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf, + u32 intr_clear, u32 mbox0_ok) +{ + struct nvkm_subdev *subdev = &acr->subdev; + struct nvkm_device *device = subdev->device; + struct nvkm_falcon *falcon = hsf->falcon; + u32 mbox0, mbox1; + int ret; + + /* Reset falcon. */ + nvkm_falcon_reset(falcon); + nvkm_falcon_bind_context(falcon, acr->inst); + + /* Load bootloader into IMEM. */ + nvkm_falcon_load_imem(falcon, hsf->imem, + falcon->code.limit - hsf->imem_size, + hsf->imem_size, + hsf->imem_tag, + 0, false); + + /* Load bootloader data into DMEM. */ + hsf->func->bld(acr, hsf); + + /* Boot the falcon. */ + nvkm_mc_intr_mask(device, falcon->owner->index, false); + + nvkm_falcon_wr32(falcon, 0x040, 0xdeada5a5); + nvkm_falcon_set_start_addr(falcon, hsf->imem_tag << 8); + nvkm_falcon_start(falcon); + ret = nvkm_falcon_wait_for_halt(falcon, 100); + if (ret) + return ret; + + /* Check for successful completion. 
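+ * Mailbox 0 was seeded with 0xdeada5a5 before the falcon was started;
+ * the HS binary overwrites it with its exit status, so any non-zero
+ * value other than the expected mbox0_ok indicates failure.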
*/ + mbox0 = nvkm_falcon_rd32(falcon, 0x040); + mbox1 = nvkm_falcon_rd32(falcon, 0x044); + nvkm_debug(subdev, "mailbox %08x %08x\n", mbox0, mbox1); + if (mbox0 && mbox0 != mbox0_ok) + return -EIO; + + nvkm_falcon_clear_interrupt(falcon, intr_clear); + nvkm_mc_intr_mask(device, falcon->owner->index, true); + return ret; +} + +int +gm200_acr_hsfw_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw, + struct nvkm_falcon *falcon) +{ + struct nvkm_subdev *subdev = &acr->subdev; + struct nvkm_acr_hsf *hsf; + int ret; + + /* Patch the appropriate signature (production/debug) into the FW + * image, as determined by the mode the falcon is in. + */ + ret = nvkm_falcon_get(falcon, subdev); + if (ret) + return ret; + + if (hsfw->sig.patch_loc) { + if (!falcon->debug) { + nvkm_debug(subdev, "patching production signature\n"); + memcpy(hsfw->image + hsfw->sig.patch_loc, + hsfw->sig.prod.data, + hsfw->sig.prod.size); + } else { + nvkm_debug(subdev, "patching debug signature\n"); + memcpy(hsfw->image + hsfw->sig.patch_loc, + hsfw->sig.dbg.data, + hsfw->sig.dbg.size); + } + } + + nvkm_falcon_put(falcon, subdev); + + if (!(hsf = kzalloc(sizeof(*hsf), GFP_KERNEL))) + return -ENOMEM; + hsf->func = hsfw->func; + hsf->name = hsfw->name; + list_add_tail(&hsf->head, &acr->hsf); + + hsf->imem_size = hsfw->imem_size; + hsf->imem_tag = hsfw->imem_tag; + hsf->imem = kmemdup(hsfw->imem, hsfw->imem_size, GFP_KERNEL); + if (!hsf->imem) + return -ENOMEM; + + hsf->non_sec_addr = hsfw->non_sec_addr; + hsf->non_sec_size = hsfw->non_sec_size; + hsf->sec_addr = hsfw->sec_addr; + hsf->sec_size = hsfw->sec_size; + hsf->data_addr = hsfw->data_addr; + hsf->data_size = hsfw->data_size; + + /* Make the FW image accessible to the HS bootloader. */ + ret = nvkm_memory_new(subdev->device, NVKM_MEM_TARGET_INST, + hsfw->image_size, 0x1000, false, &hsf->ucode); + if (ret) + return ret; + + nvkm_kmap(hsf->ucode); + nvkm_wobj(hsf->ucode, 0, hsfw->image, hsfw->image_size); + nvkm_done(hsf->ucode); + + ret = nvkm_vmm_get(acr->vmm, 12, nvkm_memory_size(hsf->ucode), + &hsf->vma); + if (ret) + return ret; + + ret = nvkm_memory_map(hsf->ucode, 0, acr->vmm, hsf->vma, NULL, 0); + if (ret) + return ret; + + hsf->falcon = falcon; + return 0; +} + +int +gm200_acr_unload_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf) +{ + return gm200_acr_hsfw_boot(acr, hsf, 0, 0x1d); +} + +int +gm200_acr_unload_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw) +{ + return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->pmu->falcon); +} + +const struct nvkm_acr_hsf_func +gm200_acr_unload_0 = { + .load = gm200_acr_unload_load, + .boot = gm200_acr_unload_boot, + .bld = gm200_acr_hsfw_bld, +}; + +MODULE_FIRMWARE("nvidia/gm200/acr/ucode_unload.bin"); +MODULE_FIRMWARE("nvidia/gm204/acr/ucode_unload.bin"); +MODULE_FIRMWARE("nvidia/gm206/acr/ucode_unload.bin"); +MODULE_FIRMWARE("nvidia/gp100/acr/ucode_unload.bin"); + +static const struct nvkm_acr_hsf_fwif +gm200_acr_unload_fwif[] = { + { 0, nvkm_acr_hsfw_load, &gm200_acr_unload_0 }, + {} +}; + +int +gm200_acr_load_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf) +{ + return gm200_acr_hsfw_boot(acr, hsf, 0x10, 0); +} + +static int +gm200_acr_load_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw) +{ + struct flcn_acr_desc *desc = (void *)&hsfw->image[hsfw->data_addr]; + + desc->wpr_region_id = 1; + desc->regions.no_regions = 2; + desc->regions.region_props[0].start_addr = acr->wpr_start >> 8; + desc->regions.region_props[0].end_addr = acr->wpr_end >> 8; + desc->regions.region_props[0].region_id = 1; + 
desc->regions.region_props[0].read_mask = 0xf; + desc->regions.region_props[0].write_mask = 0xc; + desc->regions.region_props[0].client_mask = 0x2; + flcn_acr_desc_dump(&acr->subdev, desc); + + return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->pmu->falcon); +} + +static const struct nvkm_acr_hsf_func +gm200_acr_load_0 = { + .load = gm200_acr_load_load, + .boot = gm200_acr_load_boot, + .bld = gm200_acr_hsfw_bld, +}; + +MODULE_FIRMWARE("nvidia/gm200/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gm200/acr/ucode_load.bin"); + +MODULE_FIRMWARE("nvidia/gm204/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gm204/acr/ucode_load.bin"); + +MODULE_FIRMWARE("nvidia/gm206/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gm206/acr/ucode_load.bin"); + +MODULE_FIRMWARE("nvidia/gp100/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gp100/acr/ucode_load.bin"); + +static const struct nvkm_acr_hsf_fwif +gm200_acr_load_fwif[] = { + { 0, nvkm_acr_hsfw_load, &gm200_acr_load_0 }, + {} +}; + +static const struct nvkm_acr_func +gm200_acr = { + .load = gm200_acr_load_fwif, + .unload = gm200_acr_unload_fwif, + .wpr_parse = gm200_acr_wpr_parse, + .wpr_layout = gm200_acr_wpr_layout, + .wpr_alloc = gm200_acr_wpr_alloc, + .wpr_build = gm200_acr_wpr_build, + .wpr_patch = gm200_acr_wpr_patch, + .wpr_check = gm200_acr_wpr_check, + .init = gm200_acr_init, +}; + +static int +gm200_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif) +{ + struct nvkm_subdev *subdev = &acr->subdev; + const struct nvkm_acr_hsf_fwif *hsfwif; + + hsfwif = nvkm_firmware_load(subdev, fwif->func->load, "AcrLoad", + acr, "acr/bl", "acr/ucode_load", "load"); + if (IS_ERR(hsfwif)) + return PTR_ERR(hsfwif); + + hsfwif = nvkm_firmware_load(subdev, fwif->func->unload, "AcrUnload", + acr, "acr/bl", "acr/ucode_unload", + "unload"); + if (IS_ERR(hsfwif)) + return PTR_ERR(hsfwif); + + return 0; +} + +static const struct nvkm_acr_fwif +gm200_acr_fwif[] = { + { 0, gm200_acr_load, &gm200_acr }, + {} +}; + +int +gm200_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr) +{ + return nvkm_acr_new_(gm200_acr_fwif, device, index, pacr); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c new file mode 100644 index 0000000000000000000000000000000000000000..034a6ede70c74d079030fe50cb5b1669be78db89 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c @@ -0,0 +1,134 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include "priv.h" + +#include +#include +#include +#include + +#include +#include + +int +gm20b_acr_wpr_alloc(struct nvkm_acr *acr, u32 wpr_size) +{ + struct nvkm_subdev *subdev = &acr->subdev; + + acr->func->wpr_check(acr, &acr->wpr_start, &acr->wpr_end); + + if ((acr->wpr_end - acr->wpr_start) < wpr_size) { + nvkm_error(subdev, "WPR image too big for WPR!\n"); + return -ENOSPC; + } + + return nvkm_memory_new(subdev->device, NVKM_MEM_TARGET_INST, + wpr_size, 0, true, &acr->wpr); +} + +static void +gm20b_acr_load_bld(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf) +{ + struct flcn_bl_dmem_desc hsdesc = { + .ctx_dma = FALCON_DMAIDX_VIRT, + .code_dma_base = hsf->vma->addr >> 8, + .non_sec_code_off = hsf->non_sec_addr, + .non_sec_code_size = hsf->non_sec_size, + .sec_code_off = hsf->sec_addr, + .sec_code_size = hsf->sec_size, + .code_entry_point = 0, + .data_dma_base = (hsf->vma->addr + hsf->data_addr) >> 8, + .data_size = hsf->data_size, + }; + + flcn_bl_dmem_desc_dump(&acr->subdev, &hsdesc); + + nvkm_falcon_load_dmem(hsf->falcon, &hsdesc, 0, sizeof(hsdesc), 0); +} + +static int +gm20b_acr_load_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw) +{ + struct flcn_acr_desc *desc = (void *)&hsfw->image[hsfw->data_addr]; + + desc->ucode_blob_base = nvkm_memory_addr(acr->wpr); + desc->ucode_blob_size = nvkm_memory_size(acr->wpr); + flcn_acr_desc_dump(&acr->subdev, desc); + + return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->pmu->falcon); +} + +const struct nvkm_acr_hsf_func +gm20b_acr_load_0 = { + .load = gm20b_acr_load_load, + .boot = gm200_acr_load_boot, + .bld = gm20b_acr_load_bld, +}; + +#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) +MODULE_FIRMWARE("nvidia/gm20b/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gm20b/acr/ucode_load.bin"); +#endif + +static const struct nvkm_acr_hsf_fwif +gm20b_acr_load_fwif[] = { + { 0, nvkm_acr_hsfw_load, &gm20b_acr_load_0 }, + {} +}; + +static const struct nvkm_acr_func +gm20b_acr = { + .load = gm20b_acr_load_fwif, + .wpr_parse = gm200_acr_wpr_parse, + .wpr_layout = gm200_acr_wpr_layout, + .wpr_alloc = gm20b_acr_wpr_alloc, + .wpr_build = gm200_acr_wpr_build, + .wpr_patch = gm200_acr_wpr_patch, + .wpr_check = gm200_acr_wpr_check, + .init = gm200_acr_init, +}; + +int +gm20b_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif) +{ + struct nvkm_subdev *subdev = &acr->subdev; + const struct nvkm_acr_hsf_fwif *hsfwif; + + hsfwif = nvkm_firmware_load(subdev, fwif->func->load, "AcrLoad", + acr, "acr/bl", "acr/ucode_load", "load"); + if (IS_ERR(hsfwif)) + return PTR_ERR(hsfwif); + + return 0; +} + +static const struct nvkm_acr_fwif +gm20b_acr_fwif[] = { + { 0, gm20b_acr_load, &gm20b_acr }, + {} +}; + +int +gm20b_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr) +{ + return nvkm_acr_new_(gm20b_acr_fwif, device, index, pacr); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c new file mode 100644 index 0000000000000000000000000000000000000000..49e11c46d525ee32cec749935805e36010ac46f2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c @@ -0,0 +1,281 @@ +/* + * Copyright 2019 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#include +#include +#include +#include + +#include +#include + +void +gp102_acr_wpr_patch(struct nvkm_acr *acr, s64 adjust) +{ + struct wpr_header_v1 hdr; + struct lsb_header_v1 lsb; + struct nvkm_acr_lsfw *lsfw; + u32 offset = 0; + + do { + nvkm_robj(acr->wpr, offset, &hdr, sizeof(hdr)); + wpr_header_v1_dump(&acr->subdev, &hdr); + + list_for_each_entry(lsfw, &acr->lsfw, head) { + if (lsfw->id != hdr.falcon_id) + continue; + + nvkm_robj(acr->wpr, hdr.lsb_offset, &lsb, sizeof(lsb)); + lsb_header_v1_dump(&acr->subdev, &lsb); + + lsfw->func->bld_patch(acr, lsb.tail.bl_data_off, adjust); + break; + } + + offset += sizeof(hdr); + } while (hdr.falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID); +} + +int +gp102_acr_wpr_build_lsb(struct nvkm_acr *acr, struct nvkm_acr_lsfw *lsfw) +{ + struct lsb_header_v1 hdr; + + if (WARN_ON(lsfw->sig->size != sizeof(hdr.signature))) + return -EINVAL; + + memcpy(&hdr.signature, lsfw->sig->data, lsfw->sig->size); + gm200_acr_wpr_build_lsb_tail(lsfw, &hdr.tail); + + nvkm_wobj(acr->wpr, lsfw->offset.lsb, &hdr, sizeof(hdr)); + return 0; +} + +int +gp102_acr_wpr_build(struct nvkm_acr *acr, struct nvkm_acr_lsf *rtos) +{ + struct nvkm_acr_lsfw *lsfw; + u32 offset = 0; + int ret; + + /* Fill per-LSF structures. */ + list_for_each_entry(lsfw, &acr->lsfw, head) { + struct lsf_signature_v1 *sig = (void *)lsfw->sig->data; + struct wpr_header_v1 hdr = { + .falcon_id = lsfw->id, + .lsb_offset = lsfw->offset.lsb, + .bootstrap_owner = NVKM_ACR_LSF_SEC2, + .lazy_bootstrap = rtos && lsfw->id != rtos->id, + .bin_version = sig->version, + .status = WPR_HEADER_V1_STATUS_COPY, + }; + + /* Write WPR header. */ + nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr)); + offset += sizeof(hdr); + + /* Write LSB header. */ + ret = gp102_acr_wpr_build_lsb(acr, lsfw); + if (ret) + return ret; + + /* Write ucode image. */ + nvkm_wobj(acr->wpr, lsfw->offset.img, + lsfw->img.data, + lsfw->img.size); + + /* Write bootloader data. */ + lsfw->func->bld_write(acr, lsfw->offset.bld, lsfw); + } + + /* Finalise WPR. 
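+ * An invalid falcon ID is written where the next WPR header would
+ * otherwise begin, terminating the list that the parse/patch loops
+ * above key off of.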
*/ + nvkm_wo32(acr->wpr, offset, WPR_HEADER_V1_FALCON_ID_INVALID); + return 0; +} + +int +gp102_acr_wpr_alloc(struct nvkm_acr *acr, u32 wpr_size) +{ + int ret = nvkm_memory_new(acr->subdev.device, NVKM_MEM_TARGET_INST, + ALIGN(wpr_size, 0x40000) << 1, 0x40000, true, + &acr->wpr); + if (ret) + return ret; + + acr->shadow_start = nvkm_memory_addr(acr->wpr); + acr->wpr_start = acr->shadow_start + (nvkm_memory_size(acr->wpr) >> 1); + acr->wpr_end = acr->wpr_start + (nvkm_memory_size(acr->wpr) >> 1); + return 0; +} + +u32 +gp102_acr_wpr_layout(struct nvkm_acr *acr) +{ + struct nvkm_acr_lsfw *lsfw; + u32 wpr = 0; + + wpr += 11 /* MAX_LSF */ * sizeof(struct wpr_header_v1); + wpr = ALIGN(wpr, 256); + + wpr += 0x100; /* Shared sub-WPR headers. */ + + list_for_each_entry(lsfw, &acr->lsfw, head) { + wpr = ALIGN(wpr, 256); + lsfw->offset.lsb = wpr; + wpr += sizeof(struct lsb_header_v1); + + wpr = ALIGN(wpr, 4096); + lsfw->offset.img = wpr; + wpr += lsfw->img.size; + + wpr = ALIGN(wpr, 256); + lsfw->offset.bld = wpr; + lsfw->bl_data_size = ALIGN(lsfw->func->bld_size, 256); + wpr += lsfw->bl_data_size; + } + + return wpr; +} + +int +gp102_acr_wpr_parse(struct nvkm_acr *acr) +{ + const struct wpr_header_v1 *hdr = (void *)acr->wpr_fw->data; + + while (hdr->falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID) { + wpr_header_v1_dump(&acr->subdev, hdr); + if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id)) + return -ENOMEM; + } + + return 0; +} + +MODULE_FIRMWARE("nvidia/gp102/acr/unload_bl.bin"); +MODULE_FIRMWARE("nvidia/gp102/acr/ucode_unload.bin"); + +MODULE_FIRMWARE("nvidia/gp104/acr/unload_bl.bin"); +MODULE_FIRMWARE("nvidia/gp104/acr/ucode_unload.bin"); + +MODULE_FIRMWARE("nvidia/gp106/acr/unload_bl.bin"); +MODULE_FIRMWARE("nvidia/gp106/acr/ucode_unload.bin"); + +MODULE_FIRMWARE("nvidia/gp107/acr/unload_bl.bin"); +MODULE_FIRMWARE("nvidia/gp107/acr/ucode_unload.bin"); + +static const struct nvkm_acr_hsf_fwif +gp102_acr_unload_fwif[] = { + { 0, nvkm_acr_hsfw_load, &gm200_acr_unload_0 }, + {} +}; + +int +gp102_acr_load_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw) +{ + struct flcn_acr_desc_v1 *desc = (void *)&hsfw->image[hsfw->data_addr]; + + desc->wpr_region_id = 1; + desc->regions.no_regions = 2; + desc->regions.region_props[0].start_addr = acr->wpr_start >> 8; + desc->regions.region_props[0].end_addr = acr->wpr_end >> 8; + desc->regions.region_props[0].region_id = 1; + desc->regions.region_props[0].read_mask = 0xf; + desc->regions.region_props[0].write_mask = 0xc; + desc->regions.region_props[0].client_mask = 0x2; + desc->regions.region_props[0].shadow_mem_start_addr = + acr->shadow_start >> 8; + flcn_acr_desc_v1_dump(&acr->subdev, desc); + + return gm200_acr_hsfw_load(acr, hsfw, + &acr->subdev.device->sec2->falcon); +} + +static const struct nvkm_acr_hsf_func +gp102_acr_load_0 = { + .load = gp102_acr_load_load, + .boot = gm200_acr_load_boot, + .bld = gm200_acr_hsfw_bld, +}; + +MODULE_FIRMWARE("nvidia/gp102/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gp102/acr/ucode_load.bin"); + +MODULE_FIRMWARE("nvidia/gp104/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gp104/acr/ucode_load.bin"); + +MODULE_FIRMWARE("nvidia/gp106/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gp106/acr/ucode_load.bin"); + +MODULE_FIRMWARE("nvidia/gp107/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gp107/acr/ucode_load.bin"); + +static const struct nvkm_acr_hsf_fwif +gp102_acr_load_fwif[] = { + { 0, nvkm_acr_hsfw_load, &gp102_acr_load_0 }, + {} +}; + +static const struct nvkm_acr_func +gp102_acr = { + .load = gp102_acr_load_fwif, + .unload = 
gp102_acr_unload_fwif, + .wpr_parse = gp102_acr_wpr_parse, + .wpr_layout = gp102_acr_wpr_layout, + .wpr_alloc = gp102_acr_wpr_alloc, + .wpr_build = gp102_acr_wpr_build, + .wpr_patch = gp102_acr_wpr_patch, + .wpr_check = gm200_acr_wpr_check, + .init = gm200_acr_init, +}; + +int +gp102_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif) +{ + struct nvkm_subdev *subdev = &acr->subdev; + const struct nvkm_acr_hsf_fwif *hsfwif; + + hsfwif = nvkm_firmware_load(subdev, fwif->func->load, "AcrLoad", + acr, "acr/bl", "acr/ucode_load", "load"); + if (IS_ERR(hsfwif)) + return PTR_ERR(hsfwif); + + hsfwif = nvkm_firmware_load(subdev, fwif->func->unload, "AcrUnload", + acr, "acr/unload_bl", "acr/ucode_unload", + "unload"); + if (IS_ERR(hsfwif)) + return PTR_ERR(hsfwif); + + return 0; +} + +static const struct nvkm_acr_fwif +gp102_acr_fwif[] = { + { 0, gp102_acr_load, &gp102_acr }, + {} +}; + +int +gp102_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr) +{ + return nvkm_acr_new_(gp102_acr_fwif, device, index, pacr); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c new file mode 100644 index 0000000000000000000000000000000000000000..f10dc9112678cd3ded37fc91bd15ed61b54f0edd --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c @@ -0,0 +1,111 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include "priv.h" + +#include + +#include + +void +gp108_acr_hsfw_bld(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf) +{ + struct flcn_bl_dmem_desc_v2 hsdesc = { + .ctx_dma = FALCON_DMAIDX_VIRT, + .code_dma_base = hsf->vma->addr, + .non_sec_code_off = hsf->non_sec_addr, + .non_sec_code_size = hsf->non_sec_size, + .sec_code_off = hsf->sec_addr, + .sec_code_size = hsf->sec_size, + .code_entry_point = 0, + .data_dma_base = hsf->vma->addr + hsf->data_addr, + .data_size = hsf->data_size, + .argc = 0, + .argv = 0, + }; + + flcn_bl_dmem_desc_v2_dump(&acr->subdev, &hsdesc); + + nvkm_falcon_load_dmem(hsf->falcon, &hsdesc, 0, sizeof(hsdesc), 0); +} + +const struct nvkm_acr_hsf_func +gp108_acr_unload_0 = { + .load = gm200_acr_unload_load, + .boot = gm200_acr_unload_boot, + .bld = gp108_acr_hsfw_bld, +}; + +MODULE_FIRMWARE("nvidia/gp108/acr/unload_bl.bin"); +MODULE_FIRMWARE("nvidia/gp108/acr/ucode_unload.bin"); + +MODULE_FIRMWARE("nvidia/gv100/acr/unload_bl.bin"); +MODULE_FIRMWARE("nvidia/gv100/acr/ucode_unload.bin"); + +static const struct nvkm_acr_hsf_fwif +gp108_acr_unload_fwif[] = { + { 0, nvkm_acr_hsfw_load, &gp108_acr_unload_0 }, + {} +}; + +static const struct nvkm_acr_hsf_func +gp108_acr_load_0 = { + .load = gp102_acr_load_load, + .boot = gm200_acr_load_boot, + .bld = gp108_acr_hsfw_bld, +}; + +MODULE_FIRMWARE("nvidia/gp108/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gp108/acr/ucode_load.bin"); + +MODULE_FIRMWARE("nvidia/gv100/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gv100/acr/ucode_load.bin"); + +static const struct nvkm_acr_hsf_fwif +gp108_acr_load_fwif[] = { + { 0, nvkm_acr_hsfw_load, &gp108_acr_load_0 }, + {} +}; + +static const struct nvkm_acr_func +gp108_acr = { + .load = gp108_acr_load_fwif, + .unload = gp108_acr_unload_fwif, + .wpr_parse = gp102_acr_wpr_parse, + .wpr_layout = gp102_acr_wpr_layout, + .wpr_alloc = gp102_acr_wpr_alloc, + .wpr_build = gp102_acr_wpr_build, + .wpr_patch = gp102_acr_wpr_patch, + .wpr_check = gm200_acr_wpr_check, + .init = gm200_acr_init, +}; + +static const struct nvkm_acr_fwif +gp108_acr_fwif[] = { + { 0, gp102_acr_load, &gp108_acr }, + {} +}; + +int +gp108_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr) +{ + return nvkm_acr_new_(gp108_acr_fwif, device, index, pacr); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c new file mode 100644 index 0000000000000000000000000000000000000000..39de64292a41bb4c2ad045c184e998cedb6f06dc --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c @@ -0,0 +1,57 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) +MODULE_FIRMWARE("nvidia/gp10b/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gp10b/acr/ucode_load.bin"); +#endif + +static const struct nvkm_acr_hsf_fwif +gp10b_acr_load_fwif[] = { + { 0, nvkm_acr_hsfw_load, &gm20b_acr_load_0 }, + {} +}; + +static const struct nvkm_acr_func +gp10b_acr = { + .load = gp10b_acr_load_fwif, + .wpr_parse = gm200_acr_wpr_parse, + .wpr_layout = gm200_acr_wpr_layout, + .wpr_alloc = gm20b_acr_wpr_alloc, + .wpr_build = gm200_acr_wpr_build, + .wpr_patch = gm200_acr_wpr_patch, + .wpr_check = gm200_acr_wpr_check, + .init = gm200_acr_init, +}; + +static const struct nvkm_acr_fwif +gp10b_acr_fwif[] = { + { 0, gm20b_acr_load, &gp10b_acr }, + {} +}; + +int +gp10b_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr) +{ + return nvkm_acr_new_(gp10b_acr_fwif, device, index, pacr); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c new file mode 100644 index 0000000000000000000000000000000000000000..aecce2dac5586725e327137ba491c9f058d9af11 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c @@ -0,0 +1,180 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */
+#include "priv.h"
+
+#include <core/firmware.h>
+
+#include <nvfw/fw.h>
+#include <nvfw/hs.h>
+
+static void
+nvkm_acr_hsfw_del(struct nvkm_acr_hsfw *hsfw)
+{
+	list_del(&hsfw->head);
+	kfree(hsfw->imem);
+	kfree(hsfw->image);
+	kfree(hsfw->sig.prod.data);
+	kfree(hsfw->sig.dbg.data);
+	kfree(hsfw);
+}
+
+void
+nvkm_acr_hsfw_del_all(struct nvkm_acr *acr)
+{
+	struct nvkm_acr_hsfw *hsfw, *hsft;
+	list_for_each_entry_safe(hsfw, hsft, &acr->hsfw, head) {
+		nvkm_acr_hsfw_del(hsfw);
+	}
+}
+
+static int
+nvkm_acr_hsfw_load_image(struct nvkm_acr *acr, const char *name, int ver,
+			 struct nvkm_acr_hsfw *hsfw)
+{
+	struct nvkm_subdev *subdev = &acr->subdev;
+	const struct firmware *fw;
+	const struct nvfw_bin_hdr *hdr;
+	const struct nvfw_hs_header *fwhdr;
+	const struct nvfw_hs_load_header *lhdr;
+	u32 loc, sig;
+	int ret;
+
+	ret = nvkm_firmware_get(subdev, name, ver, &fw);
+	if (ret < 0)
+		return ret;
+
+	hdr = nvfw_bin_hdr(subdev, fw->data);
+	fwhdr = nvfw_hs_header(subdev, fw->data + hdr->header_offset);
+
+	/* Earlier FW releases by NVIDIA for Nouveau's use aren't in NVIDIA's
+	 * standard format, and don't have the indirection seen in the 0x10de
+	 * case.
+	 */
+	switch (hdr->bin_magic) {
+	case 0x000010de:
+		loc = *(u32 *)(fw->data + fwhdr->patch_loc);
+		sig = *(u32 *)(fw->data + fwhdr->patch_sig);
+		break;
+	case 0x3b1d14f0:
+		loc = fwhdr->patch_loc;
+		sig = fwhdr->patch_sig;
+		break;
+	default:
+		ret = -EINVAL;
+		goto done;
+	}
+
+	lhdr = nvfw_hs_load_header(subdev, fw->data + fwhdr->hdr_offset);
+
+	if (!(hsfw->image = kmalloc(hdr->data_size, GFP_KERNEL))) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	memcpy(hsfw->image, fw->data + hdr->data_offset, hdr->data_size);
+	hsfw->image_size = hdr->data_size;
+	hsfw->non_sec_addr = lhdr->non_sec_code_off;
+	hsfw->non_sec_size = lhdr->non_sec_code_size;
+	hsfw->sec_addr = lhdr->apps[0];
+	hsfw->sec_size = lhdr->apps[lhdr->num_apps];
+	hsfw->data_addr = lhdr->data_dma_base;
+	hsfw->data_size = lhdr->data_size;
+
+	hsfw->sig.prod.size = fwhdr->sig_prod_size;
+	hsfw->sig.prod.data = kmalloc(hsfw->sig.prod.size, GFP_KERNEL);
+	if (!hsfw->sig.prod.data) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	memcpy(hsfw->sig.prod.data, fw->data + fwhdr->sig_prod_offset + sig,
+	       hsfw->sig.prod.size);
+
+	hsfw->sig.dbg.size = fwhdr->sig_dbg_size;
+	hsfw->sig.dbg.data = kmalloc(hsfw->sig.dbg.size, GFP_KERNEL);
+	if (!hsfw->sig.dbg.data) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	memcpy(hsfw->sig.dbg.data, fw->data + fwhdr->sig_dbg_offset + sig,
+	       hsfw->sig.dbg.size);
+
+	hsfw->sig.patch_loc = loc;
+done:
+	nvkm_firmware_put(fw);
+	return ret;
+}
+
+static int
+nvkm_acr_hsfw_load_bl(struct nvkm_acr *acr, const char *name, int ver,
+		      struct nvkm_acr_hsfw *hsfw)
+{
+	struct nvkm_subdev *subdev = &acr->subdev;
+	const struct nvfw_bin_hdr *hdr;
+	const struct nvfw_bl_desc *desc;
+	const struct firmware *fw;
+	u8 *data;
+	int ret;
+
+	ret = nvkm_firmware_get(subdev, name, ver, &fw);
+	if (ret)
+		return ret;
+
+	hdr = nvfw_bin_hdr(subdev, fw->data);
+	desc = nvfw_bl_desc(subdev, fw->data + hdr->header_offset);
+	data = (void *)fw->data + hdr->data_offset;
+
+	hsfw->imem_size = desc->code_size;
+	hsfw->imem_tag = desc->start_tag;
+	hsfw->imem = kmalloc(desc->code_size, GFP_KERNEL);
+	/* kmalloc() can fail; bail out cleanly instead of oopsing in memcpy(). */
+	if (!hsfw->imem) {
+		nvkm_firmware_put(fw);
+		return -ENOMEM;
+	}
+
+	memcpy(hsfw->imem, data + desc->code_off, desc->code_size);
+
+	nvkm_firmware_put(fw);
+	return 0;
+}
+
+int
+nvkm_acr_hsfw_load(struct nvkm_acr *acr, const char *bl, const char *fw,
+		   const char *name, int version,
+		   const struct nvkm_acr_hsf_fwif *fwif)
+{
+	struct nvkm_acr_hsfw *hsfw;
+	int ret;
+
+	if (!(hsfw = kzalloc(sizeof(*hsfw),
GFP_KERNEL)))
+		return -ENOMEM;
+
+	hsfw->func = fwif->func;
+	hsfw->name = name;
+	list_add_tail(&hsfw->head, &acr->hsfw);
+
+	ret = nvkm_acr_hsfw_load_bl(acr, bl, version, hsfw);
+	if (ret)
+		goto done;
+
+	ret = nvkm_acr_hsfw_load_image(acr, fw, version, hsfw);
+done:
+	if (ret)
+		nvkm_acr_hsfw_del(hsfw);
+	return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c
new file mode 100644
index 0000000000000000000000000000000000000000..07d1830126abf5bbcef73651bbcf7c0dab48c946
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include <core/falcon.h>
+#include <core/firmware.h>
+#include <nvfw/fw.h>
+#include <nvfw/ls.h>
+
+void
+nvkm_acr_lsfw_del(struct nvkm_acr_lsfw *lsfw)
+{
+	nvkm_blob_dtor(&lsfw->img);
+	nvkm_firmware_put(lsfw->sig);
+	list_del(&lsfw->head);
+	kfree(lsfw);
+}
+
+void
+nvkm_acr_lsfw_del_all(struct nvkm_acr *acr)
+{
+	struct nvkm_acr_lsfw *lsfw, *lsft;
+	list_for_each_entry_safe(lsfw, lsft, &acr->lsfw, head) {
+		nvkm_acr_lsfw_del(lsfw);
+	}
+}
+
+static struct nvkm_acr_lsfw *
+nvkm_acr_lsfw_get(struct nvkm_acr *acr, enum nvkm_acr_lsf_id id)
+{
+	struct nvkm_acr_lsfw *lsfw;
+	list_for_each_entry(lsfw, &acr->lsfw, head) {
+		if (lsfw->id == id)
+			return lsfw;
+	}
+	return NULL;
+}
+
+struct nvkm_acr_lsfw *
+nvkm_acr_lsfw_add(const struct nvkm_acr_lsf_func *func, struct nvkm_acr *acr,
+		  struct nvkm_falcon *falcon, enum nvkm_acr_lsf_id id)
+{
+	struct nvkm_acr_lsfw *lsfw;
+
+	if (!acr)
+		return ERR_PTR(-ENOSYS);
+
+	lsfw = nvkm_acr_lsfw_get(acr, id);
+	if (lsfw && lsfw->func) {
+		nvkm_error(&acr->subdev, "LSFW %d redefined\n", id);
+		return ERR_PTR(-EEXIST);
+	}
+
+	if (!lsfw) {
+		if (!(lsfw = kzalloc(sizeof(*lsfw), GFP_KERNEL)))
+			return ERR_PTR(-ENOMEM);
+
+		lsfw->id = id;
+		list_add_tail(&lsfw->head, &acr->lsfw);
+	}
+
+	lsfw->func = func;
+	lsfw->falcon = falcon;
+	return lsfw;
+}
+
+static struct nvkm_acr_lsfw *
+nvkm_acr_lsfw_load_sig_image_desc_(struct nvkm_subdev *subdev,
+				   struct nvkm_falcon *falcon,
+				   enum nvkm_acr_lsf_id id,
+				   const char *path, int ver,
+				   const struct nvkm_acr_lsf_func *func,
+				   const struct firmware **pdesc)
+{
+	struct nvkm_acr *acr = subdev->device->acr;
+	struct nvkm_acr_lsfw *lsfw;
+	int ret;
+
+	if (IS_ERR((lsfw = nvkm_acr_lsfw_add(func, acr, falcon, id))))
+		return lsfw;
+
+	ret = nvkm_firmware_load_name(subdev, path, "sig", ver, &lsfw->sig);
+	if (ret)
+		goto done;
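+
+	/* The ucode image and its descriptor are fetched below, alongside
+	 * the signature loaded above; if any of the three fails to load,
+	 * the LSFW entry is dropped again on the error path.
+	 */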
+ + ret = nvkm_firmware_load_blob(subdev, path, "image", ver, &lsfw->img); + if (ret) + goto done; + + ret = nvkm_firmware_load_name(subdev, path, "desc", ver, pdesc); +done: + if (ret) { + nvkm_acr_lsfw_del(lsfw); + return ERR_PTR(ret); + } + + return lsfw; +} + +static void +nvkm_acr_lsfw_from_desc(const struct nvfw_ls_desc_head *desc, + struct nvkm_acr_lsfw *lsfw) +{ + lsfw->bootloader_size = ALIGN(desc->bootloader_size, 256); + lsfw->bootloader_imem_offset = desc->bootloader_imem_offset; + + lsfw->app_size = ALIGN(desc->app_size, 256); + lsfw->app_start_offset = desc->app_start_offset; + lsfw->app_imem_entry = desc->app_imem_entry; + lsfw->app_resident_code_offset = desc->app_resident_code_offset; + lsfw->app_resident_code_size = desc->app_resident_code_size; + lsfw->app_resident_data_offset = desc->app_resident_data_offset; + lsfw->app_resident_data_size = desc->app_resident_data_size; + + lsfw->ucode_size = ALIGN(lsfw->app_resident_data_offset, 256) + + lsfw->bootloader_size; + lsfw->data_size = lsfw->app_size + lsfw->bootloader_size - + lsfw->ucode_size; +} + +int +nvkm_acr_lsfw_load_sig_image_desc(struct nvkm_subdev *subdev, + struct nvkm_falcon *falcon, + enum nvkm_acr_lsf_id id, + const char *path, int ver, + const struct nvkm_acr_lsf_func *func) +{ + const struct firmware *fw; + struct nvkm_acr_lsfw *lsfw; + + lsfw = nvkm_acr_lsfw_load_sig_image_desc_(subdev, falcon, id, path, ver, + func, &fw); + if (IS_ERR(lsfw)) + return PTR_ERR(lsfw); + + nvkm_acr_lsfw_from_desc(&nvfw_ls_desc(subdev, fw->data)->head, lsfw); + nvkm_firmware_put(fw); + return 0; +} + +int +nvkm_acr_lsfw_load_sig_image_desc_v1(struct nvkm_subdev *subdev, + struct nvkm_falcon *falcon, + enum nvkm_acr_lsf_id id, + const char *path, int ver, + const struct nvkm_acr_lsf_func *func) +{ + const struct firmware *fw; + struct nvkm_acr_lsfw *lsfw; + + lsfw = nvkm_acr_lsfw_load_sig_image_desc_(subdev, falcon, id, path, ver, + func, &fw); + if (IS_ERR(lsfw)) + return PTR_ERR(lsfw); + + nvkm_acr_lsfw_from_desc(&nvfw_ls_desc_v1(subdev, fw->data)->head, lsfw); + nvkm_firmware_put(fw); + return 0; +} + +int +nvkm_acr_lsfw_load_bl_inst_data_sig(struct nvkm_subdev *subdev, + struct nvkm_falcon *falcon, + enum nvkm_acr_lsf_id id, + const char *path, int ver, + const struct nvkm_acr_lsf_func *func) +{ + struct nvkm_acr *acr = subdev->device->acr; + struct nvkm_acr_lsfw *lsfw; + const struct firmware *bl = NULL, *inst = NULL, *data = NULL; + const struct nvfw_bin_hdr *hdr; + const struct nvfw_bl_desc *desc; + u32 *bldata; + int ret; + + if (IS_ERR((lsfw = nvkm_acr_lsfw_add(func, acr, falcon, id)))) + return PTR_ERR(lsfw); + + ret = nvkm_firmware_load_name(subdev, path, "bl", ver, &bl); + if (ret) + goto done; + + hdr = nvfw_bin_hdr(subdev, bl->data); + desc = nvfw_bl_desc(subdev, bl->data + hdr->header_offset); + bldata = (void *)(bl->data + hdr->data_offset); + + ret = nvkm_firmware_load_name(subdev, path, "inst", ver, &inst); + if (ret) + goto done; + + ret = nvkm_firmware_load_name(subdev, path, "data", ver, &data); + if (ret) + goto done; + + ret = nvkm_firmware_load_name(subdev, path, "sig", ver, &lsfw->sig); + if (ret) + goto done; + + lsfw->bootloader_size = ALIGN(desc->code_size, 256); + lsfw->bootloader_imem_offset = desc->start_tag << 8; + + lsfw->app_start_offset = lsfw->bootloader_size; + lsfw->app_imem_entry = 0; + lsfw->app_resident_code_offset = 0; + lsfw->app_resident_code_size = ALIGN(inst->size, 256); + lsfw->app_resident_data_offset = lsfw->app_resident_code_size; + lsfw->app_resident_data_size = 
ALIGN(data->size, 256); + lsfw->app_size = lsfw->app_resident_code_size + + lsfw->app_resident_data_size; + + lsfw->img.size = lsfw->bootloader_size + lsfw->app_size; + if (!(lsfw->img.data = kzalloc(lsfw->img.size, GFP_KERNEL))) { + ret = -ENOMEM; + goto done; + } + + memcpy(lsfw->img.data, bldata, lsfw->bootloader_size); + memcpy(lsfw->img.data + lsfw->app_start_offset + + lsfw->app_resident_code_offset, inst->data, inst->size); + memcpy(lsfw->img.data + lsfw->app_start_offset + + lsfw->app_resident_data_offset, data->data, data->size); + + lsfw->ucode_size = ALIGN(lsfw->app_resident_data_offset, 256) + + lsfw->bootloader_size; + lsfw->data_size = lsfw->app_size + lsfw->bootloader_size - + lsfw->ucode_size; + +done: + if (ret) + nvkm_acr_lsfw_del(lsfw); + nvkm_firmware_put(data); + nvkm_firmware_put(inst); + nvkm_firmware_put(bl); + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h new file mode 100644 index 0000000000000000000000000000000000000000..d8ba72806d392847168961307eea3f0385aa2dba --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h @@ -0,0 +1,151 @@ +#ifndef __NVKM_ACR_PRIV_H__ +#define __NVKM_ACR_PRIV_H__ +#include +struct lsb_header_tail; + +struct nvkm_acr_fwif { + int version; + int (*load)(struct nvkm_acr *, int version, + const struct nvkm_acr_fwif *); + const struct nvkm_acr_func *func; +}; + +int gm20b_acr_load(struct nvkm_acr *, int, const struct nvkm_acr_fwif *); +int gp102_acr_load(struct nvkm_acr *, int, const struct nvkm_acr_fwif *); + +struct nvkm_acr_lsf; +struct nvkm_acr_func { + const struct nvkm_acr_hsf_fwif *load; + const struct nvkm_acr_hsf_fwif *ahesasc; + const struct nvkm_acr_hsf_fwif *asb; + const struct nvkm_acr_hsf_fwif *unload; + int (*wpr_parse)(struct nvkm_acr *); + u32 (*wpr_layout)(struct nvkm_acr *); + int (*wpr_alloc)(struct nvkm_acr *, u32 wpr_size); + int (*wpr_build)(struct nvkm_acr *, struct nvkm_acr_lsf *rtos); + void (*wpr_patch)(struct nvkm_acr *, s64 adjust); + void (*wpr_check)(struct nvkm_acr *, u64 *start, u64 *limit); + int (*init)(struct nvkm_acr *); + void (*fini)(struct nvkm_acr *); +}; + +int gm200_acr_wpr_parse(struct nvkm_acr *); +u32 gm200_acr_wpr_layout(struct nvkm_acr *); +int gm200_acr_wpr_build(struct nvkm_acr *, struct nvkm_acr_lsf *); +void gm200_acr_wpr_patch(struct nvkm_acr *, s64); +void gm200_acr_wpr_check(struct nvkm_acr *, u64 *, u64 *); +void gm200_acr_wpr_build_lsb_tail(struct nvkm_acr_lsfw *, + struct lsb_header_tail *); +int gm200_acr_init(struct nvkm_acr *); + +int gm20b_acr_wpr_alloc(struct nvkm_acr *, u32 wpr_size); + +int gp102_acr_wpr_parse(struct nvkm_acr *); +u32 gp102_acr_wpr_layout(struct nvkm_acr *); +int gp102_acr_wpr_alloc(struct nvkm_acr *, u32 wpr_size); +int gp102_acr_wpr_build(struct nvkm_acr *, struct nvkm_acr_lsf *); +int gp102_acr_wpr_build_lsb(struct nvkm_acr *, struct nvkm_acr_lsfw *); +void gp102_acr_wpr_patch(struct nvkm_acr *, s64); + +struct nvkm_acr_hsfw { + const struct nvkm_acr_hsf_func *func; + const char *name; + struct list_head head; + + u32 imem_size; + u32 imem_tag; + u32 *imem; + + u8 *image; + u32 image_size; + u32 non_sec_addr; + u32 non_sec_size; + u32 sec_addr; + u32 sec_size; + u32 data_addr; + u32 data_size; + + struct { + struct { + void *data; + u32 size; + } prod, dbg; + u32 patch_loc; + } sig; +}; + +struct nvkm_acr_hsf_fwif { + int version; + int (*load)(struct nvkm_acr *, const char *bl, const char *fw, + const char *name, int version, + const struct nvkm_acr_hsf_fwif *); + 
const struct nvkm_acr_hsf_func *func; +}; + +int nvkm_acr_hsfw_load(struct nvkm_acr *, const char *, const char *, + const char *, int, const struct nvkm_acr_hsf_fwif *); +void nvkm_acr_hsfw_del_all(struct nvkm_acr *); + +struct nvkm_acr_hsf { + const struct nvkm_acr_hsf_func *func; + const char *name; + struct list_head head; + + u32 imem_size; + u32 imem_tag; + u32 *imem; + + u32 non_sec_addr; + u32 non_sec_size; + u32 sec_addr; + u32 sec_size; + u32 data_addr; + u32 data_size; + + struct nvkm_memory *ucode; + struct nvkm_vma *vma; + struct nvkm_falcon *falcon; +}; + +struct nvkm_acr_hsf_func { + int (*load)(struct nvkm_acr *, struct nvkm_acr_hsfw *); + int (*boot)(struct nvkm_acr *, struct nvkm_acr_hsf *); + void (*bld)(struct nvkm_acr *, struct nvkm_acr_hsf *); +}; + +int gm200_acr_hsfw_load(struct nvkm_acr *, struct nvkm_acr_hsfw *, + struct nvkm_falcon *); +int gm200_acr_hsfw_boot(struct nvkm_acr *, struct nvkm_acr_hsf *, + u32 clear_intr, u32 mbox0_ok); + +int gm200_acr_load_boot(struct nvkm_acr *, struct nvkm_acr_hsf *); + +extern const struct nvkm_acr_hsf_func gm200_acr_unload_0; +int gm200_acr_unload_load(struct nvkm_acr *, struct nvkm_acr_hsfw *); +int gm200_acr_unload_boot(struct nvkm_acr *, struct nvkm_acr_hsf *); +void gm200_acr_hsfw_bld(struct nvkm_acr *, struct nvkm_acr_hsf *); + +extern const struct nvkm_acr_hsf_func gm20b_acr_load_0; + +int gp102_acr_load_load(struct nvkm_acr *, struct nvkm_acr_hsfw *); + +extern const struct nvkm_acr_hsf_func gp108_acr_unload_0; +void gp108_acr_hsfw_bld(struct nvkm_acr *, struct nvkm_acr_hsf *); + +int nvkm_acr_new_(const struct nvkm_acr_fwif *, struct nvkm_device *, int, + struct nvkm_acr **); +int nvkm_acr_hsf_boot(struct nvkm_acr *, const char *name); + +struct nvkm_acr_lsf { + const struct nvkm_acr_lsf_func *func; + struct nvkm_falcon *falcon; + enum nvkm_acr_lsf_id id; + struct list_head head; +}; + +struct nvkm_acr_lsfw *nvkm_acr_lsfw_add(const struct nvkm_acr_lsf_func *, + struct nvkm_acr *, struct nvkm_falcon *, + enum nvkm_acr_lsf_id); +void nvkm_acr_lsfw_del(struct nvkm_acr_lsfw *); +void nvkm_acr_lsfw_del_all(struct nvkm_acr *); +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c new file mode 100644 index 0000000000000000000000000000000000000000..7f4b89d82d320a0b7ec6e0e2fe4b981b569ff2b0 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c @@ -0,0 +1,215 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#include +#include +#include +#include +#include + +#include + +static int +tu102_acr_init(struct nvkm_acr *acr) +{ + int ret = nvkm_acr_hsf_boot(acr, "AHESASC"); + if (ret) + return ret; + + return nvkm_acr_hsf_boot(acr, "ASB"); +} + +static int +tu102_acr_wpr_build(struct nvkm_acr *acr, struct nvkm_acr_lsf *rtos) +{ + struct nvkm_acr_lsfw *lsfw; + u32 offset = 0; + int ret; + + /*XXX: shared sub-WPR headers, fill terminator for now. */ + nvkm_wo32(acr->wpr, 0x200, 0xffffffff); + + /* Fill per-LSF structures. */ + list_for_each_entry(lsfw, &acr->lsfw, head) { + struct lsf_signature_v1 *sig = (void *)lsfw->sig->data; + struct wpr_header_v1 hdr = { + .falcon_id = lsfw->id, + .lsb_offset = lsfw->offset.lsb, + .bootstrap_owner = NVKM_ACR_LSF_GSPLITE, + .lazy_bootstrap = 1, + .bin_version = sig->version, + .status = WPR_HEADER_V1_STATUS_COPY, + }; + + /* Write WPR header. */ + nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr)); + offset += sizeof(hdr); + + /* Write LSB header. */ + ret = gp102_acr_wpr_build_lsb(acr, lsfw); + if (ret) + return ret; + + /* Write ucode image. */ + nvkm_wobj(acr->wpr, lsfw->offset.img, + lsfw->img.data, + lsfw->img.size); + + /* Write bootloader data. */ + lsfw->func->bld_write(acr, lsfw->offset.bld, lsfw); + } + + /* Finalise WPR. */ + nvkm_wo32(acr->wpr, offset, WPR_HEADER_V1_FALCON_ID_INVALID); + return 0; +} + +static int +tu102_acr_hsfw_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf) +{ + return gm200_acr_hsfw_boot(acr, hsf, 0, 0); +} + +static int +tu102_acr_hsfw_nofw(struct nvkm_acr *acr, const char *bl, const char *fw, + const char *name, int version, + const struct nvkm_acr_hsf_fwif *fwif) +{ + return 0; +} + +MODULE_FIRMWARE("nvidia/tu102/acr/unload_bl.bin"); +MODULE_FIRMWARE("nvidia/tu102/acr/ucode_unload.bin"); + +MODULE_FIRMWARE("nvidia/tu104/acr/unload_bl.bin"); +MODULE_FIRMWARE("nvidia/tu104/acr/ucode_unload.bin"); + +MODULE_FIRMWARE("nvidia/tu106/acr/unload_bl.bin"); +MODULE_FIRMWARE("nvidia/tu106/acr/ucode_unload.bin"); + +static const struct nvkm_acr_hsf_fwif +tu102_acr_unload_fwif[] = { + { 0, nvkm_acr_hsfw_load, &gp108_acr_unload_0 }, + { -1, tu102_acr_hsfw_nofw }, + {} +}; + +static int +tu102_acr_asb_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw) +{ + return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->gsp->falcon); +} + +static const struct nvkm_acr_hsf_func +tu102_acr_asb_0 = { + .load = tu102_acr_asb_load, + .boot = tu102_acr_hsfw_boot, + .bld = gp108_acr_hsfw_bld, +}; + +MODULE_FIRMWARE("nvidia/tu102/acr/ucode_asb.bin"); +MODULE_FIRMWARE("nvidia/tu104/acr/ucode_asb.bin"); +MODULE_FIRMWARE("nvidia/tu106/acr/ucode_asb.bin"); + +static const struct nvkm_acr_hsf_fwif +tu102_acr_asb_fwif[] = { + { 0, nvkm_acr_hsfw_load, &tu102_acr_asb_0 }, + { -1, tu102_acr_hsfw_nofw }, + {} +}; + +static const struct nvkm_acr_hsf_func +tu102_acr_ahesasc_0 = { + .load = gp102_acr_load_load, + .boot = tu102_acr_hsfw_boot, + .bld = gp108_acr_hsfw_bld, +}; + +MODULE_FIRMWARE("nvidia/tu102/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/tu102/acr/ucode_ahesasc.bin"); + +MODULE_FIRMWARE("nvidia/tu104/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/tu104/acr/ucode_ahesasc.bin"); + +MODULE_FIRMWARE("nvidia/tu106/acr/bl.bin"); 
+MODULE_FIRMWARE("nvidia/tu106/acr/ucode_ahesasc.bin"); + +static const struct nvkm_acr_hsf_fwif +tu102_acr_ahesasc_fwif[] = { + { 0, nvkm_acr_hsfw_load, &tu102_acr_ahesasc_0 }, + { -1, tu102_acr_hsfw_nofw }, + {} +}; + +static const struct nvkm_acr_func +tu102_acr = { + .ahesasc = tu102_acr_ahesasc_fwif, + .asb = tu102_acr_asb_fwif, + .unload = tu102_acr_unload_fwif, + .wpr_parse = gp102_acr_wpr_parse, + .wpr_layout = gp102_acr_wpr_layout, + .wpr_alloc = gp102_acr_wpr_alloc, + .wpr_patch = gp102_acr_wpr_patch, + .wpr_build = tu102_acr_wpr_build, + .wpr_check = gm200_acr_wpr_check, + .init = tu102_acr_init, +}; + +static int +tu102_acr_load(struct nvkm_acr *acr, int version, + const struct nvkm_acr_fwif *fwif) +{ + struct nvkm_subdev *subdev = &acr->subdev; + const struct nvkm_acr_hsf_fwif *hsfwif; + + hsfwif = nvkm_firmware_load(subdev, fwif->func->ahesasc, "AcrAHESASC", + acr, "acr/bl", "acr/ucode_ahesasc", + "AHESASC"); + if (IS_ERR(hsfwif)) + return PTR_ERR(hsfwif); + + hsfwif = nvkm_firmware_load(subdev, fwif->func->asb, "AcrASB", + acr, "acr/bl", "acr/ucode_asb", "ASB"); + if (IS_ERR(hsfwif)) + return PTR_ERR(hsfwif); + + hsfwif = nvkm_firmware_load(subdev, fwif->func->unload, "AcrUnload", + acr, "acr/unload_bl", "acr/ucode_unload", + "unload"); + if (IS_ERR(hsfwif)) + return PTR_ERR(hsfwif); + + return 0; +} + +static const struct nvkm_acr_fwif +tu102_acr_fwif[] = { + { 0, tu102_acr_load, &tu102_acr }, + {} +}; + +int +tu102_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr) +{ + return nvkm_acr_new_(tu102_acr_fwif, device, index, pacr); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild index 53b9d638f2c8dfa2d9873f3bdd526d49d7260c38..d65ec719f153a35cf147d22dcf7f2ba65f0a08f0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild @@ -2,5 +2,6 @@ nvkm-y += nvkm/subdev/fault/base.o nvkm-y += nvkm/subdev/fault/user.o nvkm-y += nvkm/subdev/fault/gp100.o +nvkm-y += nvkm/subdev/fault/gp10b.o nvkm-y += nvkm/subdev/fault/gv100.o nvkm-y += nvkm/subdev/fault/tu102.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c index ca251560d3e09a500eba1ee8dac18ec057a0a24d..f6dca97140d6bdf877c97e5348bcec3f48ccf5c4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c @@ -108,7 +108,7 @@ nvkm_fault_oneinit_buffer(struct nvkm_fault *fault, int id) return ret; /* Pin fault buffer in BAR2. 
*/ - buffer->addr = nvkm_memory_bar2(buffer->mem); + buffer->addr = fault->func->buffer.pin(buffer); if (buffer->addr == ~0ULL) return -EFAULT; @@ -146,6 +146,7 @@ nvkm_fault_dtor(struct nvkm_subdev *subdev) struct nvkm_fault *fault = nvkm_fault(subdev); int i; + nvkm_notify_fini(&fault->nrpfb); nvkm_event_fini(&fault->event); for (i = 0; i < fault->buffer_nr; i++) { diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c index 4f3c4e0911173eaaa7c00263aca9e21b79726da5..f6b189cc43301f05b6220651f558830e2196a4bc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c @@ -21,25 +21,26 @@ */ #include "priv.h" +#include #include #include -static void +void gp100_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable) { struct nvkm_device *device = buffer->fault->subdev.device; nvkm_mc_intr_mask(device, NVKM_SUBDEV_FAULT, enable); } -static void +void gp100_fault_buffer_fini(struct nvkm_fault_buffer *buffer) { struct nvkm_device *device = buffer->fault->subdev.device; nvkm_mask(device, 0x002a70, 0x00000001, 0x00000000); } -static void +void gp100_fault_buffer_init(struct nvkm_fault_buffer *buffer) { struct nvkm_device *device = buffer->fault->subdev.device; @@ -48,7 +49,12 @@ gp100_fault_buffer_init(struct nvkm_fault_buffer *buffer) nvkm_mask(device, 0x002a70, 0x00000001, 0x00000001); } -static void +u64 gp100_fault_buffer_pin(struct nvkm_fault_buffer *buffer) +{ + return nvkm_memory_bar2(buffer->mem); +} + +void gp100_fault_buffer_info(struct nvkm_fault_buffer *buffer) { buffer->entries = nvkm_rd32(buffer->fault->subdev.device, 0x002a78); @@ -56,7 +62,7 @@ gp100_fault_buffer_info(struct nvkm_fault_buffer *buffer) buffer->put = 0x002a80; } -static void +void gp100_fault_intr(struct nvkm_fault *fault) { nvkm_event_send(&fault->event, 1, 0, NULL, 0); @@ -68,6 +74,7 @@ gp100_fault = { .buffer.nr = 1, .buffer.entry_size = 32, .buffer.info = gp100_fault_buffer_info, + .buffer.pin = gp100_fault_buffer_pin, .buffer.init = gp100_fault_buffer_init, .buffer.fini = gp100_fault_buffer_fini, .buffer.intr = gp100_fault_buffer_intr, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c new file mode 100644 index 0000000000000000000000000000000000000000..9e66d1f7654d4a7ad9843676ff21983e1b1771c9 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2019 NVIDIA Corporation. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include "priv.h" + +#include + +#include + +u64 +gp10b_fault_buffer_pin(struct nvkm_fault_buffer *buffer) +{ + return nvkm_memory_addr(buffer->mem); +} + +static const struct nvkm_fault_func +gp10b_fault = { + .intr = gp100_fault_intr, + .buffer.nr = 1, + .buffer.entry_size = 32, + .buffer.info = gp100_fault_buffer_info, + .buffer.pin = gp10b_fault_buffer_pin, + .buffer.init = gp100_fault_buffer_init, + .buffer.fini = gp100_fault_buffer_fini, + .buffer.intr = gp100_fault_buffer_intr, + .user = { { 0, 0, MAXWELL_FAULT_BUFFER_A }, 0 }, +}; + +int +gp10b_fault_new(struct nvkm_device *device, int index, + struct nvkm_fault **pfault) +{ + return nvkm_fault_new_(&gp10b_fault, device, index, pfault); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c index 6747f09c2dc33b1d32a3047b004446e8219ccd3c..2707be4ffabc37773fd01838eaa3481bfdbde597 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c @@ -214,6 +214,7 @@ gv100_fault = { .buffer.nr = 2, .buffer.entry_size = 32, .buffer.info = gv100_fault_buffer_info, + .buffer.pin = gp100_fault_buffer_pin, .buffer.init = gv100_fault_buffer_init, .buffer.fini = gv100_fault_buffer_fini, .buffer.intr = gv100_fault_buffer_intr, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h index 975e66ac63441e46242e46ae2d6e87b9111472f1..f6f1dd7eee1ff5dd394c4b3f5efdc1ce3cb039c8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h @@ -30,6 +30,7 @@ struct nvkm_fault_func { int nr; u32 entry_size; void (*info)(struct nvkm_fault_buffer *); + u64 (*pin)(struct nvkm_fault_buffer *); void (*init)(struct nvkm_fault_buffer *); void (*fini)(struct nvkm_fault_buffer *); void (*intr)(struct nvkm_fault_buffer *, bool enable); @@ -40,6 +41,15 @@ struct nvkm_fault_func { } user; }; +void gp100_fault_buffer_intr(struct nvkm_fault_buffer *, bool enable); +void gp100_fault_buffer_fini(struct nvkm_fault_buffer *); +void gp100_fault_buffer_init(struct nvkm_fault_buffer *); +u64 gp100_fault_buffer_pin(struct nvkm_fault_buffer *); +void gp100_fault_buffer_info(struct nvkm_fault_buffer *); +void gp100_fault_intr(struct nvkm_fault *); + +u64 gp10b_fault_buffer_pin(struct nvkm_fault_buffer *); + int gv100_fault_oneinit(struct nvkm_fault *); int nvkm_ufault_new(struct nvkm_device *, const struct nvkm_oclass *, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c index fa1dfe5692b05773fe4cae8b0df250481712aa82..45a6a68b9f48a651b7e575fe907fd464b137ef8e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c @@ -154,6 +154,7 @@ tu102_fault = { .buffer.nr = 2, .buffer.entry_size = 32, .buffer.info = tu102_fault_buffer_info, + .buffer.pin = gp100_fault_buffer_pin, .buffer.init = tu102_fault_buffer_init, .buffer.fini = tu102_fault_buffer_fini, .buffer.intr = tu102_fault_buffer_intr, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c index 
b2bb5a3ccb02b34a7158eee49d7fec2aa5559626..5940e0dea2f8b96f3d9f548d33dc75d14a088e6c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c @@ -125,6 +125,34 @@ nvkm_fb_oneinit(struct nvkm_subdev *subdev) return nvkm_mm_init(&fb->tags, 0, 0, tags, 1); } +static int +nvkm_fb_init_scrub_vpr(struct nvkm_fb *fb) +{ + struct nvkm_subdev *subdev = &fb->subdev; + int ret; + + nvkm_debug(subdev, "VPR locked, running scrubber binary\n"); + + if (!fb->vpr_scrubber.size) { + nvkm_warn(subdev, "VPR locked, but no scrubber binary!\n"); + return 0; + } + + ret = fb->func->vpr.scrub(fb); + if (ret) { + nvkm_error(subdev, "VPR scrubber binary failed\n"); + return ret; + } + + if (fb->func->vpr.scrub_required(fb)) { + nvkm_error(subdev, "VPR still locked after scrub!\n"); + return -EIO; + } + + nvkm_debug(subdev, "VPR scrubber binary successful\n"); + return 0; +} + static int nvkm_fb_init(struct nvkm_subdev *subdev) { @@ -154,6 +182,14 @@ nvkm_fb_init(struct nvkm_subdev *subdev) if (fb->func->init_unkn) fb->func->init_unkn(fb); + + if (fb->func->vpr.scrub_required && + fb->func->vpr.scrub_required(fb)) { + ret = nvkm_fb_init_scrub_vpr(fb); + if (ret) + return ret; + } + return 0; } @@ -172,6 +208,8 @@ nvkm_fb_dtor(struct nvkm_subdev *subdev) nvkm_mm_fini(&fb->tags); nvkm_ram_del(&fb->ram); + nvkm_blob_dtor(&fb->vpr_scrubber); + if (fb->func->dtor) return fb->func->dtor(fb); return fb; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c index b4d74e81567447078fef479f49a722a8cd0f7cfc..fc8c93aa3da52e6b85fcb99a8d83a1589023b0c6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c @@ -24,7 +24,81 @@ #include "gf100.h" #include "ram.h" +#include #include +#include +#include +#include + +int +gp102_fb_vpr_scrub(struct nvkm_fb *fb) +{ + struct nvkm_subdev *subdev = &fb->subdev; + struct nvkm_device *device = subdev->device; + struct nvkm_falcon *falcon = &device->nvdec[0]->falcon; + struct nvkm_blob *blob = &fb->vpr_scrubber; + const struct nvfw_bin_hdr *hsbin_hdr; + const struct nvfw_hs_header *fw_hdr; + const struct nvfw_hs_load_header *lhdr; + void *scrub_data; + u32 patch_loc, patch_sig; + int ret; + + nvkm_falcon_get(falcon, subdev); + + hsbin_hdr = nvfw_bin_hdr(subdev, blob->data); + fw_hdr = nvfw_hs_header(subdev, blob->data + hsbin_hdr->header_offset); + lhdr = nvfw_hs_load_header(subdev, blob->data + fw_hdr->hdr_offset); + scrub_data = blob->data + hsbin_hdr->data_offset; + + patch_loc = *(u32 *)(blob->data + fw_hdr->patch_loc); + patch_sig = *(u32 *)(blob->data + fw_hdr->patch_sig); + if (falcon->debug) { + memcpy(scrub_data + patch_loc, + blob->data + fw_hdr->sig_dbg_offset + patch_sig, + fw_hdr->sig_dbg_size); + } else { + memcpy(scrub_data + patch_loc, + blob->data + fw_hdr->sig_prod_offset + patch_sig, + fw_hdr->sig_prod_size); + } + + nvkm_falcon_reset(falcon); + nvkm_falcon_bind_context(falcon, NULL); + + nvkm_falcon_load_imem(falcon, scrub_data, lhdr->non_sec_code_off, + lhdr->non_sec_code_size, + lhdr->non_sec_code_off >> 8, 0, false); + nvkm_falcon_load_imem(falcon, scrub_data + lhdr->apps[0], + ALIGN(lhdr->apps[0], 0x100), + lhdr->apps[1], + lhdr->apps[0] >> 8, 0, true); + nvkm_falcon_load_dmem(falcon, scrub_data + lhdr->data_dma_base, 0, + lhdr->data_size, 0); + + nvkm_falcon_set_start_addr(falcon, 0x0); + nvkm_falcon_start(falcon); + + ret = nvkm_falcon_wait_for_halt(falcon, 500); + if (ret < 0) { + ret = -ETIMEDOUT; + goto 
end; + } + + /* put nvdec in clean state - without reset it will remain in HS mode */ + nvkm_falcon_reset(falcon); +end: + nvkm_falcon_put(falcon, subdev); + return ret; +} + +bool +gp102_fb_vpr_scrub_required(struct nvkm_fb *fb) +{ + struct nvkm_device *device = fb->subdev.device; + nvkm_wr32(device, 0x100cd0, 0x2); + return (nvkm_rd32(device, 0x100cd0) & 0x00000010) != 0; +} static const struct nvkm_fb_func gp102_fb = { @@ -33,11 +107,32 @@ gp102_fb = { .init = gp100_fb_init, .init_remapper = gp100_fb_init_remapper, .init_page = gm200_fb_init_page, + .vpr.scrub_required = gp102_fb_vpr_scrub_required, + .vpr.scrub = gp102_fb_vpr_scrub, .ram_new = gp100_ram_new, }; +int +gp102_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device, + int index, struct nvkm_fb **pfb) +{ + int ret = gf100_fb_new_(func, device, index, pfb); + if (ret) + return ret; + + nvkm_firmware_load_blob(&(*pfb)->subdev, "nvdec/scrubber", "", 0, + &(*pfb)->vpr_scrubber); + return 0; +} + int gp102_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) { - return gf100_fb_new_(&gp102_fb, device, index, pfb); + return gp102_fb_new_(&gp102_fb, device, index, pfb); } + +MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin"); +MODULE_FIRMWARE("nvidia/gp104/nvdec/scrubber.bin"); +MODULE_FIRMWARE("nvidia/gp106/nvdec/scrubber.bin"); +MODULE_FIRMWARE("nvidia/gp107/nvdec/scrubber.bin"); +MODULE_FIRMWARE("nvidia/gp108/nvdec/scrubber.bin"); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c index 3c5e02e9794ae5a9c6e04e508c8c388425cc5cf4..389bad312bf279bd15102d121728e2a11607473e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c @@ -35,6 +35,8 @@ gv100_fb = { .init = gp100_fb_init, .init_page = gv100_fb_init_page, .init_unkn = gp100_fb_init_unkn, + .vpr.scrub_required = gp102_fb_vpr_scrub_required, + .vpr.scrub = gp102_fb_vpr_scrub, .ram_new = gp100_ram_new, .default_bigpage = 16, }; @@ -42,5 +44,10 @@ gv100_fb = { int gv100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) { - return gf100_fb_new_(&gv100_fb, device, index, pfb); + return gp102_fb_new_(&gv100_fb, device, index, pfb); } + +MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin"); +MODULE_FIRMWARE("nvidia/tu102/nvdec/scrubber.bin"); +MODULE_FIRMWARE("nvidia/tu104/nvdec/scrubber.bin"); +MODULE_FIRMWARE("nvidia/tu106/nvdec/scrubber.bin"); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h index c4e9f55af283387477261acd3a1b5ae927040971..5be9c563350d7b684e4bdb3a4ca5f90805d09d28 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h @@ -16,6 +16,11 @@ struct nvkm_fb_func { void (*init_unkn)(struct nvkm_fb *); void (*intr)(struct nvkm_fb *); + struct { + bool (*scrub_required)(struct nvkm_fb *); + int (*scrub)(struct nvkm_fb *); + } vpr; + struct { int regions; void (*init)(struct nvkm_fb *, int i, u32 addr, u32 size, @@ -72,4 +77,9 @@ int gm200_fb_init_page(struct nvkm_fb *); void gp100_fb_init_remapper(struct nvkm_fb *); void gp100_fb_init_unkn(struct nvkm_fb *); + +int gp102_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *, int, + struct nvkm_fb **); +bool gp102_fb_vpr_scrub_required(struct nvkm_fb *); +int gp102_fb_vpr_scrub(struct nvkm_fb *); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c index 
ac87a3b6b7c9e4de1c837c5e03ab2a472d0a9945..ba43fe158b2211c9f6d5380859573b0d1b005726 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c @@ -655,7 +655,7 @@ gf100_ram_new_(const struct nvkm_ram_func *func, static const struct nvkm_ram_func gf100_ram = { - .upper = 0x0200000000, + .upper = 0x0200000000ULL, .probe_fbp = gf100_ram_probe_fbp, .probe_fbp_amount = gf100_ram_probe_fbp_amount, .probe_fbpa_amount = gf100_ram_probe_fbpa_amount, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c index 70a06e3cd55a2b4f1326589eb4f39774feacf858..d97fa43efb912c2816bf9d032686d8d4e1a1c55b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c @@ -43,7 +43,7 @@ gf108_ram_probe_fbp_amount(const struct nvkm_ram_func *func, u32 fbpao, static const struct nvkm_ram_func gf108_ram = { - .upper = 0x0200000000, + .upper = 0x0200000000ULL, .probe_fbp = gf100_ram_probe_fbp, .probe_fbp_amount = gf108_ram_probe_fbp_amount, .probe_fbpa_amount = gf100_ram_probe_fbpa_amount, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c index 456aed1f2a027b06ad444a84eb3c0a98e21b4440..d350d92852d285ac65293f73224b4a2a02c3e09b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c @@ -1698,7 +1698,7 @@ gk104_ram_new_(const struct nvkm_ram_func *func, struct nvkm_fb *fb, static const struct nvkm_ram_func gk104_ram = { - .upper = 0x0200000000, + .upper = 0x0200000000ULL, .probe_fbp = gf100_ram_probe_fbp, .probe_fbp_amount = gf108_ram_probe_fbp_amount, .probe_fbpa_amount = gf100_ram_probe_fbpa_amount, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c index 27c68e3f977211bf393799894f81912541e7297c..be91da854dca7fa467c7178adfa2b7a3c263758e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c @@ -33,7 +33,7 @@ gm107_ram_probe_fbp(const struct nvkm_ram_func *func, static const struct nvkm_ram_func gm107_ram = { - .upper = 0x1000000000, + .upper = 0x1000000000ULL, .probe_fbp = gm107_ram_probe_fbp, .probe_fbp_amount = gf108_ram_probe_fbp_amount, .probe_fbpa_amount = gf100_ram_probe_fbpa_amount, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c index 6b0cac1fe7b441bf0fdb23c24e3783681f4372ce..8f91ea91ee25f7cb8509817f8398ec6c972cca93 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c @@ -48,7 +48,7 @@ gm200_ram_probe_fbp_amount(const struct nvkm_ram_func *func, u32 fbpao, static const struct nvkm_ram_func gm200_ram = { - .upper = 0x1000000000, + .upper = 0x1000000000ULL, .probe_fbp = gm107_ram_probe_fbp, .probe_fbp_amount = gm200_ram_probe_fbp_amount, .probe_fbpa_amount = gf100_ram_probe_fbpa_amount, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c index adb62a6beb638a580ce8fa29db9260282c79a92d..378f6fb70990778f88ba0a76e09f1114a294e97c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c @@ -79,7 +79,7 @@ gp100_ram_probe_fbpa(struct nvkm_device *device, int fbpa) static const struct nvkm_ram_func gp100_ram = { - .upper = 0x1000000000, + .upper = 
0x1000000000ULL, .probe_fbp = gm107_ram_probe_fbp, .probe_fbp_amount = gm200_ram_probe_fbp_amount, .probe_fbpa_amount = gp100_ram_probe_fbpa, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild index e7c4f068936e35856d142a43f8d8960263f3a862..67cc3b320169a6b7c18dff37a5acb2ece02d8c4e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild @@ -1,2 +1,3 @@ # SPDX-License-Identifier: MIT +nvkm-y += nvkm/subdev/gsp/base.o nvkm-y += nvkm/subdev/gsp/gv100.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c new file mode 100644 index 0000000000000000000000000000000000000000..5a32df0f9992582ff5f9a71fa304605fd510fcef --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c @@ -0,0 +1,59 @@ +/* + * Copyright 2019 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" +#include +#include +#include +#include + +static void * +nvkm_gsp_dtor(struct nvkm_subdev *subdev) +{ + struct nvkm_gsp *gsp = nvkm_gsp(subdev); + nvkm_falcon_dtor(&gsp->falcon); + return gsp; +} + +static const struct nvkm_subdev_func +nvkm_gsp = { + .dtor = nvkm_gsp_dtor, +}; + +int +nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device, + int index, struct nvkm_gsp **pgsp) +{ + struct nvkm_gsp *gsp; + + if (!(gsp = *pgsp = kzalloc(sizeof(*gsp), GFP_KERNEL))) + return -ENOMEM; + + nvkm_subdev_ctor(&nvkm_gsp, device, index, &gsp->subdev); + + fwif = nvkm_firmware_load(&gsp->subdev, fwif, "Gsp", gsp); + if (IS_ERR(fwif)) + return PTR_ERR(fwif); + + return nvkm_falcon_ctor(fwif->flcn, &gsp->subdev, + nvkm_subdev_name[gsp->subdev.index], 0, + &gsp->falcon); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c index dccfaf1162e21e32cb1d14c8e861abd196afbb72..2114f9b00a28f0718448c0370fa8b9fe18b825ba 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c @@ -19,44 +19,37 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
*/ -#include -#include -#include +#include "priv.h" + +static const struct nvkm_falcon_func +gv100_gsp_flcn = { + .fbif = 0x600, + .load_imem = nvkm_falcon_v1_load_imem, + .load_dmem = nvkm_falcon_v1_load_dmem, + .read_dmem = nvkm_falcon_v1_read_dmem, + .bind_context = gp102_sec2_flcn_bind_context, + .wait_for_halt = nvkm_falcon_v1_wait_for_halt, + .clear_interrupt = nvkm_falcon_v1_clear_interrupt, + .set_start_addr = nvkm_falcon_v1_set_start_addr, + .start = nvkm_falcon_v1_start, + .enable = gp102_sec2_flcn_enable, + .disable = nvkm_falcon_v1_disable, +}; static int -gv100_gsp_oneinit(struct nvkm_subdev *subdev) -{ - struct nvkm_gsp *gsp = nvkm_gsp(subdev); - - gsp->addr = nvkm_top_addr(subdev->device, subdev->index); - if (!gsp->addr) - return -EINVAL; - - return nvkm_falcon_v1_new(subdev, "GSP", gsp->addr, &gsp->falcon); -} - -static void * -gv100_gsp_dtor(struct nvkm_subdev *subdev) +gv100_gsp_nofw(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif) { - struct nvkm_gsp *gsp = nvkm_gsp(subdev); - nvkm_falcon_del(&gsp->falcon); - return gsp; + return 0; } -static const struct nvkm_subdev_func -gv100_gsp = { - .dtor = gv100_gsp_dtor, - .oneinit = gv100_gsp_oneinit, +struct nvkm_gsp_fwif +gv100_gsp[] = { + { -1, gv100_gsp_nofw, &gv100_gsp_flcn }, + {} }; int gv100_gsp_new(struct nvkm_device *device, int index, struct nvkm_gsp **pgsp) { - struct nvkm_gsp *gsp; - - if (!(gsp = *pgsp = kzalloc(sizeof(*gsp), GFP_KERNEL))) - return -ENOMEM; - - nvkm_subdev_ctor(&gv100_gsp, device, index, &gsp->subdev); - return 0; + return nvkm_gsp_new_(gv100_gsp, device, index, pgsp); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h new file mode 100644 index 0000000000000000000000000000000000000000..92820fb997c1c887a9fcf8849b9b3d32128244a4 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __NVKM_GSP_PRIV_H__ +#define __NVKM_GSP_PRIV_H__ +#include +enum nvkm_acr_lsf_id; + +struct nvkm_gsp_fwif { + int version; + int (*load)(struct nvkm_gsp *, int ver, const struct nvkm_gsp_fwif *); + const struct nvkm_falcon_func *flcn; +}; + +int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, int, + struct nvkm_gsp **); +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild index 2b6d36ea7067bc85a0c0cd1697dd3467060c0fb3..728d75010847a8a5f7e1eb0aedc362393ef93f94 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild @@ -6,3 +6,4 @@ nvkm-y += nvkm/subdev/ltc/gm107.o nvkm-y += nvkm/subdev/ltc/gm200.o nvkm-y += nvkm/subdev/ltc/gp100.o nvkm-y += nvkm/subdev/ltc/gp102.o +nvkm-y += nvkm/subdev/ltc/gp10b.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c new file mode 100644 index 0000000000000000000000000000000000000000..c0063c7caa50a678939ee608cc205c3aa7e9bf35 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2019 NVIDIA Corporation. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Thierry Reding + */ + +#include "priv.h" + +static void +gp10b_ltc_init(struct nvkm_ltc *ltc) +{ + struct nvkm_device *device = ltc->subdev.device; + struct iommu_fwspec *spec; + + nvkm_wr32(device, 0x17e27c, ltc->ltc_nr); + nvkm_wr32(device, 0x17e000, ltc->ltc_nr); + nvkm_wr32(device, 0x100800, ltc->ltc_nr); + + spec = dev_iommu_fwspec_get(device->dev); + if (spec) { + u32 sid = spec->ids[0] & 0xffff; + + /* stream ID */ + nvkm_wr32(device, 0x160000, sid << 2); + } +} + +static const struct nvkm_ltc_func +gp10b_ltc = { + .oneinit = gp100_ltc_oneinit, + .init = gp10b_ltc_init, + .intr = gp100_ltc_intr, + .cbc_clear = gm107_ltc_cbc_clear, + .cbc_wait = gm107_ltc_cbc_wait, + .zbc = 16, + .zbc_clear_color = gm107_ltc_zbc_clear_color, + .zbc_clear_depth = gm107_ltc_zbc_clear_depth, + .zbc_clear_stencil = gp102_ltc_zbc_clear_stencil, + .invalidate = gf100_ltc_invalidate, + .flush = gf100_ltc_flush, +}; + +int +gp10b_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc) +{ + return nvkm_ltc_new_(&gp10b_ltc, device, index, pltc); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h index 2fcf18e46ce3d7a9418db3e114abcdc363f05bd4..eca5a711b1b83bb870561baa22f88c4e475c9212 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h @@ -46,4 +46,6 @@ void gm107_ltc_zbc_clear_depth(struct nvkm_ltc *, int, const u32); int gp100_ltc_oneinit(struct nvkm_ltc *); void gp100_ltc_init(struct nvkm_ltc *); void gp100_ltc_intr(struct nvkm_ltc *); + +void gp102_ltc_zbc_clear_stencil(struct nvkm_ltc *, int, const u32); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c index 2d075246dc46073564dd316154f66fe0cf0f1096..2cd5ec81c0d0ad6b4976f640394e38262201b889 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c @@ -30,7 +30,7 @@ * The value 0xff represents an invalid storage type. 
*/ const u8 * -gf100_mmu_kind(struct nvkm_mmu *mmu, int *count) +gf100_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid) { static const u8 kind[256] = { @@ -69,6 +69,7 @@ gf100_mmu_kind(struct nvkm_mmu *mmu, int *count) }; *count = ARRAY_SIZE(kind); + *invalid = 0xff; return kind; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c index dbf644ebac97bd2f141755e7658d10a8b65d0ea3..83990c83f9f81728490b8eed1d54f5dfbfbbf01c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c @@ -27,7 +27,7 @@ #include const u8 * -gm200_mmu_kind(struct nvkm_mmu *mmu, int *count) +gm200_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid) { static const u8 kind[256] = { @@ -65,6 +65,7 @@ gm200_mmu_kind(struct nvkm_mmu *mmu, int *count) 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff }; *count = ARRAY_SIZE(kind); + *invalid = 0xff; return kind; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c index db3dfbbb2aa08a8af72527ce13ae3b9befaf6080..c0083ddda65a08a534ec3116e0d759158dac9ab4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c @@ -27,7 +27,7 @@ #include const u8 * -nv50_mmu_kind(struct nvkm_mmu *base, int *count) +nv50_mmu_kind(struct nvkm_mmu *base, int *count, u8 *invalid) { /* 0x01: no bank swizzle * 0x02: bank swizzled @@ -57,6 +57,7 @@ nv50_mmu_kind(struct nvkm_mmu *base, int *count) 0x01, 0x01, 0x02, 0x02, 0x01, 0x01, 0x7f, 0x7f }; *count = ARRAY_SIZE(kind); + *invalid = 0x7f; return kind; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h index 07f2fcd18f3df7c402aa690ae2db995e0a80ff73..479b023442710a41d23c7fc44f0723b7f6e6a20b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h @@ -35,17 +35,17 @@ struct nvkm_mmu_func { u32 pd_offset; } vmm; - const u8 *(*kind)(struct nvkm_mmu *, int *count); + const u8 *(*kind)(struct nvkm_mmu *, int *count, u8 *invalid); bool kind_sys; }; extern const struct nvkm_mmu_func nv04_mmu; -const u8 *nv50_mmu_kind(struct nvkm_mmu *, int *count); +const u8 *nv50_mmu_kind(struct nvkm_mmu *, int *count, u8 *invalid); -const u8 *gf100_mmu_kind(struct nvkm_mmu *, int *count); +const u8 *gf100_mmu_kind(struct nvkm_mmu *, int *count, u8 *invalid); -const u8 *gm200_mmu_kind(struct nvkm_mmu *, int *); +const u8 *gm200_mmu_kind(struct nvkm_mmu *, int *, u8 *); struct nvkm_mmu_pt { union { diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c index c0db0ce10cba153f1637b5d77ba1a75a067fbfdf..b21e82eb0916277d6aca2f2a3483fea7bcf5b26c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c @@ -1,5 +1,6 @@ /* * Copyright 2018 Red Hat Inc. + * Copyright 2019 NVIDIA Corporation. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -26,13 +27,26 @@ #include +const u8 * +tu102_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid) +{ + static const u8 + kind[16] = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00 */ + 0x06, 0x06, 0x02, 0x01, 0x03, 0x04, 0x05, 0x07, + }; + *count = ARRAY_SIZE(kind); + *invalid = 0x07; + return kind; +} + static const struct nvkm_mmu_func tu102_mmu = { .dma_bits = 47, .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}}, .mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map }, .vmm = {{ -1, 0, NVIF_CLASS_VMM_GP100}, tu102_vmm_new }, - .kind = gm200_mmu_kind, + .kind = tu102_mmu_kind, .kind_sys = true, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c index 353f10f92b774f56defd53720f93defaa05aba57..0e4b8941da372ce5d55c386546f988283b0b7621 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c @@ -111,15 +111,17 @@ nvkm_ummu_kind(struct nvkm_ummu *ummu, void *argv, u32 argc) } *args = argv; const u8 *kind = NULL; int ret = -ENOSYS, count = 0; + u8 kind_inv = 0; if (mmu->func->kind) - kind = mmu->func->kind(mmu, &count); + kind = mmu->func->kind(mmu, &count, &kind_inv); if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) { if (argc != args->v0.count * sizeof(*args->v0.data)) return -EINVAL; if (args->v0.count > count) return -EINVAL; + args->v0.kind_inv = kind_inv; memcpy(args->v0.data, kind, args->v0.count); } else return ret; @@ -157,9 +159,10 @@ nvkm_ummu_new(struct nvkm_device *device, const struct nvkm_oclass *oclass, struct nvkm_mmu *mmu = device->mmu; struct nvkm_ummu *ummu; int ret = -ENOSYS, kinds = 0; + u8 unused = 0; if (mmu->func->kind) - mmu->func->kind(mmu, &kinds); + mmu->func->kind(mmu, &kinds, &unused); if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) { args->v0.dmabits = mmu->dma_bits; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c index ab6424faf84cb39181705b68c2a06bfdefc82299..6a2d9eb8e1ea8fbb6885c5064a1e2e988afd8782 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c @@ -247,7 +247,7 @@ gf100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc, } *args = argv; struct nvkm_device *device = vmm->mmu->subdev.device; struct nvkm_memory *memory = map->memory; - u8 kind, priv, ro, vol; + u8 kind, kind_inv, priv, ro, vol; int kindn, aper, ret = -ENOSYS; const u8 *kindm; @@ -274,8 +274,8 @@ gf100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc, if (WARN_ON(aper < 0)) return aper; - kindm = vmm->mmu->func->kind(vmm->mmu, &kindn); - if (kind >= kindn || kindm[kind] == 0xff) { + kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv); + if (kind >= kindn || kindm[kind] == kind_inv) { VMM_DEBUG(vmm, "kind %02x", kind); return -EINVAL; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c index b4f519768d5e082b1b8c0754b12666656e5b730e..d8628756554281520da40a050fe27c109ed27746 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c @@ -320,7 +320,7 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc, } *args = argv; struct nvkm_device *device = vmm->mmu->subdev.device; struct 
nvkm_memory *memory = map->memory; - u8 kind, priv, ro, vol; + u8 kind, kind_inv, priv, ro, vol; int kindn, aper, ret = -ENOSYS; const u8 *kindm; @@ -347,8 +347,8 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc, if (WARN_ON(aper < 0)) return aper; - kindm = vmm->mmu->func->kind(vmm->mmu, &kindn); - if (kind >= kindn || kindm[kind] == 0xff) { + kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv); + if (kind >= kindn || kindm[kind] == kind_inv) { VMM_DEBUG(vmm, "kind %02x", kind); return -EINVAL; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c index c98afe3134eed65cba3e542cf6a1d4b4d2c35f20..2d89e27e8e9e5eb60e7164957dc1e36c27b4fc3d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c @@ -235,7 +235,7 @@ nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc, struct nvkm_device *device = vmm->mmu->subdev.device; struct nvkm_ram *ram = device->fb->ram; struct nvkm_memory *memory = map->memory; - u8 aper, kind, comp, priv, ro; + u8 aper, kind, kind_inv, comp, priv, ro; int kindn, ret = -ENOSYS; const u8 *kindm; @@ -278,8 +278,8 @@ nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc, return -EINVAL; } - kindm = vmm->mmu->func->kind(vmm->mmu, &kindn); - if (kind >= kindn || kindm[kind] == 0x7f) { + kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv); + if (kind >= kindn || kindm[kind] == kind_inv) { VMM_DEBUG(vmm, "kind %02x", kind); return -EINVAL; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild index e37b6e45eaa28218cb8f374cdcd130481e192755..a76c2a7bd69685f6e4587c0e17d765699a3038dd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild @@ -12,3 +12,4 @@ nvkm-y += nvkm/subdev/pmu/gm107.o nvkm-y += nvkm/subdev/pmu/gm20b.o nvkm-y += nvkm/subdev/pmu/gp100.o nvkm-y += nvkm/subdev/pmu/gp102.o +nvkm-y += nvkm/subdev/pmu/gp10b.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c index ea2e11771bca522e820151b204c098b230a56b22..a0fe607c9c07fc5f59f88c45ff4c4538dc5badc2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c @@ -23,7 +23,7 @@ */ #include "priv.h" -#include <core/msgqueue.h> +#include <core/firmware.h> #include <subdev/timer.h> bool @@ -85,6 +85,12 @@ nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend) pmu->func->fini(pmu); flush_work(&pmu->recv.work); + + reinit_completion(&pmu->wpr_ready); + + nvkm_falcon_cmdq_fini(pmu->lpq); + nvkm_falcon_cmdq_fini(pmu->hpq); + pmu->initmsg_received = false; return 0; } @@ -133,19 +139,15 @@ nvkm_pmu_init(struct nvkm_subdev *subdev) return ret; } -static int -nvkm_pmu_oneinit(struct nvkm_subdev *subdev) -{ - struct nvkm_pmu *pmu = nvkm_pmu(subdev); - return nvkm_falcon_v1_new(&pmu->subdev, "PMU", 0x10a000, &pmu->falcon); -} - static void * nvkm_pmu_dtor(struct nvkm_subdev *subdev) { struct nvkm_pmu *pmu = nvkm_pmu(subdev); - nvkm_msgqueue_del(&pmu->queue); - nvkm_falcon_del(&pmu->falcon); + nvkm_falcon_msgq_del(&pmu->msgq); + nvkm_falcon_cmdq_del(&pmu->lpq); + nvkm_falcon_cmdq_del(&pmu->hpq); + nvkm_falcon_qmgr_del(&pmu->qmgr); + nvkm_falcon_dtor(&pmu->falcon); return nvkm_pmu(subdev); } @@ -153,29 +155,50 @@ static const struct nvkm_subdev_func nvkm_pmu = { .dtor = nvkm_pmu_dtor, .preinit = nvkm_pmu_preinit, - .oneinit = nvkm_pmu_oneinit, .init = nvkm_pmu_init, .fini = nvkm_pmu_fini, .intr =
nvkm_pmu_intr, }; int -nvkm_pmu_ctor(const struct nvkm_pmu_func *func, struct nvkm_device *device, +nvkm_pmu_ctor(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device, int index, struct nvkm_pmu *pmu) { + int ret; + nvkm_subdev_ctor(&nvkm_pmu, device, index, &pmu->subdev); - pmu->func = func; + INIT_WORK(&pmu->recv.work, nvkm_pmu_recv); init_waitqueue_head(&pmu->recv.wait); + + fwif = nvkm_firmware_load(&pmu->subdev, fwif, "Pmu", pmu); + if (IS_ERR(fwif)) + return PTR_ERR(fwif); + + pmu->func = fwif->func; + + ret = nvkm_falcon_ctor(pmu->func->flcn, &pmu->subdev, + nvkm_subdev_name[pmu->subdev.index], 0x10a000, + &pmu->falcon); + if (ret) + return ret; + + if ((ret = nvkm_falcon_qmgr_new(&pmu->falcon, &pmu->qmgr)) || + (ret = nvkm_falcon_cmdq_new(pmu->qmgr, "hpq", &pmu->hpq)) || + (ret = nvkm_falcon_cmdq_new(pmu->qmgr, "lpq", &pmu->lpq)) || + (ret = nvkm_falcon_msgq_new(pmu->qmgr, "msgq", &pmu->msgq))) + return ret; + + init_completion(&pmu->wpr_ready); return 0; } int -nvkm_pmu_new_(const struct nvkm_pmu_func *func, struct nvkm_device *device, +nvkm_pmu_new_(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) { struct nvkm_pmu *pmu; if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL))) return -ENOMEM; - return nvkm_pmu_ctor(func, device, index, *ppmu); + return nvkm_pmu_ctor(fwif, device, index, *ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c index 0b458656e870779d041ab3cd736f567f98d065c0..3ecb3d9cbcf234674a42c22d313f10c779cbf1df 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c @@ -42,6 +42,7 @@ gf100_pmu_enabled(struct nvkm_pmu *pmu) static const struct nvkm_pmu_func gf100_pmu = { + .flcn = &gt215_pmu_flcn, .code.data = gf100_pmu_code, .code.size = sizeof(gf100_pmu_code), .data.data = gf100_pmu_data, @@ -55,8 +56,20 @@ gf100_pmu = { .recv = gt215_pmu_recv, }; +int +gf100_pmu_nofw(struct nvkm_pmu *pmu, int ver, const struct nvkm_pmu_fwif *fwif) +{ + return 0; +} + +static const struct nvkm_pmu_fwif +gf100_pmu_fwif[] = { + { -1, gf100_pmu_nofw, &gf100_pmu }, + {} +}; + int gf100_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(&gf100_pmu, device, index, ppmu); + return nvkm_pmu_new_(gf100_pmu_fwif, device, index, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c index 3dfa79d4fb13a07781791ec0eeb6ab8bac48d68e..8dd0271aaaee6af471fad5417811d5082189cd39 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c @@ -26,6 +26,7 @@ static const struct nvkm_pmu_func gf119_pmu = { + .flcn = &gt215_pmu_flcn, .code.data = gf119_pmu_code, .code.size = sizeof(gf119_pmu_code), .data.data = gf119_pmu_data, @@ -39,8 +40,14 @@ gf119_pmu = { .recv = gt215_pmu_recv, }; +static const struct nvkm_pmu_fwif +gf119_pmu_fwif[] = { + { -1, gf100_pmu_nofw, &gf119_pmu }, + {} +}; + int gf119_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(&gf119_pmu, device, index, ppmu); + return nvkm_pmu_new_(gf119_pmu_fwif, device, index, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c index 8f7ec10fd2a42609fa407a5a56c4eb5b398880f5..8b70cc17a6341cd3eeb3820738532bb2283097ac 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c +++
b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c @@ -105,6 +105,7 @@ gk104_pmu_pgob(struct nvkm_pmu *pmu, bool enable) static const struct nvkm_pmu_func gk104_pmu = { + .flcn = &gt215_pmu_flcn, .code.data = gk104_pmu_code, .code.size = sizeof(gk104_pmu_code), .data.data = gk104_pmu_data, @@ -119,8 +120,14 @@ gk104_pmu = { .pgob = gk104_pmu_pgob, }; +static const struct nvkm_pmu_fwif +gk104_pmu_fwif[] = { + { -1, gf100_pmu_nofw, &gk104_pmu }, + {} +}; + int gk104_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(&gk104_pmu, device, index, ppmu); + return nvkm_pmu_new_(gk104_pmu_fwif, device, index, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c index 345741d55a56b126665576a7e66fe25977e10841..0081f2141b1083abf7fa8b67150150781edb05a9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c @@ -84,6 +84,7 @@ gk110_pmu_pgob(struct nvkm_pmu *pmu, bool enable) static const struct nvkm_pmu_func gk110_pmu = { + .flcn = &gt215_pmu_flcn, .code.data = gk110_pmu_code, .code.size = sizeof(gk110_pmu_code), .data.data = gk110_pmu_data, @@ -98,8 +99,14 @@ gk110_pmu = { .pgob = gk110_pmu_pgob, }; +static const struct nvkm_pmu_fwif +gk110_pmu_fwif[] = { + { -1, gf100_pmu_nofw, &gk110_pmu }, + {} +}; + int gk110_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(&gk110_pmu, device, index, ppmu); + return nvkm_pmu_new_(gk110_pmu_fwif, device, index, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c index e4acf7876ea1b541a6c0e9eefa16ef77a857e2b5..b227c701a5e7c6d9c81d1dd356c0f73bf09e430e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c @@ -26,6 +26,7 @@ static const struct nvkm_pmu_func gk208_pmu = { + .flcn = &gt215_pmu_flcn, .code.data = gk208_pmu_code, .code.size = sizeof(gk208_pmu_code), .data.data = gk208_pmu_data, @@ -40,8 +41,14 @@ gk208_pmu = { .pgob = gk110_pmu_pgob, }; +static const struct nvkm_pmu_fwif +gk208_pmu_fwif[] = { + { -1, gf100_pmu_nofw, &gk208_pmu }, + {} +}; + int gk208_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(&gk208_pmu, device, index, ppmu); + return nvkm_pmu_new_(gk208_pmu_fwif, device, index, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c index 05e81855c367f442b8eb0292d8fe793bc9b46a80..26c1adf8f44c6f742c6baeededf2f52636907a37 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c @@ -95,7 +95,7 @@ static void gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu, struct gk20a_pmu_dvfs_dev_status *status) { - struct nvkm_falcon *falcon = pmu->base.falcon; + struct nvkm_falcon *falcon = &pmu->base.falcon; status->busy = nvkm_falcon_rd32(falcon, 0x508 + (BUSY_SLOT * 0x10)); status->total= nvkm_falcon_rd32(falcon, 0x508 + (CLK_SLOT * 0x10)); @@ -104,7 +104,7 @@ gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu, static void gk20a_pmu_dvfs_reset_dev_status(struct gk20a_pmu *pmu) { - struct nvkm_falcon *falcon = pmu->base.falcon; + struct nvkm_falcon *falcon = &pmu->base.falcon; nvkm_falcon_wr32(falcon, 0x508 + (BUSY_SLOT * 0x10), 0x80000000); nvkm_falcon_wr32(falcon, 0x508 + (CLK_SLOT * 0x10), 0x80000000); @@ -160,7 +160,7 @@ gk20a_pmu_fini(struct nvkm_pmu
*pmu) struct gk20a_pmu *gpmu = gk20a_pmu(pmu); nvkm_timer_alarm(pmu->subdev.device->timer, 0, &gpmu->alarm); - nvkm_falcon_put(pmu->falcon, &pmu->subdev); + nvkm_falcon_put(&pmu->falcon, &pmu->subdev); } static int @@ -169,7 +169,7 @@ gk20a_pmu_init(struct nvkm_pmu *pmu) struct gk20a_pmu *gpmu = gk20a_pmu(pmu); struct nvkm_subdev *subdev = &pmu->subdev; struct nvkm_device *device = pmu->subdev.device; - struct nvkm_falcon *falcon = pmu->falcon; + struct nvkm_falcon *falcon = &pmu->falcon; int ret; ret = nvkm_falcon_get(falcon, subdev); @@ -196,25 +196,34 @@ gk20a_dvfs_data= { static const struct nvkm_pmu_func gk20a_pmu = { + .flcn = &gt215_pmu_flcn, .enabled = gf100_pmu_enabled, .init = gk20a_pmu_init, .fini = gk20a_pmu_fini, .reset = gf100_pmu_reset, }; +static const struct nvkm_pmu_fwif +gk20a_pmu_fwif[] = { + { -1, gf100_pmu_nofw, &gk20a_pmu }, + {} +}; + int gk20a_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) { struct gk20a_pmu *pmu; + int ret; if (!(pmu = kzalloc(sizeof(*pmu), GFP_KERNEL))) return -ENOMEM; *ppmu = &pmu->base; - nvkm_pmu_ctor(&gk20a_pmu, device, index, &pmu->base); + ret = nvkm_pmu_ctor(gk20a_pmu_fwif, device, index, &pmu->base); + if (ret) + return ret; pmu->data = &gk20a_dvfs_data; nvkm_alarm_init(&pmu->alarm, gk20a_pmu_dvfs_work); - return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c index 459df1ef9e70c67d138902a4a521ff8a80ccf3f6..5afb55e58b517e406451a8203066e664629928c3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c @@ -28,6 +28,7 @@ static const struct nvkm_pmu_func gm107_pmu = { + .flcn = &gt215_pmu_flcn, .code.data = gm107_pmu_code, .code.size = sizeof(gm107_pmu_code), .data.data = gm107_pmu_data, @@ -41,8 +42,14 @@ gm107_pmu = { .recv = gt215_pmu_recv, }; +static const struct nvkm_pmu_fwif +gm107_pmu_fwif[] = { + { -1, gf100_pmu_nofw, &gm107_pmu }, + {} +}; + int gm107_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(&gm107_pmu, device, index, ppmu); + return nvkm_pmu_new_(gm107_pmu_fwif, device, index, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c index 31c843145c7ac0d1f6374744064e38106fd4d159..82571032a07d004e27c465408ed7046e23b11ebe 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c @@ -19,38 +19,224 @@ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE.
*/ - -#include <engine/falcon.h> -#include <core/msgqueue.h> #include "priv.h" -static void +#include <core/memory.h> +#include <subdev/acr.h> + +#include <nvfw/flcn.h> +#include <nvfw/pmu.h> + +static int +gm20b_pmu_acr_bootstrap_falcon_cb(void *priv, struct nv_falcon_msg *hdr) +{ + struct nv_pmu_acr_bootstrap_falcon_msg *msg = + container_of(hdr, typeof(*msg), msg.hdr); + return msg->falcon_id; +} + +int +gm20b_pmu_acr_bootstrap_falcon(struct nvkm_falcon *falcon, + enum nvkm_acr_lsf_id id) +{ + struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon); + struct nv_pmu_acr_bootstrap_falcon_cmd cmd = { + .cmd.hdr.unit_id = NV_PMU_UNIT_ACR, + .cmd.hdr.size = sizeof(cmd), + .cmd.cmd_type = NV_PMU_ACR_CMD_BOOTSTRAP_FALCON, + .flags = NV_PMU_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES, + .falcon_id = id, + }; + int ret; + + ret = nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr, + gm20b_pmu_acr_bootstrap_falcon_cb, + &pmu->subdev, msecs_to_jiffies(1000)); + if (ret >= 0) { + if (ret != cmd.falcon_id) + ret = -EIO; + else + ret = 0; + } + + return ret; +} + +int +gm20b_pmu_acr_boot(struct nvkm_falcon *falcon) +{ + struct nv_pmu_args args = { .secure_mode = true }; + const u32 addr_args = falcon->data.limit - sizeof(struct nv_pmu_args); + nvkm_falcon_load_dmem(falcon, &args, addr_args, sizeof(args), 0); + nvkm_falcon_start(falcon); + return 0; +} + +void +gm20b_pmu_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust) +{ + struct loader_config hdr; + u64 addr; + + nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr)); + addr = ((u64)hdr.code_dma_base1 << 40 | hdr.code_dma_base << 8); + hdr.code_dma_base = lower_32_bits((addr + adjust) >> 8); + hdr.code_dma_base1 = upper_32_bits((addr + adjust) >> 8); + addr = ((u64)hdr.data_dma_base1 << 40 | hdr.data_dma_base << 8); + hdr.data_dma_base = lower_32_bits((addr + adjust) >> 8); + hdr.data_dma_base1 = upper_32_bits((addr + adjust) >> 8); + addr = ((u64)hdr.overlay_dma_base1 << 40 | hdr.overlay_dma_base << 8); + hdr.overlay_dma_base = lower_32_bits((addr + adjust) >> 8); + hdr.overlay_dma_base1 = upper_32_bits((addr + adjust) >> 8); + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); + + loader_config_dump(&acr->subdev, &hdr); +} + +void +gm20b_pmu_acr_bld_write(struct nvkm_acr *acr, u32 bld, + struct nvkm_acr_lsfw *lsfw) +{ + const u64 base = lsfw->offset.img + lsfw->app_start_offset; + const u64 code = (base + lsfw->app_resident_code_offset) >> 8; + const u64 data = (base + lsfw->app_resident_data_offset) >> 8; + const struct loader_config hdr = { + .dma_idx = FALCON_DMAIDX_UCODE, + .code_dma_base = lower_32_bits(code), + .code_size_total = lsfw->app_size, + .code_size_to_load = lsfw->app_resident_code_size, + .code_entry_point = lsfw->app_imem_entry, + .data_dma_base = lower_32_bits(data), + .data_size = lsfw->app_resident_data_size, + .overlay_dma_base = lower_32_bits(code), + .argc = 1, + .argv = lsfw->falcon->data.limit - sizeof(struct nv_pmu_args), + .code_dma_base1 = upper_32_bits(code), + .data_dma_base1 = upper_32_bits(data), + .overlay_dma_base1 = upper_32_bits(code), + }; + + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); +} + +static const struct nvkm_acr_lsf_func +gm20b_pmu_acr = { + .flags = NVKM_ACR_LSF_DMACTL_REQ_CTX, + .bld_size = sizeof(struct loader_config), + .bld_write = gm20b_pmu_acr_bld_write, + .bld_patch = gm20b_pmu_acr_bld_patch, + .boot = gm20b_pmu_acr_boot, + .bootstrap_falcon = gm20b_pmu_acr_bootstrap_falcon, +}; + +static int +gm20b_pmu_acr_init_wpr_callback(void *priv, struct nv_falcon_msg *hdr) +{ + struct nv_pmu_acr_init_wpr_region_msg *msg = + container_of(hdr, typeof(*msg), msg.hdr); + struct nvkm_pmu *pmu = priv; + struct
nvkm_subdev *subdev = &pmu->subdev; + + if (msg->error_code) { + nvkm_error(subdev, "ACR WPR init failure: %d\n", + msg->error_code); + return -EINVAL; + } + + nvkm_debug(subdev, "ACR WPR init complete\n"); + complete_all(&pmu->wpr_ready); + return 0; +} + +static int +gm20b_pmu_acr_init_wpr(struct nvkm_pmu *pmu) +{ + struct nv_pmu_acr_init_wpr_region_cmd cmd = { + .cmd.hdr.unit_id = NV_PMU_UNIT_ACR, + .cmd.hdr.size = sizeof(cmd), + .cmd.cmd_type = NV_PMU_ACR_CMD_INIT_WPR_REGION, + .region_id = 1, + .wpr_offset = 0, + }; + + return nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr, + gm20b_pmu_acr_init_wpr_callback, pmu, 0); +} + +int +gm20b_pmu_initmsg(struct nvkm_pmu *pmu) +{ + struct nv_pmu_init_msg msg; + int ret; + + ret = nvkm_falcon_msgq_recv_initmsg(pmu->msgq, &msg, sizeof(msg)); + if (ret) + return ret; + + if (msg.hdr.unit_id != NV_PMU_UNIT_INIT || + msg.msg_type != NV_PMU_INIT_MSG_INIT) + return -EINVAL; + + nvkm_falcon_cmdq_init(pmu->hpq, msg.queue_info[0].index, + msg.queue_info[0].offset, + msg.queue_info[0].size); + nvkm_falcon_cmdq_init(pmu->lpq, msg.queue_info[1].index, + msg.queue_info[1].offset, + msg.queue_info[1].size); + nvkm_falcon_msgq_init(pmu->msgq, msg.queue_info[4].index, + msg.queue_info[4].offset, + msg.queue_info[4].size); + return gm20b_pmu_acr_init_wpr(pmu); +} + +void gm20b_pmu_recv(struct nvkm_pmu *pmu) { - if (!pmu->queue) { - nvkm_warn(&pmu->subdev, - "recv function called while no firmware set!\n"); - return; + if (!pmu->initmsg_received) { + int ret = pmu->func->initmsg(pmu); + if (ret) { + nvkm_error(&pmu->subdev, + "error parsing init message: %d\n", ret); + return; + } + + pmu->initmsg_received = true; } - nvkm_msgqueue_recv(pmu->queue); + nvkm_falcon_msgq_recv(pmu->msgq); } static const struct nvkm_pmu_func gm20b_pmu = { + .flcn = &gt215_pmu_flcn, .enabled = gf100_pmu_enabled, .intr = gt215_pmu_intr, .recv = gm20b_pmu_recv, + .initmsg = gm20b_pmu_initmsg, }; +#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) +MODULE_FIRMWARE("nvidia/gm20b/pmu/desc.bin"); +MODULE_FIRMWARE("nvidia/gm20b/pmu/image.bin"); +MODULE_FIRMWARE("nvidia/gm20b/pmu/sig.bin"); +#endif + int -gm20b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) +gm20b_pmu_load(struct nvkm_pmu *pmu, int ver, const struct nvkm_pmu_fwif *fwif) { - int ret; + return nvkm_acr_lsfw_load_sig_image_desc(&pmu->subdev, &pmu->falcon, + NVKM_ACR_LSF_PMU, "pmu/", + ver, fwif->acr); } - ret = nvkm_pmu_new_(&gm20b_pmu, device, index, ppmu); - if (ret) - return ret; +static const struct nvkm_pmu_fwif +gm20b_pmu_fwif[] = { + { 0, gm20b_pmu_load, &gm20b_pmu, &gm20b_pmu_acr }, + {} +}; - return 0; +int +gm20b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) +{ + return nvkm_pmu_new_(gm20b_pmu_fwif, device, index, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c index e210cd6af816dd22212084d363f728fe42148806..09e05db21ff5b8287518780fd72c6b387b67ec95 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c @@ -25,12 +25,19 @@ static const struct nvkm_pmu_func gp100_pmu = { + .flcn = &gt215_pmu_flcn, .enabled = gf100_pmu_enabled, .reset = gf100_pmu_reset, }; +static const struct nvkm_pmu_fwif +gp100_pmu_fwif[] = { + { -1, gf100_pmu_nofw, &gp100_pmu }, + {} +}; + int gp100_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(&gp100_pmu, device, index, ppmu); + return nvkm_pmu_new_(gp100_pmu_fwif, device, index, ppmu); }
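The nvkm_pmu_fwif tables being added above all follow the same shape: each entry pairs a firmware version with a loader callback and the nvkm_pmu_func to adopt if that loader succeeds, and nvkm_pmu_ctor() hands the table to nvkm_firmware_load(), which returns the entry that worked. A minimal sketch of that selection pattern follows; struct pmu_fwif and pmu_fwif_select() are stand-in names invented for this example, not the real nvkm_firmware_load() internals.

/*
 * Illustrative sketch only: a simplified fwif-style selection loop.
 * The types and names here are hypothetical stand-ins for the nvkm
 * pattern shown in the patch, not part of the nvkm API.
 */
struct pmu_fwif {
	int version;                /* firmware version to try; -1 = no external firmware */
	int (*load)(void *pmu, int ver, const struct pmu_fwif *);
	const void *func;           /* hardware function table adopted on success */
};

static const struct pmu_fwif *
pmu_fwif_select(void *pmu, const struct pmu_fwif *fwif)
{
	/* Walk entries in order; the empty terminator has load == NULL. */
	for (; fwif->load; fwif++) {
		if (fwif->load(pmu, fwif->version, fwif) == 0)
			return fwif;    /* caller adopts fwif->func */
	}
	return NULL;                /* no usable firmware interface */
}

In the tables above, gf100_pmu_nofw() simply returns 0, so boards that need no external PMU firmware accept their single { -1, ... } entry immediately, while gm20b_pmu_load() only succeeds once the signed desc/image/sig files named by MODULE_FIRMWARE() can be fetched; nvkm_pmu_ctor() then stores the winning entry's func as pmu->func.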
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c index 98c7a2a8afc4507e998ee129ab90da251e791b0e..262b8a3dd5079db5c487f58871b82b283045f562 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c @@ -39,12 +39,19 @@ gp102_pmu_enabled(struct nvkm_pmu *pmu) static const struct nvkm_pmu_func gp102_pmu = { + .flcn = &gt215_pmu_flcn, .enabled = gp102_pmu_enabled, .reset = gp102_pmu_reset, }; +static const struct nvkm_pmu_fwif +gp102_pmu_fwif[] = { + { -1, gf100_pmu_nofw, &gp102_pmu }, + {} +}; + int gp102_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(&gp102_pmu, device, index, ppmu); + return nvkm_pmu_new_(gp102_pmu_fwif, device, index, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c new file mode 100644 index 0000000000000000000000000000000000000000..5b81c732047963b7e31dcbff7c8cb65dfea6f2bc --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE.
+ */ +#include "priv.h" + +#include <subdev/acr.h> + +#include <nvfw/flcn.h> +#include <nvfw/pmu.h> + +static int +gp10b_pmu_acr_bootstrap_multiple_falcons_cb(void *priv, + struct nv_falcon_msg *hdr) +{ + struct nv_pmu_acr_bootstrap_multiple_falcons_msg *msg = + container_of(hdr, typeof(*msg), msg.hdr); + return msg->falcon_mask; +} + +static int +gp10b_pmu_acr_bootstrap_multiple_falcons(struct nvkm_falcon *falcon, u32 mask) +{ + struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon); + struct nv_pmu_acr_bootstrap_multiple_falcons_cmd cmd = { + .cmd.hdr.unit_id = NV_PMU_UNIT_ACR, + .cmd.hdr.size = sizeof(cmd), + .cmd.cmd_type = NV_PMU_ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS, + .flags = NV_PMU_ACR_BOOTSTRAP_MULTIPLE_FALCONS_FLAGS_RESET_YES, + .falcon_mask = mask, + .wpr_lo = 0, /*XXX*/ + .wpr_hi = 0, /*XXX*/ + }; + int ret; + + ret = nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr, + gp10b_pmu_acr_bootstrap_multiple_falcons_cb, + &pmu->subdev, msecs_to_jiffies(1000)); + if (ret >= 0) { + if (ret != cmd.falcon_mask) + ret = -EIO; + else + ret = 0; + } + + return ret; +} + +static const struct nvkm_acr_lsf_func +gp10b_pmu_acr = { + .flags = NVKM_ACR_LSF_DMACTL_REQ_CTX, + .bld_size = sizeof(struct loader_config), + .bld_write = gm20b_pmu_acr_bld_write, + .bld_patch = gm20b_pmu_acr_bld_patch, + .boot = gm20b_pmu_acr_boot, + .bootstrap_falcon = gm20b_pmu_acr_bootstrap_falcon, + .bootstrap_multiple_falcons = gp10b_pmu_acr_bootstrap_multiple_falcons, +}; + +static const struct nvkm_pmu_func +gp10b_pmu = { + .flcn = &gt215_pmu_flcn, + .enabled = gf100_pmu_enabled, + .intr = gt215_pmu_intr, + .recv = gm20b_pmu_recv, + .initmsg = gm20b_pmu_initmsg, +}; + +#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) +MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin"); +MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin"); +MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin"); +#endif + +static const struct nvkm_pmu_fwif +gp10b_pmu_fwif[] = { + { 0, gm20b_pmu_load, &gp10b_pmu, &gp10b_pmu_acr }, + {} +}; + +int +gp10b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) +{ + return nvkm_pmu_new_(gp10b_pmu_fwif, device, index, ppmu); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c index e04216daea5812c5b7a5c9e609c3286fce207ab4..88b909913ff96e83799a317aa4808e3f19a017cd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c @@ -241,8 +241,27 @@ gt215_pmu_init(struct nvkm_pmu *pmu) return 0; } +const struct nvkm_falcon_func +gt215_pmu_flcn = { + .debug = 0xc08, + .fbif = 0xe00, + .load_imem = nvkm_falcon_v1_load_imem, + .load_dmem = nvkm_falcon_v1_load_dmem, + .read_dmem = nvkm_falcon_v1_read_dmem, + .bind_context = nvkm_falcon_v1_bind_context, + .wait_for_halt = nvkm_falcon_v1_wait_for_halt, + .clear_interrupt = nvkm_falcon_v1_clear_interrupt, + .set_start_addr = nvkm_falcon_v1_set_start_addr, + .start = nvkm_falcon_v1_start, + .enable = nvkm_falcon_v1_enable, + .disable = nvkm_falcon_v1_disable, + .cmdq = { 0x4a0, 0x4b0, 4 }, + .msgq = { 0x4c8, 0x4cc, 0 }, +}; + static const struct nvkm_pmu_func gt215_pmu = { + .flcn = &gt215_pmu_flcn, .code.data = gt215_pmu_code, .code.size = sizeof(gt215_pmu_code), .data.data = gt215_pmu_data, @@ -256,8 +275,14 @@ gt215_pmu = { .recv = gt215_pmu_recv, }; +static const struct nvkm_pmu_fwif +gt215_pmu_fwif[] = { + { -1, gf100_pmu_nofw, &gt215_pmu }, + {} +}; + int gt215_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(&gt215_pmu, device, index, ppmu); + return
nvkm_pmu_new_(gt215_pmu_fwif, device, index, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h index 26d73f9cd6d3a246e09112e80d1f06c30f440eac..f470859244de72ba7c2dd9340d9f8e5014563f3a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h @@ -4,13 +4,12 @@ #define nvkm_pmu(p) container_of((p), struct nvkm_pmu, subdev) #include #include - -int nvkm_pmu_ctor(const struct nvkm_pmu_func *, struct nvkm_device *, - int index, struct nvkm_pmu *); -int nvkm_pmu_new_(const struct nvkm_pmu_func *, struct nvkm_device *, - int index, struct nvkm_pmu **); +enum nvkm_acr_lsf_id; +struct nvkm_acr_lsfw; struct nvkm_pmu_func { + const struct nvkm_falcon_func *flcn; + struct { u32 *data; u32 size; @@ -29,9 +28,11 @@ struct nvkm_pmu_func { int (*send)(struct nvkm_pmu *, u32 reply[2], u32 process, u32 message, u32 data0, u32 data1); void (*recv)(struct nvkm_pmu *); + int (*initmsg)(struct nvkm_pmu *); void (*pgob)(struct nvkm_pmu *, bool); }; +extern const struct nvkm_falcon_func gt215_pmu_flcn; int gt215_pmu_init(struct nvkm_pmu *); void gt215_pmu_fini(struct nvkm_pmu *); void gt215_pmu_intr(struct nvkm_pmu *); @@ -42,4 +43,26 @@ bool gf100_pmu_enabled(struct nvkm_pmu *); void gf100_pmu_reset(struct nvkm_pmu *); void gk110_pmu_pgob(struct nvkm_pmu *, bool); + +void gm20b_pmu_acr_bld_patch(struct nvkm_acr *, u32, s64); +void gm20b_pmu_acr_bld_write(struct nvkm_acr *, u32, struct nvkm_acr_lsfw *); +int gm20b_pmu_acr_boot(struct nvkm_falcon *); +int gm20b_pmu_acr_bootstrap_falcon(struct nvkm_falcon *, enum nvkm_acr_lsf_id); +void gm20b_pmu_recv(struct nvkm_pmu *); +int gm20b_pmu_initmsg(struct nvkm_pmu *); + +struct nvkm_pmu_fwif { + int version; + int (*load)(struct nvkm_pmu *, int ver, const struct nvkm_pmu_fwif *); + const struct nvkm_pmu_func *func; + const struct nvkm_acr_lsf_func *acr; +}; + +int gf100_pmu_nofw(struct nvkm_pmu *, int, const struct nvkm_pmu_fwif *); +int gm20b_pmu_load(struct nvkm_pmu *, int, const struct nvkm_pmu_fwif *); + +int nvkm_pmu_ctor(const struct nvkm_pmu_fwif *, struct nvkm_device *, + int index, struct nvkm_pmu *); +int nvkm_pmu_new_(const struct nvkm_pmu_fwif *, struct nvkm_device *, + int index, struct nvkm_pmu **); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild deleted file mode 100644 index f3dee2693c7954755feaad854c49252bfcbda21f..0000000000000000000000000000000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild +++ /dev/null @@ -1,17 +0,0 @@ -# SPDX-License-Identifier: MIT -nvkm-y += nvkm/subdev/secboot/base.o -nvkm-y += nvkm/subdev/secboot/hs_ucode.o -nvkm-y += nvkm/subdev/secboot/ls_ucode_gr.o -nvkm-y += nvkm/subdev/secboot/ls_ucode_msgqueue.o -nvkm-y += nvkm/subdev/secboot/acr.o -nvkm-y += nvkm/subdev/secboot/acr_r352.o -nvkm-y += nvkm/subdev/secboot/acr_r361.o -nvkm-y += nvkm/subdev/secboot/acr_r364.o -nvkm-y += nvkm/subdev/secboot/acr_r367.o -nvkm-y += nvkm/subdev/secboot/acr_r370.o -nvkm-y += nvkm/subdev/secboot/acr_r375.o -nvkm-y += nvkm/subdev/secboot/gm200.o -nvkm-y += nvkm/subdev/secboot/gm20b.o -nvkm-y += nvkm/subdev/secboot/gp102.o -nvkm-y += nvkm/subdev/secboot/gp108.o -nvkm-y += nvkm/subdev/secboot/gp10b.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c deleted file mode 100644 index dc80985cf0933a647855f470848cbe80bbe0a246..0000000000000000000000000000000000000000 --- 
a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "acr.h" - -#include - -/** - * Convenience function to duplicate a firmware file in memory and check that - * it has the required minimum size. - */ -void * -nvkm_acr_load_firmware(const struct nvkm_subdev *subdev, const char *name, - size_t min_size) -{ - const struct firmware *fw; - void *blob; - int ret; - - ret = nvkm_firmware_get(subdev, name, &fw); - if (ret) - return ERR_PTR(ret); - if (fw->size < min_size) { - nvkm_error(subdev, "%s is smaller than expected size %zu\n", - name, min_size); - nvkm_firmware_put(fw); - return ERR_PTR(-EINVAL); - } - blob = kmemdup(fw->data, fw->size, GFP_KERNEL); - nvkm_firmware_put(fw); - if (!blob) - return ERR_PTR(-ENOMEM); - - return blob; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h deleted file mode 100644 index 73a2ac81ac69295219a807b244729770ea25b5e2..0000000000000000000000000000000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ -#ifndef __NVKM_SECBOOT_ACR_H__ -#define __NVKM_SECBOOT_ACR_H__ - -#include "priv.h" - -struct nvkm_acr; - -/** - * struct nvkm_acr_func - properties and functions specific to an ACR - * - * @load: make the ACR ready to run on the given secboot device - * @reset: reset the specified falcon - * @start: start the specified falcon (assumed to have been reset) - */ -struct nvkm_acr_func { - void (*dtor)(struct nvkm_acr *); - int (*oneinit)(struct nvkm_acr *, struct nvkm_secboot *); - int (*fini)(struct nvkm_acr *, struct nvkm_secboot *, bool); - int (*load)(struct nvkm_acr *, struct nvkm_falcon *, - struct nvkm_gpuobj *, u64); - int (*reset)(struct nvkm_acr *, struct nvkm_secboot *, unsigned long); -}; - -/** - * struct nvkm_acr - instance of an ACR - * - * @boot_falcon: ID of the falcon that will perform secure boot - * @managed_falcons: bitfield of falcons managed by this ACR - * @optional_falcons: bitfield of falcons we can live without - */ -struct nvkm_acr { - const struct nvkm_acr_func *func; - const struct nvkm_subdev *subdev; - - enum nvkm_secboot_falcon boot_falcon; - unsigned long managed_falcons; - unsigned long optional_falcons; -}; - -void *nvkm_acr_load_firmware(const struct nvkm_subdev *, const char *, size_t); - -struct nvkm_acr *acr_r352_new(unsigned long); -struct nvkm_acr *acr_r361_new(unsigned long); -struct nvkm_acr *acr_r364_new(unsigned long); -struct nvkm_acr *acr_r367_new(enum nvkm_secboot_falcon, unsigned long); -struct nvkm_acr *acr_r370_new(enum nvkm_secboot_falcon, unsigned long); -struct nvkm_acr *acr_r375_new(enum nvkm_secboot_falcon, unsigned long); - -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c deleted file mode 100644 index 7af971db91bce2121495872cff5188fe9a6ff6ce..0000000000000000000000000000000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c +++ /dev/null @@ -1,1241 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "acr_r352.h" -#include "hs_ucode.h" - -#include -#include -#include -#include -#include -#include - -/** - * struct acr_r352_flcn_bl_desc - DMEM bootloader descriptor - * @signature: 16B signature for secure code. 
0s if no secure code - * @ctx_dma: DMA context to be used by BL while loading code/data - * @code_dma_base: 256B-aligned Physical FB Address where code is located - * (falcon's $xcbase register) - * @non_sec_code_off: offset from code_dma_base where the non-secure code is - * located. The offset must be a multiple of 256 to help perf - * @non_sec_code_size: the size of the nonSecure code part. - * @sec_code_off: offset from code_dma_base where the secure code is - * located. The offset must be a multiple of 256 to help perf - * @sec_code_size: the size of the secure code part. - * @code_entry_point: code entry point which will be invoked by BL after - * code is loaded. - * @data_dma_base: 256B aligned Physical FB Address where data is located. - * (falcon's $xdbase register) - * @data_size: size of data block. Should be a multiple of 256B - * - * Structure used by the bootloader to load the rest of the code. This has - * to be filled by the host and copied into DMEM at offset provided in the - * hsflcn_bl_desc.bl_desc_dmem_load_off. - */ -struct acr_r352_flcn_bl_desc { - u32 reserved[4]; - u32 signature[4]; - u32 ctx_dma; - u32 code_dma_base; - u32 non_sec_code_off; - u32 non_sec_code_size; - u32 sec_code_off; - u32 sec_code_size; - u32 code_entry_point; - u32 data_dma_base; - u32 data_size; - u32 code_dma_base1; - u32 data_dma_base1; -}; - -/** - * acr_r352_generate_flcn_bl_desc - generate generic BL descriptor for LS image - */ -static void -acr_r352_generate_flcn_bl_desc(const struct nvkm_acr *acr, - const struct ls_ucode_img *img, u64 wpr_addr, - void *_desc) -{ - struct acr_r352_flcn_bl_desc *desc = _desc; - const struct ls_ucode_img_desc *pdesc = &img->ucode_desc; - u64 base, addr_code, addr_data; - - base = wpr_addr + img->ucode_off + pdesc->app_start_offset; - addr_code = (base + pdesc->app_resident_code_offset) >> 8; - addr_data = (base + pdesc->app_resident_data_offset) >> 8; - - desc->ctx_dma = FALCON_DMAIDX_UCODE; - desc->code_dma_base = lower_32_bits(addr_code); - desc->code_dma_base1 = upper_32_bits(addr_code); - desc->non_sec_code_off = pdesc->app_resident_code_offset; - desc->non_sec_code_size = pdesc->app_resident_code_size; - desc->code_entry_point = pdesc->app_imem_entry; - desc->data_dma_base = lower_32_bits(addr_data); - desc->data_dma_base1 = upper_32_bits(addr_data); - desc->data_size = pdesc->app_resident_data_size; -} - - -/** - * struct hsflcn_acr_desc - data section of the HS firmware - * - * This header is to be copied at the beginning of DMEM by the HS bootloader.
- * - * @signature: signature of ACR ucode - * @wpr_region_id: region ID holding the WPR header and its details - * @wpr_offset: offset from the WPR region holding the wpr header - * @regions: region descriptors - * @nonwpr_ucode_blob_size: size of LS blob - * @nonwpr_ucode_blob_start: FB location of the LS blob - */ -struct hsflcn_acr_desc { - union { - u8 reserved_dmem[0x200]; - u32 signatures[4]; - } ucode_reserved_space; - u32 wpr_region_id; - u32 wpr_offset; - u32 mmu_mem_range; -#define FLCN_ACR_MAX_REGIONS 2 - struct { - u32 no_regions; - struct { - u32 start_addr; - u32 end_addr; - u32 region_id; - u32 read_mask; - u32 write_mask; - u32 client_mask; - } region_props[FLCN_ACR_MAX_REGIONS]; - } regions; - u32 ucode_blob_size; - u64 ucode_blob_base __aligned(8); - struct { - u32 vpr_enabled; - u32 vpr_start; - u32 vpr_end; - u32 hdcp_policies; - } vpr_desc; -}; - - -/* - * Low-secure blob creation - */ - -/** - * struct acr_r352_lsf_lsb_header - LS firmware header - * @signature: signature to verify the firmware against - * @ucode_off: offset of the ucode blob in the WPR region. The ucode - * blob contains the bootloader, code and data of the - * LS falcon - * @ucode_size: size of the ucode blob, including bootloader - * @data_size: size of the ucode blob data - * @bl_code_size: size of the bootloader code - * @bl_imem_off: offset in imem of the bootloader - * @bl_data_off: offset of the bootloader data in WPR region - * @bl_data_size: size of the bootloader data - * @app_code_off: offset of the app code relative to ucode_off - * @app_code_size: size of the app code - * @app_data_off: offset of the app data relative to ucode_off - * @app_data_size: size of the app data - * @flags: flags for the secure bootloader - * - * This structure is written into the WPR region for each managed falcon. Each - * instance is referenced by the lsb_offset member of the corresponding - * lsf_wpr_header. - */ -struct acr_r352_lsf_lsb_header { - /** - * LS falcon signatures - * @prd_keys: signature to use in production mode - * @dbg_keys: signature to use in debug mode - * @b_prd_present: whether the production key is present - * @b_dbg_present: whether the debug key is present - * @falcon_id: ID of the falcon the ucode applies to - */ - struct { - u8 prd_keys[2][16]; - u8 dbg_keys[2][16]; - u32 b_prd_present; - u32 b_dbg_present; - u32 falcon_id; - } signature; - u32 ucode_off; - u32 ucode_size; - u32 data_size; - u32 bl_code_size; - u32 bl_imem_off; - u32 bl_data_off; - u32 bl_data_size; - u32 app_code_off; - u32 app_code_size; - u32 app_data_off; - u32 app_data_size; - u32 flags; -}; - -/** - * struct acr_r352_lsf_wpr_header - LS blob WPR Header - * @falcon_id: LS falcon ID - * @lsb_offset: offset of the lsb_lsf_header in the WPR region - * @bootstrap_owner: secure falcon responsible for bootstrapping the LS falcon - * @lazy_bootstrap: skip bootstrapping by ACR - * @status: bootstrapping status - * - * An array of these is written at the beginning of the WPR region, one for - * each managed falcon. The array is terminated by an instance whose falcon_id - * is LSF_FALCON_ID_INVALID.
- */ -struct acr_r352_lsf_wpr_header { - u32 falcon_id; - u32 lsb_offset; - u32 bootstrap_owner; - u32 lazy_bootstrap; - u32 status; -#define LSF_IMAGE_STATUS_NONE 0 -#define LSF_IMAGE_STATUS_COPY 1 -#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED 2 -#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED 3 -#define LSF_IMAGE_STATUS_VALIDATION_DONE 4 -#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED 5 -#define LSF_IMAGE_STATUS_BOOTSTRAP_READY 6 -}; - -/** - * struct ls_ucode_img_r352 - ucode image augmented with r352 headers - */ -struct ls_ucode_img_r352 { - struct ls_ucode_img base; - - const struct acr_r352_lsf_func *func; - - struct acr_r352_lsf_wpr_header wpr_header; - struct acr_r352_lsf_lsb_header lsb_header; -}; -#define ls_ucode_img_r352(i) container_of(i, struct ls_ucode_img_r352, base) - -/** - * ls_ucode_img_load() - create a lsf_ucode_img and load it - */ -struct ls_ucode_img * -acr_r352_ls_ucode_img_load(const struct acr_r352 *acr, - const struct nvkm_secboot *sb, - enum nvkm_secboot_falcon falcon_id) -{ - const struct nvkm_subdev *subdev = acr->base.subdev; - const struct acr_r352_ls_func *func = acr->func->ls_func[falcon_id]; - struct ls_ucode_img_r352 *img; - int ret; - - img = kzalloc(sizeof(*img), GFP_KERNEL); - if (!img) - return ERR_PTR(-ENOMEM); - - img->base.falcon_id = falcon_id; - - ret = func->load(sb, func->version_max, &img->base); - if (ret < 0) { - kfree(img->base.ucode_data); - kfree(img->base.sig); - kfree(img); - return ERR_PTR(ret); - } - - img->func = func->version[ret]; - - /* Check that the signature size matches our expectations... */ - if (img->base.sig_size != sizeof(img->lsb_header.signature)) { - nvkm_error(subdev, "invalid signature size for %s falcon!\n", - nvkm_secboot_falcon_name[falcon_id]); - return ERR_PTR(-EINVAL); - } - - /* Copy signature to the right place */ - memcpy(&img->lsb_header.signature, img->base.sig, img->base.sig_size); - - /* not needed? the signature should already have the right value */ - img->lsb_header.signature.falcon_id = falcon_id; - - return &img->base; -} - -#define LSF_LSB_HEADER_ALIGN 256 -#define LSF_BL_DATA_ALIGN 256 -#define LSF_BL_DATA_SIZE_ALIGN 256 -#define LSF_BL_CODE_SIZE_ALIGN 256 -#define LSF_UCODE_DATA_ALIGN 4096 - -/** - * acr_r352_ls_img_fill_headers - fill the WPR and LSB headers of an image - * @acr: ACR to use - * @img: image to generate for - * @offset: offset in the WPR region where this image starts - * - * Allocate space in the WPR area from offset and write the WPR and LSB headers - * accordingly. - * - * Return: offset at the end of this image. 
- */ -static u32 -acr_r352_ls_img_fill_headers(struct acr_r352 *acr, - struct ls_ucode_img_r352 *img, u32 offset) -{ - struct ls_ucode_img *_img = &img->base; - struct acr_r352_lsf_wpr_header *whdr = &img->wpr_header; - struct acr_r352_lsf_lsb_header *lhdr = &img->lsb_header; - struct ls_ucode_img_desc *desc = &_img->ucode_desc; - const struct acr_r352_lsf_func *func = img->func; - - /* Fill WPR header */ - whdr->falcon_id = _img->falcon_id; - whdr->bootstrap_owner = acr->base.boot_falcon; - whdr->status = LSF_IMAGE_STATUS_COPY; - - /* Skip bootstrapping falcons started by someone other than the ACR */ - if (acr->lazy_bootstrap & BIT(_img->falcon_id)) - whdr->lazy_bootstrap = 1; - - /* Align, save off, and include an LSB header size */ - offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN); - whdr->lsb_offset = offset; - offset += sizeof(*lhdr); - - /* - * Align, save off, and include the original (static) ucode - * image size - */ - offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN); - _img->ucode_off = lhdr->ucode_off = offset; - offset += _img->ucode_size; - - /* - * For falcons that use a boot loader (BL), we append a loader - * desc structure on the end of the ucode image and consider - * this the boot loader data. The host will then copy the loader - * desc args to this space within the WPR region (before locking - * down) and the HS bin will then copy them to DMEM 0 for the - * loader. - */ - lhdr->bl_code_size = ALIGN(desc->bootloader_size, - LSF_BL_CODE_SIZE_ALIGN); - lhdr->ucode_size = ALIGN(desc->app_resident_data_offset, - LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size; - lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) + - lhdr->bl_code_size - lhdr->ucode_size; - /* - * Though the BL is located at 0th offset of the image, the VA - * is different to make sure that it doesn't collide with the actual - * OS VA range - */ - lhdr->bl_imem_off = desc->bootloader_imem_offset; - lhdr->app_code_off = desc->app_start_offset + - desc->app_resident_code_offset; - lhdr->app_code_size = desc->app_resident_code_size; - lhdr->app_data_off = desc->app_start_offset + - desc->app_resident_data_offset; - lhdr->app_data_size = desc->app_resident_data_size; - - lhdr->flags = func->lhdr_flags; - if (_img->falcon_id == acr->base.boot_falcon) - lhdr->flags |= LSF_FLAG_DMACTL_REQ_CTX; - - /* Align and save off BL descriptor size */ - lhdr->bl_data_size = ALIGN(func->bl_desc_size, LSF_BL_DATA_SIZE_ALIGN); - - /* - * Align, save off, and include the additional BL data - */ - offset = ALIGN(offset, LSF_BL_DATA_ALIGN); - lhdr->bl_data_off = offset; - offset += lhdr->bl_data_size; - - return offset; -} - -/** - * acr_r352_ls_fill_headers - fill WPR and LSB headers of all managed images - */ -int -acr_r352_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs) -{ - struct ls_ucode_img_r352 *img; - struct list_head *l; - u32 count = 0; - u32 offset; - - /* Count the number of images to manage */ - list_for_each(l, imgs) - count++; - - /* - * Start with an array of WPR headers at the base of the WPR. - * The expectation here is that the secure falcon will do a single DMA - * read of this array and cache it internally so it's ok to pack these. - * Also, we add 1 to the falcon count to indicate the end of the array. - */ - offset = sizeof(img->wpr_header) * (count + 1); - - /* - * Walk the managed falcons, accounting for the LSB structs - * as well as the ucode images.
- */ - list_for_each_entry(img, imgs, base.node) { - offset = acr_r352_ls_img_fill_headers(acr, img, offset); - } - - return offset; -} - -/** - * acr_r352_ls_write_wpr - write the WPR blob contents - */ -int -acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs, - struct nvkm_gpuobj *wpr_blob, u64 wpr_addr) -{ - struct ls_ucode_img *_img; - u32 pos = 0; - u32 max_desc_size = 0; - u8 *gdesc; - - /* Figure out how large we need gdesc to be. */ - list_for_each_entry(_img, imgs, node) { - struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img); - const struct acr_r352_lsf_func *ls_func = img->func; - - max_desc_size = max(max_desc_size, ls_func->bl_desc_size); - } - - gdesc = kmalloc(max_desc_size, GFP_KERNEL); - if (!gdesc) - return -ENOMEM; - - nvkm_kmap(wpr_blob); - - list_for_each_entry(_img, imgs, node) { - struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img); - const struct acr_r352_lsf_func *ls_func = img->func; - - nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header, - sizeof(img->wpr_header)); - - nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset, - &img->lsb_header, sizeof(img->lsb_header)); - - /* Generate and write BL descriptor */ - memset(gdesc, 0, ls_func->bl_desc_size); - ls_func->generate_bl_desc(&acr->base, _img, wpr_addr, gdesc); - - nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.bl_data_off, - gdesc, ls_func->bl_desc_size); - - /* Copy ucode */ - nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off, - _img->ucode_data, _img->ucode_size); - - pos += sizeof(img->wpr_header); - } - - nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID); - - nvkm_done(wpr_blob); - - kfree(gdesc); - - return 0; -} - -/* Both size and address of WPR need to be 256K-aligned */ -#define WPR_ALIGNMENT 0x40000 -/** - * acr_r352_prepare_ls_blob() - prepare the LS blob - * - * For each securely managed falcon, load the FW, signatures and bootloaders and - * prepare a ucode blob. Then, compute the offsets in the WPR region for each - * blob, and finally write the headers and ucode blobs into a GPU object that - * will be copied into the WPR region by the HS firmware. - */ -static int -acr_r352_prepare_ls_blob(struct acr_r352 *acr, struct nvkm_secboot *sb) -{ - const struct nvkm_subdev *subdev = acr->base.subdev; - struct list_head imgs; - struct ls_ucode_img *img, *t; - unsigned long managed_falcons = acr->base.managed_falcons; - u64 wpr_addr = sb->wpr_addr; - u32 wpr_size = sb->wpr_size; - int managed_count = 0; - u32 image_wpr_size, ls_blob_size; - int falcon_id; - int ret; - - INIT_LIST_HEAD(&imgs); - - /* Load all LS blobs */ - for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) { - struct ls_ucode_img *img; - - img = acr->func->ls_ucode_img_load(acr, sb, falcon_id); - if (IS_ERR(img)) { - if (acr->base.optional_falcons & BIT(falcon_id)) { - managed_falcons &= ~BIT(falcon_id); - nvkm_info(subdev, "skipping %s falcon...\n", - nvkm_secboot_falcon_name[falcon_id]); - continue; - } - ret = PTR_ERR(img); - goto cleanup; - } - - list_add_tail(&img->node, &imgs); - managed_count++; - } - - /* Commit the actual list of falcons we will manage from now on */ - acr->base.managed_falcons = managed_falcons; - - /* - * If the boot falcon has a firmware, let it manage the bootstrap of other - * falcons.
- */ - if (acr->func->ls_func[acr->base.boot_falcon] && - (managed_falcons & BIT(acr->base.boot_falcon))) { - for_each_set_bit(falcon_id, &managed_falcons, - NVKM_SECBOOT_FALCON_END) { - if (falcon_id == acr->base.boot_falcon) - continue; - - acr->lazy_bootstrap |= BIT(falcon_id); - } - } - - /* - * Fill the WPR and LSF headers with the right offsets and compute - * required WPR size - */ - image_wpr_size = acr->func->ls_fill_headers(acr, &imgs); - image_wpr_size = ALIGN(image_wpr_size, WPR_ALIGNMENT); - - ls_blob_size = image_wpr_size; - - /* - * If we need a shadow area, allocate twice the size and use the - * upper half as WPR - */ - if (wpr_size == 0 && acr->func->shadow_blob) - ls_blob_size *= 2; - - /* Allocate GPU object that will contain the WPR region */ - ret = nvkm_gpuobj_new(subdev->device, ls_blob_size, WPR_ALIGNMENT, - false, NULL, &acr->ls_blob); - if (ret) - goto cleanup; - - nvkm_debug(subdev, "%d managed LS falcons, WPR size is %d bytes\n", - managed_count, image_wpr_size); - - /* If WPR address and size are not fixed, set them to fit the LS blob */ - if (wpr_size == 0) { - wpr_addr = acr->ls_blob->addr; - if (acr->func->shadow_blob) - wpr_addr += acr->ls_blob->size / 2; - - wpr_size = image_wpr_size; - /* - * But if the WPR region is set by the bootloader, it is illegal for - * the HS blob to be larger than this region. - */ - } else if (image_wpr_size > wpr_size) { - nvkm_error(subdev, "WPR region too small for FW blob!\n"); - nvkm_error(subdev, "required: %dB\n", image_wpr_size); - nvkm_error(subdev, "available: %dB\n", wpr_size); - ret = -ENOSPC; - goto cleanup; - } - - /* Write LS blob */ - ret = acr->func->ls_write_wpr(acr, &imgs, acr->ls_blob, wpr_addr); - if (ret) - nvkm_gpuobj_del(&acr->ls_blob); - -cleanup: - list_for_each_entry_safe(img, t, &imgs, node) { - kfree(img->ucode_data); - kfree(img->sig); - kfree(img); - } - - return ret; -} - - - - -void -acr_r352_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb, - void *_desc) -{ - struct hsflcn_acr_desc *desc = _desc; - struct nvkm_gpuobj *ls_blob = acr->ls_blob; - - /* WPR region information if WPR is not fixed */ - if (sb->wpr_size == 0) { - u64 wpr_start = ls_blob->addr; - u64 wpr_end = wpr_start + ls_blob->size; - - desc->wpr_region_id = 1; - desc->regions.no_regions = 2; - desc->regions.region_props[0].start_addr = wpr_start >> 8; - desc->regions.region_props[0].end_addr = wpr_end >> 8; - desc->regions.region_props[0].region_id = 1; - desc->regions.region_props[0].read_mask = 0xf; - desc->regions.region_props[0].write_mask = 0xc; - desc->regions.region_props[0].client_mask = 0x2; - } else { - desc->ucode_blob_base = ls_blob->addr; - desc->ucode_blob_size = ls_blob->size; - } -} - -static void -acr_r352_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc, - u64 offset) -{ - struct acr_r352_flcn_bl_desc *bl_desc = _bl_desc; - u64 addr_code, addr_data; - - addr_code = offset >> 8; - addr_data = (offset + hdr->data_dma_base) >> 8; - - bl_desc->ctx_dma = FALCON_DMAIDX_VIRT; - bl_desc->code_dma_base = lower_32_bits(addr_code); - bl_desc->non_sec_code_off = hdr->non_sec_code_off; - bl_desc->non_sec_code_size = hdr->non_sec_code_size; - bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0); - bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0); - bl_desc->code_entry_point = 0; - bl_desc->data_dma_base = lower_32_bits(addr_data); - bl_desc->data_size = hdr->data_size; -} - -/** - * acr_r352_prepare_hs_blob - load and prepare a HS blob and BL descriptor - * - * @sb secure boot 
instance to prepare for - * @fw name of the HS firmware to load - * @blob pointer to gpuobj that will be allocated to receive the HS FW payload - * @bl_desc pointer to the BL descriptor to write for this firmware - * @patch whether we should patch the HS descriptor (only for HS loaders) - */ -static int -acr_r352_prepare_hs_blob(struct acr_r352 *acr, struct nvkm_secboot *sb, - const char *fw, struct nvkm_gpuobj **blob, - struct hsf_load_header *load_header, bool patch) -{ - struct nvkm_subdev *subdev = &sb->subdev; - void *acr_image; - struct fw_bin_header *hsbin_hdr; - struct hsf_fw_header *fw_hdr; - struct hsf_load_header *load_hdr; - void *acr_data; - int ret; - - acr_image = hs_ucode_load_blob(subdev, sb->boot_falcon, fw); - if (IS_ERR(acr_image)) - return PTR_ERR(acr_image); - - hsbin_hdr = acr_image; - fw_hdr = acr_image + hsbin_hdr->header_offset; - load_hdr = acr_image + fw_hdr->hdr_offset; - acr_data = acr_image + hsbin_hdr->data_offset; - - /* Patch descriptor with WPR information? */ - if (patch) { - struct hsflcn_acr_desc *desc; - - desc = acr_data + load_hdr->data_dma_base; - acr->func->fixup_hs_desc(acr, sb, desc); - } - - if (load_hdr->num_apps > ACR_R352_MAX_APPS) { - nvkm_error(subdev, "more apps (%d) than supported (%d)!", - load_hdr->num_apps, ACR_R352_MAX_APPS); - ret = -EINVAL; - goto cleanup; - } - memcpy(load_header, load_hdr, sizeof(*load_header) + - (sizeof(load_hdr->apps[0]) * 2 * load_hdr->num_apps)); - - /* Create ACR blob and copy HS data to it */ - ret = nvkm_gpuobj_new(subdev->device, ALIGN(hsbin_hdr->data_size, 256), - 0x1000, false, NULL, blob); - if (ret) - goto cleanup; - - nvkm_kmap(*blob); - nvkm_gpuobj_memcpy_to(*blob, 0, acr_data, hsbin_hdr->data_size); - nvkm_done(*blob); - -cleanup: - kfree(acr_image); - - return ret; -} - -/** - * acr_r352_load_blobs - load blobs common to all ACR V1 versions. - * - * This includes the LS blob, HS ucode loading blob, and HS bootloader. - * - * The HS ucode unload blob is only used on dGPU if the WPR region is variable. - */ -int -acr_r352_load_blobs(struct acr_r352 *acr, struct nvkm_secboot *sb) -{ - struct nvkm_subdev *subdev = &sb->subdev; - int ret; - - /* Firmware already loaded? 
*/ - if (acr->firmware_ok) - return 0; - - /* Load and prepare the managed falcon's firmwares */ - ret = acr_r352_prepare_ls_blob(acr, sb); - if (ret) - return ret; - - /* Load the HS firmware that will load the LS firmwares */ - if (!acr->load_blob) { - ret = acr_r352_prepare_hs_blob(acr, sb, "acr/ucode_load", - &acr->load_blob, - &acr->load_bl_header, true); - if (ret) - return ret; - } - - /* If the ACR region is dynamically programmed, we need an unload FW */ - if (sb->wpr_size == 0) { - ret = acr_r352_prepare_hs_blob(acr, sb, "acr/ucode_unload", - &acr->unload_blob, - &acr->unload_bl_header, false); - if (ret) - return ret; - } - - /* Load the HS firmware bootloader */ - if (!acr->hsbl_blob) { - acr->hsbl_blob = nvkm_acr_load_firmware(subdev, "acr/bl", 0); - if (IS_ERR(acr->hsbl_blob)) { - ret = PTR_ERR(acr->hsbl_blob); - acr->hsbl_blob = NULL; - return ret; - } - - if (acr->base.boot_falcon != NVKM_SECBOOT_FALCON_PMU) { - acr->hsbl_unload_blob = nvkm_acr_load_firmware(subdev, - "acr/unload_bl", 0); - if (IS_ERR(acr->hsbl_unload_blob)) { - ret = PTR_ERR(acr->hsbl_unload_blob); - acr->hsbl_unload_blob = NULL; - return ret; - } - } else { - acr->hsbl_unload_blob = acr->hsbl_blob; - } - } - - acr->firmware_ok = true; - nvkm_debug(&sb->subdev, "LS blob successfully created\n"); - - return 0; -} - -/** - * acr_r352_load() - prepare HS falcon to run the specified blob, mapped. - * - * Returns the start address to use, or a negative error value. - */ -static int -acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon, - struct nvkm_gpuobj *blob, u64 offset) -{ - struct acr_r352 *acr = acr_r352(_acr); - const u32 bl_desc_size = acr->func->hs_bl_desc_size; - const struct hsf_load_header *load_hdr; - struct fw_bin_header *bl_hdr; - struct fw_bl_desc *hsbl_desc; - void *bl, *blob_data, *hsbl_code, *hsbl_data; - u32 code_size; - u8 *bl_desc; - - bl_desc = kzalloc(bl_desc_size, GFP_KERNEL); - if (!bl_desc) - return -ENOMEM; - - /* Find the bootloader descriptor for our blob and copy it */ - if (blob == acr->load_blob) { - load_hdr = &acr->load_bl_header; - bl = acr->hsbl_blob; - } else if (blob == acr->unload_blob) { - load_hdr = &acr->unload_bl_header; - bl = acr->hsbl_unload_blob; - } else { - nvkm_error(_acr->subdev, "invalid secure boot blob!\n"); - kfree(bl_desc); - return -EINVAL; - } - - bl_hdr = bl; - hsbl_desc = bl + bl_hdr->header_offset; - blob_data = bl + bl_hdr->data_offset; - hsbl_code = blob_data + hsbl_desc->code_off; - hsbl_data = blob_data + hsbl_desc->data_off; - code_size = ALIGN(hsbl_desc->code_size, 256); - - /* - * Copy HS bootloader data - */ - nvkm_falcon_load_dmem(falcon, hsbl_data, 0x0, hsbl_desc->data_size, 0); - - /* Copy HS bootloader code to end of IMEM */ - nvkm_falcon_load_imem(falcon, hsbl_code, falcon->code.limit - code_size, - code_size, hsbl_desc->start_tag, 0, false); - - /* Generate the BL header */ - acr->func->generate_hs_bl_desc(load_hdr, bl_desc, offset); - - /* - * Copy HS BL header where the HS descriptor expects it to be - */ - nvkm_falcon_load_dmem(falcon, bl_desc, hsbl_desc->dmem_load_off, - bl_desc_size, 0); - - kfree(bl_desc); - return hsbl_desc->start_tag << 8; -} - -static int -acr_r352_shutdown(struct acr_r352 *acr, struct nvkm_secboot *sb) -{ - struct nvkm_subdev *subdev = &sb->subdev; - int i; - - /* Run the unload blob to unprotect the WPR region */ - if (acr->unload_blob && sb->wpr_set) { - int ret; - - nvkm_debug(subdev, "running HS unload blob\n"); - ret = sb->func->run_blob(sb, acr->unload_blob, sb->halt_falcon); - if (ret < 0) - 
return ret;
-		/*
-		 * Unload blob will return this error code - it is not an error
-		 * and is the expected behavior on RM as well
-		 */
-		if (ret && ret != 0x1d) {
-			nvkm_error(subdev, "HS unload failed, ret 0x%08x\n", ret);
-			return -EINVAL;
-		}
-		nvkm_debug(subdev, "HS unload blob completed\n");
-	}
-
-	for (i = 0; i < NVKM_SECBOOT_FALCON_END; i++)
-		acr->falcon_state[i] = NON_SECURE;
-
-	sb->wpr_set = false;
-
-	return 0;
-}
-
-/**
- * Check if the WPR region has indeed been set by the ACR firmware, and
- * matches where it should be.
- */
-static bool
-acr_r352_wpr_is_set(const struct acr_r352 *acr, const struct nvkm_secboot *sb)
-{
-	const struct nvkm_subdev *subdev = &sb->subdev;
-	const struct nvkm_device *device = subdev->device;
-	u64 wpr_lo, wpr_hi;
-	u64 wpr_range_lo, wpr_range_hi;
-
-	nvkm_wr32(device, 0x100cd4, 0x2);
-	wpr_lo = (nvkm_rd32(device, 0x100cd4) & ~0xff);
-	wpr_lo <<= 8;
-	nvkm_wr32(device, 0x100cd4, 0x3);
-	wpr_hi = (nvkm_rd32(device, 0x100cd4) & ~0xff);
-	wpr_hi <<= 8;
-
-	if (sb->wpr_size != 0) {
-		wpr_range_lo = sb->wpr_addr;
-		wpr_range_hi = wpr_range_lo + sb->wpr_size;
-	} else {
-		wpr_range_lo = acr->ls_blob->addr;
-		wpr_range_hi = wpr_range_lo + acr->ls_blob->size;
-	}
-
-	return (wpr_lo >= wpr_range_lo && wpr_lo < wpr_range_hi &&
-		wpr_hi > wpr_range_lo && wpr_hi <= wpr_range_hi);
-}
-
-static int
-acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb)
-{
-	const struct nvkm_subdev *subdev = &sb->subdev;
-	unsigned long managed_falcons = acr->base.managed_falcons;
-	int falcon_id;
-	int ret;
-
-	if (sb->wpr_set)
-		return 0;
-
-	/* Make sure all blobs are ready */
-	ret = acr_r352_load_blobs(acr, sb);
-	if (ret)
-		return ret;
-
-	nvkm_debug(subdev, "running HS load blob\n");
-	ret = sb->func->run_blob(sb, acr->load_blob, sb->boot_falcon);
-	/* clear halt interrupt */
-	nvkm_falcon_clear_interrupt(sb->boot_falcon, 0x10);
-	sb->wpr_set = acr_r352_wpr_is_set(acr, sb);
-	if (ret < 0) {
-		return ret;
-	} else if (ret > 0) {
-		nvkm_error(subdev, "HS load failed, ret 0x%08x\n", ret);
-		return -EINVAL;
-	}
-	nvkm_debug(subdev, "HS load blob completed\n");
-	/* WPR must be set at this point */
-	if (!sb->wpr_set) {
-		nvkm_error(subdev, "ACR blob completed but WPR not set!\n");
-		return -EINVAL;
-	}
-
-	/* Run the LS firmwares' post_run hooks */
-	for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
-		const struct acr_r352_ls_func *func =
-			acr->func->ls_func[falcon_id];
-
-		if (func->post_run) {
-			ret = func->post_run(&acr->base, sb);
-			if (ret)
-				return ret;
-		}
-	}
-
-	return 0;
-}
-
-/**
- * acr_r352_reset_nopmu - dummy reset method when no PMU firmware is loaded
- *
- * Reset is done by re-executing secure boot from scratch, with lazy bootstrap
- * disabled. This has the effect of making all managed falcons ready-to-run.
- */
-static int
-acr_r352_reset_nopmu(struct acr_r352 *acr, struct nvkm_secboot *sb,
-		     unsigned long falcon_mask)
-{
-	int falcon;
-	int ret;
-
-	/*
-	 * Perform secure boot each time we are called on FECS. Since only FECS
-	 * and GPCCS are managed and started together, this ought to be safe.
-	 */
-	if (!(falcon_mask & BIT(NVKM_SECBOOT_FALCON_FECS)))
-		goto end;
-
-	ret = acr_r352_shutdown(acr, sb);
-	if (ret)
-		return ret;
-
-	ret = acr_r352_bootstrap(acr, sb);
-	if (ret)
-		return ret;
-
-end:
-	for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) {
-		acr->falcon_state[falcon] = RESET;
-	}
-	return 0;
-}
-
-/*
- * acr_r352_reset() - execute secure boot from the prepared state
- *
- * Load the HS bootloader and ask the falcon to run it. This will in turn
- * load the HS firmware and run it, so once the falcon stops all the managed
- * falcons should have their LS firmware loaded and be ready to run.
- */
-static int
-acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
-	       unsigned long falcon_mask)
-{
-	struct acr_r352 *acr = acr_r352(_acr);
-	struct nvkm_msgqueue *queue;
-	int falcon;
-	bool wpr_already_set = sb->wpr_set;
-	int ret;
-
-	/* Make sure secure boot is performed */
-	ret = acr_r352_bootstrap(acr, sb);
-	if (ret)
-		return ret;
-
-	/* No PMU interface? */
-	if (!nvkm_secboot_is_managed(sb, _acr->boot_falcon)) {
-		/* Redo secure boot entirely if it was already done */
-		if (wpr_already_set)
-			return acr_r352_reset_nopmu(acr, sb, falcon_mask);
-		/* Else return the result of the initial invocation */
-		else
-			return ret;
-	}
-
-	switch (_acr->boot_falcon) {
-	case NVKM_SECBOOT_FALCON_PMU:
-		queue = sb->subdev.device->pmu->queue;
-		break;
-	case NVKM_SECBOOT_FALCON_SEC2:
-		queue = sb->subdev.device->sec2->queue;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	/* Otherwise just ask the LS firmware to reset the falcon */
-	for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END)
-		nvkm_debug(&sb->subdev, "resetting %s falcon\n",
-			   nvkm_secboot_falcon_name[falcon]);
-	ret = nvkm_msgqueue_acr_boot_falcons(queue, falcon_mask);
-	if (ret) {
-		nvkm_error(&sb->subdev, "error during falcon reset: %d\n", ret);
-		return ret;
-	}
-	nvkm_debug(&sb->subdev, "falcon reset done\n");
-
-	return 0;
-}
-
-static int
-acr_r352_fini(struct nvkm_acr *_acr, struct nvkm_secboot *sb, bool suspend)
-{
-	struct acr_r352 *acr = acr_r352(_acr);
-
-	return acr_r352_shutdown(acr, sb);
-}
-
-static void
-acr_r352_dtor(struct nvkm_acr *_acr)
-{
-	struct acr_r352 *acr = acr_r352(_acr);
-
-	nvkm_gpuobj_del(&acr->unload_blob);
-
-	if (_acr->boot_falcon != NVKM_SECBOOT_FALCON_PMU)
-		kfree(acr->hsbl_unload_blob);
-	kfree(acr->hsbl_blob);
-	nvkm_gpuobj_del(&acr->load_blob);
-	nvkm_gpuobj_del(&acr->ls_blob);
-
-	kfree(acr);
-}
-
-static const struct acr_r352_lsf_func
-acr_r352_ls_fecs_func_0 = {
-	.generate_bl_desc = acr_r352_generate_flcn_bl_desc,
-	.bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
-};
-
-const struct acr_r352_ls_func
-acr_r352_ls_fecs_func = {
-	.load = acr_ls_ucode_load_fecs,
-	.version_max = 0,
-	.version = {
-		&acr_r352_ls_fecs_func_0,
-	}
-};
-
-static const struct acr_r352_lsf_func
-acr_r352_ls_gpccs_func_0 = {
-	.generate_bl_desc = acr_r352_generate_flcn_bl_desc,
-	.bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
-	/* GPCCS will be loaded using PRI */
-	.lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
-};
-
-static const struct acr_r352_ls_func
-acr_r352_ls_gpccs_func = {
-	.load = acr_ls_ucode_load_gpccs,
-	.version_max = 0,
-	.version = {
-		&acr_r352_ls_gpccs_func_0,
-	}
-};
-
-
-
-/**
- * struct acr_r352_pmu_bl_desc - PMU DMEM bootloader descriptor
- * @dma_idx: DMA context to be used by BL while loading code/data
- * @code_dma_base: 256B-aligned Physical FB Address where code is located
- * @code_size_total: total size of the code part in the ucode
- * @code_size_to_load: size of the code part to load in PMU IMEM.
- * @code_entry_point: entry point in the code.
- * @data_dma_base: Physical FB address where data part of ucode is located
- * @data_size: Total size of the data portion.
- * @overlay_dma_base: Physical FB address for resident code present in ucode
- * @argc: Total number of args
- * @argv: offset where args are copied into PMU's DMEM.
- *
- * Structure used by the PMU bootloader to load the rest of the code
- */
-struct acr_r352_pmu_bl_desc {
-	u32 dma_idx;
-	u32 code_dma_base;
-	u32 code_size_total;
-	u32 code_size_to_load;
-	u32 code_entry_point;
-	u32 data_dma_base;
-	u32 data_size;
-	u32 overlay_dma_base;
-	u32 argc;
-	u32 argv;
-	u16 code_dma_base1;
-	u16 data_dma_base1;
-	u16 overlay_dma_base1;
-};
-
-/**
- * acr_r352_generate_pmu_bl_desc() - populate a DMEM BL descriptor for PMU LS image
- *
- */
-static void
-acr_r352_generate_pmu_bl_desc(const struct nvkm_acr *acr,
-			      const struct ls_ucode_img *img, u64 wpr_addr,
-			      void *_desc)
-{
-	const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
-	const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
-	struct acr_r352_pmu_bl_desc *desc = _desc;
-	u64 base;
-	u64 addr_code;
-	u64 addr_data;
-	u32 addr_args;
-
-	base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
-	addr_code = (base + pdesc->app_resident_code_offset) >> 8;
-	addr_data = (base + pdesc->app_resident_data_offset) >> 8;
-	addr_args = pmu->falcon->data.limit;
-	addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
-
-	desc->dma_idx = FALCON_DMAIDX_UCODE;
-	desc->code_dma_base = lower_32_bits(addr_code);
-	desc->code_dma_base1 = upper_32_bits(addr_code);
-	desc->code_size_total = pdesc->app_size;
-	desc->code_size_to_load = pdesc->app_resident_code_size;
-	desc->code_entry_point = pdesc->app_imem_entry;
-	desc->data_dma_base = lower_32_bits(addr_data);
-	desc->data_dma_base1 = upper_32_bits(addr_data);
-	desc->data_size = pdesc->app_resident_data_size;
-	desc->overlay_dma_base = lower_32_bits(addr_code);
-	desc->overlay_dma_base1 = upper_32_bits(addr_code);
-	desc->argc = 1;
-	desc->argv = addr_args;
-}
-
-static const struct acr_r352_lsf_func
-acr_r352_ls_pmu_func_0 = {
-	.generate_bl_desc = acr_r352_generate_pmu_bl_desc,
-	.bl_desc_size = sizeof(struct acr_r352_pmu_bl_desc),
-};
-
-static const struct acr_r352_ls_func
-acr_r352_ls_pmu_func = {
-	.load = acr_ls_ucode_load_pmu,
-	.post_run = acr_ls_pmu_post_run,
-	.version_max = 0,
-	.version = {
-		&acr_r352_ls_pmu_func_0,
-	}
-};
-
-const struct acr_r352_func
-acr_r352_func = {
-	.fixup_hs_desc = acr_r352_fixup_hs_desc,
-	.generate_hs_bl_desc = acr_r352_generate_hs_bl_desc,
-	.hs_bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
-	.ls_ucode_img_load = acr_r352_ls_ucode_img_load,
-	.ls_fill_headers = acr_r352_ls_fill_headers,
-	.ls_write_wpr = acr_r352_ls_write_wpr,
-	.ls_func = {
-		[NVKM_SECBOOT_FALCON_FECS] = &acr_r352_ls_fecs_func,
-		[NVKM_SECBOOT_FALCON_GPCCS] = &acr_r352_ls_gpccs_func,
-		[NVKM_SECBOOT_FALCON_PMU] = &acr_r352_ls_pmu_func,
-	},
-};
-
-static const struct nvkm_acr_func
-acr_r352_base_func = {
-	.dtor = acr_r352_dtor,
-	.fini = acr_r352_fini,
-	.load = acr_r352_load,
-	.reset = acr_r352_reset,
-};
-
-struct nvkm_acr *
-acr_r352_new_(const struct acr_r352_func *func,
-	      enum nvkm_secboot_falcon boot_falcon,
-	      unsigned long managed_falcons)
-{
-	struct acr_r352 *acr;
-	int i;
-
-	/* Check that all requested falcons are supported */
-	for_each_set_bit(i, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
-		if (!func->ls_func[i])
-			return ERR_PTR(-ENOTSUPP);
- } - - acr = kzalloc(sizeof(*acr), GFP_KERNEL); - if (!acr) - return ERR_PTR(-ENOMEM); - - acr->base.boot_falcon = boot_falcon; - acr->base.managed_falcons = managed_falcons; - acr->base.func = &acr_r352_base_func; - acr->func = func; - - return &acr->base; -} - -struct nvkm_acr * -acr_r352_new(unsigned long managed_falcons) -{ - return acr_r352_new_(&acr_r352_func, NVKM_SECBOOT_FALCON_PMU, - managed_falcons); -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h deleted file mode 100644 index e516cab849dd1915926d2117839d31c3ac9b7d57..0000000000000000000000000000000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h +++ /dev/null @@ -1,167 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ -#ifndef __NVKM_SECBOOT_ACR_R352_H__ -#define __NVKM_SECBOOT_ACR_R352_H__ - -#include "acr.h" -#include "ls_ucode.h" -#include "hs_ucode.h" - -struct ls_ucode_img; - -#define ACR_R352_MAX_APPS 8 - -#define LSF_FLAG_LOAD_CODE_AT_0 1 -#define LSF_FLAG_DMACTL_REQ_CTX 4 -#define LSF_FLAG_FORCE_PRIV_LOAD 8 - -static inline u32 -hsf_load_header_app_off(const struct hsf_load_header *hdr, u32 app) -{ - return hdr->apps[app]; -} - -static inline u32 -hsf_load_header_app_size(const struct hsf_load_header *hdr, u32 app) -{ - return hdr->apps[hdr->num_apps + app]; -} - -/** - * struct acr_r352_lsf_func - manages a specific LS firmware version - * - * @generate_bl_desc: function called on a block of bl_desc_size to generate the - * proper bootloader descriptor for this LS firmware - * @bl_desc_size: size of the bootloader descriptor - * @lhdr_flags: LS flags - */ -struct acr_r352_lsf_func { - void (*generate_bl_desc)(const struct nvkm_acr *, - const struct ls_ucode_img *, u64, void *); - u32 bl_desc_size; - u32 lhdr_flags; -}; - -/** - * struct acr_r352_ls_func - manages a single LS falcon - * - * @load: load the external firmware into a ls_ucode_img - * @post_run: hook called right after the ACR is executed - */ -struct acr_r352_ls_func { - int (*load)(const struct nvkm_secboot *, int maxver, - struct ls_ucode_img *); - int (*post_run)(const struct nvkm_acr *, const struct nvkm_secboot *); - int version_max; - const struct acr_r352_lsf_func *version[]; -}; - -struct acr_r352; - -/** - * struct acr_r352_func - manages nuances between ACR versions - * - * @generate_hs_bl_desc: function called on a block of bl_desc_size to generate - * the proper HS bootloader descriptor - * @hs_bl_desc_size: size of the HS bootloader descriptor - */ -struct acr_r352_func { - void (*generate_hs_bl_desc)(const struct hsf_load_header *, void *, - u64); - void (*fixup_hs_desc)(struct acr_r352 *, struct nvkm_secboot *, void *); - u32 hs_bl_desc_size; - bool shadow_blob; - - struct ls_ucode_img *(*ls_ucode_img_load)(const struct acr_r352 *, - const struct nvkm_secboot *, - enum nvkm_secboot_falcon); - int (*ls_fill_headers)(struct acr_r352 *, struct list_head *); - int (*ls_write_wpr)(struct acr_r352 *, struct list_head *, - struct nvkm_gpuobj *, u64); - - const struct acr_r352_ls_func *ls_func[NVKM_SECBOOT_FALCON_END]; -}; - -/** - * struct acr_r352 - ACR data for driver release 352 (and beyond) - */ -struct acr_r352 { - struct nvkm_acr base; - const struct acr_r352_func *func; - - /* - * HS FW - lock WPR region (dGPU only) and load LS FWs - * on Tegra the HS FW copies the LS blob into the fixed WPR instead - */ - struct nvkm_gpuobj *load_blob; - struct { - struct hsf_load_header load_bl_header; - u32 __load_apps[ACR_R352_MAX_APPS * 2]; - }; - - /* HS FW - unlock WPR region (dGPU only) */ - struct nvkm_gpuobj *unload_blob; - struct { - struct hsf_load_header unload_bl_header; - u32 __unload_apps[ACR_R352_MAX_APPS * 2]; - }; - - /* HS bootloader */ - void *hsbl_blob; - - /* HS bootloader for unload blob, if using a different falcon */ - void *hsbl_unload_blob; - - /* LS FWs, to be loaded by the HS ACR */ - struct nvkm_gpuobj *ls_blob; - - /* Firmware already loaded? 
*/ - bool firmware_ok; - - /* Falcons to lazy-bootstrap */ - u32 lazy_bootstrap; - - /* To keep track of the state of all managed falcons */ - enum { - /* In non-secure state, no firmware loaded, no privileges*/ - NON_SECURE = 0, - /* In low-secure mode and ready to be started */ - RESET, - /* In low-secure mode and running */ - RUNNING, - } falcon_state[NVKM_SECBOOT_FALCON_END]; -}; -#define acr_r352(acr) container_of(acr, struct acr_r352, base) - -struct nvkm_acr *acr_r352_new_(const struct acr_r352_func *, - enum nvkm_secboot_falcon, unsigned long); - -struct ls_ucode_img *acr_r352_ls_ucode_img_load(const struct acr_r352 *, - const struct nvkm_secboot *, - enum nvkm_secboot_falcon); -int acr_r352_ls_fill_headers(struct acr_r352 *, struct list_head *); -int acr_r352_ls_write_wpr(struct acr_r352 *, struct list_head *, - struct nvkm_gpuobj *, u64); - -void acr_r352_fixup_hs_desc(struct acr_r352 *, struct nvkm_secboot *, void *); - -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c deleted file mode 100644 index f6b2d20d7fc3dd3d098fcfbc7577149a0f7580ac..0000000000000000000000000000000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c +++ /dev/null @@ -1,229 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "acr_r361.h" - -#include -#include -#include -#include - -static void -acr_r361_generate_flcn_bl_desc(const struct nvkm_acr *acr, - const struct ls_ucode_img *img, u64 wpr_addr, - void *_desc) -{ - struct acr_r361_flcn_bl_desc *desc = _desc; - const struct ls_ucode_img_desc *pdesc = &img->ucode_desc; - u64 base, addr_code, addr_data; - - base = wpr_addr + img->ucode_off + pdesc->app_start_offset; - addr_code = base + pdesc->app_resident_code_offset; - addr_data = base + pdesc->app_resident_data_offset; - - desc->ctx_dma = FALCON_DMAIDX_UCODE; - desc->code_dma_base = u64_to_flcn64(addr_code); - desc->non_sec_code_off = pdesc->app_resident_code_offset; - desc->non_sec_code_size = pdesc->app_resident_code_size; - desc->code_entry_point = pdesc->app_imem_entry; - desc->data_dma_base = u64_to_flcn64(addr_data); - desc->data_size = pdesc->app_resident_data_size; -} - -void -acr_r361_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc, - u64 offset) -{ - struct acr_r361_flcn_bl_desc *bl_desc = _bl_desc; - - bl_desc->ctx_dma = FALCON_DMAIDX_VIRT; - bl_desc->code_dma_base = u64_to_flcn64(offset); - bl_desc->non_sec_code_off = hdr->non_sec_code_off; - bl_desc->non_sec_code_size = hdr->non_sec_code_size; - bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0); - bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0); - bl_desc->code_entry_point = 0; - bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base); - bl_desc->data_size = hdr->data_size; -} - -static const struct acr_r352_lsf_func -acr_r361_ls_fecs_func_0 = { - .generate_bl_desc = acr_r361_generate_flcn_bl_desc, - .bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc), -}; - -const struct acr_r352_ls_func -acr_r361_ls_fecs_func = { - .load = acr_ls_ucode_load_fecs, - .version_max = 0, - .version = { - &acr_r361_ls_fecs_func_0, - } -}; - -static const struct acr_r352_lsf_func -acr_r361_ls_gpccs_func_0 = { - .generate_bl_desc = acr_r361_generate_flcn_bl_desc, - .bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc), - /* GPCCS will be loaded using PRI */ - .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD, -}; - -const struct acr_r352_ls_func -acr_r361_ls_gpccs_func = { - .load = acr_ls_ucode_load_gpccs, - .version_max = 0, - .version = { - &acr_r361_ls_gpccs_func_0, - } -}; - -struct acr_r361_pmu_bl_desc { - u32 reserved; - u32 dma_idx; - struct flcn_u64 code_dma_base; - u32 total_code_size; - u32 code_size_to_load; - u32 code_entry_point; - struct flcn_u64 data_dma_base; - u32 data_size; - struct flcn_u64 overlay_dma_base; - u32 argc; - u32 argv; -}; - -static void -acr_r361_generate_pmu_bl_desc(const struct nvkm_acr *acr, - const struct ls_ucode_img *img, u64 wpr_addr, - void *_desc) -{ - const struct ls_ucode_img_desc *pdesc = &img->ucode_desc; - const struct nvkm_pmu *pmu = acr->subdev->device->pmu; - struct acr_r361_pmu_bl_desc *desc = _desc; - u64 base, addr_code, addr_data; - u32 addr_args; - - base = wpr_addr + img->ucode_off + pdesc->app_start_offset; - addr_code = base + pdesc->app_resident_code_offset; - addr_data = base + pdesc->app_resident_data_offset; - addr_args = pmu->falcon->data.limit; - addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE; - - desc->dma_idx = FALCON_DMAIDX_UCODE; - desc->code_dma_base = u64_to_flcn64(addr_code); - desc->total_code_size = pdesc->app_size; - desc->code_size_to_load = pdesc->app_resident_code_size; - desc->code_entry_point = pdesc->app_imem_entry; - desc->data_dma_base = u64_to_flcn64(addr_data); - desc->data_size = pdesc->app_resident_data_size; - 
desc->overlay_dma_base = u64_to_flcn64(addr_code); - desc->argc = 1; - desc->argv = addr_args; -} - -static const struct acr_r352_lsf_func -acr_r361_ls_pmu_func_0 = { - .generate_bl_desc = acr_r361_generate_pmu_bl_desc, - .bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc), -}; - -const struct acr_r352_ls_func -acr_r361_ls_pmu_func = { - .load = acr_ls_ucode_load_pmu, - .post_run = acr_ls_pmu_post_run, - .version_max = 0, - .version = { - &acr_r361_ls_pmu_func_0, - } -}; - -static void -acr_r361_generate_sec2_bl_desc(const struct nvkm_acr *acr, - const struct ls_ucode_img *img, u64 wpr_addr, - void *_desc) -{ - const struct ls_ucode_img_desc *pdesc = &img->ucode_desc; - const struct nvkm_sec2 *sec = acr->subdev->device->sec2; - struct acr_r361_pmu_bl_desc *desc = _desc; - u64 base, addr_code, addr_data; - u32 addr_args; - - base = wpr_addr + img->ucode_off + pdesc->app_start_offset; - /* For some reason we should not add app_resident_code_offset here */ - addr_code = base; - addr_data = base + pdesc->app_resident_data_offset; - addr_args = sec->falcon->data.limit; - addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE; - - desc->dma_idx = FALCON_SEC2_DMAIDX_UCODE; - desc->code_dma_base = u64_to_flcn64(addr_code); - desc->total_code_size = pdesc->app_size; - desc->code_size_to_load = pdesc->app_resident_code_size; - desc->code_entry_point = pdesc->app_imem_entry; - desc->data_dma_base = u64_to_flcn64(addr_data); - desc->data_size = pdesc->app_resident_data_size; - desc->overlay_dma_base = u64_to_flcn64(addr_code); - desc->argc = 1; - /* args are stored at the beginning of EMEM */ - desc->argv = 0x01000000; -} - -const struct acr_r352_lsf_func -acr_r361_ls_sec2_func_0 = { - .generate_bl_desc = acr_r361_generate_sec2_bl_desc, - .bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc), -}; - -static const struct acr_r352_ls_func -acr_r361_ls_sec2_func = { - .load = acr_ls_ucode_load_sec2, - .post_run = acr_ls_sec2_post_run, - .version_max = 0, - .version = { - &acr_r361_ls_sec2_func_0, - } -}; - - -const struct acr_r352_func -acr_r361_func = { - .fixup_hs_desc = acr_r352_fixup_hs_desc, - .generate_hs_bl_desc = acr_r361_generate_hs_bl_desc, - .hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc), - .ls_ucode_img_load = acr_r352_ls_ucode_img_load, - .ls_fill_headers = acr_r352_ls_fill_headers, - .ls_write_wpr = acr_r352_ls_write_wpr, - .ls_func = { - [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func, - [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func, - [NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func, - [NVKM_SECBOOT_FALCON_SEC2] = &acr_r361_ls_sec2_func, - }, -}; - -struct nvkm_acr * -acr_r361_new(unsigned long managed_falcons) -{ - return acr_r352_new_(&acr_r361_func, NVKM_SECBOOT_FALCON_PMU, - managed_falcons); -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h deleted file mode 100644 index 38dec93779c886ac5e5103bfdae46d37796f8aad..0000000000000000000000000000000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NVKM_SECBOOT_ACR_R361_H__
-#define __NVKM_SECBOOT_ACR_R361_H__
-
-#include "acr_r352.h"
-
-/**
- * struct acr_r361_flcn_bl_desc - DMEM bootloader descriptor
- * @signature: 16B signature for secure code. 0s if no secure code
- * @ctx_dma: DMA context to be used by BL while loading code/data
- * @code_dma_base: 256B-aligned Physical FB Address where code is located
- * (falcon's $xcbase register)
- * @non_sec_code_off: offset from code_dma_base where the non-secure code is
- * located. The offset must be a multiple of 256 to help perf
- * @non_sec_code_size: the size of the non-secure code part.
- * @sec_code_off: offset from code_dma_base where the secure code is
- * located. The offset must be a multiple of 256 to help perf
- * @sec_code_size: the size of the secure code part.
- * @code_entry_point: code entry point which will be invoked by BL after
- * code is loaded.
- * @data_dma_base: 256B aligned Physical FB Address where data is located.
- * (falcon's $xdbase register)
- * @data_size: size of data block. Should be a multiple of 256B
- *
- * Structure used by the bootloader to load the rest of the code. This has
- * to be filled by the host and copied into DMEM at the offset provided in the
- * hsflcn_bl_desc.bl_desc_dmem_load_off.
- */
-struct acr_r361_flcn_bl_desc {
-	u32 reserved[4];
-	u32 signature[4];
-	u32 ctx_dma;
-	struct flcn_u64 code_dma_base;
-	u32 non_sec_code_off;
-	u32 non_sec_code_size;
-	u32 sec_code_off;
-	u32 sec_code_size;
-	u32 code_entry_point;
-	struct flcn_u64 data_dma_base;
-	u32 data_size;
-};
-
-void acr_r361_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64);
-
-extern const struct acr_r352_ls_func acr_r361_ls_fecs_func;
-extern const struct acr_r352_ls_func acr_r361_ls_gpccs_func;
-extern const struct acr_r352_ls_func acr_r361_ls_pmu_func;
-extern const struct acr_r352_lsf_func acr_r361_ls_sec2_func_0;
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r364.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r364.c
deleted file mode 100644
index 30cf041099912a99535fb958b199bdb534dc38f8..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r364.c
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "acr_r361.h" - -#include - -/* - * r364 ACR: hsflcn_desc structure has changed to introduce the shadow_mem - * parameter. - */ - -struct acr_r364_hsflcn_desc { - union { - u8 reserved_dmem[0x200]; - u32 signatures[4]; - } ucode_reserved_space; - u32 wpr_region_id; - u32 wpr_offset; - u32 mmu_memory_range; - struct { - u32 no_regions; - struct { - u32 start_addr; - u32 end_addr; - u32 region_id; - u32 read_mask; - u32 write_mask; - u32 client_mask; - u32 shadow_mem_start_addr; - } region_props[2]; - } regions; - u32 ucode_blob_size; - u64 ucode_blob_base __aligned(8); - struct { - u32 vpr_enabled; - u32 vpr_start; - u32 vpr_end; - u32 hdcp_policies; - } vpr_desc; -}; - -static void -acr_r364_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb, - void *_desc) -{ - struct acr_r364_hsflcn_desc *desc = _desc; - struct nvkm_gpuobj *ls_blob = acr->ls_blob; - - /* WPR region information if WPR is not fixed */ - if (sb->wpr_size == 0) { - u64 wpr_start = ls_blob->addr; - u64 wpr_end = ls_blob->addr + ls_blob->size; - - if (acr->func->shadow_blob) - wpr_start += ls_blob->size / 2; - - desc->wpr_region_id = 1; - desc->regions.no_regions = 2; - desc->regions.region_props[0].start_addr = wpr_start >> 8; - desc->regions.region_props[0].end_addr = wpr_end >> 8; - desc->regions.region_props[0].region_id = 1; - desc->regions.region_props[0].read_mask = 0xf; - desc->regions.region_props[0].write_mask = 0xc; - desc->regions.region_props[0].client_mask = 0x2; - if (acr->func->shadow_blob) - desc->regions.region_props[0].shadow_mem_start_addr = - ls_blob->addr >> 8; - else - desc->regions.region_props[0].shadow_mem_start_addr = 0; - } else { - desc->ucode_blob_base = ls_blob->addr; - desc->ucode_blob_size = ls_blob->size; - } -} - -const struct acr_r352_func -acr_r364_func = { - .fixup_hs_desc = acr_r364_fixup_hs_desc, - .generate_hs_bl_desc = acr_r361_generate_hs_bl_desc, - .hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc), - .ls_ucode_img_load = acr_r352_ls_ucode_img_load, - .ls_fill_headers = acr_r352_ls_fill_headers, - .ls_write_wpr = acr_r352_ls_write_wpr, - .ls_func = { - [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func, - [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func, - [NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func, - }, -}; - - -struct nvkm_acr * -acr_r364_new(unsigned long managed_falcons) -{ - return acr_r352_new_(&acr_r364_func, NVKM_SECBOOT_FALCON_PMU, - managed_falcons); -} diff 
--git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c deleted file mode 100644 index 472ced29da7e5f01053d9e61b16a73bc9ebc1d84..0000000000000000000000000000000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c +++ /dev/null @@ -1,418 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "acr_r367.h" -#include "acr_r361.h" -#include "acr_r370.h" - -#include - -/* - * r367 ACR: new LS signature format requires a rewrite of LS firmware and - * blob creation functions. Also the hsflcn_desc layout has changed slightly. - */ - -#define LSF_LSB_DEPMAP_SIZE 11 - -/** - * struct acr_r367_lsf_lsb_header - LS firmware header - * - * See also struct acr_r352_lsf_lsb_header for documentation. - */ -struct acr_r367_lsf_lsb_header { - /** - * LS falcon signatures - * @prd_keys: signature to use in production mode - * @dgb_keys: signature to use in debug mode - * @b_prd_present: whether the production key is present - * @b_dgb_present: whether the debug key is present - * @falcon_id: ID of the falcon the ucode applies to - */ - struct { - u8 prd_keys[2][16]; - u8 dbg_keys[2][16]; - u32 b_prd_present; - u32 b_dbg_present; - u32 falcon_id; - u32 supports_versioning; - u32 version; - u32 depmap_count; - u8 depmap[LSF_LSB_DEPMAP_SIZE * 2 * 4]; - u8 kdf[16]; - } signature; - u32 ucode_off; - u32 ucode_size; - u32 data_size; - u32 bl_code_size; - u32 bl_imem_off; - u32 bl_data_off; - u32 bl_data_size; - u32 app_code_off; - u32 app_code_size; - u32 app_data_off; - u32 app_data_size; - u32 flags; -}; - -/** - * struct acr_r367_lsf_wpr_header - LS blob WPR Header - * - * See also struct acr_r352_lsf_wpr_header for documentation. 
- */
-struct acr_r367_lsf_wpr_header {
-	u32 falcon_id;
-	u32 lsb_offset;
-	u32 bootstrap_owner;
-	u32 lazy_bootstrap;
-	u32 bin_version;
-	u32 status;
-#define LSF_IMAGE_STATUS_NONE				0
-#define LSF_IMAGE_STATUS_COPY				1
-#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED		2
-#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED		3
-#define LSF_IMAGE_STATUS_VALIDATION_DONE		4
-#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED		5
-#define LSF_IMAGE_STATUS_BOOTSTRAP_READY		6
-#define LSF_IMAGE_STATUS_REVOCATION_CHECK_FAILED	7
-};
-
-/**
- * struct ls_ucode_img_r367 - ucode image augmented with r367 headers
- */
-struct ls_ucode_img_r367 {
-	struct ls_ucode_img base;
-
-	const struct acr_r352_lsf_func *func;
-
-	struct acr_r367_lsf_wpr_header wpr_header;
-	struct acr_r367_lsf_lsb_header lsb_header;
-};
-#define ls_ucode_img_r367(i) container_of(i, struct ls_ucode_img_r367, base)
-
-struct ls_ucode_img *
-acr_r367_ls_ucode_img_load(const struct acr_r352 *acr,
-			   const struct nvkm_secboot *sb,
-			   enum nvkm_secboot_falcon falcon_id)
-{
-	const struct nvkm_subdev *subdev = acr->base.subdev;
-	const struct acr_r352_ls_func *func = acr->func->ls_func[falcon_id];
-	struct ls_ucode_img_r367 *img;
-	int ret;
-
-	img = kzalloc(sizeof(*img), GFP_KERNEL);
-	if (!img)
-		return ERR_PTR(-ENOMEM);
-
-	img->base.falcon_id = falcon_id;
-
-	ret = func->load(sb, func->version_max, &img->base);
-	if (ret < 0) {
-		kfree(img->base.ucode_data);
-		kfree(img->base.sig);
-		kfree(img);
-		return ERR_PTR(ret);
-	}
-
-	img->func = func->version[ret];
-
-	/* Check that the signature size matches our expectations... */
-	if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
-		nvkm_error(subdev, "invalid signature size for %s falcon!\n",
-			   nvkm_secboot_falcon_name[falcon_id]);
-		return ERR_PTR(-EINVAL);
-	}
-
-	/* Copy signature to the right place */
-	memcpy(&img->lsb_header.signature, img->base.sig, img->base.sig_size);
-
-	/* not needed? the signature should already have the right value */
-	img->lsb_header.signature.falcon_id = falcon_id;
-
-	return &img->base;
-}
-
-#define LSF_LSB_HEADER_ALIGN 256
-#define LSF_BL_DATA_ALIGN 256
-#define LSF_BL_DATA_SIZE_ALIGN 256
-#define LSF_BL_CODE_SIZE_ALIGN 256
-#define LSF_UCODE_DATA_ALIGN 4096
-
-static u32
-acr_r367_ls_img_fill_headers(struct acr_r352 *acr,
-			     struct ls_ucode_img_r367 *img, u32 offset)
-{
-	struct ls_ucode_img *_img = &img->base;
-	struct acr_r367_lsf_wpr_header *whdr = &img->wpr_header;
-	struct acr_r367_lsf_lsb_header *lhdr = &img->lsb_header;
-	struct ls_ucode_img_desc *desc = &_img->ucode_desc;
-	const struct acr_r352_lsf_func *func = img->func;
-
-	/* Fill WPR header */
-	whdr->falcon_id = _img->falcon_id;
-	whdr->bootstrap_owner = acr->base.boot_falcon;
-	whdr->bin_version = lhdr->signature.version;
-	whdr->status = LSF_IMAGE_STATUS_COPY;
-
-	/* Skip bootstrapping falcons started by someone other than the ACR */
-	if (acr->lazy_bootstrap & BIT(_img->falcon_id))
-		whdr->lazy_bootstrap = 1;
-
-	/* Align, save off, and include an LSB header size */
-	offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN);
-	whdr->lsb_offset = offset;
-	offset += sizeof(*lhdr);
-
-	/*
-	 * Align, save off, and include the original (static) ucode
-	 * image size
-	 */
-	offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);
-	_img->ucode_off = lhdr->ucode_off = offset;
-	offset += _img->ucode_size;
-
-	/*
-	 * For falcons that use a boot loader (BL), we append a loader
-	 * desc structure at the end of the ucode image and consider
-	 * this the boot loader data. The host will then copy the loader
-	 * desc args to this space within the WPR region (before locking
-	 * down) and the HS bin will then copy them to DMEM 0 for the
-	 * loader.
-	 */
-	lhdr->bl_code_size = ALIGN(desc->bootloader_size,
-				   LSF_BL_CODE_SIZE_ALIGN);
-	lhdr->ucode_size = ALIGN(desc->app_resident_data_offset,
-				 LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size;
-	lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) +
-				lhdr->bl_code_size - lhdr->ucode_size;
-	/*
-	 * Though the BL is located at offset 0 of the image, the VA
-	 * is different to make sure that it doesn't collide with the
-	 * actual OS VA range
-	 */
-	lhdr->bl_imem_off = desc->bootloader_imem_offset;
-	lhdr->app_code_off = desc->app_start_offset +
-			     desc->app_resident_code_offset;
-	lhdr->app_code_size = desc->app_resident_code_size;
-	lhdr->app_data_off = desc->app_start_offset +
-			     desc->app_resident_data_offset;
-	lhdr->app_data_size = desc->app_resident_data_size;
-
-	lhdr->flags = func->lhdr_flags;
-	if (_img->falcon_id == acr->base.boot_falcon)
-		lhdr->flags |= LSF_FLAG_DMACTL_REQ_CTX;
-
-	/* Align and save off BL descriptor size */
-	lhdr->bl_data_size = ALIGN(func->bl_desc_size, LSF_BL_DATA_SIZE_ALIGN);
-
-	/*
-	 * Align, save off, and include the additional BL data
-	 */
-	offset = ALIGN(offset, LSF_BL_DATA_ALIGN);
-	lhdr->bl_data_off = offset;
-	offset += lhdr->bl_data_size;
-
-	return offset;
-}
-
-int
-acr_r367_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs)
-{
-	struct ls_ucode_img_r367 *img;
-	struct list_head *l;
-	u32 count = 0;
-	u32 offset;
-
-	/* Count the number of images to manage */
-	list_for_each(l, imgs)
-		count++;
-
-	/*
-	 * Start with an array of WPR headers at the base of the WPR.
-	 * The expectation here is that the secure falcon will do a single DMA
-	 * read of this array and cache it internally so it's ok to pack these.
-	 * Also, we add 1 to the falcon count to indicate the end of the array.
-	 */
-	offset = sizeof(img->wpr_header) * (count + 1);
-
-	/*
-	 * Walk the managed falcons, accounting for the LSB structs
-	 * as well as the ucode images.
- */ - list_for_each_entry(img, imgs, base.node) { - offset = acr_r367_ls_img_fill_headers(acr, img, offset); - } - - return offset; -} - -int -acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs, - struct nvkm_gpuobj *wpr_blob, u64 wpr_addr) -{ - struct ls_ucode_img *_img; - u32 pos = 0; - u32 max_desc_size = 0; - u8 *gdesc; - - list_for_each_entry(_img, imgs, node) { - struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img); - const struct acr_r352_lsf_func *ls_func = img->func; - - max_desc_size = max(max_desc_size, ls_func->bl_desc_size); - } - - gdesc = kmalloc(max_desc_size, GFP_KERNEL); - if (!gdesc) - return -ENOMEM; - - nvkm_kmap(wpr_blob); - - list_for_each_entry(_img, imgs, node) { - struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img); - const struct acr_r352_lsf_func *ls_func = img->func; - - nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header, - sizeof(img->wpr_header)); - - nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset, - &img->lsb_header, sizeof(img->lsb_header)); - - /* Generate and write BL descriptor */ - memset(gdesc, 0, ls_func->bl_desc_size); - ls_func->generate_bl_desc(&acr->base, _img, wpr_addr, gdesc); - - nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.bl_data_off, - gdesc, ls_func->bl_desc_size); - - /* Copy ucode */ - nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off, - _img->ucode_data, _img->ucode_size); - - pos += sizeof(img->wpr_header); - } - - nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID); - - nvkm_done(wpr_blob); - - kfree(gdesc); - - return 0; -} - -struct acr_r367_hsflcn_desc { - u8 reserved_dmem[0x200]; - u32 signatures[4]; - u32 wpr_region_id; - u32 wpr_offset; - u32 mmu_memory_range; -#define FLCN_ACR_MAX_REGIONS 2 - struct { - u32 no_regions; - struct { - u32 start_addr; - u32 end_addr; - u32 region_id; - u32 read_mask; - u32 write_mask; - u32 client_mask; - u32 shadow_mem_start_addr; - } region_props[FLCN_ACR_MAX_REGIONS]; - } regions; - u32 ucode_blob_size; - u64 ucode_blob_base __aligned(8); - struct { - u32 vpr_enabled; - u32 vpr_start; - u32 vpr_end; - u32 hdcp_policies; - } vpr_desc; -}; - -void -acr_r367_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb, - void *_desc) -{ - struct acr_r367_hsflcn_desc *desc = _desc; - struct nvkm_gpuobj *ls_blob = acr->ls_blob; - - /* WPR region information if WPR is not fixed */ - if (sb->wpr_size == 0) { - u64 wpr_start = ls_blob->addr; - u64 wpr_end = ls_blob->addr + ls_blob->size; - - if (acr->func->shadow_blob) - wpr_start += ls_blob->size / 2; - - desc->wpr_region_id = 1; - desc->regions.no_regions = 2; - desc->regions.region_props[0].start_addr = wpr_start >> 8; - desc->regions.region_props[0].end_addr = wpr_end >> 8; - desc->regions.region_props[0].region_id = 1; - desc->regions.region_props[0].read_mask = 0xf; - desc->regions.region_props[0].write_mask = 0xc; - desc->regions.region_props[0].client_mask = 0x2; - if (acr->func->shadow_blob) - desc->regions.region_props[0].shadow_mem_start_addr = - ls_blob->addr >> 8; - else - desc->regions.region_props[0].shadow_mem_start_addr = 0; - } else { - desc->ucode_blob_base = ls_blob->addr; - desc->ucode_blob_size = ls_blob->size; - } -} - -static const struct acr_r352_ls_func -acr_r367_ls_sec2_func = { - .load = acr_ls_ucode_load_sec2, - .post_run = acr_ls_sec2_post_run, - .version_max = 1, - .version = { - &acr_r361_ls_sec2_func_0, - &acr_r370_ls_sec2_func_0, - } -}; - -const struct acr_r352_func -acr_r367_func = { - .fixup_hs_desc = acr_r367_fixup_hs_desc, - .generate_hs_bl_desc = 
acr_r361_generate_hs_bl_desc, - .hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc), - .shadow_blob = true, - .ls_ucode_img_load = acr_r367_ls_ucode_img_load, - .ls_fill_headers = acr_r367_ls_fill_headers, - .ls_write_wpr = acr_r367_ls_write_wpr, - .ls_func = { - [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func, - [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func, - [NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func, - [NVKM_SECBOOT_FALCON_SEC2] = &acr_r367_ls_sec2_func, - }, -}; - -struct nvkm_acr * -acr_r367_new(enum nvkm_secboot_falcon boot_falcon, - unsigned long managed_falcons) -{ - return acr_r352_new_(&acr_r367_func, boot_falcon, managed_falcons); -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c deleted file mode 100644 index e821d0fd62171801047334b3344194744f3ac586..0000000000000000000000000000000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "acr_r370.h" -#include "acr_r367.h" - -#include -#include -#include - -static void -acr_r370_generate_flcn_bl_desc(const struct nvkm_acr *acr, - const struct ls_ucode_img *img, u64 wpr_addr, - void *_desc) -{ - struct acr_r370_flcn_bl_desc *desc = _desc; - const struct ls_ucode_img_desc *pdesc = &img->ucode_desc; - u64 base, addr_code, addr_data; - - base = wpr_addr + img->ucode_off + pdesc->app_start_offset; - addr_code = base + pdesc->app_resident_code_offset; - addr_data = base + pdesc->app_resident_data_offset; - - desc->ctx_dma = FALCON_DMAIDX_UCODE; - desc->code_dma_base = u64_to_flcn64(addr_code); - desc->non_sec_code_off = pdesc->app_resident_code_offset; - desc->non_sec_code_size = pdesc->app_resident_code_size; - desc->code_entry_point = pdesc->app_imem_entry; - desc->data_dma_base = u64_to_flcn64(addr_data); - desc->data_size = pdesc->app_resident_data_size; -} - -static const struct acr_r352_lsf_func -acr_r370_ls_fecs_func_0 = { - .generate_bl_desc = acr_r370_generate_flcn_bl_desc, - .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc), -}; - -const struct acr_r352_ls_func -acr_r370_ls_fecs_func = { - .load = acr_ls_ucode_load_fecs, - .version_max = 0, - .version = { - &acr_r370_ls_fecs_func_0, - } -}; - -static const struct acr_r352_lsf_func -acr_r370_ls_gpccs_func_0 = { - .generate_bl_desc = acr_r370_generate_flcn_bl_desc, - .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc), - /* GPCCS will be loaded using PRI */ - .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD, -}; - -const struct acr_r352_ls_func -acr_r370_ls_gpccs_func = { - .load = acr_ls_ucode_load_gpccs, - .version_max = 0, - .version = { - &acr_r370_ls_gpccs_func_0, - } -}; - -static void -acr_r370_generate_sec2_bl_desc(const struct nvkm_acr *acr, - const struct ls_ucode_img *img, u64 wpr_addr, - void *_desc) -{ - const struct ls_ucode_img_desc *pdesc = &img->ucode_desc; - const struct nvkm_sec2 *sec = acr->subdev->device->sec2; - struct acr_r370_flcn_bl_desc *desc = _desc; - u64 base, addr_code, addr_data; - u32 addr_args; - - base = wpr_addr + img->ucode_off + pdesc->app_start_offset; - /* For some reason we should not add app_resident_code_offset here */ - addr_code = base; - addr_data = base + pdesc->app_resident_data_offset; - addr_args = sec->falcon->data.limit; - addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE; - - desc->ctx_dma = FALCON_SEC2_DMAIDX_UCODE; - desc->code_dma_base = u64_to_flcn64(addr_code); - desc->non_sec_code_off = pdesc->app_resident_code_offset; - desc->non_sec_code_size = pdesc->app_resident_code_size; - desc->code_entry_point = pdesc->app_imem_entry; - desc->data_dma_base = u64_to_flcn64(addr_data); - desc->data_size = pdesc->app_resident_data_size; - desc->argc = 1; - /* args are stored at the beginning of EMEM */ - desc->argv = 0x01000000; -} - -const struct acr_r352_lsf_func -acr_r370_ls_sec2_func_0 = { - .generate_bl_desc = acr_r370_generate_sec2_bl_desc, - .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc), -}; - -const struct acr_r352_ls_func -acr_r370_ls_sec2_func = { - .load = acr_ls_ucode_load_sec2, - .post_run = acr_ls_sec2_post_run, - .version_max = 0, - .version = { - &acr_r370_ls_sec2_func_0, - } -}; - -void -acr_r370_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc, - u64 offset) -{ - struct acr_r370_flcn_bl_desc *bl_desc = _bl_desc; - - bl_desc->ctx_dma = FALCON_DMAIDX_VIRT; - bl_desc->non_sec_code_off = hdr->non_sec_code_off; - bl_desc->non_sec_code_size = hdr->non_sec_code_size; - bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0); - 
bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0); - bl_desc->code_entry_point = 0; - bl_desc->code_dma_base = u64_to_flcn64(offset); - bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base); - bl_desc->data_size = hdr->data_size; -} - -const struct acr_r352_func -acr_r370_func = { - .fixup_hs_desc = acr_r367_fixup_hs_desc, - .generate_hs_bl_desc = acr_r370_generate_hs_bl_desc, - .hs_bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc), - .shadow_blob = true, - .ls_ucode_img_load = acr_r367_ls_ucode_img_load, - .ls_fill_headers = acr_r367_ls_fill_headers, - .ls_write_wpr = acr_r367_ls_write_wpr, - .ls_func = { - [NVKM_SECBOOT_FALCON_SEC2] = &acr_r370_ls_sec2_func, - [NVKM_SECBOOT_FALCON_FECS] = &acr_r370_ls_fecs_func, - [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r370_ls_gpccs_func, - }, -}; - -struct nvkm_acr * -acr_r370_new(enum nvkm_secboot_falcon boot_falcon, - unsigned long managed_falcons) -{ - return acr_r352_new_(&acr_r370_func, boot_falcon, managed_falcons); -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h deleted file mode 100644 index 2efed6f995ad03fd64c895286f73985e52bda0a1..0000000000000000000000000000000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#ifndef __NVKM_SECBOOT_ACR_R370_H__ -#define __NVKM_SECBOOT_ACR_R370_H__ - -#include "priv.h" -struct hsf_load_header; - -/* Same as acr_r361_flcn_bl_desc, plus argc/argv */ -struct acr_r370_flcn_bl_desc { - u32 reserved[4]; - u32 signature[4]; - u32 ctx_dma; - struct flcn_u64 code_dma_base; - u32 non_sec_code_off; - u32 non_sec_code_size; - u32 sec_code_off; - u32 sec_code_size; - u32 code_entry_point; - struct flcn_u64 data_dma_base; - u32 data_size; - u32 argc; - u32 argv; -}; - -void acr_r370_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64); -extern const struct acr_r352_ls_func acr_r370_ls_fecs_func; -extern const struct acr_r352_ls_func acr_r370_ls_gpccs_func; -extern const struct acr_r352_lsf_func acr_r370_ls_sec2_func_0; -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c deleted file mode 100644 index 8f0647766038604635d90def01909cc5be75e378..0000000000000000000000000000000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "acr_r370.h" -#include "acr_r367.h" - -#include -#include - -static void -acr_r375_generate_pmu_bl_desc(const struct nvkm_acr *acr, - const struct ls_ucode_img *img, u64 wpr_addr, - void *_desc) -{ - const struct ls_ucode_img_desc *pdesc = &img->ucode_desc; - const struct nvkm_pmu *pmu = acr->subdev->device->pmu; - struct acr_r370_flcn_bl_desc *desc = _desc; - u64 base, addr_code, addr_data; - u32 addr_args; - - base = wpr_addr + img->ucode_off + pdesc->app_start_offset; - addr_code = base + pdesc->app_resident_code_offset; - addr_data = base + pdesc->app_resident_data_offset; - addr_args = pmu->falcon->data.limit; - addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE; - - desc->ctx_dma = FALCON_DMAIDX_UCODE; - desc->code_dma_base = u64_to_flcn64(addr_code); - desc->non_sec_code_off = pdesc->app_resident_code_offset; - desc->non_sec_code_size = pdesc->app_resident_code_size; - desc->code_entry_point = pdesc->app_imem_entry; - desc->data_dma_base = u64_to_flcn64(addr_data); - desc->data_size = pdesc->app_resident_data_size; - desc->argc = 1; - desc->argv = addr_args; -} - -static const struct acr_r352_lsf_func -acr_r375_ls_pmu_func_0 = { - .generate_bl_desc = acr_r375_generate_pmu_bl_desc, - .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc), -}; - -const struct acr_r352_ls_func -acr_r375_ls_pmu_func = { - .load = acr_ls_ucode_load_pmu, - .post_run = acr_ls_pmu_post_run, - .version_max = 0, - .version = { - &acr_r375_ls_pmu_func_0, - } -}; - -const struct acr_r352_func -acr_r375_func = { - .fixup_hs_desc = acr_r367_fixup_hs_desc, - .generate_hs_bl_desc = acr_r370_generate_hs_bl_desc, - .hs_bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc), - .shadow_blob = true, - .ls_ucode_img_load = acr_r367_ls_ucode_img_load, - .ls_fill_headers = acr_r367_ls_fill_headers, - .ls_write_wpr = acr_r367_ls_write_wpr, - .ls_func = { - [NVKM_SECBOOT_FALCON_FECS] = &acr_r370_ls_fecs_func, - [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r370_ls_gpccs_func, - [NVKM_SECBOOT_FALCON_PMU] = &acr_r375_ls_pmu_func, - }, -}; - -struct nvkm_acr * -acr_r375_new(enum nvkm_secboot_falcon boot_falcon, - unsigned long managed_falcons) -{ - return acr_r352_new_(&acr_r375_func, boot_falcon, managed_falcons); -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c deleted file mode 100644 index ee29c6c11afd524a10b8d3dc9eeb4fcca37e7d41..0000000000000000000000000000000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c +++ /dev/null @@ -1,213 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
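acr_r375_generate_pmu_bl_desc() above reserves the top of the PMU falcon's DMEM for the message-queue command line and hands its address to the firmware as the lone argv entry. A minimal restatement of that arithmetic, with the DMEM limit and buffer size invented for illustration:

#include <assert.h>
#include <stdint.h>

/* Both values below are invented; in the driver they come from
 * pmu->falcon->data.limit and NVKM_MSGQUEUE_CMDLINE_SIZE. */
#define DMEM_LIMIT   0x10000u
#define CMDLINE_SIZE 0x100u

int main(void)
{
    /* The command line sits at the very top of DMEM... */
    uint32_t addr_args = DMEM_LIMIT - CMDLINE_SIZE;

    /* ...and the firmware receives its address as the only argument. */
    uint32_t argc = 1, argv = addr_args;

    assert(argv + CMDLINE_SIZE == DMEM_LIMIT);
    (void)argc;
    return 0;
}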
IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -/* - * Secure boot is the process by which NVIDIA-signed firmware is loaded into - * some of the falcons of a GPU. For production devices this is the only way - * for the firmware to access useful (but sensitive) registers. - * - * A Falcon microprocessor supporting advanced security modes can run in one of - * three modes: - * - * - Non-secure (NS). In this mode, functionality is similar to Falcon - * architectures before security modes were introduced (pre-Maxwell), but - * capability is restricted. In particular, certain registers may be - * inaccessible for reads and/or writes, and physical memory access may be - * disabled (on certain Falcon instances). This is the only possible mode that - * can be used if you don't have microcode cryptographically signed by NVIDIA. - * - * - Heavy Secure (HS). In this mode, the microprocessor is a black box - it's - * not possible to read or write any Falcon internal state or Falcon registers - * from outside the Falcon (for example, from the host system). The only way - * to enable this mode is by loading microcode that has been signed by NVIDIA. - * (The loading process involves tagging the IMEM block as secure, writing the - * signature into a Falcon register, and starting execution. The hardware will - * validate the signature, and if valid, grant HS privileges.) - * - * - Light Secure (LS). In this mode, the microprocessor has more privileges - * than NS but fewer than HS. Some of the microprocessor state is visible to - * host software to ease debugging. The only way to enable this mode is by HS - * microcode enabling LS mode. Some privileges available to HS mode are not - * available here. LS mode is introduced in GM20x. - * - * Secure boot consists in temporarily switching a HS-capable falcon (typically - * PMU) into HS mode in order to validate the LS firmwares of managed falcons, - * load them, and switch managed falcons into LS mode. Once secure boot - * completes, no falcon remains in HS mode. - * - * Secure boot requires a write-protected memory region (WPR) which can only be - * written by the secure falcon. On dGPU, the driver sets up the WPR region in - * video memory. On Tegra, it is set up by the bootloader and its location and - * size written into memory controller registers. - * - * The secure boot process takes place as follows: - * - * 1) A LS blob is constructed that contains all the LS firmwares we want to - * load, along with their signatures and bootloaders. - * - * 2) A HS blob (also called ACR) is created that contains the signed HS - * firmware in charge of loading the LS firmwares into their respective - * falcons. - * - * 3) The HS blob is loaded (via its own bootloader) and executed on the - * HS-capable falcon. It authenticates itself, switches the secure falcon to - * HS mode and setup the WPR region around the LS blob (dGPU) or copies the - * LS blob into the WPR region (Tegra). - * - * 4) The LS blob is now secure from all external tampering. The HS falcon - * checks the signatures of the LS firmwares and, if valid, switches the - * managed falcons to LS mode and makes them ready to run the LS firmware. - * - * 5) The managed falcons remain in LS mode and can be started. 
- * - */ - -#include "priv.h" -#include "acr.h" - -#include -#include -#include -#include - -const char * -nvkm_secboot_falcon_name[] = { - [NVKM_SECBOOT_FALCON_PMU] = "PMU", - [NVKM_SECBOOT_FALCON_RESERVED] = "", - [NVKM_SECBOOT_FALCON_FECS] = "FECS", - [NVKM_SECBOOT_FALCON_GPCCS] = "GPCCS", - [NVKM_SECBOOT_FALCON_SEC2] = "SEC2", - [NVKM_SECBOOT_FALCON_END] = "", -}; -/** - * nvkm_secboot_reset() - reset specified falcon - */ -int -nvkm_secboot_reset(struct nvkm_secboot *sb, unsigned long falcon_mask) -{ - /* Unmanaged falcon? */ - if ((falcon_mask | sb->acr->managed_falcons) != sb->acr->managed_falcons) { - nvkm_error(&sb->subdev, "cannot reset unmanaged falcon!\n"); - return -EINVAL; - } - - return sb->acr->func->reset(sb->acr, sb, falcon_mask); -} - -/** - * nvkm_secboot_is_managed() - check whether a given falcon is securely-managed - */ -bool -nvkm_secboot_is_managed(struct nvkm_secboot *sb, enum nvkm_secboot_falcon fid) -{ - if (!sb) - return false; - - return sb->acr->managed_falcons & BIT(fid); -} - -static int -nvkm_secboot_oneinit(struct nvkm_subdev *subdev) -{ - struct nvkm_secboot *sb = nvkm_secboot(subdev); - int ret = 0; - - switch (sb->acr->boot_falcon) { - case NVKM_SECBOOT_FALCON_PMU: - sb->halt_falcon = sb->boot_falcon = subdev->device->pmu->falcon; - break; - case NVKM_SECBOOT_FALCON_SEC2: - /* we must keep SEC2 alive forever since ACR will run on it */ - nvkm_engine_ref(&subdev->device->sec2->engine); - sb->boot_falcon = subdev->device->sec2->falcon; - sb->halt_falcon = subdev->device->pmu->falcon; - break; - default: - nvkm_error(subdev, "Unmanaged boot falcon %s!\n", - nvkm_secboot_falcon_name[sb->acr->boot_falcon]); - return -EINVAL; - } - nvkm_debug(subdev, "using %s falcon for ACR\n", sb->boot_falcon->name); - - /* Call chip-specific init function */ - if (sb->func->oneinit) - ret = sb->func->oneinit(sb); - if (ret) { - nvkm_error(subdev, "Secure Boot initialization failed: %d\n", - ret); - return ret; - } - - return 0; -} - -static int -nvkm_secboot_fini(struct nvkm_subdev *subdev, bool suspend) -{ - struct nvkm_secboot *sb = nvkm_secboot(subdev); - int ret = 0; - - if (sb->func->fini) - ret = sb->func->fini(sb, suspend); - - return ret; -} - -static void * -nvkm_secboot_dtor(struct nvkm_subdev *subdev) -{ - struct nvkm_secboot *sb = nvkm_secboot(subdev); - void *ret = NULL; - - if (sb->func->dtor) - ret = sb->func->dtor(sb); - - return ret; -} - -static const struct nvkm_subdev_func -nvkm_secboot = { - .oneinit = nvkm_secboot_oneinit, - .fini = nvkm_secboot_fini, - .dtor = nvkm_secboot_dtor, -}; - -int -nvkm_secboot_ctor(const struct nvkm_secboot_func *func, struct nvkm_acr *acr, - struct nvkm_device *device, int index, - struct nvkm_secboot *sb) -{ - unsigned long fid; - - nvkm_subdev_ctor(&nvkm_secboot, device, index, &sb->subdev); - sb->func = func; - sb->acr = acr; - acr->subdev = &sb->subdev; - - nvkm_debug(&sb->subdev, "securely managed falcons:\n"); - for_each_set_bit(fid, &sb->acr->managed_falcons, - NVKM_SECBOOT_FALCON_END) - nvkm_debug(&sb->subdev, "- %s\n", - nvkm_secboot_falcon_name[fid]); - - return 0; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c deleted file mode 100644 index 5e91b3f90065fe286db8dbc0c060670fe49ca3a6..0000000000000000000000000000000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c +++ /dev/null @@ -1,262 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 
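nvkm_secboot_reset() above gates resets with (falcon_mask | managed) != managed, the usual bitmask subset test (equivalently, falcon_mask & ~managed spots stray bits). A small standalone sketch, with falcon IDs taken in the order of the name table above:

#include <stdbool.h>
#include <stdio.h>

#define BIT(n) (1ul << (n))

/* IDs in the order of the nvkm_secboot_falcon_name table above. */
enum falcon { PMU, RESERVED, FECS, GPCCS, SEC2 };

static bool falcons_managed(unsigned long mask, unsigned long managed)
{
    /* Subset test: OR-ing in a managed subset changes nothing. */
    return (mask | managed) == managed;
}

int main(void)
{
    unsigned long managed = BIT(FECS) | BIT(GPCCS);

    printf("%d\n", falcons_managed(BIT(FECS), managed)); /* 1: managed   */
    printf("%d\n", falcons_managed(BIT(SEC2), managed)); /* 0: unmanaged */
    return 0;
}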
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - - -#include "acr.h" -#include "gm200.h" - -#include -#include -#include -#include - -/** - * gm200_secboot_run_blob() - run the given high-secure blob - * - */ -int -gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob, - struct nvkm_falcon *falcon) -{ - struct gm200_secboot *gsb = gm200_secboot(sb); - struct nvkm_subdev *subdev = &gsb->base.subdev; - struct nvkm_vma *vma = NULL; - u32 start_address; - int ret; - - ret = nvkm_falcon_get(falcon, subdev); - if (ret) - return ret; - - /* Map the HS firmware so the HS bootloader can see it */ - ret = nvkm_vmm_get(gsb->vmm, 12, blob->size, &vma); - if (ret) { - nvkm_falcon_put(falcon, subdev); - return ret; - } - - ret = nvkm_memory_map(blob, 0, gsb->vmm, vma, NULL, 0); - if (ret) - goto end; - - /* Reset and set the falcon up */ - ret = nvkm_falcon_reset(falcon); - if (ret) - goto end; - nvkm_falcon_bind_context(falcon, gsb->inst); - - /* Load the HS bootloader into the falcon's IMEM/DMEM */ - ret = sb->acr->func->load(sb->acr, falcon, blob, vma->addr); - if (ret < 0) - goto end; - - start_address = ret; - - /* Disable interrupts as we will poll for the HALT bit */ - nvkm_mc_intr_mask(sb->subdev.device, falcon->owner->index, false); - - /* Set default error value in mailbox register */ - nvkm_falcon_wr32(falcon, 0x040, 0xdeada5a5); - - /* Start the HS bootloader */ - nvkm_falcon_set_start_addr(falcon, start_address); - nvkm_falcon_start(falcon); - ret = nvkm_falcon_wait_for_halt(falcon, 100); - if (ret) - goto end; - - /* - * The mailbox register contains the (positive) error code - return this - * to the caller - */ - ret = nvkm_falcon_rd32(falcon, 0x040); - -end: - /* Reenable interrupts */ - nvkm_mc_intr_mask(sb->subdev.device, falcon->owner->index, true); - - /* We don't need the ACR firmware anymore */ - nvkm_vmm_put(gsb->vmm, &vma); - nvkm_falcon_put(falcon, subdev); - - return ret; -} - -int -gm200_secboot_oneinit(struct nvkm_secboot *sb) -{ - struct gm200_secboot *gsb = gm200_secboot(sb); - struct nvkm_device *device = sb->subdev.device; - int ret; - - /* Allocate instance block and VM */ - ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, true, - &gsb->inst); - if (ret) - return ret; - - ret = nvkm_vmm_new(device, 0, 600 * 1024, NULL, 0, NULL, "acr", - &gsb->vmm); - if (ret) - return ret; - - atomic_inc(&gsb->vmm->engref[NVKM_SUBDEV_PMU]); - gsb->vmm->debug = gsb->base.subdev.debug; - - ret = nvkm_vmm_join(gsb->vmm, 
gsb->inst); - if (ret) - return ret; - - if (sb->acr->func->oneinit) { - ret = sb->acr->func->oneinit(sb->acr, sb); - if (ret) - return ret; - } - - return 0; -} - -int -gm200_secboot_fini(struct nvkm_secboot *sb, bool suspend) -{ - int ret = 0; - - if (sb->acr->func->fini) - ret = sb->acr->func->fini(sb->acr, sb, suspend); - - return ret; -} - -void * -gm200_secboot_dtor(struct nvkm_secboot *sb) -{ - struct gm200_secboot *gsb = gm200_secboot(sb); - - sb->acr->func->dtor(sb->acr); - - nvkm_vmm_part(gsb->vmm, gsb->inst); - nvkm_vmm_unref(&gsb->vmm); - nvkm_memory_unref(&gsb->inst); - - return gsb; -} - - -static const struct nvkm_secboot_func -gm200_secboot = { - .dtor = gm200_secboot_dtor, - .oneinit = gm200_secboot_oneinit, - .fini = gm200_secboot_fini, - .run_blob = gm200_secboot_run_blob, -}; - -int -gm200_secboot_new(struct nvkm_device *device, int index, - struct nvkm_secboot **psb) -{ - int ret; - struct gm200_secboot *gsb; - struct nvkm_acr *acr; - - acr = acr_r361_new(BIT(NVKM_SECBOOT_FALCON_FECS) | - BIT(NVKM_SECBOOT_FALCON_GPCCS)); - if (IS_ERR(acr)) - return PTR_ERR(acr); - - gsb = kzalloc(sizeof(*gsb), GFP_KERNEL); - if (!gsb) { - psb = NULL; - return -ENOMEM; - } - *psb = &gsb->base; - - ret = nvkm_secboot_ctor(&gm200_secboot, acr, device, index, &gsb->base); - if (ret) - return ret; - - return 0; -} - - -MODULE_FIRMWARE("nvidia/gm200/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gm200/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gm200/acr/ucode_unload.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_bl.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_sig.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/sw_ctx.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gm200/gr/sw_method_init.bin"); - -MODULE_FIRMWARE("nvidia/gm204/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gm204/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gm204/acr/ucode_unload.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_bl.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_sig.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/sw_ctx.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gm204/gr/sw_method_init.bin"); - -MODULE_FIRMWARE("nvidia/gm206/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gm206/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gm206/acr/ucode_unload.bin"); -MODULE_FIRMWARE("nvidia/gm206/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gm206/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gm206/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gm206/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_bl.bin"); -MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_sig.bin"); -MODULE_FIRMWARE("nvidia/gm206/gr/sw_ctx.bin"); -MODULE_FIRMWARE("nvidia/gm206/gr/sw_nonctx.bin"); 
-MODULE_FIRMWARE("nvidia/gm206/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gm206/gr/sw_method_init.bin"); - -MODULE_FIRMWARE("nvidia/gp100/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gp100/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gp100/acr/ucode_unload.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/sw_ctx.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gp100/gr/sw_method_init.bin"); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h deleted file mode 100644 index 62c5e162099aaa50b59cde15fda75420a188526a..0000000000000000000000000000000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef __NVKM_SECBOOT_GM200_H__ -#define __NVKM_SECBOOT_GM200_H__ - -#include "priv.h" - -struct gm200_secboot { - struct nvkm_secboot base; - - /* Instance block & address space used for HS FW execution */ - struct nvkm_memory *inst; - struct nvkm_vmm *vmm; -}; -#define gm200_secboot(sb) container_of(sb, struct gm200_secboot, base) - -int gm200_secboot_oneinit(struct nvkm_secboot *); -int gm200_secboot_fini(struct nvkm_secboot *, bool); -void *gm200_secboot_dtor(struct nvkm_secboot *); -int gm200_secboot_run_blob(struct nvkm_secboot *, struct nvkm_gpuobj *, - struct nvkm_falcon *); - -/* Tegra-only */ -int gm20b_secboot_tegra_read_wpr(struct gm200_secboot *, u32); - -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c deleted file mode 100644 index df8b919dcf09bc4b6470afa898fed69a1221c531..0000000000000000000000000000000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "acr.h" -#include "gm200.h" - -#define TEGRA210_MC_BASE 0x70019000 - -#ifdef CONFIG_ARCH_TEGRA -#define MC_SECURITY_CARVEOUT2_CFG0 0xc58 -#define MC_SECURITY_CARVEOUT2_BOM_0 0xc5c -#define MC_SECURITY_CARVEOUT2_BOM_HI_0 0xc60 -#define MC_SECURITY_CARVEOUT2_SIZE_128K 0xc64 -#define TEGRA_MC_SECURITY_CARVEOUT_CFG_LOCKED (1 << 1) -/** - * gm20b_secboot_tegra_read_wpr() - read the WPR registers on Tegra - * - * On dGPU, we can manage the WPR region ourselves, but on Tegra the WPR region - * is reserved from system memory by the bootloader and irreversibly locked. - * This function reads the address and size of the pre-configured WPR region. - */ -int -gm20b_secboot_tegra_read_wpr(struct gm200_secboot *gsb, u32 mc_base) -{ - struct nvkm_secboot *sb = &gsb->base; - void __iomem *mc; - u32 cfg; - - mc = ioremap(mc_base, 0xd00); - if (!mc) { - nvkm_error(&sb->subdev, "Cannot map Tegra MC registers\n"); - return -ENOMEM; - } - sb->wpr_addr = ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_0) | - ((u64)ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_HI_0) << 32); - sb->wpr_size = ioread32_native(mc + MC_SECURITY_CARVEOUT2_SIZE_128K) - << 17; - cfg = ioread32_native(mc + MC_SECURITY_CARVEOUT2_CFG0); - iounmap(mc); - - /* Check that WPR settings are valid */ - if (sb->wpr_size == 0) { - nvkm_error(&sb->subdev, "WPR region is empty\n"); - return -EINVAL; - } - - if (!(cfg & TEGRA_MC_SECURITY_CARVEOUT_CFG_LOCKED)) { - nvkm_error(&sb->subdev, "WPR region not locked\n"); - return -EINVAL; - } - - return 0; -} -#else -int -gm20b_secboot_tegra_read_wpr(struct gm200_secboot *gsb, u32 mc_base) -{ - nvkm_error(&gsb->base.subdev, "Tegra support not compiled in\n"); - return -EINVAL; -} -#endif - -static int -gm20b_secboot_oneinit(struct nvkm_secboot *sb) -{ - struct gm200_secboot *gsb = gm200_secboot(sb); - int ret; - - ret = gm20b_secboot_tegra_read_wpr(gsb, TEGRA210_MC_BASE); - if (ret) - return ret; - - return gm200_secboot_oneinit(sb); -} - -static const struct nvkm_secboot_func -gm20b_secboot = { - .dtor = gm200_secboot_dtor, - .oneinit = gm20b_secboot_oneinit, - .fini = gm200_secboot_fini, - .run_blob = gm200_secboot_run_blob, -}; - -int -gm20b_secboot_new(struct nvkm_device *device, int index, - struct nvkm_secboot **psb) -{ - int ret; - struct gm200_secboot *gsb; - struct nvkm_acr *acr; - - acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS) | - BIT(NVKM_SECBOOT_FALCON_PMU)); - if (IS_ERR(acr)) - return PTR_ERR(acr); - 
/* Support the initial GM20B firmware release without PMU */ - acr->optional_falcons = BIT(NVKM_SECBOOT_FALCON_PMU); - - gsb = kzalloc(sizeof(*gsb), GFP_KERNEL); - if (!gsb) { - psb = NULL; - return -ENOMEM; - } - *psb = &gsb->base; - - ret = nvkm_secboot_ctor(&gm20b_secboot, acr, device, index, &gsb->base); - if (ret) - return ret; - - return 0; -} - -#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) -MODULE_FIRMWARE("nvidia/gm20b/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gm20b/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gm20b/gr/sw_ctx.bin"); -MODULE_FIRMWARE("nvidia/gm20b/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gm20b/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gm20b/gr/sw_method_init.bin"); -MODULE_FIRMWARE("nvidia/gm20b/pmu/desc.bin"); -MODULE_FIRMWARE("nvidia/gm20b/pmu/image.bin"); -MODULE_FIRMWARE("nvidia/gm20b/pmu/sig.bin"); -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c deleted file mode 100644 index 4695f1c8e33f4f32219102ac569462bb7516a1ea..0000000000000000000000000000000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c +++ /dev/null @@ -1,264 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
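gm20b_secboot_tegra_read_wpr() above assembles the WPR base from the BOM/BOM_HI carveout registers and shifts the SIZE register left by 17 because it counts 128 KiB units. A standalone restatement with made-up register values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Register values invented for illustration. */
    uint32_t bom = 0xfee00000u, bom_hi = 0x1u, size_128k = 0x18u;

    uint64_t wpr_addr = (uint64_t)bom | ((uint64_t)bom_hi << 32);
    uint64_t wpr_size = (uint64_t)size_128k << 17; /* 128 KiB units */

    printf("WPR base 0x%llx, size 0x%llx (%llu KiB)\n",
           (unsigned long long)wpr_addr,
           (unsigned long long)wpr_size,
           (unsigned long long)(wpr_size >> 10));
    return 0;
}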
- */ - -#include "acr.h" -#include "gm200.h" - -#include "ls_ucode.h" -#include "hs_ucode.h" -#include -#include -#include -#include - -static bool -gp102_secboot_scrub_required(struct nvkm_secboot *sb) -{ - struct nvkm_subdev *subdev = &sb->subdev; - struct nvkm_device *device = subdev->device; - u32 reg; - - nvkm_wr32(device, 0x100cd0, 0x2); - reg = nvkm_rd32(device, 0x100cd0); - - return (reg & BIT(4)); -} - -static int -gp102_run_secure_scrub(struct nvkm_secboot *sb) -{ - struct nvkm_subdev *subdev = &sb->subdev; - struct nvkm_device *device = subdev->device; - struct nvkm_engine *engine; - struct nvkm_falcon *falcon; - void *scrub_image; - struct fw_bin_header *hsbin_hdr; - struct hsf_fw_header *fw_hdr; - struct hsf_load_header *lhdr; - void *scrub_data; - int ret; - - nvkm_debug(subdev, "running VPR scrubber binary on NVDEC...\n"); - - engine = nvkm_engine_ref(&device->nvdec[0]->engine); - if (IS_ERR(engine)) - return PTR_ERR(engine); - falcon = device->nvdec[0]->falcon; - - nvkm_falcon_get(falcon, &sb->subdev); - - scrub_image = hs_ucode_load_blob(subdev, falcon, "nvdec/scrubber"); - if (IS_ERR(scrub_image)) - return PTR_ERR(scrub_image); - - nvkm_falcon_reset(falcon); - nvkm_falcon_bind_context(falcon, NULL); - - hsbin_hdr = scrub_image; - fw_hdr = scrub_image + hsbin_hdr->header_offset; - lhdr = scrub_image + fw_hdr->hdr_offset; - scrub_data = scrub_image + hsbin_hdr->data_offset; - - nvkm_falcon_load_imem(falcon, scrub_data, lhdr->non_sec_code_off, - lhdr->non_sec_code_size, - lhdr->non_sec_code_off >> 8, 0, false); - nvkm_falcon_load_imem(falcon, scrub_data + lhdr->apps[0], - ALIGN(lhdr->apps[0], 0x100), - lhdr->apps[1], - lhdr->apps[0] >> 8, 0, true); - nvkm_falcon_load_dmem(falcon, scrub_data + lhdr->data_dma_base, 0, - lhdr->data_size, 0); - - kfree(scrub_image); - - nvkm_falcon_set_start_addr(falcon, 0x0); - nvkm_falcon_start(falcon); - - ret = nvkm_falcon_wait_for_halt(falcon, 500); - if (ret < 0) { - nvkm_error(subdev, "failed to run VPR scrubber binary!\n"); - ret = -ETIMEDOUT; - goto end; - } - - /* put nvdec in clean state - without reset it will remain in HS mode */ - nvkm_falcon_reset(falcon); - - if (gp102_secboot_scrub_required(sb)) { - nvkm_error(subdev, "VPR scrubber binary failed!\n"); - ret = -EINVAL; - goto end; - } - - nvkm_debug(subdev, "VPR scrub successfully completed\n"); - -end: - nvkm_falcon_put(falcon, &sb->subdev); - nvkm_engine_unref(&engine); - return ret; -} - -static int -gp102_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob, - struct nvkm_falcon *falcon) -{ - int ret; - - /* make sure the VPR region is unlocked */ - if (gp102_secboot_scrub_required(sb)) { - ret = gp102_run_secure_scrub(sb); - if (ret) - return ret; - } - - return gm200_secboot_run_blob(sb, blob, falcon); -} - -const struct nvkm_secboot_func -gp102_secboot = { - .dtor = gm200_secboot_dtor, - .oneinit = gm200_secboot_oneinit, - .fini = gm200_secboot_fini, - .run_blob = gp102_secboot_run_blob, -}; - -int -gp102_secboot_new(struct nvkm_device *device, int index, - struct nvkm_secboot **psb) -{ - int ret; - struct gm200_secboot *gsb; - struct nvkm_acr *acr; - - acr = acr_r367_new(NVKM_SECBOOT_FALCON_SEC2, - BIT(NVKM_SECBOOT_FALCON_FECS) | - BIT(NVKM_SECBOOT_FALCON_GPCCS) | - BIT(NVKM_SECBOOT_FALCON_SEC2)); - if (IS_ERR(acr)) - return PTR_ERR(acr); - - gsb = kzalloc(sizeof(*gsb), GFP_KERNEL); - if (!gsb) { - psb = NULL; - return -ENOMEM; - } - *psb = &gsb->base; - - ret = nvkm_secboot_ctor(&gp102_secboot, acr, device, index, &gsb->base); - if (ret) - return ret; - - 
return 0; -} - -MODULE_FIRMWARE("nvidia/gp102/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gp102/acr/unload_bl.bin"); -MODULE_FIRMWARE("nvidia/gp102/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gp102/acr/ucode_unload.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/sw_ctx.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gp102/gr/sw_method_init.bin"); -MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin"); -MODULE_FIRMWARE("nvidia/gp102/sec2/desc.bin"); -MODULE_FIRMWARE("nvidia/gp102/sec2/image.bin"); -MODULE_FIRMWARE("nvidia/gp102/sec2/sig.bin"); -MODULE_FIRMWARE("nvidia/gp102/sec2/desc-1.bin"); -MODULE_FIRMWARE("nvidia/gp102/sec2/image-1.bin"); -MODULE_FIRMWARE("nvidia/gp102/sec2/sig-1.bin"); -MODULE_FIRMWARE("nvidia/gp104/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gp104/acr/unload_bl.bin"); -MODULE_FIRMWARE("nvidia/gp104/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gp104/acr/ucode_unload.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/sw_ctx.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gp104/gr/sw_method_init.bin"); -MODULE_FIRMWARE("nvidia/gp104/nvdec/scrubber.bin"); -MODULE_FIRMWARE("nvidia/gp104/sec2/desc.bin"); -MODULE_FIRMWARE("nvidia/gp104/sec2/image.bin"); -MODULE_FIRMWARE("nvidia/gp104/sec2/sig.bin"); -MODULE_FIRMWARE("nvidia/gp104/sec2/desc-1.bin"); -MODULE_FIRMWARE("nvidia/gp104/sec2/image-1.bin"); -MODULE_FIRMWARE("nvidia/gp104/sec2/sig-1.bin"); -MODULE_FIRMWARE("nvidia/gp106/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gp106/acr/unload_bl.bin"); -MODULE_FIRMWARE("nvidia/gp106/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gp106/acr/ucode_unload.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/sw_ctx.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gp106/gr/sw_method_init.bin"); -MODULE_FIRMWARE("nvidia/gp106/nvdec/scrubber.bin"); -MODULE_FIRMWARE("nvidia/gp106/sec2/desc.bin"); -MODULE_FIRMWARE("nvidia/gp106/sec2/image.bin"); -MODULE_FIRMWARE("nvidia/gp106/sec2/sig.bin"); -MODULE_FIRMWARE("nvidia/gp106/sec2/desc-1.bin"); -MODULE_FIRMWARE("nvidia/gp106/sec2/image-1.bin"); -MODULE_FIRMWARE("nvidia/gp106/sec2/sig-1.bin"); 
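gp102_secboot_run_blob() above refuses to launch the ACR blob while gp102_secboot_scrub_required() reports bit 4 of register 0x100cd0 set, in which case the NVDEC VPR scrubber runs first. A sketch of that gate; the MMIO helper below is a stub, and the exact meaning of the 0x2 write is not spelled out by the code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg = 1u << 4; /* pretend a scrub is pending */

/* Stub for the write-then-read the driver performs on 0x100cd0. */
static uint32_t wr_then_rd32(uint32_t addr, uint32_t val)
{
    (void)addr; (void)val;
    return fake_reg;
}

static bool vpr_scrub_required(void)
{
    return wr_then_rd32(0x100cd0, 0x2) & (1u << 4);
}

int main(void)
{
    if (vpr_scrub_required())
        printf("run the NVDEC scrubber before the ACR blob\n");
    return 0;
}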
-MODULE_FIRMWARE("nvidia/gp107/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gp107/acr/unload_bl.bin"); -MODULE_FIRMWARE("nvidia/gp107/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gp107/acr/ucode_unload.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/sw_ctx.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gp107/gr/sw_method_init.bin"); -MODULE_FIRMWARE("nvidia/gp107/nvdec/scrubber.bin"); -MODULE_FIRMWARE("nvidia/gp107/sec2/desc.bin"); -MODULE_FIRMWARE("nvidia/gp107/sec2/image.bin"); -MODULE_FIRMWARE("nvidia/gp107/sec2/sig.bin"); -MODULE_FIRMWARE("nvidia/gp107/sec2/desc-1.bin"); -MODULE_FIRMWARE("nvidia/gp107/sec2/image-1.bin"); -MODULE_FIRMWARE("nvidia/gp107/sec2/sig-1.bin"); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c deleted file mode 100644 index 737a8d50a1f204ec2d072e53f0590ec865842a5a..0000000000000000000000000000000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright 2017 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ -#include "gm200.h" -#include "acr.h" - -int -gp108_secboot_new(struct nvkm_device *device, int index, - struct nvkm_secboot **psb) -{ - struct gm200_secboot *gsb; - struct nvkm_acr *acr; - - acr = acr_r370_new(NVKM_SECBOOT_FALCON_SEC2, - BIT(NVKM_SECBOOT_FALCON_FECS) | - BIT(NVKM_SECBOOT_FALCON_GPCCS) | - BIT(NVKM_SECBOOT_FALCON_SEC2)); - if (IS_ERR(acr)) - return PTR_ERR(acr); - - if (!(gsb = kzalloc(sizeof(*gsb), GFP_KERNEL))) { - acr->func->dtor(acr); - return -ENOMEM; - } - *psb = &gsb->base; - - return nvkm_secboot_ctor(&gp102_secboot, acr, device, index, &gsb->base); -} - -MODULE_FIRMWARE("nvidia/gp108/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gp108/acr/unload_bl.bin"); -MODULE_FIRMWARE("nvidia/gp108/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gp108/acr/ucode_unload.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/sw_ctx.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gp108/gr/sw_method_init.bin"); -MODULE_FIRMWARE("nvidia/gp108/nvdec/scrubber.bin"); -MODULE_FIRMWARE("nvidia/gp108/sec2/desc.bin"); -MODULE_FIRMWARE("nvidia/gp108/sec2/image.bin"); -MODULE_FIRMWARE("nvidia/gp108/sec2/sig.bin"); - -MODULE_FIRMWARE("nvidia/gv100/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gv100/acr/unload_bl.bin"); -MODULE_FIRMWARE("nvidia/gv100/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gv100/acr/ucode_unload.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_bl.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_sig.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/sw_ctx.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gv100/gr/sw_method_init.bin"); -MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin"); -MODULE_FIRMWARE("nvidia/gv100/sec2/desc.bin"); -MODULE_FIRMWARE("nvidia/gv100/sec2/image.bin"); -MODULE_FIRMWARE("nvidia/gv100/sec2/sig.bin"); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c deleted file mode 100644 index 28ca29d0eeeeb8d0ab869f319b0e7c1304ef91be..0000000000000000000000000000000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "acr.h" -#include "gm200.h" - -#define TEGRA186_MC_BASE 0x02c10000 - -static int -gp10b_secboot_oneinit(struct nvkm_secboot *sb) -{ - struct gm200_secboot *gsb = gm200_secboot(sb); - int ret; - - ret = gm20b_secboot_tegra_read_wpr(gsb, TEGRA186_MC_BASE); - if (ret) - return ret; - - return gm200_secboot_oneinit(sb); -} - -static const struct nvkm_secboot_func -gp10b_secboot = { - .dtor = gm200_secboot_dtor, - .oneinit = gp10b_secboot_oneinit, - .fini = gm200_secboot_fini, - .run_blob = gm200_secboot_run_blob, -}; - -int -gp10b_secboot_new(struct nvkm_device *device, int index, - struct nvkm_secboot **psb) -{ - int ret; - struct gm200_secboot *gsb; - struct nvkm_acr *acr; - - acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS) | - BIT(NVKM_SECBOOT_FALCON_GPCCS) | - BIT(NVKM_SECBOOT_FALCON_PMU)); - if (IS_ERR(acr)) - return PTR_ERR(acr); - - gsb = kzalloc(sizeof(*gsb), GFP_KERNEL); - if (!gsb) { - psb = NULL; - return -ENOMEM; - } - *psb = &gsb->base; - - ret = nvkm_secboot_ctor(&gp10b_secboot, acr, device, index, &gsb->base); - if (ret) - return ret; - - return 0; -} - -#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) -MODULE_FIRMWARE("nvidia/gp10b/acr/bl.bin"); -MODULE_FIRMWARE("nvidia/gp10b/acr/ucode_load.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_data.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_bl.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_inst.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_data.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_sig.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/sw_ctx.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/sw_nonctx.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/sw_bundle_init.bin"); -MODULE_FIRMWARE("nvidia/gp10b/gr/sw_method_init.bin"); -MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin"); -MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin"); -MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin"); -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.c deleted file mode 100644 index 6b33182ddc2ffa045974d6c620fe5541fa7a02c3..0000000000000000000000000000000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.c +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include "hs_ucode.h" -#include "ls_ucode.h" -#include "acr.h" - -#include - -/** - * hs_ucode_patch_signature() - patch HS blob with correct signature for - * specified falcon. - */ -static void -hs_ucode_patch_signature(const struct nvkm_falcon *falcon, void *acr_image, - bool new_format) -{ - struct fw_bin_header *hsbin_hdr = acr_image; - struct hsf_fw_header *fw_hdr = acr_image + hsbin_hdr->header_offset; - void *hs_data = acr_image + hsbin_hdr->data_offset; - void *sig; - u32 sig_size; - u32 patch_loc, patch_sig; - - /* - * I had the brilliant idea to "improve" the binary format by - * removing this useless indirection. However to make NVIDIA files - * directly compatible, let's support both format. - */ - if (new_format) { - patch_loc = fw_hdr->patch_loc; - patch_sig = fw_hdr->patch_sig; - } else { - patch_loc = *(u32 *)(acr_image + fw_hdr->patch_loc); - patch_sig = *(u32 *)(acr_image + fw_hdr->patch_sig); - } - - /* Falcon in debug or production mode? */ - if (falcon->debug) { - sig = acr_image + fw_hdr->sig_dbg_offset; - sig_size = fw_hdr->sig_dbg_size; - } else { - sig = acr_image + fw_hdr->sig_prod_offset; - sig_size = fw_hdr->sig_prod_size; - } - - /* Patch signature */ - memcpy(hs_data + patch_loc, sig + patch_sig, sig_size); -} - -void * -hs_ucode_load_blob(struct nvkm_subdev *subdev, const struct nvkm_falcon *falcon, - const char *fw) -{ - void *acr_image; - bool new_format; - - acr_image = nvkm_acr_load_firmware(subdev, fw, 0); - if (IS_ERR(acr_image)) - return acr_image; - - /* detect the format to define how signature should be patched */ - switch (((u32 *)acr_image)[0]) { - case 0x3b1d14f0: - new_format = true; - break; - case 0x000010de: - new_format = false; - break; - default: - nvkm_error(subdev, "unknown header for HS blob %s\n", fw); - return ERR_PTR(-EINVAL); - } - - hs_ucode_patch_signature(falcon, acr_image, new_format); - - return acr_image; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.h deleted file mode 100644 index d8cfc6f7752aaf7847509aab5ff34d076e8cd1f5..0000000000000000000000000000000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
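hs_ucode_load_blob() above sniffs the first word of the image to decide how the signature is patched: 0x3b1d14f0 marks the reworked format whose patch_loc/patch_sig fields are used directly, 0x000010de the original NVIDIA layout where they are offsets to the real values (the "useless indirection" the comment apologizes for). A toy version of that dispatch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t magic = 0x3b1d14f0u; /* first word of the HS image */

    switch (magic) {
    case 0x3b1d14f0u:
        printf("new format: patch_loc/patch_sig used directly\n");
        break;
    case 0x000010deu:
        printf("legacy format: patch_loc/patch_sig are offsets\n");
        break;
    default:
        printf("unknown HS blob header\n");
        break;
    }
    return 0;
}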
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef __NVKM_SECBOOT_HS_UCODE_H__ -#define __NVKM_SECBOOT_HS_UCODE_H__ - -#include -#include - -struct nvkm_falcon; - -/** - * struct hsf_fw_header - HS firmware descriptor - * @sig_dbg_offset: offset of the debug signature - * @sig_dbg_size: size of the debug signature - * @sig_prod_offset: offset of the production signature - * @sig_prod_size: size of the production signature - * @patch_loc: offset of the offset (sic) of where the signature is - * @patch_sig: offset of the offset (sic) to add to sig_*_offset - * @hdr_offset: offset of the load header (see struct hs_load_header) - * @hdr_size: size of above header - * - * This structure is embedded in the HS firmware image at - * hs_bin_hdr.header_offset. - */ -struct hsf_fw_header { - u32 sig_dbg_offset; - u32 sig_dbg_size; - u32 sig_prod_offset; - u32 sig_prod_size; - u32 patch_loc; - u32 patch_sig; - u32 hdr_offset; - u32 hdr_size; -}; - -/** - * struct hsf_load_header - HS firmware load header - */ -struct hsf_load_header { - u32 non_sec_code_off; - u32 non_sec_code_size; - u32 data_dma_base; - u32 data_size; - u32 num_apps; - /* - * Organized as follows: - * - app0_code_off - * - app1_code_off - * - ... - * - appn_code_off - * - app0_code_size - * - app1_code_size - * - ... - */ - u32 apps[0]; -}; - -void *hs_ucode_load_blob(struct nvkm_subdev *, const struct nvkm_falcon *, - const char *); - -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h deleted file mode 100644 index d43f906da3a7bbcb451a40f24aa27398e91134a4..0000000000000000000000000000000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h +++ /dev/null @@ -1,161 +0,0 @@ -/* - * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
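The apps[] flexible array documented above packs num_apps code offsets followed by num_apps code sizes, which is how acr_r370_generate_hs_bl_desc() earlier in this patch can ask for hsf_load_header_app_size(hdr, 0). A sketch of accessors over that layout, using a fixed-size stand-in struct and invented numbers:

#include <stdint.h>
#include <stdio.h>

/* Fixed-size stand-in for the tail of struct hsf_load_header. */
struct load_hdr_tail {
    uint32_t num_apps;
    uint32_t apps[4]; /* [0..n-1] code offsets, [n..2n-1] code sizes */
};

static uint32_t app_code_off(const struct load_hdr_tail *h, uint32_t i)
{
    return h->apps[i];
}

static uint32_t app_code_size(const struct load_hdr_tail *h, uint32_t i)
{
    return h->apps[h->num_apps + i];
}

int main(void)
{
    /* Two apps at invented offsets/sizes. */
    struct load_hdr_tail h = { 2, { 0x100, 0x900, 0x800, 0x200 } };

    printf("app0: off 0x%x size 0x%x\n",
           (unsigned)app_code_off(&h, 0), (unsigned)app_code_size(&h, 0));
    return 0;
}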
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef __NVKM_SECBOOT_LS_UCODE_H__ -#define __NVKM_SECBOOT_LS_UCODE_H__ - -#include -#include -#include - -struct nvkm_acr; - -/** - * struct ls_ucode_img_desc - descriptor of firmware image - * @descriptor_size: size of this descriptor - * @image_size: size of the whole image - * @bootloader_start_offset: start offset of the bootloader in ucode image - * @bootloader_size: size of the bootloader - * @bootloader_imem_offset: start off set of the bootloader in IMEM - * @bootloader_entry_point: entry point of the bootloader in IMEM - * @app_start_offset: start offset of the LS firmware - * @app_size: size of the LS firmware's code and data - * @app_imem_offset: offset of the app in IMEM - * @app_imem_entry: entry point of the app in IMEM - * @app_dmem_offset: offset of the data in DMEM - * @app_resident_code_offset: offset of app code from app_start_offset - * @app_resident_code_size: size of the code - * @app_resident_data_offset: offset of data from app_start_offset - * @app_resident_data_size: size of data - * - * A firmware image contains the code, data, and bootloader of a given LS - * falcon in a single blob. This structure describes where everything is. - * - * This can be generated from a (bootloader, code, data) set if they have - * been loaded separately, or come directly from a file. - */ -struct ls_ucode_img_desc { - u32 descriptor_size; - u32 image_size; - u32 tools_version; - u32 app_version; - char date[64]; - u32 bootloader_start_offset; - u32 bootloader_size; - u32 bootloader_imem_offset; - u32 bootloader_entry_point; - u32 app_start_offset; - u32 app_size; - u32 app_imem_offset; - u32 app_imem_entry; - u32 app_dmem_offset; - u32 app_resident_code_offset; - u32 app_resident_code_size; - u32 app_resident_data_offset; - u32 app_resident_data_size; - u32 nb_overlays; - struct {u32 start; u32 size; } load_ovl[64]; - u32 compressed; -}; - -/** - * struct ls_ucode_img - temporary storage for loaded LS firmwares - * @node: to link within lsf_ucode_mgr - * @falcon_id: ID of the falcon this LS firmware is for - * @ucode_desc: loaded or generated map of ucode_data - * @ucode_data: firmware payload (code and data) - * @ucode_size: size in bytes of data in ucode_data - * @ucode_off: offset of the ucode in ucode_data - * @sig: signature for this firmware - * @sig:size: size of the signature in bytes - * - * Preparing the WPR LS blob requires information about all the LS firmwares - * (size, etc) to be known. This structure contains all the data of one LS - * firmware. 
- */ -struct ls_ucode_img { - struct list_head node; - enum nvkm_secboot_falcon falcon_id; - - struct ls_ucode_img_desc ucode_desc; - u8 *ucode_data; - u32 ucode_size; - u32 ucode_off; - - u8 *sig; - u32 sig_size; -}; - -/** - * struct fw_bin_header - header of firmware files - * @bin_magic: always 0x3b1d14f0 - * @bin_ver: version of the bin format - * @bin_size: entire image size including this header - * @header_offset: offset of the firmware/bootloader header in the file - * @data_offset: offset of the firmware/bootloader payload in the file - * @data_size: size of the payload - * - * This header is located at the beginning of the HS firmware and HS bootloader - * files, to describe where the headers and data can be found. - */ -struct fw_bin_header { - u32 bin_magic; - u32 bin_ver; - u32 bin_size; - u32 header_offset; - u32 data_offset; - u32 data_size; -}; - -/** - * struct fw_bl_desc - firmware bootloader descriptor - * @start_tag: starting tag of bootloader - * @desc_dmem_load_off: DMEM offset of flcn_bl_dmem_desc - * @code_off: offset of code section - * @code_size: size of code section - * @data_off: offset of data section - * @data_size: size of data section - * - * This structure is embedded in bootloader firmware files at to describe the - * IMEM and DMEM layout expected by the bootloader. - */ -struct fw_bl_desc { - u32 start_tag; - u32 dmem_load_off; - u32 code_off; - u32 code_size; - u32 data_off; - u32 data_size; -}; - -int acr_ls_ucode_load_fecs(const struct nvkm_secboot *, int, - struct ls_ucode_img *); -int acr_ls_ucode_load_gpccs(const struct nvkm_secboot *, int, - struct ls_ucode_img *); -int acr_ls_ucode_load_pmu(const struct nvkm_secboot *, int, - struct ls_ucode_img *); -int acr_ls_pmu_post_run(const struct nvkm_acr *, const struct nvkm_secboot *); -int acr_ls_ucode_load_sec2(const struct nvkm_secboot *, int, - struct ls_ucode_img *); -int acr_ls_sec2_post_run(const struct nvkm_acr *, const struct nvkm_secboot *); - -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c deleted file mode 100644 index 821d3b2bdb1fa7503785d4b74d511b96dac8c1d4..0000000000000000000000000000000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
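fw_bl_desc.start_tag above is a 256-byte IMEM block tag, which is why ls_ucode_img_build() below multiplies it by 256 to obtain both the bootloader's IMEM load offset and its entry point. In sketch form, with an example tag:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t start_tag = 0x1; /* example tag */

    /* Tags address 256-byte IMEM blocks, so the byte offset (and the
     * bootloader entry point) is simply tag * 256. */
    uint32_t imem_offset = start_tag * 256;

    printf("bootloader loads and starts at IMEM 0x%x\n",
           (unsigned)imem_offset);
    return 0;
}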
- */ - - -#include "ls_ucode.h" -#include "acr.h" - -#include - -#define BL_DESC_BLK_SIZE 256 -/** - * Build a ucode image and descriptor from provided bootloader, code and data. - * - * @bl: bootloader image, including 16-bytes descriptor - * @code: LS firmware code segment - * @data: LS firmware data segment - * @desc: ucode descriptor to be written - * - * Return: allocated ucode image with corresponding descriptor information. desc - * is also updated to contain the right offsets within returned image. - */ -static void * -ls_ucode_img_build(const struct firmware *bl, const struct firmware *code, - const struct firmware *data, struct ls_ucode_img_desc *desc) -{ - struct fw_bin_header *bin_hdr = (void *)bl->data; - struct fw_bl_desc *bl_desc = (void *)bl->data + bin_hdr->header_offset; - void *bl_data = (void *)bl->data + bin_hdr->data_offset; - u32 pos = 0; - void *image; - - desc->bootloader_start_offset = pos; - desc->bootloader_size = ALIGN(bl_desc->code_size, sizeof(u32)); - desc->bootloader_imem_offset = bl_desc->start_tag * 256; - desc->bootloader_entry_point = bl_desc->start_tag * 256; - - pos = ALIGN(pos + desc->bootloader_size, BL_DESC_BLK_SIZE); - desc->app_start_offset = pos; - desc->app_size = ALIGN(code->size, BL_DESC_BLK_SIZE) + - ALIGN(data->size, BL_DESC_BLK_SIZE); - desc->app_imem_offset = 0; - desc->app_imem_entry = 0; - desc->app_dmem_offset = 0; - desc->app_resident_code_offset = 0; - desc->app_resident_code_size = ALIGN(code->size, BL_DESC_BLK_SIZE); - - pos = ALIGN(pos + desc->app_resident_code_size, BL_DESC_BLK_SIZE); - desc->app_resident_data_offset = pos - desc->app_start_offset; - desc->app_resident_data_size = ALIGN(data->size, BL_DESC_BLK_SIZE); - - desc->image_size = ALIGN(bl_desc->code_size, BL_DESC_BLK_SIZE) + - desc->app_size; - - image = kzalloc(desc->image_size, GFP_KERNEL); - if (!image) - return ERR_PTR(-ENOMEM); - - memcpy(image + desc->bootloader_start_offset, bl_data, - bl_desc->code_size); - memcpy(image + desc->app_start_offset, code->data, code->size); - memcpy(image + desc->app_start_offset + desc->app_resident_data_offset, - data->data, data->size); - - return image; -} - -/** - * ls_ucode_img_load_gr() - load and prepare a LS GR ucode image - * - * Load the LS microcode, bootloader and signature and pack them into a single - * blob. Also generate the corresponding ucode descriptor. 
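ls_ucode_img_build() above lays the image out as bootloader, then app code, then app data, padding each section to the 256-byte BL_DESC_BLK_SIZE. A self-contained walk through the same offset math with invented sizes:

#include <stdint.h>
#include <stdio.h>

#define BLK 256u /* BL_DESC_BLK_SIZE above */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
    /* Section sizes invented for illustration. */
    uint32_t bl_size = 0x340u, code_size = 0x1234u, data_size = 0x56u;

    /* Bootloader at 0, padded to a word, then to a 256-byte block. */
    uint32_t app_start = ALIGN_UP(ALIGN_UP(bl_size, 4u), BLK);

    /* Code comes first; data follows at the next block boundary. */
    uint32_t data_off = ALIGN_UP(code_size, BLK); /* from app_start */

    uint32_t image_size = ALIGN_UP(bl_size, BLK) +
                          ALIGN_UP(code_size, BLK) + ALIGN_UP(data_size, BLK);

    printf("app@0x%x data@app+0x%x total 0x%x\n",
           (unsigned)app_start, (unsigned)data_off, (unsigned)image_size);
    return 0;
}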
-
-/**
- * ls_ucode_img_load_gr() - load and prepare a LS GR ucode image
- *
- * Load the LS microcode, bootloader and signature and pack them into a single
- * blob. Also generate the corresponding ucode descriptor.
- */
-static int
-ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, int maxver,
-		     struct ls_ucode_img *img, const char *falcon_name)
-{
-	const struct firmware *bl, *code, *data, *sig;
-	char f[64];
-	int ret;
-
-	snprintf(f, sizeof(f), "gr/%s_bl", falcon_name);
-	ret = nvkm_firmware_get(subdev, f, &bl);
-	if (ret)
-		goto error;
-
-	snprintf(f, sizeof(f), "gr/%s_inst", falcon_name);
-	ret = nvkm_firmware_get(subdev, f, &code);
-	if (ret)
-		goto free_bl;
-
-	snprintf(f, sizeof(f), "gr/%s_data", falcon_name);
-	ret = nvkm_firmware_get(subdev, f, &data);
-	if (ret)
-		goto free_inst;
-
-	snprintf(f, sizeof(f), "gr/%s_sig", falcon_name);
-	ret = nvkm_firmware_get(subdev, f, &sig);
-	if (ret)
-		goto free_data;
-
-	img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL);
-	if (!img->sig) {
-		ret = -ENOMEM;
-		goto free_sig;
-	}
-	img->sig_size = sig->size;
-
-	img->ucode_data = ls_ucode_img_build(bl, code, data,
-					     &img->ucode_desc);
-	if (IS_ERR(img->ucode_data)) {
-		kfree(img->sig);
-		ret = PTR_ERR(img->ucode_data);
-		goto free_sig;
-	}
-	img->ucode_size = img->ucode_desc.image_size;
-
-free_sig:
-	nvkm_firmware_put(sig);
-free_data:
-	nvkm_firmware_put(data);
-free_inst:
-	nvkm_firmware_put(code);
-free_bl:
-	nvkm_firmware_put(bl);
-error:
-	return ret;
-}
-
-int
-acr_ls_ucode_load_fecs(const struct nvkm_secboot *sb, int maxver,
-		       struct ls_ucode_img *img)
-{
-	return ls_ucode_img_load_gr(&sb->subdev, maxver, img, "fecs");
-}
-
-int
-acr_ls_ucode_load_gpccs(const struct nvkm_secboot *sb, int maxver,
-			struct ls_ucode_img *img)
-{
-	return ls_ucode_img_load_gr(&sb->subdev, maxver, img, "gpccs");
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
deleted file mode 100644
index a84a999445bb5fbb6534ce61d70fec646440d2bb..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-
-#include "ls_ucode.h"
-#include "acr.h"
-
-#include <core/firmware.h>
-#include <core/msgqueue.h>
-#include <engine/falcon.h>
-#include <engine/sec2.h>
-#include <subdev/mc.h>
-#include <subdev/pmu.h>
-
-/**
- * acr_ls_ucode_load_msgqueue - load and prepare a ucode img for a msgqueue fw
- *
- * Load the LS microcode, desc and signature and pack them into a single
- * blob.
- */
-static int
-acr_ls_ucode_load_msgqueue(const struct nvkm_subdev *subdev, const char *name,
-			   int maxver, struct ls_ucode_img *img)
-{
-	const struct firmware *image, *desc, *sig;
-	char f[64];
-	int ver, ret;
-
-	snprintf(f, sizeof(f), "%s/image", name);
-	ver = nvkm_firmware_get_version(subdev, f, 0, maxver, &image);
-	if (ver < 0)
-		return ver;
-	img->ucode_data = kmemdup(image->data, image->size, GFP_KERNEL);
-	nvkm_firmware_put(image);
-	if (!img->ucode_data)
-		return -ENOMEM;
-
-	snprintf(f, sizeof(f), "%s/desc", name);
-	ret = nvkm_firmware_get_version(subdev, f, ver, ver, &desc);
-	if (ret < 0)
-		return ret;
-	memcpy(&img->ucode_desc, desc->data, sizeof(img->ucode_desc));
-	img->ucode_size = ALIGN(img->ucode_desc.app_start_offset + img->ucode_desc.app_size, 256);
-	nvkm_firmware_put(desc);
-
-	snprintf(f, sizeof(f), "%s/sig", name);
-	ret = nvkm_firmware_get_version(subdev, f, ver, ver, &sig);
-	if (ret < 0)
-		return ret;
-	img->sig_size = sig->size;
-	img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL);
-	nvkm_firmware_put(sig);
-	if (!img->sig)
-		return -ENOMEM;
-
-	return ver;
-}
-
-static int
-acr_ls_msgqueue_post_run(struct nvkm_msgqueue *queue,
-			 struct nvkm_falcon *falcon, u32 addr_args)
-{
-	struct nvkm_device *device = falcon->owner->device;
-	u8 buf[NVKM_MSGQUEUE_CMDLINE_SIZE];
-
-	memset(buf, 0, sizeof(buf));
-	nvkm_msgqueue_write_cmdline(queue, buf);
-	nvkm_falcon_load_dmem(falcon, buf, addr_args, sizeof(buf), 0);
-	/* rearm the queue so it will wait for the init message */
-	nvkm_msgqueue_reinit(queue);
-
-	/* Enable interrupts */
-	nvkm_falcon_wr32(falcon, 0x10, 0xff);
-	nvkm_mc_intr_mask(device, falcon->owner->index, true);
-
-	/* Start LS firmware on boot falcon */
-	nvkm_falcon_start(falcon);
-
-	return 0;
-}
-
-int
-acr_ls_ucode_load_pmu(const struct nvkm_secboot *sb, int maxver,
-		      struct ls_ucode_img *img)
-{
-	struct nvkm_pmu *pmu = sb->subdev.device->pmu;
-	int ver, ret;
-
-	ver = acr_ls_ucode_load_msgqueue(&sb->subdev, "pmu", maxver, img);
-	if (ver < 0)
-		return ver;
-
-	/* Allocate the PMU queue corresponding to the FW version */
-	ret = nvkm_msgqueue_new(img->ucode_desc.app_version, pmu->falcon,
-				sb, &pmu->queue);
-	if (ret)
-		return ret;
-
-	return ver;
-}
-
-int
-acr_ls_pmu_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb)
-{
-	struct nvkm_device *device = sb->subdev.device;
-	struct nvkm_pmu *pmu = device->pmu;
-	u32 addr_args = pmu->falcon->data.limit - NVKM_MSGQUEUE_CMDLINE_SIZE;
-	int ret;
-
-	ret = acr_ls_msgqueue_post_run(pmu->queue, pmu->falcon, addr_args);
-	if (ret)
-		return ret;
-
-	nvkm_debug(&sb->subdev, "%s started\n",
-		   nvkm_secboot_falcon_name[acr->boot_falcon]);
-
-	return 0;
-}
-
-int
-acr_ls_ucode_load_sec2(const struct nvkm_secboot *sb, int maxver,
-		       struct ls_ucode_img *img)
-{
-	struct nvkm_sec2 *sec = sb->subdev.device->sec2;
-	int ver, ret;
-
-	ver = acr_ls_ucode_load_msgqueue(&sb->subdev, "sec2", maxver, img);
-	if (ver < 0)
-		return ver;
-
-	/* Allocate the SEC2 queue corresponding to the FW version */
-	ret = nvkm_msgqueue_new(img->ucode_desc.app_version, sec->falcon,
-				sb, &sec->queue);
-	if (ret)
-		return ret;
-
-	return ver;
-}
-
-int
-acr_ls_sec2_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb)
-{
-	const struct nvkm_subdev *subdev = &sb->subdev;
-	struct nvkm_device *device = subdev->device;
-	struct nvkm_sec2 *sec = device->sec2;
-	/* on SEC arguments are always at the beginning of EMEM */
-	const u32 addr_args = 0x01000000;
-	int ret;
-
-	ret = acr_ls_msgqueue_post_run(sec->queue, sec->falcon, addr_args);
-	if (ret)
-		return ret;
-
-	nvkm_debug(&sb->subdev, "%s started\n",
-		   nvkm_secboot_falcon_name[acr->boot_falcon]);
-
-	return 0;
-}
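One detail worth noting in the message-queue code removed above: the command-line buffer lands at a falcon-specific address. The PMU takes it in the last NVKM_MSGQUEUE_CMDLINE_SIZE bytes of its DMEM, while SEC2 takes it at the start of EMEM, which the code addresses with the fixed 0x01000000 constant. A condensed illustration; the helper is hypothetical and simply restates what acr_ls_pmu_post_run() and acr_ls_sec2_post_run() pass to acr_ls_msgqueue_post_run():

/* Illustration only: where each boot falcon expects its command line,
 * as encoded by the two post_run functions above. */
static u32 boot_falcon_cmdline_addr(const struct nvkm_falcon *falcon,
				    bool is_sec2)
{
	if (is_sec2)
		return 0x01000000;	/* beginning of EMEM */
	/* PMU: last NVKM_MSGQUEUE_CMDLINE_SIZE bytes of DMEM */
	return falcon->data.limit - NVKM_MSGQUEUE_CMDLINE_SIZE;
}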
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
deleted file mode 100644
index 959a7b2dbdc92f4d60de3c52146784aa85ab0be6..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NVKM_SECBOOT_PRIV_H__
-#define __NVKM_SECBOOT_PRIV_H__
-
-#include <subdev/secboot.h>
-#include <subdev/mmu.h>
-struct nvkm_gpuobj;
-
-struct nvkm_secboot_func {
-	int (*oneinit)(struct nvkm_secboot *);
-	int (*fini)(struct nvkm_secboot *, bool suspend);
-	void *(*dtor)(struct nvkm_secboot *);
-	int (*run_blob)(struct nvkm_secboot *, struct nvkm_gpuobj *,
-			struct nvkm_falcon *);
-};
-
-int nvkm_secboot_ctor(const struct nvkm_secboot_func *, struct nvkm_acr *,
-		      struct nvkm_device *, int, struct nvkm_secboot *);
-int nvkm_secboot_falcon_reset(struct nvkm_secboot *);
-int nvkm_secboot_falcon_run(struct nvkm_secboot *);
-
-extern const struct nvkm_secboot_func gp102_secboot;
-
-struct flcn_u64 {
-	u32 lo;
-	u32 hi;
-};
-
-static inline u64 flcn64_to_u64(const struct flcn_u64 f)
-{
-	return ((u64)f.hi) << 32 | f.lo;
-}
-
-static inline struct flcn_u64 u64_to_flcn64(u64 u)
-{
-	struct flcn_u64 ret;
-
-	ret.hi = upper_32_bits(u);
-	ret.lo = lower_32_bits(u);
-
-	return ret;
-}
-
-#endif
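The flcn_u64 helpers deleted above exist because falcon DMA descriptors take 64-bit addresses as explicit lo/hi word pairs. A quick round trip (arbitrary value, illustration only) shows the conversion is lossless:

	u64 addr = 0x123456789ULL;
	struct flcn_u64 f = u64_to_flcn64(addr);	/* f.hi = 0x1, f.lo = 0x23456789 */

	WARN_ON(flcn64_to_u64(f) != addr);		/* never fires: hi << 32 | lo */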
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 413dbdd1771e33c04073a519448736dab2259b66..dbb90f2d2ccde5973db67530924c358059cec747 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -393,8 +393,7 @@ static void dispc_get_reg_field(struct dispc_device *dispc,
 				enum dispc_feat_reg_field id,
 				u8 *start, u8 *end)
 {
-	if (id >= dispc->feat->num_reg_fields)
-		BUG();
+	BUG_ON(id >= dispc->feat->num_reg_fields);
 
 	*start = dispc->feat->reg_fields[id].start;
 	*end = dispc->feat->reg_fields[id].end;
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 41f796b28dd5e406f319d66f2e4ac013e00fcb90..ae44ac2ec10607866739176151fb49e2efa2decb 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -338,6 +338,17 @@ config DRM_PANEL_SITRONIX_ST7789V
 	  Say Y here if you want to enable support for the Sitronix
 	  ST7789V controller for 240x320 LCD panels
 
+config DRM_PANEL_SONY_ACX424AKP
+	tristate "Sony ACX424AKP DSI command mode panel"
+	depends on OF
+	depends on DRM_MIPI_DSI
+	depends on BACKLIGHT_CLASS_DEVICE
+	select VIDEOMODE_HELPERS
+	help
+	  Say Y here if you want to enable the Sony ACX424 display
+	  panel. This panel supports DSI in both command and video
+	  mode.
+
 config DRM_PANEL_SONY_ACX565AKM
 	tristate "Sony ACX565AKM panel"
 	depends on GPIOLIB && OF && SPI
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 4dc7acff21b90f26342a668ca64b0864224f5c11..7c4d3c581fd4289f7fa7700287cb23e784f223c8 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_DRM_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
 obj-$(CONFIG_DRM_PANEL_SHARP_LS043T1LE01) += panel-sharp-ls043t1le01.o
 obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7701) += panel-sitronix-st7701.o
 obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o
+obj-$(CONFIG_DRM_PANEL_SONY_ACX424AKP) += panel-sony-acx424akp.o
 obj-$(CONFIG_DRM_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
 obj-$(CONFIG_DRM_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
 obj-$(CONFIG_DRM_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index ba3f85f36c2f9874c9ed8415d23fb9e4b82520e6..e14c14ac62b53d9ca569e53f8a85421a444ae141 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -629,6 +629,35 @@ static const struct panel_desc auo_b101xtn01 = {
 	},
 };
 
+static const struct drm_display_mode auo_b116xak01_mode = {
+	.clock = 69300,
+	.hdisplay = 1366,
+	.hsync_start = 1366 + 48,
+	.hsync_end = 1366 + 48 + 32,
+	.htotal = 1366 + 48 + 32 + 10,
+	.vdisplay = 768,
+	.vsync_start = 768 + 4,
+	.vsync_end = 768 + 4 + 6,
+	.vtotal = 768 + 4 + 6 + 15,
+	.vrefresh = 60,
+	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc auo_b116xak01 = {
+	.modes = &auo_b116xak01_mode,
+	.num_modes = 1,
+	.bpc = 6,
+	.size = {
+		.width = 256,
+		.height = 144,
+	},
+	.delay = {
+		.hpd_absent_delay = 200,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+	.connector_type = DRM_MODE_CONNECTOR_eDP,
+};
+
 static const struct drm_display_mode auo_b116xw03_mode = {
 	.clock = 70589,
 	.hdisplay = 1366,
@@ -1008,6 +1037,38 @@ static const struct panel_desc boe_nv101wxmn51 = {
 	},
 };
 
+static const struct drm_display_mode boe_nv140fhmn49_modes[] = {
+	{
+		.clock = 148500,
+		.hdisplay = 1920,
+		.hsync_start = 1920 + 48,
+		.hsync_end = 1920 + 48 + 32,
+		.htotal = 2200,
+		.vdisplay = 1080,
+		.vsync_start = 1080 + 3,
+		.vsync_end = 1080 + 3 + 5,
+		.vtotal = 1125,
+		.vrefresh = 60,
+	},
+};
+
+static const struct panel_desc boe_nv140fhmn49 = {
+	.modes = boe_nv140fhmn49_modes,
+	.num_modes = ARRAY_SIZE(boe_nv140fhmn49_modes),
+	.bpc = 6,
+	.size = {
+		.width = 309,
+		.height = 174,
+	},
+	.delay = {
+		.prepare = 210,
+		.enable = 50,
+		.unprepare = 160,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+	.connector_type = DRM_MODE_CONNECTOR_eDP,
+};
+
 static const struct drm_display_mode cdtech_s043wq26h_ct7_mode = {
 	.clock = 9000,
 	.hdisplay = 480,
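A quick sanity check on the new mode lines: refresh rate is pixel clock divided by total pixels per frame. For auo_b116xak01, htotal = 1366 + 48 + 32 + 10 = 1456 and vtotal = 768 + 4 + 6 + 15 = 793, so 69,300,000 / (1456 × 793) ≈ 60.02 Hz, consistent with .vrefresh = 60. For boe_nv140fhmn49, 148,500,000 / (2200 × 1125) = 60 Hz exactly.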
@@ -2553,6 +2614,30 @@ static const struct panel_desc samsung_ltn140at29_301 = {
 	},
 };
 
+static const struct display_timing satoz_sat050at40h12r2_timing = {
+	.pixelclock = {33300000, 33300000, 50000000},
+	.hactive = {800, 800, 800},
+	.hfront_porch = {16, 210, 354},
+	.hback_porch = {46, 46, 46},
+	.hsync_len = {1, 1, 40},
+	.vactive = {480, 480, 480},
+	.vfront_porch = {7, 22, 147},
+	.vback_porch = {23, 23, 23},
+	.vsync_len = {1, 1, 20},
+};
+
+static const struct panel_desc satoz_sat050at40h12r2 = {
+	.timings = &satoz_sat050at40h12r2_timing,
+	.num_timings = 1,
+	.bpc = 8,
+	.size = {
+		.width = 108,
+		.height = 65,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+	.connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
 static const struct drm_display_mode sharp_ld_d5116z01b_mode = {
 	.clock = 168480,
 	.hdisplay = 1920,
@@ -3125,6 +3210,9 @@ static const struct of_device_id platform_of_match[] = {
 	}, {
 		.compatible = "auo,b101xtn01",
 		.data = &auo_b101xtn01,
+	}, {
+		.compatible = "auo,b116xak01",
+		.data = &auo_b116xak01,
 	}, {
 		.compatible = "auo,b116xw03",
 		.data = &auo_b116xw03,
@@ -3167,6 +3255,9 @@ static const struct of_device_id platform_of_match[] = {
 	}, {
 		.compatible = "boe,nv101wxmn51",
 		.data = &boe_nv101wxmn51,
+	}, {
+		.compatible = "boe,nv140fhmn49",
+		.data = &boe_nv140fhmn49,
 	}, {
 		.compatible = "cdtech,s043wq26h-ct7",
 		.data = &cdtech_s043wq26h_ct7,
@@ -3356,6 +3447,9 @@ static const struct of_device_id platform_of_match[] = {
 	}, {
 		.compatible = "samsung,ltn140at29-301",
 		.data = &samsung_ltn140at29_301,
+	}, {
+		.compatible = "satoz,sat050at40h12r2",
+		.data = &satoz_sat050at40h12r2,
 	}, {
 		.compatible = "sharp,ld-d5116z01b",
 		.data = &sharp_ld_d5116z01b,
diff --git a/drivers/gpu/drm/panel/panel-sony-acx424akp.c b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
new file mode 100644
index 0000000000000000000000000000000000000000..de0abf76ae6fd7e86963647c8d20c5205e673483
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * MIPI-DSI Sony ACX424AKP panel driver. This is a 480x864
+ * AMOLED panel with a command-only DSI interface.
+ *
+ * Copyright (C) Linaro Ltd. 2019
+ * Author: Linus Walleij
+ * Based on code and know-how from Marcus Lorentzon
+ * Copyright (C) ST-Ericsson SA 2010
+ */
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include