diff --git a/.clang-format b/.clang-format index 196ca317bd1f24ad57ad9ded549bc0b7994d8111..6ec5558b516bd909e2bb266208e08fdffee4364e 100644 --- a/.clang-format +++ b/.clang-format @@ -86,6 +86,8 @@ ForEachMacros: - 'bio_for_each_segment_all' - 'bio_list_for_each' - 'bip_for_each_vec' + - 'bitmap_for_each_clear_region' + - 'bitmap_for_each_set_region' - 'blkg_for_each_descendant_post' - 'blkg_for_each_descendant_pre' - 'blk_queue_for_each_rl' @@ -115,6 +117,7 @@ ForEachMacros: - 'drm_client_for_each_connector_iter' - 'drm_client_for_each_modeset' - 'drm_connector_for_each_possible_encoder' + - 'drm_for_each_bridge_in_chain' - 'drm_for_each_connector_iter' - 'drm_for_each_crtc' - 'drm_for_each_encoder' @@ -136,9 +139,10 @@ ForEachMacros: - 'for_each_bio' - 'for_each_board_func_rsrc' - 'for_each_bvec' + - 'for_each_card_auxs' + - 'for_each_card_auxs_safe' - 'for_each_card_components' - - 'for_each_card_links' - - 'for_each_card_links_safe' + - 'for_each_card_pre_auxs' - 'for_each_card_prelinks' - 'for_each_card_rtds' - 'for_each_card_rtds_safe' @@ -166,6 +170,7 @@ ForEachMacros: - 'for_each_dpcm_fe' - 'for_each_drhd_unit' - 'for_each_dss_dev' + - 'for_each_efi_handle' - 'for_each_efi_memory_desc' - 'for_each_efi_memory_desc_in_map' - 'for_each_element' @@ -190,6 +195,7 @@ ForEachMacros: - 'for_each_lru' - 'for_each_matching_node' - 'for_each_matching_node_and_match' + - 'for_each_member' - 'for_each_memblock' - 'for_each_memblock_type' - 'for_each_memcg_cache_index' @@ -200,9 +206,11 @@ ForEachMacros: - 'for_each_msi_entry' - 'for_each_msi_entry_safe' - 'for_each_net' + - 'for_each_net_continue_reverse' - 'for_each_netdev' - 'for_each_netdev_continue' - 'for_each_netdev_continue_rcu' + - 'for_each_netdev_continue_reverse' - 'for_each_netdev_feature' - 'for_each_netdev_in_bond_rcu' - 'for_each_netdev_rcu' @@ -254,10 +262,10 @@ ForEachMacros: - 'for_each_reserved_mem_region' - 'for_each_rtd_codec_dai' - 'for_each_rtd_codec_dai_rollback' - - 'for_each_rtdcom' - - 'for_each_rtdcom_safe' + - 'for_each_rtd_components' - 'for_each_set_bit' - 'for_each_set_bit_from' + - 'for_each_set_clump8' - 'for_each_sg' - 'for_each_sg_dma_page' - 'for_each_sg_page' @@ -267,6 +275,7 @@ ForEachMacros: - 'for_each_subelement_id' - '__for_each_thread' - 'for_each_thread' + - 'for_each_wakeup_source' - 'for_each_zone' - 'for_each_zone_zonelist' - 'for_each_zone_zonelist_nodemask' @@ -330,6 +339,7 @@ ForEachMacros: - 'list_for_each' - 'list_for_each_codec' - 'list_for_each_codec_safe' + - 'list_for_each_continue' - 'list_for_each_entry' - 'list_for_each_entry_continue' - 'list_for_each_entry_continue_rcu' @@ -351,6 +361,7 @@ ForEachMacros: - 'llist_for_each_entry' - 'llist_for_each_entry_safe' - 'llist_for_each_safe' + - 'mci_for_each_dimm' - 'media_device_for_each_entity' - 'media_device_for_each_intf' - 'media_device_for_each_link' @@ -444,10 +455,16 @@ ForEachMacros: - 'virtio_device_for_each_vq' - 'xa_for_each' - 'xa_for_each_marked' + - 'xa_for_each_range' - 'xa_for_each_start' - 'xas_for_each' - 'xas_for_each_conflict' - 'xas_for_each_marked' + - 'xbc_array_for_each_value' + - 'xbc_for_each_key_value' + - 'xbc_node_for_each_array_value' + - 'xbc_node_for_each_child' + - 'xbc_node_for_each_key_value' - 'zorro_for_each_dev' #IncludeBlocks: Preserve # Unknown to clang-format-5.0 diff --git a/.mailmap b/.mailmap index ffb8f28290c75243da611596630d4a2651c0e76e..a0dfce8de1ba6f93fee6a17450daa83703548dfe 100644 --- a/.mailmap +++ b/.mailmap @@ -225,6 +225,7 @@ Pratyush Anand Praveen BP Punit Agrawal Qais Yousef +Quentin 
Monnet Quentin Perret Rafael J. Wysocki Rajesh Shah diff --git a/COPYING b/COPYING index da4cb28febe66172a9fdf1a235525ae6c00cde1d..a635a38ef9405fdfcfe97f3a435393c1e9cae971 100644 --- a/COPYING +++ b/COPYING @@ -16,3 +16,5 @@ In addition, other licenses may also apply. Please see: Documentation/process/license-rules.rst for more details. + +All contributions to the Linux Kernel are subject to this COPYING file. diff --git a/CREDITS b/CREDITS index a97d3280a627b3665e68c94a574409f71cee1da6..032b5994f4760a13770d1629b8b057195d9266c2 100644 --- a/CREDITS +++ b/CREDITS @@ -567,6 +567,11 @@ D: Original author of Amiga FFS filesystem S: Orlando, Florida S: USA +N: Paul Burton +E: paulburton@kernel.org +W: https://pburton.com +D: MIPS maintainer 2018-2020 + N: Lennert Buytenhek E: kernel@wantstofly.org D: Original (2.4) rewrite of the ethernet bridging code diff --git a/Documentation/admin-guide/acpi/fan_performance_states.rst b/Documentation/admin-guide/acpi/fan_performance_states.rst index 21d233ca50d8b1009cb21bd033088af60d297c39..98fe5c3331214df8745ba8fab3e25875bdec7f8f 100644 --- a/Documentation/admin-guide/acpi/fan_performance_states.rst +++ b/Documentation/admin-guide/acpi/fan_performance_states.rst @@ -18,7 +18,7 @@ may look as follows:: $ ls -l /sys/bus/acpi/devices/INT3404:00/ total 0 -... + ... -r--r--r-- 1 root root 4096 Dec 13 20:38 state0 -r--r--r-- 1 root root 4096 Dec 13 20:38 state1 -r--r--r-- 1 root root 4096 Dec 13 20:38 state10 @@ -38,7 +38,7 @@ where each of the "state*" files represents one performance state of the fan and contains a colon-separated list of 5 integer numbers (fields) with the following interpretation:: -control_percent:trip_point_index:speed_rpm:noise_level_mdb:power_mw + control_percent:trip_point_index:speed_rpm:noise_level_mdb:power_mw * ``control_percent``: The percent value to be used to set the fan speed to a specific level using the _FSL object (0-100). diff --git a/Documentation/admin-guide/bootconfig.rst b/Documentation/admin-guide/bootconfig.rst index b342a679639277c41100b13d9cdf6b88d70040b8..cf2edcd09183bef328294cbe67b28983149d283f 100644 --- a/Documentation/admin-guide/bootconfig.rst +++ b/Documentation/admin-guide/bootconfig.rst @@ -62,6 +62,30 @@ Or more shorter, written as following:: In both styles, same key words are automatically merged when parsing it at boot time. So you can append similar trees or key-values. +Same-key Values +--------------- + +It is prohibited that two or more values or arrays share a same-key. +For example,:: + + foo = bar, baz + foo = qux # !ERROR! we can not re-define same key + +If you want to append the value to existing key as an array member, +you can use ``+=`` operator. For example:: + + foo = bar, baz + foo += qux + +In this case, the key ``foo`` has ``bar``, ``baz`` and ``qux``. + +However, a sub-key and a value can not co-exist under a parent key. +For example, following config is NOT allowed.:: + + foo = value1 + foo.bar = value2 # !ERROR! subkey "bar" and value "value1" can NOT co-exist + + Comments -------- @@ -102,9 +126,13 @@ Boot Kernel With a Boot Config ============================== Since the boot configuration file is loaded with initrd, it will be added -to the end of the initrd (initramfs) image file. The Linux kernel decodes -the last part of the initrd image in memory to get the boot configuration -data. +to the end of the initrd (initramfs) image file with size, checksum and +12-byte magic word as below. 
+ +[initrd][bootconfig][size(u32)][checksum(u32)][#BOOTCONFIG\n] + +The Linux kernel decodes the last part of the initrd image in memory to +get the boot configuration data. Because of this "piggyback" method, there is no need to change or update the boot loader and the kernel image itself. diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index dbc22d68462751d2bb59ab35784c1c61c84bbb0a..c07815d230bcd4bde32a93c0615326f37a68824c 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -136,6 +136,10 @@ dynamic table installation which will install SSDT tables to /sys/firmware/acpi/tables/dynamic. + acpi_no_watchdog [HW,ACPI,WDT] + Ignore the ACPI-based watchdog interface (WDAT) and let + a native driver control the watchdog device instead. + acpi_rsdp= [ACPI,EFI,KEXEC] Pass the RSDP address to the kernel, mostly used on machines running EFI runtime service to boot the diff --git a/Documentation/arm64/memory.rst b/Documentation/arm64/memory.rst index 02e02175e6f56a75da568cd0112bf71769b8f938..cf03b3290800c25ea7c5d8cba093b0189e3d727a 100644 --- a/Documentation/arm64/memory.rst +++ b/Documentation/arm64/memory.rst @@ -129,7 +129,7 @@ this logic. As a single binary will need to support both 48-bit and 52-bit VA spaces, the VMEMMAP must be sized large enough for 52-bit VAs and -also must be sized large enought to accommodate a fixed PAGE_OFFSET. +also must be sized large enough to accommodate a fixed PAGE_OFFSET. Most code in the kernel should not need to consider the VA_BITS, for code that does need to know the VA size the variables are diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst index 9120e59578dcaaf24e5633cd0c1a2ec37b1ea278..2c08c628febdf3c7a17c2129047d9f53e246a779 100644 --- a/Documentation/arm64/silicon-errata.rst +++ b/Documentation/arm64/silicon-errata.rst @@ -110,6 +110,8 @@ stable kernels. +----------------+-----------------+-----------------+-----------------------------+ | Cavium | ThunderX GICv3 | #23154 | CAVIUM_ERRATUM_23154 | +----------------+-----------------+-----------------+-----------------------------+ +| Cavium | ThunderX GICv3 | #38539 | N/A | ++----------------+-----------------+-----------------+-----------------------------+ | Cavium | ThunderX Core | #27456 | CAVIUM_ERRATUM_27456 | +----------------+-----------------+-----------------+-----------------------------+ | Cavium | ThunderX Core | #30115 | CAVIUM_ERRATUM_30115 | diff --git a/Documentation/arm64/tagged-address-abi.rst b/Documentation/arm64/tagged-address-abi.rst index d4a85d535bf99e09cfe5c3dc41652eaa3b64f21b..4a9d9c794ee5d889638d1a3af008dec06d11feaf 100644 --- a/Documentation/arm64/tagged-address-abi.rst +++ b/Documentation/arm64/tagged-address-abi.rst @@ -44,8 +44,15 @@ The AArch64 Tagged Address ABI has two stages of relaxation depending how the user addresses are used by the kernel: 1. User addresses not accessed by the kernel but used for address space - management (e.g. ``mmap()``, ``mprotect()``, ``madvise()``). The use - of valid tagged pointers in this context is always allowed. + management (e.g. ``mprotect()``, ``madvise()``). The use of valid + tagged pointers in this context is allowed with the exception of + ``brk()``, ``mmap()`` and the ``new_address`` argument to + ``mremap()`` as these have the potential to alias with existing + user addresses. 
+ + NOTE: This behaviour changed in v5.6 and so some earlier kernels may + incorrectly accept valid tagged pointers for the ``brk()``, + ``mmap()`` and ``mremap()`` system calls. 2. User addresses accessed by the kernel (e.g. ``write()``). This ABI relaxation is disabled by default and the application thread needs to diff --git a/Documentation/dev-tools/kunit/usage.rst b/Documentation/dev-tools/kunit/usage.rst index 7cd56a1993b14ad75e5d21c5b3c23c29bee8f1cb..607758a66a99cc04b6abb71b3f3d610a3ccaf26c 100644 --- a/Documentation/dev-tools/kunit/usage.rst +++ b/Documentation/dev-tools/kunit/usage.rst @@ -551,6 +551,7 @@ options to your ``.config``: Once the kernel is built and installed, a simple .. code-block:: bash + modprobe example-test ...will run the tests. diff --git a/Documentation/devicetree/bindings/arm/arm,scmi.txt b/Documentation/devicetree/bindings/arm/arm,scmi.txt index f493d69e61946acf1d50476fad0429645e96114e..dc102c4e4a78b8cf6f79fae1fdf502f7dac924cb 100644 --- a/Documentation/devicetree/bindings/arm/arm,scmi.txt +++ b/Documentation/devicetree/bindings/arm/arm,scmi.txt @@ -102,7 +102,7 @@ Required sub-node properties: [1] Documentation/devicetree/bindings/clock/clock-bindings.txt [2] Documentation/devicetree/bindings/power/power-domain.yaml [3] Documentation/devicetree/bindings/thermal/thermal.txt -[4] Documentation/devicetree/bindings/sram/sram.txt +[4] Documentation/devicetree/bindings/sram/sram.yaml [5] Documentation/devicetree/bindings/reset/reset.txt Example: diff --git a/Documentation/devicetree/bindings/arm/arm,scpi.txt b/Documentation/devicetree/bindings/arm/arm,scpi.txt index 7b83ef43b41877ba783f47863d51f76d60813df9..dd04d9d9a1b8e0b06e43d16683feb440ebbff4d5 100644 --- a/Documentation/devicetree/bindings/arm/arm,scpi.txt +++ b/Documentation/devicetree/bindings/arm/arm,scpi.txt @@ -109,7 +109,7 @@ Required properties: [0] http://infocenter.arm.com/help/topic/com.arm.doc.dui0922b/index.html [1] Documentation/devicetree/bindings/clock/clock-bindings.txt [2] Documentation/devicetree/bindings/thermal/thermal.txt -[3] Documentation/devicetree/bindings/sram/sram.txt +[3] Documentation/devicetree/bindings/sram/sram.yaml [4] Documentation/devicetree/bindings/power/power-domain.yaml Example: diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm63138.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm63138.txt index b82b6a0ae6f725cee5f6138657e05bdb92e138f7..8c7a4908a8492cf17a65a6eee99c457312982883 100644 --- a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm63138.txt +++ b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm63138.txt @@ -62,7 +62,7 @@ Timer node: Syscon reboot node: -See Documentation/devicetree/bindings/power/reset/syscon-reboot.txt for the +See Documentation/devicetree/bindings/power/reset/syscon-reboot.yaml for the detailed list of properties, the two values defined below are specific to the BCM6328-style timer: diff --git a/Documentation/devicetree/bindings/arm/cpus.yaml b/Documentation/devicetree/bindings/arm/cpus.yaml index 7a9c3ce2dbef991d87c2111609c0aff4db40ca57..0d5b61056b106db0a356f473b2e9fa448838f7ed 100644 --- a/Documentation/devicetree/bindings/arm/cpus.yaml +++ b/Documentation/devicetree/bindings/arm/cpus.yaml @@ -216,7 +216,7 @@ properties: $ref: '/schemas/types.yaml#/definitions/phandle-array' description: | List of phandles to idle state nodes supported - by this cpu (see ./idle-states.txt). + by this cpu (see ./idle-states.yaml). 
capacity-dmips-mhz: $ref: '/schemas/types.yaml#/definitions/uint32' diff --git a/Documentation/devicetree/bindings/arm/fsl.yaml b/Documentation/devicetree/bindings/arm/fsl.yaml index a8e0b4a813edcf4e6f21803f9afe2896cdab0d88..0e17e1f6fb807e233723c9b53c732269b1a60b06 100644 --- a/Documentation/devicetree/bindings/arm/fsl.yaml +++ b/Documentation/devicetree/bindings/arm/fsl.yaml @@ -160,7 +160,7 @@ properties: items: - enum: - armadeus,imx6dl-apf6 # APF6 (Solo) SoM - - armadeus,imx6dl-apf6dldev # APF6 (Solo) SoM on APF6Dev board + - armadeus,imx6dl-apf6dev # APF6 (Solo) SoM on APF6Dev board - eckelmann,imx6dl-ci4x10 - emtrion,emcon-mx6 # emCON-MX6S or emCON-MX6DL SoM - emtrion,emcon-mx6-avari # emCON-MX6S or emCON-MX6DL SoM on Avari Base diff --git a/Documentation/devicetree/bindings/arm/hisilicon/hi3519-sysctrl.txt b/Documentation/devicetree/bindings/arm/hisilicon/hi3519-sysctrl.txt index 115c5be0bd0b0a3db9d6fc8df048bfd33ec81622..8defacc44dd5b9e43a62f1db0fc5cea34a0edfd5 100644 --- a/Documentation/devicetree/bindings/arm/hisilicon/hi3519-sysctrl.txt +++ b/Documentation/devicetree/bindings/arm/hisilicon/hi3519-sysctrl.txt @@ -1,7 +1,7 @@ * Hisilicon Hi3519 System Controller Block This bindings use the following binding: -Documentation/devicetree/bindings/mfd/syscon.txt +Documentation/devicetree/bindings/mfd/syscon.yaml Required properties: - compatible: "hisilicon,hi3519-sysctrl". diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt b/Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt index 06df04cc827a5eaba364f86f6800d1c0a84773d5..6ce0b212ec6d6447f4ef037ec2588278d72d0ef4 100644 --- a/Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt +++ b/Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt @@ -81,4 +81,4 @@ Example: }; }; -[1]. Documentation/devicetree/bindings/arm/idle-states.txt +[1]. Documentation/devicetree/bindings/arm/idle-states.yaml diff --git a/Documentation/devicetree/bindings/arm/omap/mpu.txt b/Documentation/devicetree/bindings/arm/omap/mpu.txt index f301e636fd525b9dd9fd11bffe718332f38c0da9..e41490e6979c3df792a57d0d44246d331ec8d847 100644 --- a/Documentation/devicetree/bindings/arm/omap/mpu.txt +++ b/Documentation/devicetree/bindings/arm/omap/mpu.txt @@ -17,7 +17,7 @@ am335x and am437x only: - pm-sram: Phandles to ocmcram nodes to be used for power management. First should be type 'protect-exec' for the driver to use to copy and run PM functions, second should be regular pool to be used for - data region for code. See Documentation/devicetree/bindings/sram/sram.txt + data region for code. See Documentation/devicetree/bindings/sram/sram.yaml for more details. Examples: diff --git a/Documentation/devicetree/bindings/arm/psci.yaml b/Documentation/devicetree/bindings/arm/psci.yaml index 8ef85420b2ab1913786fc69dd5fbfda86c8c1f0b..5e66934455bbc986afb759e32be3ed2e4e80b8a5 100644 --- a/Documentation/devicetree/bindings/arm/psci.yaml +++ b/Documentation/devicetree/bindings/arm/psci.yaml @@ -100,13 +100,14 @@ properties: bindings in [1]) must specify this property. [1] Kernel documentation - ARM idle states bindings - Documentation/devicetree/bindings/arm/idle-states.txt - - "#power-domain-cells": - description: - The number of cells in a PM domain specifier as per binding in [3]. - Must be 0 as to represent a single PM domain. 
+ Documentation/devicetree/bindings/arm/idle-states.yaml +patternProperties: + "^power-domain-": + allOf: + - $ref: "../power/power-domain.yaml#" + type: object + description: | ARM systems can have multiple cores, sometimes in an hierarchical arrangement. This often, but not always, maps directly to the processor power topology of the system. Individual nodes in a topology have their @@ -122,14 +123,8 @@ properties: helps to implement support for OSI mode and OS implementations may choose to mandate it. - [3] Documentation/devicetree/bindings/power/power_domain.txt - [4] Documentation/devicetree/bindings/power/domain-idle-state.txt - - power-domains: - $ref: '/schemas/types.yaml#/definitions/phandle-array' - description: - List of phandles and PM domain specifiers, as defined by bindings of the - PM domain provider. + [3] Documentation/devicetree/bindings/power/power-domain.yaml + [4] Documentation/devicetree/bindings/power/domain-idle-state.yaml required: - compatible @@ -199,7 +194,7 @@ examples: CPU0: cpu@0 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0>; enable-method = "psci"; power-domains = <&CPU_PD0>; @@ -208,7 +203,7 @@ examples: CPU1: cpu@1 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x100>; enable-method = "psci"; power-domains = <&CPU_PD1>; @@ -224,6 +219,9 @@ examples: exit-latency-us = <10>; min-residency-us = <100>; }; + }; + + domain-idle-states { CLUSTER_RET: cluster-retention { compatible = "domain-idle-state"; @@ -247,19 +245,19 @@ examples: compatible = "arm,psci-1.0"; method = "smc"; - CPU_PD0: cpu-pd0 { + CPU_PD0: power-domain-cpu0 { #power-domain-cells = <0>; domain-idle-states = <&CPU_PWRDN>; power-domains = <&CLUSTER_PD>; }; - CPU_PD1: cpu-pd1 { + CPU_PD1: power-domain-cpu1 { #power-domain-cells = <0>; domain-idle-states = <&CPU_PWRDN>; power-domains = <&CLUSTER_PD>; }; - CLUSTER_PD: cluster-pd { + CLUSTER_PD: power-domain-cluster { #power-domain-cells = <0>; domain-idle-states = <&CLUSTER_RET>, <&CLUSTER_PWRDN>; }; diff --git a/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml b/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml index 68917bb7c7e8727a55ad52cb03dd7bbc649b3441..55f7938c48260149736fc0117cdc669c23011bd9 100644 --- a/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml +++ b/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml @@ -52,7 +52,7 @@ required: examples: - | - mlahb: ahb { + mlahb: ahb@38000000 { compatible = "st,mlahb", "simple-bus"; #address-cells = <1>; #size-cells = <1>; diff --git a/Documentation/devicetree/bindings/bus/allwinner,sun8i-a23-rsb.yaml b/Documentation/devicetree/bindings/bus/allwinner,sun8i-a23-rsb.yaml index 9fe11ceecdba004c5546f30711be002eaca64694..80973619342d9803bf43edf6a66f7b3bb10cd809 100644 --- a/Documentation/devicetree/bindings/bus/allwinner,sun8i-a23-rsb.yaml +++ b/Documentation/devicetree/bindings/bus/allwinner,sun8i-a23-rsb.yaml @@ -70,7 +70,6 @@ examples: #size-cells = <0>; pmic@3e3 { - compatible = "..."; reg = <0x3e3>; /* ... 
*/ diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-osc-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-osc-clk.yaml index 69cfa4a3d562833aaaa7f0b11154c148dbb7c3ac..c604822cda073a3d97dc2a4625136aab9b2b40df 100644 --- a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-osc-clk.yaml +++ b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-osc-clk.yaml @@ -40,7 +40,7 @@ additionalProperties: false examples: - | - osc24M: clk@01c20050 { + osc24M: clk@1c20050 { #clock-cells = <0>; compatible = "allwinner,sun4i-a10-osc-clk"; reg = <0x01c20050 0x4>; diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-gt-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-gt-clk.yaml index 07f38def7dc3b3ebc43a4f9929507c8cee9e1b76..43963c3062c81a650de3c4f553724a9b1eb0dacb 100644 --- a/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-gt-clk.yaml +++ b/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-gt-clk.yaml @@ -41,7 +41,7 @@ additionalProperties: false examples: - | - clk@0600005c { + clk@600005c { #clock-cells = <0>; compatible = "allwinner,sun9i-a80-gt-clk"; reg = <0x0600005c 0x4>; diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml index 17f87178f6b8e91f7c21c0c97b41c673f70a2be5..3647007f82cad9c0dcf6f3511c815894061956af 100644 --- a/Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml +++ b/Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml @@ -42,7 +42,7 @@ properties: be part of GCC and hence the TSENS properties can also be part of the GCC/clock-controller node. For more details on the TSENS properties please refer - Documentation/devicetree/bindings/thermal/qcom-tsens.txt + Documentation/devicetree/bindings/thermal/qcom-tsens.yaml nvmem-cell-names: minItems: 1 diff --git a/Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml b/Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml index 33c7842917f629c8176df1f94ad52e4fc629a271..8b9a8f337f16845ffe91e6be6475f2d1fced1cab 100644 --- a/Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml +++ b/Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml @@ -23,6 +23,8 @@ properties: - items: - const: allwinner,sun7i-a20-crypto - const: allwinner,sun4i-a10-crypto + - items: + - const: allwinner,sun8i-a33-crypto reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml index bbd500b024d36696856616ae3022d8dc0b40be22..e5344c4ae22656d7ffa773a3a3ce95b8f750986d 100644 --- a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml +++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml @@ -49,9 +49,13 @@ properties: - enum: - allwinner,sun8i-h3-tcon-tv - allwinner,sun50i-a64-tcon-tv - - allwinner,sun50i-h6-tcon-tv - const: allwinner,sun8i-a83t-tcon-tv + - items: + - enum: + - allwinner,sun50i-h6-tcon-tv + - const: allwinner,sun8i-r40-tcon-tv + reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tv-encoder.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tv-encoder.yaml index 5d5d396651190b72ede21c5d5acd9a40605f2940..6009324be967ef517a64b99c0efed92d7862f932 100644 --- a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tv-encoder.yaml +++ 
b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tv-encoder.yaml @@ -49,11 +49,7 @@ examples: resets = <&tcon_ch0_clk 0>; port { - #address-cells = <1>; - #size-cells = <0>; - - tve0_in_tcon0: endpoint@0 { - reg = <0>; + tve0_in_tcon0: endpoint { remote-endpoint = <&tcon0_out_tve0>; }; }; diff --git a/Documentation/devicetree/bindings/display/bridge/anx6345.yaml b/Documentation/devicetree/bindings/display/bridge/anx6345.yaml index 6d72b3d11fbc4e9a31a9329cded32adea96285ff..c211038699233b943ad3e10e93215ef30be40434 100644 --- a/Documentation/devicetree/bindings/display/bridge/anx6345.yaml +++ b/Documentation/devicetree/bindings/display/bridge/anx6345.yaml @@ -79,21 +79,15 @@ examples: #size-cells = <0>; anx6345_in: port@0 { - #address-cells = <1>; - #size-cells = <0>; reg = <0>; - anx6345_in_tcon0: endpoint@0 { - reg = <0>; + anx6345_in_tcon0: endpoint { remote-endpoint = <&tcon0_out_anx6345>; }; }; anx6345_out: port@1 { - #address-cells = <1>; - #size-cells = <0>; reg = <1>; - anx6345_out_panel: endpoint@0 { - reg = <0>; + anx6345_out_panel: endpoint { remote-endpoint = <&panel_in_edp>; }; }; diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt index b6a7e7397b8b47ef4d250fd6f8dba971ee23dc00..58914cf681b8926b6958be4dc7a53581727e68fb 100644 --- a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt +++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt @@ -7,6 +7,7 @@ output bus. Required properties: - compatible: "mediatek,-dpi" + the supported chips are mt2701 , mt8173 and mt8183. - reg: Physical base address and length of the controller's registers - interrupts: The interrupt signal from the function block. - clocks: device clocks diff --git a/Documentation/devicetree/bindings/display/msm/gmu.txt b/Documentation/devicetree/bindings/display/msm/gmu.txt deleted file mode 100644 index bf9c7a2a495cc31cfcadf021394556234d209122..0000000000000000000000000000000000000000 --- a/Documentation/devicetree/bindings/display/msm/gmu.txt +++ /dev/null @@ -1,116 +0,0 @@ -Qualcomm adreno/snapdragon GMU (Graphics management unit) - -The GMU is a programmable power controller for the GPU. the CPU controls the -GMU which in turn handles power controls for the GPU. - -Required properties: -- compatible: "qcom,adreno-gmu-XYZ.W", "qcom,adreno-gmu" - for example: "qcom,adreno-gmu-630.2", "qcom,adreno-gmu" - Note that you need to list the less specific "qcom,adreno-gmu" - for generic matches and the more specific identifier to identify - the specific device. -- reg: Physical base address and length of the GMU registers. -- reg-names: Matching names for the register regions - * "gmu" - * "gmu_pdc" - * "gmu_pdc_seg" -- interrupts: The interrupt signals from the GMU. -- interrupt-names: Matching names for the interrupts - * "hfi" - * "gmu" -- clocks: phandles to the device clocks -- clock-names: Matching names for the clocks - * "gmu" - * "cxo" - * "axi" - * "mnoc" -- power-domains: should be: - <&clock_gpucc GPU_CX_GDSC> - <&clock_gpucc GPU_GX_GDSC> -- power-domain-names: Matching names for the power domains -- iommus: phandle to the adreno iommu -- operating-points-v2: phandle to the OPP operating points - -Optional properties: -- sram: phandle to the On Chip Memory (OCMEM) that's present on some Snapdragon - SoCs. See Documentation/devicetree/bindings/sram/qcom,ocmem.yaml. - -Example: - -/ { - ... 
- - gmu: gmu@506a000 { - compatible="qcom,adreno-gmu-630.2", "qcom,adreno-gmu"; - - reg = <0x506a000 0x30000>, - <0xb280000 0x10000>, - <0xb480000 0x10000>; - reg-names = "gmu", "gmu_pdc", "gmu_pdc_seq"; - - interrupts = , - ; - interrupt-names = "hfi", "gmu"; - - clocks = <&gpucc GPU_CC_CX_GMU_CLK>, - <&gpucc GPU_CC_CXO_CLK>, - <&gcc GCC_DDRSS_GPU_AXI_CLK>, - <&gcc GCC_GPU_MEMNOC_GFX_CLK>; - clock-names = "gmu", "cxo", "axi", "memnoc"; - - power-domains = <&gpucc GPU_CX_GDSC>, - <&gpucc GPU_GX_GDSC>; - power-domain-names = "cx", "gx"; - - iommus = <&adreno_smmu 5>; - - operating-points-v2 = <&gmu_opp_table>; - }; -}; - -a3xx example with OCMEM support: - -/ { - ... - - gpu: adreno@fdb00000 { - compatible = "qcom,adreno-330.2", - "qcom,adreno"; - reg = <0xfdb00000 0x10000>; - reg-names = "kgsl_3d0_reg_memory"; - interrupts = ; - interrupt-names = "kgsl_3d0_irq"; - clock-names = "core", - "iface", - "mem_iface"; - clocks = <&mmcc OXILI_GFX3D_CLK>, - <&mmcc OXILICX_AHB_CLK>, - <&mmcc OXILICX_AXI_CLK>; - sram = <&gmu_sram>; - power-domains = <&mmcc OXILICX_GDSC>; - operating-points-v2 = <&gpu_opp_table>; - iommus = <&gpu_iommu 0>; - }; - - ocmem@fdd00000 { - compatible = "qcom,msm8974-ocmem"; - - reg = <0xfdd00000 0x2000>, - <0xfec00000 0x180000>; - reg-names = "ctrl", - "mem"; - - clocks = <&rpmcc RPM_SMD_OCMEMGX_CLK>, - <&mmcc OCMEMCX_OCMEMNOC_CLK>; - clock-names = "core", - "iface"; - - #address-cells = <1>; - #size-cells = <1>; - - gmu_sram: gmu-sram@0 { - reg = <0x0 0x100000>; - ranges = <0 0 0xfec00000 0x100000>; - }; - }; -}; diff --git a/Documentation/devicetree/bindings/display/msm/gmu.yaml b/Documentation/devicetree/bindings/display/msm/gmu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0b8736a9384eb82a612a8ec51e720618bf51fab5 --- /dev/null +++ b/Documentation/devicetree/bindings/display/msm/gmu.yaml @@ -0,0 +1,123 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright 2019-2020, The Linux Foundation, All Rights Reserved +%YAML 1.2 +--- + +$id: "http://devicetree.org/schemas/display/msm/gmu.yaml#" +$schema: "http://devicetree.org/meta-schemas/core.yaml#" + +title: Devicetree bindings for the GMU attached to certain Adreno GPUs + +maintainers: + - Rob Clark + +description: | + These bindings describe the Graphics Management Unit (GMU) that is attached + to members of the Adreno A6xx GPU family. The GMU provides on-device power + management and support to improve power efficiency and reduce the load on + the CPU. 
+ +properties: + compatible: + items: + - enum: + - qcom,adreno-gmu-630.2 + - const: qcom,adreno-gmu + + reg: + items: + - description: Core GMU registers + - description: GMU PDC registers + - description: GMU PDC sequence registers + + reg-names: + items: + - const: gmu + - const: gmu_pdc + - const: gmu_pdc_seq + + clocks: + items: + - description: GMU clock + - description: GPU CX clock + - description: GPU AXI clock + - description: GPU MEMNOC clock + + clock-names: + items: + - const: gmu + - const: cxo + - const: axi + - const: memnoc + + interrupts: + items: + - description: GMU HFI interrupt + - description: GMU interrupt + + + interrupt-names: + items: + - const: hfi + - const: gmu + + power-domains: + items: + - description: CX power domain + - description: GX power domain + + power-domain-names: + items: + - const: cx + - const: gx + + iommus: + maxItems: 1 + + operating-points-v2: true + +required: + - compatible + - reg + - reg-names + - clocks + - clock-names + - interrupts + - interrupt-names + - power-domains + - power-domain-names + - iommus + - operating-points-v2 + +examples: + - | + #include + #include + #include + #include + + gmu: gmu@506a000 { + compatible="qcom,adreno-gmu-630.2", "qcom,adreno-gmu"; + + reg = <0x506a000 0x30000>, + <0xb280000 0x10000>, + <0xb480000 0x10000>; + reg-names = "gmu", "gmu_pdc", "gmu_pdc_seq"; + + clocks = <&gpucc GPU_CC_CX_GMU_CLK>, + <&gpucc GPU_CC_CXO_CLK>, + <&gcc GCC_DDRSS_GPU_AXI_CLK>, + <&gcc GCC_GPU_MEMNOC_GFX_CLK>; + clock-names = "gmu", "cxo", "axi", "memnoc"; + + interrupts = , + ; + interrupt-names = "hfi", "gmu"; + + power-domains = <&gpucc GPU_CX_GDSC>, + <&gpucc GPU_GX_GDSC>; + power-domain-names = "cx", "gx"; + + iommus = <&adreno_smmu 5>; + operating-points-v2 = <&gmu_opp_table>; + }; diff --git a/Documentation/devicetree/bindings/display/msm/gpu.txt b/Documentation/devicetree/bindings/display/msm/gpu.txt index 7edc298a15f297320c70ebda2e8be516775bd74b..fd779cd6994d8e6a1410282ae61c8060c940c8b8 100644 --- a/Documentation/devicetree/bindings/display/msm/gpu.txt +++ b/Documentation/devicetree/bindings/display/msm/gpu.txt @@ -35,25 +35,54 @@ Required properties: bring the GPU out of secure mode. - firmware-name: optional property of the 'zap-shader' node, listing the relative path of the device specific zap firmware. +- sram: phandle to the On Chip Memory (OCMEM) that's present on some a3xx and + a4xx Snapdragon SoCs. See + Documentation/devicetree/bindings/sram/qcom,ocmem.yaml. -Example 3xx/4xx/a5xx: +Example 3xx/4xx: / { ... 
- gpu: qcom,kgsl-3d0@4300000 { - compatible = "qcom,adreno-320.2", "qcom,adreno"; - reg = <0x04300000 0x20000>; + gpu: adreno@fdb00000 { + compatible = "qcom,adreno-330.2", + "qcom,adreno"; + reg = <0xfdb00000 0x10000>; reg-names = "kgsl_3d0_reg_memory"; - interrupts = ; - clock-names = - "core", - "iface", - "mem_iface"; - clocks = - <&mmcc GFX3D_CLK>, - <&mmcc GFX3D_AHB_CLK>, - <&mmcc MMSS_IMEM_AHB_CLK>; + interrupts = ; + interrupt-names = "kgsl_3d0_irq"; + clock-names = "core", + "iface", + "mem_iface"; + clocks = <&mmcc OXILI_GFX3D_CLK>, + <&mmcc OXILICX_AHB_CLK>, + <&mmcc OXILICX_AXI_CLK>; + sram = <&gpu_sram>; + power-domains = <&mmcc OXILICX_GDSC>; + operating-points-v2 = <&gpu_opp_table>; + iommus = <&gpu_iommu 0>; + }; + + gpu_sram: ocmem@fdd00000 { + compatible = "qcom,msm8974-ocmem"; + + reg = <0xfdd00000 0x2000>, + <0xfec00000 0x180000>; + reg-names = "ctrl", + "mem"; + + clocks = <&rpmcc RPM_SMD_OCMEMGX_CLK>, + <&mmcc OCMEMCX_OCMEMNOC_CLK>; + clock-names = "core", + "iface"; + + #address-cells = <1>; + #size-cells = <1>; + + gpu_sram: gpu-sram@0 { + reg = <0x0 0x100000>; + ranges = <0 0 0xfec00000 0x100000>; + }; }; }; diff --git a/Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml b/Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml index 2c9b8aa34815a40543f0e117ebbf78988aae6c3f..fd931b29381670bcf271f926b9543522f06d260c 100644 --- a/Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml +++ b/Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml @@ -37,6 +37,8 @@ examples: dsi { #address-cells = <1>; #size-cells = <0>; + reg = <0xff450000 0x1000>; + panel@0 { compatible = "leadtek,ltk500hd1829"; reg = <0>; diff --git a/Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml b/Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml index 39ec7ae525addf5ee8db395fc35393b825693fb9..d9fdb58e06b4218bc1db4d2b9b8534085903f336 100644 --- a/Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml +++ b/Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml @@ -37,6 +37,8 @@ examples: dsi { #address-cells = <1>; #size-cells = <0>; + reg = <0xff450000 0x1000>; + panel@0 { compatible = "xinpeng,xpp055c272"; reg = <0>; diff --git a/Documentation/devicetree/bindings/display/simple-framebuffer.yaml b/Documentation/devicetree/bindings/display/simple-framebuffer.yaml index 678776b6012a2b3effde9aa4b952fae5ea6b2027..1db608c9eef59a0ad754554aea36d692fbe15fed 100644 --- a/Documentation/devicetree/bindings/display/simple-framebuffer.yaml +++ b/Documentation/devicetree/bindings/display/simple-framebuffer.yaml @@ -174,10 +174,6 @@ examples: }; }; - soc@1c00000 { - lcdc0: lcdc@1c0c000 { - compatible = "allwinner,sun4i-a10-lcdc"; - }; - }; + lcdc0: lcdc { }; ... diff --git a/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt b/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt index 7bf1bb444812adc0a417e7d57e8be0ddbddc28d2..aac617acb64f5ff391be254d078d1b8bbe3aa895 100644 --- a/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt +++ b/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt @@ -37,7 +37,7 @@ Optional nodes: supports a single port with a single endpoint. 
- See also Documentation/devicetree/bindings/display/tilcdc/panel.txt and - Documentation/devicetree/bindings/display/tilcdc/tfp410.txt for connecting + Documentation/devicetree/bindings/display/bridge/ti,tfp410.txt for connecting tfp410 DVI encoder or lcd panel to lcdc [1] There is an errata about AM335x color wiring. For 16-bit color mode diff --git a/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml b/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml index 8b5c346f23f6c7da10bab6bd3978475bc221efc0..34780d7535b8a915a972febb669ec95170886e51 100644 --- a/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml +++ b/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml @@ -143,7 +143,7 @@ examples: #size-cells = <2>; dma-coherent; dma-ranges; - ranges; + ranges = <0x0 0x30800000 0x0 0x30800000 0x0 0x05000000>; ti,sci-dev-id = <118>; @@ -169,16 +169,4 @@ examples: ti,sci-rm-range-rflow = <0x6>; /* GP RFLOW */ }; }; - - mcasp0: mcasp@02B00000 { - dmas = <&main_udmap 0xc400>, <&main_udmap 0x4400>; - dma-names = "tx", "rx"; - }; - - crypto: crypto@4E00000 { - compatible = "ti,sa2ul-crypto"; - - dmas = <&main_udmap 0xc000>, <&main_udmap 0x4000>, <&main_udmap 0x4001>; - dma-names = "tx", "rx1", "rx2"; - }; }; diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml index 4ea6a8789699709d898496353d5de4963507a1f7..e8b99adcb1bd2926c4ffe44faa2c6d6b1bd409d7 100644 --- a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml +++ b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml @@ -84,31 +84,31 @@ examples: gpu_opp_table: opp_table0 { compatible = "operating-points-v2"; - opp@533000000 { + opp-533000000 { opp-hz = /bits/ 64 <533000000>; opp-microvolt = <1250000>; }; - opp@450000000 { + opp-450000000 { opp-hz = /bits/ 64 <450000000>; opp-microvolt = <1150000>; }; - opp@400000000 { + opp-400000000 { opp-hz = /bits/ 64 <400000000>; opp-microvolt = <1125000>; }; - opp@350000000 { + opp-350000000 { opp-hz = /bits/ 64 <350000000>; opp-microvolt = <1075000>; }; - opp@266000000 { + opp-266000000 { opp-hz = /bits/ 64 <266000000>; opp-microvolt = <1025000>; }; - opp@160000000 { + opp-160000000 { opp-hz = /bits/ 64 <160000000>; opp-microvolt = <925000>; }; - opp@100000000 { + opp-100000000 { opp-hz = /bits/ 64 <100000000>; opp-microvolt = <912500>; }; diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-midgard.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-midgard.yaml index 36f59b3ade71dd6737e440e606ca08450de30eab..8d966f3ff3dbd6fdcb8ae446495e93172ffd1f80 100644 --- a/Documentation/devicetree/bindings/gpu/arm,mali-midgard.yaml +++ b/Documentation/devicetree/bindings/gpu/arm,mali-midgard.yaml @@ -138,31 +138,31 @@ examples: gpu_opp_table: opp_table0 { compatible = "operating-points-v2"; - opp@533000000 { + opp-533000000 { opp-hz = /bits/ 64 <533000000>; opp-microvolt = <1250000>; }; - opp@450000000 { + opp-450000000 { opp-hz = /bits/ 64 <450000000>; opp-microvolt = <1150000>; }; - opp@400000000 { + opp-400000000 { opp-hz = /bits/ 64 <400000000>; opp-microvolt = <1125000>; }; - opp@350000000 { + opp-350000000 { opp-hz = /bits/ 64 <350000000>; opp-microvolt = <1075000>; }; - opp@266000000 { + opp-266000000 { opp-hz = /bits/ 64 <266000000>; opp-microvolt = <1025000>; }; - opp@160000000 { + opp-160000000 { opp-hz = /bits/ 64 <160000000>; opp-microvolt = <925000>; }; - opp@100000000 { + opp-100000000 { opp-hz = /bits/ 64 <100000000>; opp-microvolt = <912500>; }; diff --git 
a/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml b/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml index f46de17c08788d185fa7f1dd1aba148178f99a14..cc3c8ea6a894807992ed47eeb1d52ecaca077c7c 100644 --- a/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml +++ b/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml @@ -123,7 +123,7 @@ examples: samsung,syscon-phandle = <&pmu_system_controller>; /* NTC thermistor is a hwmon device */ - ncp15wb473@0 { + ncp15wb473 { compatible = "murata,ncp15wb473"; pullup-uv = <1800000>; pullup-ohm = <47000>; diff --git a/Documentation/devicetree/bindings/input/cypress,tm2-touchkey.txt b/Documentation/devicetree/bindings/input/cypress,tm2-touchkey.txt index ef2ae729718f8d005565b532d0c5c1df4a88c6a5..921172f689b8c740c1987eab904ba8670637b11d 100644 --- a/Documentation/devicetree/bindings/input/cypress,tm2-touchkey.txt +++ b/Documentation/devicetree/bindings/input/cypress,tm2-touchkey.txt @@ -5,6 +5,7 @@ Required properties: * "cypress,tm2-touchkey" - for the touchkey found on the tm2 board * "cypress,midas-touchkey" - for the touchkey found on midas boards * "cypress,aries-touchkey" - for the touchkey found on aries boards + * "coreriver,tc360-touchkey" - for the Coreriver TouchCore 360 touchkey - reg: I2C address of the chip. - interrupts: interrupt to which the chip is connected (see interrupt binding[0]). diff --git a/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml b/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml index d7c3262b249450932fd1e25649e2299561450ae7..c99ed3934d7ee34aac7e416552db0c4bb29849ee 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml +++ b/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml @@ -62,7 +62,7 @@ required: examples: - | - i2c@00000000 { + i2c { #address-cells = <1>; #size-cells = <0>; gt928@5d { diff --git a/Documentation/devicetree/bindings/input/twl4030-pwrbutton.txt b/Documentation/devicetree/bindings/input/twl4030-pwrbutton.txt index c864a46cddcf65ea573a0e7575ff30c7405b9476..f5021214edecb9b5bab54887e1e98cc6bd3ef090 100644 --- a/Documentation/devicetree/bindings/input/twl4030-pwrbutton.txt +++ b/Documentation/devicetree/bindings/input/twl4030-pwrbutton.txt @@ -1,7 +1,7 @@ Texas Instruments TWL family (twl4030) pwrbutton module This module is part of the TWL4030. For more details about the whole -chip see Documentation/devicetree/bindings/mfd/twl-familly.txt. +chip see Documentation/devicetree/bindings/mfd/twl-family.txt. This module provides a simple power button event via an Interrupt. 
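[ Note on the ForEachMacros additions at the top of this patch: clang-format
  only lays a macro invocation out as a loop (opening brace on the same line,
  body indented) when the macro name appears in that list; otherwise it is
  formatted like an ordinary function call. A minimal illustration using a
  toy stand-in for one of the newly listed iterators; the macro body below is
  invented purely so the snippet compiles on its own, the real
  for_each_set_clump8() lives in <linux/bitmap.h>:

	#include <stdio.h>

	/* Toy stand-in macro, for formatting purposes only. */
	#define for_each_set_clump8(start, clump, bits, size) \
		for ((start) = 0, (clump) = (bits); (start) < (size); (start) += 8)

	int main(void)
	{
		unsigned int start, clump, bits = 0xaa, size = 32;

		/* Listed in ForEachMacros, so clang-format keeps the brace
		 * on the macro line, exactly like a for loop: */
		for_each_set_clump8(start, clump, bits, size) {
			printf("clump at bit %u: 0x%x\n", start, clump & 0xff);
		}
		return 0;
	}
]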
diff --git a/Documentation/devicetree/bindings/leds/common.yaml b/Documentation/devicetree/bindings/leds/common.yaml index d97d099b87e5b96ff9d3cabc3f962bf94f432e63..c60b994fe116d8bf8f369ea2aaeb50ec28afdb2b 100644 --- a/Documentation/devicetree/bindings/leds/common.yaml +++ b/Documentation/devicetree/bindings/leds/common.yaml @@ -85,7 +85,7 @@ properties: # LED will act as a back-light, controlled by the framebuffer system - backlight # LED will turn on (but for leds-gpio see "default-state" property in - # Documentation/devicetree/bindings/leds/leds-gpio.txt) + # Documentation/devicetree/bindings/leds/leds-gpio.yaml) - default-on # LED "double" flashes at a load average based rate - heartbeat diff --git a/Documentation/devicetree/bindings/leds/register-bit-led.txt b/Documentation/devicetree/bindings/leds/register-bit-led.txt index cf1ea403ba7aa22f14542c8ceb295475b9f2a574..c7af6f70a97bf50acd57dcb332506b8486322dc9 100644 --- a/Documentation/devicetree/bindings/leds/register-bit-led.txt +++ b/Documentation/devicetree/bindings/leds/register-bit-led.txt @@ -5,7 +5,7 @@ where single bits in a certain register can turn on/off a single LED. The register bit LEDs appear as children to the syscon device, with the proper compatible string. For the syscon bindings see: -Documentation/devicetree/bindings/mfd/syscon.txt +Documentation/devicetree/bindings/mfd/syscon.yaml Each LED is represented as a sub-node of the syscon device. Each node's name represents the name of the corresponding LED. diff --git a/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-csi.yaml b/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-csi.yaml index 9af873b43acd8742ade8ed944e2cfea4ec045369..8453ee340b9fb0abffb19f48c95391bb0e93e9c9 100644 --- a/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-csi.yaml +++ b/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-csi.yaml @@ -33,24 +33,40 @@ properties: maxItems: 1 clocks: - minItems: 2 - maxItems: 3 - items: - - description: The CSI interface clock - - description: The CSI ISP clock - - description: The CSI DRAM clock + oneOf: + - items: + - description: The CSI interface clock + - description: The CSI DRAM clock + + - items: + - description: The CSI interface clock + - description: The CSI ISP clock + - description: The CSI DRAM clock clock-names: - minItems: 2 - maxItems: 3 - items: - - const: bus - - const: isp - - const: ram + oneOf: + - items: + - const: bus + - const: ram + + - items: + - const: bus + - const: isp + - const: ram resets: maxItems: 1 + # FIXME: This should be made required eventually once every SoC will + # have the MBUS declared. + interconnects: + maxItems: 1 + + # FIXME: This should be made required eventually once every SoC will + # have the MBUS declared. 
+ interconnect-names: + const: dma-mem + # See ./video-interfaces.txt for details port: type: object diff --git a/Documentation/devicetree/bindings/media/ti,cal.yaml b/Documentation/devicetree/bindings/media/ti,cal.yaml index 1ea78417953649fad0700fc3b429f4e15f840309..5e066629287d4de310094b60d3104ee65e99f48a 100644 --- a/Documentation/devicetree/bindings/media/ti,cal.yaml +++ b/Documentation/devicetree/bindings/media/ti,cal.yaml @@ -177,7 +177,7 @@ examples: }; }; - i2c5: i2c@4807c000 { + i2c { clock-frequency = <400000>; #address-cells = <1>; #size-cells = <0>; diff --git a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra124-emc.yaml b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra124-emc.yaml index dd1843489ad15075fa2b31c48fa00e18d40ae032..3e0a8a92d6529ef6f2ed0901b9264e1496f7d3b8 100644 --- a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra124-emc.yaml +++ b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra124-emc.yaml @@ -347,6 +347,7 @@ examples: interrupts = ; #iommu-cells = <1>; + #reset-cells = <1>; }; external-memory-controller@7001b000 { @@ -363,20 +364,23 @@ examples: timing-0 { clock-frequency = <12750000>; - nvidia,emc-zcal-cnt-long = <0x00000042>; - nvidia,emc-auto-cal-interval = <0x001fffff>; - nvidia,emc-ctt-term-ctrl = <0x00000802>; - nvidia,emc-cfg = <0x73240000>; - nvidia,emc-cfg-2 = <0x000008c5>; - nvidia,emc-sel-dpd-ctrl = <0x00040128>; - nvidia,emc-bgbias-ctl0 = <0x00000008>; nvidia,emc-auto-cal-config = <0xa1430000>; nvidia,emc-auto-cal-config2 = <0x00000000>; nvidia,emc-auto-cal-config3 = <0x00000000>; - nvidia,emc-mode-reset = <0x80001221>; + nvidia,emc-auto-cal-interval = <0x001fffff>; + nvidia,emc-bgbias-ctl0 = <0x00000008>; + nvidia,emc-cfg = <0x73240000>; + nvidia,emc-cfg-2 = <0x000008c5>; + nvidia,emc-ctt-term-ctrl = <0x00000802>; nvidia,emc-mode-1 = <0x80100003>; nvidia,emc-mode-2 = <0x80200008>; nvidia,emc-mode-4 = <0x00000000>; + nvidia,emc-mode-reset = <0x80001221>; + nvidia,emc-mrs-wait-cnt = <0x000e000e>; + nvidia,emc-sel-dpd-ctrl = <0x00040128>; + nvidia,emc-xm2dqspadctrl2 = <0x0130b118>; + nvidia,emc-zcal-cnt-long = <0x00000042>; + nvidia,emc-zcal-interval = <0x00000000>; nvidia,emc-configuration = < 0x00000000 /* EMC_RC */ diff --git a/Documentation/devicetree/bindings/memory-controllers/ti/emif.txt b/Documentation/devicetree/bindings/memory-controllers/ti/emif.txt index 44d71469c9148001df7ed926d54096f09c109e9f..63f674ffeb4f432699f2a5c60c9dbbacf5c166ab 100644 --- a/Documentation/devicetree/bindings/memory-controllers/ti/emif.txt +++ b/Documentation/devicetree/bindings/memory-controllers/ti/emif.txt @@ -32,7 +32,7 @@ Required only for "ti,emif-am3352" and "ti,emif-am4372": - sram : Phandles for generic sram driver nodes, first should be type 'protect-exec' for the driver to use to copy and run PM functions, second should be regular pool to be used for - data region for code. See Documentation/devicetree/bindings/sram/sram.txt + data region for code. See Documentation/devicetree/bindings/sram/sram.yaml for more details. 
Optional properties: diff --git a/Documentation/devicetree/bindings/mfd/max77650.yaml b/Documentation/devicetree/bindings/mfd/max77650.yaml index 4a70f875a6eb9ff6163d1cbcc9ba7898eed0891d..4803857893942a420baaf525ed2e687a2dacc6ca 100644 --- a/Documentation/devicetree/bindings/mfd/max77650.yaml +++ b/Documentation/devicetree/bindings/mfd/max77650.yaml @@ -97,14 +97,14 @@ examples: regulators { compatible = "maxim,max77650-regulator"; - max77650_ldo: regulator@0 { + max77650_ldo: regulator-ldo { regulator-compatible = "ldo"; regulator-name = "max77650-ldo"; regulator-min-microvolt = <1350000>; regulator-max-microvolt = <2937500>; }; - max77650_sbb0: regulator@1 { + max77650_sbb0: regulator-sbb0 { regulator-compatible = "sbb0"; regulator-name = "max77650-sbb0"; regulator-min-microvolt = <800000>; diff --git a/Documentation/devicetree/bindings/mfd/tps65910.txt b/Documentation/devicetree/bindings/mfd/tps65910.txt index 4f62143afd240d7bd63010639f5635e6633cadc4..a5ced46bbde97e1d59080924fd0bc1aa43f102d7 100644 --- a/Documentation/devicetree/bindings/mfd/tps65910.txt +++ b/Documentation/devicetree/bindings/mfd/tps65910.txt @@ -26,8 +26,8 @@ Required properties: ldo6, ldo7, ldo8 - xxx-supply: Input voltage supply regulator. - These entries are require if regulators are enabled for a device. Missing of these - properties can cause the regulator registration fails. + These entries are required if regulators are enabled for a device. Missing these + properties can cause the regulator registration to fail. If some of input supply is powered through battery or always-on supply then also it is require to have these parameters with proper node handle of always on power supply. diff --git a/Documentation/devicetree/bindings/mfd/twl-familly.txt b/Documentation/devicetree/bindings/mfd/twl-family.txt similarity index 100% rename from Documentation/devicetree/bindings/mfd/twl-familly.txt rename to Documentation/devicetree/bindings/mfd/twl-family.txt diff --git a/Documentation/devicetree/bindings/mfd/zii,rave-sp.txt b/Documentation/devicetree/bindings/mfd/zii,rave-sp.txt index 088eff9ddb786cdbdf0a26ea3d9fed27f048b021..e0f901edc063563c28a26e70c3de1f3aebcd3847 100644 --- a/Documentation/devicetree/bindings/mfd/zii,rave-sp.txt +++ b/Documentation/devicetree/bindings/mfd/zii,rave-sp.txt @@ -20,7 +20,7 @@ RAVE SP consists of the following sub-devices: Device Description ------ ----------- rave-sp-wdt : Watchdog -rave-sp-nvmem : Interface to onborad EEPROM +rave-sp-nvmem : Interface to onboard EEPROM rave-sp-backlight : Display backlight rave-sp-hwmon : Interface to onboard hardware sensors rave-sp-leds : Interface to onboard LEDs diff --git a/Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt b/Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt index bb7e896cb644eb97831f234e19d19b6a82b6310e..9134e9bcca56690076f6d7eba1445cee486618ec 100644 --- a/Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt +++ b/Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt @@ -26,7 +26,7 @@ For generic IOMMU bindings, see Documentation/devicetree/bindings/iommu/iommu.txt. For arm-smmu binding, see: -Documentation/devicetree/bindings/iommu/arm,smmu.txt. +Documentation/devicetree/bindings/iommu/arm,smmu.yaml. 
Required properties: diff --git a/Documentation/devicetree/bindings/mmc/mmc-controller.yaml b/Documentation/devicetree/bindings/mmc/mmc-controller.yaml index 3c0df4016a127c393e1dda89676ab942b53aff54..8fded83c519adc57f7de2da1a48ec4be043a265f 100644 --- a/Documentation/devicetree/bindings/mmc/mmc-controller.yaml +++ b/Documentation/devicetree/bindings/mmc/mmc-controller.yaml @@ -370,6 +370,7 @@ examples: mmc3: mmc@1c12000 { #address-cells = <1>; #size-cells = <0>; + reg = <0x1c12000 0x200>; pinctrl-names = "default"; pinctrl-0 = <&mmc3_pins_a>; vmmc-supply = <®_vmmc3>; diff --git a/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt b/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt index 19f5508a75696b722624c550700ee74f72b2362f..4a9145ef15d6b4a4485649eb85757445aa0bffb6 100644 --- a/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt +++ b/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt @@ -124,7 +124,7 @@ not every application needs SDIO irq, e.g. MMC cards. pinctrl-1 = <&mmc1_idle>; pinctrl-2 = <&mmc1_sleep>; ... - interrupts-extended = <&intc 64 &gpio2 28 GPIO_ACTIVE_LOW>; + interrupts-extended = <&intc 64 &gpio2 28 IRQ_TYPE_LEVEL_LOW>; }; mmc1_idle : pinmux_cirq_pin { diff --git a/Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt b/Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt index f3893c4d3c6a96952deb602897c750c452e29ebd..d2eada5044b2410fe36bf231b8abfdebb56f13e2 100644 --- a/Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt +++ b/Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt @@ -27,7 +27,7 @@ Required properties of NAND chips: - reg: shall contain the native Chip Select ids from 0 to max supported by the cadence nand flash controller -See Documentation/devicetree/bindings/mtd/nand.txt for more details on +See Documentation/devicetree/bindings/mtd/nand-controller.yaml for more details on generic bindings. Example: diff --git a/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt index 48a7f916c5e4e21e9c1b154b7cc7697ad0e5b608..88b57b0ca1f49ecea05a528d6be881e700c0cd2c 100644 --- a/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt +++ b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt @@ -45,7 +45,7 @@ Optional properties: switch queue - resets: a single phandle and reset identifier pair. See - Documentation/devicetree/binding/reset/reset.txt for details. + Documentation/devicetree/bindings/reset/reset.txt for details. - reset-names: If the "reset" property is specified, this property should have the value "switch" to denote the switch reset line. diff --git a/Documentation/devicetree/bindings/net/fsl-fman.txt b/Documentation/devicetree/bindings/net/fsl-fman.txt index 250f8d8cdce4bc17d0acc1bdb3ee2bb7ecf3fb34..c00fb0d22c7b2355f47482d1fa93a2c118bacee3 100644 --- a/Documentation/devicetree/bindings/net/fsl-fman.txt +++ b/Documentation/devicetree/bindings/net/fsl-fman.txt @@ -110,6 +110,13 @@ PROPERTIES Usage: required Definition: See soc/fsl/qman.txt and soc/fsl/bman.txt +- fsl,erratum-a050385 + Usage: optional + Value type: boolean + Definition: A boolean property. Indicates the presence of the + erratum A050385 which indicates that DMA transactions that are + split can result in a FMan lock. 
+ ============================================================================= FMan MURAM Node diff --git a/Documentation/devicetree/bindings/net/mdio.yaml b/Documentation/devicetree/bindings/net/mdio.yaml index 5d08d2ffd4ebcc0af0d542fa2d00700f5382b555..50c3397a82bc4cea8efc364827c4fefb0e0838f8 100644 --- a/Documentation/devicetree/bindings/net/mdio.yaml +++ b/Documentation/devicetree/bindings/net/mdio.yaml @@ -56,7 +56,6 @@ patternProperties: examples: - | davinci_mdio: mdio@5c030000 { - compatible = "ti,davinci_mdio"; reg = <0x5c030000 0x1000>; #address-cells = <1>; #size-cells = <0>; diff --git a/Documentation/devicetree/bindings/nvmem/nvmem.yaml b/Documentation/devicetree/bindings/nvmem/nvmem.yaml index b43c6c65294edd6f51c336b2555d2d938bb80160..65980224d550e773db3baa8cb960ddb4325f11f7 100644 --- a/Documentation/devicetree/bindings/nvmem/nvmem.yaml +++ b/Documentation/devicetree/bindings/nvmem/nvmem.yaml @@ -76,6 +76,8 @@ examples: qfprom: eeprom@700000 { #address-cells = <1>; #size-cells = <1>; + reg = <0x00700000 0x100000>; + wp-gpios = <&gpio1 3 GPIO_ACTIVE_HIGH>; /* ... */ diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun4i-a10-usb-phy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun4i-a10-usb-phy.yaml index 020ef9e4c41194e0a7d83cc4fc4bb0f546c12643..94ac23687b7eba99d846fdadc57acdd6c57cac6d 100644 --- a/Documentation/devicetree/bindings/phy/allwinner,sun4i-a10-usb-phy.yaml +++ b/Documentation/devicetree/bindings/phy/allwinner,sun4i-a10-usb-phy.yaml @@ -86,7 +86,7 @@ examples: #include #include - usbphy: phy@01c13400 { + usbphy: phy@1c13400 { #phy-cells = <1>; compatible = "allwinner,sun4i-a10-usb-phy"; reg = <0x01c13400 0x10>, <0x01c14800 0x4>, <0x01c1c800 0x4>; diff --git a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml index bb690e20c36800d16a722ba633c5e08d404ac543..135c7dfbc1800f378ad564e3f7e0d7c9744fe86e 100644 --- a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml +++ b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml @@ -17,7 +17,7 @@ description: |+ "aspeed,ast2400-scu", "syscon", "simple-mfd" Refer to the the bindings described in - Documentation/devicetree/bindings/mfd/syscon.txt + Documentation/devicetree/bindings/mfd/syscon.yaml properties: compatible: diff --git a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml index f7f5d57f2c9adc70d99056779f9070e8f7b2058b..824f7fd1d51bf735bd496c4736dde06617f38b34 100644 --- a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml +++ b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml @@ -18,7 +18,7 @@ description: |+ "aspeed,g5-scu", "syscon", "simple-mfd" Refer to the the bindings described in - Documentation/devicetree/bindings/mfd/syscon.txt + Documentation/devicetree/bindings/mfd/syscon.yaml properties: compatible: diff --git a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml index 3749fa233e87f995dc639e950b1b4dfd01713564..ac8d1c30a8ed75fc8e85b67a934b037c225ba8d6 100644 --- a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml +++ b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml @@ -17,7 +17,7 @@ description: |+ "aspeed,ast2600-scu", "syscon", "simple-mfd" Refer to the the bindings described in - 
Documentation/devicetree/bindings/mfd/syscon.txt + Documentation/devicetree/bindings/mfd/syscon.yaml properties: compatible: diff --git a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml index 754ea7ab040ab03f506ad96659031b552b925a4f..ef4de32cb17cca1ec9214a109a0cb9223ba67e06 100644 --- a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml +++ b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml @@ -248,7 +248,7 @@ examples: }; //Example 3 pin groups - pinctrl@60020000 { + pinctrl { usart1_pins_a: usart1-0 { pins1 { pinmux = ; diff --git a/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml b/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml index aab70e8b681e489bb5213747b607bc77ef3a30a6..d3098c924b25e308d1eeb7341dbaaf16340b4ae9 100644 --- a/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml +++ b/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml @@ -18,7 +18,7 @@ description: |+ "amlogic,meson-gx-hhi-sysctrl", "simple-mfd", "syscon" Refer to the the bindings described in - Documentation/devicetree/bindings/mfd/syscon.txt + Documentation/devicetree/bindings/mfd/syscon.yaml properties: compatible: diff --git a/Documentation/devicetree/bindings/power/domain-idle-state.txt b/Documentation/devicetree/bindings/power/domain-idle-state.txt deleted file mode 100644 index eefc7ed22ca269129b142a7bea78e568a73e9bdc..0000000000000000000000000000000000000000 --- a/Documentation/devicetree/bindings/power/domain-idle-state.txt +++ /dev/null @@ -1,33 +0,0 @@ -PM Domain Idle State Node: - -A domain idle state node represents the state parameters that will be used to -select the state when there are no active components in the domain. - -The state node has the following parameters - - -- compatible: - Usage: Required - Value type: - Definition: Must be "domain-idle-state". - -- entry-latency-us - Usage: Required - Value type: - Definition: u32 value representing worst case latency in - microseconds required to enter the idle state. - The exit-latency-us duration may be guaranteed - only after entry-latency-us has passed. - -- exit-latency-us - Usage: Required - Value type: - Definition: u32 value representing worst case latency - in microseconds required to exit the idle state. - -- min-residency-us - Usage: Required - Value type: - Definition: u32 value representing minimum residency duration - in microseconds after which the idle state will yield - power benefits after overcoming the overhead in entering -i the idle state. diff --git a/Documentation/devicetree/bindings/power/domain-idle-state.yaml b/Documentation/devicetree/bindings/power/domain-idle-state.yaml new file mode 100644 index 0000000000000000000000000000000000000000..dfba1af9abe55b3d963bdcc8fd786f634715f1bd --- /dev/null +++ b/Documentation/devicetree/bindings/power/domain-idle-state.yaml @@ -0,0 +1,64 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/power/domain-idle-state.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: PM Domain Idle States binding description + +maintainers: + - Ulf Hansson + +description: + A domain idle state node represents the state parameters that will be used to + select the state when there are no active components in the PM domain. 
+ +properties: + $nodename: + const: domain-idle-states + +patternProperties: + "^(cpu|cluster|domain)-": + type: object + description: + Each state node represents a domain idle state description. + + properties: + compatible: + const: domain-idle-state + + entry-latency-us: + description: + The worst case latency in microseconds required to enter the idle + state. Note that, the exit-latency-us duration may be guaranteed only + after the entry-latency-us has passed. + + exit-latency-us: + description: + The worst case latency in microseconds required to exit the idle + state. + + min-residency-us: + description: + The minimum residency duration in microseconds after which the idle + state will yield power benefits, after overcoming the overhead while + entering the idle state. + + required: + - compatible + - entry-latency-us + - exit-latency-us + - min-residency-us + +examples: + - | + + domain-idle-states { + domain_retention: domain-retention { + compatible = "domain-idle-state"; + entry-latency-us = <20>; + exit-latency-us = <40>; + min-residency-us = <80>; + }; + }; +... diff --git a/Documentation/devicetree/bindings/power/power-domain.yaml b/Documentation/devicetree/bindings/power/power-domain.yaml index 455b573293aed46fadd0a3499eccdc9271e35630..6047aacd77667b16e1bba2a68f5d0ceb1236de08 100644 --- a/Documentation/devicetree/bindings/power/power-domain.yaml +++ b/Documentation/devicetree/bindings/power/power-domain.yaml @@ -25,22 +25,20 @@ properties: $nodename: - pattern: "^(power-controller|power-domain)(@.*)?$" + pattern: "^(power-controller|power-domain)([@-].*)?$" domain-idle-states: $ref: /schemas/types.yaml#/definitions/phandle-array - description: - A phandle of an idle-state that shall be soaked into a generic domain - power state. The idle state definitions are compatible with - domain-idle-state specified in - Documentation/devicetree/bindings/power/domain-idle-state.txt - phandles that are not compatible with domain-idle-state will be ignored. - The domain-idle-state property reflects the idle state of this PM domain - and not the idle states of the devices or sub-domains in the PM domain. - Devices and sub-domains have their own idle-states independent - of the parent domain's idle states. In the absence of this property, - the domain would be considered as capable of being powered-on - or powered-off. + description: | + Phandles of idle states that define the available states for the + power-domain provider. The idle state definitions are compatible with the + domain-idle-state bindings, specified in ./domain-idle-state.yaml. + + Note that, the domain-idle-state property reflects the idle states of this + PM domain and not the idle states of the devices or sub-domains in the PM + domain. Devices and sub-domains have their own idle states independent of + the parent domain's idle states. In the absence of this property, the + domain would be considered as capable of being powered-on or powered-off. operating-points-v2: $ref: /schemas/types.yaml#/definitions/phandle-array diff --git a/Documentation/devicetree/bindings/power/power_domain.txt b/Documentation/devicetree/bindings/power/power_domain.txt index 5b09b2deb4833069273823d4f0e4ccec81f2b72e..08497ef26c7aabf49787ee36f434ef46bf00e6cd 100644 --- a/Documentation/devicetree/bindings/power/power_domain.txt +++ b/Documentation/devicetree/bindings/power/power_domain.txt @@ -109,4 +109,4 @@ Example: required-opps = <&domain1_opp_1>; }; -[1]. Documentation/devicetree/bindings/power/domain-idle-state.txt +[1].
Documentation/devicetree/bindings/power/domain-idle-state.yaml diff --git a/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt b/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt index f5cdac8b284710055ef5ccf57ffec590e18baa8f..8b005192f6e89b9ba8847ffcf72a3851bf630444 100644 --- a/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt +++ b/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt @@ -161,7 +161,7 @@ The regulator node houses sub-nodes for each regulator within the device. Each sub-node is identified using the node's name, with valid values listed for each of the PMICs below. -pm8005: +pm8004: s2, s5 pm8005: diff --git a/Documentation/devicetree/bindings/regulator/regulator.yaml b/Documentation/devicetree/bindings/regulator/regulator.yaml index 92ff2e8ad572ba47517e6d38a2656aa4a7e50000..91a39a33000b17fa0a15d1484c5e21b1932d279a 100644 --- a/Documentation/devicetree/bindings/regulator/regulator.yaml +++ b/Documentation/devicetree/bindings/regulator/regulator.yaml @@ -191,7 +191,7 @@ patternProperties: examples: - | - xyzreg: regulator@0 { + xyzreg: regulator { regulator-min-microvolt = <1000000>; regulator-max-microvolt = <2500000>; regulator-always-on; diff --git a/Documentation/devicetree/bindings/reset/intel,rcu-gw.yaml b/Documentation/devicetree/bindings/reset/intel,rcu-gw.yaml index 246dea8a2ec95874f7d05c98806d2ccdc3c47e48..8ac4372826591e8a9d03664b839824b117382411 100644 --- a/Documentation/devicetree/bindings/reset/intel,rcu-gw.yaml +++ b/Documentation/devicetree/bindings/reset/intel,rcu-gw.yaml @@ -23,7 +23,11 @@ properties: description: Global reset register offset and bit offset. allOf: - $ref: /schemas/types.yaml#/definitions/uint32-array - - maxItems: 2 + items: + - description: Register offset + - description: Register bit offset + minimum: 0 + maximum: 31 "#reset-cells": minimum: 2 diff --git a/Documentation/devicetree/bindings/reset/st,stm32mp1-rcc.txt b/Documentation/devicetree/bindings/reset/st,stm32mp1-rcc.txt index b4edaf7c7ff3d77f11ecd4bb8645ab735a5bab6a..2880d5dda95e8326c561fb6347b3336d5d10c4b5 100644 --- a/Documentation/devicetree/bindings/reset/st,stm32mp1-rcc.txt +++ b/Documentation/devicetree/bindings/reset/st,stm32mp1-rcc.txt @@ -3,4 +3,4 @@ STMicroelectronics STM32MP1 Peripheral Reset Controller The RCC IP is both a reset and a clock controller. -Please see Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.txt +Please see Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.yaml diff --git a/Documentation/devicetree/bindings/sound/st,stm32-sai.txt b/Documentation/devicetree/bindings/sound/st,stm32-sai.txt index 944743dd9212550135242a6ad374edf0fbcff501..c42b91e525fa2d9105921fd0f93aa663ace9d39f 100644 --- a/Documentation/devicetree/bindings/sound/st,stm32-sai.txt +++ b/Documentation/devicetree/bindings/sound/st,stm32-sai.txt @@ -36,7 +36,7 @@ SAI subnodes required properties: - clock-names: Must contain "sai_ck". Must also contain "MCLK", if SAI shares a master clock, with a SAI set as MCLK clock provider. 
- - dmas: see Documentation/devicetree/bindings/dma/stm32-dma.txt + - dmas: see Documentation/devicetree/bindings/dma/st,stm32-dma.yaml - dma-names: identifier string for each DMA request line "tx": if sai sub-block is configured as playback DAI "rx": if sai sub-block is configured as capture DAI diff --git a/Documentation/devicetree/bindings/sound/st,stm32-spdifrx.txt b/Documentation/devicetree/bindings/sound/st,stm32-spdifrx.txt index 33826f2459fa80215753f918f732056b0fecf322..ca9101777c44b19a6d85e075e93691ab6e06b7ce 100644 --- a/Documentation/devicetree/bindings/sound/st,stm32-spdifrx.txt +++ b/Documentation/devicetree/bindings/sound/st,stm32-spdifrx.txt @@ -10,7 +10,7 @@ Required properties: - clock-names: must contain "kclk" - interrupts: cpu DAI interrupt line - dmas: DMA specifiers for audio data DMA and iec control flow DMA - See STM32 DMA bindings, Documentation/devicetree/bindings/dma/stm32-dma.txt + See STM32 DMA bindings, Documentation/devicetree/bindings/dma/st,stm32-dma.yaml - dma-names: two dmas have to be defined, "rx" and "rx-ctrl" Optional properties: diff --git a/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml b/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml index f0d979664f0763429d0feefe1ccbb7f84ff6bbb5..e49ecbf715ba0814948e240d92e5892bd9a891ad 100644 --- a/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml +++ b/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml @@ -49,7 +49,7 @@ properties: dmas: description: | DMA specifiers for tx and rx dma. DMA fifo mode must be used. See - the STM32 DMA bindings Documentation/devicetree/bindings/dma/stm32-dma.txt. + the STM32 DMA bindings Documentation/devicetree/bindings/dma/st,stm32-dma.yaml. items: - description: rx DMA channel - description: tx DMA channel diff --git a/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml b/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml index 80bac7a182d571c8d0d448a1a0dda1ea51571955..4b550943658879c66d3f087e5f3b41137d8683a9 100644 --- a/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml +++ b/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml @@ -125,7 +125,7 @@ examples: #size-cells = <1>; ranges; - sram_a: sram@00000000 { + sram_a: sram@0 { compatible = "mmio-sram"; reg = <0x00000000 0xc000>; #address-cells = <1>; diff --git a/Documentation/devicetree/bindings/thermal/brcm,avs-ro-thermal.yaml b/Documentation/devicetree/bindings/thermal/brcm,avs-ro-thermal.yaml index d9fdf4809a4971efd0dd550da7cdf2cf5b984ed7..f3e68ed03abf89d290fba1016363959140bf56fd 100644 --- a/Documentation/devicetree/bindings/thermal/brcm,avs-ro-thermal.yaml +++ b/Documentation/devicetree/bindings/thermal/brcm,avs-ro-thermal.yaml @@ -17,7 +17,7 @@ description: |+ "brcm,bcm2711-avs-monitor", "syscon", "simple-mfd" Refer to the the bindings described in - Documentation/devicetree/bindings/mfd/syscon.txt + Documentation/devicetree/bindings/mfd/syscon.yaml properties: compatible: diff --git a/Documentation/devicetree/bindings/timer/allwinner,sun4i-a10-timer.yaml b/Documentation/devicetree/bindings/timer/allwinner,sun4i-a10-timer.yaml index 23e989e0976630382d70ab5ddaeeb3b4c3542b61..d918cee100aced4e31c4a0dddf5ec176ade12460 100644 --- a/Documentation/devicetree/bindings/timer/allwinner,sun4i-a10-timer.yaml +++ b/Documentation/devicetree/bindings/timer/allwinner,sun4i-a10-timer.yaml @@ -87,7 +87,7 @@ additionalProperties: false examples: - | - timer { + timer@1c20c00 { compatible = 
"allwinner,sun4i-a10-timer"; reg = <0x01c20c00 0x400>; interrupts = <22>, diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml index a2da166df1bc7ae51640422d1640fc1ca1be1950..70fbc45b2a3ea21aa5e0476bab5b691a65d9b698 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.yaml +++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml @@ -205,6 +205,8 @@ patternProperties: description: Colorful GRP, Shenzhen Xueyushi Technology Ltd. "^compulab,.*": description: CompuLab Ltd. + "^coreriver,.*": + description: CORERIVER Semiconductor Co.,Ltd. "^corpro,.*": description: Chengdu Corpro Technology Co., Ltd. "^cortina,.*": diff --git a/Documentation/driver-api/dmaengine/client.rst b/Documentation/driver-api/dmaengine/client.rst index e5953e7e4bf429a20afd44234efa48df8e2c1003..2104830a99aeff529f54df726db45451075ea86c 100644 --- a/Documentation/driver-api/dmaengine/client.rst +++ b/Documentation/driver-api/dmaengine/client.rst @@ -151,8 +151,8 @@ The details of these operations are: Note that callbacks will always be invoked from the DMA engines tasklet, never from interrupt context. -Optional: per descriptor metadata ---------------------------------- + **Optional: per descriptor metadata** + DMAengine provides two ways for metadata support. DESC_METADATA_CLIENT @@ -199,12 +199,15 @@ Optional: per descriptor metadata DESC_METADATA_CLIENT - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM: + 1. prepare the descriptor (dmaengine_prep_*) construct the metadata in the client's buffer 2. use dmaengine_desc_attach_metadata() to attach the buffer to the descriptor 3. submit the transfer + - DMA_DEV_TO_MEM: + 1. prepare the descriptor (dmaengine_prep_*) 2. use dmaengine_desc_attach_metadata() to attach the buffer to the descriptor @@ -215,6 +218,7 @@ Optional: per descriptor metadata DESC_METADATA_ENGINE - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM: + 1. prepare the descriptor (dmaengine_prep_*) 2. use dmaengine_desc_get_metadata_ptr() to get the pointer to the engine's metadata area @@ -222,7 +226,9 @@ Optional: per descriptor metadata 4. use dmaengine_desc_set_metadata_len() to tell the DMA engine the amount of data the client has placed into the metadata buffer 5. submit the transfer + - DMA_DEV_TO_MEM: + 1. prepare the descriptor (dmaengine_prep_*) 2. submit the transfer 3. on transfer completion, use dmaengine_desc_get_metadata_ptr() to get @@ -278,8 +284,8 @@ Optional: per descriptor metadata void dma_async_issue_pending(struct dma_chan *chan); -Further APIs: -------------- +Further APIs +------------ 1. Terminate APIs diff --git a/Documentation/driver-api/dmaengine/provider.rst b/Documentation/driver-api/dmaengine/provider.rst index 790a15089f1f3d8f47917283a632b5b866f69848..56e5833e8a07bc0ec6d9c34acec077ab1959740e 100644 --- a/Documentation/driver-api/dmaengine/provider.rst +++ b/Documentation/driver-api/dmaengine/provider.rst @@ -266,11 +266,15 @@ to use. attached (via the dmaengine_desc_attach_metadata() helper to the descriptor. From the DMA driver the following is expected for this mode: + - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM + The data from the provided metadata buffer should be prepared for the DMA controller to be sent alongside of the payload data. Either by copying to a hardware descriptor, or highly coupled packet. + - DMA_DEV_TO_MEM + On transfer completion the DMA driver must copy the metadata to the client provided metadata buffer before notifying the client about the completion. 
After the transfer completion, DMA drivers must not touch the metadata @@ -284,10 +288,14 @@ to use. and dmaengine_desc_set_metadata_len() is provided as helper functions. From the DMA driver the following is expected for this mode: - - get_metadata_ptr + + - get_metadata_ptr() + Should return a pointer for the metadata buffer, the maximum size of the metadata buffer and the currently used / valid (if any) bytes in the buffer. - - set_metadata_len + + - set_metadata_len() + It is called by the clients after it have placed the metadata to the buffer to let the DMA driver know the number of valid bytes provided. diff --git a/Documentation/filesystems/debugfs.txt b/Documentation/filesystems/debugfs.txt index dc497b96fa4ff54dc6b84cbf2951391a369473c0..55336a47a110c679dbcb26ec70f95e6be74a7d56 100644 --- a/Documentation/filesystems/debugfs.txt +++ b/Documentation/filesystems/debugfs.txt @@ -164,9 +164,9 @@ file. void __iomem *base; }; - struct dentry *debugfs_create_regset32(const char *name, umode_t mode, - struct dentry *parent, - struct debugfs_regset32 *regset); + debugfs_create_regset32(const char *name, umode_t mode, + struct dentry *parent, + struct debugfs_regset32 *regset); void debugfs_print_regs32(struct seq_file *s, struct debugfs_reg32 *regs, int nregs, void __iomem *base, char *prefix); diff --git a/Documentation/filesystems/porting.rst b/Documentation/filesystems/porting.rst index f18506083ced831cf0ceef4870d964144152cd99..26c093969573639fc815803aa275d609057c7bfc 100644 --- a/Documentation/filesystems/porting.rst +++ b/Documentation/filesystems/porting.rst @@ -850,3 +850,11 @@ business doing so. d_alloc_pseudo() is internal-only; uses outside of alloc_file_pseudo() are very suspect (and won't work in modules). Such uses are very likely to be misspelled d_alloc_anon(). + +--- + +**mandatory** + +[should've been added in 2016] stale comment in finish_open() notwithstanding, +failure exits in ->atomic_open() instances should *NOT* fput() the file, +no matter what. Everything is handled by the caller. diff --git a/Documentation/filesystems/zonefs.txt b/Documentation/filesystems/zonefs.txt index 935bf22031ca1ce8ea2df2cfc8cd6cb6a33d672b..78813c34ec475b3bb8b7ef7a07e6022e9146ccde 100644 --- a/Documentation/filesystems/zonefs.txt +++ b/Documentation/filesystems/zonefs.txt @@ -134,7 +134,7 @@ Sequential zone files can only be written sequentially, starting from the file end, that is, write operations can only be append writes. Zonefs makes no attempt at accepting random writes and will fail any write request that has a start offset not corresponding to the end of the file, or to the end of the last -write issued and still in-flight (for asynchrnous I/O operations). +write issued and still in-flight (for asynchronous I/O operations). Since dirty page writeback by the page cache does not guarantee a sequential write pattern, zonefs prevents buffered writes and writeable shared mappings @@ -142,7 +142,7 @@ on sequential files. Only direct I/O writes are accepted for these files. zonefs relies on the sequential delivery of write I/O requests to the device implemented by the block layer elevator. An elevator implementing the sequential write feature for zoned block device (ELEVATOR_F_ZBD_SEQ_WRITE elevator feature) -must be used. This type of elevator (e.g. mq-deadline) is the set by default +must be used. This type of elevator (e.g. mq-deadline) is set by default for zoned block devices on device initialization.
There are no restrictions on the type of I/O used for read operations in @@ -196,7 +196,7 @@ additional conditions that result in I/O errors. may still happen in the case of a partial failure of a very large direct I/O operation split into multiple BIOs/requests or asynchronous I/O operations. If one of the write request within the set of sequential write requests - issued to the device fails, all write requests after queued after it will + issued to the device fails, all write requests queued after it will become unaligned and fail. * Delayed write errors: similarly to regular block devices, if the device side @@ -207,7 +207,7 @@ additional conditions that result in I/O errors. causing all data to be dropped after the sector that caused the error. All I/O errors detected by zonefs are notified to the user with an error code -return for the system call that trigered or detected the error. The recovery +return for the system call that triggered or detected the error. The recovery actions taken by zonefs in response to I/O errors depend on the I/O type (read vs write) and on the reason for the error (bad sector, unaligned writes or zone condition change). @@ -222,7 +222,7 @@ condition change). * A zone condition change to read-only or offline also always triggers zonefs I/O error recovery. -Zonefs minimal I/O error recovery may change a file size and a file access +Zonefs minimal I/O error recovery may change a file size and file access permissions. * File size changes: @@ -237,7 +237,7 @@ permissions. A file size may also be reduced to reflect a delayed write error detected on fsync(): in this case, the amount of data effectively written in the zone may be less than originally indicated by the file inode size. After such I/O - error, zonefs always fixes a file inode size to reflect the amount of data + error, zonefs always fixes the file inode size to reflect the amount of data persistently stored in the file zone. * Access permission changes: @@ -258,11 +258,11 @@ conditions. | option | condition | size read write read write | +--------------+-----------+-----------------------------------------+ | | good | fixed yes no yes yes | - | remount-ro | read-only | fixed yes no yes no | + | remount-ro | read-only | as is yes no yes no | | (default) | offline | 0 no no no no | +--------------+-----------+-----------------------------------------+ | | good | fixed yes no yes yes | - | zone-ro | read-only | fixed yes no yes no | + | zone-ro | read-only | as is yes no yes no | | | offline | 0 no no no no | +--------------+-----------+-----------------------------------------+ | | good | 0 no no yes yes | @@ -270,7 +270,7 @@ conditions. | | offline | 0 no no no no | +--------------+-----------+-----------------------------------------+ | | good | fixed yes yes yes yes | - | repair | read-only | fixed yes no yes no | + | repair | read-only | as is yes no yes no | | | offline | 0 no no no no | +--------------+-----------+-----------------------------------------+ @@ -281,11 +281,11 @@ Further notes: permissions to read-only applies to all files. The file system is remounted read-only. * Access permission and file size changes due to the device transitioning zones - to the offline condition are permanent. Remounting or reformating the device + to the offline condition are permanent. Remounting or reformatting the device with mkfs.zonefs (mkzonefs) will not change back offline zone files to a good state. 
* File access permission changes to read-only due to the device transitioning - zones to the read-only condition are permanent. Remounting or reformating + zones to the read-only condition are permanent. Remounting or reformatting the device will not re-enable file write access. * File access permission changes implied by the remount-ro, zone-ro and zone-offline mount options are temporary for zones in a good condition. @@ -301,14 +301,22 @@ Mount options zonefs define the "errors=" mount option to allow the user to specify zonefs behavior in response to I/O errors, inode size inconsistencies or zone -condition chages. The defined behaviors are as follow: +condition changes. The defined behaviors are as follows: * remount-ro (default) * zone-ro * zone-offline * repair -The I/O error actions defined for each behavior is detailed in the previous -section. +The run-time I/O error actions defined for each behavior are detailed in the +previous section. Mount time I/O errors will cause the mount operation to fail. +The handling of read-only zones also differs between mount-time and run-time. +If a read-only zone is found at mount time, the zone is always treated in the +same manner as offline zones, that is, all accesses are disabled and the zone +file size set to 0. This is necessary as the write pointer of read-only zones +is defined as invalid by the ZBC and ZAC standards, making it impossible to +discover the amount of data that has been written to the zone. In the case of a +read-only zone discovered at run-time, as indicated in the previous section, +the size of the zone file is left unchanged from its last updated value. Zonefs User Space Tools ======================= diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst index e539c42a3e78da0f9aeba550d234fb17c7bc5219..f6d363b6756e9e5a6432954f7d94278b986eb85a 100644 --- a/Documentation/gpu/i915.rst +++ b/Documentation/gpu/i915.rst @@ -207,10 +207,10 @@ DPIO CSR firmware support for DMC ---------------------------- -.. kernel-doc:: drivers/gpu/drm/i915/intel_csr.c +.. kernel-doc:: drivers/gpu/drm/i915/display/intel_csr.c :doc: csr support for dmc -.. kernel-doc:: drivers/gpu/drm/i915/intel_csr.c +.. kernel-doc:: drivers/gpu/drm/i915/display/intel_csr.c :internal: Video BIOS Table (VBT) @@ -332,7 +332,7 @@ This process is dubbed relocation. GEM BO Management Implementation Details ---------------------------------------- -.. kernel-doc:: drivers/gpu/drm/i915/i915_vma.h +.. kernel-doc:: drivers/gpu/drm/i915/i915_vma_types.h :doc: Virtual Memory Address Buffer Object Eviction @@ -382,7 +382,7 @@ Logical Rings, Logical Ring Contexts and Execlists Global GTT views ---------------- -.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_gtt.c +.. kernel-doc:: drivers/gpu/drm/i915/i915_vma_types.h :doc: Global GTT views .. kernel-doc:: drivers/gpu/drm/i915/i915_gem_gtt.c diff --git a/Documentation/hwmon/adm1177.rst b/Documentation/hwmon/adm1177.rst index c81e0b4abd2875f53d5ec69e59c3b0098a94b2a3..471be1e98d6f6618989784e9508e2312fb66cb00 100644 --- a/Documentation/hwmon/adm1177.rst +++ b/Documentation/hwmon/adm1177.rst @@ -20,8 +20,7 @@ Usage Notes ----------- This driver does not auto-detect devices. You will have to instantiate the -devices explicitly. Please see Documentation/i2c/instantiating-devices for -details. +devices explicitly. Please see :doc:`/i2c/instantiating-devices` for details.
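As a minimal sketch of such explicit instantiation (the bus number 1 and the 0x5a address below are illustrative assumptions, not values taken from this document), board code might do something along these lines::

    #include <linux/err.h>
    #include <linux/i2c.h>
    #include <linux/init.h>

    static const struct i2c_board_info adm1177_info = {
            I2C_BOARD_INFO("adm1177", 0x5a),        /* address is board-specific */
    };

    static int __init board_add_adm1177(void)
    {
            struct i2c_adapter *adap = i2c_get_adapter(1);  /* example bus */
            struct i2c_client *client;

            if (!adap)
                    return -ENODEV;
            client = i2c_new_client_device(adap, &adm1177_info);
            i2c_put_adapter(adap);
            return PTR_ERR_OR_ZERO(client);
    }
    device_initcall(board_add_adm1177);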
Sysfs entries diff --git a/Documentation/hwmon/xdpe12284.rst b/Documentation/hwmon/xdpe12284.rst index 6b7ae98cc536f6382a947436f3ede87a3b112fdc..67d1f87808e57981a18c55c4877d6ab80659f5ce 100644 --- a/Documentation/hwmon/xdpe12284.rst +++ b/Documentation/hwmon/xdpe12284.rst @@ -24,6 +24,7 @@ This driver implements support for Infineon Multi-phase XDPE122 family dual loop voltage regulators. The family includes XDPE12284 and XDPE12254 devices. The devices from this family complaint with: + - Intel VR13 and VR13HC rev 1.3, IMVP8 rev 1.2 and IMPVP9 rev 1.3 DC-DC converter specification. - Intel SVID rev 1.9. protocol. diff --git a/Documentation/kbuild/kbuild.rst b/Documentation/kbuild/kbuild.rst index f1e5dce86af7cfe586cb2b751c8d4801cc810155..510f38d7e78a3480cb812761c9766415d2e84939 100644 --- a/Documentation/kbuild/kbuild.rst +++ b/Documentation/kbuild/kbuild.rst @@ -237,7 +237,7 @@ This is solely useful to speed up test compiles. KBUILD_EXTRA_SYMBOLS -------------------- For modules that use symbols from other modules. -See more details in modules.txt. +See more details in modules.rst. ALLSOURCE_ARCHS --------------- diff --git a/Documentation/kbuild/kconfig-macro-language.rst b/Documentation/kbuild/kconfig-macro-language.rst index 35b3263b7e4088618000bacbf8ced5f5277d69de..8b413ef9603dae16122706b62b340663814d8edf 100644 --- a/Documentation/kbuild/kconfig-macro-language.rst +++ b/Documentation/kbuild/kconfig-macro-language.rst @@ -44,7 +44,7 @@ intermediate:: def_bool y Then, Kconfig moves onto the evaluation stage to resolve inter-symbol -dependency as explained in kconfig-language.txt. +dependency as explained in kconfig-language.rst. Variables diff --git a/Documentation/kbuild/makefiles.rst b/Documentation/kbuild/makefiles.rst index 0e0eb2c8da7d541847ab2af34208f38c276898d5..04d5c01a2e998db16fa8ca7846a5d8e0034c41a0 100644 --- a/Documentation/kbuild/makefiles.rst +++ b/Documentation/kbuild/makefiles.rst @@ -765,7 +765,7 @@ is not sufficient this sometimes needs to be explicit. Example:: #arch/x86/boot/Makefile - subdir- := compressed/ + subdir- := compressed The above assignment instructs kbuild to descend down in the directory compressed/ when "make clean" is executed. @@ -924,7 +924,7 @@ When kbuild executes, the following steps are followed (roughly): $(KBUILD_AFLAGS_MODULE) is used to add arch-specific options that are used for assembler. - From commandline AFLAGS_MODULE shall be used (see kbuild.txt). + From commandline AFLAGS_MODULE shall be used (see kbuild.rst). KBUILD_CFLAGS_KERNEL $(CC) options specific for built-in @@ -937,7 +937,7 @@ When kbuild executes, the following steps are followed (roughly): $(KBUILD_CFLAGS_MODULE) is used to add arch-specific options that are used for $(CC). - From commandline CFLAGS_MODULE shall be used (see kbuild.txt). + From commandline CFLAGS_MODULE shall be used (see kbuild.rst). KBUILD_LDFLAGS_MODULE Options for $(LD) when linking modules @@ -945,7 +945,7 @@ When kbuild executes, the following steps are followed (roughly): $(KBUILD_LDFLAGS_MODULE) is used to add arch-specific options used when linking modules. This is often a linker script. - From commandline LDFLAGS_MODULE shall be used (see kbuild.txt). + From commandline LDFLAGS_MODULE shall be used (see kbuild.rst). KBUILD_LDS @@ -1379,9 +1379,6 @@ See subsequent chapter for the syntax of the Kbuild file. in arch/$(ARCH)/include/(uapi/)/asm, Kbuild will automatically generate a wrapper of the asm-generic one. - The convention is to list one subdir per line and - preferably in alphabetic order. 
- 8 Kbuild Variables ================== diff --git a/Documentation/kbuild/modules.rst b/Documentation/kbuild/modules.rst index 69fa48ee93d6ec5de3988e3a7450dda4652860c7..e0b45a257f2188e2cea5d7988480f5fb0819d7f8 100644 --- a/Documentation/kbuild/modules.rst +++ b/Documentation/kbuild/modules.rst @@ -470,9 +470,9 @@ build. The syntax of the Module.symvers file is:: -<CRC> <Symbol> <Namespace> <Module> <Export Type> +<CRC> <Symbol> <Module> <Export Type> <Namespace> - 0xe1cc2a05 usb_stor_suspend USB_STORAGE drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL + 0xe1cc2a05 usb_stor_suspend drivers/usb/storage/usb-storage EXPORT_SYMBOL_GPL USB_STORAGE The fields are separated by tabs and values may be empty (e.g. if no namespace is defined for an exported symbol). diff --git a/Documentation/networking/devlink/devlink-region.rst b/Documentation/networking/devlink/devlink-region.rst index 1a7683e7acb217d9782af60eb5bedd9b6e6631e5..8b46e8591fe0f7fbc9b43d1f5f3f9e0e25250728 100644 --- a/Documentation/networking/devlink/devlink-region.rst +++ b/Documentation/networking/devlink/devlink-region.rst @@ -40,9 +40,6 @@ example usage # Delete a snapshot using: $ devlink region del pci/0000:00:05.0/cr-space snapshot 1 - # Trigger (request) a snapshot be taken: - $ devlink region trigger pci/0000:00:05.0/cr-space - # Dump a snapshot: $ devlink region dump pci/0000:00:05.0/fw-health snapshot 1 0000000000000000 0014 95dc 0014 9514 0035 1670 0034 db30 diff --git a/Documentation/networking/net_failover.rst b/Documentation/networking/net_failover.rst index 06c97dcb57caee07743c55d7a500b2a42f1ff0a8..e143ab79a960d429a431993690a7db76d0e2afe9 100644 --- a/Documentation/networking/net_failover.rst +++ b/Documentation/networking/net_failover.rst @@ -8,9 +8,9 @@ Overview ======== The net_failover driver provides an automated failover mechanism via APIs -to create and destroy a failover master netdev and mananges a primary and +to create and destroy a failover master netdev and manages a primary and standby slave netdevs that get registered via the generic failover -infrastructrure. +infrastructure. The failover netdev acts a master device and controls 2 slave devices. The original paravirtual interface is registered as 'standby' slave netdev and @@ -29,7 +29,7 @@ virtio-net accelerated datapath: STANDBY mode ============================================= net_failover enables hypervisor controlled accelerated datapath to virtio-net -enabled VMs in a transparent manner with no/minimal guest userspace chanages. +enabled VMs in a transparent manner with no/minimal guest userspace changes. To support this, the hypervisor needs to enable VIRTIO_NET_F_STANDBY feature on the virtio-net interface and assign the same MAC address to both diff --git a/Documentation/networking/phy.rst b/Documentation/networking/phy.rst index 1e4735cc055351ff81bee0541dc221d974547bb1..256106054c8cb0b9d7fdc0a01f35100a388e6f08 100644 --- a/Documentation/networking/phy.rst +++ b/Documentation/networking/phy.rst @@ -487,8 +487,9 @@ phy_register_fixup_for_id():: The stubs set one of the two matching criteria, and set the other one to match anything. -When phy_register_fixup() or \*_for_uid()/\*_for_id() is called at module, -unregister fixup and free allocate memory are required. +When phy_register_fixup() or \*_for_uid()/\*_for_id() is called at module load +time, the module needs to unregister the fixup and free allocated memory when +it's unloaded.
Call one of following function before unloading module:: diff --git a/Documentation/networking/rds.txt b/Documentation/networking/rds.txt index f2a0147c933d1b7be6f9b09de38de2e8994e6e2f..eec61694e894d2728f39c23eb5efe88558aa72c0 100644 --- a/Documentation/networking/rds.txt +++ b/Documentation/networking/rds.txt @@ -159,7 +159,7 @@ Socket Interface set SO_RDS_TRANSPORT on a socket for which the transport has been previously attached explicitly (by SO_RDS_TRANSPORT) or implicitly (via bind(2)) will return an error of EOPNOTSUPP. - An attempt to set SO_RDS_TRANSPPORT to RDS_TRANS_NONE will + An attempt to set SO_RDS_TRANSPORT to RDS_TRANS_NONE will always return EINVAL. RDMA for RDS diff --git a/Documentation/power/index.rst b/Documentation/power/index.rst index 002e42745263a6416de98bb93532786ab444b5cf..ced8a80074348ec744d7bedff3ce681a0162d94b 100644 --- a/Documentation/power/index.rst +++ b/Documentation/power/index.rst @@ -13,7 +13,6 @@ Power Management drivers-testing energy-model freezing-of-tasks - interface opp pci pm_qos_interface diff --git a/Documentation/process/embargoed-hardware-issues.rst b/Documentation/process/embargoed-hardware-issues.rst index 33edae6545994830942c317a1aa2f8ee8666f407..a19d084f9b2cdefd2fd90810bb9b900b64c0f68b 100644 --- a/Documentation/process/embargoed-hardware-issues.rst +++ b/Documentation/process/embargoed-hardware-issues.rst @@ -244,23 +244,23 @@ disclosure of a particular issue, unless requested by a response team or by an involved disclosed party. The current ambassadors list: ============= ======================================================== - ARM + ARM Grant Likely AMD Tom Lendacky IBM Intel Tony Luck Qualcomm Trilok Soni - Microsoft Sasha Levin + Microsoft James Morris VMware Xen Andrew Cooper - Canonical Tyler Hicks + Canonical John Johansen Debian Ben Hutchings Oracle Konrad Rzeszutek Wilk Red Hat Josh Poimboeuf SUSE Jiri Kosina - Amazon Peter Bowen + Amazon Google Kees Cook ============= ======================================================== diff --git a/Documentation/sphinx/parallel-wrapper.sh b/Documentation/sphinx/parallel-wrapper.sh index 7daf5133bdd3144df4f9ae7c624effabaa9e3c01..e54c44ce117d51835f7aeaba820ca48256681915 100644 --- a/Documentation/sphinx/parallel-wrapper.sh +++ b/Documentation/sphinx/parallel-wrapper.sh @@ -30,4 +30,4 @@ if [ -n "$parallel" ] ; then parallel="-j$parallel" fi -exec "$sphinx" "$parallel" "$@" +exec "$sphinx" $parallel "$@" diff --git a/Documentation/translations/zh_CN/process/embargoed-hardware-issues.rst b/Documentation/translations/zh_CN/process/embargoed-hardware-issues.rst index b93f1af6826131fe9f1996f8ea1e8fa35d785871..88273ebe7823d4512fdd59e1823876cbfba0a66c 100644 --- a/Documentation/translations/zh_CN/process/embargoed-hardware-issues.rst +++ b/Documentation/translations/zh_CN/process/embargoed-hardware-issues.rst @@ -183,7 +183,7 @@ CVEåˆ†é… VMware Xen Andrew Cooper - Canonical Tyler Hicks + Canonical John Johansen Debian Ben Hutchings Oracle Konrad Rzeszutek Wilk Red Hat Josh Poimboeuf diff --git a/Documentation/virt/kvm/amd-memory-encryption.rst b/Documentation/virt/kvm/amd-memory-encryption.rst index d18c97b4e14067a48566c6cadb24ebdb84a4362e..c3129b9ba5cb43a7b4b21e30b9c9dba044714856 100644 --- a/Documentation/virt/kvm/amd-memory-encryption.rst +++ b/Documentation/virt/kvm/amd-memory-encryption.rst @@ -53,6 +53,29 @@ key management interface to perform common hypervisor activities such as encrypting bootstrap code, snapshot, migrating and debugging the guest. 
For more information, see the SEV Key Management spec [api-spec]_ +The main ioctl to access SEV is KVM_MEM_ENCRYPT_OP. If the argument +to KVM_MEM_ENCRYPT_OP is NULL, the ioctl returns 0 if SEV is enabled +and ``ENOTTY`` if it is disabled (on some older versions of Linux, +the ioctl runs normally even with a NULL argument, and therefore will +likely return ``EFAULT``). If non-NULL, the argument to KVM_MEM_ENCRYPT_OP +must be a struct kvm_sev_cmd:: + + struct kvm_sev_cmd { + __u32 id; + __u64 data; + __u32 error; + __u32 sev_fd; + }; + + +The ``id`` field contains the subcommand, and the ``data`` field points to +another struct containing arguments specific to the command. The ``sev_fd`` +should point to a file descriptor that is opened on the ``/dev/sev`` +device, if needed (see individual commands). + +On output, ``error`` is zero on success, or an error code. Error codes +are defined in ```. + KVM implements the following commands to support common lifecycle events of SEV guests, such as launching, running, snapshotting, migrating and decommissioning. @@ -90,6 +113,8 @@ Returns: 0 on success, -negative on error On success, the 'handle' field contains a new handle and on error, a negative value. +KVM_SEV_LAUNCH_START requires the ``sev_fd`` field to be valid. + For more details, see SEV spec Section 6.2. 3. KVM_SEV_LAUNCH_UPDATE_DATA diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 97a72a53fa4b246829d64ebe209724e5d2aee2a9..ebd383fba9399d02b7acea47b1ccbbf333949dad 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -4611,35 +4611,38 @@ unpins the VPA pages and releases all the device pages that are used to track the secure pages by hypervisor. 4.122 KVM_S390_NORMAL_RESET +--------------------------- -Capability: KVM_CAP_S390_VCPU_RESETS -Architectures: s390 -Type: vcpu ioctl -Parameters: none -Returns: 0 +:Capability: KVM_CAP_S390_VCPU_RESETS +:Architectures: s390 +:Type: vcpu ioctl +:Parameters: none +:Returns: 0 This ioctl resets VCPU registers and control structures according to the cpu reset definition in the POP (Principles Of Operation). 4.123 KVM_S390_INITIAL_RESET +---------------------------- -Capability: none -Architectures: s390 -Type: vcpu ioctl -Parameters: none -Returns: 0 +:Capability: none +:Architectures: s390 +:Type: vcpu ioctl +:Parameters: none +:Returns: 0 This ioctl resets VCPU registers and control structures according to the initial cpu reset definition in the POP. However, the cpu is not put into ESA mode. This reset is a superset of the normal reset. 4.124 KVM_S390_CLEAR_RESET +-------------------------- -Capability: KVM_CAP_S390_VCPU_RESETS -Architectures: s390 -Type: vcpu ioctl -Parameters: none -Returns: 0 +:Capability: KVM_CAP_S390_VCPU_RESETS +:Architectures: s390 +:Type: vcpu ioctl +:Parameters: none +:Returns: 0 This ioctl resets VCPU registers and control structures according to the clear cpu reset definition in the POP.
However, the cpu is not put diff --git a/Documentation/x86/index.rst b/Documentation/x86/index.rst index a8de2fbc1caad73319ee40b2c16a2ec83642ea7e..265d9e9a093b8d3810b2148b3f794dfa58c0b473 100644 --- a/Documentation/x86/index.rst +++ b/Documentation/x86/index.rst @@ -19,7 +19,6 @@ x86-specific Documentation tlb mtrr pat - intel_mpx intel-iommu intel_txt amd-memory-encryption diff --git a/MAINTAINERS b/MAINTAINERS index 37c2963226d4159fe002528fbf13132ffd235813..ee42a5aba9b0b1972a0aef4105d47f1d322bcf9c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -693,7 +693,7 @@ ALLWINNER CPUFREQ DRIVER M: Yangtao Li L: linux-pm@vger.kernel.org S: Maintained -F: Documentation/devicetree/bindings/opp/sun50i-nvmem-cpufreq.txt +F: Documentation/devicetree/bindings/opp/allwinner,sun50i-h6-operating-points.yaml F: drivers/cpufreq/sun50i-cpufreq-nvmem.c ALLWINNER CRYPTO DRIVERS @@ -3649,6 +3649,7 @@ F: sound/pci/oxygen/ C-SKY ARCHITECTURE M: Guo Ren +L: linux-csky@vger.kernel.org T: git https://github.com/c-sky/csky-linux.git S: Supported F: arch/csky/ @@ -3909,7 +3910,7 @@ S: Supported F: Documentation/filesystems/ceph.txt F: fs/ceph/ -CERTIFICATE HANDLING: +CERTIFICATE HANDLING M: David Howells M: David Woodhouse L: keyrings@vger.kernel.org @@ -3919,7 +3920,7 @@ F: certs/ F: scripts/sign-file.c F: scripts/extract-cert.c -CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM: +CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM L: devel@driverdev.osuosl.org S: Obsolete F: drivers/staging/wusbcore/ @@ -4016,7 +4017,7 @@ M: Cheng-Yi Chiang S: Maintained R: Enric Balletbo i Serra R: Guenter Roeck -F: Documentation/devicetree/bindings/sound/google,cros-ec-codec.txt +F: Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml F: sound/soc/codecs/cros_ec_codec.* CIRRUS LOGIC AUDIO CODEC DRIVERS @@ -4072,7 +4073,6 @@ F: drivers/scsi/snic/ CISCO VIC ETHERNET NIC DRIVER M: Christian Benvenuti M: Govindarajulu Varadarajan <_govind@gmx.com> -M: Parvi Kaustubhi S: Supported F: drivers/net/ethernet/cisco/enic/ @@ -4474,7 +4474,7 @@ L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/platform/sunxi/sun6i-csi/ -F: Documentation/devicetree/bindings/media/sun6i-csi.txt +F: Documentation/devicetree/bindings/media/allwinner,sun6i-a31-csi.yaml CW1200 WLAN driver M: Solomon Peachy @@ -4571,7 +4571,7 @@ F: drivers/infiniband/hw/cxgb4/ F: include/uapi/rdma/cxgb4-abi.h CXGB4VF ETHERNET DRIVER (CXGB4VF) -M: Casey Leedom +M: Vishal Kulkarni L: netdev@vger.kernel.org W: http://www.chelsio.com S: Supported @@ -5628,7 +5628,7 @@ F: include/uapi/drm/lima_drm.h T: git git://anongit.freedesktop.org/drm/drm-misc DRM DRIVERS FOR MEDIATEK -M: CK Hu +M: Chun-Kuang Hu M: Philipp Zabel L: dri-devel@lists.freedesktop.org S: Supported @@ -5688,7 +5688,7 @@ L: dri-devel@lists.freedesktop.org T: git git://anongit.freedesktop.org/drm/drm-misc S: Maintained F: drivers/gpu/drm/stm -F: Documentation/devicetree/bindings/display/st,stm32-ltdc.txt +F: Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml DRM DRIVERS FOR TI LCDC M: Jyri Sarha @@ -5964,12 +5964,12 @@ S: Maintained F: drivers/media/dvb-frontends/ec100* ECRYPT FILE SYSTEM -M: Tyler Hicks +M: Tyler Hicks L: ecryptfs@vger.kernel.org W: http://ecryptfs.org W: https://launchpad.net/ecryptfs T: git git://git.kernel.org/pub/scm/linux/kernel/git/tyhicks/ecryptfs.git -S: Supported +S: Odd Fixes F: Documentation/filesystems/ecryptfs.txt F: fs/ecryptfs/ @@ -6229,7 +6229,6 @@ S: Supported F: drivers/scsi/be2iscsi/ Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER 
(be2net) -M: Sathya Perla M: Ajit Khaparde M: Sriharsha Basavapatna M: Somnath Kotur @@ -7079,7 +7078,7 @@ L: kvm@vger.kernel.org S: Supported F: drivers/uio/uio_pci_generic.c -GENERIC VDSO LIBRARY: +GENERIC VDSO LIBRARY M: Andy Lutomirski M: Thomas Gleixner M: Vincenzo Frascino @@ -7549,6 +7548,12 @@ F: include/uapi/linux/if_hippi.h F: net/802/hippi.c F: drivers/net/hippi/ +HISILICON DMA DRIVER +M: Zhou Wang +L: dmaengine@vger.kernel.org +S: Maintained +F: drivers/dma/hisi_dma.c + HISILICON SECURITY ENGINE V2 DRIVER (SEC2) M: Zaibo Xu L: linux-crypto@vger.kernel.org @@ -7606,7 +7611,8 @@ F: Documentation/admin-guide/perf/hisi-pmu.rst HISILICON ROCE DRIVER M: Lijun Ou -M: Wei Hu(Xavier) +M: Wei Hu(Xavier) +M: Weihang Li L: linux-rdma@vger.kernel.org S: Maintained F: drivers/infiniband/hw/hns/ @@ -7769,7 +7775,7 @@ Hyper-V CORE AND DRIVERS M: "K. Y. Srinivasan" M: Haiyang Zhang M: Stephen Hemminger -M: Sasha Levin +M: Wei Liu T: git git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux.git L: linux-hyperv@vger.kernel.org S: Supported @@ -8509,7 +8515,6 @@ L: dmaengine@vger.kernel.org S: Supported F: drivers/dma/idxd/* F: include/uapi/linux/idxd.h -F: include/linux/idxd.h INTEL IDLE DRIVER M: Jacob Pan @@ -8716,7 +8721,7 @@ M: Emmanuel Grumbach M: Luca Coelho M: Intel Linux Wireless L: linux-wireless@vger.kernel.org -W: http://intellinuxwireless.org +W: https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi T: git git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi.git S: Supported F: drivers/net/wireless/intel/iwlwifi/ @@ -9310,7 +9315,7 @@ F: include/keys/trusted-type.h F: security/keys/trusted.c F: include/keys/trusted.h -KEYS/KEYRINGS: +KEYS/KEYRINGS M: David Howells M: Jarkko Sakkinen L: keyrings@vger.kernel.org @@ -10195,7 +10200,7 @@ MAXBOTIX ULTRASONIC RANGER IIO DRIVER M: Andreas Klinger L: linux-iio@vger.kernel.org S: Maintained -F: Documentation/devicetree/bindings/iio/proximity/maxbotix,mb1232.txt +F: Documentation/devicetree/bindings/iio/proximity/maxbotix,mb1232.yaml F: drivers/iio/proximity/mb1232.c MAXIM MAX77650 PMIC MFD DRIVER @@ -10498,7 +10503,7 @@ M: Hugues Fruchet L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Supported -F: Documentation/devicetree/bindings/media/st,stm32-dcmi.txt +F: Documentation/devicetree/bindings/media/st,stm32-dcmi.yaml F: drivers/media/platform/stm32/stm32-dcmi.c MEDIA DRIVERS FOR NVIDIA TEGRA - VDE @@ -11146,14 +11151,12 @@ S: Maintained F: drivers/usb/image/microtek.* MIPS -M: Ralf Baechle -M: Paul Burton +M: Thomas Bogendoerfer L: linux-mips@vger.kernel.org W: http://www.linux-mips.org/ -T: git git://git.linux-mips.org/pub/scm/ralf/linux.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux.git -Q: http://patchwork.linux-mips.org/project/linux-mips/list/ -S: Supported +Q: https://patchwork.kernel.org/project/linux-mips/list/ +S: Maintained F: Documentation/devicetree/bindings/mips/ F: Documentation/mips/ F: arch/mips/ @@ -11516,7 +11519,7 @@ F: drivers/scsi/mac_scsi.* F: drivers/scsi/sun3_scsi.* F: drivers/scsi/sun3_scsi_vme.c -NCSI LIBRARY: +NCSI LIBRARY M: Samuel Mendoza-Jonas S: Maintained F: net/ncsi/ @@ -12772,7 +12775,7 @@ M: Tom Joseph L: linux-pci@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/pci/cdns,*.txt -F: drivers/pci/controller/pcie-cadence* +F: drivers/pci/controller/cadence/ PCI DRIVER FOR FREESCALE LAYERSCAPE M: Minghuan Lian @@ -12985,7 +12988,6 @@ M: Robert Richter L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated 
for non-subscribers) S: Supported -F: Documentation/devicetree/bindings/pci/pci-thunder-* F: drivers/pci/controller/pci-thunder-* PCIE DRIVER FOR HISILICON @@ -13544,7 +13546,7 @@ L: linuxppc-dev@lists.ozlabs.org S: Maintained F: drivers/block/ps3vram.c -PSAMPLE PACKET SAMPLING SUPPORT: +PSAMPLE PACKET SAMPLING SUPPORT M: Yotam Gigi S: Maintained F: net/psample @@ -14260,7 +14262,7 @@ F: include/dt-bindings/reset/ F: include/linux/reset.h F: include/linux/reset/ F: include/linux/reset-controller.h -K: \b(?:devm_|of_)?reset_control(?:ler_[a-z]+|_[a-z_]+)?\b +K: \b(?:devm_|of_)?reset_control(?:ler_[a-z]+|_[a-z_]+)?\b RESTARTABLE SEQUENCES SUPPORT M: Mathieu Desnoyers @@ -14614,10 +14616,10 @@ F: drivers/media/pci/saa7146/ F: include/media/drv-intf/saa7146* SAFESETID SECURITY MODULE -M: Micah Morton -S: Supported -F: security/safesetid/ -F: Documentation/admin-guide/LSM/SafeSetID.rst +M: Micah Morton +S: Supported +F: security/safesetid/ +F: Documentation/admin-guide/LSM/SafeSetID.rst SAMSUNG AUDIO (ASoC) DRIVERS M: Krzysztof Kozlowski @@ -15452,11 +15454,9 @@ F: drivers/infiniband/sw/siw/ F: include/uapi/rdma/siw-abi.h SOFT-ROCE DRIVER (rxe) -M: Moni Shoua +M: Zhu Yanjun L: linux-rdma@vger.kernel.org S: Supported -W: https://github.com/SoftRoCE/rxe-dev/wiki/rxe-dev:-Home -Q: http://patchwork.kernel.org/project/linux-rdma/list/ F: drivers/infiniband/sw/rxe/ F: include/uapi/rdma/rdma_user_rxe.h @@ -15955,7 +15955,7 @@ F: drivers/*/stm32-*timer* F: drivers/pwm/pwm-stm32* F: include/linux/*/stm32-*tim* F: Documentation/ABI/testing/*timer-stm32 -F: Documentation/devicetree/bindings/*/stm32-*timer* +F: Documentation/devicetree/bindings/*/*stm32-*timer* F: Documentation/devicetree/bindings/pwm/pwm-stm32* STMMAC ETHERNET DRIVER @@ -16114,6 +16114,8 @@ SYNOPSYS DESIGNWARE 8250 UART DRIVER R: Andy Shevchenko S: Maintained F: drivers/tty/serial/8250/8250_dw.c +F: drivers/tty/serial/8250/8250_dwlib.* +F: drivers/tty/serial/8250/8250_lpss.c SYNOPSYS DESIGNWARE APB GPIO DRIVER M: Hoan Tran @@ -16584,8 +16586,8 @@ M: Michael Jamet M: Mika Westerberg M: Yehezkel Bernat L: linux-usb@vger.kernel.org -T: git git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt.git S: Maintained +T: git git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt.git F: Documentation/admin-guide/thunderbolt.rst F: drivers/thunderbolt/ F: include/linux/thunderbolt.h @@ -16784,7 +16786,7 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/ S: Maintained F: drivers/media/platform/ti-vpe/ F: Documentation/devicetree/bindings/media/ti,vpe.yaml - Documentation/devicetree/bindings/media/ti,cal.yaml +F: Documentation/devicetree/bindings/media/ti,cal.yaml TI WILINK WIRELESS DRIVERS L: linux-wireless@vger.kernel.org @@ -17112,7 +17114,7 @@ S: Maintained F: Documentation/admin-guide/ufs.rst F: fs/ufs/ -UHID USERSPACE HID IO DRIVER: +UHID USERSPACE HID IO DRIVER M: David Herrmann L: linux-input@vger.kernel.org S: Maintained @@ -17126,18 +17128,18 @@ S: Maintained F: drivers/usb/common/ulpi.c F: include/linux/ulpi/ -ULTRA-WIDEBAND (UWB) SUBSYSTEM: +ULTRA-WIDEBAND (UWB) SUBSYSTEM L: devel@driverdev.osuosl.org S: Obsolete F: drivers/staging/uwb/ -UNICODE SUBSYSTEM: +UNICODE SUBSYSTEM M: Gabriel Krisman Bertazi L: linux-fsdevel@vger.kernel.org S: Supported F: fs/unicode/ -UNICORE32 ARCHITECTURE: +UNICORE32 ARCHITECTURE M: Guan Xuetao W: http://mprc.pku.edu.cn/~guanxuetao/linux S: Maintained @@ -17424,11 +17426,14 @@ F: drivers/usb/ F: include/linux/usb.h F: include/linux/usb/ -USB TYPEC PI3USB30532 MUX DRIVER -M: Hans 
de Goede +USB TYPEC BUS FOR ALTERNATE MODES +M: Heikki Krogerus L: linux-usb@vger.kernel.org S: Maintained -F: drivers/usb/typec/mux/pi3usb30532.c +F: Documentation/ABI/testing/sysfs-bus-typec +F: Documentation/driver-api/usb/typec_bus.rst +F: drivers/usb/typec/altmodes/ +F: include/linux/usb/typec_altmode.h USB TYPEC CLASS M: Heikki Krogerus @@ -17439,14 +17444,11 @@ F: Documentation/driver-api/usb/typec.rst F: drivers/usb/typec/ F: include/linux/usb/typec.h -USB TYPEC BUS FOR ALTERNATE MODES -M: Heikki Krogerus +USB TYPEC PI3USB30532 MUX DRIVER +M: Hans de Goede L: linux-usb@vger.kernel.org S: Maintained -F: Documentation/ABI/testing/sysfs-bus-typec -F: Documentation/driver-api/usb/typec_bus.rst -F: drivers/usb/typec/altmodes/ -F: include/linux/usb/typec_altmode.h +F: drivers/usb/typec/mux/pi3usb30532.c USB TYPEC PORT CONTROLLER DRIVERS M: Guenter Roeck @@ -17823,7 +17825,7 @@ F: include/linux/vbox_utils.h F: include/uapi/linux/vbox*.h F: drivers/virt/vboxguest/ -VIRTUAL BOX SHARED FOLDER VFS DRIVER: +VIRTUAL BOX SHARED FOLDER VFS DRIVER M: Hans de Goede L: linux-fsdevel@vger.kernel.org S: Maintained diff --git a/Makefile b/Makefile index aab38cb02b249c03d5f3513574b32e439c123bf5..4d0711f540475c6273addeb1bd925f27faa81092 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 5 PATCHLEVEL = 6 SUBLEVEL = 0 -EXTRAVERSION = -rc2 +EXTRAVERSION = NAME = Kleptomaniac Octopus # *DOCUMENTATION* @@ -68,6 +68,7 @@ unexport GREP_OPTIONS # # If KBUILD_VERBOSE equals 0 then the above command will be hidden. # If KBUILD_VERBOSE equals 1 then the above command is displayed. +# If KBUILD_VERBOSE equals 2 then give the reason why each target is rebuilt. # # To put more focus on warnings, be less verbose as default # Use 'make V=1' to see the full commands @@ -1238,7 +1239,7 @@ ifneq ($(dtstree),) %.dtb: include/config/kernel.release scripts_dtc $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@ -PHONY += dtbs dtbs_install dt_binding_check +PHONY += dtbs dtbs_install dtbs_check dtbs dtbs_check: include/config/kernel.release scripts_dtc $(Q)$(MAKE) $(build)=$(dtstree) @@ -1258,6 +1259,7 @@ PHONY += scripts_dtc scripts_dtc: scripts_basic $(Q)$(MAKE) $(build)=scripts/dtc +PHONY += dt_binding_check dt_binding_check: scripts_dtc $(Q)$(MAKE) $(build)=Documentation/devicetree/bindings @@ -1802,7 +1804,7 @@ existing-targets := $(wildcard $(sort $(targets))) -include $(foreach f,$(existing-targets),$(dir $(f)).$(notdir $(f)).cmd) -endif # config-targets +endif # config-build endif # mixed-build endif # need-sub-make diff --git a/arch/Kconfig b/arch/Kconfig index 98de654b79b312eca4737efff9b6b1460cb94f37..17fe351cdde08ab849b3b7f7763c4cdb307c61d2 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -738,8 +738,9 @@ config HAVE_STACK_VALIDATION config HAVE_RELIABLE_STACKTRACE bool help - Architecture has a save_stack_trace_tsk_reliable() function which - only returns a stack trace if it can guarantee the trace is reliable. + Architecture has either save_stack_trace_tsk_reliable() or + arch_stack_walk_reliable() function which only returns a stack trace + if it can guarantee the trace is reliable. 
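For reference, kernel code reaches this capability through the generic wrapper stack_trace_save_tsk_reliable(); the sketch below is a hypothetical caller (the function name and buffer size are arbitrary, and CONFIG_HAVE_RELIABLE_STACKTRACE=y is assumed) that only trusts a trace when that call succeeds::

    #include <linux/kernel.h>
    #include <linux/sched.h>
    #include <linux/stacktrace.h>

    static int check_task_stack_reliable(struct task_struct *task)
    {
            unsigned long entries[64];
            int nr;

            nr = stack_trace_save_tsk_reliable(task, entries,
                                               ARRAY_SIZE(entries));
            if (nr < 0)
                    return nr;      /* trace could not be guaranteed reliable */

            /* The nr entries in entries[] can be trusted here. */
            return 0;
    }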
config HAVE_ARCH_HASH bool diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index ff2a393b635c5681f5ce6c9edd1a1d20efd6ea10..7124ab82dfa31fb32f9701419fcd199e9f2569c2 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -154,7 +154,7 @@ config ARC_CPU_HS help Support for ARC HS38x Cores based on ARCv2 ISA The notable features are: - - SMP configurations of upto 4 core with coherency + - SMP configurations of up to 4 cores with coherency - Optional L2 Cache and IO-Coherency - Revised Interrupt Architecture (multiple priorites, reg banks, auto stack switch, auto regfile save/restore) @@ -192,7 +192,7 @@ config ARC_SMP_HALT_ON_RESET help In SMP configuration cores can be configured as Halt-on-reset or they could all start at same time. For Halt-on-reset, non - masters are parked until Master kicks them so they can start of + masters are parked until Master kicks them so they can start off at designated entry point. For other case, all jump to common entry point and spin wait for Master's signal. diff --git a/arch/arc/configs/nps_defconfig b/arch/arc/configs/nps_defconfig index 07f26ed39f024158466f2b5d6f4c0cb2cf115060..f7a978dfdf1d3668660aad6c29468fa7e97fe06c 100644 --- a/arch/arc/configs/nps_defconfig +++ b/arch/arc/configs/nps_defconfig @@ -21,8 +21,6 @@ CONFIG_MODULES=y CONFIG_MODULE_FORCE_LOAD=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set -# CONFIG_IOSCHED_DEADLINE is not set -# CONFIG_IOSCHED_CFQ is not set CONFIG_ARC_PLAT_EZNPS=y CONFIG_SMP=y CONFIG_NR_CPUS=4096 diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig index 5dd470b6609ebf2e061e1b48e98c86f5ef175056..bf39a0091679c766fa54f7670b4d351bf50dc6c8 100644 --- a/arch/arc/configs/nsimosci_defconfig +++ b/arch/arc/configs/nsimosci_defconfig @@ -20,8 +20,6 @@ CONFIG_ISA_ARCOMPACT=y CONFIG_KPROBES=y CONFIG_MODULES=y # CONFIG_BLK_DEV_BSG is not set -# CONFIG_IOSCHED_DEADLINE is not set -# CONFIG_IOSCHED_CFQ is not set CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci" # CONFIG_COMPACTION is not set CONFIG_NET=y diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig index 3532e86f7bff690b4906d8c7f9ea8e9d6a5db117..7121bd71c543ad3232e2ca97eb01a7ddda1e9921 100644 --- a/arch/arc/configs/nsimosci_hs_defconfig +++ b/arch/arc/configs/nsimosci_hs_defconfig @@ -19,8 +19,6 @@ CONFIG_PERF_EVENTS=y CONFIG_KPROBES=y CONFIG_MODULES=y # CONFIG_BLK_DEV_BSG is not set -# CONFIG_IOSCHED_DEADLINE is not set -# CONFIG_IOSCHED_CFQ is not set CONFIG_ISA_ARCV2=y CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci_hs" # CONFIG_COMPACTION is not set diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig index d90448bee064f3604631966dcd0e67253a44226f..f9863b294a707ef569bf36aa08592bf8ca48b5f2 100644 --- a/arch/arc/configs/nsimosci_hs_smp_defconfig +++ b/arch/arc/configs/nsimosci_hs_smp_defconfig @@ -14,8 +14,6 @@ CONFIG_PERF_EVENTS=y CONFIG_KPROBES=y CONFIG_MODULES=y # CONFIG_BLK_DEV_BSG is not set -# CONFIG_IOSCHED_DEADLINE is not set -# CONFIG_IOSCHED_CFQ is not set CONFIG_ISA_ARCV2=y CONFIG_SMP=y # CONFIG_ARC_TIMERS_64BIT is not set diff --git a/arch/arc/include/asm/fpu.h b/arch/arc/include/asm/fpu.h index 64347250fdf55e58453e90982e90182ee68b1fc4..006bcf88a7a5f21284ddb2fb015d4a75924b4490 100644 --- a/arch/arc/include/asm/fpu.h +++ b/arch/arc/include/asm/fpu.h @@ -43,6 +43,8 @@ extern void fpu_init_task(struct pt_regs *regs); #endif /* !CONFIG_ISA_ARCOMPACT */ +struct task_struct; + extern void fpu_save_restore(struct task_struct *p, struct task_struct *n); #else 
/* !CONFIG_ARC_FPU_SAVE_RESTORE */ diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h index d9ee43c6b7dbc2c698960365e8bbe78d0f087ddb..fe19f1d412e71896b324e28278fc52cae13e1a38 100644 --- a/arch/arc/include/asm/linkage.h +++ b/arch/arc/include/asm/linkage.h @@ -29,6 +29,8 @@ .endm #define ASM_NL ` /* use '`' to mark new line in macro */ +#define __ALIGN .align 4 +#define __ALIGN_STR __stringify(__ALIGN) /* annotation for data we want in DCCM - if enabled in .config */ .macro ARCFP_DATA nm diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index e1c647490f00e91069671f5aeb9871830cb9be83..aa41af6ef4ac6eccca426d72810c726423391de8 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c @@ -8,11 +8,11 @@ #include #include #include -#include #include #include #include #include +#include #include #include #include diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c index b79886a6cec8a72398720fa839df2e2917aa523e..d2999503fb8a5f1095419a673bd317f4e4690c6c 100644 --- a/arch/arc/kernel/troubleshoot.c +++ b/arch/arc/kernel/troubleshoot.c @@ -104,8 +104,7 @@ static void show_faulting_vma(unsigned long address) if (IS_ERR(nm)) nm = "?"; } - pr_info(" @off 0x%lx in [%s]\n" - " VMA: 0x%08lx to 0x%08lx\n", + pr_info(" @off 0x%lx in [%s] VMA: 0x%08lx to 0x%08lx\n", vma->vm_start < TASK_UNMAPPED_BASE ? address : address - vma->vm_start, nm, vma->vm_start, vma->vm_end); @@ -120,8 +119,6 @@ static void show_ecr_verbose(struct pt_regs *regs) unsigned int vec, cause_code; unsigned long address; - pr_info("\n[ECR ]: 0x%08lx => ", regs->event); - /* For Data fault, this is data address not instruction addr */ address = current->thread.fault_address; @@ -130,10 +127,10 @@ static void show_ecr_verbose(struct pt_regs *regs) /* For DTLB Miss or ProtV, display the memory involved too */ if (vec == ECR_V_DTLB_MISS) { - pr_cont("Invalid %s @ 0x%08lx by insn @ 0x%08lx\n", + pr_cont("Invalid %s @ 0x%08lx by insn @ %pS\n", (cause_code == 0x01) ? "Read" : ((cause_code == 0x02) ? "Write" : "EX"), - address, regs->ret); + address, (void *)regs->ret); } else if (vec == ECR_V_ITLB_MISS) { pr_cont("Insn could not be fetched\n"); } else if (vec == ECR_V_MACH_CHK) { @@ -191,31 +188,31 @@ void show_regs(struct pt_regs *regs) show_ecr_verbose(regs); - pr_info("[EFA ]: 0x%08lx\n[BLINK ]: %pS\n[ERET ]: %pS\n", - current->thread.fault_address, - (void *)regs->blink, (void *)regs->ret); - if (user_mode(regs)) show_faulting_vma(regs->ret); /* faulting code, not data */ - pr_info("[STAT32]: 0x%08lx", regs->status32); + pr_info("ECR: 0x%08lx EFA: 0x%08lx ERET: 0x%08lx\n", + regs->event, current->thread.fault_address, regs->ret); + + pr_info("STAT32: 0x%08lx", regs->status32); #define STS_BIT(r, bit) r->status32 & STATUS_##bit##_MASK ? #bit" " : "" #ifdef CONFIG_ISA_ARCOMPACT - pr_cont(" : %2s%2s%2s%2s%2s%2s%2s\n", + pr_cont(" [%2s%2s%2s%2s%2s%2s%2s]", (regs->status32 & STATUS_U_MASK) ? "U " : "K ", STS_BIT(regs, DE), STS_BIT(regs, AE), STS_BIT(regs, A2), STS_BIT(regs, A1), STS_BIT(regs, E2), STS_BIT(regs, E1)); #else - pr_cont(" : %2s%2s%2s%2s\n", + pr_cont(" [%2s%2s%2s%2s]", STS_BIT(regs, IE), (regs->status32 & STATUS_U_MASK) ? 
"U " : "K ", STS_BIT(regs, DE), STS_BIT(regs, AE)); #endif - pr_info("BTA: 0x%08lx\t SP: 0x%08lx\t FP: 0x%08lx\n", - regs->bta, regs->sp, regs->fp); + pr_cont(" BTA: 0x%08lx\n", regs->bta); + pr_info("BLK: %pS\n SP: 0x%08lx FP: 0x%08lx\n", + (void *)regs->blink, regs->sp, regs->fp); pr_info("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n", regs->lp_start, regs->lp_end, regs->lp_count); diff --git a/arch/arm/Makefile b/arch/arm/Makefile index db857d07114f18240ac7d2deec2710f7b838cf1a..1fc32b611f8a4de487695fe42f86847f53d898e1 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -307,13 +307,15 @@ endif ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y) prepare: stack_protector_prepare stack_protector_prepare: prepare0 - $(eval KBUILD_CFLAGS += \ + $(eval SSP_PLUGIN_CFLAGS := \ -fplugin-arg-arm_ssp_per_task_plugin-tso=$(shell \ awk '{if ($$2 == "THREAD_SZ_ORDER") print $$3;}'\ include/generated/asm-offsets.h) \ -fplugin-arg-arm_ssp_per_task_plugin-offset=$(shell \ awk '{if ($$2 == "TI_STACK_CANARY") print $$3;}'\ include/generated/asm-offsets.h)) + $(eval KBUILD_CFLAGS += $(SSP_PLUGIN_CFLAGS)) + $(eval GCC_PLUGINS_CFLAGS += $(SSP_PLUGIN_CFLAGS)) endif all: $(notdir $(KBUILD_IMAGE)) diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index da599c3a1193433207b6e2b2bdd44a6bb8a76647..9c11e7490292f0e0031b2dfb4f7e0386b3bcee38 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -101,7 +101,6 @@ clean-files += piggy_data lib1funcs.S ashldi3.S bswapsdi2.S \ $(libfdt) $(libfdt_hdrs) hyp-stub.S KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING -KBUILD_CFLAGS += $(DISABLE_ARM_SSP_PER_TASK_PLUGIN) ifeq ($(CONFIG_FUNCTION_TRACER),y) ORIG_CFLAGS := $(KBUILD_CFLAGS) @@ -117,7 +116,8 @@ CFLAGS_fdt_ro.o := $(nossp-flags-y) CFLAGS_fdt_rw.o := $(nossp-flags-y) CFLAGS_fdt_wip.o := $(nossp-flags-y) -ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin -I$(obj) +ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin \ + -I$(obj) $(DISABLE_ARM_SSP_PER_TASK_PLUGIN) asflags-y := -DZIMAGE # Supply kernel BSS size to the decompressor via a linker symbol. diff --git a/arch/arm/boot/dts/am437x-idk-evm.dts b/arch/arm/boot/dts/am437x-idk-evm.dts index f3ced6df0c9b272461f394b090905dd7ee4af6da..9f66f96d09c91661a0917e4abcdf701086059ba4 100644 --- a/arch/arm/boot/dts/am437x-idk-evm.dts +++ b/arch/arm/boot/dts/am437x-idk-evm.dts @@ -526,11 +526,11 @@ * Supply voltage supervisor on board will not allow opp50 so * disable it and set opp100 as suspend OPP. 
*/ - opp50@300000000 { + opp50-300000000 { status = "disabled"; }; - opp100@600000000 { + opp100-600000000 { opp-suspend; }; }; diff --git a/arch/arm/boot/dts/bcm2711-rpi-4-b.dts b/arch/arm/boot/dts/bcm2711-rpi-4-b.dts index 1b5a835f66bd3de475e8229cc2302576f3c57b45..efea891b1a76ea0a32fd74950bd1fa179a73acfe 100644 --- a/arch/arm/boot/dts/bcm2711-rpi-4-b.dts +++ b/arch/arm/boot/dts/bcm2711-rpi-4-b.dts @@ -21,6 +21,7 @@ aliases { ethernet0 = &genet; + pcie0 = &pcie0; }; leds { @@ -31,6 +32,8 @@ pwr { label = "PWR"; gpios = <&expgpio 2 GPIO_ACTIVE_LOW>; + default-state = "keep"; + linux,default-trigger = "default-on"; }; }; diff --git a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts index b75af21069f95d4b2bd4187d08cb2e974cbf6e5c..4c3f606e5b8d8ebae250daef6d3a26c0f8488ff3 100644 --- a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts +++ b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts @@ -112,6 +112,7 @@ &sdhci { #address-cells = <1>; #size-cells = <0>; + pinctrl-names = "default"; pinctrl-0 = <&emmc_gpio34 &gpclk2_gpio43>; bus-width = <4>; mmc-pwrseq = <&wifi_pwrseq>; diff --git a/arch/arm/boot/dts/bcm2835-rpi.dtsi b/arch/arm/boot/dts/bcm2835-rpi.dtsi index 394c8a71b13be30a8f8604b1936bc3ed95fe30eb..fd2c766e0f7108973b771660e368d66cef3a5bc4 100644 --- a/arch/arm/boot/dts/bcm2835-rpi.dtsi +++ b/arch/arm/boot/dts/bcm2835-rpi.dtsi @@ -15,6 +15,7 @@ firmware: firmware { compatible = "raspberrypi,bcm2835-firmware", "simple-bus"; mboxes = <&mailbox>; + dma-ranges; }; power: power { diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-a-plus.dts b/arch/arm/boot/dts/bcm2837-rpi-3-a-plus.dts index 66ab35eccba7bbc0f753ac99ce0763a8b1f5af9c..28be0332c1c810e6fe727802ce7af4458f70029f 100644 --- a/arch/arm/boot/dts/bcm2837-rpi-3-a-plus.dts +++ b/arch/arm/boot/dts/bcm2837-rpi-3-a-plus.dts @@ -26,6 +26,8 @@ pwr { label = "PWR"; gpios = <&expgpio 2 GPIO_ACTIVE_LOW>; + default-state = "keep"; + linux,default-trigger = "default-on"; }; }; }; diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts b/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts index 74ed6d04780703d8c3197c39716baecad5cafcb5..37343148643dbfafb9d89d7eb091b401fafb4aa0 100644 --- a/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts +++ b/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts @@ -27,6 +27,8 @@ pwr { label = "PWR"; gpios = <&expgpio 2 GPIO_ACTIVE_LOW>; + default-state = "keep"; + linux,default-trigger = "default-on"; }; }; diff --git a/arch/arm/boot/dts/dm8148-evm.dts b/arch/arm/boot/dts/dm8148-evm.dts index 3931fb068ff09d408023e19cfe4c542e434347f2..91d1018ab75fcddd266f926d1d9c046a9bb566e4 100644 --- a/arch/arm/boot/dts/dm8148-evm.dts +++ b/arch/arm/boot/dts/dm8148-evm.dts @@ -24,12 +24,12 @@ &cpsw_emac0 { phy-handle = <ðphy0>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; }; &cpsw_emac1 { phy-handle = <ðphy1>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; }; &davinci_mdio { diff --git a/arch/arm/boot/dts/dm8148-t410.dts b/arch/arm/boot/dts/dm8148-t410.dts index 9e43d5ec0bb2fe6a893a19b270021f2d9f01c486..79ccdd4470f4cb61cd2f9aca0cb7faca172276a6 100644 --- a/arch/arm/boot/dts/dm8148-t410.dts +++ b/arch/arm/boot/dts/dm8148-t410.dts @@ -33,12 +33,12 @@ &cpsw_emac0 { phy-handle = <ðphy0>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; }; &cpsw_emac1 { phy-handle = <ðphy1>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; }; &davinci_mdio { diff --git a/arch/arm/boot/dts/dra62x-j5eco-evm.dts b/arch/arm/boot/dts/dra62x-j5eco-evm.dts index 861ab90a3f3aac4e817814d0d00b88be6689296f..c16e183822bee8a0bf48deaca947c202b6790d6b 100644 --- 
a/arch/arm/boot/dts/dra62x-j5eco-evm.dts +++ b/arch/arm/boot/dts/dra62x-j5eco-evm.dts @@ -24,12 +24,12 @@ &cpsw_emac0 { phy-handle = <ðphy0>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; }; &cpsw_emac1 { phy-handle = <ðphy1>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; }; &davinci_mdio { diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts index de7f85efaa5120a765abf05eb34780b8000994d2..af06a55d1c5c6374eae5f41c874c2b40872b75ef 100644 --- a/arch/arm/boot/dts/dra7-evm.dts +++ b/arch/arm/boot/dts/dra7-evm.dts @@ -61,10 +61,10 @@ regulator-max-microvolt = <1800000>; }; - evm_3v3: fixedregulator-evm3v3 { + vsys_3v3: fixedregulator-vsys3v3 { /* Output of Cntlr A of TPS43351-Q1 on dra7-evm */ compatible = "regulator-fixed"; - regulator-name = "evm_3v3"; + regulator-name = "vsys_3v3"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; vin-supply = <&evm_12v0>; diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi index fc418834890d4c9d5d323b8acf6e76d0170e41de..2119a78e9c153605acc99eb83de65940257a76ef 100644 --- a/arch/arm/boot/dts/dra7-l4.dtsi +++ b/arch/arm/boot/dts/dra7-l4.dtsi @@ -3474,6 +3474,7 @@ clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER13_CLKCTRL 24>; clock-names = "fck"; interrupts = ; + ti,timer-pwm; }; }; @@ -3501,6 +3502,7 @@ clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER14_CLKCTRL 24>; clock-names = "fck"; interrupts = ; + ti,timer-pwm; }; }; @@ -3528,6 +3530,7 @@ clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER15_CLKCTRL 24>; clock-names = "fck"; interrupts = ; + ti,timer-pwm; }; }; @@ -3555,6 +3558,7 @@ clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER16_CLKCTRL 24>; clock-names = "fck"; interrupts = ; + ti,timer-pwm; }; }; diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index d78b684e7fca0330d760502d9f45168bb1996b5e..5f5ee16f07a3af5a9ae4e34da20fa612db3c8741 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -148,6 +148,7 @@ #address-cells = <1>; #size-cells = <1>; ranges = <0x0 0x0 0x0 0xc0000000>; + dma-ranges = <0x80000000 0x0 0x80000000 0x80000000>; ti,hwmods = "l3_main_1", "l3_main_2"; reg = <0x0 0x44000000 0x0 0x1000000>, <0x0 0x45000000 0x0 0x1000>; @@ -184,6 +185,7 @@ device_type = "pci"; ranges = <0x81000000 0 0 0x03000 0 0x00010000 0x82000000 0 0x20013000 0x13000 0 0xffed000>; + dma-ranges = <0x02000000 0x0 0x00000000 0x00000000 0x1 0x00000000>; bus-range = <0x00 0xff>; #interrupt-cells = <1>; num-lanes = <1>; @@ -238,6 +240,7 @@ device_type = "pci"; ranges = <0x81000000 0 0 0x03000 0 0x00010000 0x82000000 0 0x30013000 0x13000 0 0xffed000>; + dma-ranges = <0x02000000 0x0 0x00000000 0x00000000 0x1 0x00000000>; bus-range = <0x00 0xff>; #interrupt-cells = <1>; num-lanes = <1>; diff --git a/arch/arm/boot/dts/dra76x.dtsi b/arch/arm/boot/dts/dra76x.dtsi index 2f7539afef2be823a3fb732b31d19b86d12e9e13..42b8a205b64f8abac147f8da63716ce1fb1026a9 100644 --- a/arch/arm/boot/dts/dra76x.dtsi +++ b/arch/arm/boot/dts/dra76x.dtsi @@ -128,3 +128,8 @@ &usb4_tm { status = "disabled"; }; + +&mmc3 { + /* dra76x is not affected by i887 */ + max-frequency = <96000000>; +}; diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi index 55cef4cac5f16f5c858b8c04d7c4c4895d1d6fef..dc0a93bccbf1e2aa5413e2ef6f14ab0b609eb421 100644 --- a/arch/arm/boot/dts/dra7xx-clocks.dtsi +++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi @@ -796,16 +796,6 @@ clock-div = <1>; }; - ipu1_gfclk_mux: ipu1_gfclk_mux@520 { - #clock-cells = <0>; - compatible = "ti,mux-clock"; - clocks = 
<&dpll_abe_m2x2_ck>, <&dpll_core_h22x2_ck>; - ti,bit-shift = <24>; - reg = <0x0520>; - assigned-clocks = <&ipu1_gfclk_mux>; - assigned-clock-parents = <&dpll_core_h22x2_ck>; - }; - dummy_ck: dummy_ck { #clock-cells = <0>; compatible = "fixed-clock"; @@ -1564,6 +1554,8 @@ compatible = "ti,clkctrl"; reg = <0x20 0x4>; #clock-cells = <2>; + assigned-clocks = <&ipu1_clkctrl DRA7_IPU1_MMU_IPU1_CLKCTRL 24>; + assigned-clock-parents = <&dpll_core_h22x2_ck>; }; ipu_clkctrl: ipu-clkctrl@50 { diff --git a/arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi b/arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi index 31719c079d6729624a60a2bfc6008c4df75e9ff7..44f97546dd0aa163b71e70fa766d425b0c932d2f 100644 --- a/arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi +++ b/arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi @@ -33,7 +33,7 @@ }; }; - lcd_vdd3_reg: voltage-regulator-6 { + lcd_vdd3_reg: voltage-regulator-7 { compatible = "regulator-fixed"; regulator-name = "LCD_VDD_2.2V"; regulator-min-microvolt = <2200000>; @@ -42,7 +42,7 @@ enable-active-high; }; - ps_als_reg: voltage-regulator-7 { + ps_als_reg: voltage-regulator-8 { compatible = "regulator-fixed"; regulator-name = "LED_A_3.0V"; regulator-min-microvolt = <3000000>; diff --git a/arch/arm/boot/dts/exynos4412-n710x.dts b/arch/arm/boot/dts/exynos4412-n710x.dts index 98cd1284cd90d03e353b4b8c6de5e9ec1a0c2bb5..4189e1fb204c1683fc2a1f374edd75caa23830af 100644 --- a/arch/arm/boot/dts/exynos4412-n710x.dts +++ b/arch/arm/boot/dts/exynos4412-n710x.dts @@ -13,7 +13,7 @@ /* bootargs are passed in by bootloader */ - cam_vdda_reg: voltage-regulator-6 { + cam_vdda_reg: voltage-regulator-7 { compatible = "regulator-fixed"; regulator-name = "CAM_SENSOR_CORE_1.2V"; regulator-min-microvolt = <1200000>; diff --git a/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts b/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts index cd075621de52dfa73b925ed9a35d1f828a9cda8e..84fcc203a2e48179c4ff27395a0a5e15746aaddb 100644 --- a/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts +++ b/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts @@ -275,7 +275,7 @@ /* SRAM on Colibri nEXT_CS0 */ sram@0,0 { - compatible = "cypress,cy7c1019dv33-10zsxi, mtd-ram"; + compatible = "cypress,cy7c1019dv33-10zsxi", "mtd-ram"; reg = <0 0 0x00010000>; #address-cells = <1>; #size-cells = <1>; @@ -286,7 +286,7 @@ /* SRAM on Colibri nEXT_CS1 */ sram@1,0 { - compatible = "cypress,cy7c1019dv33-10zsxi, mtd-ram"; + compatible = "cypress,cy7c1019dv33-10zsxi", "mtd-ram"; reg = <1 0 0x00010000>; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi index 978dc1c2ff1b8b3c9b0dd76a99ed911f8994790c..77d871340eb7422dfc49cfb0b7b5408f3b7f72ba 100644 --- a/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi +++ b/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi @@ -112,7 +112,7 @@ regulators { vdd_arm: buck1 { regulator-name = "vdd_arm"; - regulator-min-microvolt = <730000>; + regulator-min-microvolt = <925000>; regulator-max-microvolt = <1380000>; regulator-initial-mode = ; regulator-always-on; @@ -120,7 +120,7 @@ vdd_soc: buck2 { regulator-name = "vdd_soc"; - regulator-min-microvolt = <730000>; + regulator-min-microvolt = <1150000>; regulator-max-microvolt = <1380000>; regulator-initial-mode = ; regulator-always-on; @@ -192,7 +192,6 @@ pinctrl-0 = <&pinctrl_usdhc4>; bus-width = <8>; non-removable; - vmmc-supply = <&vdd_emmc_1p8>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/imx7-colibri.dtsi b/arch/arm/boot/dts/imx7-colibri.dtsi index 
d05be3f0e2a722f4165b6d2450e1fcf397e7ef02..04717cf69db07cd4049e52aefce6ef3542df3f3c 100644 --- a/arch/arm/boot/dts/imx7-colibri.dtsi +++ b/arch/arm/boot/dts/imx7-colibri.dtsi @@ -336,7 +336,6 @@ assigned-clock-rates = <400000000>; bus-width = <8>; fsl,tuning-step = <2>; - max-frequency = <100000000>; vmmc-supply = <®_module_3v3>; vqmmc-supply = <®_DCDC3>; non-removable; diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi index 92f6d0c2a74f6544912f211f055c6083748b8138..4c22828df55f31522b2708e7aa353312857215ab 100644 --- a/arch/arm/boot/dts/imx7d.dtsi +++ b/arch/arm/boot/dts/imx7d.dtsi @@ -44,7 +44,7 @@ opp-hz = /bits/ 64 <792000000>; opp-microvolt = <1000000>; clock-latency-ns = <150000>; - opp-supported-hw = <0xd>, <0xf>; + opp-supported-hw = <0xd>, <0x7>; opp-suspend; }; @@ -52,7 +52,7 @@ opp-hz = /bits/ 64 <996000000>; opp-microvolt = <1100000>; clock-latency-ns = <150000>; - opp-supported-hw = <0xc>, <0xf>; + opp-supported-hw = <0xc>, <0x7>; opp-suspend; }; @@ -60,7 +60,7 @@ opp-hz = /bits/ 64 <1200000000>; opp-microvolt = <1225000>; clock-latency-ns = <150000>; - opp-supported-hw = <0x8>, <0xf>; + opp-supported-hw = <0x8>, <0x3>; opp-suspend; }; }; diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi index 0855b1fe98e0d2ffc04ac48b940c6c6ef1f868d1..760a68c163c834213e396c55620152ce97967dd2 100644 --- a/arch/arm/boot/dts/ls1021a.dtsi +++ b/arch/arm/boot/dts/ls1021a.dtsi @@ -747,7 +747,7 @@ }; mdio0: mdio@2d24000 { - compatible = "fsl,etsec2-mdio"; + compatible = "gianfar"; device_type = "mdio"; #address-cells = <1>; #size-cells = <0>; @@ -756,7 +756,7 @@ }; mdio1: mdio@2d64000 { - compatible = "fsl,etsec2-mdio"; + compatible = "gianfar"; device_type = "mdio"; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm/boot/dts/motorola-mapphone-common.dtsi b/arch/arm/boot/dts/motorola-mapphone-common.dtsi index 85665506f4f80142162342c49b7c6a1e6aa27eee..9067e0ef4240f846e1ff4b7cc2ac80fb246cbb53 100644 --- a/arch/arm/boot/dts/motorola-mapphone-common.dtsi +++ b/arch/arm/boot/dts/motorola-mapphone-common.dtsi @@ -182,6 +182,14 @@ pwm-names = "enable", "direction"; direction-duty-cycle-ns = <10000000>; }; + + backlight: backlight { + compatible = "led-backlight"; + + leds = <&backlight_led>; + brightness-levels = <31 63 95 127 159 191 223 255>; + default-brightness-level = <6>; + }; }; &dss { @@ -205,6 +213,8 @@ vddi-supply = <&lcd_regulator>; reset-gpios = <&gpio4 5 GPIO_ACTIVE_HIGH>; /* gpio101 */ + backlight = <&backlight>; + width-mm = <50>; height-mm = <89>; @@ -393,12 +403,11 @@ ramp-up-us = <1024>; ramp-down-us = <8193>; - led@0 { + backlight_led: led@0 { reg = <0>; led-sources = <2>; ti,led-mode = <0>; label = ":backlight"; - linux,default-trigger = "backlight"; }; led@1 { @@ -420,7 +429,7 @@ reset-gpios = <&gpio6 13 GPIO_ACTIVE_HIGH>; /* gpio173 */ /* gpio_183 with sys_nirq2 pad as wakeup */ - interrupts-extended = <&gpio6 23 IRQ_TYPE_EDGE_FALLING>, + interrupts-extended = <&gpio6 23 IRQ_TYPE_LEVEL_LOW>, <&omap4_pmx_core 0x160>; interrupt-names = "irq", "wakeup"; wakeup-source; diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index c3c6d7d04a76c2416983b97c1f8f8c788ed3fcc0..4089d97405c950e0148180334d0e5fbe88154df8 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts @@ -854,34 +854,46 @@ compatible = "ti,omap2-onenand"; reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */ + /* + * These timings are based on CONFIG_OMAP_GPMC_DEBUG=y reported + * bootloader set values when 
booted with v5.1 + * (OneNAND Manufacturer: Samsung): + * + * cs0 GPMC_CS_CONFIG1: 0xfb001202 + * cs0 GPMC_CS_CONFIG2: 0x00111100 + * cs0 GPMC_CS_CONFIG3: 0x00020200 + * cs0 GPMC_CS_CONFIG4: 0x11001102 + * cs0 GPMC_CS_CONFIG5: 0x03101616 + * cs0 GPMC_CS_CONFIG6: 0x90060000 + */ gpmc,sync-read; gpmc,sync-write; gpmc,burst-length = <16>; gpmc,burst-read; gpmc,burst-wrap; gpmc,burst-write; - gpmc,device-width = <2>; /* GPMC_DEVWIDTH_16BIT */ - gpmc,mux-add-data = <2>; /* GPMC_MUX_AD */ + gpmc,device-width = <2>; + gpmc,mux-add-data = <2>; gpmc,cs-on-ns = <0>; - gpmc,cs-rd-off-ns = <87>; - gpmc,cs-wr-off-ns = <87>; + gpmc,cs-rd-off-ns = <102>; + gpmc,cs-wr-off-ns = <102>; gpmc,adv-on-ns = <0>; - gpmc,adv-rd-off-ns = <10>; - gpmc,adv-wr-off-ns = <10>; - gpmc,oe-on-ns = <15>; - gpmc,oe-off-ns = <87>; + gpmc,adv-rd-off-ns = <12>; + gpmc,adv-wr-off-ns = <12>; + gpmc,oe-on-ns = <12>; + gpmc,oe-off-ns = <102>; gpmc,we-on-ns = <0>; - gpmc,we-off-ns = <87>; - gpmc,rd-cycle-ns = <112>; - gpmc,wr-cycle-ns = <112>; - gpmc,access-ns = <81>; - gpmc,page-burst-access-ns = <15>; + gpmc,we-off-ns = <102>; + gpmc,rd-cycle-ns = <132>; + gpmc,wr-cycle-ns = <132>; + gpmc,access-ns = <96>; + gpmc,page-burst-access-ns = <18>; gpmc,bus-turnaround-ns = <0>; gpmc,cycle2cycle-delay-ns = <0>; gpmc,wait-monitoring-ns = <0>; - gpmc,clk-activation-ns = <5>; - gpmc,wr-data-mux-bus-ns = <30>; - gpmc,wr-access-ns = <81>; + gpmc,clk-activation-ns = <6>; + gpmc,wr-data-mux-bus-ns = <36>; + gpmc,wr-access-ns = <96>; gpmc,sync-clk-ps = <15000>; /* diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi index d0ecf54d5a2386ec5fbc1085b606431388f4a0af..a7562d3deb1a2ca3856db96601fe2c886ae85cd6 100644 --- a/arch/arm/boot/dts/omap5.dtsi +++ b/arch/arm/boot/dts/omap5.dtsi @@ -143,6 +143,7 @@ #address-cells = <1>; #size-cells = <1>; ranges = <0 0 0 0xc0000000>; + dma-ranges = <0x80000000 0x0 0x80000000 0x80000000>; ti,hwmods = "l3_main_1", "l3_main_2", "l3_main_3"; reg = <0 0x44000000 0 0x2000>, <0 0x44800000 0 0x3000>, diff --git a/arch/arm/boot/dts/ox810se.dtsi b/arch/arm/boot/dts/ox810se.dtsi index 9f6c2b660ed391b762fe8cdad3ad4715206f38fe..0755e5864c4a372caf9f95b60f9cde575bfc2a65 100644 --- a/arch/arm/boot/dts/ox810se.dtsi +++ b/arch/arm/boot/dts/ox810se.dtsi @@ -323,8 +323,8 @@ interrupt-controller; reg = <0 0x200>; #interrupt-cells = <1>; - valid-mask = <0xFFFFFFFF>; - clear-mask = <0>; + valid-mask = <0xffffffff>; + clear-mask = <0xffffffff>; }; timer0: timer@200 { diff --git a/arch/arm/boot/dts/ox820.dtsi b/arch/arm/boot/dts/ox820.dtsi index c9b32773206393dbbd5787b97d3caf54f588bc25..90846a7655b49aa70b97806176c01752c3159cce 100644 --- a/arch/arm/boot/dts/ox820.dtsi +++ b/arch/arm/boot/dts/ox820.dtsi @@ -240,8 +240,8 @@ reg = <0 0x200>; interrupts = ; #interrupt-cells = <1>; - valid-mask = <0xFFFFFFFF>; - clear-mask = <0>; + valid-mask = <0xffffffff>; + clear-mask = <0xffffffff>; }; timer0: timer@200 { diff --git a/arch/arm/boot/dts/r8a7779.dtsi b/arch/arm/boot/dts/r8a7779.dtsi index beb9885e6ffca7a521ffb670f44ddd8be639a287..c0999e27e9b145e31ae341af3709fbd6d1a7a933 100644 --- a/arch/arm/boot/dts/r8a7779.dtsi +++ b/arch/arm/boot/dts/r8a7779.dtsi @@ -377,7 +377,7 @@ }; sata: sata@fc600000 { - compatible = "renesas,sata-r8a7779", "renesas,rcar-sata"; + compatible = "renesas,sata-r8a7779"; reg = <0xfc600000 0x200000>; interrupts = ; clocks = <&mstp1_clks R8A7779_CLK_SATA>; diff --git a/arch/arm/boot/dts/sun8i-a33.dtsi b/arch/arm/boot/dts/sun8i-a33.dtsi index 
1532a0e59af4d28810d994aae73b7de566c2cb3c..a2c37adacf773fb0c897343be1db220dc7af5551 100644 --- a/arch/arm/boot/dts/sun8i-a33.dtsi +++ b/arch/arm/boot/dts/sun8i-a33.dtsi @@ -215,7 +215,7 @@ }; crypto: crypto-engine@1c15000 { - compatible = "allwinner,sun4i-a10-crypto"; + compatible = "allwinner,sun8i-a33-crypto"; reg = <0x01c15000 0x1000>; interrupts = ; clocks = <&ccu CLK_BUS_SS>, <&ccu CLK_SS>; diff --git a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts index 2fd31a0a0b344e90c4c6efb53b90b85fa90da85f..e8b3669e0e5d8bb5c29cbade1848b89fe50bd3e6 100644 --- a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts +++ b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts @@ -374,8 +374,8 @@ }; ®_dldo3 { - regulator-min-microvolt = <2800000>; - regulator-max-microvolt = <2800000>; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; regulator-name = "vdd-csi"; }; @@ -498,7 +498,8 @@ }; &usbphy { - usb0_id_det-gpios = <&pio 7 11 GPIO_ACTIVE_HIGH>; /* PH11 */ + usb0_id_det-gpios = <&pio 7 11 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PH11 */ + usb0_vbus_power-supply = <&usb_power_supply>; usb0_vbus-supply = <®_drivevbus>; usb1_vbus-supply = <®_vmain>; usb2_vbus-supply = <®_vmain>; diff --git a/arch/arm/boot/dts/sun8i-a83t.dtsi b/arch/arm/boot/dts/sun8i-a83t.dtsi index 74ac7ee9383cf89ca1d60e543332ab8c71748722..e7b9bef1be6b61734c3880528c8ee680fa8887c5 100644 --- a/arch/arm/boot/dts/sun8i-a83t.dtsi +++ b/arch/arm/boot/dts/sun8i-a83t.dtsi @@ -1006,10 +1006,10 @@ reg = <0x01c30000 0x104>; interrupts = ; interrupt-names = "macirq"; - resets = <&ccu CLK_BUS_EMAC>; - reset-names = "stmmaceth"; - clocks = <&ccu RST_BUS_EMAC>; + clocks = <&ccu CLK_BUS_EMAC>; clock-names = "stmmaceth"; + resets = <&ccu RST_BUS_EMAC>; + reset-names = "stmmaceth"; status = "disabled"; mdio: mdio { diff --git a/arch/arm/boot/dts/sun8i-r40.dtsi b/arch/arm/boot/dts/sun8i-r40.dtsi index 8f09a24b36ec080e5823fe7768a3b4eae7722742..a9d5d6ddbd71d2b8535f906024a9934fecba9657 100644 --- a/arch/arm/boot/dts/sun8i-r40.dtsi +++ b/arch/arm/boot/dts/sun8i-r40.dtsi @@ -181,6 +181,32 @@ interrupts = ; }; + spi0: spi@1c05000 { + compatible = "allwinner,sun8i-r40-spi", + "allwinner,sun8i-h3-spi"; + reg = <0x01c05000 0x1000>; + interrupts = ; + clocks = <&ccu CLK_BUS_SPI0>, <&ccu CLK_SPI0>; + clock-names = "ahb", "mod"; + resets = <&ccu RST_BUS_SPI0>; + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; + }; + + spi1: spi@1c06000 { + compatible = "allwinner,sun8i-r40-spi", + "allwinner,sun8i-h3-spi"; + reg = <0x01c06000 0x1000>; + interrupts = ; + clocks = <&ccu CLK_BUS_SPI1>, <&ccu CLK_SPI1>; + clock-names = "ahb", "mod"; + resets = <&ccu RST_BUS_SPI1>; + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; + }; + csi0: csi@1c09000 { compatible = "allwinner,sun8i-r40-csi0", "allwinner,sun7i-a20-csi0"; @@ -290,6 +316,29 @@ resets = <&ccu RST_BUS_CE>; }; + spi2: spi@1c17000 { + compatible = "allwinner,sun8i-r40-spi", + "allwinner,sun8i-h3-spi"; + reg = <0x01c17000 0x1000>; + interrupts = ; + clocks = <&ccu CLK_BUS_SPI2>, <&ccu CLK_SPI2>; + clock-names = "ahb", "mod"; + resets = <&ccu RST_BUS_SPI2>; + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; + }; + + ahci: sata@1c18000 { + compatible = "allwinner,sun8i-r40-ahci"; + reg = <0x01c18000 0x1000>; + interrupts = ; + clocks = <&ccu CLK_BUS_SATA>, <&ccu CLK_SATA>; + resets = <&ccu RST_BUS_SATA>; + reset-names = "ahci"; + status = "disabled"; + }; + ehci1: usb@1c19000 { compatible = "allwinner,sun8i-r40-ehci", 
"generic-ehci"; reg = <0x01c19000 0x100>; @@ -336,6 +385,19 @@ status = "disabled"; }; + spi3: spi@1c1f000 { + compatible = "allwinner,sun8i-r40-spi", + "allwinner,sun8i-h3-spi"; + reg = <0x01c1f000 0x1000>; + interrupts = ; + clocks = <&ccu CLK_BUS_SPI3>, <&ccu CLK_SPI3>; + clock-names = "ahb", "mod"; + resets = <&ccu RST_BUS_SPI3>; + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; + }; + ccu: clock@1c20000 { compatible = "allwinner,sun8i-r40-ccu"; reg = <0x01c20000 0x400>; @@ -653,69 +715,6 @@ #size-cells = <0>; }; - spi0: spi@1c05000 { - compatible = "allwinner,sun8i-r40-spi", - "allwinner,sun8i-h3-spi"; - reg = <0x01c05000 0x1000>; - interrupts = ; - clocks = <&ccu CLK_BUS_SPI0>, <&ccu CLK_SPI0>; - clock-names = "ahb", "mod"; - resets = <&ccu RST_BUS_SPI0>; - status = "disabled"; - #address-cells = <1>; - #size-cells = <0>; - }; - - spi1: spi@1c06000 { - compatible = "allwinner,sun8i-r40-spi", - "allwinner,sun8i-h3-spi"; - reg = <0x01c06000 0x1000>; - interrupts = ; - clocks = <&ccu CLK_BUS_SPI1>, <&ccu CLK_SPI1>; - clock-names = "ahb", "mod"; - resets = <&ccu RST_BUS_SPI1>; - status = "disabled"; - #address-cells = <1>; - #size-cells = <0>; - }; - - spi2: spi@1c07000 { - compatible = "allwinner,sun8i-r40-spi", - "allwinner,sun8i-h3-spi"; - reg = <0x01c07000 0x1000>; - interrupts = ; - clocks = <&ccu CLK_BUS_SPI2>, <&ccu CLK_SPI2>; - clock-names = "ahb", "mod"; - resets = <&ccu RST_BUS_SPI2>; - status = "disabled"; - #address-cells = <1>; - #size-cells = <0>; - }; - - spi3: spi@1c0f000 { - compatible = "allwinner,sun8i-r40-spi", - "allwinner,sun8i-h3-spi"; - reg = <0x01c0f000 0x1000>; - interrupts = ; - clocks = <&ccu CLK_BUS_SPI3>, <&ccu CLK_SPI3>; - clock-names = "ahb", "mod"; - resets = <&ccu RST_BUS_SPI3>; - status = "disabled"; - #address-cells = <1>; - #size-cells = <0>; - }; - - ahci: sata@1c18000 { - compatible = "allwinner,sun8i-r40-ahci"; - reg = <0x01c18000 0x1000>; - interrupts = ; - clocks = <&ccu CLK_BUS_SATA>, <&ccu CLK_SATA>; - resets = <&ccu RST_BUS_SATA>; - reset-names = "ahci"; - status = "disabled"; - - }; - gmac: ethernet@1c50000 { compatible = "allwinner,sun8i-r40-gmac"; syscon = <&ccu>; diff --git a/arch/arm/configs/bcm2835_defconfig b/arch/arm/configs/bcm2835_defconfig index 519ff58e67b3057b4b506f8e0a5b9c964a527eb8..0afcae9f7cf8a0411e53ebd658d9a0efd93b9c49 100644 --- a/arch/arm/configs/bcm2835_defconfig +++ b/arch/arm/configs/bcm2835_defconfig @@ -178,6 +178,7 @@ CONFIG_SCHED_TRACER=y CONFIG_STACK_TRACER=y CONFIG_FUNCTION_PROFILER=y CONFIG_TEST_KSTRTOX=y +CONFIG_DEBUG_FS=y CONFIG_KGDB=y CONFIG_KGDB_KDB=y CONFIG_STRICT_DEVMEM=y diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index b9698e217aa7d311139c416eaae330c5141142e1..54f1a21de7e0dc69f0e22efd78a6cb818dc0c679 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -374,6 +374,7 @@ CONFIG_BACKLIGHT_GENERIC=m CONFIG_BACKLIGHT_PWM=m CONFIG_BACKLIGHT_PANDORA=m CONFIG_BACKLIGHT_GPIO=m +CONFIG_BACKLIGHT_LED=m CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y CONFIG_LOGO=y diff --git a/arch/arm/configs/socfpga_defconfig b/arch/arm/configs/socfpga_defconfig index fe2e1e82e23399dacdc4dbe6a68fd1957fd0f4df..e73c97b0f5b09b898b69e51d171c9f5d7fcec48d 100644 --- a/arch/arm/configs/socfpga_defconfig +++ b/arch/arm/configs/socfpga_defconfig @@ -157,6 +157,7 @@ CONFIG_NLS_ISO8859_1=y CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO=y CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_FS=y CONFIG_DETECT_HUNG_TASK=y # CONFIG_SCHED_DEBUG is 
not set CONFIG_FUNCTION_TRACER=y diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index c3314b286a61ea6feac86430fd576e834156dbe8..a827b4d60d389f3936a8185c4769f01fe9397226 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -392,9 +392,6 @@ static inline void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) {} static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {} static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {} -static inline void kvm_arm_vhe_guest_enter(void) {} -static inline void kvm_arm_vhe_guest_exit(void) {} - #define KVM_BP_HARDEN_UNKNOWN -1 #define KVM_BP_HARDEN_WA_NEEDED 0 #define KVM_BP_HARDEN_NOT_REQUIRED 1 diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c index c89ac1b9d28b2969facdd11ca57b64189caf5d5a..e0330a25e1c6df38f672b0c662fe801278088110 100644 --- a/arch/arm/kernel/vdso.c +++ b/arch/arm/kernel/vdso.c @@ -94,6 +94,8 @@ static bool __init cntvct_functional(void) * this. */ np = of_find_compatible_node(NULL, NULL, "arm,armv7-timer"); + if (!np) + np = of_find_compatible_node(NULL, NULL, "arm,armv8-timer"); if (!np) goto out_put; diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S index 95b2e1ce559cb198016b5281d2e3b463b3ad0fac..f8016e3db65d7f628327ed7600f24943c210ea7f 100644 --- a/arch/arm/lib/copy_from_user.S +++ b/arch/arm/lib/copy_from_user.S @@ -118,7 +118,7 @@ ENTRY(arm_copy_from_user) ENDPROC(arm_copy_from_user) - .pushsection .fixup,"ax" + .pushsection .text.fixup,"ax" .align 0 copy_abort_preamble ldmfd sp!, {r1, r2, r3} diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile index 35ff620537e62b3fab1759730a53b6cb8d4a5d9b..03506ce4614934675e772cce4a74386ead086fae 100644 --- a/arch/arm/mach-imx/Makefile +++ b/arch/arm/mach-imx/Makefile @@ -91,6 +91,8 @@ AFLAGS_suspend-imx6.o :=-Wa,-march=armv7-a obj-$(CONFIG_SOC_IMX6) += suspend-imx6.o obj-$(CONFIG_SOC_IMX53) += suspend-imx53.o endif +AFLAGS_resume-imx6.o :=-Wa,-march=armv7-a +obj-$(CONFIG_SOC_IMX6) += resume-imx6.o obj-$(CONFIG_SOC_IMX6) += pm-imx6.o obj-$(CONFIG_SOC_IMX1) += mach-imx1.o diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h index 912aeceb4ff81cc8cb2c41c2fd91b2757032335d..5aa5796cff0e19280208379ceae42df22da4c452 100644 --- a/arch/arm/mach-imx/common.h +++ b/arch/arm/mach-imx/common.h @@ -109,17 +109,17 @@ void imx_cpu_die(unsigned int cpu); int imx_cpu_kill(unsigned int cpu); #ifdef CONFIG_SUSPEND -void v7_cpu_resume(void); void imx53_suspend(void __iomem *ocram_vbase); extern const u32 imx53_suspend_sz; void imx6_suspend(void __iomem *ocram_vbase); #else -static inline void v7_cpu_resume(void) {} static inline void imx53_suspend(void __iomem *ocram_vbase) {} static const u32 imx53_suspend_sz; static inline void imx6_suspend(void __iomem *ocram_vbase) {} #endif +void v7_cpu_resume(void); + void imx6_pm_ccm_init(const char *ccm_compat); void imx6q_pm_init(void); void imx6dl_pm_init(void); diff --git a/arch/arm/mach-imx/resume-imx6.S b/arch/arm/mach-imx/resume-imx6.S new file mode 100644 index 0000000000000000000000000000000000000000..5bd1ba7ef15b61cb98d1bd2d3af4e85acc5ec632 --- /dev/null +++ b/arch/arm/mach-imx/resume-imx6.S @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright 2014 Freescale Semiconductor, Inc. 
+ */ + +#include +#include +#include +#include +#include "hardware.h" + +/* + * The following code must assume it is running from physical address + * where absolute virtual addresses to the data section have to be + * turned into relative ones. + */ + +ENTRY(v7_cpu_resume) + bl v7_invalidate_l1 +#ifdef CONFIG_CACHE_L2X0 + bl l2c310_early_resume +#endif + b cpu_resume +ENDPROC(v7_cpu_resume) diff --git a/arch/arm/mach-imx/suspend-imx6.S b/arch/arm/mach-imx/suspend-imx6.S index 062391ff13dae207b35c15fb3a56e9c1c65b70a5..1eabf2d2834be79c6a6a695262ca4f1af2edcfc2 100644 --- a/arch/arm/mach-imx/suspend-imx6.S +++ b/arch/arm/mach-imx/suspend-imx6.S @@ -327,17 +327,3 @@ resume: ret lr ENDPROC(imx6_suspend) - -/* - * The following code must assume it is running from physical address - * where absolute virtual addresses to the data section have to be - * turned into relative ones. - */ - -ENTRY(v7_cpu_resume) - bl v7_invalidate_l1 -#ifdef CONFIG_CACHE_L2X0 - bl l2c310_early_resume -#endif - b cpu_resume -ENDPROC(v7_cpu_resume) diff --git a/arch/arm/mach-meson/Kconfig b/arch/arm/mach-meson/Kconfig index 01f0f4b765e00c980c855389256681d9d7293912..75034fe197e3be0d04e7043d14312dbfa0107f69 100644 --- a/arch/arm/mach-meson/Kconfig +++ b/arch/arm/mach-meson/Kconfig @@ -9,7 +9,6 @@ menuconfig ARCH_MESON select CACHE_L2X0 select PINCTRL select PINCTRL_MESON - select COMMON_CLK select HAVE_ARM_SCU if SMP select HAVE_ARM_TWD if SMP diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index e1135b9d67c65c3e2080d36d2b54b40033bd5b4f..5017a3be0ff016f8c33afabad821caca8f25a9ca 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile @@ -16,7 +16,7 @@ hwmod-common = omap_hwmod.o omap_hwmod_reset.o \ clock-common = clock.o secure-common = omap-smc.o omap-secure.o -obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common) $(secure-common) +obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common) obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(hwmod-common) $(secure-common) obj-$(CONFIG_ARCH_OMAP4) += $(hwmod-common) $(secure-common) obj-$(CONFIG_SOC_AM33XX) += $(hwmod-common) $(secure-common) diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c index f280472336657dbbb1fe544d27e08ddfc2277cfa..27608d1026cbced98b22f25678d4e92827cd8758 100644 --- a/arch/arm/mach-omap2/io.c +++ b/arch/arm/mach-omap2/io.c @@ -431,7 +431,6 @@ void __init omap2420_init_early(void) omap_hwmod_init_postsetup(); omap_clk_soc_init = omap2420_dt_clk_init; rate_table = omap2420_rate_table; - omap_secure_init(); } void __init omap2420_init_late(void) @@ -456,7 +455,6 @@ void __init omap2430_init_early(void) omap_hwmod_init_postsetup(); omap_clk_soc_init = omap2430_dt_clk_init; rate_table = omap2430_rate_table; - omap_secure_init(); } void __init omap2430_init_late(void) diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts index f82f25c1a5f97be47d4593be9850ac0c5c7f0f32..d5dc12878dfe495b136b894ed45ebd3e07f929dd 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts @@ -327,7 +327,7 @@ #size-cells = <0>; bus-width = <4>; - max-frequency = <50000000>; + max-frequency = <60000000>; non-removable; disable-wp; diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts index a8bb3fa9fec98e994ce2876b95e81e86b8369c02..cb1b48f5b8b1cdf4d4c5cb98198c2df1006cf177 100644 --- 
a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts +++ b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts @@ -593,6 +593,7 @@ compatible = "brcm,bcm43438-bt"; interrupt-parent = <&gpio_intc>; interrupts = <95 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "host-wakeup"; shutdown-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>; max-speed = <2000000>; clocks = <&wifi32k>; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi index 0bf375ec959b3dbd0d4735bc7bb6523d22793e78..55b71bb4baf878c58b6c35f91741205d66dc922d 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi @@ -53,7 +53,7 @@ * PSCI node is not added default, U-boot will add missing * parts if it determines to use PSCI. */ - entry-method = "arm,psci"; + entry-method = "psci"; CPU_PW20: cpu-pw20 { compatible = "arm,idle-state"; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi index 6082ae02213642878e5a659c51fc6fe7fd506df2..d237162a874462604ba4a170cf068809d0d34836 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi @@ -20,6 +20,8 @@ }; &fman0 { + fsl,erratum-a050385; + /* these aliases provide the FMan ports mapping */ enet0: ethernet@e0000 { }; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts index 4223a2352d45adda2b39b427ca8e2313d2446df7..dde50c88f5e35c47d2c3daa1e8b86c45bfb55cc1 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts +++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts @@ -119,12 +119,12 @@ ethernet@e4000 { phy-handle = <&rgmii_phy1>; - phy-connection-type = "rgmii-txid"; + phy-connection-type = "rgmii-id"; }; ethernet@e6000 { phy-handle = <&rgmii_phy2>; - phy-connection-type = "rgmii-txid"; + phy-connection-type = "rgmii-id"; }; ethernet@e8000 { diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts index dbc23d6cd3b442b58f27b515fccd5bbb09820e95..d53ccc56bb639f7e8b638c36a4b0dd9bb2e5f940 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts +++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts @@ -131,12 +131,12 @@ &fman0 { ethernet@e4000 { phy-handle = <&rgmii_phy1>; - phy-connection-type = "rgmii"; + phy-connection-type = "rgmii-id"; }; ethernet@e6000 { phy-handle = <&rgmii_phy2>; - phy-connection-type = "rgmii"; + phy-connection-type = "rgmii-id"; }; ethernet@e8000 { diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts index d3d26cca7d526c63b8a6f5d4be201c9cfbbfb60c..13460a360c6af7bc0dba24184c20523b332184ce 100644 --- a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts +++ b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts @@ -52,11 +52,6 @@ compatible = "ethernet-phy-ieee802.3-c22"; reg = <0>; }; - - ethphy1: ethernet-phy@1 { - compatible = "ethernet-phy-ieee802.3-c22"; - reg = <1>; - }; }; }; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi index e1d357eaad7c364333d826cc45d00e2d9fca8ab2..d8c44d3ca15acb7ab2751533972eeab372803419 100644 --- a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi +++ b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi @@ -102,7 +102,7 @@ }; gmac0: ethernet@ff800000 { - compatible = "altr,socfpga-stmmac", "snps,dwmac-3.74a", "snps,dwmac"; + compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", 
"snps,dwmac"; reg = <0xff800000 0x2000>; interrupts = <0 90 4>; interrupt-names = "macirq"; @@ -118,7 +118,7 @@ }; gmac1: ethernet@ff802000 { - compatible = "altr,socfpga-stmmac", "snps,dwmac-3.74a", "snps,dwmac"; + compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac"; reg = <0xff802000 0x2000>; interrupts = <0 91 4>; interrupt-names = "macirq"; @@ -134,7 +134,7 @@ }; gmac2: ethernet@ff804000 { - compatible = "altr,socfpga-stmmac", "snps,dwmac-3.74a", "snps,dwmac"; + compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac"; reg = <0xff804000 0x2000>; interrupts = <0 92 4>; interrupt-names = "macirq"; diff --git a/arch/arm64/boot/dts/sprd/sc9863a.dtsi b/arch/arm64/boot/dts/sprd/sc9863a.dtsi index cd80756c888d2f9d76ba11854ae18253728a7851..2c590ca1d079460d1c4f9ee7e203ad3c8041a4ec 100644 --- a/arch/arm64/boot/dts/sprd/sc9863a.dtsi +++ b/arch/arm64/boot/dts/sprd/sc9863a.dtsi @@ -108,7 +108,7 @@ }; idle-states { - entry-method = "arm,psci"; + entry-method = "psci"; CORE_PD: core-pd { compatible = "arm,idle-state"; entry-latency-us = <4000>; diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 905109f6814ff9e026c994d87dc65d6bf4370dac..4db223dbc549936c1dcef9b0832731c28a32112a 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -773,7 +773,7 @@ CONFIG_ARCH_R8A774A1=y CONFIG_ARCH_R8A774B1=y CONFIG_ARCH_R8A774C0=y CONFIG_ARCH_R8A7795=y -CONFIG_ARCH_R8A7796=y +CONFIG_ARCH_R8A77960=y CONFIG_ARCH_R8A77961=y CONFIG_ARCH_R8A77965=y CONFIG_ARCH_R8A77970=y diff --git a/arch/arm64/crypto/chacha-neon-glue.c b/arch/arm64/crypto/chacha-neon-glue.c index c1f9660d104cfc57365e42edc23867034b65fcfd..37ca3e8898484572d37ae0222cf13100c4dce952 100644 --- a/arch/arm64/crypto/chacha-neon-glue.c +++ b/arch/arm64/crypto/chacha-neon-glue.c @@ -55,10 +55,10 @@ static void chacha_doneon(u32 *state, u8 *dst, const u8 *src, break; } chacha_4block_xor_neon(state, dst, src, nrounds, l); - bytes -= CHACHA_BLOCK_SIZE * 5; - src += CHACHA_BLOCK_SIZE * 5; - dst += CHACHA_BLOCK_SIZE * 5; - state[12] += 5; + bytes -= l; + src += l; + dst += l; + state[12] += DIV_ROUND_UP(l, CHACHA_BLOCK_SIZE); } } diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h index 324e7d5ab37edca2f17334b9734bcffc5d813da8..5e5dc05d63a06473050a1a55c7b0763484d73ebb 100644 --- a/arch/arm64/include/asm/alternative.h +++ b/arch/arm64/include/asm/alternative.h @@ -221,7 +221,7 @@ alternative_endif .macro user_alt, label, oldinstr, newinstr, cond 9999: alternative_insn "\oldinstr", "\newinstr", \cond - _ASM_EXTABLE 9999b, \label + _asm_extable 9999b, \label .endm /* diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h index 25fec4bde43af6f66bbb0e170a888d154eb82535..a358e97572c14c58d55d732926cf3f211a71a410 100644 --- a/arch/arm64/include/asm/arch_gicv3.h +++ b/arch/arm64/include/asm/arch_gicv3.h @@ -32,7 +32,7 @@ static inline void gic_write_eoir(u32 irq) isb(); } -static inline void gic_write_dir(u32 irq) +static __always_inline void gic_write_dir(u32 irq) { write_sysreg_s(irq, SYS_ICC_DIR_EL1); isb(); diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h index 806e9dc2a852a43d54f9eeb89122684553971b16..a4d1b5f771f6baebd64c3c291be937e84eddd397 100644 --- a/arch/arm64/include/asm/cache.h +++ b/arch/arm64/include/asm/cache.h @@ -69,7 +69,7 @@ static inline int icache_is_aliasing(void) return test_bit(ICACHEF_ALIASING, &__icache_flags); } -static inline int icache_is_vpipt(void) +static 
__always_inline int icache_is_vpipt(void) { return test_bit(ICACHEF_VPIPT, &__icache_flags); } diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index 665c78e0665a65ea89f40442bef31b55bebf9147..e6cca3d4acf702f060bb35996ead0762566de7b6 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -145,7 +145,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *, #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 extern void flush_dcache_page(struct page *); -static inline void __flush_icache_all(void) +static __always_inline void __flush_icache_all(void) { if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC)) return; diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 92ef9539874a663b3bd693c4e834a5cf134a21d6..2a746b99e937f489cac93452fe4a11dfbc1c17d8 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -435,13 +435,13 @@ cpuid_feature_extract_signed_field(u64 features, int field) return cpuid_feature_extract_signed_field_width(features, field, 4); } -static inline unsigned int __attribute_const__ +static __always_inline unsigned int __attribute_const__ cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width) { return (u64)(features << (64 - width - field)) >> (64 - width); } -static inline unsigned int __attribute_const__ +static __always_inline unsigned int __attribute_const__ cpuid_feature_extract_unsigned_field(u64 features, int field) { return cpuid_feature_extract_unsigned_field_width(features, field, 4); @@ -564,7 +564,7 @@ static inline bool system_supports_mixed_endian(void) return val == 0x1; } -static inline bool system_supports_fpsimd(void) +static __always_inline bool system_supports_fpsimd(void) { return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD); } @@ -575,13 +575,13 @@ static inline bool system_uses_ttbr0_pan(void) !cpus_have_const_cap(ARM64_HAS_PAN); } -static inline bool system_supports_sve(void) +static __always_inline bool system_supports_sve(void) { return IS_ENABLED(CONFIG_ARM64_SVE) && cpus_have_const_cap(ARM64_SVE); } -static inline bool system_supports_cnp(void) +static __always_inline bool system_supports_cnp(void) { return IS_ENABLED(CONFIG_ARM64_CNP) && cpus_have_const_cap(ARM64_HAS_CNP); diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 4e531f57147d122aec8c87a800a08e33400b3c51..6facd1308e7c28c54ef35e762652f7cdb364322e 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -34,7 +34,7 @@ static inline void __raw_writew(u16 val, volatile void __iomem *addr) } #define __raw_writel __raw_writel -static inline void __raw_writel(u32 val, volatile void __iomem *addr) +static __always_inline void __raw_writel(u32 val, volatile void __iomem *addr) { asm volatile("str %w0, [%1]" : : "rZ" (val), "r" (addr)); } @@ -69,7 +69,7 @@ static inline u16 __raw_readw(const volatile void __iomem *addr) } #define __raw_readl __raw_readl -static inline u32 __raw_readl(const volatile void __iomem *addr) +static __always_inline u32 __raw_readl(const volatile void __iomem *addr) { u32 val; asm volatile(ALTERNATIVE("ldr %w0, [%1]", diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 688c63412cc27922aa2b5faba8d8fc808bef674b..f658dda123645f53462586cf9b4798e8349177cb 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -36,7 +36,7 @@ void kvm_inject_undef32(struct kvm_vcpu *vcpu); void 
kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr); void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr); -static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu) +static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu) { return !(vcpu->arch.hcr_el2 & HCR_RW); } @@ -127,7 +127,7 @@ static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr) vcpu->arch.vsesr_el2 = vsesr; } -static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu) +static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu) { return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc; } @@ -153,17 +153,17 @@ static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long *__vcpu_elr_el1(vcpu) = v; } -static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu) +static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu) { return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate; } -static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu) +static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu) { return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT); } -static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu) +static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu) { if (vcpu_mode_is_32bit(vcpu)) return kvm_condition_valid32(vcpu); @@ -181,13 +181,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on * AArch32 with banked registers. */ -static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu, +static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu, u8 reg_num) { return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num]; } -static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num, +static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num, unsigned long val) { if (reg_num != 31) @@ -264,12 +264,12 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu) return mode != PSR_MODE_EL0t; } -static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu) +static __always_inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu) { return vcpu->arch.fault.esr_el2; } -static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu) +static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu) { u32 esr = kvm_vcpu_get_hsr(vcpu); @@ -279,12 +279,12 @@ static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu) return -1; } -static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu) +static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu) { return vcpu->arch.fault.far_el2; } -static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu) +static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu) { return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8; } @@ -299,7 +299,7 @@ static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu) return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK; } -static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu) +static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu) { return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV); } @@ -319,17 +319,17 @@ static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu) return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF); } -static inline int kvm_vcpu_dabt_get_rd(const struct 
kvm_vcpu *vcpu) +static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu) { return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT; } -static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu) +static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu) { return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW); } -static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu) +static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu) { return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) || kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */ @@ -340,18 +340,18 @@ static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu) return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM); } -static inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu) +static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu) { return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT); } /* This one is not specific to Data Abort */ -static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu) +static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu) { return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL); } -static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu) +static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu) { return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu)); } @@ -361,17 +361,17 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu) return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW; } -static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu) +static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu) { return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC; } -static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu) +static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu) { return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE; } -static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu) +static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu) { switch (kvm_vcpu_trap_get_fault(vcpu)) { case FSC_SEA: @@ -390,7 +390,7 @@ static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu) } } -static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu) +static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu) { u32 esr = kvm_vcpu_get_hsr(vcpu); return ESR_ELx_SYS64_ISS_RT(esr); @@ -504,7 +504,7 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu, return data; /* Leave LE untouched */ } -static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr) +static __always_inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr) { if (vcpu_mode_is_32bit(vcpu)) kvm_skip_instr32(vcpu, is_wide_instr); @@ -519,7 +519,7 @@ static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr) * Skip an instruction which has been emulated at hyp while most guest sysregs * are live. 
*/ -static inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu) +static __always_inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu) { *vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR); vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR); diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index d87aa609d2b6f3e49a5b44ccd523b89753289846..57fd46acd05823ed650a34bf16d7412013f7607c 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -626,38 +626,6 @@ static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {} static inline void kvm_clr_pmu_events(u32 clr) {} #endif -static inline void kvm_arm_vhe_guest_enter(void) -{ - local_daif_mask(); - - /* - * Having IRQs masked via PMR when entering the guest means the GIC - * will not signal the CPU of interrupts of lower priority, and the - * only way to get out will be via guest exceptions. - * Naturally, we want to avoid this. - * - * local_daif_mask() already sets GIC_PRIO_PSR_I_SET, we just need a - * dsb to ensure the redistributor is forwards EL2 IRQs to the CPU. - */ - pmr_sync(); -} - -static inline void kvm_arm_vhe_guest_exit(void) -{ - /* - * local_daif_restore() takes care to properly restore PSTATE.DAIF - * and the GIC PMR if the host is using IRQ priorities. - */ - local_daif_restore(DAIF_PROCCTX_NOIRQ); - - /* - * When we exit from the guest we change a number of CPU configuration - * parameters, such as traps. Make sure these changes take effect - * before running the host or additional guests. - */ - isb(); -} - #define KVM_BP_HARDEN_UNKNOWN -1 #define KVM_BP_HARDEN_WA_NEEDED 0 #define KVM_BP_HARDEN_NOT_REQUIRED 1 diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index a3a6a2ba9a635efd7feec4542f4c0b281052521c..fe57f60f06a8944fff74b2e3f1ed5de6d1790626 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -47,6 +47,13 @@ #define read_sysreg_el2(r) read_sysreg_elx(r, _EL2, _EL1) #define write_sysreg_el2(v,r) write_sysreg_elx(v, r, _EL2, _EL1) +/* + * Without an __arch_swab32(), we fall back to ___constant_swab32(), but the + * static inline can allow the compiler to out-of-line this. KVM always wants + * the macro version as it's always inlined.
+ */ +#define __kvm_swab32(x) ___constant_swab32(x) + int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu); void __vgic_v3_save_state(struct kvm_vcpu *vcpu); diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 53d846f1bfe70497b81d2457c9a6eefe543fc61f..785762860c63fd5c435f575cffdf22f2c69d8b27 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -93,7 +93,7 @@ void kvm_update_va_mask(struct alt_instr *alt, __le32 *origptr, __le32 *updptr, int nr_inst); void kvm_compute_layout(void); -static inline unsigned long __kern_hyp_va(unsigned long v) +static __always_inline unsigned long __kern_hyp_va(unsigned long v) { asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n" "ror %0, %0, #1\n" @@ -473,6 +473,7 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa, extern void *__kvm_bp_vect_base; extern int __kvm_harden_el2_vector_slot; +/* This is only called on a VHE system */ static inline void *kvm_get_hyp_vector(void) { struct bp_hardening_data *data = arm64_get_bp_hardening_data(); diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h index d429f7701c3670101b62540f79c65ec711cba4d1..5d10051c3e62e839808d9ee0a56e0274d934589a 100644 --- a/arch/arm64/include/asm/lse.h +++ b/arch/arm64/include/asm/lse.h @@ -6,7 +6,7 @@ #ifdef CONFIG_ARM64_LSE_ATOMICS -#define __LSE_PREAMBLE ".arch armv8-a+lse\n" +#define __LSE_PREAMBLE ".arch_extension lse\n" #include #include diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index a4f9ca5479b063a012ec400c5ee623594d44e693..4d94676e5a8b6b08e0043b09bf7728ec1eb15642 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -213,7 +213,7 @@ static inline unsigned long kaslr_offset(void) ((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55)) #define untagged_addr(addr) ({ \ - u64 __addr = (__force u64)addr; \ + u64 __addr = (__force u64)(addr); \ __addr &= __untagged_addr(__addr); \ (__force __typeof__(addr))__addr; \ }) diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index e4d862420bb4051714f8052b01d84e817678f857..d79ce6df9e127c16fda0b11a64716674f16419e7 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -29,11 +29,9 @@ typedef struct { */ #define ASID(mm) ((mm)->context.id.counter & 0xffff) -extern bool arm64_use_ng_mappings; - static inline bool arm64_kernel_unmapped_at_el0(void) { - return arm64_use_ng_mappings; + return cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0); } typedef void (*bp_hardening_cb_t)(void); diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index 6f87839f02491b5161452ea76b05eeef47821ed6..1305e28225fc77bb5c0a8a2bbd6ed8540934c835 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -23,11 +23,13 @@ #include +extern bool arm64_use_ng_mappings; + #define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) #define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) -#define PTE_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PTE_NG : 0) -#define PMD_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0) +#define PTE_MAYBE_NG (arm64_use_ng_mappings ? PTE_NG : 0) +#define PMD_MAYBE_NG (arm64_use_ng_mappings ? 
PMD_SECT_NG : 0) #define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG) #define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG) diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index 1dd22da1c3a95985ad22331c5f59975edefedf6a..803039d504de609b188e62abd2fc102f00d73db5 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h @@ -25,8 +25,8 @@ #define __NR_compat_gettimeofday 78 #define __NR_compat_sigreturn 119 #define __NR_compat_rt_sigreturn 173 -#define __NR_compat_clock_getres 247 #define __NR_compat_clock_gettime 263 +#define __NR_compat_clock_getres 264 #define __NR_compat_clock_gettime64 403 #define __NR_compat_clock_getres_time64 406 diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h index 0958ed6191aa344dcc84c965cc1f1e410a53540f..61fd26752adcb3bf19f55a7d6e3f03b8581f686e 100644 --- a/arch/arm64/include/asm/virt.h +++ b/arch/arm64/include/asm/virt.h @@ -83,7 +83,7 @@ static inline bool is_kernel_in_hyp_mode(void) return read_sysreg(CurrentEL) == CurrentEL_EL2; } -static inline bool has_vhe(void) +static __always_inline bool has_vhe(void) { if (cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN)) return true; diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index d4ed9a19d8fe7f8a63946edbfb934bbe8d58a442..5407bf5d98ac5ec4a0a19be90bb638525b0d2325 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -958,11 +958,22 @@ void tick_broadcast(const struct cpumask *mask) } #endif +/* + * The number of CPUs online, not counting this CPU (which may not be + * fully online and so not counted in num_online_cpus()). + */ +static inline unsigned int num_other_online_cpus(void) +{ + unsigned int this_cpu_online = cpu_online(smp_processor_id()); + + return num_online_cpus() - this_cpu_online; +} + void smp_send_stop(void) { unsigned long timeout; - if (num_online_cpus() > 1) { + if (num_other_online_cpus()) { cpumask_t mask; cpumask_copy(&mask, cpu_online_mask); @@ -975,10 +986,10 @@ void smp_send_stop(void) /* Wait up to one second for other CPUs to stop */ timeout = USEC_PER_SEC; - while (num_online_cpus() > 1 && timeout--) + while (num_other_online_cpus() && timeout--) udelay(1); - if (num_online_cpus() > 1) + if (num_other_online_cpus()) pr_warn("SMP: failed to stop secondary CPUs %*pbl\n", cpumask_pr_args(cpu_online_mask)); @@ -1001,7 +1012,11 @@ void crash_smp_send_stop(void) cpus_stopped = 1; - if (num_online_cpus() == 1) { + /* + * If this cpu is the only one alive at this point in time, online or + * not, there are no stop messages to be sent around, so just back out. 
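+ * num_other_online_cpus() never counts the calling CPU, so a zero result + * is decisive whether or not this CPU is still marked online.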
+ */ + if (num_other_online_cpus() == 0) { sdei_mask_local_cpu(); return; } @@ -1009,7 +1024,7 @@ void crash_smp_send_stop(void) cpumask_copy(&mask, cpu_online_mask); cpumask_clear_cpu(smp_processor_id(), &mask); - atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); + atomic_set(&waiting_for_crash_ipi, num_other_online_cpus()); pr_crit("SMP: stopping secondary CPUs\n"); smp_cross_call(&mask, IPI_CPU_CRASH_STOP); diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index dfe8dd1725128405a1661a946782d812695d2d15..925086b46136f3cefd629ce28a9995a21bf38b71 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -625,7 +625,7 @@ static void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt) } /* Switch to the guest for VHE systems running in EL2 */ -int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) +static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) { struct kvm_cpu_context *host_ctxt; struct kvm_cpu_context *guest_ctxt; @@ -678,7 +678,42 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) return exit_code; } -NOKPROBE_SYMBOL(kvm_vcpu_run_vhe); +NOKPROBE_SYMBOL(__kvm_vcpu_run_vhe); + +int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) +{ + int ret; + + local_daif_mask(); + + /* + * Having IRQs masked via PMR when entering the guest means the GIC + * will not signal the CPU of interrupts of lower priority, and the + * only way to get out will be via guest exceptions. + * Naturally, we want to avoid this. + * + * local_daif_mask() already sets GIC_PRIO_PSR_I_SET, we just need a + * dsb to ensure the redistributor forwards EL2 IRQs to the CPU. + */ + pmr_sync(); + + ret = __kvm_vcpu_run_vhe(vcpu); + + /* + * local_daif_restore() takes care to properly restore PSTATE.DAIF + * and the GIC PMR if the host is using IRQ priorities. + */ + local_daif_restore(DAIF_PROCCTX_NOIRQ); + + /* + * When we exit from the guest we change a number of CPU configuration + * parameters, such as traps. Make sure these changes take effect + * before running the host or additional guests.
+ */ + isb(); + + return ret; +} /* Switch to the guest for legacy non-VHE systems */ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) diff --git a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c index 29ee1feba4eb7de12d55d8c8a99f64e2577e73e5..4f3a087e36d51c6e53d2fedc9370b1edcdbca0c9 100644 --- a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c +++ b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c @@ -69,14 +69,14 @@ int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu) u32 data = vcpu_get_reg(vcpu, rd); if (__is_be(vcpu)) { /* guest pre-swabbed data, undo this for writel() */ - data = swab32(data); + data = __kvm_swab32(data); } writel_relaxed(data, addr); } else { u32 data = readl_relaxed(addr); if (__is_be(vcpu)) { /* guest expects swabbed data */ - data = swab32(data); + data = __kvm_swab32(data); } vcpu_set_reg(vcpu, rd, data); } diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index 8ef73e89d51485950b0bc05689c64120b8fedebc..d89bb22589f660fb34a5786a243167245d0f4754 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -260,14 +260,26 @@ asmlinkage void post_ttbr_update_workaround(void) CONFIG_CAVIUM_ERRATUM_27456)); } -static int asids_init(void) +static int asids_update_limit(void) { - asid_bits = get_cpu_asid_bits(); + unsigned long num_available_asids = NUM_USER_ASIDS; + + if (arm64_kernel_unmapped_at_el0()) + num_available_asids /= 2; /* * Expect allocation after rollover to fail if we don't have at least * one more ASID than CPUs. ASID #0 is reserved for init_mm. */ - WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus()); + WARN_ON(num_available_asids - 1 <= num_possible_cpus()); + pr_info("ASID allocator initialised with %lu entries\n", + num_available_asids); + return 0; +} +arch_initcall(asids_update_limit); + +static int asids_init(void) +{ + asid_bits = get_cpu_asid_bits(); atomic64_set(&asid_generation, ASID_FIRST_VERSION); asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map), GFP_KERNEL); @@ -282,8 +294,6 @@ static int asids_init(void) */ if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) set_kpti_asid_bits(); - - pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS); return 0; } early_initcall(asids_init); diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig index da09c884cc305f20ba912c3b1ecd3b9ea54f8bbc..047427f71d835a6022023c7d69e7a42864d9caac 100644 --- a/arch/csky/Kconfig +++ b/arch/csky/Kconfig @@ -9,7 +9,6 @@ config CSKY select ARCH_USE_QUEUED_RWLOCKS if NR_CPUS>2 select COMMON_CLK select CLKSRC_MMIO - select CLKSRC_OF select CSKY_MPINTC if CPU_CK860 select CSKY_MP_TIMER if CPU_CK860 select CSKY_APB_INTC @@ -37,6 +36,7 @@ config CSKY select GX6605S_TIMER if CPU_CK610 select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_AUDITSYSCALL + select HAVE_COPY_THREAD_TLS select HAVE_DYNAMIC_FTRACE select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_GRAPH_TRACER @@ -47,8 +47,8 @@ config CSKY select HAVE_PERF_EVENTS select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP - select HAVE_DMA_API_DEBUG select HAVE_DMA_CONTIGUOUS + select HAVE_STACKPROTECTOR select HAVE_SYSCALL_TRACEPOINTS select MAY_HAVE_SPARSE_IRQ select MODULES_USE_ELF_RELA if MODULES @@ -59,6 +59,11 @@ config CSKY select TIMER_OF select USB_ARCH_HAS_EHCI select USB_ARCH_HAS_OHCI + select GENERIC_PCI_IOMAP + select HAVE_PCI + select PCI_DOMAINS_GENERIC if PCI + select PCI_SYSCALL if PCI + select PCI_MSI if PCI config CPU_HAS_CACHEV2 bool @@ -75,7 +80,7 @@ config CPU_HAS_TLBI config CPU_HAS_LDSTEX bool help - For SMP, CPU needs 
"ldex&stex" instrcutions to atomic operations. + For SMP, CPU needs "ldex&stex" instructions for atomic operations. config CPU_NEED_TLBSYNC bool @@ -188,6 +193,40 @@ config CPU_PM_STOP bool "stop" endchoice +menuconfig HAVE_TCM + bool "Tightly-Coupled/Sram Memory" + select GENERIC_ALLOCATOR + help + The implementation are not only used by TCM (Tightly-Coupled Meory) + but also used by sram on SOC bus. It follow existed linux tcm + software interface, so that old tcm application codes could be + re-used directly. + +if HAVE_TCM +config ITCM_RAM_BASE + hex "ITCM ram base" + default 0xffffffff + +config ITCM_NR_PAGES + int "Page count of ITCM size: NR*4KB" + range 1 256 + default 32 + +config HAVE_DTCM + bool "DTCM Support" + +config DTCM_RAM_BASE + hex "DTCM ram base" + depends on HAVE_DTCM + default 0xffffffff + +config DTCM_NR_PAGES + int "Page count of DTCM size: NR*4KB" + depends on HAVE_DTCM + range 1 256 + default 32 +endif + config CPU_HAS_VDSP bool "CPU has VDSP coprocessor" depends on CPU_HAS_FPU && CPU_HAS_FPUV2 @@ -196,6 +235,10 @@ config CPU_HAS_FPU bool "CPU has FPU coprocessor" depends on CPU_CK807 || CPU_CK810 || CPU_CK860 +config CPU_HAS_ICACHE_INS + bool "CPU has Icache invalidate instructions" + depends on CPU_HAS_CACHEV2 + config CPU_HAS_TEE bool "CPU has Trusted Execution Environment" depends on CPU_CK810 @@ -235,4 +278,6 @@ config HOTPLUG_CPU Say N if you want to disable CPU hotplug. endmenu +source "arch/csky/Kconfig.platforms" + source "kernel/Kconfig.hz" diff --git a/arch/csky/Kconfig.platforms b/arch/csky/Kconfig.platforms new file mode 100644 index 0000000000000000000000000000000000000000..639e17f4eacbae24c00a45d90e5a576666fded69 --- /dev/null +++ b/arch/csky/Kconfig.platforms @@ -0,0 +1,9 @@ +menu "Platform drivers selection" + +config ARCH_CSKY_DW_APB_ICTL + bool "Select dw-apb interrupt controller" + select DW_APB_ICTL + default y + help + This enables support for snps dw-apb-ictl +endmenu diff --git a/arch/csky/abiv1/inc/abi/cacheflush.h b/arch/csky/abiv1/inc/abi/cacheflush.h index 79ef9e8c1afddc1a520fb56cd2817be38e4cae4c..d3e04208d53c23e36eee552e89e484ef81d8f2c5 100644 --- a/arch/csky/abiv1/inc/abi/cacheflush.h +++ b/arch/csky/abiv1/inc/abi/cacheflush.h @@ -48,9 +48,8 @@ extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, u #define flush_icache_page(vma, page) do {} while (0); #define flush_icache_range(start, end) cache_wbinv_range(start, end) - -#define flush_icache_user_range(vma,page,addr,len) \ - flush_dcache_page(page) +#define flush_icache_mm_range(mm, start, end) cache_wbinv_range(start, end) +#define flush_icache_deferred(mm) do {} while (0); #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ do { \ diff --git a/arch/csky/abiv1/inc/abi/entry.h b/arch/csky/abiv1/inc/abi/entry.h index 7ab78bd0f3b13f7e52be13445314ef956566fa73..f35a9f3315ee6f62b126eb54b46f72a90d22da41 100644 --- a/arch/csky/abiv1/inc/abi/entry.h +++ b/arch/csky/abiv1/inc/abi/entry.h @@ -16,14 +16,16 @@ #define LSAVE_A4 40 #define LSAVE_A5 44 +#define usp ss1 + .macro USPTOKSP - mtcr sp, ss1 + mtcr sp, usp mfcr sp, ss0 .endm .macro KSPTOUSP mtcr sp, ss0 - mfcr sp, ss1 + mfcr sp, usp .endm .macro SAVE_ALL epc_inc @@ -45,7 +47,13 @@ add lr, r13 stw lr, (sp, 8) + mov lr, sp + addi lr, 32 + addi lr, 32 + addi lr, 16 + bt 2f mfcr lr, ss1 +2: stw lr, (sp, 16) stw a0, (sp, 20) @@ -79,9 +87,10 @@ ldw a0, (sp, 12) mtcr a0, epsr btsti a0, 31 + bt 1f ldw a0, (sp, 16) mtcr a0, ss1 - +1: ldw a0, (sp, 24) ldw a1, (sp, 28) ldw a2, (sp, 32) @@ -102,9 +111,9 @@ addi 
sp, 32 addi sp, 8 - bt 1f + bt 2f KSPTOUSP -1: +2: rte .endm diff --git a/arch/csky/abiv2/cacheflush.c b/arch/csky/abiv2/cacheflush.c index 5bb887b275e1213e4e6e9e77b894a7ef490d06c7..790f1ebfba44ba925ccd94f1927b61e03137dbc6 100644 --- a/arch/csky/abiv2/cacheflush.c +++ b/arch/csky/abiv2/cacheflush.c @@ -6,46 +6,80 @@ #include #include -void flush_icache_page(struct vm_area_struct *vma, struct page *page) +void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, + pte_t *pte) { - unsigned long start; + unsigned long addr; + struct page *page; - start = (unsigned long) kmap_atomic(page); + page = pfn_to_page(pte_pfn(*pte)); + if (page == ZERO_PAGE(0)) + return; - cache_wbinv_range(start, start + PAGE_SIZE); + if (test_and_set_bit(PG_dcache_clean, &page->flags)) + return; - kunmap_atomic((void *)start); -} + addr = (unsigned long) kmap_atomic(page); -void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, - unsigned long vaddr, int len) -{ - unsigned long kaddr; + dcache_wb_range(addr, addr + PAGE_SIZE); - kaddr = (unsigned long) kmap_atomic(page) + (vaddr & ~PAGE_MASK); + if (vma->vm_flags & VM_EXEC) + icache_inv_range(addr, addr + PAGE_SIZE); + + kunmap_atomic((void *) addr); +} - cache_wbinv_range(kaddr, kaddr + len); +void flush_icache_deferred(struct mm_struct *mm) +{ + unsigned int cpu = smp_processor_id(); + cpumask_t *mask = &mm->context.icache_stale_mask; - kunmap_atomic((void *)kaddr); + if (cpumask_test_cpu(cpu, mask)) { + cpumask_clear_cpu(cpu, mask); + /* + * Ensure the remote hart's writes are visible to this hart. + * This pairs with a barrier in flush_icache_mm. + */ + smp_mb(); + local_icache_inv_all(NULL); + } } -void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, - pte_t *pte) +void flush_icache_mm_range(struct mm_struct *mm, + unsigned long start, unsigned long end) { - unsigned long addr, pfn; - struct page *page; + unsigned int cpu; + cpumask_t others, *mask; - pfn = pte_pfn(*pte); - if (unlikely(!pfn_valid(pfn))) - return; + preempt_disable(); - page = pfn_to_page(pfn); - if (page == ZERO_PAGE(0)) +#ifdef CONFIG_CPU_HAS_ICACHE_INS + if (mm == current->mm) { + icache_inv_range(start, end); + preempt_enable(); return; + } +#endif - addr = (unsigned long) kmap_atomic(page); + /* Mark every hart's icache as needing a flush for this MM. */ + mask = &mm->context.icache_stale_mask; + cpumask_setall(mask); - cache_wbinv_range(addr, addr + PAGE_SIZE); + /* Flush this hart's I$ now, and mark it as flushed. */ + cpu = smp_processor_id(); + cpumask_clear_cpu(cpu, mask); + local_icache_inv_all(NULL); - kunmap_atomic((void *) addr); + /* + * Flush the I$ of other harts concurrently executing, and mark them as + * flushed. 
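+	 * Harts skipped here keep their bit set in icache_stale_mask, and +	 * flush_icache_deferred() performs the invalidate on their next +	 * switch_mm().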
+ */ + cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu)); + + if (mm != current->active_mm || !cpumask_empty(&others)) { + on_each_cpu_mask(&others, local_icache_inv_all, NULL, 1); + cpumask_clear(mask); + } + + preempt_enable(); } diff --git a/arch/csky/abiv2/inc/abi/cacheflush.h b/arch/csky/abiv2/inc/abi/cacheflush.h index b8db5e0b2fe3c6448907398cba030c7d9abb9223..a565e00c3f70b2e51420cf61650c59cf68c199dc 100644 --- a/arch/csky/abiv2/inc/abi/cacheflush.h +++ b/arch/csky/abiv2/inc/abi/cacheflush.h @@ -13,24 +13,27 @@ #define flush_cache_all() do { } while (0) #define flush_cache_mm(mm) do { } while (0) #define flush_cache_dup_mm(mm) do { } while (0) +#define flush_cache_range(vma, start, end) do { } while (0) +#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) -#define flush_cache_range(vma, start, end) \ - do { \ - if (vma->vm_flags & VM_EXEC) \ - icache_inv_all(); \ - } while (0) +#define PG_dcache_clean PG_arch_1 + +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 +static inline void flush_dcache_page(struct page *page) +{ + if (test_bit(PG_dcache_clean, &page->flags)) + clear_bit(PG_dcache_clean, &page->flags); +} -#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) -#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 -#define flush_dcache_page(page) do { } while (0) #define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0) +#define flush_icache_page(vma, page) do { } while (0) #define flush_icache_range(start, end) cache_wbinv_range(start, end) -void flush_icache_page(struct vm_area_struct *vma, struct page *page); -void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, - unsigned long vaddr, int len); +void flush_icache_mm_range(struct mm_struct *mm, + unsigned long start, unsigned long end); +void flush_icache_deferred(struct mm_struct *mm); #define flush_cache_vmap(start, end) do { } while (0) #define flush_cache_vunmap(start, end) do { } while (0) @@ -38,7 +41,13 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ do { \ memcpy(dst, src, len); \ - cache_wbinv_range((unsigned long)dst, (unsigned long)dst + len); \ + if (vma->vm_flags & VM_EXEC) { \ + dcache_wb_range((unsigned long)dst, \ + (unsigned long)dst + len); \ + flush_icache_mm_range(current->mm, \ + (unsigned long)dst, \ + (unsigned long)dst + len); \ + } \ } while (0) #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ memcpy(dst, src, len) diff --git a/arch/csky/abiv2/inc/abi/entry.h b/arch/csky/abiv2/inc/abi/entry.h index 9897a16b45e5dcc75b4e6638a8ecd1153be661e9..94a7a58765dffe6a0b28bb1acb36bf6c380fbbb6 100644 --- a/arch/csky/abiv2/inc/abi/entry.h +++ b/arch/csky/abiv2/inc/abi/entry.h @@ -31,7 +31,13 @@ mfcr lr, epsr stw lr, (sp, 12) + btsti lr, 31 + bf 1f + addi lr, sp, 152 + br 2f +1: mfcr lr, usp +2: stw lr, (sp, 16) stw a0, (sp, 20) @@ -64,8 +70,10 @@ mtcr a0, epc ldw a0, (sp, 12) mtcr a0, epsr + btsti a0, 31 ldw a0, (sp, 16) mtcr a0, usp + mtcr a0, ss0 #ifdef CONFIG_CPU_HAS_HILO ldw a0, (sp, 140) @@ -86,6 +94,9 @@ addi sp, 40 ldm r16-r30, (sp) addi sp, 72 + bf 1f + mfcr sp, ss0 +1: rte .endm diff --git a/arch/csky/configs/defconfig b/arch/csky/configs/defconfig index 7ef42895dfb03b294c77b0f9687054cfe9d94571..af722e4dfb47d8239c969b25834e9b231a9b4244 100644 --- a/arch/csky/configs/defconfig +++ b/arch/csky/configs/defconfig @@ -10,9 +10,6 @@ CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_MODULES=y 
CONFIG_MODULE_UNLOAD=y -CONFIG_DEFAULT_DEADLINE=y -CONFIG_CPU_CK807=y -CONFIG_CPU_HAS_FPU=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -27,10 +24,7 @@ CONFIG_SERIAL_NONSTANDARD=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_OF_PLATFORM=y -CONFIG_TTY_PRINTK=y # CONFIG_VGA_CONSOLE is not set -CONFIG_CSKY_MPTIMER=y -CONFIG_GX6605S_TIMER=y CONFIG_PM_DEVFREQ=y CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y CONFIG_DEVFREQ_GOV_PERFORMANCE=y @@ -56,6 +50,4 @@ CONFIG_CRAMFS=y CONFIG_ROMFS_FS=y CONFIG_NFS_FS=y CONFIG_PRINTK_TIME=y -CONFIG_DEBUG_INFO=y -CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y diff --git a/arch/csky/include/asm/Kbuild b/arch/csky/include/asm/Kbuild index bc15a26c782f9268fc47d1617594881d8d9bec29..4130e3eaa766711685f776b8b22cb234edce5379 100644 --- a/arch/csky/include/asm/Kbuild +++ b/arch/csky/include/asm/Kbuild @@ -28,7 +28,6 @@ generic-y += local64.h generic-y += mm-arch-hooks.h generic-y += mmiowb.h generic-y += module.h -generic-y += pci.h generic-y += percpu.h generic-y += preempt.h generic-y += qrwlock.h diff --git a/arch/csky/include/asm/cache.h b/arch/csky/include/asm/cache.h index 1d5fc2f78fd7e8c634b87e61eb9219fbc4f67ba3..4b5c09bf1d25e3a9b9720bd58f99effcdab09610 100644 --- a/arch/csky/include/asm/cache.h +++ b/arch/csky/include/asm/cache.h @@ -16,6 +16,7 @@ void dcache_wb_line(unsigned long start); void icache_inv_range(unsigned long start, unsigned long end); void icache_inv_all(void); +void local_icache_inv_all(void *priv); void dcache_wb_range(unsigned long start, unsigned long end); void dcache_wbinv_all(void); diff --git a/arch/csky/include/asm/cacheflush.h b/arch/csky/include/asm/cacheflush.h index a96da67261ae51fd67f48291489ef709874ac148..f0b8f25429a27f723c191888f624b53c0d778529 100644 --- a/arch/csky/include/asm/cacheflush.h +++ b/arch/csky/include/asm/cacheflush.h @@ -4,6 +4,7 @@ #ifndef __ASM_CSKY_CACHEFLUSH_H #define __ASM_CSKY_CACHEFLUSH_H +#include #include #endif /* __ASM_CSKY_CACHEFLUSH_H */ diff --git a/arch/csky/include/asm/fixmap.h b/arch/csky/include/asm/fixmap.h index 380ff0a307df02095f1585d021984a7167007b2b..81f9477d5330c95e2c964294fb782a5aea3aeaf0 100644 --- a/arch/csky/include/asm/fixmap.h +++ b/arch/csky/include/asm/fixmap.h @@ -5,12 +5,16 @@ #define __ASM_CSKY_FIXMAP_H #include +#include #ifdef CONFIG_HIGHMEM #include #include #endif enum fixed_addresses { +#ifdef CONFIG_HAVE_TCM + FIX_TCM = TCM_NR_PAGES, +#endif #ifdef CONFIG_HIGHMEM FIX_KMAP_BEGIN, FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1, @@ -18,10 +22,13 @@ enum fixed_addresses { __end_of_fixed_addresses }; -#define FIXADDR_TOP 0xffffc000 #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) #include +extern void fixrange_init(unsigned long start, unsigned long end, + pgd_t *pgd_base); +extern void __init fixaddr_init(void); + #endif /* __ASM_CSKY_FIXMAP_H */ diff --git a/arch/csky/include/asm/memory.h b/arch/csky/include/asm/memory.h new file mode 100644 index 0000000000000000000000000000000000000000..a65c6759f53753307774b38d7e22d02939f6bde5 --- /dev/null +++ b/arch/csky/include/asm/memory.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_MEMORY_H +#define __ASM_CSKY_MEMORY_H + +#include +#include +#include +#include + +#define FIXADDR_TOP _AC(0xffffc000, UL) +#define PKMAP_BASE _AC(0xff800000, UL) +#define VMALLOC_START _AC(0xc0008000, UL) +#define VMALLOC_END (PKMAP_BASE - (PAGE_SIZE * 2)) + +#ifdef CONFIG_HAVE_TCM +#ifdef CONFIG_HAVE_DTCM +#define TCM_NR_PAGES 
(CONFIG_ITCM_NR_PAGES + CONFIG_DTCM_NR_PAGES) +#else +#define TCM_NR_PAGES (CONFIG_ITCM_NR_PAGES) +#endif +#define FIXADDR_TCM _AC(FIXADDR_TOP - (TCM_NR_PAGES * PAGE_SIZE), UL) +#endif + +#endif diff --git a/arch/csky/include/asm/mmu.h b/arch/csky/include/asm/mmu.h index b382a14ea4ec72535393b4a6962b1f1020ab0b7e..26fbb1d15df08ea711f73efe26b39bc356312cc0 100644 --- a/arch/csky/include/asm/mmu.h +++ b/arch/csky/include/asm/mmu.h @@ -7,6 +7,7 @@ typedef struct { atomic64_t asid; void *vdso; + cpumask_t icache_stale_mask; } mm_context_t; #endif /* __ASM_CSKY_MMU_H */ diff --git a/arch/csky/include/asm/mmu_context.h b/arch/csky/include/asm/mmu_context.h index 0285b0ad18b6f2bce9276396461f9a1ffc2ed2f2..abdf1f1cb6ec991248ac17d7dc63881c479616ef 100644 --- a/arch/csky/include/asm/mmu_context.h +++ b/arch/csky/include/asm/mmu_context.h @@ -43,5 +43,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, TLBMISS_HANDLER_SETUP_PGD(next->pgd); write_mmu_entryhi(next->context.asid.counter); + + flush_icache_deferred(next); } #endif /* __ASM_CSKY_MMU_CONTEXT_H */ diff --git a/arch/csky/include/asm/pci.h b/arch/csky/include/asm/pci.h new file mode 100644 index 0000000000000000000000000000000000000000..ebc765b1f78b5f1f71b9eded7da3f60d2fcf25e1 --- /dev/null +++ b/arch/csky/include/asm/pci.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ASM_CSKY_PCI_H +#define __ASM_CSKY_PCI_H + +#include +#include +#include + +#include + +#define PCIBIOS_MIN_IO 0 +#define PCIBIOS_MIN_MEM 0 + +/* C-SKY shim does not initialize PCI bus */ +#define pcibios_assign_all_busses() 1 + +extern int isa_dma_bridge_buggy; + +#ifdef CONFIG_PCI +static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) +{ + /* no legacy IRQ on csky */ + return -ENODEV; +} + +static inline int pci_proc_domain(struct pci_bus *bus) +{ + /* always show the domain in /proc */ + return 1; +} +#endif /* CONFIG_PCI */ + +#endif /* __ASM_CSKY_PCI_H */ diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h index 4b2a41e15f2e4231b0c1905eb3ec38628359fe33..9b7764cb76450f8d205e0996047d2866ae55d0eb 100644 --- a/arch/csky/include/asm/pgtable.h +++ b/arch/csky/include/asm/pgtable.h @@ -5,6 +5,7 @@ #define __ASM_CSKY_PGTABLE_H #include +#include #include #include #include @@ -16,11 +17,6 @@ #define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE) #define FIRST_USER_ADDRESS 0UL -#define PKMAP_BASE (0xff800000) - -#define VMALLOC_START (0xc0008000) -#define VMALLOC_END (PKMAP_BASE - 2*PAGE_SIZE) - /* * C-SKY is two-level paging structure: */ diff --git a/arch/csky/include/asm/stackprotector.h b/arch/csky/include/asm/stackprotector.h new file mode 100644 index 0000000000000000000000000000000000000000..d7cd4e51edd965e0a3a67e62214074b799be701c --- /dev/null +++ b/arch/csky/include/asm/stackprotector.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_STACKPROTECTOR_H +#define _ASM_STACKPROTECTOR_H 1 + +#include +#include + +extern unsigned long __stack_chk_guard; + +/* + * Initialize the stackprotector canary value. + * + * NOTE: this must only be called from functions that never return, + * and it must always be inlined. + */ +static __always_inline void boot_init_stack_canary(void) +{ + unsigned long canary; + + /* Try to get a semi random initial value. 
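+	 * Mixing in LINUX_VERSION_CODE below varies the canary per kernel +	 * build, even if the entropy pool is poorly seeded this early in boot.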
*/ + get_random_bytes(&canary, sizeof(canary)); + canary ^= LINUX_VERSION_CODE; + canary &= CANARY_MASK; + + current->stack_canary = canary; + __stack_chk_guard = current->stack_canary; +} + +#endif /* _ASM_STACKPROTECTOR_H */ diff --git a/arch/csky/include/asm/tcm.h new file mode 100644 index 0000000000000000000000000000000000000000..2b135cefb73f2f49684cd5e9b92f403740f21a37 --- /dev/null +++ b/arch/csky/include/asm/tcm.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_CSKY_TCM_H +#define __ASM_CSKY_TCM_H + +#ifndef CONFIG_HAVE_TCM +#error "You should not be including tcm.h unless you have a TCM!" +#endif + +#include + +/* Tag variables with this */ +#define __tcmdata __section(.tcm.data) +/* Tag constants with this */ +#define __tcmconst __section(.tcm.rodata) +/* Tag functions inside TCM called from outside TCM with this */ +#define __tcmfunc __section(.tcm.text) noinline +/* Tag function inside TCM called from inside TCM with this */ +#define __tcmlocalfunc __section(.tcm.text) + +void *tcm_alloc(size_t len); +void tcm_free(void *addr, size_t len); + +#endif diff --git a/arch/csky/include/uapi/asm/unistd.h b/arch/csky/include/uapi/asm/unistd.h index 211c983c7282d13f3b75150e686c8e815fd7777b..ba40189297338d1de36b9cff54d76f0746b344c6 100644 --- a/arch/csky/include/uapi/asm/unistd.h +++ b/arch/csky/include/uapi/asm/unistd.h @@ -1,7 +1,10 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. +#define __ARCH_WANT_STAT64 +#define __ARCH_WANT_NEW_STAT #define __ARCH_WANT_SYS_CLONE +#define __ARCH_WANT_SYS_CLONE3 #define __ARCH_WANT_SET_GET_RLIMIT #define __ARCH_WANT_TIME32_SYSCALLS #include diff --git a/arch/csky/kernel/atomic.S b/arch/csky/kernel/atomic.S index 5b84f11485aeb8c6794699c54250cf8c54f5a91d..3821ef9b75672d8a5af90839ffa7f95cfdb4da50 100644 --- a/arch/csky/kernel/atomic.S +++ b/arch/csky/kernel/atomic.S @@ -17,10 +17,12 @@ ENTRY(csky_cmpxchg) mfcr a3, epc addi a3, TRAP0_SIZE - subi sp, 8 + subi sp, 16 stw a3, (sp, 0) mfcr a3, epsr stw a3, (sp, 4) + mfcr a3, usp + stw a3, (sp, 8) psrset ee #ifdef CONFIG_CPU_HAS_LDSTEX @@ -47,7 +49,9 @@ ENTRY(csky_cmpxchg) mtcr a3, epc ldw a3, (sp, 4) mtcr a3, epsr - addi sp, 8 + ldw a3, (sp, 8) + mtcr a3, usp + addi sp, 16 KSPTOUSP rte END(csky_cmpxchg) diff --git a/arch/csky/kernel/process.c b/arch/csky/kernel/process.c index f320d9248a225fe31941129ccd2b4225c47f36d1..f7b231ca269a0dbc2b5fd76dbffcb5429fb01c6d 100644 --- a/arch/csky/kernel/process.c +++ b/arch/csky/kernel/process.c @@ -16,6 +16,12 @@ struct cpuinfo_csky cpu_data[NR_CPUS]; +#ifdef CONFIG_STACKPROTECTOR +#include +unsigned long __stack_chk_guard __read_mostly; +EXPORT_SYMBOL(__stack_chk_guard); +#endif + asmlinkage void ret_from_fork(void); asmlinkage void ret_from_kernel_thread(void); @@ -34,10 +40,11 @@ unsigned long thread_saved_pc(struct task_struct *tsk) return sw->r15; } -int copy_thread(unsigned long clone_flags, +int copy_thread_tls(unsigned long clone_flags, unsigned long usp, unsigned long kthread_arg, - struct task_struct *p) + struct task_struct *p, + unsigned long tls) { struct switch_stack *childstack; struct pt_regs *childregs = task_pt_regs(p); @@ -64,7 +71,7 @@ int copy_thread(unsigned long clone_flags, childregs->usp = usp; if (clone_flags & CLONE_SETTLS) task_thread_info(p)->tp_value = childregs->tls - = childregs->regs[0]; + = tls; childregs->a0 = 0; childstack->r15 = (unsigned long) ret_from_fork; diff --git a/arch/csky/kernel/setup.c
b/arch/csky/kernel/setup.c index 52eaf31ba27fc95a44f8e4ce377d776110ad1142..3821e55742f46f0070688f1e81161aefb8760f5c 100644 --- a/arch/csky/kernel/setup.c +++ b/arch/csky/kernel/setup.c @@ -47,9 +47,6 @@ static void __init csky_memblock_init(void) signed long size; memblock_reserve(__pa(_stext), _end - _stext); -#ifdef CONFIG_BLK_DEV_INITRD - memblock_reserve(__pa(initrd_start), initrd_end - initrd_start); -#endif early_init_fdt_reserve_self(); early_init_fdt_scan_reserved_mem(); @@ -133,6 +130,8 @@ void __init setup_arch(char **cmdline_p) sparse_init(); + fixaddr_init(); + #ifdef CONFIG_HIGHMEM kmap_init(); #endif diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c index b753d382e4cef53f45350e1108518096934d826a..0bb0954d557090c359b7e80bd30957ad0826b196 100644 --- a/arch/csky/kernel/smp.c +++ b/arch/csky/kernel/smp.c @@ -120,7 +120,7 @@ void __init setup_smp_ipi(void) int rc; if (ipi_irq == 0) - panic("%s IRQ mapping failed\n", __func__); + return; rc = request_percpu_irq(ipi_irq, handle_ipi, "IPI Interrupt", &ipi_dummy_dev); diff --git a/arch/csky/kernel/time.c b/arch/csky/kernel/time.c index b5fc9447d93f2307f892ec0afa9fbda3e59a0294..52379d866fe45f2839afdcbca2216224fd455d33 100644 --- a/arch/csky/kernel/time.c +++ b/arch/csky/kernel/time.c @@ -1,8 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. -#include #include +#include void __init time_init(void) { diff --git a/arch/csky/kernel/vmlinux.lds.S b/arch/csky/kernel/vmlinux.lds.S index 2ff37beaf2bf38af39db8c44f58554e832f9909e..f05b413df32849f6000834e2445c1dbb194cdfc7 100644 --- a/arch/csky/kernel/vmlinux.lds.S +++ b/arch/csky/kernel/vmlinux.lds.S @@ -2,6 +2,7 @@ #include #include +#include OUTPUT_ARCH(csky) ENTRY(_start) @@ -53,6 +54,54 @@ SECTIONS RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) _edata = .; +#ifdef CONFIG_HAVE_TCM + .tcm_start : { + . = ALIGN(PAGE_SIZE); + __tcm_start = .; + } + + .text_data_tcm FIXADDR_TCM : AT(__tcm_start) + { + . = ALIGN(4); + __stcm_text_data = .; + *(.tcm.text) + *(.tcm.rodata) +#ifndef CONFIG_HAVE_DTCM + *(.tcm.data) +#endif + . = ALIGN(4); + __etcm_text_data = .; + } + + . = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_data_tcm); + +#ifdef CONFIG_HAVE_DTCM + #define ITCM_SIZE CONFIG_ITCM_NR_PAGES * PAGE_SIZE + + .dtcm_start : { + __dtcm_start = .; + } + + .data_tcm FIXADDR_TCM + ITCM_SIZE : AT(__dtcm_start) + { + . = ALIGN(4); + __stcm_data = .; + *(.tcm.data) + . = ALIGN(4); + __etcm_data = .; + } + + . = ADDR(.dtcm_start) + SIZEOF(.data_tcm); + + .tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_tcm)) { +#else + .tcm_end : AT(ADDR(.tcm_start) + SIZEOF(.text_data_tcm)) { +#endif + . 
= ALIGN(PAGE_SIZE); + __tcm_end = .; + } +#endif + EXCEPTION_TABLE(L1_CACHE_BYTES) BSS_SECTION(L1_CACHE_BYTES, PAGE_SIZE, L1_CACHE_BYTES) VBR_BASE diff --git a/arch/csky/mm/Makefile b/arch/csky/mm/Makefile index c94ef648109865e5662edeb1a61c9685426340fa..6e7696e55f71131dd1fde113ecdf177bbe01844c 100644 --- a/arch/csky/mm/Makefile +++ b/arch/csky/mm/Makefile @@ -1,8 +1,10 @@ # SPDX-License-Identifier: GPL-2.0-only ifeq ($(CONFIG_CPU_HAS_CACHEV2),y) obj-y += cachev2.o +CFLAGS_REMOVE_cachev2.o = $(CC_FLAGS_FTRACE) else obj-y += cachev1.o +CFLAGS_REMOVE_cachev1.o = $(CC_FLAGS_FTRACE) endif obj-y += dma-mapping.o @@ -14,3 +16,4 @@ obj-y += syscache.o obj-y += tlb.o obj-y += asid.o obj-y += context.o +obj-$(CONFIG_HAVE_TCM) += tcm.o diff --git a/arch/csky/mm/cachev1.c b/arch/csky/mm/cachev1.c index 494ec912abff072ed0a85ba6d6c2b313a27001f2..5a5a9804a0e3d468baa2ff7444d91265d35f9783 100644 --- a/arch/csky/mm/cachev1.c +++ b/arch/csky/mm/cachev1.c @@ -94,6 +94,11 @@ void icache_inv_all(void) cache_op_all(INS_CACHE|CACHE_INV, 0); } +void local_icache_inv_all(void *priv) +{ + cache_op_all(INS_CACHE|CACHE_INV, 0); +} + void dcache_wb_range(unsigned long start, unsigned long end) { cache_op_range(start, end, DATA_CACHE|CACHE_CLR, 0); diff --git a/arch/csky/mm/cachev2.c b/arch/csky/mm/cachev2.c index b61be6518e214bbed8d8704a6d36b76d1520f479..bc419f8039d3144ddaa14d7e9a1a6352869c9970 100644 --- a/arch/csky/mm/cachev2.c +++ b/arch/csky/mm/cachev2.c @@ -3,15 +3,25 @@ #include #include +#include #include #include -inline void dcache_wb_line(unsigned long start) +#define INS_CACHE (1 << 0) +#define CACHE_INV (1 << 4) + +void local_icache_inv_all(void *priv) { - asm volatile("dcache.cval1 %0\n"::"r"(start):"memory"); + mtcr("cr17", INS_CACHE|CACHE_INV); sync_is(); } +void icache_inv_all(void) +{ + on_each_cpu(local_icache_inv_all, NULL, 1); +} + +#ifdef CONFIG_CPU_HAS_ICACHE_INS void icache_inv_range(unsigned long start, unsigned long end) { unsigned long i = start & ~(L1_CACHE_BYTES - 1); @@ -20,43 +30,32 @@ void icache_inv_range(unsigned long start, unsigned long end) asm volatile("icache.iva %0\n"::"r"(i):"memory"); sync_is(); } - -void icache_inv_all(void) +#else +void icache_inv_range(unsigned long start, unsigned long end) { - asm volatile("icache.ialls\n":::"memory"); - sync_is(); + icache_inv_all(); } +#endif -void dcache_wb_range(unsigned long start, unsigned long end) +inline void dcache_wb_line(unsigned long start) { - unsigned long i = start & ~(L1_CACHE_BYTES - 1); - - for (; i < end; i += L1_CACHE_BYTES) - asm volatile("dcache.cval1 %0\n"::"r"(i):"memory"); + asm volatile("dcache.cval1 %0\n"::"r"(start):"memory"); sync_is(); } -void dcache_inv_range(unsigned long start, unsigned long end) +void dcache_wb_range(unsigned long start, unsigned long end) { unsigned long i = start & ~(L1_CACHE_BYTES - 1); for (; i < end; i += L1_CACHE_BYTES) - asm volatile("dcache.civa %0\n"::"r"(i):"memory"); + asm volatile("dcache.cval1 %0\n"::"r"(i):"memory"); sync_is(); } void cache_wbinv_range(unsigned long start, unsigned long end) { - unsigned long i = start & ~(L1_CACHE_BYTES - 1); - - for (; i < end; i += L1_CACHE_BYTES) - asm volatile("dcache.cval1 %0\n"::"r"(i):"memory"); - sync_is(); - - i = start & ~(L1_CACHE_BYTES - 1); - for (; i < end; i += L1_CACHE_BYTES) - asm volatile("icache.iva %0\n"::"r"(i):"memory"); - sync_is(); + dcache_wb_range(start, end); + icache_inv_range(start, end); } EXPORT_SYMBOL(cache_wbinv_range); diff --git a/arch/csky/mm/highmem.c b/arch/csky/mm/highmem.c index 
3317b774f6dc145eae07b562689d975404279bb9..813129145f3da77c87a516f2f33bf48d8592ed18 100644 --- a/arch/csky/mm/highmem.c +++ b/arch/csky/mm/highmem.c @@ -117,85 +117,29 @@ struct page *kmap_atomic_to_page(void *ptr) return pte_page(*pte); } -static void __init fixrange_init(unsigned long start, unsigned long end, - pgd_t *pgd_base) +static void __init kmap_pages_init(void) { -#ifdef CONFIG_HIGHMEM - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - pte_t *pte; - int i, j, k; unsigned long vaddr; - - vaddr = start; - i = __pgd_offset(vaddr); - j = __pud_offset(vaddr); - k = __pmd_offset(vaddr); - pgd = pgd_base + i; - - for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) { - pud = (pud_t *)pgd; - for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) { - pmd = (pmd_t *)pud; - for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) { - if (pmd_none(*pmd)) { - pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); - if (!pte) - panic("%s: Failed to allocate %lu bytes align=%lx\n", - __func__, PAGE_SIZE, - PAGE_SIZE); - - set_pmd(pmd, __pmd(__pa(pte))); - BUG_ON(pte != pte_offset_kernel(pmd, 0)); - } - vaddr += PMD_SIZE; - } - k = 0; - } - j = 0; - } -#endif -} - -void __init fixaddr_kmap_pages_init(void) -{ - unsigned long vaddr; - pgd_t *pgd_base; -#ifdef CONFIG_HIGHMEM pgd_t *pgd; pmd_t *pmd; pud_t *pud; pte_t *pte; -#endif - pgd_base = swapper_pg_dir; - - /* - * Fixed mappings: - */ - vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; - fixrange_init(vaddr, 0, pgd_base); - -#ifdef CONFIG_HIGHMEM - /* - * Permanent kmaps: - */ + vaddr = PKMAP_BASE; - fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); + fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir); pgd = swapper_pg_dir + __pgd_offset(vaddr); pud = (pud_t *)pgd; pmd = pmd_offset(pud, vaddr); pte = pte_offset_kernel(pmd, vaddr); pkmap_page_table = pte; -#endif } void __init kmap_init(void) { unsigned long vaddr; - fixaddr_kmap_pages_init(); + kmap_pages_init(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN); diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c index d4c2292ea46bc6cf0825c6af3b9499338577d329..cb64d8647a78b3eb9529b962e6267361e3c15897 100644 --- a/arch/csky/mm/init.c +++ b/arch/csky/mm/init.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -31,10 +32,50 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss; pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss; +EXPORT_SYMBOL(invalid_pte_table); unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss; EXPORT_SYMBOL(empty_zero_page); +#ifdef CONFIG_BLK_DEV_INITRD +static void __init setup_initrd(void) +{ + unsigned long size; + + if (initrd_start >= initrd_end) { + pr_err("initrd not found or empty"); + goto disable; + } + + if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) { + pr_err("initrd extends beyond end of memory"); + goto disable; + } + + size = initrd_end - initrd_start; + + if (memblock_is_region_reserved(__pa(initrd_start), size)) { + pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region", + __pa(initrd_start), size); + goto disable; + } + + memblock_reserve(__pa(initrd_start), size); + + pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n", + (void *)(initrd_start), size); + + initrd_below_start_ok = 1; + + return; + +disable: + initrd_start = initrd_end = 0; + + pr_err(" - disabling initrd\n"); +} +#endif + void __init mem_init(void) { #ifdef CONFIG_HIGHMEM @@ -46,6 +87,10 @@ void __init mem_init(void) #endif high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); 
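+	/* +	 * Validate and reserve the initrd now, before memblock_free_all() +	 * below releases the remaining memory to the buddy allocator. +	 */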
+#ifdef CONFIG_BLK_DEV_INITRD + setup_initrd(); +#endif + memblock_free_all(); #ifdef CONFIG_HIGHMEM @@ -101,3 +146,50 @@ void __init pre_mmu_init(void) /* Setup page mask to 4k */ write_mmu_pagemask(0); } + +void __init fixrange_init(unsigned long start, unsigned long end, + pgd_t *pgd_base) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + int i, j, k; + unsigned long vaddr; + + vaddr = start; + i = __pgd_offset(vaddr); + j = __pud_offset(vaddr); + k = __pmd_offset(vaddr); + pgd = pgd_base + i; + + for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) { + pud = (pud_t *)pgd; + for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) { + pmd = (pmd_t *)pud; + for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) { + if (pmd_none(*pmd)) { + pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); + if (!pte) + panic("%s: Failed to allocate %lu bytes align=%lx\n", + __func__, PAGE_SIZE, + PAGE_SIZE); + + set_pmd(pmd, __pmd(__pa(pte))); + BUG_ON(pte != pte_offset_kernel(pmd, 0)); + } + vaddr += PMD_SIZE; + } + k = 0; + } + j = 0; + } +} + +void __init fixaddr_init(void) +{ + unsigned long vaddr; + + vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; + fixrange_init(vaddr, vaddr + PMD_SIZE, swapper_pg_dir); +} diff --git a/arch/csky/mm/syscache.c b/arch/csky/mm/syscache.c index c4645e4e97f4b70d4cc23be29f20cd3aaaff5db0..ffade2f9a4c87914af5c8322fdfc1b9600a204c1 100644 --- a/arch/csky/mm/syscache.c +++ b/arch/csky/mm/syscache.c @@ -3,7 +3,7 @@ #include #include -#include +#include #include SYSCALL_DEFINE3(cacheflush, @@ -13,17 +13,14 @@ SYSCALL_DEFINE3(cacheflush, { switch (cache) { case ICACHE: - icache_inv_range((unsigned long)addr, - (unsigned long)addr + bytes); - break; + case BCACHE: + flush_icache_mm_range(current->mm, + (unsigned long)addr, + (unsigned long)addr + bytes); case DCACHE: dcache_wb_range((unsigned long)addr, (unsigned long)addr + bytes); break; - case BCACHE: - cache_wbinv_range((unsigned long)addr, - (unsigned long)addr + bytes); - break; default: return -EINVAL; } diff --git a/arch/csky/mm/tcm.c b/arch/csky/mm/tcm.c new file mode 100644 index 0000000000000000000000000000000000000000..ddeb36328819960ac275ca1febf13fc4ad628677 --- /dev/null +++ b/arch/csky/mm/tcm.c @@ -0,0 +1,169 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include + +#if (CONFIG_ITCM_RAM_BASE == 0xffffffff) +#error "You should define ITCM_RAM_BASE" +#endif + +#ifdef CONFIG_HAVE_DTCM +#if (CONFIG_DTCM_RAM_BASE == 0xffffffff) +#error "You should define DTCM_RAM_BASE" +#endif + +#if (CONFIG_DTCM_RAM_BASE == CONFIG_ITCM_RAM_BASE) +#error "You should define correct DTCM_RAM_BASE" +#endif +#endif + +extern char __tcm_start, __tcm_end, __dtcm_start; + +static struct gen_pool *tcm_pool; + +static void __init tcm_mapping_init(void) +{ + pte_t *tcm_pte; + unsigned long vaddr, paddr; + int i; + + paddr = CONFIG_ITCM_RAM_BASE; + + if (pfn_valid(PFN_DOWN(CONFIG_ITCM_RAM_BASE))) + goto panic; + +#ifndef CONFIG_HAVE_DTCM + for (i = 0; i < TCM_NR_PAGES; i++) { +#else + for (i = 0; i < CONFIG_ITCM_NR_PAGES; i++) { +#endif + vaddr = __fix_to_virt(FIX_TCM - i); + + tcm_pte = + pte_offset_kernel((pmd_t *)pgd_offset_k(vaddr), vaddr); + + set_pte(tcm_pte, pfn_pte(__phys_to_pfn(paddr), PAGE_KERNEL)); + + flush_tlb_one(vaddr); + + paddr = paddr + PAGE_SIZE; + } + +#ifdef CONFIG_HAVE_DTCM + if (pfn_valid(PFN_DOWN(CONFIG_DTCM_RAM_BASE))) + goto panic; + + paddr = CONFIG_DTCM_RAM_BASE; + + for (i = 0; i < CONFIG_DTCM_NR_PAGES; i++) { + vaddr = __fix_to_virt(FIX_TCM - 
CONFIG_ITCM_NR_PAGES - i); + + tcm_pte = + pte_offset_kernel((pmd_t *) pgd_offset_k(vaddr), vaddr); + + set_pte(tcm_pte, pfn_pte(__phys_to_pfn(paddr), PAGE_KERNEL)); + + flush_tlb_one(vaddr); + + paddr = paddr + PAGE_SIZE; + } +#endif + +#ifndef CONFIG_HAVE_DTCM + memcpy((void *)__fix_to_virt(FIX_TCM), + &__tcm_start, &__tcm_end - &__tcm_start); + + pr_info("%s: mapping tcm va:0x%08lx to pa:0x%08x\n", + __func__, __fix_to_virt(FIX_TCM), CONFIG_ITCM_RAM_BASE); + + pr_info("%s: __tcm_start va:0x%08lx size:%d\n", + __func__, (unsigned long)&__tcm_start, &__tcm_end - &__tcm_start); +#else + memcpy((void *)__fix_to_virt(FIX_TCM), + &__tcm_start, &__dtcm_start - &__tcm_start); + + pr_info("%s: mapping itcm va:0x%08lx to pa:0x%08x\n", + __func__, __fix_to_virt(FIX_TCM), CONFIG_ITCM_RAM_BASE); + + pr_info("%s: __itcm_start va:0x%08lx size:%d\n", + __func__, (unsigned long)&__tcm_start, &__dtcm_start - &__tcm_start); + + memcpy((void *)__fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES), + &__dtcm_start, &__tcm_end - &__dtcm_start); + + pr_info("%s: mapping dtcm va:0x%08lx to pa:0x%08x\n", + __func__, __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES), + CONFIG_DTCM_RAM_BASE); + + pr_info("%s: __dtcm_start va:0x%08lx size:%d\n", + __func__, (unsigned long)&__dtcm_start, &__tcm_end - &__dtcm_start); + +#endif + return; +panic: + panic("TCM init error"); +} + +void *tcm_alloc(size_t len) +{ + unsigned long vaddr; + + if (!tcm_pool) + return NULL; + + vaddr = gen_pool_alloc(tcm_pool, len); + if (!vaddr) + return NULL; + + return (void *) vaddr; +} +EXPORT_SYMBOL(tcm_alloc); + +void tcm_free(void *addr, size_t len) +{ + gen_pool_free(tcm_pool, (unsigned long) addr, len); +} +EXPORT_SYMBOL(tcm_free); + +static int __init tcm_setup_pool(void) +{ +#ifndef CONFIG_HAVE_DTCM + u32 pool_size = (u32) (TCM_NR_PAGES * PAGE_SIZE) + - (u32) (&__tcm_end - &__tcm_start); + + u32 tcm_pool_start = __fix_to_virt(FIX_TCM) + + (u32) (&__tcm_end - &__tcm_start); +#else + u32 pool_size = (u32) (CONFIG_DTCM_NR_PAGES * PAGE_SIZE) + - (u32) (&__tcm_end - &__dtcm_start); + + u32 tcm_pool_start = __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES) + + (u32) (&__tcm_end - &__dtcm_start); +#endif + int ret; + + tcm_pool = gen_pool_create(2, -1); + + ret = gen_pool_add(tcm_pool, tcm_pool_start, pool_size, -1); + if (ret) { + pr_err("%s: gen_pool add failed!\n", __func__); + return ret; + } + + pr_info("%s: Added %d bytes @ 0x%08x to memory pool\n", + __func__, pool_size, tcm_pool_start); + + return 0; +} + +static int __init tcm_init(void) +{ + tcm_mapping_init(); + + tcm_setup_pool(); + + return 0; +} +arch_initcall(tcm_init); diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts index 37b93166bf22d17947aa878657d269d84bbdd097..c340f947baa03a4d5e0cb8e0f79a3a72aeccd4e8 100644 --- a/arch/mips/boot/dts/ingenic/ci20.dts +++ b/arch/mips/boot/dts/ingenic/ci20.dts @@ -4,6 +4,8 @@ #include "jz4780.dtsi" #include #include +#include +#include / { compatible = "img,ci20", "ingenic,jz4780"; @@ -163,63 +165,71 @@ regulators { vddcore: SUDCDC1 { - regulator-name = "VDDCORE"; + regulator-name = "DCDC_REG1"; regulator-min-microvolt = <1100000>; regulator-max-microvolt = <1100000>; regulator-always-on; }; vddmem: SUDCDC2 { - regulator-name = "VDDMEM"; + regulator-name = "DCDC_REG2"; regulator-min-microvolt = <1500000>; regulator-max-microvolt = <1500000>; regulator-always-on; }; vcc_33: SUDCDC3 { - regulator-name = "VCC33"; + regulator-name = "DCDC_REG3"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; 
regulator-always-on; }; vcc_50: SUDCDC4 { - regulator-name = "VCC50"; + regulator-name = "SUDCDC_REG4"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; regulator-always-on; }; vcc_25: LDO_REG5 { - regulator-name = "VCC25"; + regulator-name = "LDO_REG5"; regulator-min-microvolt = <2500000>; regulator-max-microvolt = <2500000>; regulator-always-on; }; wifi_io: LDO_REG6 { - regulator-name = "WIFIIO"; + regulator-name = "LDO_REG6"; regulator-min-microvolt = <2500000>; regulator-max-microvolt = <2500000>; regulator-always-on; }; vcc_28: LDO_REG7 { - regulator-name = "VCC28"; + regulator-name = "LDO_REG7"; regulator-min-microvolt = <2800000>; regulator-max-microvolt = <2800000>; regulator-always-on; }; vcc_15: LDO_REG8 { - regulator-name = "VCC15"; + regulator-name = "LDO_REG8"; regulator-min-microvolt = <1500000>; regulator-max-microvolt = <1500000>; regulator-always-on; }; - vcc_18: LDO_REG9 { - regulator-name = "VCC18"; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <1800000>; + vrtc_18: LDO_REG9 { + regulator-name = "LDO_REG9"; + /* Despite the datasheet stating 3.3V + * for REG9 and the driver expecting that, + * REG9 outputs 1.8V. + * Likely the CI20 uses a proprietary + * factory programmed chip variant. + * Since this is a simple on/off LDO the + * exact values do not matter. + */ + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; regulator-always-on; }; vcc_11: LDO_REG10 { - regulator-name = "VCC11"; - regulator-min-microvolt = <1100000>; - regulator-max-microvolt = <1100000>; + regulator-name = "LDO_REG10"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; regulator-always-on; }; }; @@ -261,7 +271,9 @@ rtc@51 { compatible = "nxp,pcf8563"; reg = <0x51>; - interrupts = <110>; + + interrupt-parent = <&gpf>; + interrupts = <30 IRQ_TYPE_LEVEL_LOW>; }; }; diff --git a/arch/mips/boot/dts/ingenic/jz4740.dtsi b/arch/mips/boot/dts/ingenic/jz4740.dtsi index 5accda2767bea4d641116206dc470c8b34775a9b..a3301bab9231ae4a2f65859c5713f1b746b7239d 100644 --- a/arch/mips/boot/dts/ingenic/jz4740.dtsi +++ b/arch/mips/boot/dts/ingenic/jz4740.dtsi @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include +#include / { #address-cells = <1>; @@ -45,14 +46,6 @@ #clock-cells = <1>; }; - watchdog: watchdog@10002000 { - compatible = "ingenic,jz4740-watchdog"; - reg = <0x10002000 0x10>; - - clocks = <&cgu JZ4740_CLK_RTC>; - clock-names = "rtc"; - }; - tcu: timer@10002000 { compatible = "ingenic,jz4740-tcu", "simple-mfd"; reg = <0x10002000 0x1000>; @@ -73,6 +66,14 @@ interrupt-parent = <&intc>; interrupts = <23 22 21>; + + watchdog: watchdog@0 { + compatible = "ingenic,jz4740-watchdog"; + reg = <0x0 0xc>; + + clocks = <&tcu TCU_CLK_WDT>; + clock-names = "wdt"; + }; }; rtc_dev: rtc@10003000 { diff --git a/arch/mips/boot/dts/ingenic/jz4780.dtsi b/arch/mips/boot/dts/ingenic/jz4780.dtsi index f928329b034b374de13a9d1e0570824a2e7d92c4..bb89653d16a321ab4bb9d136b96bff8eb8d0b2d0 100644 --- a/arch/mips/boot/dts/ingenic/jz4780.dtsi +++ b/arch/mips/boot/dts/ingenic/jz4780.dtsi @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include +#include #include / { @@ -67,6 +68,14 @@ interrupt-parent = <&intc>; interrupts = <27 26 25>; + + watchdog: watchdog@0 { + compatible = "ingenic,jz4780-watchdog"; + reg = <0x0 0xc>; + + clocks = <&tcu TCU_CLK_WDT>; + clock-names = "wdt"; + }; }; rtc_dev: rtc@10003000 { @@ -348,14 +357,6 @@ status = "disabled"; }; - watchdog: watchdog@10002000 { - compatible = "ingenic,jz4780-watchdog"; - reg = 
<0x10002000 0x10>; - - clocks = <&cgu JZ4780_CLK_RTCLK>; - clock-names = "rtc"; - }; - nemc: nemc@13410000 { compatible = "ingenic,jz4780-nemc"; reg = <0x13410000 0x10000>; diff --git a/arch/mips/boot/dts/ingenic/x1000.dtsi b/arch/mips/boot/dts/ingenic/x1000.dtsi index 4994c695a1a73ba0bf0b4ca3dd5d3a4591ea75c7..147f7d5c243a2fe5c6cfc3a639f5e4c3299fa7ea 100644 --- a/arch/mips/boot/dts/ingenic/x1000.dtsi +++ b/arch/mips/boot/dts/ingenic/x1000.dtsi @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 +#include #include #include @@ -72,7 +73,7 @@ compatible = "ingenic,x1000-watchdog", "ingenic,jz4780-watchdog"; reg = <0x0 0x10>; - clocks = <&cgu X1000_CLK_RTCLK>; + clocks = <&tcu TCU_CLK_WDT>; clock-names = "wdt"; }; }; @@ -158,7 +159,6 @@ i2c0: i2c-controller@10050000 { compatible = "ingenic,x1000-i2c"; reg = <0x10050000 0x1000>; - #address-cells = <1>; #size-cells = <0>; @@ -173,7 +173,6 @@ i2c1: i2c-controller@10051000 { compatible = "ingenic,x1000-i2c"; reg = <0x10051000 0x1000>; - #address-cells = <1>; #size-cells = <0>; @@ -188,7 +187,6 @@ i2c2: i2c-controller@10052000 { compatible = "ingenic,x1000-i2c"; reg = <0x10052000 0x1000>; - #address-cells = <1>; #size-cells = <0>; diff --git a/arch/mips/include/asm/sync.h b/arch/mips/include/asm/sync.h index 7c6a1095f556268700909ba31b5b24c1b6d9dc6a..aabd097933fe97f599361dd002d6e9f2fcdd999b 100644 --- a/arch/mips/include/asm/sync.h +++ b/arch/mips/include/asm/sync.h @@ -155,9 +155,11 @@ * effective barrier as noted by commit 6b07d38aaa52 ("MIPS: Octeon: Use * optimized memory barrier primitives."). Here we specify that the affected * sync instructions should be emitted twice. + * Note that this expression is evaluated by the assembler (not the compiler), + * and that the assembler evaluates '==' as 0 or -1, not 0 or 1. */ #ifdef CONFIG_CPU_CAVIUM_OCTEON -# define __SYNC_rpt(type) (1 + (type == __SYNC_wmb)) +# define __SYNC_rpt(type) (1 - (type == __SYNC_wmb)) #else # define __SYNC_rpt(type) 1 #endif diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 1ac2752fb7919ea26fb1caf5470ea3887887beb0..a7b469d89e2cc5e9679136d48a582356d56e0398 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -605,7 +605,8 @@ static void __init bootcmdline_init(char **cmdline_p) * If we're configured to take boot arguments from DT, look for those * now. 
*/ - if (IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB)) + if (IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB) || + IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND)) of_scan_flat_dt(bootcmdline_scan_chosen, &dt_bootargs); #endif diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index 6176b9acba950e7189c48752498a9bb79f3648ad..d0d832ab3d3b86192b57c30f2c8c9e90820965ea 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c @@ -134,7 +134,7 @@ void release_vpe(struct vpe *v) { list_del(&v->list); if (v->load_addr) - release_progmem(v); + release_progmem(v->load_addr); kfree(v); } diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile index aa89a41dc5dddef97aa080366aa47eb743dfe0eb..d7fe8408603e85583e013337e5df1c87d9ada7cb 100644 --- a/arch/mips/vdso/Makefile +++ b/arch/mips/vdso/Makefile @@ -33,6 +33,7 @@ endif cflags-vdso := $(ccflags-vdso) \ $(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \ -O3 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \ + -mrelax-pic-calls $(call cc-option, -mexplicit-relocs) \ -fno-stack-protector -fno-jump-tables -DDISABLE_BRANCH_PROFILING \ $(call cc-option, -fno-asynchronous-unwind-tables) \ $(call cc-option, -fno-stack-protector) @@ -51,6 +52,8 @@ endif CFLAGS_REMOVE_vgettimeofday.o = -pg +DISABLE_VDSO := n + # # For the pre-R6 code in arch/mips/vdso/vdso.h for locating # the base address of VDSO, the linker will emit a R_MIPS_PC32 @@ -64,11 +67,24 @@ CFLAGS_REMOVE_vgettimeofday.o = -pg ifndef CONFIG_CPU_MIPSR6 ifeq ($(call ld-ifversion, -lt, 225000000, y),y) $(warning MIPS VDSO requires binutils >= 2.25) - obj-vdso-y := $(filter-out vgettimeofday.o, $(obj-vdso-y)) - ccflags-vdso += -DDISABLE_MIPS_VDSO + DISABLE_VDSO := y endif endif +# +# GCC (at least up to version 9.2) appears to emit function calls that make use +# of the GOT when targeting microMIPS, which we can't use in the VDSO due to +# the lack of relocations. As such, we disable the VDSO for microMIPS builds. +# +ifdef CONFIG_CPU_MICROMIPS + DISABLE_VDSO := y +endif + +ifeq ($(DISABLE_VDSO),y) + obj-vdso-y := $(filter-out vgettimeofday.o, $(obj-vdso-y)) + ccflags-vdso += -DDISABLE_MIPS_VDSO +endif + # VDSO linker flags. VDSO_LDFLAGS := \ -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1 \ @@ -81,12 +97,18 @@ GCOV_PROFILE := n UBSAN_SANITIZE := n KCOV_INSTRUMENT := n +# Check that we don't have PIC 'jalr t9' calls left +quiet_cmd_vdso_mips_check = VDSOCHK $@ + cmd_vdso_mips_check = if $(OBJDUMP) --disassemble $@ | egrep -h "jalr.*t9" > /dev/null; \ + then (echo >&2 "$@: PIC 'jalr t9' calls are not supported"; \ + rm -f $@; /bin/false); fi + # # Shared build commands. 
# quiet_cmd_vdsold_and_vdso_check = LD $@ - cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check) + cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check); $(cmd_vdso_mips_check) quiet_cmd_vdsold = VDSO $@ cmd_vdsold = $(CC) $(c_flags) $(VDSO_LDFLAGS) \ diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 71034b54d74e3e45c750ad2854621a3f65d0901b..3801a2ef9bca1e8baafdf96577a784d36897af49 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -79,6 +79,11 @@ config MMU config STACK_GROWSUP def_bool y +config ARCH_DEFCONFIG + string + default "arch/parisc/configs/generic-32bit_defconfig" if !64BIT + default "arch/parisc/configs/generic-64bit_defconfig" if 64BIT + config GENERIC_LOCKBREAK bool default y diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index dca8f2de8cf5d0491cb072e88a5af43662ee37c3..628cd8bb7ad8500d682042a6a8ec64f7909cecb8 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -34,6 +34,13 @@ CC_ARCHES = hppa hppa2.0 hppa1.1 LD_BFD := elf32-hppa-linux endif +# select defconfig based on actual architecture +ifeq ($(shell uname -m),parisc64) + KBUILD_DEFCONFIG := generic-64bit_defconfig +else + KBUILD_DEFCONFIG := generic-32bit_defconfig +endif + export LD_BFD ifneq ($(SUBARCH),$(UTS_MACHINE)) diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 86332080399a57d461c3a0efbd1ac2b3debeb9ab..080a0bf8e54bb9cd6267e38adacb2dc15150b401 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -295,8 +295,13 @@ static inline bool pfn_valid(unsigned long pfn) /* * Some number of bits at the level of the page table that points to * a hugepte are used to encode the size. This masks those bits. + * On 8xx, HW assistance requires 4k alignment for the hugepte. 
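The HUGEPD_SHIFT_MASK definition that follows masks the size bits encoded in the low bits of a hugepte pointer; the 4k alignment noted above is what makes twelve tag bits (0xfff) available on 8xx. A hedged sketch of that kind of pointer tagging, with helper names and the shift value invented rather than taken from the kernel's hugepd accessors:

    #include <assert.h>
    #include <stdint.h>

    #define SHIFT_MASK 0xfffUL  /* twelve tag bits, made free by 4k alignment */

    /* Pack a size shift into the low bits of an aligned table pointer. */
    static uintptr_t hugepd_encode(void *table, unsigned int shift)
    {
        assert(((uintptr_t)table & SHIFT_MASK) == 0);  /* must be 4k aligned */
        assert(shift <= SHIFT_MASK);
        return (uintptr_t)table | shift;
    }

    int main(void)
    {
        _Alignas(4096) static char table[4096];
        uintptr_t hpd = hugepd_encode(table, 21);  /* e.g. a 2M size shift */

        assert((void *)(hpd & ~SHIFT_MASK) == (void *)table);
        assert((hpd & SHIFT_MASK) == 21);
        return 0;
    }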
*/ +#ifdef CONFIG_PPC_8xx +#define HUGEPD_SHIFT_MASK 0xfff +#else #define HUGEPD_SHIFT_MASK 0x3f +#endif #ifndef __ASSEMBLY__ diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 8387698bd5b629692fc676143e39202ed7b47f66..eedcbfb9a6ff38d874b027c2d639519b83ebba24 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -168,6 +168,10 @@ struct thread_struct { unsigned long srr1; unsigned long dar; unsigned long dsisr; +#ifdef CONFIG_PPC_BOOK3S_32 + unsigned long r0, r3, r4, r5, r6, r8, r9, r11; + unsigned long lr, ctr; +#endif #endif /* Debug Registers */ struct debug_reg debug; diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index c25e562f1cd9d3f1d4fdf93b70bc2f698c2c4427..fcf24a365fc014b0490c957a16c8a9bfd55fc2fd 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -132,6 +132,18 @@ int main(void) { OFFSET(SRR1, thread_struct, srr1); OFFSET(DAR, thread_struct, dar); OFFSET(DSISR, thread_struct, dsisr); +#ifdef CONFIG_PPC_BOOK3S_32 + OFFSET(THR0, thread_struct, r0); + OFFSET(THR3, thread_struct, r3); + OFFSET(THR4, thread_struct, r4); + OFFSET(THR5, thread_struct, r5); + OFFSET(THR6, thread_struct, r6); + OFFSET(THR8, thread_struct, r8); + OFFSET(THR9, thread_struct, r9); + OFFSET(THR11, thread_struct, r11); + OFFSET(THLR, thread_struct, lr); + OFFSET(THCTR, thread_struct, ctr); +#endif #endif #ifdef CONFIG_SPE OFFSET(THREAD_EVR0, thread_struct, evr[0]); diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index e745abc5457a0bb5f0560a695f00a7925ff29386..245be4fafe134a95c468ee7086ce2f985231dd3e 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -2193,11 +2193,13 @@ static struct cpu_spec * __init setup_cpu_spec(unsigned long offset, * oprofile_cpu_type already has a value, then we are * possibly overriding a real PVR with a logical one, * and, in that case, keep the current value for - * oprofile_cpu_type. + * oprofile_cpu_type. Furthermore, let's ensure that the + * fix for the PMAO bug is enabled in compatibility mode. 
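The new THR*/THLR/THCTR symbols above come from asm-offsets.c, which turns C structure layouts into numeric constants that assembly such as head_32.S can use. A simplified userspace model of the mechanism; the real kernel emits these values through special asm markers rather than printf, and the structure here is trimmed to the relevant fields:

    #include <stddef.h>
    #include <stdio.h>

    struct thread_struct {
        unsigned long r0, r3, r4, r5, r6, r8, r9, r11;
        unsigned long lr, ctr;
    };

    /* Emit "#define SYM offset" lines that assembly code can include. */
    #define OFFSET(sym, str, mem) \
        printf("#define %-8s %zu\n", #sym, offsetof(struct str, mem))

    int main(void)
    {
        OFFSET(THR0, thread_struct, r0);
        OFFSET(THR11, thread_struct, r11);
        OFFSET(THLR, thread_struct, lr);
        OFFSET(THCTR, thread_struct, ctr);
        return 0;
    }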
*/ if (old.oprofile_cpu_type != NULL) { t->oprofile_cpu_type = old.oprofile_cpu_type; t->oprofile_type = old.oprofile_type; + t->cpu_features |= old.cpu_features & CPU_FTR_PMAO_BUG; } } diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index a1eaffe868de4d76cbb8c401ddb175671e336ef4..7b048cee767c746fefe8a75cb991fb94fe757164 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -1184,6 +1184,17 @@ void eeh_handle_special_event(void) eeh_pe_state_mark(pe, EEH_PE_RECOVERING); eeh_handle_normal_event(pe); } else { + eeh_for_each_pe(pe, tmp_pe) + eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev) + edev->mode &= ~EEH_DEV_NO_HANDLER; + + /* Notify all devices to be down */ + eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true); + eeh_set_channel_state(pe, pci_channel_io_perm_failure); + eeh_pe_report( + "error_detected(permanent failure)", pe, + eeh_report_failure, NULL); + pci_lock_rescan_remove(); list_for_each_entry(hose, &hose_list, list_node) { phb_pe = eeh_phb_pe_get(hose); @@ -1192,16 +1203,6 @@ void eeh_handle_special_event(void) (phb_pe->state & EEH_PE_RECOVERING)) continue; - eeh_for_each_pe(pe, tmp_pe) - eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev) - edev->mode &= ~EEH_DEV_NO_HANDLER; - - /* Notify all devices to be down */ - eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true); - eeh_set_channel_state(pe, pci_channel_io_perm_failure); - eeh_pe_report( - "error_detected(permanent failure)", pe, - eeh_report_failure, NULL); bus = eeh_pe_bus_get(phb_pe); if (!bus) { pr_err("%s: Cannot find PCI bus for " diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 0713daa651d9e469b2acedfea00d15f12ce6be70..16af0d8d90a8641ae2d43b29cef9d691a08f06e2 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -783,7 +783,7 @@ fast_exception_return: 1: lis r3,exc_exit_restart_end@ha addi r3,r3,exc_exit_restart_end@l cmplw r12,r3 -#if CONFIG_PPC_BOOK3S_601 +#ifdef CONFIG_PPC_BOOK3S_601 bge 2b #else bge 3f @@ -791,7 +791,7 @@ fast_exception_return: lis r4,exc_exit_restart@ha addi r4,r4,exc_exit_restart@l cmplw r12,r4 -#if CONFIG_PPC_BOOK3S_601 +#ifdef CONFIG_PPC_BOOK3S_601 blt 2b #else blt 3f @@ -1354,12 +1354,17 @@ _GLOBAL(enter_rtas) mtspr SPRN_SRR0,r8 mtspr SPRN_SRR1,r9 RFI -1: tophys(r9,r1) +1: tophys_novmstack r9, r1 +#ifdef CONFIG_VMAP_STACK + li r0, MSR_KERNEL & ~MSR_IR /* can take DTLB miss */ + mtmsr r0 + isync +#endif lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */ lwz r9,8(r9) /* original msr value */ addi r1,r1,INT_FRAME_SIZE li r0,0 - tophys(r7, r2) + tophys_novmstack r7, r2 stw r0, THREAD + RTAS_SP(r7) mtspr SPRN_SRR0,r8 mtspr SPRN_SRR1,r9 diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index 0493fcac6409508394932ea267e2f28c2f690f32..97c887950c3ca19d3156b8cc1e7aca6ceaef79f7 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -290,17 +290,55 @@ MachineCheck: 7: EXCEPTION_PROLOG_2 addi r3,r1,STACK_FRAME_OVERHEAD #ifdef CONFIG_PPC_CHRP - bne cr1,1f +#ifdef CONFIG_VMAP_STACK + mfspr r4, SPRN_SPRG_THREAD + tovirt(r4, r4) + lwz r4, RTAS_SP(r4) + cmpwi cr1, r4, 0 #endif - EXC_XFER_STD(0x200, machine_check_exception) -#ifdef CONFIG_PPC_CHRP -1: b machine_check_in_rtas + beq cr1, machine_check_tramp + b machine_check_in_rtas +#else + b machine_check_tramp #endif /* Data access exception. */ . 
= 0x300 DO_KVM 0x300 DataAccess: +#ifdef CONFIG_VMAP_STACK + mtspr SPRN_SPRG_SCRATCH0,r10 + mfspr r10, SPRN_SPRG_THREAD +BEGIN_MMU_FTR_SECTION + stw r11, THR11(r10) + mfspr r10, SPRN_DSISR + mfcr r11 +#ifdef CONFIG_PPC_KUAP + andis. r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h +#else + andis. r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h +#endif + mfspr r10, SPRN_SPRG_THREAD + beq hash_page_dsi +.Lhash_page_dsi_cont: + mtcr r11 + lwz r11, THR11(r10) +END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) + mtspr SPRN_SPRG_SCRATCH1,r11 + mfspr r11, SPRN_DAR + stw r11, DAR(r10) + mfspr r11, SPRN_DSISR + stw r11, DSISR(r10) + mfspr r11, SPRN_SRR0 + stw r11, SRR0(r10) + mfspr r11, SPRN_SRR1 /* check whether user or kernel */ + stw r11, SRR1(r10) + mfcr r10 + andi. r11, r11, MSR_PR + + EXCEPTION_PROLOG_1 + b handle_page_fault_tramp_1 +#else /* CONFIG_VMAP_STACK */ EXCEPTION_PROLOG handle_dar_dsisr=1 get_and_save_dar_dsisr_on_stack r4, r5, r11 BEGIN_MMU_FTR_SECTION @@ -316,11 +354,32 @@ BEGIN_MMU_FTR_SECTION FTR_SECTION_ELSE b handle_page_fault_tramp_2 ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE) +#endif /* CONFIG_VMAP_STACK */ /* Instruction access exception. */ . = 0x400 DO_KVM 0x400 InstructionAccess: +#ifdef CONFIG_VMAP_STACK + mtspr SPRN_SPRG_SCRATCH0,r10 + mtspr SPRN_SPRG_SCRATCH1,r11 + mfspr r10, SPRN_SPRG_THREAD + mfspr r11, SPRN_SRR0 + stw r11, SRR0(r10) + mfspr r11, SPRN_SRR1 /* check whether user or kernel */ + stw r11, SRR1(r10) + mfcr r10 +BEGIN_MMU_FTR_SECTION + andis. r11, r11, SRR1_ISI_NOPT@h /* no pte found? */ + bne hash_page_isi +.Lhash_page_isi_cont: + mfspr r11, SPRN_SRR1 /* check whether user or kernel */ +END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) + andi. r11, r11, MSR_PR + + EXCEPTION_PROLOG_1 + EXCEPTION_PROLOG_2 +#else /* CONFIG_VMAP_STACK */ EXCEPTION_PROLOG andis. r0,r9,SRR1_ISI_NOPT@h /* no pte found? */ beq 1f /* if so, try to put a PTE */ @@ -329,6 +388,7 @@ InstructionAccess: BEGIN_MMU_FTR_SECTION bl hash_page END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) +#endif /* CONFIG_VMAP_STACK */ 1: mr r4,r12 andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */ stw r4, _DAR(r11) @@ -344,7 +404,7 @@ Alignment: EXCEPTION_PROLOG handle_dar_dsisr=1 save_dar_dsisr_on_stack r4, r5, r11 addi r3,r1,STACK_FRAME_OVERHEAD - EXC_XFER_STD(0x600, alignment_exception) + b alignment_exception_tramp /* Program check exception */ EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD) @@ -645,15 +705,100 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU) . 
= 0x3000 +machine_check_tramp: + EXC_XFER_STD(0x200, machine_check_exception) + +alignment_exception_tramp: + EXC_XFER_STD(0x600, alignment_exception) + handle_page_fault_tramp_1: +#ifdef CONFIG_VMAP_STACK + EXCEPTION_PROLOG_2 handle_dar_dsisr=1 +#endif lwz r4, _DAR(r11) lwz r5, _DSISR(r11) /* fall through */ handle_page_fault_tramp_2: EXC_XFER_LITE(0x300, handle_page_fault) +#ifdef CONFIG_VMAP_STACK +.macro save_regs_thread thread + stw r0, THR0(\thread) + stw r3, THR3(\thread) + stw r4, THR4(\thread) + stw r5, THR5(\thread) + stw r6, THR6(\thread) + stw r8, THR8(\thread) + stw r9, THR9(\thread) + mflr r0 + stw r0, THLR(\thread) + mfctr r0 + stw r0, THCTR(\thread) +.endm + +.macro restore_regs_thread thread + lwz r0, THLR(\thread) + mtlr r0 + lwz r0, THCTR(\thread) + mtctr r0 + lwz r0, THR0(\thread) + lwz r3, THR3(\thread) + lwz r4, THR4(\thread) + lwz r5, THR5(\thread) + lwz r6, THR6(\thread) + lwz r8, THR8(\thread) + lwz r9, THR9(\thread) +.endm + +hash_page_dsi: + save_regs_thread r10 + mfdsisr r3 + mfdar r4 + mfsrr0 r5 + mfsrr1 r9 + rlwinm r3, r3, 32 - 15, _PAGE_RW /* DSISR_STORE -> _PAGE_RW */ + bl hash_page + mfspr r10, SPRN_SPRG_THREAD + restore_regs_thread r10 + b .Lhash_page_dsi_cont + +hash_page_isi: + mr r11, r10 + mfspr r10, SPRN_SPRG_THREAD + save_regs_thread r10 + li r3, 0 + lwz r4, SRR0(r10) + lwz r9, SRR1(r10) + bl hash_page + mfspr r10, SPRN_SPRG_THREAD + restore_regs_thread r10 + mr r10, r11 + b .Lhash_page_isi_cont + + .globl fast_hash_page_return +fast_hash_page_return: + andis. r10, r9, SRR1_ISI_NOPT@h /* Set on ISI, cleared on DSI */ + mfspr r10, SPRN_SPRG_THREAD + restore_regs_thread r10 + bne 1f + + /* DSI */ + mtcr r11 + lwz r11, THR11(r10) + mfspr r10, SPRN_SPRG_SCRATCH0 + SYNC + RFI + +1: /* ISI */ + mtcr r11 + mfspr r11, SPRN_SPRG_SCRATCH1 + mfspr r10, SPRN_SPRG_SCRATCH0 + SYNC + RFI + stack_overflow: vmap_stack_overflow_exception +#endif AltiVecUnavailable: EXCEPTION_PROLOG diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h index a6a5fbbf8504ae3cd5d5104076cec9358873dabb..9db162f79fe6e650fd61be32dd80bda9ea844f08 100644 --- a/arch/powerpc/kernel/head_32.h +++ b/arch/powerpc/kernel/head_32.h @@ -64,11 +64,25 @@ .endm .macro EXCEPTION_PROLOG_2 handle_dar_dsisr=0 +#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S) +BEGIN_MMU_FTR_SECTION + mtcr r10 +FTR_SECTION_ELSE + stw r10, _CCR(r11) +ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE) +#else stw r10,_CCR(r11) /* save registers */ +#endif + mfspr r10, SPRN_SPRG_SCRATCH0 stw r12,GPR12(r11) stw r9,GPR9(r11) - mfspr r10,SPRN_SPRG_SCRATCH0 stw r10,GPR10(r11) +#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S) +BEGIN_MMU_FTR_SECTION + mfcr r10 + stw r10, _CCR(r11) +END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) +#endif mfspr r12,SPRN_SPRG_SCRATCH1 stw r12,GPR11(r11) mflr r10 @@ -83,6 +97,11 @@ stw r10, _DSISR(r11) .endif lwz r9, SRR1(r12) +#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S) +BEGIN_MMU_FTR_SECTION + andi. r10, r9, MSR_PR +END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) +#endif lwz r12, SRR0(r12) #else mfspr r12,SPRN_SRR0 diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 9922306ae51244f9dcd0ed1f2f3ffac4603aabee..073a651787df8ab65847aa428fa1d9216d57a231 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -256,7 +256,7 @@ InstructionTLBMiss: * set. All other Linux PTE bits control the behavior * of the MMU. 
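The one-line fix that follows replaces rlwimi with rlwinm: with a shift of zero, rotate-and-insert of a register into itself under a mask is a no-op, while rotate-and-mask really clears the masked-out bits. A userspace model of the two instructions, following their usual definitions, makes the difference concrete:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t rotl32(uint32_t v, unsigned int s)
    {
        return s ? (v << s) | (v >> (32 - s)) : v;
    }

    /* rlwimi rA,rS,sh,mask: insert rotated rS into rA under mask */
    static uint32_t rlwimi(uint32_t ra, uint32_t rs, unsigned int sh, uint32_t mask)
    {
        return (rotl32(rs, sh) & mask) | (ra & ~mask);
    }

    /* rlwinm rA,rS,sh,mask: rotate rS and AND with mask */
    static uint32_t rlwinm(uint32_t rs, unsigned int sh, uint32_t mask)
    {
        return rotl32(rs, sh) & mask;
    }

    int main(void)
    {
        uint32_t r10 = 0xdeadbf00;

        /* Old code: inserting r10 into itself leaves bits 20-23 untouched. */
        assert(rlwimi(r10, r10, 0, 0x0f00) == r10);
        /* New code: masking with ~0x0f00 actually clears them. */
        assert(rlwinm(r10, 0, ~0x0f00u) == (r10 & ~0x0f00u));
        return 0;
    }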
*/ - rlwimi r10, r10, 0, 0x0f00 /* Clear bits 20-23 */ + rlwinm r10, r10, 0, ~0x0f00 /* Clear bits 20-23 */ rlwimi r10, r10, 4, 0x0400 /* Copy _PAGE_EXEC into bit 21 */ ori r10, r10, RPN_PATTERN | 0x200 /* Set 22 and 24-27 */ mtspr SPRN_MI_RPN, r10 /* Update TLB entry */ diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index 2462cd7c565c6053f4866447b148ab0b7b3391c9..d0854320bb50fdd65eadbadb50ffcddc0bedc592 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -331,11 +331,13 @@ int hw_breakpoint_handler(struct die_args *args) } info->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ; - if (!dar_within_range(regs->dar, info)) - info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ; - - if (!IS_ENABLED(CONFIG_PPC_8xx) && !stepping_handler(regs, bp, info)) - goto out; + if (IS_ENABLED(CONFIG_PPC_8xx)) { + if (!dar_within_range(regs->dar, info)) + info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ; + } else { + if (!stepping_handler(regs, bp, info)) + goto out; + } /* * As a policy, the callback is invoked in a 'trigger-after-execute' diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S index 0ffdd18b9f268b2b75f3c305505deb89fb6d09de..433d97bea1f3b8eb2e7554c317c9600a41d53b08 100644 --- a/arch/powerpc/kernel/idle_6xx.S +++ b/arch/powerpc/kernel/idle_6xx.S @@ -166,7 +166,11 @@ BEGIN_FTR_SECTION mfspr r9,SPRN_HID0 andis. r9,r9,HID0_NAP@h beq 1f +#ifdef CONFIG_VMAP_STACK + addis r9, r11, nap_save_msscr0@ha +#else addis r9,r11,(nap_save_msscr0-KERNELBASE)@ha +#endif lwz r9,nap_save_msscr0@l(r9) mtspr SPRN_MSSCR0, r9 sync @@ -174,7 +178,11 @@ BEGIN_FTR_SECTION 1: END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR) BEGIN_FTR_SECTION +#ifdef CONFIG_VMAP_STACK + addis r9, r11, nap_save_hid1@ha +#else addis r9,r11,(nap_save_hid1-KERNELBASE)@ha +#endif lwz r9,nap_save_hid1@l(r9) mtspr SPRN_HID1, r9 END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX) diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index e6c30cee6abf1748e52fe5a4ae9a0fbc01c3274a..d215f95545537ababac1eb849baab01df0e1642a 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c @@ -200,14 +200,27 @@ unsigned long get_tm_stackpointer(struct task_struct *tsk) * normal/non-checkpointed stack pointer. */ + unsigned long ret = tsk->thread.regs->gpr[1]; + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM BUG_ON(tsk != current); if (MSR_TM_ACTIVE(tsk->thread.regs->msr)) { + preempt_disable(); tm_reclaim_current(TM_CAUSE_SIGNAL); if (MSR_TM_TRANSACTIONAL(tsk->thread.regs->msr)) - return tsk->thread.ckpt_regs.gpr[1]; + ret = tsk->thread.ckpt_regs.gpr[1]; + + /* + * If we treclaim, we must clear the current thread's TM bits + * before re-enabling preemption. Otherwise we might be + * preempted and have the live MSR[TS] changed behind our back + * (tm_recheckpoint_new_task() would recheckpoint). Besides, we + * enter the signal handler in non-transactional state. 
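The signal changes around this point share one pattern: since get_tm_stackpointer() now clears MSR[TS] in the live registers, every caller snapshots the MSR first and passes the copy down. A small sketch of the pattern; the TS bit placement and helper are illustrative only:

    #include <assert.h>

    #define MSR_TS_MASK (3ULL << 33)  /* illustrative TS bit placement */

    struct pt_regs { unsigned long long msr; };

    /* Stands in for get_tm_stackpointer(), which now clears MSR[TS]. */
    static void reclaim_clears_ts(struct pt_regs *regs)
    {
        regs->msr &= ~MSR_TS_MASK;
    }

    int main(void)
    {
        struct pt_regs regs = { .msr = 1ULL << 33 };
        unsigned long long msr = regs.msr;      /* snapshot first */

        reclaim_clears_ts(&regs);
        assert(msr & MSR_TS_MASK);              /* decisions use the snapshot */
        assert(!(regs.msr & MSR_TS_MASK));      /* the live value lost TS */
        return 0;
    }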
+ */ + tsk->thread.regs->msr &= ~MSR_TS_MASK; + preempt_enable(); } #endif - return tsk->thread.regs->gpr[1]; + return ret; } diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 98600b276f764d957fe5f8d91386c921a288cc90..1b090a76b4444729c4b8614b72277c1643539e92 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -489,19 +489,11 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, */ static int save_tm_user_regs(struct pt_regs *regs, struct mcontext __user *frame, - struct mcontext __user *tm_frame, int sigret) + struct mcontext __user *tm_frame, int sigret, + unsigned long msr) { - unsigned long msr = regs->msr; - WARN_ON(tm_suspend_disabled); - /* Remove TM bits from thread's MSR. The MSR in the sigcontext - * just indicates to userland that we were doing a transaction, but we - * don't want to return in transactional state. This also ensures - * that flush_fp_to_thread won't set TIF_RESTORE_TM again. - */ - regs->msr &= ~MSR_TS_MASK; - /* Save both sets of general registers */ if (save_general_regs(¤t->thread.ckpt_regs, frame) || save_general_regs(regs, tm_frame)) @@ -912,6 +904,10 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset, int sigret; unsigned long tramp; struct pt_regs *regs = tsk->thread.regs; +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + /* Save the thread's msr before get_tm_stackpointer() changes it */ + unsigned long msr = regs->msr; +#endif BUG_ON(tsk != current); @@ -944,13 +940,13 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset, #ifdef CONFIG_PPC_TRANSACTIONAL_MEM tm_frame = &rt_sf->uc_transact.uc_mcontext; - if (MSR_TM_ACTIVE(regs->msr)) { + if (MSR_TM_ACTIVE(msr)) { if (__put_user((unsigned long)&rt_sf->uc_transact, &rt_sf->uc.uc_link) || __put_user((unsigned long)tm_frame, &rt_sf->uc_transact.uc_regs)) goto badframe; - if (save_tm_user_regs(regs, frame, tm_frame, sigret)) + if (save_tm_user_regs(regs, frame, tm_frame, sigret, msr)) goto badframe; } else @@ -1369,6 +1365,10 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset, int sigret; unsigned long tramp; struct pt_regs *regs = tsk->thread.regs; +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + /* Save the thread's msr before get_tm_stackpointer() changes it */ + unsigned long msr = regs->msr; +#endif BUG_ON(tsk != current); @@ -1402,9 +1402,9 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset, #ifdef CONFIG_PPC_TRANSACTIONAL_MEM tm_mctx = &frame->mctx_transact; - if (MSR_TM_ACTIVE(regs->msr)) { + if (MSR_TM_ACTIVE(msr)) { if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact, - sigret)) + sigret, msr)) goto badframe; } else diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 117515564ec7a6e2d13ecdeba37e3973934fba10..84ed2e77ef9c3f5e039cf8f65921b5124e0bb226 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -192,7 +192,8 @@ static long setup_sigcontext(struct sigcontext __user *sc, static long setup_tm_sigcontexts(struct sigcontext __user *sc, struct sigcontext __user *tm_sc, struct task_struct *tsk, - int signr, sigset_t *set, unsigned long handler) + int signr, sigset_t *set, unsigned long handler, + unsigned long msr) { /* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the * process never used altivec yet (MSR_VEC is zero in pt_regs of @@ -207,12 +208,11 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc, elf_vrreg_t __user *tm_v_regs = sigcontext_vmx_regs(tm_sc); #endif struct 
pt_regs *regs = tsk->thread.regs; - unsigned long msr = tsk->thread.regs->msr; long err = 0; BUG_ON(tsk != current); - BUG_ON(!MSR_TM_ACTIVE(regs->msr)); + BUG_ON(!MSR_TM_ACTIVE(msr)); WARN_ON(tm_suspend_disabled); @@ -222,13 +222,6 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc, */ msr |= tsk->thread.ckpt_regs.msr & (MSR_FP | MSR_VEC | MSR_VSX); - /* Remove TM bits from thread's MSR. The MSR in the sigcontext - * just indicates to userland that we were doing a transaction, but we - * don't want to return in transactional state. This also ensures - * that flush_fp_to_thread won't set TIF_RESTORE_TM again. - */ - regs->msr &= ~MSR_TS_MASK; - #ifdef CONFIG_ALTIVEC err |= __put_user(v_regs, &sc->v_regs); err |= __put_user(tm_v_regs, &tm_sc->v_regs); @@ -824,6 +817,10 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, unsigned long newsp = 0; long err = 0; struct pt_regs *regs = tsk->thread.regs; +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + /* Save the thread's msr before get_tm_stackpointer() changes it */ + unsigned long msr = regs->msr; +#endif BUG_ON(tsk != current); @@ -841,7 +838,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, err |= __put_user(0, &frame->uc.uc_flags); err |= __save_altstack(&frame->uc.uc_stack, regs->gpr[1]); #ifdef CONFIG_PPC_TRANSACTIONAL_MEM - if (MSR_TM_ACTIVE(regs->msr)) { + if (MSR_TM_ACTIVE(msr)) { /* The ucontext_t passed to userland points to the second * ucontext_t (for transactional state) with its uc_link ptr. */ @@ -849,7 +846,8 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext, &frame->uc_transact.uc_mcontext, tsk, ksig->sig, NULL, - (unsigned long)ksig->ka.sa.sa_handler); + (unsigned long)ksig->ka.sa.sa_handler, + msr); } else #endif { diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index b4c89a1acebb83b75b1151e1a29aae89704f66ed..a32d478a7f41de22c1841262790ca60fc72ca376 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -303,6 +303,12 @@ SECTIONS *(.branch_lt) } +#ifdef CONFIG_DEBUG_INFO_BTF + .BTF : AT(ADDR(.BTF) - LOAD_OFFSET) { + *(.BTF) + } +#endif + .opd : AT(ADDR(.opd) - LOAD_OFFSET) { __start_opd = .; KEEP(*(.opd)) diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 729a0f12a752f659ed56b9ffc4a5199418838083..db3a87319642af9bc145ee54458a68aea3e2d34d 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -1817,6 +1817,7 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); + kvmppc_mmu_destroy_pr(vcpu); free_page((unsigned long)vcpu->arch.shared & PAGE_MASK); #ifdef CONFIG_KVM_BOOK3S_32_HANDLER kfree(vcpu->arch.shadow_vcpu); diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 1af96fb5dc6fbedb7e03aa0a97ff4b06159f262e..302e9dccdd6d79ae8d945c4d730aae4ca036c277 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -759,7 +759,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) return 0; out_vcpu_uninit: - kvmppc_mmu_destroy(vcpu); kvmppc_subarch_vcpu_uninit(vcpu); return err; } @@ -792,7 +791,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) kvmppc_core_vcpu_free(vcpu); - kvmppc_mmu_destroy(vcpu); kvmppc_subarch_vcpu_uninit(vcpu); } diff --git a/arch/powerpc/mm/book3s32/hash_low.S b/arch/powerpc/mm/book3s32/hash_low.S index c11b0a005196675271839c20a9a0c89cf2253f13..2015c4f962380940700d84bcc32af9d4961b80ea 
100644 --- a/arch/powerpc/mm/book3s32/hash_low.S +++ b/arch/powerpc/mm/book3s32/hash_low.S @@ -25,12 +25,6 @@ #include #include -#ifdef CONFIG_VMAP_STACK -#define ADDR_OFFSET 0 -#else -#define ADDR_OFFSET PAGE_OFFSET -#endif - #ifdef CONFIG_SMP .section .bss .align 2 @@ -53,8 +47,8 @@ mmu_hash_lock: .text _GLOBAL(hash_page) #ifdef CONFIG_SMP - lis r8, (mmu_hash_lock - ADDR_OFFSET)@h - ori r8, r8, (mmu_hash_lock - ADDR_OFFSET)@l + lis r8, (mmu_hash_lock - PAGE_OFFSET)@h + ori r8, r8, (mmu_hash_lock - PAGE_OFFSET)@l lis r0,0x0fff b 10f 11: lwz r6,0(r8) @@ -72,12 +66,9 @@ _GLOBAL(hash_page) cmplw 0,r4,r0 ori r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */ mfspr r5, SPRN_SPRG_PGDIR /* phys page-table root */ -#ifdef CONFIG_VMAP_STACK - tovirt(r5, r5) -#endif blt+ 112f /* assume user more likely */ - lis r5, (swapper_pg_dir - ADDR_OFFSET)@ha /* if kernel address, use */ - addi r5 ,r5 ,(swapper_pg_dir - ADDR_OFFSET)@l /* kernel page table */ + lis r5, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */ + addi r5 ,r5 ,(swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */ rlwimi r3,r9,32-12,29,29 /* MSR_PR -> _PAGE_USER */ 112: #ifndef CONFIG_PTE_64BIT @@ -89,9 +80,6 @@ _GLOBAL(hash_page) lwzx r8,r8,r5 /* Get L1 entry */ rlwinm. r8,r8,0,0,20 /* extract pt base address */ #endif -#ifdef CONFIG_VMAP_STACK - tovirt(r8, r8) -#endif #ifdef CONFIG_SMP beq- hash_page_out /* return if no mapping */ #else @@ -143,30 +131,36 @@ retry: bne- retry /* retry if someone got there first */ mfsrin r3,r4 /* get segment reg for segment */ +#ifndef CONFIG_VMAP_STACK mfctr r0 stw r0,_CTR(r11) +#endif bl create_hpte /* add the hash table entry */ #ifdef CONFIG_SMP eieio - lis r8, (mmu_hash_lock - ADDR_OFFSET)@ha + lis r8, (mmu_hash_lock - PAGE_OFFSET)@ha li r0,0 - stw r0, (mmu_hash_lock - ADDR_OFFSET)@l(r8) + stw r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8) #endif +#ifdef CONFIG_VMAP_STACK + b fast_hash_page_return +#else /* Return from the exception */ lwz r5,_CTR(r11) mtctr r5 lwz r0,GPR0(r11) lwz r8,GPR8(r11) b fast_exception_return +#endif #ifdef CONFIG_SMP hash_page_out: eieio - lis r8, (mmu_hash_lock - ADDR_OFFSET)@ha + lis r8, (mmu_hash_lock - PAGE_OFFSET)@ha li r0,0 - stw r0, (mmu_hash_lock - ADDR_OFFSET)@l(r8) + stw r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8) blr #endif /* CONFIG_SMP */ @@ -341,7 +335,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) patch_site 1f, patch__hash_page_A1 patch_site 2f, patch__hash_page_A2 /* Get the address of the primary PTE group in the hash table (r3) */ -0: lis r0, (Hash_base - ADDR_OFFSET)@h /* base address of hash table */ +0: lis r0, (Hash_base - PAGE_OFFSET)@h /* base address of hash table */ 1: rlwimi r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */ 2: rlwinm r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */ xor r3,r3,r0 /* make primary hash */ @@ -355,10 +349,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) beq+ 10f /* no PTE: go look for an empty slot */ tlbie r4 - lis r4, (htab_hash_searches - ADDR_OFFSET)@ha - lwz r6, (htab_hash_searches - ADDR_OFFSET)@l(r4) + lis r4, (htab_hash_searches - PAGE_OFFSET)@ha + lwz r6, (htab_hash_searches - PAGE_OFFSET)@l(r4) addi r6,r6,1 /* count how many searches we do */ - stw r6, (htab_hash_searches - ADDR_OFFSET)@l(r4) + stw r6, (htab_hash_searches - PAGE_OFFSET)@l(r4) /* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */ mtctr r0 @@ -390,10 +384,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) beq+ found_empty /* update counter of times that the primary PTEG is full */ - 
lis r4, (primary_pteg_full - ADDR_OFFSET)@ha - lwz r6, (primary_pteg_full - ADDR_OFFSET)@l(r4) + lis r4, (primary_pteg_full - PAGE_OFFSET)@ha + lwz r6, (primary_pteg_full - PAGE_OFFSET)@l(r4) addi r6,r6,1 - stw r6, (primary_pteg_full - ADDR_OFFSET)@l(r4) + stw r6, (primary_pteg_full - PAGE_OFFSET)@l(r4) patch_site 0f, patch__hash_page_C /* Search the secondary PTEG for an empty slot */ @@ -427,8 +421,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) * lockup here but that shouldn't happen */ -1: lis r4, (next_slot - ADDR_OFFSET)@ha /* get next evict slot */ - lwz r6, (next_slot - ADDR_OFFSET)@l(r4) +1: lis r4, (next_slot - PAGE_OFFSET)@ha /* get next evict slot */ + lwz r6, (next_slot - PAGE_OFFSET)@l(r4) addi r6,r6,HPTE_SIZE /* search for candidate */ andi. r6,r6,7*HPTE_SIZE stw r6,next_slot@l(r4) diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c index 0a1c65a2c56553cf36836af6bc30ef2424489980..f888cbb109b9134daee3176b37988b250ef008f5 100644 --- a/arch/powerpc/mm/book3s32/mmu.c +++ b/arch/powerpc/mm/book3s32/mmu.c @@ -413,7 +413,7 @@ void __init MMU_init_hw(void) void __init MMU_init_hw_patch(void) { unsigned int hmask = Hash_mask >> (16 - LG_HPTEG_SIZE); - unsigned int hash; + unsigned int hash = (unsigned int)Hash - PAGE_OFFSET; if (ppc_md.progress) ppc_md.progress("hash:patch", 0x345); @@ -425,11 +425,6 @@ void __init MMU_init_hw_patch(void) /* * Patch up the instructions in hashtable.S:create_hpte */ - if (IS_ENABLED(CONFIG_VMAP_STACK)) - hash = (unsigned int)Hash; - else - hash = (unsigned int)Hash - PAGE_OFFSET; - modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16); modify_instruction_site(&patch__hash_page_A1, 0x7c0, hash_mb << 6); modify_instruction_site(&patch__hash_page_A2, 0x7c0, hash_mb2 << 6); @@ -439,8 +434,7 @@ void __init MMU_init_hw_patch(void) /* * Patch up the instructions in hashtable.S:flush_hash_page */ - modify_instruction_site(&patch__flush_hash_A0, 0xffff, - ((unsigned int)Hash - PAGE_OFFSET) >> 16); + modify_instruction_site(&patch__flush_hash_A0, 0xffff, hash >> 16); modify_instruction_site(&patch__flush_hash_A1, 0x7c0, hash_mb << 6); modify_instruction_site(&patch__flush_hash_A2, 0x7c0, hash_mb2 << 6); modify_instruction_site(&patch__flush_hash_B, 0xffff, hmask); diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 73d4873fc7f85442eafc08399446bc176fd82b9a..33b3461d91e8db0e7b19f569a5c20d06a9a827be 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -53,20 +53,24 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, if (pshift >= pdshift) { cachep = PGT_CACHE(PTE_T_ORDER); num_hugepd = 1 << (pshift - pdshift); + new = NULL; } else if (IS_ENABLED(CONFIG_PPC_8xx)) { - cachep = PGT_CACHE(PTE_INDEX_SIZE); + cachep = NULL; num_hugepd = 1; + new = pte_alloc_one(mm); } else { cachep = PGT_CACHE(pdshift - pshift); num_hugepd = 1; + new = NULL; } - if (!cachep) { + if (!cachep && !new) { WARN_ONCE(1, "No page table cache created for hugetlb tables"); return -ENOMEM; } - new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL)); + if (cachep) + new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL)); BUG_ON(pshift > HUGEPD_SHIFT_MASK); BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK); @@ -97,7 +101,10 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, if (i < num_hugepd) { for (i = i - 1 ; i >= 0; i--, hpdp--) *hpdp = __hugepd(0); - kmem_cache_free(cachep, new); + if (cachep) + kmem_cache_free(cachep, new); + else + pte_free(mm, 
new); } else { kmemleak_ignore(new); } @@ -324,8 +331,7 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif if (shift >= pdshift) hugepd_free(tlb, hugepte); else if (IS_ENABLED(CONFIG_PPC_8xx)) - pgtable_free_tlb(tlb, hugepte, - get_hugepd_cache_index(PTE_INDEX_SIZE)); + pgtable_free_tlb(tlb, hugepte, 0); else pgtable_free_tlb(tlb, hugepte, get_hugepd_cache_index(pdshift - shift)); @@ -639,12 +645,13 @@ static int __init hugetlbpage_init(void) * if we have pdshift and shift value same, we don't * use pgt cache for hugepd. */ - if (pdshift > shift && IS_ENABLED(CONFIG_PPC_8xx)) - pgtable_cache_add(PTE_INDEX_SIZE); - else if (pdshift > shift) - pgtable_cache_add(pdshift - shift); - else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) || IS_ENABLED(CONFIG_PPC_8xx)) + if (pdshift > shift) { + if (!IS_ENABLED(CONFIG_PPC_8xx)) + pgtable_cache_add(pdshift - shift); + } else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) || + IS_ENABLED(CONFIG_PPC_8xx)) { pgtable_cache_add(PTE_T_ORDER); + } configured = true; } diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c index 16dd95bd0749d851410fce5ae727a0e9edfdb563..d2bed3fcb7194ec065e107b71786797359fd8257 100644 --- a/arch/powerpc/mm/kasan/kasan_init_32.c +++ b/arch/powerpc/mm/kasan/kasan_init_32.c @@ -120,12 +120,6 @@ static void __init kasan_unmap_early_shadow_vmalloc(void) unsigned long k_cur; phys_addr_t pa = __pa(kasan_early_shadow_page); - if (!early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) { - int ret = kasan_init_shadow_page_tables(k_start, k_end); - - if (ret) - panic("kasan: kasan_init_shadow_page_tables() failed"); - } for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) { pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur); pte_t *ptep = pte_offset_kernel(pmd, k_cur); @@ -143,7 +137,8 @@ void __init kasan_mmu_init(void) int ret; struct memblock_region *reg; - if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) { + if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE) || + IS_ENABLED(CONFIG_KASAN_VMALLOC)) { ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END); if (ret) @@ -185,8 +180,7 @@ u8 __initdata early_hash[256 << 10] __aligned(256 << 10) = {0}; static void __init kasan_early_hash_table(void) { - unsigned int hash = IS_ENABLED(CONFIG_VMAP_STACK) ? 
(unsigned int)early_hash : - __pa(early_hash); + unsigned int hash = __pa(early_hash); modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16); modify_instruction_site(&patch__flush_hash_A0, 0xffff, hash >> 16); diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index ef7b1119b2e296c41753fb4c17489c4834237e01..1c07d5a3f54318cdf7f315b3fa517b73698b028b 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -373,7 +373,9 @@ static inline bool flush_coherent_icache(unsigned long addr) */ if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) { mb(); /* sync */ + allow_read_from_user((const void __user *)addr, L1_CACHE_BYTES); icbi((void *)addr); + prevent_read_from_user((const void __user *)addr, L1_CACHE_BYTES); mb(); /* sync */ isync(); return true; diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index e8c84d265602bc57bf47b49243a2044d93f2336d..0ec9640335bb3544548bcf3c391b2dd946621f4b 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -3435,6 +3435,11 @@ getstring(char *s, int size) int c; c = skipbl(); + if (c == '\n') { + *s = 0; + return; + } + do { if( size > 1 ){ *s++ = c; diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 73f029eae0cce91d89cfff9206e86324b716f46f..cd5db57bfd41de99b08d7eace62bce0086c42bb3 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -50,7 +50,6 @@ config RISCV select PCI_DOMAINS_GENERIC if PCI select PCI_MSI if PCI select RISCV_TIMER - select UACCESS_MEMCPY if !MMU select GENERIC_IRQ_MULTI_HANDLER select GENERIC_ARCH_TOPOLOGY if SMP select ARCH_HAS_PTE_SPECIAL @@ -121,6 +120,7 @@ config ARCH_FLATMEM_ENABLE config ARCH_SPARSEMEM_ENABLE def_bool y + depends on MMU select SPARSEMEM_VMEMMAP_ENABLE config ARCH_SELECT_MEMORY_MODEL diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs index d325b67d00dfcf70c4f3cf89e68e3964eec62a06..a131174a0a77251f0cf630461fa7c8df8f260839 100644 --- a/arch/riscv/Kconfig.socs +++ b/arch/riscv/Kconfig.socs @@ -10,4 +10,14 @@ config SOC_SIFIVE help This enables support for SiFive SoC platform hardware. +config SOC_VIRT + bool "QEMU Virt Machine" + select POWER_RESET_SYSCON + select POWER_RESET_SYSCON_POWEROFF + select GOLDFISH + select RTC_DRV_GOLDFISH + select SIFIVE_PLIC + help + This enables support for QEMU Virt Machine. 
+ endmenu diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index b9009a2fbaf5c4dd7965f75264dee93539308019..259cb53d7f20e04e872d3b3f68f34b1e28789f10 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -13,8 +13,10 @@ LDFLAGS_vmlinux := ifeq ($(CONFIG_DYNAMIC_FTRACE),y) LDFLAGS_vmlinux := --no-relax endif -KBUILD_AFLAGS_MODULE += -fPIC -KBUILD_CFLAGS_MODULE += -fPIC + +ifeq ($(CONFIG_64BIT)$(CONFIG_CMODEL_MEDLOW),yy) +KBUILD_CFLAGS_MODULE += -mcmodel=medany +endif export BITS ifeq ($(CONFIG_ARCH_RV64I),y) diff --git a/arch/riscv/boot/.gitignore b/arch/riscv/boot/.gitignore index 8dab0bb6ae667c5f89da8aca74a0a3bb61dcfaa9..8a45a37d2af4ccf129e4de4002529c853b224d8a 100644 --- a/arch/riscv/boot/.gitignore +++ b/arch/riscv/boot/.gitignore @@ -1,2 +1,4 @@ Image Image.gz +loader +loader.lds diff --git a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts index 609198cb1163756e9c00d6c8f70e8a8b2440c7a0..4a2729f5ca3f0113be3e02e72f9c427fe02207cc 100644 --- a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts +++ b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts @@ -2,6 +2,7 @@ /* Copyright (c) 2018-2019 SiFive, Inc */ #include "fu540-c000.dtsi" +#include /* Clock frequency (in Hz) of the PCB crystal for rtcclk */ #define RTCCLK_FREQ 1000000 @@ -41,6 +42,10 @@ clock-frequency = ; clock-output-names = "rtcclk"; }; + gpio-restart { + compatible = "gpio-restart"; + gpios = <&gpio 10 GPIO_ACTIVE_LOW>; + }; }; &uart0 { diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig index e2ff95cb3390f1750746d433e1877c45855b73f6..2557c5372a2507685a86c0cfe26c2e6b4cb195b9 100644 --- a/arch/riscv/configs/defconfig +++ b/arch/riscv/configs/defconfig @@ -15,6 +15,7 @@ CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y CONFIG_BPF_SYSCALL=y CONFIG_SOC_SIFIVE=y +CONFIG_SOC_VIRT=y CONFIG_SMP=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -63,6 +64,7 @@ CONFIG_HW_RANDOM_VIRTIO=y CONFIG_SPI=y CONFIG_SPI_SIFIVE=y # CONFIG_PTP_1588_CLOCK is not set +CONFIG_POWER_RESET=y CONFIG_DRM=y CONFIG_DRM_RADEON=y CONFIG_DRM_VIRTIO_GPU=y @@ -78,6 +80,7 @@ CONFIG_USB_STORAGE=y CONFIG_USB_UAS=y CONFIG_MMC=y CONFIG_MMC_SPI=y +CONFIG_RTC_CLASS=y CONFIG_VIRTIO_PCI=y CONFIG_VIRTIO_BALLOON=y CONFIG_VIRTIO_INPUT=y @@ -102,13 +105,13 @@ CONFIG_CRYPTO_DEV_VIRTIO=y CONFIG_PRINTK_TIME=y CONFIG_DEBUG_FS=y CONFIG_DEBUG_PAGEALLOC=y +CONFIG_SCHED_STACK_END_CHECK=y CONFIG_DEBUG_VM=y CONFIG_DEBUG_VM_PGFLAGS=y CONFIG_DEBUG_MEMORY_INIT=y CONFIG_DEBUG_PER_CPU_MAPS=y CONFIG_SOFTLOCKUP_DETECTOR=y CONFIG_WQ_WATCHDOG=y -CONFIG_SCHED_STACK_END_CHECK=y CONFIG_DEBUG_TIMEKEEPING=y CONFIG_DEBUG_RT_MUTEXES=y CONFIG_DEBUG_SPINLOCK=y diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig index eb519407c841b3327898680cd4c1893165900eb4..0292879a96904e7d8e2caaad3e34f6d3d2022c90 100644 --- a/arch/riscv/configs/rv32_defconfig +++ b/arch/riscv/configs/rv32_defconfig @@ -14,6 +14,7 @@ CONFIG_CHECKPOINT_RESTORE=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y CONFIG_BPF_SYSCALL=y +CONFIG_SOC_VIRT=y CONFIG_ARCH_RV32I=y CONFIG_SMP=y CONFIG_MODULES=y @@ -61,6 +62,7 @@ CONFIG_VIRTIO_CONSOLE=y CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_VIRTIO=y # CONFIG_PTP_1588_CLOCK is not set +CONFIG_POWER_RESET=y CONFIG_DRM=y CONFIG_DRM_RADEON=y CONFIG_DRM_VIRTIO_GPU=y @@ -74,13 +76,13 @@ CONFIG_USB_OHCI_HCD=y CONFIG_USB_OHCI_HCD_PLATFORM=y CONFIG_USB_STORAGE=y CONFIG_USB_UAS=y +CONFIG_RTC_CLASS=y CONFIG_VIRTIO_PCI=y CONFIG_VIRTIO_BALLOON=y CONFIG_VIRTIO_INPUT=y CONFIG_VIRTIO_MMIO=y CONFIG_RPMSG_CHAR=y 
CONFIG_RPMSG_VIRTIO=y -CONFIG_SIFIVE_PLIC=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_AUTOFS4_FS=y @@ -99,13 +101,13 @@ CONFIG_CRYPTO_DEV_VIRTIO=y CONFIG_PRINTK_TIME=y CONFIG_DEBUG_FS=y CONFIG_DEBUG_PAGEALLOC=y +CONFIG_SCHED_STACK_END_CHECK=y CONFIG_DEBUG_VM=y CONFIG_DEBUG_VM_PGFLAGS=y CONFIG_DEBUG_MEMORY_INIT=y CONFIG_DEBUG_PER_CPU_MAPS=y CONFIG_SOFTLOCKUP_DETECTOR=y CONFIG_WQ_WATCHDOG=y -CONFIG_SCHED_STACK_END_CHECK=y CONFIG_DEBUG_TIMEKEEPING=y CONFIG_DEBUG_RT_MUTEXES=y CONFIG_DEBUG_SPINLOCK=y diff --git a/arch/riscv/include/asm/clint.h b/arch/riscv/include/asm/clint.h index 6eaa2eedd69418d0baf4efad5516bf9e96d044c0..a279b17a6aade915d0f330247a02387515837da0 100644 --- a/arch/riscv/include/asm/clint.h +++ b/arch/riscv/include/asm/clint.h @@ -15,12 +15,12 @@ static inline void clint_send_ipi_single(unsigned long hartid) writel(1, clint_ipi_base + hartid); } -static inline void clint_send_ipi_mask(const struct cpumask *hartid_mask) +static inline void clint_send_ipi_mask(const struct cpumask *mask) { - int hartid; + int cpu; - for_each_cpu(hartid, hartid_mask) - clint_send_ipi_single(hartid); + for_each_cpu(cpu, mask) + clint_send_ipi_single(cpuid_to_hartid_map(cpu)); } static inline void clint_clear_ipi(unsigned long hartid) diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h index 435b65532e2945703cc17a9e5c4f7613a0d5b6b8..8e18d2c64399df91e0619852bfd5c9a4757cdc37 100644 --- a/arch/riscv/include/asm/csr.h +++ b/arch/riscv/include/asm/csr.h @@ -72,6 +72,16 @@ #define EXC_LOAD_PAGE_FAULT 13 #define EXC_STORE_PAGE_FAULT 15 +/* PMP configuration */ +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_A_TOR 0x08 +#define PMP_A_NA4 0x10 +#define PMP_A_NAPOT 0x18 +#define PMP_L 0x80 + /* symbolic CSR names: */ #define CSR_CYCLE 0xc00 #define CSR_TIME 0xc01 @@ -100,6 +110,8 @@ #define CSR_MCAUSE 0x342 #define CSR_MTVAL 0x343 #define CSR_MIP 0x344 +#define CSR_PMPCFG0 0x3a0 +#define CSR_PMPADDR0 0x3b0 #define CSR_MHARTID 0xf14 #ifdef CONFIG_RISCV_M_MODE diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index e43041519edd7a5dc42b78f9fed2f5c5d375fb5b..393f2014dfeed0aab02a0292fb5b32a860560a8e 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -19,6 +19,47 @@ #include #include +#ifdef CONFIG_MMU + +#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1) +#define VMALLOC_END (PAGE_OFFSET - 1) +#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE) + +#define BPF_JIT_REGION_SIZE (SZ_128M) +#define BPF_JIT_REGION_START (PAGE_OFFSET - BPF_JIT_REGION_SIZE) +#define BPF_JIT_REGION_END (VMALLOC_END) + +/* + * Roughly size the vmemmap space to be large enough to fit enough + * struct pages to map half the virtual address space. Then + * position vmemmap directly below the VMALLOC region. + */ +#define VMEMMAP_SHIFT \ + (CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT) +#define VMEMMAP_SIZE BIT(VMEMMAP_SHIFT) +#define VMEMMAP_END (VMALLOC_START - 1) +#define VMEMMAP_START (VMALLOC_START - VMEMMAP_SIZE) + +/* + * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel + * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled. 
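Separately, the clint_send_ipi_mask() fix above walks logical CPU ids and translates each one to its hartid, instead of treating cpumask bit positions as hartids. A userspace sketch of the fixed loop, with an invented cpu-to-hartid table:

    #include <stdio.h>

    #define NR_CPUS 4
    /* Invented mapping: logical cpu ids need not equal hartids. */
    static const int cpuid_to_hartid_map[NR_CPUS] = { 3, 0, 1, 2 };

    static void clint_send_ipi_single(int hartid)
    {
        printf("IPI to hart %d\n", hartid);
    }

    /* Fixed form: iterate cpus in the mask, then translate to hartids. */
    static void clint_send_ipi_mask(unsigned int cpu_mask)
    {
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
            if (cpu_mask & (1u << cpu))
                clint_send_ipi_single(cpuid_to_hartid_map[cpu]);
    }

    int main(void)
    {
        clint_send_ipi_mask(0x3);  /* cpus 0 and 1 -> harts 3 and 0 */
        return 0;
    }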
+ */ +#define vmemmap ((struct page *)VMEMMAP_START) + +#define PCI_IO_SIZE SZ_16M +#define PCI_IO_END VMEMMAP_START +#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE) + +#define FIXADDR_TOP PCI_IO_START +#ifdef CONFIG_64BIT +#define FIXADDR_SIZE PMD_SIZE +#else +#define FIXADDR_SIZE PGDIR_SIZE +#endif +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) + +#endif + #ifdef CONFIG_64BIT #include #else @@ -90,31 +131,6 @@ extern pgd_t swapper_pg_dir[]; #define __S110 PAGE_SHARED_EXEC #define __S111 PAGE_SHARED_EXEC -#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1) -#define VMALLOC_END (PAGE_OFFSET - 1) -#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE) - -#define BPF_JIT_REGION_SIZE (SZ_128M) -#define BPF_JIT_REGION_START (PAGE_OFFSET - BPF_JIT_REGION_SIZE) -#define BPF_JIT_REGION_END (VMALLOC_END) - -/* - * Roughly size the vmemmap space to be large enough to fit enough - * struct pages to map half the virtual address space. Then - * position vmemmap directly below the VMALLOC region. - */ -#define VMEMMAP_SHIFT \ - (CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT) -#define VMEMMAP_SIZE BIT(VMEMMAP_SHIFT) -#define VMEMMAP_END (VMALLOC_START - 1) -#define VMEMMAP_START (VMALLOC_START - VMEMMAP_SIZE) - -/* - * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel - * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled. - */ -#define vmemmap ((struct page *)VMEMMAP_START) - static inline int pmd_present(pmd_t pmd) { return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE)); @@ -432,18 +448,6 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma, #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) -#define PCI_IO_SIZE SZ_16M -#define PCI_IO_END VMEMMAP_START -#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE) - -#define FIXADDR_TOP PCI_IO_START -#ifdef CONFIG_64BIT -#define FIXADDR_SIZE PMD_SIZE -#else -#define FIXADDR_SIZE PGDIR_SIZE -#endif -#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) - /* * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32. * Note that PGDIR_SIZE must evenly divide TASK_SIZE. 
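The relocated layout macros above size vmemmap so that struct pages covering half the virtual address space fit directly below the VMALLOC region. The arithmetic is easy to sanity-check in userspace; the Sv39-style values below are assumptions, with STRUCT_PAGE_MAX_SHIFT of 6 standing for a 64-byte struct page:

    #include <stdint.h>
    #include <stdio.h>

    #define CONFIG_VA_BITS        39   /* assumed Sv39 */
    #define PAGE_SHIFT            12
    #define STRUCT_PAGE_MAX_SHIFT  6   /* log2 of a 64-byte struct page */

    #define VMEMMAP_SHIFT \
        (CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)

    int main(void)
    {
        uint64_t pages = 1ULL << (CONFIG_VA_BITS - 1 - PAGE_SHIFT);
        uint64_t vmemmap_bytes = 1ULL << VMEMMAP_SHIFT;

        /* pages * sizeof(struct page) must fit in the vmemmap window */
        printf("%llu struct pages, %llu byte window (%s)\n",
               (unsigned long long)pages,
               (unsigned long long)vmemmap_bytes,
               (pages << STRUCT_PAGE_MAX_SHIFT) <= vmemmap_bytes ? "fits"
                                                                 : "overflow");
        return 0;
    }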
diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h index 42347d0981e7e4634a28d40d7675a417ac76b869..49350c8bd7b09b0f8193b3415c084db3d707d549 100644 --- a/arch/riscv/include/asm/syscall.h +++ b/arch/riscv/include/asm/syscall.h @@ -28,13 +28,6 @@ static inline int syscall_get_nr(struct task_struct *task, return regs->a7; } -static inline void syscall_set_nr(struct task_struct *task, - struct pt_regs *regs, - int sysno) -{ - regs->a7 = sysno; -} - static inline void syscall_rollback(struct task_struct *task, struct pt_regs *regs) { diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h index f462a183a9c2343415ce933ce34d996f0ae0b4d0..8ce9d607b53dceb8f115d4ebf7a43657270ed56a 100644 --- a/arch/riscv/include/asm/uaccess.h +++ b/arch/riscv/include/asm/uaccess.h @@ -11,6 +11,24 @@ /* * User space memory access functions */ + +extern unsigned long __must_check __asm_copy_to_user(void __user *to, + const void *from, unsigned long n); +extern unsigned long __must_check __asm_copy_from_user(void *to, + const void __user *from, unsigned long n); + +static inline unsigned long +raw_copy_from_user(void *to, const void __user *from, unsigned long n) +{ + return __asm_copy_from_user(to, from, n); +} + +static inline unsigned long +raw_copy_to_user(void __user *to, const void *from, unsigned long n) +{ + return __asm_copy_to_user(to, from, n); +} + #ifdef CONFIG_MMU #include #include @@ -367,24 +385,6 @@ do { \ -EFAULT; \ }) - -extern unsigned long __must_check __asm_copy_to_user(void __user *to, - const void *from, unsigned long n); -extern unsigned long __must_check __asm_copy_from_user(void *to, - const void __user *from, unsigned long n); - -static inline unsigned long -raw_copy_from_user(void *to, const void __user *from, unsigned long n) -{ - return __asm_copy_from_user(to, from, n); -} - -static inline unsigned long -raw_copy_to_user(void __user *to, const void *from, unsigned long n) -{ - return __asm_copy_to_user(to, from, n); -} - extern long strncpy_from_user(char *dest, const char __user *src, long count); extern long __must_check strlen_user(const char __user *str); diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index bad4d85b5e91810332719af05cb8d66009cf8982..208702d8c18eeac291ca8ef21eeec977d93af570 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S @@ -228,20 +228,13 @@ check_syscall_nr: /* Check to make sure we don't jump to a bogus syscall number. */ li t0, __NR_syscalls la s0, sys_ni_syscall - /* - * The tracer can change syscall number to valid/invalid value. - * We use syscall_set_nr helper in syscall_trace_enter thus we - * cannot trust the current value in a7 and have to reload from - * the current task pt_regs. - */ - REG_L a7, PT_A7(sp) /* * Syscall number held in a7. * If syscall number is above allowed value, redirect to ni_syscall. */ bge a7, t0, 1f /* - * Check if syscall is rejected by tracer or seccomp, i.e., a7 == -1. + * Check if syscall is rejected by tracer, i.e., a7 == -1. * If yes, we pretend it was executed. 
*/ li t1, -1 @@ -334,6 +327,7 @@ work_resched: handle_syscall_trace_enter: move a0, sp call do_syscall_trace_enter + move t0, a0 REG_L a0, PT_A0(sp) REG_L a1, PT_A1(sp) REG_L a2, PT_A2(sp) @@ -342,6 +336,7 @@ handle_syscall_trace_enter: REG_L a5, PT_A5(sp) REG_L a6, PT_A6(sp) REG_L a7, PT_A7(sp) + bnez t0, ret_from_syscall_rejected j check_syscall_nr handle_syscall_trace_exit: move a0, sp diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index 271860fc2c3f02a5d50552339a005004444ab456..85f2073e7fe4abee5dcb97f91cd2f07cf9d9f9e7 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S @@ -58,6 +58,12 @@ _start_kernel: /* Reset all registers except ra, a0, a1 */ call reset_regs + /* Setup a PMP to permit access to all of memory. */ + li a0, -1 + csrw CSR_PMPADDR0, a0 + li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X) + csrw CSR_PMPCFG0, a0 + /* * The hartid in a0 is expected later on, and we have no firmware * to hand it to us. diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c index b7401858d872f37c496a3306b44a462df9e634a7..8bbe5dbe1341b663f02e08f094fd9e72a7fb2aad 100644 --- a/arch/riscv/kernel/module.c +++ b/arch/riscv/kernel/module.c @@ -8,6 +8,10 @@ #include #include #include +#include +#include +#include +#include static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v) { @@ -386,3 +390,15 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, return 0; } + +#if defined(CONFIG_MMU) && defined(CONFIG_64BIT) +#define VMALLOC_MODULE_START \ + max(PFN_ALIGN((unsigned long)&_end - SZ_2G), VMALLOC_START) +void *module_alloc(unsigned long size) +{ + return __vmalloc_node_range(size, 1, VMALLOC_MODULE_START, + VMALLOC_END, GFP_KERNEL, + PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE, + __builtin_return_address(0)); +} +#endif diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c index 407464201b91efe42fb3002bd78dd5498e729c55..444dc7b0fd78c5cdc04887ffa4f6fea50b99a992 100644 --- a/arch/riscv/kernel/ptrace.c +++ b/arch/riscv/kernel/ptrace.c @@ -148,21 +148,19 @@ long arch_ptrace(struct task_struct *child, long request, * Allows PTRACE_SYSCALL to work. These are called from entry.S in * {handle,ret_from}_syscall. */ -__visible void do_syscall_trace_enter(struct pt_regs *regs) +__visible int do_syscall_trace_enter(struct pt_regs *regs) { if (test_thread_flag(TIF_SYSCALL_TRACE)) if (tracehook_report_syscall_entry(regs)) - syscall_set_nr(current, regs, -1); + return -1; /* * Do the secure computing after ptrace; failures should be fast. * If this fails we might have return value in a0 from seccomp * (via SECCOMP_RET_ERRNO/TRACE). 
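do_syscall_trace_enter() now reports a rejected syscall through its return value, tested via t0 in entry.S, rather than overwriting a7 with the dropped syscall_set_nr() helper. A compact userspace model of the new control flow, with stub predicates standing in for the tracehook and seccomp checks:

    #include <stdio.h>

    struct pt_regs { long a7; };

    static int tracer_rejects(struct pt_regs *regs) { (void)regs; return 0; }
    static int seccomp_rejects(struct pt_regs *regs) { (void)regs; return 1; }

    /* Returns non-zero when the syscall must be skipped. */
    static int do_trace_enter(struct pt_regs *regs)
    {
        if (tracer_rejects(regs))
            return -1;
        if (seccomp_rejects(regs))   /* seccomp runs after ptrace */
            return -1;
        return 0;
    }

    int main(void)
    {
        struct pt_regs regs = { .a7 = 64 };

        if (do_trace_enter(&regs))
            printf("rejected; a7 still %ld, nothing clobbered in place\n",
                   regs.a7);
        return 0;
    }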
*/ - if (secure_computing() == -1) { - syscall_set_nr(current, regs, -1); - return; - } + if (secure_computing() == -1) + return -1; #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) @@ -170,6 +168,7 @@ __visible void do_syscall_trace_enter(struct pt_regs *regs) #endif audit_syscall_entry(regs->a7, regs->a0, regs->a1, regs->a2, regs->a3); + return 0; } __visible void do_syscall_trace_exit(struct pt_regs *regs) diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c index eb878abcaaf8176e6e3e7a6c0059360d60488582..e0a6293093f12ad51919842de90d47030547eea8 100644 --- a/arch/riscv/kernel/smp.c +++ b/arch/riscv/kernel/smp.c @@ -96,7 +96,7 @@ static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op) if (IS_ENABLED(CONFIG_RISCV_SBI)) sbi_send_ipi(cpumask_bits(&hartid_mask)); else - clint_send_ipi_mask(&hartid_mask); + clint_send_ipi_mask(mask); } static void send_ipi_single(int cpu, enum ipi_message_type op) diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index f4cad5163bf2c0160f64e828ba07e2d3b84623d2..ffb3d94bf0cc2782f5777e1423f6a3fc4310099f 100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -156,6 +156,6 @@ void __init trap_init(void) csr_write(CSR_SCRATCH, 0); /* Set the exception vector address */ csr_write(CSR_TVEC, &handle_exception); - /* Enable all interrupts */ - csr_write(CSR_IE, -1); + /* Enable interrupts */ + csr_write(CSR_IE, IE_SIE | IE_EIE); } diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile index 47e7a82044608d1b1b81a981b1511ee3406cf68a..0d0db80800c4ed32f8b9a39a6a60a2aa9f9886aa 100644 --- a/arch/riscv/lib/Makefile +++ b/arch/riscv/lib/Makefile @@ -2,5 +2,5 @@ lib-y += delay.o lib-y += memcpy.o lib-y += memset.o -lib-$(CONFIG_MMU) += uaccess.o +lib-y += uaccess.o lib-$(CONFIG_64BIT) += tishift.o diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 965a8cf4829ca33bfde4754bf47db0b8b17c6611..fab855963c730653f3d4ee677f3371a5f3ee5bd7 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -131,7 +131,7 @@ void __init setup_bootmem(void) for_each_memblock(memory, reg) { phys_addr_t end = reg->base + reg->size; - if (reg->base <= vmlinux_end && vmlinux_end <= end) { + if (reg->base <= vmlinux_start && vmlinux_end <= end) { mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET); /* diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c index f0cc860405871d0457d6dc0fdc83fb3a0350a2c5..ec0ca90dd9000f339a3d8a739d59e84df386d0dc 100644 --- a/arch/riscv/mm/kasan_init.c +++ b/arch/riscv/mm/kasan_init.c @@ -19,18 +19,20 @@ asmlinkage void __init kasan_early_init(void) for (i = 0; i < PTRS_PER_PTE; ++i) set_pte(kasan_early_shadow_pte + i, mk_pte(virt_to_page(kasan_early_shadow_page), - PAGE_KERNEL)); + PAGE_KERNEL)); for (i = 0; i < PTRS_PER_PMD; ++i) set_pmd(kasan_early_shadow_pmd + i, - pfn_pmd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pte)), - __pgprot(_PAGE_TABLE))); + pfn_pmd(PFN_DOWN + (__pa((uintptr_t) kasan_early_shadow_pte)), + __pgprot(_PAGE_TABLE))); for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END; i += PGDIR_SIZE, ++pgd) set_pgd(pgd, - pfn_pgd(PFN_DOWN(__pa(((uintptr_t)kasan_early_shadow_pmd))), - __pgprot(_PAGE_TABLE))); + pfn_pgd(PFN_DOWN + (__pa(((uintptr_t) kasan_early_shadow_pmd))), + __pgprot(_PAGE_TABLE))); /* init for swapper_pg_dir */ pgd = pgd_offset_k(KASAN_SHADOW_START); @@ -38,37 +40,43 @@ asmlinkage void __init kasan_early_init(void) for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END; i += PGDIR_SIZE, ++pgd) set_pgd(pgd, - 
pfn_pgd(PFN_DOWN(__pa(((uintptr_t)kasan_early_shadow_pmd))), - __pgprot(_PAGE_TABLE))); + pfn_pgd(PFN_DOWN + (__pa(((uintptr_t) kasan_early_shadow_pmd))), + __pgprot(_PAGE_TABLE))); flush_tlb_all(); } static void __init populate(void *start, void *end) { - unsigned long i; + unsigned long i, offset; unsigned long vaddr = (unsigned long)start & PAGE_MASK; unsigned long vend = PAGE_ALIGN((unsigned long)end); unsigned long n_pages = (vend - vaddr) / PAGE_SIZE; + unsigned long n_ptes = + ((n_pages + PTRS_PER_PTE) & -PTRS_PER_PTE) / PTRS_PER_PTE; unsigned long n_pmds = - (n_pages % PTRS_PER_PTE) ? n_pages / PTRS_PER_PTE + 1 : - n_pages / PTRS_PER_PTE; + ((n_ptes + PTRS_PER_PMD) & -PTRS_PER_PMD) / PTRS_PER_PMD; + + pte_t *pte = + memblock_alloc(n_ptes * PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE); + pmd_t *pmd = + memblock_alloc(n_pmds * PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE); pgd_t *pgd = pgd_offset_k(vaddr); - pmd_t *pmd = memblock_alloc(n_pmds * sizeof(pmd_t), PAGE_SIZE); - pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE); for (i = 0; i < n_pages; i++) { phys_addr_t phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE); - - set_pte(pte + i, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL)); + set_pte(&pte[i], pfn_pte(PHYS_PFN(phys), PAGE_KERNEL)); } - for (i = 0; i < n_pmds; ++pgd, i += PTRS_PER_PMD) - set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(((uintptr_t)(pmd + i)))), + for (i = 0, offset = 0; i < n_ptes; i++, offset += PTRS_PER_PTE) + set_pmd(&pmd[i], + pfn_pmd(PFN_DOWN(__pa(&pte[offset])), __pgprot(_PAGE_TABLE))); - for (i = 0; i < n_pages; ++pmd, i += PTRS_PER_PTE) - set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa((uintptr_t)(pte + i))), + for (i = 0, offset = 0; i < n_pmds; i++, offset += PTRS_PER_PMD) + set_pgd(&pgd[i], + pfn_pgd(PFN_DOWN(__pa(&pmd[offset])), __pgprot(_PAGE_TABLE))); flush_tlb_all(); @@ -81,7 +89,8 @@ void __init kasan_init(void) unsigned long i; kasan_populate_early_shadow((void *)KASAN_SHADOW_START, - (void *)kasan_mem_to_shadow((void *)VMALLOC_END)); + (void *)kasan_mem_to_shadow((void *) + VMALLOC_END)); for_each_memblock(memory, reg) { void *start = (void *)__va(reg->base); @@ -90,14 +99,14 @@ void __init kasan_init(void) if (start >= end) break; - populate(kasan_mem_to_shadow(start), - kasan_mem_to_shadow(end)); + populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end)); }; for (i = 0; i < PTRS_PER_PTE; i++) set_pte(&kasan_early_shadow_pte[i], mk_pte(virt_to_page(kasan_early_shadow_page), - __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_ACCESSED))); + __pgprot(_PAGE_PRESENT | _PAGE_READ | + _PAGE_ACCESSED))); memset(kasan_early_shadow_page, 0, PAGE_SIZE); init_task.kasan_depth = 0; diff --git a/arch/s390/Makefile b/arch/s390/Makefile index e0e3a465bbfd6fd15eb44ab8675af10c171c2e01..8dfa2cf1f05c7a9028683aa1900e0975d27863e9 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -146,7 +146,7 @@ all: bzImage #KBUILD_IMAGE is necessary for packaging targets like rpm-pkg, deb-pkg... 
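The rewritten populate() above sizes its pte and pmd arrays with ((n + d) & -d) / d, a power-of-two round-up that also reserves one extra unit whenever n is already an exact multiple. The trick is easy to verify in isolation:

    #include <assert.h>

    /* ((n + d) & -d) / d with power-of-two d, as used in populate() above */
    static unsigned long units(unsigned long n, unsigned long d)
    {
        return ((n + d) & -d) / d;
    }

    int main(void)
    {
        /* 1000 pages need 2 groups of 512... */
        assert(units(1000, 512) == 2);
        /* ...and an exact multiple still gets one spare group. */
        assert(units(1024, 512) == 3);
        /* compare the exact ceiling: (n + d - 1) / d */
        assert((1024 + 512 - 1) / 512 == 2);
        return 0;
    }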
KBUILD_IMAGE := $(boot)/bzImage -install: vmlinux +install: $(Q)$(MAKE) $(build)=$(boot) $@ bzImage: vmlinux diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile index e2c47d3a1c891b7bf5bb783f156ca902c69f1fdf..0ff9261c915e3fa64369b9e484c3baaed909b6aa 100644 --- a/arch/s390/boot/Makefile +++ b/arch/s390/boot/Makefile @@ -70,7 +70,7 @@ $(obj)/compressed/vmlinux: $(obj)/startup.a FORCE $(obj)/startup.a: $(OBJECTS) FORCE $(call if_changed,ar) -install: $(CONFIGURE) $(obj)/bzImage +install: sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/bzImage \ System.map "$(INSTALL_PATH)" diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c index 5d12352545c558d4f5598fd074048029cfd6218a..5591243d673e82cea561c5df4c9e6ac19c55ed6f 100644 --- a/arch/s390/boot/kaslr.c +++ b/arch/s390/boot/kaslr.c @@ -75,7 +75,7 @@ static unsigned long get_random(unsigned long limit) *(unsigned long *) prng.parm_block ^= seed; for (i = 0; i < 16; i++) { cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block, - (char *) entropy, (char *) entropy, + (u8 *) entropy, (u8 *) entropy, sizeof(entropy)); memcpy(prng.parm_block, entropy, sizeof(entropy)); } diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index 2e60c80395ab084afbe4713fbc8816f42bea3f60..0c86ba19fa2bcdb0ca9ce4627366b59714b379eb 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -53,6 +53,7 @@ CONFIG_VFIO_AP=m CONFIG_CRASH_DUMP=y CONFIG_HIBERNATION=y CONFIG_PM_DEBUG=y +CONFIG_PROTECTED_VIRTUALIZATION_GUEST=y CONFIG_CMM=m CONFIG_APPLDATA_BASE=y CONFIG_KVM=m @@ -474,7 +475,6 @@ CONFIG_NLMON=m # CONFIG_NET_VENDOR_EMULEX is not set # CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_GOOGLE is not set -# CONFIG_NET_VENDOR_HP is not set # CONFIG_NET_VENDOR_HUAWEI is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set @@ -684,7 +684,6 @@ CONFIG_CRYPTO_ADIANTUM=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_CRC32=m -CONFIG_CRYPTO_XXHASH=m CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m @@ -748,7 +747,6 @@ CONFIG_DEBUG_INFO_DWARF4=y CONFIG_GDB_SCRIPTS=y CONFIG_FRAME_WARN=1024 CONFIG_HEADERS_INSTALL=y -CONFIG_HEADERS_CHECK=y CONFIG_DEBUG_SECTION_MISMATCH=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_PAGEALLOC=y @@ -772,9 +770,9 @@ CONFIG_DEBUG_MEMORY_INIT=y CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m CONFIG_DEBUG_PER_CPU_MAPS=y CONFIG_DEBUG_SHIRQ=y +CONFIG_PANIC_ON_OOPS=y CONFIG_DETECT_HUNG_TASK=y CONFIG_WQ_WATCHDOG=y -CONFIG_PANIC_ON_OOPS=y CONFIG_DEBUG_TIMEKEEPING=y CONFIG_PROVE_LOCKING=y CONFIG_LOCK_STAT=y @@ -783,9 +781,20 @@ CONFIG_DEBUG_ATOMIC_SLEEP=y CONFIG_DEBUG_LOCKING_API_SELFTESTS=y CONFIG_DEBUG_SG=y CONFIG_DEBUG_NOTIFIERS=y +CONFIG_BUG_ON_DATA_CORRUPTION=y CONFIG_DEBUG_CREDENTIALS=y CONFIG_RCU_TORTURE_TEST=m CONFIG_RCU_CPU_STALL_TIMEOUT=300 +CONFIG_LATENCYTOP=y +CONFIG_FUNCTION_PROFILER=y +CONFIG_STACK_TRACER=y +CONFIG_IRQSOFF_TRACER=y +CONFIG_PREEMPT_TRACER=y +CONFIG_SCHED_TRACER=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_HIST_TRIGGERS=y +CONFIG_S390_PTDUMP=y CONFIG_NOTIFIER_ERROR_INJECTION=m CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m CONFIG_FAULT_INJECTION=y @@ -796,15 +805,6 @@ CONFIG_FAIL_IO_TIMEOUT=y CONFIG_FAIL_FUTEX=y CONFIG_FAULT_INJECTION_DEBUG_FS=y CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y -CONFIG_LATENCYTOP=y -CONFIG_IRQSOFF_TRACER=y -CONFIG_PREEMPT_TRACER=y -CONFIG_SCHED_TRACER=y -CONFIG_FTRACE_SYSCALLS=y -CONFIG_STACK_TRACER=y -CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_FUNCTION_PROFILER=y 
-CONFIG_HIST_TRIGGERS=y CONFIG_LKDTM=m CONFIG_TEST_LIST_SORT=y CONFIG_TEST_SORT=y @@ -814,5 +814,3 @@ CONFIG_INTERVAL_TREE_TEST=m CONFIG_PERCPU_TEST=m CONFIG_ATOMIC64_SELFTEST=y CONFIG_TEST_BPF=m -CONFIG_BUG_ON_DATA_CORRUPTION=y -CONFIG_S390_PTDUMP=y diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index 25f79984958219506566216ca4a34db0e49a31e4..6b27d861a9a306e03cb7f319339cabf16a0c6de3 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -53,6 +53,7 @@ CONFIG_VFIO_AP=m CONFIG_CRASH_DUMP=y CONFIG_HIBERNATION=y CONFIG_PM_DEBUG=y +CONFIG_PROTECTED_VIRTUALIZATION_GUEST=y CONFIG_CMM=m CONFIG_APPLDATA_BASE=y CONFIG_KVM=m @@ -470,7 +471,6 @@ CONFIG_NLMON=m # CONFIG_NET_VENDOR_EMULEX is not set # CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_GOOGLE is not set -# CONFIG_NET_VENDOR_HP is not set # CONFIG_NET_VENDOR_HUAWEI is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set @@ -677,7 +677,6 @@ CONFIG_CRYPTO_ADIANTUM=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_CRC32=m -CONFIG_CRYPTO_XXHASH=m CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m @@ -739,18 +738,18 @@ CONFIG_DEBUG_SECTION_MISMATCH=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_MEMORY_INIT=y CONFIG_PANIC_ON_OOPS=y +CONFIG_BUG_ON_DATA_CORRUPTION=y CONFIG_RCU_TORTURE_TEST=m CONFIG_RCU_CPU_STALL_TIMEOUT=60 CONFIG_LATENCYTOP=y +CONFIG_FUNCTION_PROFILER=y +CONFIG_STACK_TRACER=y CONFIG_SCHED_TRACER=y CONFIG_FTRACE_SYSCALLS=y -CONFIG_STACK_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_FUNCTION_PROFILER=y CONFIG_HIST_TRIGGERS=y +CONFIG_S390_PTDUMP=y CONFIG_LKDTM=m CONFIG_PERCPU_TEST=m CONFIG_ATOMIC64_SELFTEST=y CONFIG_TEST_BPF=m -CONFIG_BUG_ON_DATA_CORRUPTION=y -CONFIG_S390_PTDUMP=y diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index 85e944f04c70ec68a64fd25622d1faf4cfdd5f90..1019efd85b9dc952126fa455dcde0fa7187ab112 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h @@ -42,7 +42,7 @@ void __storage_key_init_range(unsigned long start, unsigned long end); static inline void storage_key_init_range(unsigned long start, unsigned long end) { - if (PAGE_DEFAULT_KEY) + if (PAGE_DEFAULT_KEY != 0) __storage_key_init_range(start, end); } diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 137a3920ca3632907ca17b515aed4045a8d4888c..6d7c3b7e928101d78d6a678b8fd9e2c3ccb239ab 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -752,6 +752,12 @@ static inline int pmd_write(pmd_t pmd) return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0; } +#define pud_write pud_write +static inline int pud_write(pud_t pud) +{ + return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0; +} + static inline int pmd_dirty(pmd_t pmd) { return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0; diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index 361ef5eda46895270f781407cbd684dd5eb1ac1e..aadb3d0e2adc767c862f4455a6a9c3949691b856 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -84,7 +84,6 @@ void s390_update_cpu_mhz(void); void cpu_detect_mhz_feature(void); extern const struct seq_operations cpuinfo_op; -extern int sysctl_ieee_emulation_warnings; extern void execve_tail(void); extern void __bpon(void); diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h index 71e3f0146cda084920c40b7736c01d45e5b0a594..1e3517b0518beb27d8dc7e2d7ea2b378d4f36993 100644 --- a/arch/s390/include/asm/qdio.h 
+++ b/arch/s390/include/asm/qdio.h @@ -201,7 +201,7 @@ struct slib { * @scount: SBAL count * @sflags: whole SBAL flags * @length: length - * @addr: address + * @addr: absolute data address */ struct qdio_buffer_element { u8 eflags; @@ -211,7 +211,7 @@ struct qdio_buffer_element { u8 scount; u8 sflags; u32 length; - void *addr; + u64 addr; } __attribute__ ((packed, aligned(16))); /** @@ -227,7 +227,7 @@ struct qdio_buffer { * @sbal: absolute SBAL address */ struct sl_element { - unsigned long sbal; + u64 sbal; } __attribute__ ((packed)); /** diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index d7ff30e45589935890ad1c1f67c0f36eaa4d480e..c2e6d4ba4e2369db946ecb6339045d91c1715c88 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -3268,7 +3268,10 @@ static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu) /* Initial reset is a superset of the normal reset */ kvm_arch_vcpu_ioctl_normal_reset(vcpu); - /* this equals initial cpu reset in pop, but we don't switch to ESA */ + /* + * This equals initial cpu reset in pop, but we don't switch to ESA. + * We do not only reset the internal data, but also ... + */ vcpu->arch.sie_block->gpsw.mask = 0; vcpu->arch.sie_block->gpsw.addr = 0; kvm_s390_set_prefix(vcpu, 0); @@ -3278,6 +3281,19 @@ static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu) memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr)); vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK; vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK; + + /* ... the data in sync regs */ + memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs)); + vcpu->run->s.regs.ckc = 0; + vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK; + vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK; + vcpu->run->psw_addr = 0; + vcpu->run->psw_mask = 0; + vcpu->run->s.regs.todpr = 0; + vcpu->run->s.regs.cputm = 0; + vcpu->run->s.regs.ckc = 0; + vcpu->run->s.regs.pp = 0; + vcpu->run->s.regs.gbea = 1; vcpu->run->s.regs.fpc = 0; vcpu->arch.sie_block->gbea = 1; vcpu->arch.sie_block->pp = 0; diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index bc61ea18e88d90bc183b80c34cbfc4f372c343de..60716d18ce5afd0d0f4c9277ea0342b9ddadc9d9 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -424,7 +424,7 @@ static void zpci_map_resources(struct pci_dev *pdev) if (zpci_use_mio(zdev)) pdev->resource[i].start = - (resource_size_t __force) zdev->bars[i].mio_wb; + (resource_size_t __force) zdev->bars[i].mio_wt; else pdev->resource[i].start = (resource_size_t __force) pci_iomap_range_fh(pdev, i, 0, 0); @@ -531,7 +531,7 @@ static int zpci_setup_bus_resources(struct zpci_dev *zdev, flags |= IORESOURCE_MEM_64; if (zpci_use_mio(zdev)) - addr = (unsigned long) zdev->bars[i].mio_wb; + addr = (unsigned long) zdev->bars[i].mio_wt; else addr = ZPCI_ADDR(entry); size = 1UL << zdev->bars[i].size; diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 94df0868804bcb4b9b37f7abf4985ac95158c4f8..513a55562d7508eaf75d3efc954b0a786956bbd1 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -194,9 +194,10 @@ avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1) avx512_instr :=$(call as-instr,vpmovm2b %k1$(comma)%zmm5,-DCONFIG_AS_AVX512=1) sha1_ni_instr :=$(call as-instr,sha1msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA1_NI=1) sha256_ni_instr :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA256_NI=1) +adx_instr := $(call as-instr,adox %r10$(comma)%r10,-DCONFIG_AS_ADX=1) -KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) 
$(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) -KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) +KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr) +KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr) KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE) diff --git a/arch/x86/boot/compressed/kaslr_64.c b/arch/x86/boot/compressed/kaslr_64.c index 748456c365f4691af041753c63d6991f9bc8b4b8..9557c5a15b91e29a6465e502599960aad87e3e60 100644 --- a/arch/x86/boot/compressed/kaslr_64.c +++ b/arch/x86/boot/compressed/kaslr_64.c @@ -29,9 +29,6 @@ #define __PAGE_OFFSET __PAGE_OFFSET_BASE #include "../../mm/ident_map.c" -/* Used by pgtable.h asm code to force instruction serialization. */ -unsigned long __force_order; - /* Used to track our page table allocation area. */ struct alloc_pgt_data { unsigned char *pgt_buf; diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index b69e00bf20b82e4653cb7b563aa132efa43f0838..8c2e9eadee8a0ff349c324cdfc310e6910fad1b8 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -11,6 +11,7 @@ avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\ avx512_supported :=$(call as-instr,vpmovm2b %k1$(comma)%zmm5,yes,no) sha1_ni_supported :=$(call as-instr,sha1msg1 %xmm0$(comma)%xmm1,yes,no) sha256_ni_supported :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,yes,no) +adx_supported := $(call as-instr,adox %r10$(comma)%r10,yes,no) obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o @@ -39,7 +40,11 @@ obj-$(CONFIG_CRYPTO_AEGIS128_AESNI_SSE2) += aegis128-aesni.o obj-$(CONFIG_CRYPTO_NHPOLY1305_SSE2) += nhpoly1305-sse2.o obj-$(CONFIG_CRYPTO_NHPOLY1305_AVX2) += nhpoly1305-avx2.o -obj-$(CONFIG_CRYPTO_CURVE25519_X86) += curve25519-x86_64.o + +# These modules require the assembler to support ADX. +ifeq ($(adx_supported),yes) + obj-$(CONFIG_CRYPTO_CURVE25519_X86) += curve25519-x86_64.o +endif # These modules require assembler to support AVX. ifeq ($(avx_supported),yes) diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c index a6ea07f2aa8482d6ea0835b4a745245c039b352e..4d867a752f0ecd0f79656341edfba56fc1d40fc3 100644 --- a/arch/x86/events/amd/uncore.c +++ b/arch/x86/events/amd/uncore.c @@ -190,15 +190,12 @@ static int amd_uncore_event_init(struct perf_event *event) /* * NB and Last level cache counters (MSRs) are shared across all cores - * that share the same NB / Last level cache. Interrupts can be directed - * to a single target core, however, event counts generated by processes - * running on other cores cannot be masked out. So we do not support - * sampling and per-thread events. + * that share the same NB / Last level cache. On family 16h and below, + * Interrupts can be directed to a single target core, however, event + * counts generated by processes running on other cores cannot be masked + * out. 
So we do not support sampling and per-thread events via + CAP_NO_INTERRUPT, and we do not enable counter overflow interrupts: */ - if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) - return -EINVAL; - - /* and we do not enable counter overflow interrupts */ hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB; hwc->idx = -1; @@ -306,7 +303,7 @@ static struct pmu amd_nb_pmu = { .start = amd_uncore_start, .stop = amd_uncore_stop, .read = amd_uncore_read, - .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT, }; static struct pmu amd_llc_pmu = { @@ -317,7 +314,7 @@ static struct pmu amd_llc_pmu = { .start = amd_uncore_start, .stop = amd_uncore_stop, .read = amd_uncore_read, - .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT, }; static struct amd_uncore *amd_uncore_alloc(unsigned int cpu) diff --git a/arch/x86/include/asm/io_bitmap.h b/arch/x86/include/asm/io_bitmap.h index 02c6ef8f7667725b2730e049a2a51d78caec650e..07344d82e88ee6b28e4bb2040932eb1135a3b4a3 100644 --- a/arch/x86/include/asm/io_bitmap.h +++ b/arch/x86/include/asm/io_bitmap.h @@ -19,7 +19,14 @@ struct task_struct; void io_bitmap_share(struct task_struct *tsk); void io_bitmap_exit(void); -void tss_update_io_bitmap(void); +void native_tss_update_io_bitmap(void); + +#ifdef CONFIG_PARAVIRT_XXL +#include <asm/paravirt.h> +#else +#define tss_update_io_bitmap native_tss_update_io_bitmap +#endif + #else static inline void io_bitmap_share(struct task_struct *tsk) { } static inline void io_bitmap_exit(void) { } diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index 03946eb3e2b9e5162c6f20fff85bf1747ad8431a..c06e8353efd3297ee44878b91f58798dd04b0664 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h @@ -292,6 +292,14 @@ enum x86emul_mode { #define X86EMUL_SMM_MASK (1 << 6) #define X86EMUL_SMM_INSIDE_NMI_MASK (1 << 7) +/* + * fastop functions are declared as taking a never-defined fastop parameter, + * so they can't be called from C directly. + */ +struct fastop; + +typedef void (*fastop_t)(struct fastop *); + struct x86_emulate_ctxt { const struct x86_emulate_ops *ops; @@ -324,7 +332,10 @@ struct x86_emulate_ctxt { struct operand src; struct operand src2; struct operand dst; - int (*execute)(struct x86_emulate_ctxt *ctxt); + union { + int (*execute)(struct x86_emulate_ctxt *ctxt); + fastop_t fop; + }; int (*check_perm)(struct x86_emulate_ctxt *ctxt); /* * The following six fields are cleared together, @@ -349,7 +360,6 @@ struct x86_emulate_ctxt { u64 d; unsigned long _eip; struct operand memop; - /* Fields above regs are cleared together.
*/ unsigned long _regs[NR_VCPU_REGS]; struct operand *memopp; struct fetch_cache fetch; diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 40a0c0fd95ca8a52fc92c90b3434bfbe310a556b..98959e8cd4489877946d091a9bb5ee297790dbe3 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1122,6 +1122,7 @@ struct kvm_x86_ops { int (*handle_exit)(struct kvm_vcpu *vcpu, enum exit_fastpath_completion exit_fastpath); int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu); + void (*update_emulated_instruction)(struct kvm_vcpu *vcpu); void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask); u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu); void (*patch_hypercall)(struct kvm_vcpu *vcpu, @@ -1146,7 +1147,7 @@ struct kvm_x86_ops { void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap); void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu); void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa); - void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector); + int (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector); int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu); int (*set_tss_addr)(struct kvm *kvm, unsigned int addr); int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr); diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index ebe1685e92dda2bfd6795b45a92924de8a8f9451..d5e517d1c3ddc5c9ac6e594500d87524168b143d 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -512,6 +512,8 @@ #define MSR_K7_HWCR 0xc0010015 #define MSR_K7_HWCR_SMMLOCK_BIT 0 #define MSR_K7_HWCR_SMMLOCK BIT_ULL(MSR_K7_HWCR_SMMLOCK_BIT) +#define MSR_K7_HWCR_IRPERF_EN_BIT 30 +#define MSR_K7_HWCR_IRPERF_EN BIT_ULL(MSR_K7_HWCR_IRPERF_EN_BIT) #define MSR_K7_FID_VID_CTL 0xc0010041 #define MSR_K7_FID_VID_STATUS 0xc0010042 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 86e7317eb31f9a11c3f3b638f0882929b76af777..694d8daf498376ef7e91a1a3026638e275378cab 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -295,6 +295,13 @@ static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g) PVOP_VCALL3(cpu.write_idt_entry, dt, entry, g); } +#ifdef CONFIG_X86_IOPL_IOPERM +static inline void tss_update_io_bitmap(void) +{ + PVOP_VCALL0(cpu.update_io_bitmap); +} +#endif + static inline void paravirt_activate_mm(struct mm_struct *prev, struct mm_struct *next) { diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 84812964d3dd6f0ae5a68763251b4bd4f8af5f7e..732f62e04ddb851f47248013fac01bd62633aaf7 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -140,6 +140,10 @@ struct pv_cpu_ops { void (*load_sp0)(unsigned long sp0); +#ifdef CONFIG_X86_IOPL_IOPERM + void (*update_io_bitmap)(void); +#endif + void (*wbinvd)(void); /* cpuid emulation, mostly so that caps bits can be disabled */ diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 2a85287b368521b317529e2bbd619287d8b85481..8521af3fef27f4fb103de37879c14724a9c3fe69 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -72,7 +72,7 @@ #define SECONDARY_EXEC_MODE_BASED_EPT_EXEC VMCS_CONTROL_BIT(MODE_BASED_EPT_EXEC) #define SECONDARY_EXEC_PT_USE_GPA VMCS_CONTROL_BIT(PT_USE_GPA) #define SECONDARY_EXEC_TSC_SCALING VMCS_CONTROL_BIT(TSC_SCALING) -#define SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE 0x04000000 +#define 
SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE VMCS_CONTROL_BIT(USR_WAIT_PAUSE) #define PIN_BASED_EXT_INTR_MASK VMCS_CONTROL_BIT(INTR_EXITING) #define PIN_BASED_NMI_EXITING VMCS_CONTROL_BIT(NMI_EXITING) diff --git a/arch/x86/include/asm/vmxfeatures.h b/arch/x86/include/asm/vmxfeatures.h index a50e4a0de31538fb90e1e5c98f05f68cfc153e9e..9915990fd8cfab51a9cd287017e2d71d93f1f3f0 100644 --- a/arch/x86/include/asm/vmxfeatures.h +++ b/arch/x86/include/asm/vmxfeatures.h @@ -81,6 +81,7 @@ #define VMX_FEATURE_MODE_BASED_EPT_EXEC ( 2*32+ 22) /* "ept_mode_based_exec" Enable separate EPT EXEC bits for supervisor vs. user */ #define VMX_FEATURE_PT_USE_GPA ( 2*32+ 24) /* "" Processor Trace logs GPAs */ #define VMX_FEATURE_TSC_SCALING ( 2*32+ 25) /* Scale hardware TSC when read in guest */ +#define VMX_FEATURE_USR_WAIT_PAUSE ( 2*32+ 26) /* Enable TPAUSE, UMONITOR, UMWAIT in guest */ #define VMX_FEATURE_ENCLV_EXITING ( 2*32+ 28) /* "" VM-Exit on ENCLV (leaf dependent) */ #endif /* _ASM_X86_VMXFEATURES_H */ diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h index 503d3f42da1676791d2c4f4a70bfad35743daf4c..3f3f780c8c6500e1a1ea52bc0585af93699572fe 100644 --- a/arch/x86/include/uapi/asm/kvm.h +++ b/arch/x86/include/uapi/asm/kvm.h @@ -390,6 +390,7 @@ struct kvm_sync_regs { #define KVM_STATE_NESTED_GUEST_MODE 0x00000001 #define KVM_STATE_NESTED_RUN_PENDING 0x00000002 #define KVM_STATE_NESTED_EVMCS 0x00000004 +#define KVM_STATE_NESTED_MTF_PENDING 0x00000008 #define KVM_STATE_NESTED_SMM_GUEST_MODE 0x00000001 #define KVM_STATE_NESTED_SMM_VMXON 0x00000002 diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index 2c5676b0a6e7f72c70e6fbab2a8ae485e14e9220..48293d15f1e1d69705dfb7d0d1d780256db78e7c 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -838,13 +838,15 @@ static void free_moved_vector(struct apic_chip_data *apicd) bool managed = apicd->is_managed; /* - * This should never happen. Managed interrupts are not - * migrated except on CPU down, which does not involve the - * cleanup vector. But try to keep the accounting correct - * nevertheless. + * Managed interrupts are usually not migrated away + * from an online CPU, but CPU isolation 'managed_irq' + * can make that happen. + * 1) Activation does not take the isolation into account + * to keep the code simple + * 2) Migration away from an isolated CPU can happen when + * a non-isolated CPU which is in the calculated + * affinity mask comes online. */ - WARN_ON_ONCE(managed); - trace_vector_free_moved(apicd->irq, cpu, vector, managed); irq_matrix_free(vector_matrix, cpu, vector, managed); per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED; diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index ac83a0fef6285d9901bf44b2b5169ab9fef89698..1f875fbe13846a9bfc00582173e6aedc571cd3ca 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -28,6 +28,7 @@ static const int amd_erratum_383[]; static const int amd_erratum_400[]; +static const int amd_erratum_1054[]; static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum); /* @@ -972,6 +973,15 @@ static void init_amd(struct cpuinfo_x86 *c) /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */ if (!cpu_has(c, X86_FEATURE_XENPV)) set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS); + + /* + * Turn on the Instructions Retired free counter on machines not + * susceptible to erratum #1054 "Instructions Retired Performance + * Counter May Be Inaccurate". 
+ */ + if (cpu_has(c, X86_FEATURE_IRPERF) && + !cpu_has_amd_erratum(c, amd_erratum_1054)) + msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT); } #ifdef CONFIG_X86_32 @@ -1099,6 +1109,10 @@ static const int amd_erratum_400[] = static const int amd_erratum_383[] = AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); +/* #1054: Instructions Retired Performance Counter May Be Inaccurate */ +static const int amd_erratum_1054[] = + AMD_OSVW_ERRATUM(0, AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf)); + static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) { diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 52c9bfbbdb2a04dbdbd7288de4bcfe7be8f44851..4cdb123ff66a8dddd24a69987f78c37af7e28b8a 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -445,7 +445,7 @@ static __always_inline void setup_pku(struct cpuinfo_x86 *c) * cpuid bit to be set. We need to ensure that we * update that bit in this CPU's "cpu_info". */ - get_cpu_cap(c); + set_cpu_cap(c, X86_FEATURE_OSPKE); } #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c index b3a50d962851cec722df10623dbc90e6a3f16985..52de616a806559d6e0a25a5f65e3bef721432f48 100644 --- a/arch/x86/kernel/cpu/mce/amd.c +++ b/arch/x86/kernel/cpu/mce/amd.c @@ -1163,9 +1163,12 @@ static const struct sysfs_ops threshold_ops = { .store = store, }; +static void threshold_block_release(struct kobject *kobj); + static struct kobj_type threshold_ktype = { .sysfs_ops = &threshold_ops, .default_attrs = default_attrs, + .release = threshold_block_release, }; static const char *get_name(unsigned int bank, struct threshold_block *b) @@ -1198,8 +1201,9 @@ static const char *get_name(unsigned int bank, struct threshold_block *b) return buf_mcatype; } -static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank, - unsigned int block, u32 address) +static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb, + unsigned int bank, unsigned int block, + u32 address) { struct threshold_block *b = NULL; u32 low, high; @@ -1243,16 +1247,12 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank, INIT_LIST_HEAD(&b->miscj); - if (per_cpu(threshold_banks, cpu)[bank]->blocks) { - list_add(&b->miscj, - &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj); - } else { - per_cpu(threshold_banks, cpu)[bank]->blocks = b; - } + if (tb->blocks) + list_add(&b->miscj, &tb->blocks->miscj); + else + tb->blocks = b; - err = kobject_init_and_add(&b->kobj, &threshold_ktype, - per_cpu(threshold_banks, cpu)[bank]->kobj, - get_name(bank, b)); + err = kobject_init_and_add(&b->kobj, &threshold_ktype, tb->kobj, get_name(bank, b)); if (err) goto out_free; recurse: @@ -1260,7 +1260,7 @@ recurse: if (!address) return 0; - err = allocate_threshold_blocks(cpu, bank, block, address); + err = allocate_threshold_blocks(cpu, tb, bank, block, address); if (err) goto out_free; @@ -1345,8 +1345,6 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank) goto out_free; } - per_cpu(threshold_banks, cpu)[bank] = b; - if (is_shared_bank(bank)) { refcount_set(&b->cpus, 1); @@ -1357,9 +1355,13 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank) } } - err = allocate_threshold_blocks(cpu, bank, 0, msr_ops.misc(bank)); - if (!err) - goto out; + err = allocate_threshold_blocks(cpu, b, bank, 0, msr_ops.misc(bank)); + if (err) + goto out_free; + + per_cpu(threshold_banks, cpu)[bank] = b; + + return 0; 
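For reference, the init_amd() hunk above only turns on the IRPERF free counter on parts outside the erratum #1054 window, which amd_erratum_1054 encodes as family 0x17, models 0x00-0x2f, any stepping. A rough stand-alone illustration of how such a model/stepping range check works follows; the struct and helper are ours, not the kernel's AMD_OSVW_ERRATUM/AMD_MODEL_RANGE machinery, which packs the same fields into a single int.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical mirror of an OSVW erratum range: a CPU family plus an
 * inclusive (model, stepping) window. */
struct erratum_range {
	unsigned int family;
	unsigned int model_lo, step_lo;
	unsigned int model_hi, step_hi;
};

static bool cpu_in_range(const struct erratum_range *r, unsigned int family,
			 unsigned int model, unsigned int stepping)
{
	/* Pack model and stepping so the window compares as one value. */
	unsigned int lo = (r->model_lo << 4) | r->step_lo;
	unsigned int hi = (r->model_hi << 4) | r->step_hi;
	unsigned int ms = (model << 4) | stepping;

	return family == r->family && ms >= lo && ms <= hi;
}

int main(void)
{
	/* Erratum #1054: family 0x17, models 0x00-0x2f, stepping 0x0-0xf. */
	const struct erratum_range erratum_1054 = { 0x17, 0x00, 0x0, 0x2f, 0xf };

	printf("17h/01h affected: %d\n", cpu_in_range(&erratum_1054, 0x17, 0x01, 0x1));
	printf("17h/71h affected: %d\n", cpu_in_range(&erratum_1054, 0x17, 0x71, 0x0));
	return 0;
}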
out_free: kfree(b); @@ -1368,8 +1370,12 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank) return err; } -static void deallocate_threshold_block(unsigned int cpu, - unsigned int bank) +static void threshold_block_release(struct kobject *kobj) +{ + kfree(to_block(kobj)); +} + +static void deallocate_threshold_block(unsigned int cpu, unsigned int bank) { struct threshold_block *pos = NULL; struct threshold_block *tmp = NULL; @@ -1379,13 +1385,11 @@ static void deallocate_threshold_block(unsigned int cpu, return; list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) { - kobject_put(&pos->kobj); list_del(&pos->miscj); - kfree(pos); + kobject_put(&pos->kobj); } - kfree(per_cpu(threshold_banks, cpu)[bank]->blocks); - per_cpu(threshold_banks, cpu)[bank]->blocks = NULL; + kobject_put(&head->blocks->kobj); } static void __threshold_remove_blocks(struct threshold_bank *b) diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c index 5627b1091b85639eb800a592d882832d81d1a941..f996ffb887bc09ca2ea64d221f14ef86f3c0006a 100644 --- a/arch/x86/kernel/cpu/mce/intel.c +++ b/arch/x86/kernel/cpu/mce/intel.c @@ -493,17 +493,18 @@ static void intel_ppin_init(struct cpuinfo_x86 *c) return; if ((val & 3UL) == 1UL) { - /* PPIN available but disabled: */ + /* PPIN locked in disabled mode */ return; } - /* If PPIN is disabled, but not locked, try to enable: */ - if (!(val & 3UL)) { + /* If PPIN is disabled, try to enable */ + if (!(val & 2UL)) { wrmsrl_safe(MSR_PPIN_CTL, val | 2UL); rdmsrl_safe(MSR_PPIN_CTL, &val); } - if ((val & 3UL) == 2UL) + /* Is the enable bit set? */ + if (val & 2UL) set_cpu_cap(c, X86_FEATURE_INTEL_PPIN); } } diff --git a/arch/x86/kernel/cpu/mce/therm_throt.c b/arch/x86/kernel/cpu/mce/therm_throt.c index 58b4ee3cda7774c096cbbac4841976344bbe79a0..f36dc07420851b90759349d7ffe3caf3eac34012 100644 --- a/arch/x86/kernel/cpu/mce/therm_throt.c +++ b/arch/x86/kernel/cpu/mce/therm_throt.c @@ -486,9 +486,14 @@ static int thermal_throttle_offline(unsigned int cpu) { struct thermal_state *state = &per_cpu(thermal_state, cpu); struct device *dev = get_cpu_device(cpu); + u32 l; + + /* Mask the thermal vector before draining evtl. 
pending work */ + l = apic_read(APIC_LVTTHMR); + apic_write(APIC_LVTTHMR, l | APIC_LVT_MASKED); - cancel_delayed_work(&state->package_throttle.therm_work); - cancel_delayed_work(&state->core_throttle.therm_work); + cancel_delayed_work_sync(&state->package_throttle.therm_work); + cancel_delayed_work_sync(&state->core_throttle.therm_work); state->package_throttle.rate_control_active = false; state->core_throttle.rate_control_active = false; diff --git a/arch/x86/kernel/ima_arch.c b/arch/x86/kernel/ima_arch.c index 4d4f5d9faac314ad5fe5c21a05e72c873ec42d65..23054909c8ddfa4258fc9004877f6ceb88ef0786 100644 --- a/arch/x86/kernel/ima_arch.c +++ b/arch/x86/kernel/ima_arch.c @@ -10,8 +10,6 @@ extern struct boot_params boot_params; static enum efi_secureboot_mode get_sb_mode(void) { - efi_char16_t efi_SecureBoot_name[] = L"SecureBoot"; - efi_char16_t efi_SetupMode_name[] = L"SecureBoot"; efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID; efi_status_t status; unsigned long size; @@ -25,7 +23,7 @@ static enum efi_secureboot_mode get_sb_mode(void) } /* Get variable contents into buffer */ - status = efi.get_variable(efi_SecureBoot_name, &efi_variable_guid, + status = efi.get_variable(L"SecureBoot", &efi_variable_guid, NULL, &size, &secboot); if (status == EFI_NOT_FOUND) { pr_info("ima: secureboot mode disabled\n"); @@ -38,7 +36,7 @@ static enum efi_secureboot_mode get_sb_mode(void) } size = sizeof(setupmode); - status = efi.get_variable(efi_SetupMode_name, &efi_variable_guid, + status = efi.get_variable(L"SetupMode", &efi_variable_guid, NULL, &size, &setupmode); if (status != EFI_SUCCESS) /* ignore unknown SetupMode */ diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index d817f255aed8e0b083bb04dfbce257c8c3b02a58..6efe0410fb728995ea8944ecd984840b6f30778e 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -425,7 +425,29 @@ static void __init sev_map_percpu_data(void) } } +static bool pv_tlb_flush_supported(void) +{ + return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && + !kvm_para_has_hint(KVM_HINTS_REALTIME) && + kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)); +} + +static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask); + #ifdef CONFIG_SMP + +static bool pv_ipi_supported(void) +{ + return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI); +} + +static bool pv_sched_yield_supported(void) +{ + return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) && + !kvm_para_has_hint(KVM_HINTS_REALTIME) && + kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)); +} + #define KVM_IPI_CLUSTER_SIZE (2 * BITS_PER_LONG) static void __send_ipi_mask(const struct cpumask *mask, int vector) @@ -490,12 +512,12 @@ static void kvm_send_ipi_mask(const struct cpumask *mask, int vector) static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector) { unsigned int this_cpu = smp_processor_id(); - struct cpumask new_mask; + struct cpumask *new_mask = this_cpu_cpumask_var_ptr(__pv_cpu_mask); const struct cpumask *local_mask; - cpumask_copy(&new_mask, mask); - cpumask_clear_cpu(this_cpu, &new_mask); - local_mask = &new_mask; + cpumask_copy(new_mask, mask); + cpumask_clear_cpu(this_cpu, new_mask); + local_mask = new_mask; __send_ipi_mask(local_mask, vector); } @@ -575,7 +597,6 @@ static void __init kvm_apf_trap_init(void) update_intr_gate(X86_TRAP_PF, async_page_fault); } -static DEFINE_PER_CPU(cpumask_var_t, __pv_tlb_mask); static void kvm_flush_tlb_others(const struct cpumask *cpumask, const struct flush_tlb_info *info) @@ -583,7 +604,7 @@ static void kvm_flush_tlb_others(const struct cpumask 
*cpumask, u8 state; int cpu; struct kvm_steal_time *src; - struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_tlb_mask); + struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask); cpumask_copy(flushmask, cpumask); /* @@ -619,11 +640,10 @@ static void __init kvm_guest_init(void) pv_ops.time.steal_clock = kvm_steal_clock; } - if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && - !kvm_para_has_hint(KVM_HINTS_REALTIME) && - kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) { + if (pv_tlb_flush_supported()) { pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others; pv_ops.mmu.tlb_remove_table = tlb_remove_table; + pr_info("KVM setup pv remote TLB flush\n"); } if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) @@ -632,9 +652,7 @@ static void __init kvm_guest_init(void) #ifdef CONFIG_SMP smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus; smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu; - if (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) && - !kvm_para_has_hint(KVM_HINTS_REALTIME) && - kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) { + if (pv_sched_yield_supported()) { smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi; pr_info("KVM setup pv sched yield\n"); } @@ -700,7 +718,7 @@ static uint32_t __init kvm_detect(void) static void __init kvm_apic_init(void) { #if defined(CONFIG_SMP) - if (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI)) + if (pv_ipi_supported()) kvm_setup_pv_ipi(); #endif } @@ -732,26 +750,31 @@ static __init int activate_jump_labels(void) } arch_initcall(activate_jump_labels); -static __init int kvm_setup_pv_tlb_flush(void) +static __init int kvm_alloc_cpumask(void) { int cpu; + bool alloc = false; if (!kvm_para_available() || nopv) return 0; - if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && - !kvm_para_has_hint(KVM_HINTS_REALTIME) && - kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) { + if (pv_tlb_flush_supported()) + alloc = true; + +#if defined(CONFIG_SMP) + if (pv_ipi_supported()) + alloc = true; +#endif + + if (alloc) for_each_possible_cpu(cpu) { - zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu), + zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu), GFP_KERNEL, cpu_to_node(cpu)); } - pr_info("KVM setup pv remote TLB flush\n"); - } return 0; } -arch_initcall(kvm_setup_pv_tlb_flush); +arch_initcall(kvm_alloc_cpumask); #ifdef CONFIG_PARAVIRT_SPINLOCKS diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 789f5e4f89defc97f1daaf801c9929d616237b55..c131ba4e70ef8229d2c9f1722f88d100f942489e 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -30,6 +30,7 @@ #include #include #include +#include <asm/io_bitmap.h> /* * nop stub, which must not clobber anything *including the stack* to @@ -341,6 +342,10 @@ struct paravirt_patch_template pv_ops = { .cpu.iret = native_iret, .cpu.swapgs = native_swapgs, +#ifdef CONFIG_X86_IOPL_IOPERM + .cpu.update_io_bitmap = native_tss_update_io_bitmap, +#endif + .cpu.start_context_switch = paravirt_nop, .cpu.end_context_switch = paravirt_nop, diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 839b5244e3b7e17767fc69bc610ac7c9a487c7fd..3053c85e0e42db9abbb5261673fa78e897efe20e 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -374,7 +374,7 @@ static void tss_copy_io_bitmap(struct tss_struct *tss, struct io_bitmap *iobm) /** * tss_update_io_bitmap - Update I/O bitmap before exiting to usermode */ -void tss_update_io_bitmap(void) +void native_tss_update_io_bitmap(void) { struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw); struct thread_struct *t =
&current->thread; diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 991019d5eee1e03c21bd20a6e88ee209696304e0..9fea0757db9226653c611233470ae092fda6e762 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -59,6 +59,19 @@ config KVM If unsure, say N. +config KVM_WERROR + bool "Compile KVM with -Werror" + # KASAN may cause the build to fail due to larger frames + default y if X86_64 && !KASAN + # We use the dependency on !COMPILE_TEST to not be enabled + # blindly in allmodconfig or allyesconfig configurations + depends on (X86_64 && !KASAN) || !COMPILE_TEST + depends on EXPERT + help + Add -Werror to the build flags for KVM. + + If in doubt, say "N". + config KVM_INTEL tristate "KVM for Intel (and compatible) processors support" depends on KVM && IA32_FEAT_CTL diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index b19ef421084dff20ba0ce8b61c98c0c8878ef41b..e553f0fdd87d47dbd0fe4dc5cb79f5e5421e0e64 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 ccflags-y += -Iarch/x86/kvm +ccflags-$(CONFIG_KVM_WERROR) += -Werror KVM := ../../../virt/kvm diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index ddbc61984227c3dbee763ed3759e9a5351f32606..bc00642e5d3b7b158cb410ed6d39a1724661904b 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -191,25 +191,6 @@ #define NR_FASTOP (ilog2(sizeof(ulong)) + 1) #define FASTOP_SIZE 8 -/* - * fastop functions have a special calling convention: - * - * dst: rax (in/out) - * src: rdx (in/out) - * src2: rcx (in) - * flags: rflags (in/out) - * ex: rsi (in:fastop pointer, out:zero if exception) - * - * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for - * different operand sizes can be reached by calculation, rather than a jump - * table (which would be bigger than the code). - * - * fastop functions are declared as taking a never-defined fastop parameter, - * so they can't be called from C directly. - */ - -struct fastop; - struct opcode { u64 flags : 56; u64 intercept : 8; @@ -311,8 +292,19 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt) #define ON64(x) #endif -typedef void (*fastop_t)(struct fastop *); - +/* + * fastop functions have a special calling convention: + * + * dst: rax (in/out) + * src: rdx (in/out) + * src2: rcx (in) + * flags: rflags (in/out) + * ex: rsi (in:fastop pointer, out:zero if exception) + * + * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for + * different operand sizes can be reached by calculation, rather than a jump + * table (which would be bigger than the code).
+ */ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop); #define __FOP_FUNC(name) \ @@ -5181,6 +5173,7 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) ctxt->fetch.ptr = ctxt->fetch.data; ctxt->fetch.end = ctxt->fetch.data + insn_len; ctxt->opcode_len = 1; + ctxt->intercept = x86_intercept_none; if (insn_len > 0) memcpy(ctxt->fetch.data, insn, insn_len); else { @@ -5683,7 +5676,7 @@ special_insn: if (ctxt->execute) { if (ctxt->d & Fastop) - rc = fastop(ctxt, (fastop_t)ctxt->execute); + rc = fastop(ctxt, ctxt->fop); else rc = ctxt->execute(ctxt); if (rc != X86EMUL_CONTINUE) diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c index 7668fed1ce6527767fbf6c8e10647c6627686f58..750ff0b294047cf83b2c28f72cfbd057375fc7b1 100644 --- a/arch/x86/kvm/ioapic.c +++ b/arch/x86/kvm/ioapic.c @@ -378,12 +378,15 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) if (e->fields.delivery_mode == APIC_DM_FIXED) { struct kvm_lapic_irq irq; - irq.shorthand = APIC_DEST_NOSHORT; irq.vector = e->fields.vector; irq.delivery_mode = e->fields.delivery_mode << 8; - irq.dest_id = e->fields.dest_id; irq.dest_mode = kvm_lapic_irq_dest_mode(!!e->fields.dest_mode); + irq.level = false; + irq.trig_mode = e->fields.trig_mode; + irq.shorthand = APIC_DEST_NOSHORT; + irq.dest_id = e->fields.dest_id; + irq.msi_redir_hint = false; bitmap_zero(&vcpu_bitmap, 16); kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq, &vcpu_bitmap); diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c index 79afa0bb5f410f701e18370b38f9aeb182f53cfc..c47d2acec52934eae639a8c4b347fbfa660caca6 100644 --- a/arch/x86/kvm/irq_comm.c +++ b/arch/x86/kvm/irq_comm.c @@ -417,7 +417,7 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu, kvm_set_msi_irq(vcpu->kvm, entry, &irq); - if (irq.level && + if (irq.trig_mode && kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT, irq.dest_id, irq.dest_mode)) __set_bit(irq.vector, ioapic_handled_vectors); diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index afcd30d44cbb28b6af4e16faacaa1fd0e36fcd08..7356a56e6282f192e5e8c8630ce1a5b2065bdcb5 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -627,9 +627,11 @@ static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu) static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu) { u8 val; - if (pv_eoi_get_user(vcpu, &val) < 0) + if (pv_eoi_get_user(vcpu, &val) < 0) { printk(KERN_WARNING "Can't read EOI MSR value: 0x%llx\n", (unsigned long long)vcpu->arch.pv_eoi.msr_val); + return false; + } return val & 0x1; } @@ -1046,11 +1048,8 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, apic->regs + APIC_TMR); } - if (vcpu->arch.apicv_active) - kvm_x86_ops->deliver_posted_interrupt(vcpu, vector); - else { + if (kvm_x86_ops->deliver_posted_interrupt(vcpu, vector)) { kvm_lapic_set_irr(vector, apic); - kvm_make_request(KVM_REQ_EVENT, vcpu); kvm_vcpu_kick(vcpu); } @@ -1446,6 +1445,8 @@ static void limit_periodic_timer_frequency(struct kvm_lapic *apic) } } +static void cancel_hv_timer(struct kvm_lapic *apic); + static void apic_update_lvtt(struct kvm_lapic *apic) { u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) & @@ -1455,6 +1456,10 @@ static void apic_update_lvtt(struct kvm_lapic *apic) if (apic_lvtt_tscdeadline(apic) != (timer_mode == APIC_LVT_TIMER_TSCDEADLINE)) { hrtimer_cancel(&apic->lapic_timer.timer); + preempt_disable(); + if (apic->lapic_timer.hv_timer_in_use) + cancel_hv_timer(apic); + preempt_enable(); kvm_lapic_set_reg(apic, APIC_TMICT, 0); apic->lapic_timer.period = 0; 
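Several hunks in this series follow one convention change: deliver_posted_interrupt() (svm_deliver_avic_intr() and vmx_deliver_posted_interrupt() below) now returns 0 when the interrupt could be posted in hardware and nonzero otherwise, and the __apic_accept_irq() hunk above falls back to setting IRR and kicking the vCPU only in the nonzero case. A toy sketch of that contract, with stand-in names rather than KVM internals:

#include <stdbool.h>
#include <stdio.h>

static bool apicv_active;

/* Returns 0 if the vector was posted in hardware, nonzero if the caller
 * must deliver it in software instead. */
static int deliver_posted_interrupt(int vector)
{
	if (!apicv_active)
		return -1;
	printf("posted vector %d via hardware\n", vector);
	return 0;
}

static void accept_irq(int vector)
{
	if (deliver_posted_interrupt(vector))
		printf("fallback: set IRR for vector %d and kick the vCPU\n",
		       vector);
}

int main(void)
{
	apicv_active = false;
	accept_irq(32);		/* takes the software fallback */
	apicv_active = true;
	accept_irq(33);		/* posted directly */
	return 0;
}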
apic->lapic_timer.tscdeadline = 0; @@ -1716,7 +1721,7 @@ static void start_sw_period(struct kvm_lapic *apic) hrtimer_start(&apic->lapic_timer.timer, apic->lapic_timer.target_expiration, - HRTIMER_MODE_ABS); + HRTIMER_MODE_ABS_HARD); } bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h index 3c6522b84ff1175a64fef95e119e213c272bc873..ffcd96fc02d0a4892ee4573d9a7a2791f4cf0c69 100644 --- a/arch/x86/kvm/mmutrace.h +++ b/arch/x86/kvm/mmutrace.h @@ -339,7 +339,7 @@ TRACE_EVENT( /* These depend on page entry type, so compute them now. */ __field(bool, r) __field(bool, x) - __field(u8, u) + __field(signed char, u) ), TP_fast_assign( diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index bef0ba35f121b3ba474437d6599fb1bc1d2487b1..50d1ebafe0b31fa3c432e3d2ce2613b35ffdbd48 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -57,11 +57,13 @@ MODULE_AUTHOR("Qumranet"); MODULE_LICENSE("GPL"); +#ifdef MODULE static const struct x86_cpu_id svm_cpu_id[] = { X86_FEATURE_MATCH(X86_FEATURE_SVM), {} }; MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id); +#endif #define IOPM_ALLOC_ORDER 2 #define MSRPM_ALLOC_ORDER 1 @@ -1005,33 +1007,32 @@ static void svm_cpu_uninit(int cpu) static int svm_cpu_init(int cpu) { struct svm_cpu_data *sd; - int r; sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL); if (!sd) return -ENOMEM; sd->cpu = cpu; - r = -ENOMEM; sd->save_area = alloc_page(GFP_KERNEL); if (!sd->save_area) - goto err_1; + goto free_cpu_data; if (svm_sev_enabled()) { - r = -ENOMEM; sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1, sizeof(void *), GFP_KERNEL); if (!sd->sev_vmcbs) - goto err_1; + goto free_save_area; } per_cpu(svm_data, cpu) = sd; return 0; -err_1: +free_save_area: + __free_page(sd->save_area); +free_cpu_data: kfree(sd); - return r; + return -ENOMEM; } @@ -1350,6 +1351,24 @@ static __init void svm_adjust_mmio_mask(void) kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK); } +static void svm_hardware_teardown(void) +{ + int cpu; + + if (svm_sev_enabled()) { + bitmap_free(sev_asid_bitmap); + bitmap_free(sev_reclaim_asid_bitmap); + + sev_flush_asids(); + } + + for_each_possible_cpu(cpu) + svm_cpu_uninit(cpu); + + __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER); + iopm_base = 0; +} + static __init int svm_hardware_setup(void) { int cpu; @@ -1463,29 +1482,10 @@ static __init int svm_hardware_setup(void) return 0; err: - __free_pages(iopm_pages, IOPM_ALLOC_ORDER); - iopm_base = 0; + svm_hardware_teardown(); return r; } -static __exit void svm_hardware_unsetup(void) -{ - int cpu; - - if (svm_sev_enabled()) { - bitmap_free(sev_asid_bitmap); - bitmap_free(sev_reclaim_asid_bitmap); - - sev_flush_asids(); - } - - for_each_possible_cpu(cpu) - svm_cpu_uninit(cpu); - - __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER); - iopm_base = 0; -} - static void init_seg(struct vmcb_seg *seg) { seg->selector = 0; @@ -1933,14 +1933,6 @@ static void sev_clflush_pages(struct page *pages[], unsigned long npages) static void __unregister_enc_region_locked(struct kvm *kvm, struct enc_region *region) { - /* - * The guest may change the memory encryption attribute from C=0 -> C=1 - * or vice versa for this memory range. Lets make sure caches are - * flushed to ensure that guest data gets written into memory with - * correct C-bit. 
- */ - sev_clflush_pages(region->pages, region->npages); - sev_unpin_memory(kvm, region->pages, region->npages); list_del(&region->list); kfree(region); @@ -1970,6 +1962,13 @@ static void sev_vm_destroy(struct kvm *kvm) mutex_lock(&kvm->lock); + /* + * Ensure that all guest tagged cache entries are flushed before + * releasing the pages back to the system for use. CLFLUSH will + * not do this, so issue a WBINVD. + */ + wbinvd_on_all_cpus(); + /* * if userspace was terminated before unregistering the memory regions * then lets unpin all the registered memory. @@ -2196,8 +2195,9 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) static int avic_init_vcpu(struct vcpu_svm *svm) { int ret; + struct kvm_vcpu *vcpu = &svm->vcpu; - if (!kvm_vcpu_apicv_active(&svm->vcpu)) + if (!avic || !irqchip_in_kernel(vcpu->kvm)) return 0; ret = avic_init_backing_page(&svm->vcpu); @@ -5232,6 +5232,9 @@ static void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu) struct vmcb *vmcb = svm->vmcb; bool activated = kvm_vcpu_apicv_active(vcpu); + if (!avic) + return; + if (activated) { /** * During AVIC temporary deactivation, guest could update @@ -5255,8 +5258,11 @@ static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap) return; } -static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec) +static int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec) { + if (!vcpu->arch.apicv_active) + return -1; + kvm_lapic_set_irr(vec, vcpu->arch.apic); smp_mb__after_atomic(); @@ -5268,6 +5274,8 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec) put_cpu(); } else kvm_vcpu_wake_up(vcpu); + + return 0; } static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu) @@ -6303,7 +6311,8 @@ static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu, enum exit_fastpath_completion *exit_fastpath) { if (!is_guest_mode(vcpu) && - to_svm(vcpu)->vmcb->control.exit_code == EXIT_REASON_MSR_WRITE) + to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR && + to_svm(vcpu)->vmcb->control.exit_info_1) *exit_fastpath = handle_fastpath_set_msr_irqoff(vcpu); } @@ -7148,6 +7157,9 @@ static int svm_mem_enc_op(struct kvm *kvm, void __user *argp) if (!svm_sev_enabled()) return -ENOTTY; + if (!argp) + return 0; + if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd))) return -EFAULT; @@ -7275,6 +7287,13 @@ static int svm_unregister_enc_region(struct kvm *kvm, goto failed; } + /* + * Ensure that all guest tagged cache entries are flushed before + * releasing the pages back to the system for use. CLFLUSH will + * not do this, so issue a WBINVD.
+ */ + wbinvd_on_all_cpus(); + __unregister_enc_region_locked(kvm, region); mutex_unlock(&kvm->lock); @@ -7378,7 +7397,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .cpu_has_kvm_support = has_svm, .disabled_by_bios = is_disabled, .hardware_setup = svm_hardware_setup, - .hardware_unsetup = svm_hardware_unsetup, + .hardware_unsetup = svm_hardware_teardown, .check_processor_compatibility = svm_check_processor_compat, .hardware_enable = svm_hardware_enable, .hardware_disable = svm_hardware_disable, @@ -7433,6 +7452,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .run = svm_vcpu_run, .handle_exit = handle_exit, .skip_emulated_instruction = skip_emulated_instruction, + .update_emulated_instruction = NULL, .set_interrupt_shadow = svm_set_interrupt_shadow, .get_interrupt_shadow = svm_get_interrupt_shadow, .patch_hypercall = svm_patch_hypercall, diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h index 283bdb7071af604453e081f30baec3bcdd938dac..f486e260624740e8fd57683b001beb4737593c60 100644 --- a/arch/x86/kvm/vmx/capabilities.h +++ b/arch/x86/kvm/vmx/capabilities.h @@ -12,6 +12,7 @@ extern bool __read_mostly enable_ept; extern bool __read_mostly enable_unrestricted_guest; extern bool __read_mostly enable_ept_ad_bits; extern bool __read_mostly enable_pml; +extern bool __read_mostly enable_apicv; extern int __read_mostly pt_mode; #define PT_MODE_SYSTEM 0 diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 3589cd3c0fcc89f5430c42d2f7854a2491e54fee..9750e590c89d75e14c174e8ae64df2ccdfd25d61 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -224,7 +224,7 @@ static inline void nested_release_evmcs(struct kvm_vcpu *vcpu) return; kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true); - vmx->nested.hv_evmcs_vmptr = -1ull; + vmx->nested.hv_evmcs_vmptr = 0; vmx->nested.hv_evmcs = NULL; } @@ -1923,7 +1923,8 @@ static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu, if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa)) return 1; - if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) { + if (unlikely(!vmx->nested.hv_evmcs || + evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) { if (!vmx->nested.hv_evmcs) vmx->nested.current_vmptr = -1ull; @@ -3161,10 +3162,10 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume. * * Returns: - * NVMX_ENTRY_SUCCESS: Entered VMX non-root mode - * NVMX_ENTRY_VMFAIL: Consistency check VMFail - * NVMX_ENTRY_VMEXIT: Consistency check VMExit - * NVMX_ENTRY_KVM_INTERNAL_ERROR: KVM internal error + * NVMX_VMENTRY_SUCCESS: Entered VMX non-root mode + * NVMX_VMENTRY_VMFAIL: Consistency check VMFail + * NVMX_VMENTRY_VMEXIT: Consistency check VMExit + * NVMX_VMENTRY_KVM_INTERNAL_ERROR: KVM internal error */ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry) @@ -3609,8 +3610,15 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) unsigned long exit_qual; bool block_nested_events = vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu); + bool mtf_pending = vmx->nested.mtf_pending; struct kvm_lapic *apic = vcpu->arch.apic; + /* + * Clear the MTF state. If a higher priority VM-exit is delivered first, + * this state is discarded. 
+ */ + vmx->nested.mtf_pending = false; + if (lapic_in_kernel(vcpu) && test_bit(KVM_APIC_INIT, &apic->pending_events)) { if (block_nested_events) @@ -3621,8 +3629,28 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) return 0; } + /* + * Process any exceptions that are not debug traps before MTF. + */ + if (vcpu->arch.exception.pending && + !vmx_pending_dbg_trap(vcpu) && + nested_vmx_check_exception(vcpu, &exit_qual)) { + if (block_nested_events) + return -EBUSY; + nested_vmx_inject_exception_vmexit(vcpu, exit_qual); + return 0; + } + + if (mtf_pending) { + if (block_nested_events) + return -EBUSY; + nested_vmx_update_pending_dbg(vcpu); + nested_vmx_vmexit(vcpu, EXIT_REASON_MONITOR_TRAP_FLAG, 0, 0); + return 0; + } + if (vcpu->arch.exception.pending && - nested_vmx_check_exception(vcpu, &exit_qual)) { + nested_vmx_check_exception(vcpu, &exit_qual)) { if (block_nested_events) return -EBUSY; nested_vmx_inject_exception_vmexit(vcpu, exit_qual); @@ -5285,24 +5313,17 @@ fail: return 1; } - -static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12) +/* + * Return true if an IO instruction with the specified port and size should cause + * a VM-exit into L1. + */ +bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port, + int size) { - unsigned long exit_qualification; + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); gpa_t bitmap, last_bitmap; - unsigned int port; - int size; u8 b; - if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) - return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING); - - exit_qualification = vmcs_readl(EXIT_QUALIFICATION); - - port = exit_qualification >> 16; - size = (exit_qualification & 7) + 1; - last_bitmap = (gpa_t)-1; b = -1; @@ -5329,8 +5350,26 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, return false; } +static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + unsigned long exit_qualification; + unsigned short port; + int size; + + if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) + return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING); + + exit_qualification = vmcs_readl(EXIT_QUALIFICATION); + + port = exit_qualification >> 16; + size = (exit_qualification & 7) + 1; + + return nested_vmx_check_io_bitmaps(vcpu, port, size); +} + /* - * Return 1 if we should exit from L2 to L1 to handle an MSR access access, + * Return 1 if we should exit from L2 to L1 to handle an MSR access, * rather than handle it ourselves in L0. I.e., check whether L1 expressed * disinterest in the current event (read or write a specific MSR) by using an * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps. @@ -5712,6 +5751,9 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu, if (vmx->nested.nested_run_pending) kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING; + + if (vmx->nested.mtf_pending) + kvm_state.flags |= KVM_STATE_NESTED_MTF_PENDING; } } @@ -5892,6 +5934,9 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, vmx->nested.nested_run_pending = !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING); + vmx->nested.mtf_pending = + !!(kvm_state->flags & KVM_STATE_NESTED_MTF_PENDING); + ret = -EINVAL; if (nested_cpu_has_shadow_vmcs(vmcs12) && vmcs12->vmcs_link_pointer != -1ull) { @@ -5949,8 +5994,7 @@ void nested_vmx_set_vmcs_shadowing_bitmap(void) * bit in the high half is on if the corresponding bit in the control field * may be on. See also vmx_control_verify(). 
*/ -void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps, - bool apicv) +void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps) { /* * Note that as a general rule, the high half of the MSRs (bits in @@ -5977,7 +6021,7 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps, PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING | PIN_BASED_VIRTUAL_NMIS | - (apicv ? PIN_BASED_POSTED_INTR : 0); + (enable_apicv ? PIN_BASED_POSTED_INTR : 0); msrs->pinbased_ctls_high |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR | PIN_BASED_VMX_PREEMPTION_TIMER; diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h index fc874d4ead0f07613eaa95880ee623f490e04418..9aeda46f473ee380f6764b59641805e5a343fec1 100644 --- a/arch/x86/kvm/vmx/nested.h +++ b/arch/x86/kvm/vmx/nested.h @@ -17,8 +17,7 @@ enum nvmx_vmentry_status { }; void vmx_leave_nested(struct kvm_vcpu *vcpu); -void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps, - bool apicv); +void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps); void nested_vmx_hardware_unsetup(void); __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *)); void nested_vmx_set_vmcs_shadowing_bitmap(void); @@ -34,6 +33,8 @@ int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata); int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification, u32 vmx_instruction_info, bool wr, int len, gva_t *ret); void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu); +bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port, + int size); static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu) { @@ -175,6 +176,11 @@ static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12) return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS; } +static inline int nested_cpu_has_mtf(struct vmcs12 *vmcs12) +{ + return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG); +} + static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12) { return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 3be25ecae1455d9f30972bc2daaafab41eafa2ac..079d9fbf278e490c9adadbd7685ceb4e05f1b218 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -64,11 +64,13 @@ MODULE_AUTHOR("Qumranet"); MODULE_LICENSE("GPL"); +#ifdef MODULE static const struct x86_cpu_id vmx_cpu_id[] = { X86_FEATURE_MATCH(X86_FEATURE_VMX), {} }; MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id); +#endif bool __read_mostly enable_vpid = 1; module_param_named(vpid, enable_vpid, bool, 0444); @@ -95,7 +97,7 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO); static bool __read_mostly fasteoi = 1; module_param(fasteoi, bool, S_IRUGO); -static bool __read_mostly enable_apicv = 1; +bool __read_mostly enable_apicv = 1; module_param(enable_apicv, bool, S_IRUGO); /* @@ -1175,6 +1177,10 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) vmx->guest_msrs[i].mask); } + + if (vmx->nested.need_vmcs12_to_shadow_sync) + nested_sync_vmcs12_to_shadow(vcpu); + if (vmx->guest_state_loaded) return; @@ -1599,6 +1605,40 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu) return 1; } + +/* + * Recognizes a pending MTF VM-exit and records the nested state for later + * delivery. 
+ */ +static void vmx_update_emulated_instruction(struct kvm_vcpu *vcpu) +{ + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + struct vcpu_vmx *vmx = to_vmx(vcpu); + + if (!is_guest_mode(vcpu)) + return; + + /* + * Per the SDM, MTF takes priority over debug-trap exceptions besides + * T-bit traps. As instruction emulation is completed (i.e. at the + * instruction boundary), any #DB exception pending delivery must be a + * debug-trap. Record the pending MTF state to be delivered in + * vmx_check_nested_events(). + */ + if (nested_cpu_has_mtf(vmcs12) && + (!vcpu->arch.exception.pending || + vcpu->arch.exception.nr == DB_VECTOR)) + vmx->nested.mtf_pending = true; + else + vmx->nested.mtf_pending = false; +} + +static int vmx_skip_emulated_instruction(struct kvm_vcpu *vcpu) +{ + vmx_update_emulated_instruction(vcpu); + return skip_emulated_instruction(vcpu); +} + static void vmx_clear_hlt(struct kvm_vcpu *vcpu) { /* @@ -2298,6 +2338,17 @@ static void hardware_disable(void) kvm_cpu_vmxoff(); } +/* + * There is no X86_FEATURE for SGX yet, but anyway we need to query CPUID + * directly instead of going through cpu_has(), to ensure KVM is trapping + * ENCLS whenever it's supported in hardware. It does not matter whether + * the host OS supports or has enabled SGX. + */ +static bool cpu_has_sgx(void) +{ + return cpuid_eax(0) >= 0x12 && (cpuid_eax(0x12) & BIT(0)); +} + static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, u32 msr, u32 *result) { @@ -2378,8 +2429,9 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf, SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE | SECONDARY_EXEC_PT_USE_GPA | SECONDARY_EXEC_PT_CONCEAL_VMX | - SECONDARY_EXEC_ENABLE_VMFUNC | - SECONDARY_EXEC_ENCLS_EXITING; + SECONDARY_EXEC_ENABLE_VMFUNC; + if (cpu_has_sgx()) + opt2 |= SECONDARY_EXEC_ENCLS_EXITING; if (adjust_vmx_controls(min2, opt2, MSR_IA32_VMX_PROCBASED_CTLS2, &_cpu_based_2nd_exec_control) < 0) @@ -3818,24 +3870,29 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu, * 2. If target vcpu isn't running(root mode), kick it to pick up the * interrupt from PIR in next vmentry. */ -static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector) +static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector) { struct vcpu_vmx *vmx = to_vmx(vcpu); int r; r = vmx_deliver_nested_posted_interrupt(vcpu, vector); if (!r) - return; + return 0; + + if (!vcpu->arch.apicv_active) + return -1; if (pi_test_and_set_pir(vector, &vmx->pi_desc)) - return; + return 0; /* If a previous notification has sent the IPI, nothing to do. */ if (pi_test_and_set_on(&vmx->pi_desc)) - return; + return 0; if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false)) kvm_vcpu_kick(vcpu); + + return 0; } /* @@ -6230,7 +6287,7 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu) #endif ASM_CALL_CONSTRAINT : - THUNK_TARGET(entry), + [thunk_target]"r"(entry), [ss]"i"(__KERNEL_DS), [cs]"i"(__KERNEL_CS) ); @@ -6482,8 +6539,11 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu) vmcs_write32(PLE_WINDOW, vmx->ple_window); } - if (vmx->nested.need_vmcs12_to_shadow_sync) - nested_sync_vmcs12_to_shadow(vcpu); + /* + * We did this in prepare_switch_to_guest, because it needs to + * be within srcu_read_lock. 
+ */ + WARN_ON_ONCE(vmx->nested.need_vmcs12_to_shadow_sync); if (kvm_register_is_dirty(vcpu, VCPU_REGS_RSP)) vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); @@ -6757,8 +6817,7 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu) if (nested) nested_vmx_setup_ctls_msrs(&vmx->nested.msrs, - vmx_capability.ept, - kvm_vcpu_apicv_active(vcpu)); + vmx_capability.ept); else memset(&vmx->nested.msrs, 0, sizeof(vmx->nested.msrs)); @@ -6839,8 +6898,7 @@ static int __init vmx_check_processor_compat(void) if (setup_vmcs_config(&vmcs_conf, &vmx_cap) < 0) return -EIO; if (nested) - nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, vmx_cap.ept, - enable_apicv); + nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, vmx_cap.ept); if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) { printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n", smp_processor_id()); @@ -7101,6 +7159,40 @@ static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu) to_vmx(vcpu)->req_immediate_exit = true; } +static int vmx_check_intercept_io(struct kvm_vcpu *vcpu, + struct x86_instruction_info *info) +{ + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + unsigned short port; + bool intercept; + int size; + + if (info->intercept == x86_intercept_in || + info->intercept == x86_intercept_ins) { + port = info->src_val; + size = info->dst_bytes; + } else { + port = info->dst_val; + size = info->src_bytes; + } + + /* + * If the 'use IO bitmaps' VM-execution control is 0, IO instruction + * VM-exits depend on the 'unconditional IO exiting' VM-execution + * control. + * + * Otherwise, IO instruction VM-exits are controlled by the IO bitmaps. + */ + if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) + intercept = nested_cpu_has(vmcs12, + CPU_BASED_UNCOND_IO_EXITING); + else + intercept = nested_vmx_check_io_bitmaps(vcpu, port, size); + + /* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */ + return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; +} + static int vmx_check_intercept(struct kvm_vcpu *vcpu, struct x86_instruction_info *info, enum x86_intercept_stage stage) @@ -7108,19 +7200,45 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12 = get_vmcs12(vcpu); struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; + switch (info->intercept) { /* * RDPID causes #UD if disabled through secondary execution controls. * Because it is marked as EmulateOnUD, we need to intercept it here. */ - if (info->intercept == x86_intercept_rdtscp && - !nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) { - ctxt->exception.vector = UD_VECTOR; - ctxt->exception.error_code_valid = false; - return X86EMUL_PROPAGATE_FAULT; - } + case x86_intercept_rdtscp: + if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) { + ctxt->exception.vector = UD_VECTOR; + ctxt->exception.error_code_valid = false; + return X86EMUL_PROPAGATE_FAULT; + } + break; + + case x86_intercept_in: + case x86_intercept_ins: + case x86_intercept_out: + case x86_intercept_outs: + return vmx_check_intercept_io(vcpu, info); + + case x86_intercept_lgdt: + case x86_intercept_lidt: + case x86_intercept_lldt: + case x86_intercept_ltr: + case x86_intercept_sgdt: + case x86_intercept_sidt: + case x86_intercept_sldt: + case x86_intercept_str: + if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC)) + return X86EMUL_CONTINUE; + + /* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */ + break; /* TODO: check more intercepts... 
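The two-level test in vmx_check_intercept_io() above reduces to a small decision structure; here it is in isolation, with the bitmap lookup passed in as a stand-in for nested_vmx_check_io_bitmaps():

    #include <stdbool.h>

    static bool io_causes_nested_vmexit(bool use_io_bitmaps,
                                        bool uncond_io_exiting,
                                        bool (*bitmap_test)(unsigned short port,
                                                            int size),
                                        unsigned short port, int size)
    {
            /*
             * Without IO bitmaps, the single "unconditional IO exiting"
             * control decides; with them, only the bitmaps matter.
             */
            if (!use_io_bitmaps)
                    return uncond_io_exiting;

            return bitmap_test(port, size);
    }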
*/ - return X86EMUL_CONTINUE; + default: + break; + } + + return X86EMUL_UNHANDLEABLE; } #ifdef CONFIG_X86_64 @@ -7702,7 +7820,7 @@ static __init int hardware_setup(void) if (nested) { nested_vmx_setup_ctls_msrs(&vmcs_config.nested, - vmx_capability.ept, enable_apicv); + vmx_capability.ept); r = nested_vmx_hardware_setup(kvm_vmx_exit_handlers); if (r) @@ -7786,7 +7904,8 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { .run = vmx_vcpu_run, .handle_exit = vmx_handle_exit, - .skip_emulated_instruction = skip_emulated_instruction, + .skip_emulated_instruction = vmx_skip_emulated_instruction, + .update_emulated_instruction = vmx_update_emulated_instruction, .set_interrupt_shadow = vmx_set_interrupt_shadow, .get_interrupt_shadow = vmx_get_interrupt_shadow, .patch_hypercall = vmx_patch_hypercall, diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 7f42cf3dcd7002bd41c3702b457bfc507c800978..e64da06c70092362ed29964fcc12c7bab6a2a228 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -150,6 +150,9 @@ struct nested_vmx { /* L2 must run next, and mustn't decide to exit to L1. */ bool nested_run_pending; + /* Pending MTF VM-exit into L1. */ + bool mtf_pending; + struct loaded_vmcs vmcs02; /* diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index fb5d64ebc35d079969ad6f5fb3fac8dbdf092355..cf95c36cb4f4aab2b5c0628460b8c41718ef5a01 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1554,7 +1554,10 @@ EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr); */ static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data) { - if (lapic_in_kernel(vcpu) && apic_x2apic_mode(vcpu->arch.apic) && + if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic)) + return 1; + + if (((data & APIC_SHORT_MASK) == APIC_DEST_NOSHORT) && ((data & APIC_DEST_MASK) == APIC_DEST_PHYSICAL) && ((data & APIC_MODE_MASK) == APIC_DM_FIXED)) { @@ -2444,7 +2447,6 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) vcpu->hv_clock.tsc_timestamp = tsc_timestamp; vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; vcpu->last_guest_tsc = tsc_timestamp; - WARN_ON((s64)vcpu->hv_clock.system_time < 0); /* If the host uses TSC clocksource, then it is stable */ pvclock_flags = 0; @@ -6891,6 +6893,8 @@ restart: kvm_rip_write(vcpu, ctxt->eip); if (r && ctxt->tf) r = kvm_vcpu_do_singlestep(vcpu); + if (kvm_x86_ops->update_emulated_instruction) + kvm_x86_ops->update_emulated_instruction(vcpu); __kvm_set_rflags(vcpu, ctxt->eflags); } @@ -7188,14 +7192,16 @@ static void kvm_timer_init(void) if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { #ifdef CONFIG_CPU_FREQ - struct cpufreq_policy policy; + struct cpufreq_policy *policy; int cpu; - memset(&policy, 0, sizeof(policy)); cpu = get_cpu(); - cpufreq_get_policy(&policy, cpu); - if (policy.cpuinfo.max_freq) - max_tsc_khz = policy.cpuinfo.max_freq; + policy = cpufreq_cpu_get(cpu); + if (policy) { + if (policy->cpuinfo.max_freq) + max_tsc_khz = policy->cpuinfo.max_freq; + cpufreq_cpu_put(policy); + } put_cpu(); #endif cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block, @@ -7306,12 +7312,12 @@ int kvm_arch_init(void *opaque) } if (!ops->cpu_has_kvm_support()) { - printk(KERN_ERR "kvm: no hardware support\n"); + pr_err_ratelimited("kvm: no hardware support\n"); r = -EOPNOTSUPP; goto out; } if (ops->disabled_by_bios()) { - printk(KERN_ERR "kvm: disabled by bios\n"); + pr_err_ratelimited("kvm: disabled by bios\n"); r = -EOPNOTSUPP; goto out; } diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index 
64229dad7eab6ce868957491431b62b51a2bee95..69309cd56fdf3fb28c8b7732297471ba69f46686 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c @@ -363,13 +363,8 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, { const struct ptdump_range ptdump_ranges[] = { #ifdef CONFIG_X86_64 - -#define normalize_addr_shift (64 - (__VIRTUAL_MASK_SHIFT + 1)) -#define normalize_addr(u) ((signed long)((u) << normalize_addr_shift) >> \ - normalize_addr_shift) - {0, PTRS_PER_PGD * PGD_LEVEL_MULT / 2}, - {normalize_addr(PTRS_PER_PGD * PGD_LEVEL_MULT / 2), ~0UL}, + {GUARD_HOLE_END_ADDR, ~0UL}, #else {0, ~0UL}, #endif diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index fa4ea09593abb0b3f63cee1b910f78f4828789b6..629fdf13f846562a9884a71e5c65d11319642b22 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -190,7 +190,7 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) return pmd_k; } -void vmalloc_sync_all(void) +static void vmalloc_sync(void) { unsigned long address; @@ -217,6 +217,16 @@ void vmalloc_sync_all(void) } } +void vmalloc_sync_mappings(void) +{ + vmalloc_sync(); +} + +void vmalloc_sync_unmappings(void) +{ + vmalloc_sync(); +} + /* * 32-bit: * @@ -319,11 +329,23 @@ out: #else /* CONFIG_X86_64: */ -void vmalloc_sync_all(void) +void vmalloc_sync_mappings(void) { + /* + * 64-bit mappings might allocate new p4d/pud pages + * that need to be propagated to all tasks' PGDs. + */ sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END); } +void vmalloc_sync_unmappings(void) +{ + /* + * Unmappings never allocate or free p4d/pud pages. + * No work is required here. + */ +} + /* * 64-bit: * diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 44e4beb4239f93bb83876bd21ab6f8007460be71..18c637c0dc6fa8c2f51393903f8d4e1546fb6d5d 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -106,6 +106,22 @@ static unsigned int __ioremap_check_encrypted(struct resource *res) return 0; } +/* + * The EFI runtime services data area is not covered by walk_mem_res(), but must + * be mapped encrypted when SEV is active. + */ +static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *desc) +{ + if (!sev_active()) + return; + + if (!IS_ENABLED(CONFIG_EFI)) + return; + + if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA) + desc->flags |= IORES_MAP_ENCRYPTED; +} + static int __ioremap_collect_map_flags(struct resource *res, void *arg) { struct ioremap_desc *desc = arg; @@ -124,6 +140,9 @@ static int __ioremap_collect_map_flags(struct resource *res, void *arg) * To avoid multiple resource walks, this function walks resources marked as * IORESOURCE_MEM and IORESOURCE_BUSY and looking for system RAM and/or a * resource described not as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES). + * + * After that, deal with misc other ranges in __ioremap_check_other() which do + * not fall into the above category. 
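The shape of that two-stage classification, sketched with simplified stand-ins for walk_mem_res(), sev_active() and efi_mem_type(): the standard resource walk runs first, then the special-case hook adds flags for ranges the walk cannot see.

    #include <stdbool.h>
    #include <stdint.h>

    #define MAP_ENCRYPTED 0x1u /* illustrative flag bit */

    struct map_desc {
            unsigned int flags;
    };

    static void walk_standard_resources(uint64_t addr, uint64_t size,
                                        struct map_desc *desc)
    {
            (void)addr; (void)size; (void)desc; /* stub for walk_mem_res() */
    }

    static bool sev_guest(void)
    {
            return true; /* stub for sev_active() */
    }

    static bool efi_runtime_services_data(uint64_t addr)
    {
            (void)addr;
            return false; /* stub for the efi_mem_type() lookup */
    }

    static void classify_mapping(uint64_t addr, uint64_t size,
                                 struct map_desc *desc)
    {
            desc->flags = 0;
            walk_standard_resources(addr, size, desc);

            /* Ranges the resource walk cannot see get a dedicated check. */
            if (sev_guest() && efi_runtime_services_data(addr))
                    desc->flags |= MAP_ENCRYPTED;
    }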
*/ static void __ioremap_check_mem(resource_size_t addr, unsigned long size, struct ioremap_desc *desc) @@ -135,6 +154,8 @@ static void __ioremap_check_mem(resource_size_t addr, unsigned long size, memset(desc, 0, sizeof(struct ioremap_desc)); walk_mem_res(start, end, desc, __ioremap_collect_map_flags); + + __ioremap_check_other(addr, desc); } /* diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c index 393d251798c0fa51ad7dda280aa38d8abc702120..4d2a7a76460262a537bc242c4f3468ea1d0ee15b 100644 --- a/arch/x86/net/bpf_jit_comp32.c +++ b/arch/x86/net/bpf_jit_comp32.c @@ -2039,10 +2039,12 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, } /* and dreg_lo,sreg_lo */ EMIT2(0x23, add_2reg(0xC0, sreg_lo, dreg_lo)); - /* and dreg_hi,sreg_hi */ - EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi)); - /* or dreg_lo,dreg_hi */ - EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi)); + if (is_jmp64) { + /* and dreg_hi,sreg_hi */ + EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi)); + /* or dreg_lo,dreg_hi */ + EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi)); + } goto emit_cond_jmp; } case BPF_JMP | BPF_JSET | BPF_K: diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index fa8506e76bbeba3eb7b2efc75ff96767d41f6b6b..d19a2edd63cb224e33408d094d0df58f6ed3508c 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -180,7 +180,7 @@ void efi_sync_low_kernel_mappings(void) static inline phys_addr_t virt_to_phys_or_null_size(void *va, unsigned long size) { - bool bad_size; + phys_addr_t pa; if (!va) return 0; @@ -188,16 +188,13 @@ virt_to_phys_or_null_size(void *va, unsigned long size) if (virt_addr_valid(va)) return virt_to_phys(va); - /* - * A fully aligned variable on the stack is guaranteed not to - * cross a page bounary. Try to catch strings on the stack by - * checking that 'size' is a power of two. 
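The heuristic being deleted here warned on suspiciously sized or misaligned stack objects; the replacement check in the lines that follow is exact instead: an object crosses a page boundary precisely when its first and last byte addresses differ above the page-offset bits. Standalone (nonzero size and 4 KiB pages assumed):

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_MASK  (~(((uint64_t)1 << PAGE_SHIFT) - 1))

    static bool crosses_page(uint64_t pa, uint64_t size)
    {
            /* XOR keeps only differing bits; mask off the page offset. */
            return ((pa ^ (pa + size - 1)) & PAGE_MASK) != 0;
    }

    /*
     * crosses_page(0x0ff8, 16)   -> true  (0x0ff8..0x1007 spans two pages)
     * crosses_page(0x1000, 4096) -> false (exactly one page)
     */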
- */ - bad_size = size > PAGE_SIZE || !is_power_of_2(size); + pa = slow_virt_to_phys(va); - WARN_ON(!IS_ALIGNED((unsigned long)va, size) || bad_size); + /* check if the object crosses a page boundary */ + if (WARN_ON((pa ^ (pa + size - 1)) & PAGE_MASK)) + return 0; - return slow_virt_to_phys(va); + return pa; } #define virt_to_phys_or_null(addr) \ @@ -568,85 +565,25 @@ efi_thunk_set_virtual_address_map(unsigned long memory_map_size, static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc) { - efi_status_t status; - u32 phys_tm, phys_tc; - unsigned long flags; - - spin_lock(&rtc_lock); - spin_lock_irqsave(&efi_runtime_lock, flags); - - phys_tm = virt_to_phys_or_null(tm); - phys_tc = virt_to_phys_or_null(tc); - - status = efi_thunk(get_time, phys_tm, phys_tc); - - spin_unlock_irqrestore(&efi_runtime_lock, flags); - spin_unlock(&rtc_lock); - - return status; + return EFI_UNSUPPORTED; } static efi_status_t efi_thunk_set_time(efi_time_t *tm) { - efi_status_t status; - u32 phys_tm; - unsigned long flags; - - spin_lock(&rtc_lock); - spin_lock_irqsave(&efi_runtime_lock, flags); - - phys_tm = virt_to_phys_or_null(tm); - - status = efi_thunk(set_time, phys_tm); - - spin_unlock_irqrestore(&efi_runtime_lock, flags); - spin_unlock(&rtc_lock); - - return status; + return EFI_UNSUPPORTED; } static efi_status_t efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending, efi_time_t *tm) { - efi_status_t status; - u32 phys_enabled, phys_pending, phys_tm; - unsigned long flags; - - spin_lock(&rtc_lock); - spin_lock_irqsave(&efi_runtime_lock, flags); - - phys_enabled = virt_to_phys_or_null(enabled); - phys_pending = virt_to_phys_or_null(pending); - phys_tm = virt_to_phys_or_null(tm); - - status = efi_thunk(get_wakeup_time, phys_enabled, - phys_pending, phys_tm); - - spin_unlock_irqrestore(&efi_runtime_lock, flags); - spin_unlock(&rtc_lock); - - return status; + return EFI_UNSUPPORTED; } static efi_status_t efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm) { - efi_status_t status; - u32 phys_tm; - unsigned long flags; - - spin_lock(&rtc_lock); - spin_lock_irqsave(&efi_runtime_lock, flags); - - phys_tm = virt_to_phys_or_null(tm); - - status = efi_thunk(set_wakeup_time, enabled, phys_tm); - - spin_unlock_irqrestore(&efi_runtime_lock, flags); - spin_unlock(&rtc_lock); - - return status; + return EFI_UNSUPPORTED; } static unsigned long efi_name_size(efi_char16_t *name) @@ -658,6 +595,8 @@ static efi_status_t efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor, u32 *attr, unsigned long *data_size, void *data) { + u8 buf[24] __aligned(8); + efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd)); efi_status_t status; u32 phys_name, phys_vendor, phys_attr; u32 phys_data_size, phys_data; @@ -665,14 +604,19 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor, spin_lock_irqsave(&efi_runtime_lock, flags); + *vnd = *vendor; + phys_data_size = virt_to_phys_or_null(data_size); - phys_vendor = virt_to_phys_or_null(vendor); + phys_vendor = virt_to_phys_or_null(vnd); phys_name = virt_to_phys_or_null_size(name, efi_name_size(name)); phys_attr = virt_to_phys_or_null(attr); phys_data = virt_to_phys_or_null_size(data, *data_size); - status = efi_thunk(get_variable, phys_name, phys_vendor, - phys_attr, phys_data_size, phys_data); + if (!phys_name || (data && !phys_data)) + status = EFI_INVALID_PARAMETER; + else + status = efi_thunk(get_variable, phys_name, phys_vendor, + phys_attr, phys_data_size, phys_data); spin_unlock_irqrestore(&efi_runtime_lock, flags); @@ 
-683,19 +627,25 @@ static efi_status_t efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor, u32 attr, unsigned long data_size, void *data) { + u8 buf[24] __aligned(8); + efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd)); u32 phys_name, phys_vendor, phys_data; efi_status_t status; unsigned long flags; spin_lock_irqsave(&efi_runtime_lock, flags); + *vnd = *vendor; + phys_name = virt_to_phys_or_null_size(name, efi_name_size(name)); - phys_vendor = virt_to_phys_or_null(vendor); + phys_vendor = virt_to_phys_or_null(vnd); phys_data = virt_to_phys_or_null_size(data, data_size); - /* If data_size is > sizeof(u32) we've got problems */ - status = efi_thunk(set_variable, phys_name, phys_vendor, - attr, data_size, phys_data); + if (!phys_name || !phys_data) + status = EFI_INVALID_PARAMETER; + else + status = efi_thunk(set_variable, phys_name, phys_vendor, + attr, data_size, phys_data); spin_unlock_irqrestore(&efi_runtime_lock, flags); @@ -707,6 +657,8 @@ efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor, u32 attr, unsigned long data_size, void *data) { + u8 buf[24] __aligned(8); + efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd)); u32 phys_name, phys_vendor, phys_data; efi_status_t status; unsigned long flags; @@ -714,13 +666,17 @@ efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor, if (!spin_trylock_irqsave(&efi_runtime_lock, flags)) return EFI_NOT_READY; + *vnd = *vendor; + phys_name = virt_to_phys_or_null_size(name, efi_name_size(name)); - phys_vendor = virt_to_phys_or_null(vendor); + phys_vendor = virt_to_phys_or_null(vnd); phys_data = virt_to_phys_or_null_size(data, data_size); - /* If data_size is > sizeof(u32) we've got problems */ - status = efi_thunk(set_variable, phys_name, phys_vendor, - attr, data_size, phys_data); + if (!phys_name || !phys_data) + status = EFI_INVALID_PARAMETER; + else + status = efi_thunk(set_variable, phys_name, phys_vendor, + attr, data_size, phys_data); spin_unlock_irqrestore(&efi_runtime_lock, flags); @@ -732,39 +688,36 @@ efi_thunk_get_next_variable(unsigned long *name_size, efi_char16_t *name, efi_guid_t *vendor) { + u8 buf[24] __aligned(8); + efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd)); efi_status_t status; u32 phys_name_size, phys_name, phys_vendor; unsigned long flags; spin_lock_irqsave(&efi_runtime_lock, flags); + *vnd = *vendor; + phys_name_size = virt_to_phys_or_null(name_size); - phys_vendor = virt_to_phys_or_null(vendor); + phys_vendor = virt_to_phys_or_null(vnd); phys_name = virt_to_phys_or_null_size(name, *name_size); - status = efi_thunk(get_next_variable, phys_name_size, - phys_name, phys_vendor); + if (!phys_name) + status = EFI_INVALID_PARAMETER; + else + status = efi_thunk(get_next_variable, phys_name_size, + phys_name, phys_vendor); spin_unlock_irqrestore(&efi_runtime_lock, flags); + *vendor = *vnd; return status; } static efi_status_t efi_thunk_get_next_high_mono_count(u32 *count) { - efi_status_t status; - u32 phys_count; - unsigned long flags; - - spin_lock_irqsave(&efi_runtime_lock, flags); - - phys_count = virt_to_phys_or_null(count); - status = efi_thunk(get_next_high_mono_count, phys_count); - - spin_unlock_irqrestore(&efi_runtime_lock, flags); - - return status; + return EFI_UNSUPPORTED; } static void diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 1f756ffffe8b3159dade936c5eecda8d847900b8..507f4fb88fa7fd184d1bba278a9ebaa8c8039f37 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -72,6 +72,9 
@@ #include #include #include +#ifdef CONFIG_X86_IOPL_IOPERM +#include +#endif #ifdef CONFIG_ACPI #include @@ -837,6 +840,25 @@ static void xen_load_sp0(unsigned long sp0) this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0); } +#ifdef CONFIG_X86_IOPL_IOPERM +static void xen_update_io_bitmap(void) +{ + struct physdev_set_iobitmap iobitmap; + struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw); + + native_tss_update_io_bitmap(); + + iobitmap.bitmap = (uint8_t *)(&tss->x86_tss) + + tss->x86_tss.io_bitmap_base; + if (tss->x86_tss.io_bitmap_base == IO_BITMAP_OFFSET_INVALID) + iobitmap.nr_ports = 0; + else + iobitmap.nr_ports = IO_BITMAP_BITS; + + HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &iobitmap); +} +#endif + static void xen_io_delay(void) { } @@ -896,14 +918,15 @@ static u64 xen_read_msr_safe(unsigned int msr, int *err) static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high) { int ret; +#ifdef CONFIG_X86_64 + unsigned int which; + u64 base; +#endif ret = 0; switch (msr) { #ifdef CONFIG_X86_64 - unsigned which; - u64 base; - case MSR_FS_BASE: which = SEGBASE_FS; goto set; case MSR_KERNEL_GS_BASE: which = SEGBASE_GS_USER; goto set; case MSR_GS_BASE: which = SEGBASE_GS_KERNEL; goto set; @@ -1046,6 +1069,9 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = { .write_idt_entry = xen_write_idt_entry, .load_sp0 = xen_load_sp0, +#ifdef CONFIG_X86_IOPL_IOPERM + .update_io_bitmap = xen_update_io_bitmap, +#endif .io_delay = xen_io_delay, /* Xen takes care of %gs when switching to usermode for us */ diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c index 09b69a3ed490362a13ff290b5d46ee1b477277ad..f0ff6654af2894359e54395fe72d0ce03f207406 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c @@ -610,12 +610,13 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, */ entity = &bfqg->entity; for_each_entity(entity) { - bfqg = container_of(entity, struct bfq_group, entity); - if (bfqg != bfqd->root_group) { - parent = bfqg_parent(bfqg); + struct bfq_group *curr_bfqg = container_of(entity, + struct bfq_group, entity); + if (curr_bfqg != bfqd->root_group) { + parent = bfqg_parent(curr_bfqg); if (!parent) parent = bfqd->root_group; - bfq_group_set_parent(bfqg, parent); + bfq_group_set_parent(curr_bfqg, parent); } } diff --git a/block/blk-core.c b/block/blk-core.c index 089e890ab208fd01efbda68aea4fdb47aaab1353..60dc9552ef8de53f9ba4b5b2f996e93825731275 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1663,12 +1663,6 @@ int kblockd_schedule_work(struct work_struct *work) } EXPORT_SYMBOL(kblockd_schedule_work); -int kblockd_schedule_work_on(int cpu, struct work_struct *work) -{ - return queue_work_on(cpu, kblockd_workqueue, work); -} -EXPORT_SYMBOL(kblockd_schedule_work_on); - int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay) { diff --git a/block/blk-flush.c b/block/blk-flush.c index 3f977c517960e61dca951551d437b52abcacd588..5cc775bdb06acbfac9dbfa79551d97f28c5a97cb 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -412,7 +412,7 @@ void blk_insert_flush(struct request *rq) */ if ((policy & REQ_FSEQ_DATA) && !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) { - blk_mq_request_bypass_insert(rq, false); + blk_mq_request_bypass_insert(rq, false, false); return; } diff --git a/block/blk-iocost.c b/block/blk-iocost.c index 27ca68621137ad5418e647c859b39b12a588f9c2..9a599cc28c290c7d79ef6512c5173a4ee7605bc5 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -1318,7 +1318,7 @@ static bool iocg_is_idle(struct ioc_gq 
*iocg) return false; /* is something in flight? */ - if (atomic64_read(&iocg->done_vtime) < atomic64_read(&iocg->vtime)) + if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime)) return false; return true; diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index ca22afd47b3dcce1ea72da7f4a6218c4ac9d85b5..74cedea560348167ab951c53dc3d8e14fe4f70dc 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -361,13 +361,19 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, bool has_sched, struct request *rq) { - /* dispatch flush rq directly */ - if (rq->rq_flags & RQF_FLUSH_SEQ) { - spin_lock(&hctx->lock); - list_add(&rq->queuelist, &hctx->dispatch); - spin_unlock(&hctx->lock); + /* + * dispatch flush and passthrough rq directly + * + * passthrough request has to be added to hctx->dispatch directly. + * For some reason, device may be in one situation which can't + * handle FS request, so STS_RESOURCE is always returned and the + * FS request will be added to hctx->dispatch. However passthrough + * request may be required at that time for fixing the problem. If + * passthrough request is added to scheduler queue, there isn't any + * chance to dispatch it given we prioritize requests in hctx->dispatch. + */ + if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq)) return true; - } if (has_sched) rq->rq_flags |= RQF_SORTED; @@ -391,8 +397,32 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head, WARN_ON(e && (rq->tag != -1)); - if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) + if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) { + /* + * Firstly normal IO request is inserted to scheduler queue or + * sw queue, meantime we add flush request to dispatch queue( + * hctx->dispatch) directly and there is at most one in-flight + * flush request for each hw queue, so it doesn't matter to add + * flush request to tail or front of the dispatch queue. + * + * Secondly in case of NCQ, flush request belongs to non-NCQ + * command, and queueing it will fail when there is any + * in-flight normal IO request(NCQ command). When adding flush + * rq to the front of hctx->dispatch, it is easier to introduce + * extra time to flush rq's latency because of S_SCHED_RESTART + * compared with adding to the tail of dispatch queue, then + * chance of flush merge is increased, and less flush requests + * will be issued to controller. It is observed that ~10% time + * is saved in blktests block/004 on disk attached to AHCI/NCQ + * drive when adding flush rq to the front of hctx->dispatch. + * + * Simply queue flush rq to the front of hctx->dispatch so that + * intensive flush workloads can benefit in case of NCQ HW. + */ + at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? 
true : at_head; + blk_mq_request_bypass_insert(rq, at_head, false); goto run; + } if (e && e->type->ops.insert_requests) { LIST_HEAD(list); diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index fbacde454718583555b38f07d0fe88ad89b712c3..586c9d6e904ab8aade50bea050a5a973bb2a273e 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -183,8 +183,8 @@ found_tag: return tag + tag_offset; } -void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags, - struct blk_mq_ctx *ctx, unsigned int tag) +void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx, + unsigned int tag) { if (!blk_mq_tag_is_reserved(tags, tag)) { const int real_tag = tag - tags->nr_reserved_tags; diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 15bc74acb57eca1c56bf0564cb000c002ea08618..2b8321efb68206cce7a865e801bf5d980629a487 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -26,8 +26,8 @@ extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int r extern void blk_mq_free_tags(struct blk_mq_tags *tags); extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data); -extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags, - struct blk_mq_ctx *ctx, unsigned int tag); +extern void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx, + unsigned int tag); extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags **tags, unsigned int depth, bool can_grow); diff --git a/block/blk-mq.c b/block/blk-mq.c index a12b1763508d3194853b6d33a057a62da62ea0e0..d92088dec6c359afd83642af362372b651b3d7e9 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -477,9 +477,9 @@ static void __blk_mq_free_request(struct request *rq) blk_pm_mark_last_busy(rq); rq->mq_hctx = NULL; if (rq->tag != -1) - blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag); + blk_mq_put_tag(hctx->tags, ctx, rq->tag); if (sched_tag != -1) - blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag); + blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag); blk_mq_sched_restart(hctx); blk_queue_exit(q); } @@ -735,7 +735,7 @@ static void blk_mq_requeue_work(struct work_struct *work) * merge. */ if (rq->rq_flags & RQF_DONTPREP) - blk_mq_request_bypass_insert(rq, false); + blk_mq_request_bypass_insert(rq, false, false); else blk_mq_sched_insert_request(rq, true, false, false); } @@ -1286,7 +1286,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list, q->mq_ops->commit_rqs(hctx); spin_lock(&hctx->lock); - list_splice_init(list, &hctx->dispatch); + list_splice_tail_init(list, &hctx->dispatch); spin_unlock(&hctx->lock); /* @@ -1677,12 +1677,16 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, * Should only be used carefully, when the caller knows we want to * bypass a potential IO scheduler on the target device. 
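The signature change just below threads an at_head flag through to the dispatch-list insertion, and together with the NCQ comment above, the head-vs-tail policy collapses to one expression. Sketch (the RQF_FLUSH_SEQ value is illustrative, not the kernel's):

    #include <stdbool.h>
    #include <stdint.h>

    #define RQF_FLUSH_SEQ (1u << 4) /* illustrative bit value */

    /*
     * Flush-sequence requests go to the head of hctx->dispatch so a
     * non-NCQ flush is issued before queued normal I/O; everything else
     * keeps the caller's preference and is appended at the tail.
     */
    static bool dispatch_at_head(uint32_t rq_flags, bool caller_at_head)
    {
            return (rq_flags & RQF_FLUSH_SEQ) ? true : caller_at_head;
    }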
*/ -void blk_mq_request_bypass_insert(struct request *rq, bool run_queue) +void blk_mq_request_bypass_insert(struct request *rq, bool at_head, + bool run_queue) { struct blk_mq_hw_ctx *hctx = rq->mq_hctx; spin_lock(&hctx->lock); - list_add_tail(&rq->queuelist, &hctx->dispatch); + if (at_head) + list_add(&rq->queuelist, &hctx->dispatch); + else + list_add_tail(&rq->queuelist, &hctx->dispatch); spin_unlock(&hctx->lock); if (run_queue) @@ -1849,7 +1853,7 @@ insert: if (bypass_insert) return BLK_STS_RESOURCE; - blk_mq_request_bypass_insert(rq, run_queue); + blk_mq_request_bypass_insert(rq, false, run_queue); return BLK_STS_OK; } @@ -1876,7 +1880,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true); if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) - blk_mq_request_bypass_insert(rq, true); + blk_mq_request_bypass_insert(rq, false, true); else if (ret != BLK_STS_OK) blk_mq_end_request(rq, ret); @@ -1910,7 +1914,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, if (ret != BLK_STS_OK) { if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) { - blk_mq_request_bypass_insert(rq, + blk_mq_request_bypass_insert(rq, false, list_empty(list)); break; } @@ -3398,7 +3402,6 @@ static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb) } static unsigned long blk_mq_poll_nsecs(struct request_queue *q, - struct blk_mq_hw_ctx *hctx, struct request *rq) { unsigned long ret = 0; @@ -3431,7 +3434,6 @@ static unsigned long blk_mq_poll_nsecs(struct request_queue *q, } static bool blk_mq_poll_hybrid_sleep(struct request_queue *q, - struct blk_mq_hw_ctx *hctx, struct request *rq) { struct hrtimer_sleeper hs; @@ -3451,7 +3453,7 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q, if (q->poll_nsec > 0) nsecs = q->poll_nsec; else - nsecs = blk_mq_poll_nsecs(q, hctx, rq); + nsecs = blk_mq_poll_nsecs(q, rq); if (!nsecs) return false; @@ -3506,7 +3508,7 @@ static bool blk_mq_poll_hybrid(struct request_queue *q, return false; } - return blk_mq_poll_hybrid_sleep(q, hctx, rq); + return blk_mq_poll_hybrid_sleep(q, rq); } /** diff --git a/block/blk-mq.h b/block/blk-mq.h index eaaca8fc1c2874e77e9fce95964444ddaf280745..10bfdfb494faf4035f8be143f1cf6e47e32fe50c 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -66,7 +66,8 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, */ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, bool at_head); -void blk_mq_request_bypass_insert(struct request *rq, bool run_queue); +void blk_mq_request_bypass_insert(struct request *rq, bool at_head, + bool run_queue); void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, struct list_head *list); @@ -199,7 +200,7 @@ static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx) static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq) { - blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag); + blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag); rq->tag = -1; if (rq->rq_flags & RQF_MQ_INFLIGHT) { diff --git a/block/genhd.c b/block/genhd.c index ff6268970ddc069f84b3977069cca72ba10627c6..9c2e13ce0d19554a01eb72c03d5c0fcba7a67a5a 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -301,6 +301,42 @@ struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector) } EXPORT_SYMBOL_GPL(disk_map_sector_rcu); +/** + * disk_has_partitions + * @disk: gendisk of interest + * + * Walk through the partition 
table and check if valid partition exists. + * + * CONTEXT: + * Don't care. + * + * RETURNS: + * True if the gendisk has at least one valid non-zero size partition. + * Otherwise false. + */ +bool disk_has_partitions(struct gendisk *disk) +{ + struct disk_part_tbl *ptbl; + int i; + bool ret = false; + + rcu_read_lock(); + ptbl = rcu_dereference(disk->part_tbl); + + /* Iterate partitions skipping the whole device at index 0 */ + for (i = 1; i < ptbl->len; i++) { + if (rcu_dereference(ptbl->part[i])) { + ret = true; + break; + } + } + + rcu_read_unlock(); + + return ret; +} +EXPORT_SYMBOL_GPL(disk_has_partitions); + /* * Can be deleted altogether. Later. * diff --git a/crypto/hash_info.c b/crypto/hash_info.c index c754cb75dd1a96a5f52b39ed17adc6be45347345..a49ff96bde7784f9095ac7ecab8ff65124b7b17c 100644 --- a/crypto/hash_info.c +++ b/crypto/hash_info.c @@ -26,7 +26,7 @@ const char *const hash_algo_name[HASH_ALGO__LAST] = { [HASH_ALGO_TGR_128] = "tgr128", [HASH_ALGO_TGR_160] = "tgr160", [HASH_ALGO_TGR_192] = "tgr192", - [HASH_ALGO_SM3_256] = "sm3-256", + [HASH_ALGO_SM3_256] = "sm3", [HASH_ALGO_STREEBOG_256] = "streebog256", [HASH_ALGO_STREEBOG_512] = "streebog512", }; diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c index b5516b04ffc07b95a5869678f0886499fa9553bc..6e9ec6e3fe47d63ea9adf07e540ac2a223379a06 100644 --- a/drivers/acpi/acpi_watchdog.c +++ b/drivers/acpi/acpi_watchdog.c @@ -55,12 +55,14 @@ static bool acpi_watchdog_uses_rtc(const struct acpi_table_wdat *wdat) } #endif +static bool acpi_no_watchdog; + static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void) { const struct acpi_table_wdat *wdat = NULL; acpi_status status; - if (acpi_disabled) + if (acpi_disabled || acpi_no_watchdog) return NULL; status = acpi_get_table(ACPI_SIG_WDAT, 0, @@ -88,6 +90,14 @@ bool acpi_has_watchdog(void) } EXPORT_SYMBOL_GPL(acpi_has_watchdog); +/* ACPI watchdog can be disabled on boot command line */ +static int __init disable_acpi_watchdog(char *str) +{ + acpi_no_watchdog = true; + return 1; +} +__setup("acpi_no_watchdog", disable_acpi_watchdog); + void __init acpi_watchdog_init(void) { const struct acpi_wdat_entry *entries; @@ -126,12 +136,11 @@ void __init acpi_watchdog_init(void) gas = &entries[i].register_region; res.start = gas->address; + res.end = res.start + ACPI_ACCESS_BYTE_WIDTH(gas->access_width) - 1; if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { res.flags = IORESOURCE_MEM; - res.end = res.start + ALIGN(gas->access_width, 4) - 1; } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { res.flags = IORESOURCE_IO; - res.end = res.start + gas->access_width - 1; } else { pr_warn("Unsupported address space: %u\n", gas->space_id); diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c index 8c83d8c620dc3deb10dc9a5a61b75bd6b2b0251b..789d5e920aaf7084058512e09467b554a9b5120d 100644 --- a/drivers/acpi/acpica/evevent.c +++ b/drivers/acpi/acpica/evevent.c @@ -265,4 +265,49 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event) handler) (acpi_gbl_fixed_event_handlers[event].context)); } +/******************************************************************************* + * + * FUNCTION: acpi_any_fixed_event_status_set + * + * PARAMETERS: None + * + * RETURN: TRUE or FALSE + * + * DESCRIPTION: Checks the PM status register for active fixed events + * + ******************************************************************************/ + +u32 acpi_any_fixed_event_status_set(void) +{ + acpi_status status; + u32 in_status; + u32 in_enable; + u32 i; + + status = 
acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &in_enable); + if (ACPI_FAILURE(status)) { + return (FALSE); + } + + status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &in_status); + if (ACPI_FAILURE(status)) { + return (FALSE); + } + + /* + * Check for all possible Fixed Events and dispatch those that are active + */ + for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) { + + /* Both the status and enable bits must be on for this event */ + + if ((in_status & acpi_gbl_fixed_event_info[i].status_bit_mask) && + (in_enable & acpi_gbl_fixed_event_info[i].enable_bit_mask)) { + return (TRUE); + } + } + + return (FALSE); +} + #endif /* !ACPI_REDUCED_HARDWARE */ diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 103acbbfcf9a513ff219404e0fe8894df1f563db..24c9642e8fc70d272f71b083f703eb1a1095e564 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -171,7 +171,7 @@ int ghes_estatus_pool_init(int num_ghes) * New allocation must be visible in all pgd before it can be found by * an NMI allocating from the pool. */ - vmalloc_sync_all(); + vmalloc_sync_mappings(); rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1); if (rc) diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 152f7fc0b2003f1fcfc89105a7547df358e51ee9..e5f95922bc217e42342cce186bf0968c7e7f31dd 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -1005,6 +1005,13 @@ static bool acpi_s2idle_wake(void) if (irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq))) return true; + /* + * If the status bit of any enabled fixed event is set, the + * wakeup is regarded as valid. + */ + if (acpi_any_fixed_event_status_set()) + return true; + /* * If there are no EC events to process and at least one of the * other enabled GPEs is active, the wakeup is regarded as a diff --git a/drivers/android/binder.c b/drivers/android/binder.c index a6b2082c24f8f1031d906469793270db1eb833c7..e47c8a4c83db52ea97bf6a0b4283fa65de7ec8c8 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -5228,6 +5228,7 @@ static int binder_open(struct inode *nodp, struct file *filp) binder_dev = container_of(filp->private_data, struct binder_device, miscdev); } + refcount_inc(&binder_dev->ref); proc->context = &binder_dev->context; binder_alloc_init(&proc->alloc); @@ -5405,6 +5406,7 @@ static int binder_node_release(struct binder_node *node, int refs) static void binder_deferred_release(struct binder_proc *proc) { struct binder_context *context = proc->context; + struct binder_device *device; struct rb_node *n; int threads, nodes, incoming_refs, outgoing_refs, active_transactions; @@ -5421,6 +5423,12 @@ static void binder_deferred_release(struct binder_proc *proc) context->binder_context_mgr_node = NULL; } mutex_unlock(&context->context_mgr_node_lock); + device = container_of(proc->context, struct binder_device, context); + if (refcount_dec_and_test(&device->ref)) { + kfree(context->name); + kfree(device); + } + proc->context = NULL; binder_inner_proc_lock(proc); /* * Make sure proc stays alive after we @@ -6077,6 +6085,7 @@ static int __init init_binder_device(const char *name) binder_device->miscdev.minor = MISC_DYNAMIC_MINOR; binder_device->miscdev.name = name; + refcount_set(&binder_device->ref, 1); binder_device->context.binder_context_mgr_uid = INVALID_UID; binder_device->context.name = name; mutex_init(&binder_device->context.context_mgr_node_lock); diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h index 
ae991097d14d17068f1512281704b0f04300c7d0..283d3cb9c16e5ffca25e171d7eb18b26e059f61f 100644 --- a/drivers/android/binder_internal.h +++ b/drivers/android/binder_internal.h @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -33,6 +34,7 @@ struct binder_device { struct miscdevice miscdev; struct binder_context context; struct inode *binderfs_inode; + refcount_t ref; }; /** diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c index e2580e5316a2f5747f5db96d3cbbd907f7442822..f303106b3362b8621c026bad0d7ac517170e77cb 100644 --- a/drivers/android/binderfs.c +++ b/drivers/android/binderfs.c @@ -154,6 +154,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode, if (!name) goto err; + refcount_set(&device->ref, 1); device->binderfs_inode = inode; device->context.binder_context_mgr_uid = INVALID_UID; device->context.name = name; @@ -257,8 +258,10 @@ static void binderfs_evict_inode(struct inode *inode) ida_free(&binderfs_minors, device->miscdev.minor); mutex_unlock(&binderfs_minors_mutex); - kfree(device->context.name); - kfree(device); + if (refcount_dec_and_test(&device->ref)) { + kfree(device->context.name); + kfree(device); + } } /** @@ -445,6 +448,7 @@ static int binderfs_binder_ctl_create(struct super_block *sb) inode->i_uid = info->root_uid; inode->i_gid = info->root_gid; + refcount_set(&device->ref, 1); device->binderfs_inode = inode; device->miscdev.minor = minor; diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c index 8db8c0fb5e2dac6851893e62d25b682177539a7f..7af74fb450a0d01c5aaf90e9ba1a2859041b6250 100644 --- a/drivers/atm/nicstar.c +++ b/drivers/atm/nicstar.c @@ -91,7 +91,7 @@ #ifdef GENERAL_DEBUG #define PRINTK(args...) printk(args) #else -#define PRINTK(args...) +#define PRINTK(args...) do {} while (0) #endif /* GENERAL_DEBUG */ #ifdef EXTRA_DEBUG diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig index b8313a04422db69495d547cd10a8ef39c88510a1..48efa7a047f3ec0e4149d24a836151928c717793 100644 --- a/drivers/auxdisplay/Kconfig +++ b/drivers/auxdisplay/Kconfig @@ -111,7 +111,7 @@ config CFAG12864B If unsure, say N. 
config CFAG12864B_RATE - int "Refresh rate (hertz)" + int "Refresh rate (hertz)" depends on CFAG12864B default "20" ---help--- @@ -329,7 +329,7 @@ config PANEL_LCD_PROTO config PANEL_LCD_PIN_E depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0" - int "Parallel port pin number & polarity connected to the LCD E signal (-17...17) " + int "Parallel port pin number & polarity connected to the LCD E signal (-17...17) " range -17 17 default 14 ---help--- @@ -344,7 +344,7 @@ config PANEL_LCD_PIN_E config PANEL_LCD_PIN_RS depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0" - int "Parallel port pin number & polarity connected to the LCD RS signal (-17...17) " + int "Parallel port pin number & polarity connected to the LCD RS signal (-17...17) " range -17 17 default 17 ---help--- @@ -359,7 +359,7 @@ config PANEL_LCD_PIN_RS config PANEL_LCD_PIN_RW depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0" - int "Parallel port pin number & polarity connected to the LCD RW signal (-17...17) " + int "Parallel port pin number & polarity connected to the LCD RW signal (-17...17) " range -17 17 default 16 ---help--- @@ -374,7 +374,7 @@ config PANEL_LCD_PIN_RW config PANEL_LCD_PIN_SCL depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0" - int "Parallel port pin number & polarity connected to the LCD SCL signal (-17...17) " + int "Parallel port pin number & polarity connected to the LCD SCL signal (-17...17) " range -17 17 default 1 ---help--- @@ -389,7 +389,7 @@ config PANEL_LCD_PIN_SCL config PANEL_LCD_PIN_SDA depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0" - int "Parallel port pin number & polarity connected to the LCD SDA signal (-17...17) " + int "Parallel port pin number & polarity connected to the LCD SDA signal (-17...17) " range -17 17 default 2 ---help--- @@ -404,12 +404,12 @@ config PANEL_LCD_PIN_SDA config PANEL_LCD_PIN_BL depends on PANEL_PROFILE="0" && PANEL_LCD="1" - int "Parallel port pin number & polarity connected to the LCD backlight signal (-17...17) " + int "Parallel port pin number & polarity connected to the LCD backlight signal (-17...17) " range -17 17 default 0 ---help--- This describes the number of the parallel port pin to which the LCD 'BL' signal - has been connected. It can be : + has been connected. 
It can be : 0 : no connection (eg: connected to ground) 1..17 : directly connected to any of these pins on the DB25 plug diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c index 874c259a88291880d368440437444dcf7e7b625d..c0da3820454b2ca944041d167e5f40857f0caeb9 100644 --- a/drivers/auxdisplay/charlcd.c +++ b/drivers/auxdisplay/charlcd.c @@ -88,7 +88,7 @@ struct charlcd_priv { int len; } esc_seq; - unsigned long long drvdata[0]; + unsigned long long drvdata[]; }; #define charlcd_to_priv(p) container_of(p, struct charlcd_priv, lcd) diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c index efb928e25aef35cac24fa7f4f240391277bf9e0b..1cce409ce5cacbc8a9a7ae271233c981b0ef2dcd 100644 --- a/drivers/auxdisplay/img-ascii-lcd.c +++ b/drivers/auxdisplay/img-ascii-lcd.c @@ -356,7 +356,6 @@ static int img_ascii_lcd_probe(struct platform_device *pdev) const struct of_device_id *match; const struct img_ascii_lcd_config *cfg; struct img_ascii_lcd_ctx *ctx; - struct resource *res; int err; match = of_match_device(img_ascii_lcd_matches, &pdev->dev); @@ -378,8 +377,7 @@ static int img_ascii_lcd_probe(struct platform_device *pdev) &ctx->offset)) return -EINVAL; } else { - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ctx->base = devm_ioremap_resource(&pdev->dev, res); + ctx->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ctx->base)) return PTR_ERR(ctx->base); } diff --git a/drivers/base/core.c b/drivers/base/core.c index 42a67245643234a8fb23614feb1d7a5f60f7c0c9..dbb0f9130f42d386ad815d37377782fd2695bb8a 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -718,6 +718,8 @@ static void __device_links_queue_sync_state(struct device *dev, { struct device_link *link; + if (!dev_has_sync_state(dev)) + return; if (dev->state_synced) return; @@ -745,25 +747,31 @@ static void __device_links_queue_sync_state(struct device *dev, /** * device_links_flush_sync_list - Call sync_state() on a list of devices * @list: List of devices to call sync_state() on + * @dont_lock_dev: Device for which lock is already held by the caller * * Calls sync_state() on all the devices that have been queued for it. This - * function is used in conjunction with __device_links_queue_sync_state(). + * function is used in conjunction with __device_links_queue_sync_state(). The + * @dont_lock_dev parameter is useful when this function is called from a + * context where a device lock is already held. 
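The dont_lock_dev pattern documented above, in miniature: when walking objects that each need their lock taken, skip re-acquiring the one lock the caller already holds, otherwise the walk would self-deadlock. Simplified stand-ins for struct device and device_lock():

    #include <pthread.h>

    struct dev {
            pthread_mutex_t lock;
    };

    static void sync_state(struct dev *d)
    {
            (void)d; /* d->lock is held when this runs */
    }

    static void flush_sync_list(struct dev **list, int n,
                                struct dev *dont_lock)
    {
            for (int i = 0; i < n; i++) {
                    struct dev *d = list[i];

                    if (d != dont_lock)
                            pthread_mutex_lock(&d->lock);
                    sync_state(d);
                    if (d != dont_lock)
                            pthread_mutex_unlock(&d->lock);
            }
    }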
*/ -static void device_links_flush_sync_list(struct list_head *list) +static void device_links_flush_sync_list(struct list_head *list, + struct device *dont_lock_dev) { struct device *dev, *tmp; list_for_each_entry_safe(dev, tmp, list, links.defer_sync) { list_del_init(&dev->links.defer_sync); - device_lock(dev); + if (dev != dont_lock_dev) + device_lock(dev); if (dev->bus->sync_state) dev->bus->sync_state(dev); else if (dev->driver && dev->driver->sync_state) dev->driver->sync_state(dev); - device_unlock(dev); + if (dev != dont_lock_dev) + device_unlock(dev); put_device(dev); } @@ -801,7 +809,7 @@ void device_links_supplier_sync_state_resume(void) out: device_links_write_unlock(); - device_links_flush_sync_list(&sync_list); + device_links_flush_sync_list(&sync_list, NULL); } static int sync_state_resume_initcall(void) @@ -813,7 +821,7 @@ late_initcall(sync_state_resume_initcall); static void __device_links_supplier_defer_sync(struct device *sup) { - if (list_empty(&sup->links.defer_sync)) + if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup)) list_add_tail(&sup->links.defer_sync, &deferred_sync); } @@ -865,6 +873,11 @@ void device_links_driver_bound(struct device *dev) driver_deferred_probe_add(link->consumer); } + if (defer_sync_state_count) + __device_links_supplier_defer_sync(dev); + else + __device_links_queue_sync_state(dev, &sync_list); + list_for_each_entry(link, &dev->links.suppliers, c_node) { if (!(link->flags & DL_FLAG_MANAGED)) continue; @@ -883,7 +896,7 @@ void device_links_driver_bound(struct device *dev) device_links_write_unlock(); - device_links_flush_sync_list(&sync_list); + device_links_flush_sync_list(&sync_list, dev); } static void device_link_drop_managed(struct device_link *link) diff --git a/drivers/base/memory.c b/drivers/base/memory.c index b9f474c1139302116c9639612ea7746e7427b4f1..4086718f6876ff4f3f1e9c897a14985b9bf21299 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -97,30 +97,13 @@ static ssize_t phys_index_show(struct device *dev, } /* - * Show whether the memory block is likely to be offlineable (or is already - * offline). Once offline, the memory block could be removed. The return - * value does, however, not indicate that there is a way to remove the - * memory block. + * Legacy interface that we cannot remove. Always indicate "removable" + * with CONFIG_MEMORY_HOTREMOVE - bad heuristic. 
*/ static ssize_t removable_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct memory_block *mem = to_memory_block(dev); - unsigned long pfn; - int ret = 1, i; - - if (mem->state != MEM_ONLINE) - goto out; - - for (i = 0; i < sections_per_block; i++) { - if (!present_section_nr(mem->start_section_nr + i)) - continue; - pfn = section_nr_to_pfn(mem->start_section_nr + i); - ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION); - } - -out: - return sprintf(buf, "%d\n", ret); + return sprintf(buf, "%d\n", (int)IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)); } /* diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 7fa654f1288b80ffa73e11c49f7d340695d5de9e..b5ce7b0857953e32c90743c39b920a9f61a89f2a 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -363,10 +363,10 @@ static void setup_pdev_dma_masks(struct platform_device *pdev) { if (!pdev->dev.coherent_dma_mask) pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); - if (!pdev->dma_mask) - pdev->dma_mask = DMA_BIT_MASK(32); - if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &pdev->dma_mask; + if (!pdev->dev.dma_mask) { + pdev->platform_dma_mask = DMA_BIT_MASK(32); + pdev->dev.dma_mask = &pdev->platform_dma_mask; + } }; /** @@ -662,20 +662,8 @@ struct platform_device *platform_device_register_full( pdev->dev.of_node_reused = pdevinfo->of_node_reused; if (pdevinfo->dma_mask) { - /* - * This memory isn't freed when the device is put, - * I don't have a nice idea for that though. Conceptually - * dma_mask in struct device should not be a pointer. - * See http://thread.gmane.org/gmane.linux.kernel.pci/9081 - */ - pdev->dev.dma_mask = - kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL); - if (!pdev->dev.dma_mask) - goto err; - - kmemleak_ignore(pdev->dev.dma_mask); - - *pdev->dev.dma_mask = pdevinfo->dma_mask; + pdev->platform_dma_mask = pdevinfo->dma_mask; + pdev->dev.dma_mask = &pdev->platform_dma_mask; pdev->dev.coherent_dma_mask = pdevinfo->dma_mask; } @@ -700,7 +688,6 @@ struct platform_device *platform_device_register_full( if (ret) { err: ACPI_COMPANION_SET(&pdev->dev, NULL); - kfree(pdev->dev.dma_mask); platform_device_put(pdev); return ERR_PTR(ret); } diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c index 0b081dee1e95cce1b71b76131fce56d1d1b6f3b3..de8d3543e8fe347be2ffc2f3afdedd4ef84fd35c 100644 --- a/drivers/base/swnode.c +++ b/drivers/base/swnode.c @@ -608,6 +608,13 @@ static void software_node_release(struct kobject *kobj) { struct swnode *swnode = kobj_to_swnode(kobj); + if (swnode->parent) { + ida_simple_remove(&swnode->parent->child_ids, swnode->id); + list_del(&swnode->entry); + } else { + ida_simple_remove(&swnode_root_ids, swnode->id); + } + if (swnode->allocated) { property_entries_free(swnode->node->properties); kfree(swnode->node); @@ -773,13 +780,6 @@ void fwnode_remove_software_node(struct fwnode_handle *fwnode) if (!swnode) return; - if (swnode->parent) { - ida_simple_remove(&swnode->parent->child_ids, swnode->id); - list_del(&swnode->entry); - } else { - ida_simple_remove(&swnode_root_ids, swnode->id); - } - kobject_put(&swnode->kobj); } EXPORT_SYMBOL_GPL(fwnode_remove_software_node); diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index cd3612e4e2e14317ca506e24f00c4f9a6a2f9a73..8ef65c0856407cfbf780e370bf24a5141d8938f5 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -853,14 +853,17 @@ static void reset_fdc_info(int mode) /* selects the fdc and drive, and enables the fdc's input/dma. 
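The set_fdc() change that follows is a validate-then-commit fix: the new controller index is computed into a local, range-checked against N_FDC, and only then stored to the global, so an out-of-range value can never be used for indexing. The same shape standalone (the drive-to-controller mapping here is illustrative):

    #define N_FDC   2
    #define N_DRIVE 8

    static unsigned int fdc; /* global controller index, as in floppy.c */

    static int set_controller(int drive)
    {
            unsigned int new_fdc = fdc;

            if (drive >= 0 && drive < N_DRIVE)
                    new_fdc = drive >> 2; /* illustrative FDC() mapping */

            if (new_fdc >= N_FDC)
                    return -1; /* reject before touching the global */

            fdc = new_fdc;
            return 0;
    }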
*/ static void set_fdc(int drive) { + unsigned int new_fdc = fdc; + if (drive >= 0 && drive < N_DRIVE) { - fdc = FDC(drive); + new_fdc = FDC(drive); current_drive = drive; } - if (fdc != 1 && fdc != 0) { + if (new_fdc >= N_FDC) { pr_info("bad fdc value\n"); return; } + fdc = new_fdc; set_dor(fdc, ~0, 8); #if N_FDC > 1 set_dor(1 - fdc, ~8, 0); diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h index bc837862b7679ac88b0e8ef15d33e61f388114f5..62b660821dbcc19b390712b4088fc7141a4b27c2 100644 --- a/drivers/block/null_blk.h +++ b/drivers/block/null_blk.h @@ -14,9 +14,6 @@ #include struct nullb_cmd { - struct list_head list; - struct llist_node ll_list; - struct __call_single_data csd; struct request *rq; struct bio *bio; unsigned int tag; diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c index 16510795e37720de6b4c233bbd9c0f4bcf798e05..133060431dbdb2f2c058876c75072571c9141c41 100644 --- a/drivers/block/null_blk_main.c +++ b/drivers/block/null_blk_main.c @@ -1518,8 +1518,6 @@ static int setup_commands(struct nullb_queue *nq) for (i = 0; i < nq->queue_depth; i++) { cmd = &nq->cmds[i]; - INIT_LIST_HEAD(&cmd->list); - cmd->ll_list.next = NULL; cmd->tag = -1U; } diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index 117cfc8cd05a4c509876d137bf7fb3e2bd272382..cda5cf917e9afaee700470b00e26d8aaeb7fffef 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c @@ -276,7 +276,7 @@ static const struct block_device_operations pcd_bdops = { .release = pcd_block_release, .ioctl = pcd_block_ioctl, #ifdef CONFIG_COMPAT - .ioctl = blkdev_compat_ptr_ioctl, + .compat_ioctl = blkdev_compat_ptr_ioctl, #endif .check_events = pcd_block_check_events, }; diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 54158766334b20cb7eeb0a5ead976843caa3df40..0736248999b0da7f81d81a1c9525483759444634 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -245,13 +245,20 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx, err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num); if (err) { virtqueue_kick(vblk->vqs[qid].vq); - blk_mq_stop_hw_queue(hctx); + /* Don't stop the queue if -ENOMEM: we may have failed to + * bounce the buffer due to global resource outage. + */ + if (err == -ENOSPC) + blk_mq_stop_hw_queue(hctx); spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); - /* Out of mem doesn't actually happen, since we fall back - * to direct descriptors */ - if (err == -ENOMEM || err == -ENOSPC) + switch (err) { + case -ENOSPC: return BLK_STS_DEV_RESOURCE; - return BLK_STS_IOERR; + case -ENOMEM: + return BLK_STS_RESOURCE; + default: + return BLK_STS_IOERR; + } } if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq)) diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index e2ad6bba2281e79067066b5a5f05bc2f7818b3cc..9df516a56bb2f80ee38f2fefecf5aac599e4fa11 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -213,6 +213,7 @@ struct blkfront_info struct blk_mq_tag_set tag_set; struct blkfront_ring_info *rinfo; unsigned int nr_rings; + unsigned int rinfo_size; /* Save uncomplete reqs and bios for migration. 
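Isolating the virtio_blk error mapping above: -ENOSPC means the virtqueue itself is full, a per-device condition, so the hardware queue is stopped and the request requeued until this device makes progress; -ENOMEM is a transient global bounce-buffer outage, so the queue keeps running and the request is simply retried later. Status names below are illustrative stand-ins for the blk_status_t codes:

    #include <errno.h>
    #include <stdbool.h>

    enum blk_status {
            STS_OK,
            STS_RESOURCE,     /* transient global shortage: retry later   */
            STS_DEV_RESOURCE, /* per-device: retry when it makes progress */
            STS_IOERR,        /* hard failure                             */
    };

    static enum blk_status map_add_req_error(int err, bool *stop_hw_queue)
    {
            /* Only a full virtqueue justifies stopping this HW queue. */
            *stop_hw_queue = (err == -ENOSPC);

            switch (err) {
            case -ENOSPC:
                    return STS_DEV_RESOURCE;
            case -ENOMEM:
                    return STS_RESOURCE;
            default:
                    return STS_IOERR;
            }
    }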
*/ struct list_head requests; struct bio_list bio_list; @@ -259,6 +260,18 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo); static void blkfront_gather_backend_features(struct blkfront_info *info); static int negotiate_mq(struct blkfront_info *info); +#define for_each_rinfo(info, ptr, idx) \ + for ((ptr) = (info)->rinfo, (idx) = 0; \ + (idx) < (info)->nr_rings; \ + (idx)++, (ptr) = (void *)(ptr) + (info)->rinfo_size) + +static inline struct blkfront_ring_info * +get_rinfo(const struct blkfront_info *info, unsigned int i) +{ + BUG_ON(i >= info->nr_rings); + return (void *)info->rinfo + i * info->rinfo_size; +} + static int get_id_from_freelist(struct blkfront_ring_info *rinfo) { unsigned long free = rinfo->shadow_free; @@ -883,8 +896,7 @@ static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx, struct blkfront_info *info = hctx->queue->queuedata; struct blkfront_ring_info *rinfo = NULL; - BUG_ON(info->nr_rings <= qid); - rinfo = &info->rinfo[qid]; + rinfo = get_rinfo(info, qid); blk_mq_start_request(qd->rq); spin_lock_irqsave(&rinfo->ring_lock, flags); if (RING_FULL(&rinfo->ring)) @@ -1181,6 +1193,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity, static void xlvbd_release_gendisk(struct blkfront_info *info) { unsigned int minor, nr_minors, i; + struct blkfront_ring_info *rinfo; if (info->rq == NULL) return; @@ -1188,9 +1201,7 @@ static void xlvbd_release_gendisk(struct blkfront_info *info) /* No more blkif_request(). */ blk_mq_stop_hw_queues(info->rq); - for (i = 0; i < info->nr_rings; i++) { - struct blkfront_ring_info *rinfo = &info->rinfo[i]; - + for_each_rinfo(info, rinfo, i) { /* No more gnttab callback work. */ gnttab_cancel_free_callback(&rinfo->callback); @@ -1339,6 +1350,7 @@ free_shadow: static void blkif_free(struct blkfront_info *info, int suspend) { unsigned int i; + struct blkfront_ring_info *rinfo; /* Prevent new requests being issued until we fix things up. */ info->connected = suspend ? @@ -1347,8 +1359,8 @@ static void blkif_free(struct blkfront_info *info, int suspend) if (info->rq) blk_mq_stop_hw_queues(info->rq); - for (i = 0; i < info->nr_rings; i++) - blkif_free_ring(&info->rinfo[i]); + for_each_rinfo(info, rinfo, i) + blkif_free_ring(rinfo); kvfree(info->rinfo); info->rinfo = NULL; @@ -1775,6 +1787,7 @@ static int talk_to_blkback(struct xenbus_device *dev, int err; unsigned int i, max_page_order; unsigned int ring_page_order; + struct blkfront_ring_info *rinfo; if (!info) return -ENODEV; @@ -1788,9 +1801,7 @@ static int talk_to_blkback(struct xenbus_device *dev, if (err) goto destroy_blkring; - for (i = 0; i < info->nr_rings; i++) { - struct blkfront_ring_info *rinfo = &info->rinfo[i]; - + for_each_rinfo(info, rinfo, i) { /* Create shared ring, alloc event channel. 
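The for_each_rinfo() macro above exists because each blkfront_ring_info ends in a flexible array sized at runtime via struct_size(), so striding by sizeof(element) would be wrong; iteration must advance by the stored per-element byte size. The idea in isolation (the kernel version uses GCC's void-pointer arithmetic; plain char-pointer arithmetic is used here):

    #include <stddef.h>

    struct ring {
            int id;
            unsigned long shadow[];   /* runtime-sized flexible array */
    };

    struct info {
            struct ring *rings;       /* one allocation: nr * ring_size bytes */
            unsigned int nr;
            size_t ring_size;         /* full per-element size, incl. shadow */
    };

    #define for_each_ring(info, ptr, idx)                                \
            for ((ptr) = (info)->rings, (idx) = 0;                       \
                 (idx) < (info)->nr;                                     \
                 (idx)++,                                                \
                 (ptr) = (struct ring *)((char *)(ptr) + (info)->ring_size))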
*/ err = setup_blkring(dev, rinfo); if (err) @@ -1815,7 +1826,7 @@ again: /* We already got the number of queues/rings in _probe */ if (info->nr_rings == 1) { - err = write_per_ring_nodes(xbt, &info->rinfo[0], dev->nodename); + err = write_per_ring_nodes(xbt, info->rinfo, dev->nodename); if (err) goto destroy_blkring; } else { @@ -1837,10 +1848,10 @@ again: goto abort_transaction; } - for (i = 0; i < info->nr_rings; i++) { + for_each_rinfo(info, rinfo, i) { memset(path, 0, pathsize); snprintf(path, pathsize, "%s/queue-%u", dev->nodename, i); - err = write_per_ring_nodes(xbt, &info->rinfo[i], path); + err = write_per_ring_nodes(xbt, rinfo, path); if (err) { kfree(path); goto destroy_blkring; @@ -1868,9 +1879,8 @@ again: goto destroy_blkring; } - for (i = 0; i < info->nr_rings; i++) { + for_each_rinfo(info, rinfo, i) { unsigned int j; - struct blkfront_ring_info *rinfo = &info->rinfo[i]; for (j = 0; j < BLK_RING_SIZE(info); j++) rinfo->shadow[j].req.u.rw.id = j + 1; @@ -1900,6 +1910,7 @@ static int negotiate_mq(struct blkfront_info *info) { unsigned int backend_max_queues; unsigned int i; + struct blkfront_ring_info *rinfo; BUG_ON(info->nr_rings); @@ -1911,20 +1922,16 @@ static int negotiate_mq(struct blkfront_info *info) if (!info->nr_rings) info->nr_rings = 1; - info->rinfo = kvcalloc(info->nr_rings, - struct_size(info->rinfo, shadow, - BLK_RING_SIZE(info)), - GFP_KERNEL); + info->rinfo_size = struct_size(info->rinfo, shadow, + BLK_RING_SIZE(info)); + info->rinfo = kvcalloc(info->nr_rings, info->rinfo_size, GFP_KERNEL); if (!info->rinfo) { xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure"); info->nr_rings = 0; return -ENOMEM; } - for (i = 0; i < info->nr_rings; i++) { - struct blkfront_ring_info *rinfo; - - rinfo = &info->rinfo[i]; + for_each_rinfo(info, rinfo, i) { INIT_LIST_HEAD(&rinfo->indirect_pages); INIT_LIST_HEAD(&rinfo->grants); rinfo->dev_info = info; @@ -2017,6 +2024,7 @@ static int blkif_recover(struct blkfront_info *info) int rc; struct bio *bio; unsigned int segs; + struct blkfront_ring_info *rinfo; blkfront_gather_backend_features(info); /* Reset limits changed by blk_mq_update_nr_hw_queues(). */ @@ -2024,9 +2032,7 @@ static int blkif_recover(struct blkfront_info *info) segs = info->max_indirect_segments ? 
: BLKIF_MAX_SEGMENTS_PER_REQUEST; blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG); - for (r_index = 0; r_index < info->nr_rings; r_index++) { - struct blkfront_ring_info *rinfo = &info->rinfo[r_index]; - + for_each_rinfo(info, rinfo, r_index) { rc = blkfront_setup_indirect(rinfo); if (rc) return rc; @@ -2036,10 +2042,7 @@ static int blkif_recover(struct blkfront_info *info) /* Now safe for us to use the shared ring */ info->connected = BLKIF_STATE_CONNECTED; - for (r_index = 0; r_index < info->nr_rings; r_index++) { - struct blkfront_ring_info *rinfo; - - rinfo = &info->rinfo[r_index]; + for_each_rinfo(info, rinfo, r_index) { /* Kick any other new requests queued since we resumed */ kick_pending_request_queues(rinfo); } @@ -2072,13 +2075,13 @@ static int blkfront_resume(struct xenbus_device *dev) struct blkfront_info *info = dev_get_drvdata(&dev->dev); int err = 0; unsigned int i, j; + struct blkfront_ring_info *rinfo; dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename); bio_list_init(&info->bio_list); INIT_LIST_HEAD(&info->requests); - for (i = 0; i < info->nr_rings; i++) { - struct blkfront_ring_info *rinfo = &info->rinfo[i]; + for_each_rinfo(info, rinfo, i) { struct bio_list merge_bio; struct blk_shadow *shadow = rinfo->shadow; @@ -2337,6 +2340,7 @@ static void blkfront_connect(struct blkfront_info *info) unsigned int binfo; char *envp[] = { "RESIZE=1", NULL }; int err, i; + struct blkfront_ring_info *rinfo; switch (info->connected) { case BLKIF_STATE_CONNECTED: @@ -2394,8 +2398,8 @@ static void blkfront_connect(struct blkfront_info *info) "physical-sector-size", sector_size); blkfront_gather_backend_features(info); - for (i = 0; i < info->nr_rings; i++) { - err = blkfront_setup_indirect(&info->rinfo[i]); + for_each_rinfo(info, rinfo, i) { + err = blkfront_setup_indirect(rinfo); if (err) { xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s", info->xbdev->otherend); @@ -2416,8 +2420,8 @@ static void blkfront_connect(struct blkfront_info *info) /* Kick pending requests. 
*/ info->connected = BLKIF_STATE_CONNECTED; - for (i = 0; i < info->nr_rings; i++) - kick_pending_request_queues(&info->rinfo[i]); + for_each_rinfo(info, rinfo, i) + kick_pending_request_queues(rinfo); device_add_disk(&info->xbdev->dev, info->gd, NULL); @@ -2652,9 +2656,9 @@ static void purge_persistent_grants(struct blkfront_info *info) { unsigned int i; unsigned long flags; + struct blkfront_ring_info *rinfo; - for (i = 0; i < info->nr_rings; i++) { - struct blkfront_ring_info *rinfo = &info->rinfo[i]; + for_each_rinfo(info, rinfo, i) { struct grant *gnt_list_entry, *tmp; spin_lock_irqsave(&rinfo->ring_lock, flags); diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c index be79d6c6a4e458bc330467af1c0e150125526242..1bb00a959c67f28c15b0928cb70a4f44237acffc 100644 --- a/drivers/bus/sunxi-rsb.c +++ b/drivers/bus/sunxi-rsb.c @@ -345,7 +345,7 @@ static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr, if (ret) goto unlock; - *buf = readl(rsb->regs + RSB_DATA); + *buf = readl(rsb->regs + RSB_DATA) & GENMASK(len * 8 - 1, 0); unlock: mutex_unlock(&rsb->lock); diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index f702c85c81b657ec5e0dfe0712ab32e309028fa0..440019655fbbe6bba864d0a8438543d0356b2e17 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -1266,6 +1266,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK("gpu", 0x50000000, 0x14, -1, -1, 0x00010201, 0xffffffff, 0), SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -1, 0x40000000 , 0xffffffff, SYSC_MODULE_QUIRK_SGX), + SYSC_QUIRK("lcdc", 0, 0, 0x54, -1, 0x4f201000, 0xffffffff, + SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050, 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -1, 0x4ea2080d, 0xffffffff, @@ -1294,7 +1296,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK("gpu", 0, 0xfe00, 0xfe10, -1, 0x40000000 , 0xffffffff, 0), SYSC_QUIRK("hsi", 0, 0, 0x10, 0x14, 0x50043101, 0xffffffff, 0), SYSC_QUIRK("iss", 0, 0, 0x10, -1, 0x40000101, 0xffffffff, 0), - SYSC_QUIRK("lcdc", 0, 0, 0x54, -1, 0x4f201000, 0xffffffff, 0), SYSC_QUIRK("mcasp", 0, 0, 0x4, -1, 0x44306302, 0xffffffff, 0), SYSC_QUIRK("mcasp", 0, 0, 0x4, -1, 0x44307b02, 0xffffffff, 0), SYSC_QUIRK("mcbsp", 0, -1, 0x8c, -1, 0, 0, 0), @@ -1400,7 +1401,7 @@ static void sysc_init_revision_quirks(struct sysc *ddata) } /* 1-wire needs module's internal clocks enabled for reset */ -static void sysc_clk_enable_quirk_hdq1w(struct sysc *ddata) +static void sysc_pre_reset_quirk_hdq1w(struct sysc *ddata) { int offset = 0x0c; /* HDQ_CTRL_STATUS */ u16 val; @@ -1488,7 +1489,7 @@ static void sysc_init_module_quirks(struct sysc *ddata) return; if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_HDQ1W) { - ddata->clk_enable_quirk = sysc_clk_enable_quirk_hdq1w; + ddata->clk_disable_quirk = sysc_pre_reset_quirk_hdq1w; return; } diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index 886b2638c730308a6dce2061b18a06b04845fa18..c51292c2a131e0133872258e11470b6c5c58b73a 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c @@ -519,7 +519,7 @@ static const struct block_device_operations gdrom_bdops = { .check_events = gdrom_bdops_check_events, .ioctl = gdrom_bdops_ioctl, #ifdef CONFIG_COMPAT - .ioctl = blkdev_compat_ptr_ioctl, + .compat_ioctl = blkdev_compat_ptr_ioctl, #endif }; diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c index 
c78127ccbc0dd019b34ec784f439179c3d91d466..638c693e17adab63bc46a5b6979902865c34e8a2 100644 --- a/drivers/char/ipmi/ipmi_si_platform.c +++ b/drivers/char/ipmi/ipmi_si_platform.c @@ -194,7 +194,7 @@ static int platform_ipmi_probe(struct platform_device *pdev) else io.slave_addr = slave_addr; - io.irq = platform_get_irq(pdev, 0); + io.irq = platform_get_irq_optional(pdev, 0); if (io.irq > 0) io.irq_setup = ipmi_std_irq_setup; else @@ -378,7 +378,7 @@ static int acpi_ipmi_probe(struct platform_device *pdev) io.irq = tmp; io.irq_setup = acpi_gpe_irq_setup; } else { - int irq = platform_get_irq(pdev, 0); + int irq = platform_get_irq_optional(pdev, 0); if (irq > 0) { io.irq = irq; diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile index 5a0d99d4fec0b1870e9f7b63e5e9e4444aa6950c..9567e5197f740f3c3b3ffeadc0495c928cd94399 100644 --- a/drivers/char/tpm/Makefile +++ b/drivers/char/tpm/Makefile @@ -21,9 +21,11 @@ tpm-$(CONFIG_EFI) += eventlog/efi.o tpm-$(CONFIG_OF) += eventlog/of.o obj-$(CONFIG_TCG_TIS_CORE) += tpm_tis_core.o obj-$(CONFIG_TCG_TIS) += tpm_tis.o -obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi_mod.o -tpm_tis_spi_mod-y := tpm_tis_spi.o -tpm_tis_spi_mod-$(CONFIG_TCG_TIS_SPI_CR50) += tpm_tis_spi_cr50.o + +obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi.o +tpm_tis_spi-y := tpm_tis_spi_main.o +tpm_tis_spi-$(CONFIG_TCG_TIS_SPI_CR50) += tpm_tis_spi_cr50.o + obj-$(CONFIG_TCG_TIS_I2C_ATMEL) += tpm_i2c_atmel.o obj-$(CONFIG_TCG_TIS_I2C_INFINEON) += tpm_i2c_infineon.o obj-$(CONFIG_TCG_TIS_I2C_NUVOTON) += tpm_i2c_nuvoton.o diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index 13696deceae8e7fb73862ea99d731a58fe647f67..760329598b9960855f42119ed01098acca312057 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c @@ -525,6 +525,8 @@ static int tpm2_init_bank_info(struct tpm_chip *chip, u32 bank_index) return 0; } + bank->crypto_id = HASH_ALGO__LAST; + return tpm2_pcr_read(chip, 0, &digest, &bank->digest_size); } diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi_main.c similarity index 100% rename from drivers/char/tpm/tpm_tis_spi.c rename to drivers/char/tpm/tpm_tis_spi_main.c diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index f0f2b599fd7e90a5faacc1499f28aa058ef79d0d..95adf6c6db3db641d5be25d8573abd4232f309d4 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -4713,7 +4713,7 @@ EXPORT_SYMBOL(of_clk_get_by_name); * * Returns: The number of clocks that are possible parents of this node */ -unsigned int of_clk_get_parent_count(struct device_node *np) +unsigned int of_clk_get_parent_count(const struct device_node *np) { int count; @@ -4725,7 +4725,7 @@ unsigned int of_clk_get_parent_count(struct device_node *np) } EXPORT_SYMBOL_GPL(of_clk_get_parent_count); -const char *of_clk_get_parent_name(struct device_node *np, int index) +const char *of_clk_get_parent_name(const struct device_node *np, int index) { struct of_phandle_args clkspec; struct property *prop; diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c index f6c120cca0d43d304bed3d2016d19f5d08e5445f..cf192907b7dc869c50e5ea4dbb45d74ef2dc495d 100644 --- a/drivers/clk/imx/clk-imx8mp.c +++ b/drivers/clk/imx/clk-imx8mp.c @@ -560,7 +560,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev) hws[IMX8MP_CLK_MEDIA_AXI] = imx8m_clk_hw_composite("media_axi", imx8mp_media_axi_sels, ccm_base + 0x8a00); hws[IMX8MP_CLK_MEDIA_APB] = imx8m_clk_hw_composite("media_apb", imx8mp_media_apb_sels, ccm_base + 0x8a80); hws[IMX8MP_CLK_HDMI_APB] = 
imx8m_clk_hw_composite("hdmi_apb", imx8mp_media_apb_sels, ccm_base + 0x8b00); - hws[IMX8MP_CLK_HDMI_AXI] = imx8m_clk_hw_composite("hdmi_axi", imx8mp_media_apb_sels, ccm_base + 0x8b80); + hws[IMX8MP_CLK_HDMI_AXI] = imx8m_clk_hw_composite("hdmi_axi", imx8mp_media_axi_sels, ccm_base + 0x8b80); hws[IMX8MP_CLK_GPU_AXI] = imx8m_clk_hw_composite("gpu_axi", imx8mp_gpu_axi_sels, ccm_base + 0x8c00); hws[IMX8MP_CLK_GPU_AHB] = imx8m_clk_hw_composite("gpu_ahb", imx8mp_gpu_ahb_sels, ccm_base + 0x8c80); hws[IMX8MP_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mp_noc_sels, ccm_base + 0x8d00); @@ -686,7 +686,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev) hws[IMX8MP_CLK_CAN1_ROOT] = imx_clk_hw_gate2("can1_root_clk", "can1", ccm_base + 0x4350, 0); hws[IMX8MP_CLK_CAN2_ROOT] = imx_clk_hw_gate2("can2_root_clk", "can2", ccm_base + 0x4360, 0); hws[IMX8MP_CLK_SDMA1_ROOT] = imx_clk_hw_gate4("sdma1_root_clk", "ipg_root", ccm_base + 0x43a0, 0); - hws[IMX8MP_CLK_ENET_QOS_ROOT] = imx_clk_hw_gate4("enet_qos_root_clk", "enet_axi", ccm_base + 0x43b0, 0); + hws[IMX8MP_CLK_ENET_QOS_ROOT] = imx_clk_hw_gate4("enet_qos_root_clk", "sim_enet_root_clk", ccm_base + 0x43b0, 0); hws[IMX8MP_CLK_SIM_ENET_ROOT] = imx_clk_hw_gate4("sim_enet_root_clk", "enet_axi", ccm_base + 0x4400, 0); hws[IMX8MP_CLK_GPU2D_ROOT] = imx_clk_hw_gate4("gpu2d_root_clk", "gpu2d_div", ccm_base + 0x4450, 0); hws[IMX8MP_CLK_GPU3D_ROOT] = imx_clk_hw_gate4("gpu3d_root_clk", "gpu3d_core_div", ccm_base + 0x4460, 0); diff --git a/drivers/clk/imx/clk-scu.c b/drivers/clk/imx/clk-scu.c index fbef740704d0c10a9972a60206cb0e7b0ce1dc55..b8b2072742a5608ff6fe7abf0bfbf9a34fdca4f0 100644 --- a/drivers/clk/imx/clk-scu.c +++ b/drivers/clk/imx/clk-scu.c @@ -43,12 +43,12 @@ struct imx_sc_msg_req_set_clock_rate { __le32 rate; __le16 resource; u8 clk; -} __packed; +} __packed __aligned(4); struct req_get_clock_rate { __le16 resource; u8 clk; -} __packed; +} __packed __aligned(4); struct resp_get_clock_rate { __le32 rate; @@ -84,7 +84,7 @@ struct imx_sc_msg_get_clock_parent { struct req_get_clock_parent { __le16 resource; u8 clk; - } __packed req; + } __packed __aligned(4) req; struct resp_get_clock_parent { u8 parent; } resp; @@ -121,7 +121,7 @@ struct imx_sc_msg_req_clock_enable { u8 clk; u8 enable; u8 autog; -} __packed; +} __packed __aligned(4); static inline struct clk_scu *to_clk_scu(struct clk_hw *hw) { diff --git a/drivers/clk/qcom/dispcc-sc7180.c b/drivers/clk/qcom/dispcc-sc7180.c index dd7af41e47eb96b7180410927852782e6873b850..0a5d395bce93584f85bca6a0fb390147eda26aee 100644 --- a/drivers/clk/qcom/dispcc-sc7180.c +++ b/drivers/clk/qcom/dispcc-sc7180.c @@ -592,24 +592,6 @@ static struct clk_branch disp_cc_mdss_rot_clk = { }, }; -static struct clk_branch disp_cc_mdss_rscc_ahb_clk = { - .halt_reg = 0x400c, - .halt_check = BRANCH_HALT, - .clkr = { - .enable_reg = 0x400c, - .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data){ - .name = "disp_cc_mdss_rscc_ahb_clk", - .parent_data = &(const struct clk_parent_data){ - .hw = &disp_cc_mdss_ahb_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, - .ops = &clk_branch2_ops, - }, - }, -}; - static struct clk_branch disp_cc_mdss_rscc_vsync_clk = { .halt_reg = 0x4008, .halt_check = BRANCH_HALT, @@ -687,7 +669,6 @@ static struct clk_regmap *disp_cc_sc7180_clocks[] = { [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr, [DISP_CC_MDSS_ROT_CLK] = &disp_cc_mdss_rot_clk.clkr, [DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr, - 
[DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr, [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr, [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr, [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr, diff --git a/drivers/clk/qcom/videocc-sc7180.c b/drivers/clk/qcom/videocc-sc7180.c index c363c3cc544e2cd796d3678ad364b069b5368c81..276e5ecd48403f41914792981dd3c015e7a4c9fa 100644 --- a/drivers/clk/qcom/videocc-sc7180.c +++ b/drivers/clk/qcom/videocc-sc7180.c @@ -97,7 +97,7 @@ static struct clk_branch video_cc_vcodec0_axi_clk = { static struct clk_branch video_cc_vcodec0_core_clk = { .halt_reg = 0x890, - .halt_check = BRANCH_HALT, + .halt_check = BRANCH_HALT_VOTED, .clkr = { .enable_reg = 0x890, .enable_mask = BIT(0), diff --git a/drivers/clk/ti/clk-43xx.c b/drivers/clk/ti/clk-43xx.c index af3e7805769e259405838de531133ae86b514eeb..e5538d577ce57a61f27843af95fbcf030ca7317b 100644 --- a/drivers/clk/ti/clk-43xx.c +++ b/drivers/clk/ti/clk-43xx.c @@ -78,7 +78,7 @@ static const struct omap_clkctrl_reg_data am4_gfx_l3_clkctrl_regs[] __initconst }; static const struct omap_clkctrl_reg_data am4_l4_rtc_clkctrl_regs[] __initconst = { - { AM4_L4_RTC_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clk_32768_ck" }, + { AM4_L4_RTC_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clkdiv32k_ick" }, { 0 }, }; diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c index 9d808d595ca85e22ce8b0f9471c53e91a6a99446..eb0ba7818eb042c2d83f7809e910d2f7eeb95bba 100644 --- a/drivers/clocksource/hyperv_timer.c +++ b/drivers/clocksource/hyperv_timer.c @@ -343,7 +343,8 @@ static u64 notrace read_hv_clock_tsc_cs(struct clocksource *arg) static u64 read_hv_sched_clock_tsc(void) { - return read_hv_clock_tsc() - hv_sched_clock_offset; + return (read_hv_clock_tsc() - hv_sched_clock_offset) * + (NSEC_PER_SEC / HV_CLOCK_HZ); } static void suspend_hv_clock_tsc(struct clocksource *arg) @@ -398,7 +399,8 @@ static u64 notrace read_hv_clock_msr_cs(struct clocksource *arg) static u64 read_hv_sched_clock_msr(void) { - return read_hv_clock_msr() - hv_sched_clock_offset; + return (read_hv_clock_msr() - hv_sched_clock_offset) * + (NSEC_PER_SEC / HV_CLOCK_HZ); } static struct clocksource hyperv_cs_msr = { diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index cbe6c94bf158648ac0240037b5aac0fac8f38422..808874bccf4ace34791e520b8d5bde8854d1dbb4 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1076,9 +1076,17 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy) pol = policy->last_policy; } else if (def_gov) { pol = cpufreq_parse_policy(def_gov->name); - } else { - return -ENODATA; + /* + * In case the default governor is neither "performance" + * nor "powersave", fall back to the initial policy + * value set by the driver.
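+ * For example, if the default governor is "schedutil", + * cpufreq_parse_policy() is expected to return + * CPUFREQ_POLICY_UNKNOWN here, and pol keeps the value the + * driver set at init.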
+ */ + if (pol == CPUFREQ_POLICY_UNKNOWN) + pol = policy->policy; } + if (pol != CPUFREQ_POLICY_PERFORMANCE && + pol != CPUFREQ_POLICY_POWERSAVE) + return -ENODATA; } return cpufreq_set_policy(policy, gov, pol); diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index cceee8bc3c2f745a02ab7b648d29458cbeaf2283..7dcf2093e5316a229dd9396feb73011a3f4f5fe3 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -738,7 +738,6 @@ struct devfreq *devfreq_add_device(struct device *dev, { struct devfreq *devfreq; struct devfreq_governor *governor; - static atomic_t devfreq_no = ATOMIC_INIT(-1); int err = 0; if (!dev || !profile || !governor_name) { @@ -800,8 +799,7 @@ struct devfreq *devfreq_add_device(struct device *dev, devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev); atomic_set(&devfreq->suspend_count, 0); - dev_set_name(&devfreq->dev, "devfreq%d", - atomic_inc_return(&devfreq_no)); + dev_set_name(&devfreq->dev, "%s", dev_name(dev)); err = device_register(&devfreq->dev); if (err) { mutex_unlock(&devfreq->lock); diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index f4ace9af2191bf954be9224f157b23b904822190..ccc9eda1bc282851a6b4d2777297de271b03b029 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -108,6 +108,7 @@ static int dma_buf_release(struct inode *inode, struct file *file) dma_resv_fini(dmabuf->resv); module_put(dmabuf->owner); + kfree(dmabuf->name); kfree(dmabuf); return 0; } diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index e51d836afcc7765d37ad5b084404aeebd13a8d69..1092d4ce723e436e7f3971bafcbeb084e505b12f 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -1947,8 +1947,6 @@ static void dma_tc_handle(struct coh901318_chan *cohc) return; } - spin_lock(&cohc->lock); - /* * When we reach this point, at least one queue item * should have been moved over from cohc->queue to @@ -1969,8 +1967,6 @@ static void dma_tc_handle(struct coh901318_chan *cohc) if (coh901318_queue_start(cohc) == NULL) cohc->busy = 0; - spin_unlock(&cohc->lock); - /* * This tasklet will remove items from cohc->active * and thus terminates them. 
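The coh901318 hunk above deletes a spin_lock()/spin_unlock() pair from dma_tc_handle(); a minimal sketch of the self-deadlock the removal avoids, assuming (as the change implies) that cohc->lock is already held when dma_tc_handle() runs:

	/* caller, e.g. the IRQ path, already owns the channel lock */
	spin_lock(&cohc->lock);
	dma_tc_handle(cohc);	/* used to take cohc->lock again, deadlocking
				 * on a non-recursive spinlock */
	spin_unlock(&cohc->lock);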
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index c3b1283b6d31a4c948a3060432e8d0cb0d0c350b..17909fd1820ffc7fc4c3bf74aaab22dec682234c 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -1151,7 +1151,7 @@ int dma_async_device_register(struct dma_device *device) } if (!device->device_release) - dev_warn(device->dev, + dev_dbg(device->dev, "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n"); kref_init(&device->ref); diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c index 1d7347825b95a8a21d0fec1bda397a6093509ffc..989b7a25ca6144d34df05e6ab92fda3089812096 100644 --- a/drivers/dma/idxd/cdev.c +++ b/drivers/dma/idxd/cdev.c @@ -81,9 +81,9 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp) dev = &idxd->pdev->dev; idxd_cdev = &wq->idxd_cdev; - dev_dbg(dev, "%s called\n", __func__); + dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq)); - if (idxd_wq_refcount(wq) > 1 && wq_dedicated(wq)) + if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq)) return -EBUSY; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); @@ -204,6 +204,7 @@ static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq) minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL); if (minor < 0) { rc = minor; + kfree(dev); goto ida_err; } @@ -212,7 +213,6 @@ static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq) rc = device_register(dev); if (rc < 0) { dev_err(&idxd->pdev->dev, "device register failed\n"); - put_device(dev); goto dev_reg_err; } idxd_cdev->minor = minor; @@ -221,8 +221,8 @@ static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq) dev_reg_err: ida_simple_remove(&cdev_ctx->minor_ida, MINOR(dev->devt)); + put_device(dev); ida_err: - kfree(dev); idxd_cdev->dev = NULL; return rc; } diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c index 6d907fe150aa4d508337b6f86d38da237f456318..6ca6e520a2fa4943e2b895f1816408de77d6c6e2 100644 --- a/drivers/dma/idxd/sysfs.c +++ b/drivers/dma/idxd/sysfs.c @@ -124,6 +124,7 @@ static int idxd_config_bus_probe(struct device *dev) rc = idxd_device_config(idxd); if (rc < 0) { spin_unlock_irqrestore(&idxd->dev_lock, flags); + module_put(THIS_MODULE); dev_warn(dev, "Device config failed: %d\n", rc); return rc; } @@ -132,6 +133,7 @@ static int idxd_config_bus_probe(struct device *dev) rc = idxd_device_enable(idxd); if (rc < 0) { spin_unlock_irqrestore(&idxd->dev_lock, flags); + module_put(THIS_MODULE); dev_warn(dev, "Device enable failed: %d\n", rc); return rc; } @@ -142,6 +144,7 @@ static int idxd_config_bus_probe(struct device *dev) rc = idxd_register_dma_device(idxd); if (rc < 0) { spin_unlock_irqrestore(&idxd->dev_lock, flags); + module_put(THIS_MODULE); dev_dbg(dev, "Failed to register dmaengine device\n"); return rc; } @@ -516,7 +519,7 @@ static ssize_t group_tokens_reserved_store(struct device *dev, if (val > idxd->max_tokens) return -EINVAL; - if (val > idxd->nr_tokens) + if (val > idxd->nr_tokens + group->tokens_reserved) return -EINVAL; group->tokens_reserved = val; @@ -901,6 +904,20 @@ static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr, return sprintf(buf, "%u\n", wq->size); } +static int total_claimed_wq_size(struct idxd_device *idxd) +{ + int i; + int wq_size = 0; + + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + wq_size += wq->size; + } + + return wq_size; +} + static ssize_t wq_size_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -920,7 +937,7 @@ static ssize_t 
wq_size_store(struct device *dev, if (wq->state != IDXD_WQ_DISABLED) return -EPERM; - if (size > idxd->max_wq_size) + if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size) return -EINVAL; wq->size = size; @@ -999,12 +1016,14 @@ static ssize_t wq_type_store(struct device *dev, return -EPERM; old_type = wq->type; - if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL])) + if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE])) + wq->type = IDXD_WQT_NONE; + else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL])) wq->type = IDXD_WQT_KERNEL; else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER])) wq->type = IDXD_WQT_USER; else - wq->type = IDXD_WQT_NONE; + return -EINVAL; /* If we are changing queue type, clear the name */ if (wq->type != old_type) diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 066b21a3223261f26a73a71266d3b0a3e67bb51d..4d4477df4ede751a9b4277833d851dc48fe8641f 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -1331,13 +1331,14 @@ static void sdma_free_chan_resources(struct dma_chan *chan) sdma_channel_synchronize(chan); - if (sdmac->event_id0) + if (sdmac->event_id0 >= 0) sdma_event_disable(sdmac, sdmac->event_id0); if (sdmac->event_id1) sdma_event_disable(sdmac, sdmac->event_id1); sdmac->event_id0 = 0; sdmac->event_id1 = 0; + sdmac->context_loaded = false; sdma_set_channel_priority(sdmac, 0); @@ -1631,7 +1632,7 @@ static int sdma_config(struct dma_chan *chan, memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg)); /* Set ENBLn earlier to make sure dma request triggered after that */ - if (sdmac->event_id0) { + if (sdmac->event_id0 >= 0) { if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events) return -EINVAL; sdma_event_enable(sdmac, sdmac->event_id0); diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 3a45079d11ecff40db8ea5b1824c55894a7429e7..4a750e29bfb533953584bc101f6e5eec99cc0239 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -281,7 +281,7 @@ static struct tegra_dma_desc *tegra_dma_desc_get( /* Do not allocate if desc are waiting for ack */ list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) { - if (async_tx_test_ack(&dma_desc->txd)) { + if (async_tx_test_ack(&dma_desc->txd) && !dma_desc->cb_count) { list_del(&dma_desc->node); spin_unlock_irqrestore(&tdc->lock, flags); dma_desc->txd.flags = 0; @@ -756,10 +756,6 @@ static int tegra_dma_terminate_all(struct dma_chan *dc) bool was_busy; spin_lock_irqsave(&tdc->lock, flags); - if (list_empty(&tdc->pending_sg_req)) { - spin_unlock_irqrestore(&tdc->lock, flags); - return 0; - } if (!tdc->busy) goto skip_dma_stop; diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c index c1511298ece2a319a616f06d94aa95b5548ac56d..4d7561a1b3e3bbd91fad6993a1825777ad88a88e 100644 --- a/drivers/dma/ti/k3-udma-glue.c +++ b/drivers/dma/ti/k3-udma-glue.c @@ -564,12 +564,12 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn, if (IS_ERR(flow->udma_rflow)) { ret = PTR_ERR(flow->udma_rflow); dev_err(dev, "UDMAX rflow get err %d\n", ret); - goto err; + return ret; } if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) { - xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow); - return -ENODEV; + ret = -ENODEV; + goto err_rflow_put; } /* request and cfg rings */ @@ -578,7 +578,7 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn, if (!flow->ringrx) { ret = -ENODEV; dev_err(dev, "Failed to get RX ring\n"); - goto err; 
+ goto err_rflow_put; } flow->ringrxfdq = k3_ringacc_request_ring(rx_chn->common.ringacc, @@ -586,19 +586,19 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn, if (!flow->ringrxfdq) { ret = -ENODEV; dev_err(dev, "Failed to get RXFDQ ring\n"); - goto err; + goto err_ringrx_free; } ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg); if (ret) { dev_err(dev, "Failed to cfg ringrx %d\n", ret); - goto err; + goto err_ringrxfdq_free; } ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg); if (ret) { dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret); - goto err; + goto err_ringrxfdq_free; } if (rx_chn->remote) { @@ -648,7 +648,7 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn, if (ret) { dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id, ret); - goto err; + goto err_ringrxfdq_free; } rx_chn->flows_ready++; @@ -656,8 +656,17 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn, flow->udma_rflow_id, rx_chn->flows_ready); return 0; -err: - k3_udma_glue_release_rx_flow(rx_chn, flow_idx); + +err_ringrxfdq_free: + k3_ringacc_ring_free(flow->ringrxfdq); + +err_ringrx_free: + k3_ringacc_ring_free(flow->ringrx); + +err_rflow_put: + xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow); + flow->udma_rflow = NULL; + return ret; } diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index ea79c2df28e01b628a8ac08b5af33c7d50a931d1..0536866a58cee788374f63cad40259b122541474 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -5,6 +5,7 @@ */ #include +#include #include #include #include @@ -96,6 +97,24 @@ struct udma_match_data { u32 level_start_idx[]; }; +struct udma_hwdesc { + size_t cppi5_desc_size; + void *cppi5_desc_vaddr; + dma_addr_t cppi5_desc_paddr; + + /* TR descriptor internal pointers */ + void *tr_req_base; + struct cppi5_tr_resp_t *tr_resp_base; +}; + +struct udma_rx_flush { + struct udma_hwdesc hwdescs[2]; + + size_t buffer_size; + void *buffer_vaddr; + dma_addr_t buffer_paddr; +}; + struct udma_dev { struct dma_device ddev; struct device *dev; @@ -112,6 +131,8 @@ struct udma_dev { struct list_head desc_to_purge; spinlock_t lock; + struct udma_rx_flush rx_flush; + int tchan_cnt; int echan_cnt; int rchan_cnt; @@ -130,16 +151,6 @@ struct udma_dev { u32 psil_base; }; -struct udma_hwdesc { - size_t cppi5_desc_size; - void *cppi5_desc_vaddr; - dma_addr_t cppi5_desc_paddr; - - /* TR descriptor internal pointers */ - void *tr_req_base; - struct cppi5_tr_resp_t *tr_resp_base; -}; - struct udma_desc { struct virt_dma_desc vd; @@ -169,7 +180,7 @@ enum udma_chan_state { struct udma_tx_drain { struct delayed_work work; - unsigned long jiffie; + ktime_t tstamp; u32 residue; }; @@ -502,7 +513,7 @@ static bool udma_is_chan_paused(struct udma_chan *uc) { u32 val, pause_mask; - switch (uc->desc->dir) { + switch (uc->config.dir) { case DMA_DEV_TO_MEM: val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG); @@ -551,12 +562,17 @@ static void udma_sync_for_device(struct udma_chan *uc, int idx) } } +static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc) +{ + return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr; +} + static int udma_push_to_ring(struct udma_chan *uc, int idx) { struct udma_desc *d = uc->desc; - struct k3_ring *ring = NULL; - int ret = -EINVAL; + dma_addr_t paddr; + int ret; switch (uc->config.dir) { case DMA_DEV_TO_MEM: @@ -567,21 +583,37 @@ static int udma_push_to_ring(struct udma_chan *uc, int idx) ring = 
uc->tchan->t_ring; break; default: - break; + return -EINVAL; } - if (ring) { - dma_addr_t desc_addr = udma_curr_cppi5_desc_paddr(d, idx); + /* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */ + if (idx == -1) { + paddr = udma_get_rx_flush_hwdesc_paddr(uc); + } else { + paddr = udma_curr_cppi5_desc_paddr(d, idx); wmb(); /* Ensure that writes are not moved over this point */ udma_sync_for_device(uc, idx); - ret = k3_ringacc_ring_push(ring, &desc_addr); - uc->in_ring_cnt++; } + ret = k3_ringacc_ring_push(ring, &paddr); + if (!ret) + uc->in_ring_cnt++; + return ret; } +static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr) +{ + if (uc->config.dir != DMA_DEV_TO_MEM) + return false; + + if (addr == udma_get_rx_flush_hwdesc_paddr(uc)) + return true; + + return false; +} + static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr) { struct k3_ring *ring = NULL; @@ -610,6 +642,10 @@ static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr) if (cppi5_desc_is_tdcm(*addr)) return ret; + /* Check for flush descriptor */ + if (udma_desc_is_rx_flush(uc, *addr)) + return -ENOENT; + d = udma_udma_desc_from_paddr(uc, *addr); if (d) @@ -890,6 +926,9 @@ static int udma_stop(struct udma_chan *uc) switch (uc->config.dir) { case DMA_DEV_TO_MEM: + if (!uc->cyclic && !uc->desc) + udma_push_to_ring(uc, -1); + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN); @@ -946,9 +985,10 @@ static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d) peer_bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG); bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG); + /* Transfer is incomplete, store current residue and time stamp */ if (peer_bcnt < bcnt) { uc->tx_drain.residue = bcnt - peer_bcnt; - uc->tx_drain.jiffie = jiffies; + uc->tx_drain.tstamp = ktime_get(); return false; } @@ -961,35 +1001,59 @@ static void udma_check_tx_completion(struct work_struct *work) tx_drain.work.work); bool desc_done = true; u32 residue_diff; - unsigned long jiffie_diff, delay; + ktime_t time_diff; + unsigned long delay; + + while (1) { + if (uc->desc) { + /* Get previous residue and time stamp */ + residue_diff = uc->tx_drain.residue; + time_diff = uc->tx_drain.tstamp; + /* + * Get current residue and time stamp or see if + * transfer is complete + */ + desc_done = udma_is_desc_really_done(uc, uc->desc); + } - if (uc->desc) { - residue_diff = uc->tx_drain.residue; - jiffie_diff = uc->tx_drain.jiffie; - desc_done = udma_is_desc_really_done(uc, uc->desc); - } - - if (!desc_done) { - jiffie_diff = uc->tx_drain.jiffie - jiffie_diff; - residue_diff -= uc->tx_drain.residue; - if (residue_diff) { - /* Try to guess when we should check next time */ - residue_diff /= jiffie_diff; - delay = uc->tx_drain.residue / residue_diff / 3; - if (jiffies_to_msecs(delay) < 5) - delay = 0; - } else { - /* No progress, check again in 1 second */ - delay = HZ; + if (!desc_done) { + /* + * Find the time delta and residue delta w.r.t + * previous poll + */ + time_diff = ktime_sub(uc->tx_drain.tstamp, + time_diff) + 1; + residue_diff -= uc->tx_drain.residue; + if (residue_diff) { + /* + * Try to guess when we should check + * next time by calculating rate at + * which data is being drained at the + * peer device + */ + delay = (time_diff / residue_diff) * + uc->tx_drain.residue; + } else { + /* No progress, check again in 1 second */ + schedule_delayed_work(&uc->tx_drain.work, HZ); + break; + } + + 
usleep_range(ktime_to_us(delay), + ktime_to_us(delay) + 10); + continue; } - schedule_delayed_work(&uc->tx_drain.work, delay); - } else if (uc->desc) { - struct udma_desc *d = uc->desc; + if (uc->desc) { + struct udma_desc *d = uc->desc; - uc->bcnt += d->residue; - udma_start(uc); - vchan_cookie_complete(&d->vd); + uc->bcnt += d->residue; + udma_start(uc); + vchan_cookie_complete(&d->vd); + break; + } + + break; } } @@ -1033,29 +1097,27 @@ static irqreturn_t udma_ring_irq_handler(int irq, void *data) goto out; } - if (uc->cyclic) { - /* push the descriptor back to the ring */ - if (d == uc->desc) { + if (d == uc->desc) { + /* active descriptor */ + if (uc->cyclic) { udma_cyclic_packet_elapsed(uc); vchan_cyclic_callback(&d->vd); - } - } else { - bool desc_done = false; - - if (d == uc->desc) { - desc_done = udma_is_desc_really_done(uc, d); - - if (desc_done) { + } else { + if (udma_is_desc_really_done(uc, d)) { uc->bcnt += d->residue; udma_start(uc); + vchan_cookie_complete(&d->vd); } else { schedule_delayed_work(&uc->tx_drain.work, 0); } } - - if (desc_done) - vchan_cookie_complete(&d->vd); + } else { + /* + * terminated descriptor, mark the descriptor as + * completed to update the channel's cookie marker + */ + dma_cookie_complete(&d->vd.tx); } } out: @@ -1965,36 +2027,81 @@ static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc, return d; } +/** + * udma_get_tr_counters - calculate TR counters for a given length + * @len: Length of the transfer + * @align_to: Preferred alignment + * @tr0_cnt0: First TR icnt0 + * @tr0_cnt1: First TR icnt1 + * @tr1_cnt0: Second (if used) TR icnt0 + * + * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated + * For len >= SZ_64K two TRs are used in a simple way: + * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1) + * Second TR: the remaining length (tr1_cnt0) + * + * Returns the number of TRs the length needs (1 or 2) + * -EINVAL if the length cannot be supported + */ +static int udma_get_tr_counters(size_t len, unsigned long align_to, + u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0) +{ + if (len < SZ_64K) { + *tr0_cnt0 = len; + *tr0_cnt1 = 1; + + return 1; + } + + if (align_to > 3) + align_to = 3; + +realign: + *tr0_cnt0 = SZ_64K - BIT(align_to); + if (len / *tr0_cnt0 >= SZ_64K) { + if (align_to) { + align_to--; + goto realign; + } + return -EINVAL; + } + + *tr0_cnt1 = len / *tr0_cnt0; + *tr1_cnt0 = len % *tr0_cnt0; + + return 2; +} + static struct udma_desc * udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl, unsigned int sglen, enum dma_transfer_direction dir, unsigned long tx_flags, void *context) { - enum dma_slave_buswidth dev_width; struct scatterlist *sgent; struct udma_desc *d; - size_t tr_size; struct cppi5_tr_type1_t *tr_req = NULL; + u16 tr0_cnt0, tr0_cnt1, tr1_cnt0; unsigned int i; - u32 burst; + size_t tr_size; + int num_tr = 0; + int tr_idx = 0; - if (dir == DMA_DEV_TO_MEM) { - dev_width = uc->cfg.src_addr_width; - burst = uc->cfg.src_maxburst; - } else if (dir == DMA_MEM_TO_DEV) { - dev_width = uc->cfg.dst_addr_width; - burst = uc->cfg.dst_maxburst; - } else { - dev_err(uc->ud->dev, "%s: bad direction?\n", __func__); + if (!is_slave_direction(dir)) { + dev_err(uc->ud->dev, "Only slave mode is supported\n"); return NULL; } - if (!burst) - burst = 1; + /* estimate the number of TRs we will need */ + for_each_sg(sgl, sgent, sglen, i) { + if (sg_dma_len(sgent) < SZ_64K) + num_tr++; + else + num_tr += 2; + } /* Now allocate and setup the descriptor.
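(sized from the num_tr estimate above rather than sglen, since each SG entry of SZ_64K or more consumes two TRs)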
*/ tr_size = sizeof(struct cppi5_tr_type1_t); - d = udma_alloc_tr_desc(uc, tr_size, sglen, dir); + d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir); if (!d) return NULL; @@ -2002,19 +2109,46 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl, tr_req = d->hwdesc[0].tr_req_base; for_each_sg(sgl, sgent, sglen, i) { - d->residue += sg_dma_len(sgent); + dma_addr_t sg_addr = sg_dma_address(sgent); + + num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr), + &tr0_cnt0, &tr0_cnt1, &tr1_cnt0); + if (num_tr < 0) { + dev_err(uc->ud->dev, "size %u is not supported\n", + sg_dma_len(sgent)); + udma_free_hwdesc(uc, d); + kfree(d); + return NULL; + } cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0); cppi5_tr_csf_set(&tr_req[i].flags, CPPI5_TR_CSF_SUPR_EVT); - tr_req[i].addr = sg_dma_address(sgent); - tr_req[i].icnt0 = burst * dev_width; - tr_req[i].dim1 = burst * dev_width; - tr_req[i].icnt1 = sg_dma_len(sgent) / tr_req[i].icnt0; + tr_req[tr_idx].addr = sg_addr; + tr_req[tr_idx].icnt0 = tr0_cnt0; + tr_req[tr_idx].icnt1 = tr0_cnt1; + tr_req[tr_idx].dim1 = tr0_cnt0; + tr_idx++; + + if (num_tr == 2) { + cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, + false, false, + CPPI5_TR_EVENT_SIZE_COMPLETION, 0); + cppi5_tr_csf_set(&tr_req[tr_idx].flags, + CPPI5_TR_CSF_SUPR_EVT); + + tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0; + tr_req[tr_idx].icnt0 = tr1_cnt0; + tr_req[tr_idx].icnt1 = 1; + tr_req[tr_idx].dim1 = tr1_cnt0; + tr_idx++; + } + + d->residue += sg_dma_len(sgent); } - cppi5_tr_csf_set(&tr_req[i - 1].flags, CPPI5_TR_CSF_EOP); + cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, CPPI5_TR_CSF_EOP); return d; } @@ -2319,47 +2453,66 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr, size_t buf_len, size_t period_len, enum dma_transfer_direction dir, unsigned long flags) { - enum dma_slave_buswidth dev_width; struct udma_desc *d; - size_t tr_size; + size_t tr_size, period_addr; struct cppi5_tr_type1_t *tr_req; - unsigned int i; unsigned int periods = buf_len / period_len; - u32 burst; + u16 tr0_cnt0, tr0_cnt1, tr1_cnt0; + unsigned int i; + int num_tr; - if (dir == DMA_DEV_TO_MEM) { - dev_width = uc->cfg.src_addr_width; - burst = uc->cfg.src_maxburst; - } else if (dir == DMA_MEM_TO_DEV) { - dev_width = uc->cfg.dst_addr_width; - burst = uc->cfg.dst_maxburst; - } else { - dev_err(uc->ud->dev, "%s: bad direction?\n", __func__); + if (!is_slave_direction(dir)) { + dev_err(uc->ud->dev, "Only slave cyclic is supported\n"); return NULL; } - if (!burst) - burst = 1; + num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0, + &tr0_cnt1, &tr1_cnt0); + if (num_tr < 0) { + dev_err(uc->ud->dev, "size %zu is not supported\n", + period_len); + return NULL; + } /* Now allocate and setup the descriptor. 
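(periods * num_tr TRs in total, two per period whenever period_len needs the 64K split)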
*/ tr_size = sizeof(struct cppi5_tr_type1_t); - d = udma_alloc_tr_desc(uc, tr_size, periods, dir); + d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir); if (!d) return NULL; tr_req = d->hwdesc[0].tr_req_base; + period_addr = buf_addr; for (i = 0; i < periods; i++) { - cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false, - CPPI5_TR_EVENT_SIZE_COMPLETION, 0); + int tr_idx = i * num_tr; - tr_req[i].addr = buf_addr + period_len * i; - tr_req[i].icnt0 = dev_width; - tr_req[i].icnt1 = period_len / dev_width; - tr_req[i].dim1 = dev_width; + cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false, + false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0); + + tr_req[tr_idx].addr = period_addr; + tr_req[tr_idx].icnt0 = tr0_cnt0; + tr_req[tr_idx].icnt1 = tr0_cnt1; + tr_req[tr_idx].dim1 = tr0_cnt0; + + if (num_tr == 2) { + cppi5_tr_csf_set(&tr_req[tr_idx].flags, + CPPI5_TR_CSF_SUPR_EVT); + tr_idx++; + + cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, + false, false, + CPPI5_TR_EVENT_SIZE_COMPLETION, 0); + + tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0; + tr_req[tr_idx].icnt0 = tr1_cnt0; + tr_req[tr_idx].icnt1 = 1; + tr_req[tr_idx].dim1 = tr1_cnt0; + } if (!(flags & DMA_PREP_INTERRUPT)) - cppi5_tr_csf_set(&tr_req[i].flags, + cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT); + + period_addr += period_len; } return d; @@ -2517,29 +2670,12 @@ udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, return NULL; } - if (len < SZ_64K) { - num_tr = 1; - tr0_cnt0 = len; - tr0_cnt1 = 1; - } else { - unsigned long align_to = __ffs(src | dest); - - if (align_to > 3) - align_to = 3; - /* - * Keep simple: tr0: SZ_64K-alignment blocks, - * tr1: the remaining - */ - num_tr = 2; - tr0_cnt0 = (SZ_64K - BIT(align_to)); - if (len / tr0_cnt0 >= SZ_64K) { - dev_err(uc->ud->dev, "size %zu is not supported\n", - len); - return NULL; - } - - tr0_cnt1 = len / tr0_cnt0; - tr1_cnt0 = len % tr0_cnt0; + num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0, + &tr0_cnt1, &tr1_cnt0); + if (num_tr < 0) { + dev_err(uc->ud->dev, "size %zu is not supported\n", + len); + return NULL; } d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM); @@ -2631,6 +2767,9 @@ static enum dma_status udma_tx_status(struct dma_chan *chan, ret = dma_cookie_status(chan, cookie, txstate); + if (!udma_is_chan_running(uc)) + ret = DMA_COMPLETE; + if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc)) ret = DMA_PAUSED; @@ -2697,11 +2836,8 @@ static int udma_pause(struct dma_chan *chan) { struct udma_chan *uc = to_udma_chan(chan); - if (!uc->desc) - return -EINVAL; - /* pause the channel */ - switch (uc->desc->dir) { + switch (uc->config.dir) { case DMA_DEV_TO_MEM: udma_rchanrt_update_bits(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, @@ -2730,11 +2866,8 @@ static int udma_resume(struct dma_chan *chan) { struct udma_chan *uc = to_udma_chan(chan); - if (!uc->desc) - return -EINVAL; - /* resume the channel */ - switch (uc->desc->dir) { + switch (uc->config.dir) { case DMA_DEV_TO_MEM: udma_rchanrt_update_bits(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, @@ -3248,6 +3381,98 @@ static int udma_setup_resources(struct udma_dev *ud) return ch_count; } +static int udma_setup_rx_flush(struct udma_dev *ud) +{ + struct udma_rx_flush *rx_flush = &ud->rx_flush; + struct cppi5_desc_hdr_t *tr_desc; + struct cppi5_tr_type1_t *tr_req; + struct cppi5_host_desc_t *desc; + struct device *dev = ud->dev; + struct udma_hwdesc *hwdesc; + size_t tr_size; + + /* Allocate 1K buffer for discarded data on RX channel teardown */ 
+ rx_flush->buffer_size = SZ_1K; + rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size, + GFP_KERNEL); + if (!rx_flush->buffer_vaddr) + return -ENOMEM; + + rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr, + rx_flush->buffer_size, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, rx_flush->buffer_paddr)) + return -ENOMEM; + + /* Set up descriptor to be used for TR mode */ + hwdesc = &rx_flush->hwdescs[0]; + tr_size = sizeof(struct cppi5_tr_type1_t); + hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1); + hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size, + ud->desc_align); + + hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size, + GFP_KERNEL); + if (!hwdesc->cppi5_desc_vaddr) + return -ENOMEM; + + hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr, + hwdesc->cppi5_desc_size, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr)) + return -ENOMEM; + + /* Start of the TR req records */ + hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size; + /* Start address of the TR response array */ + hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size; + + tr_desc = hwdesc->cppi5_desc_vaddr; + cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0); + cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT); + cppi5_desc_set_retpolicy(tr_desc, 0, 0); + + tr_req = hwdesc->tr_req_base; + cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false, + CPPI5_TR_EVENT_SIZE_COMPLETION, 0); + cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT); + + tr_req->addr = rx_flush->buffer_paddr; + tr_req->icnt0 = rx_flush->buffer_size; + tr_req->icnt1 = 1; + + /* Set up descriptor to be used for packet mode */ + hwdesc = &rx_flush->hwdescs[1]; + hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) + + CPPI5_INFO0_HDESC_EPIB_SIZE + + CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE, + ud->desc_align); + + hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size, + GFP_KERNEL); + if (!hwdesc->cppi5_desc_vaddr) + return -ENOMEM; + + hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr, + hwdesc->cppi5_desc_size, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr)) + return -ENOMEM; + + desc = hwdesc->cppi5_desc_vaddr; + cppi5_hdesc_init(desc, 0, 0); + cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT); + cppi5_desc_set_retpolicy(&desc->hdr, 0, 0); + + cppi5_hdesc_attach_buf(desc, + rx_flush->buffer_paddr, rx_flush->buffer_size, + rx_flush->buffer_paddr, rx_flush->buffer_size); + + dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr, + hwdesc->cppi5_desc_size, DMA_TO_DEVICE); + return 0; +} + #define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ @@ -3361,6 +3586,10 @@ static int udma_probe(struct platform_device *pdev) if (ud->desc_align < dma_get_cache_alignment()) ud->desc_align = dma_get_cache_alignment(); + ret = udma_setup_rx_flush(ud); + if (ret) + return ret; + for (i = 0; i < ud->tchan_cnt; i++) { struct udma_tchan *tchan = &ud->tchans[i]; diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c index 2d263382d797f0440d1e07408445ef4bf76c70fc..880ffd83371871032513e15a94bc5719d57ef1c8 100644 --- a/drivers/edac/synopsys_edac.c +++ b/drivers/edac/synopsys_edac.c @@ -479,20 +479,14 @@ static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p) pinf = &p->ceinfo; if (!priv->p_data->quirks) { snprintf(priv->message, 
SYNPS_EDAC_MSG_SIZE, - "DDR ECC error type:%s Row %d Bank %d Col %d ", - "CE", pinf->row, pinf->bank, pinf->col); - snprintf(priv->message, SYNPS_EDAC_MSG_SIZE, - "Bit Position: %d Data: 0x%08x\n", + "DDR ECC error type:%s Row %d Bank %d Col %d Bit Position: %d Data: 0x%08x", + "CE", pinf->row, pinf->bank, pinf->col, pinf->bitpos, pinf->data); } else { snprintf(priv->message, SYNPS_EDAC_MSG_SIZE, - "DDR ECC error type:%s Row %d Bank %d Col %d ", - "CE", pinf->row, pinf->bank, pinf->col); - snprintf(priv->message, SYNPS_EDAC_MSG_SIZE, - "BankGroup Number %d Block Number %d ", - pinf->bankgrpnr, pinf->blknr); - snprintf(priv->message, SYNPS_EDAC_MSG_SIZE, - "Bit Position: %d Data: 0x%08x\n", + "DDR ECC error type:%s Row %d Bank %d Col %d BankGroup Number %d Block Number %d Bit Position: %d Data: 0x%08x", + "CE", pinf->row, pinf->bank, pinf->col, + pinf->bankgrpnr, pinf->blknr, pinf->bitpos, pinf->data); } @@ -509,10 +503,8 @@ static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p) "UE", pinf->row, pinf->bank, pinf->col); } else { snprintf(priv->message, SYNPS_EDAC_MSG_SIZE, - "DDR ECC error type :%s Row %d Bank %d Col %d ", - "UE", pinf->row, pinf->bank, pinf->col); - snprintf(priv->message, SYNPS_EDAC_MSG_SIZE, - "BankGroup Number %d Block Number %d", + "DDR ECC error type :%s Row %d Bank %d Col %d BankGroup Number %d Block Number %d", + "UE", pinf->row, pinf->bank, pinf->col, pinf->bankgrpnr, pinf->blknr); } diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 621220ab3d0e348fdff0eda1af30f499ed37d1ce..21ea99f651134be47161831266f00cb43fb85f56 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -552,7 +552,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz, seed = early_memremap(efi.rng_seed, sizeof(*seed)); if (seed != NULL) { - size = seed->size; + size = READ_ONCE(seed->size); early_memunmap(seed, sizeof(*seed)); } else { pr_err("Could not map UEFI random seed!\n"); @@ -562,7 +562,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz, sizeof(*seed) + size); if (seed != NULL) { pr_notice("seeding entropy pool\n"); - add_bootloader_randomness(seed->bits, seed->size); + add_bootloader_randomness(seed->bits, size); early_memunmap(seed, sizeof(*seed) + size); } else { pr_err("Could not map UEFI random seed!\n"); diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c index 7576450c8254b8cc8c1cada05a1c9ac61922ea1f..aff3dfb4d7ba643219254820282a4f4d903edab7 100644 --- a/drivers/firmware/efi/efivars.c +++ b/drivers/firmware/efi/efivars.c @@ -83,13 +83,16 @@ static ssize_t efivar_attr_read(struct efivar_entry *entry, char *buf) { struct efi_variable *var = &entry->var; + unsigned long size = sizeof(var->Data); char *str = buf; + int ret; if (!entry || !buf) return -EINVAL; - var->DataSize = 1024; - if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data)) + ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data); + var->DataSize = size; + if (ret) return -EIO; if (var->Attributes & EFI_VARIABLE_NON_VOLATILE) @@ -116,13 +119,16 @@ static ssize_t efivar_size_read(struct efivar_entry *entry, char *buf) { struct efi_variable *var = &entry->var; + unsigned long size = sizeof(var->Data); char *str = buf; + int ret; if (!entry || !buf) return -EINVAL; - var->DataSize = 1024; - if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data)) + ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data); + var->DataSize = 
size; + if (ret) return -EIO; str += sprintf(str, "0x%lx\n", var->DataSize); @@ -133,12 +139,15 @@ static ssize_t efivar_data_read(struct efivar_entry *entry, char *buf) { struct efi_variable *var = &entry->var; + unsigned long size = sizeof(var->Data); + int ret; if (!entry || !buf) return -EINVAL; - var->DataSize = 1024; - if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data)) + ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data); + var->DataSize = size; + if (ret) return -EIO; memcpy(buf, var->Data, var->DataSize); @@ -199,6 +208,9 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count) u8 *data; int err; + if (!entry || !buf) + return -EINVAL; + if (in_compat_syscall()) { struct compat_efi_variable *compat; @@ -250,14 +262,16 @@ efivar_show_raw(struct efivar_entry *entry, char *buf) { struct efi_variable *var = &entry->var; struct compat_efi_variable *compat; + unsigned long datasize = sizeof(var->Data); size_t size; + int ret; if (!entry || !buf) return 0; - var->DataSize = 1024; - if (efivar_entry_get(entry, &entry->var.Attributes, - &entry->var.DataSize, entry->var.Data)) + ret = efivar_entry_get(entry, &var->Attributes, &datasize, var->Data); + var->DataSize = datasize; + if (ret) return -EIO; if (in_compat_syscall()) { diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c index 03b43b7a6d1d573d9c9fc0f9421a53b6440baf8a..f71eaa5bf52d411c750aa359aa13f4a00d88130c 100644 --- a/drivers/firmware/imx/imx-scu.c +++ b/drivers/firmware/imx/imx-scu.c @@ -29,6 +29,7 @@ struct imx_sc_chan { struct mbox_client cl; struct mbox_chan *ch; int idx; + struct completion tx_done; }; struct imx_sc_ipc { @@ -100,6 +101,14 @@ int imx_scu_get_handle(struct imx_sc_ipc **ipc) } EXPORT_SYMBOL(imx_scu_get_handle); +/* Callback invoked when a word of a message is ack-ed, e.g. read by the SCU */ +static void imx_scu_tx_done(struct mbox_client *cl, void *mssg, int r) +{ + struct imx_sc_chan *sc_chan = container_of(cl, struct imx_sc_chan, cl); + + complete(&sc_chan->tx_done); +} + static void imx_scu_rx_callback(struct mbox_client *c, void *msg) { struct imx_sc_chan *sc_chan = container_of(c, struct imx_sc_chan, cl); @@ -149,6 +158,19 @@ static int imx_scu_ipc_write(struct imx_sc_ipc *sc_ipc, void *msg) for (i = 0; i < hdr->size; i++) { sc_chan = &sc_ipc->chans[i % 4]; + + /* + * The SCU requires that all message words are written + * sequentially, but the Linux MU driver implements multiple + * independent channels for each register, so ordering between + * different channels must be ensured by the SCU API interface. + * + * Wait for tx_done before every send to ensure that no + * queueing happens at the mailbox channel level.
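+ * (The very first wait returns immediately: probe initializes each + * tx_done as already completed.)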
+ */ + wait_for_completion(&sc_chan->tx_done); + reinit_completion(&sc_chan->tx_done); + ret = mbox_send_message(sc_chan->ch, &data[i]); if (ret < 0) return ret; @@ -247,6 +269,11 @@ static int imx_scu_probe(struct platform_device *pdev) cl->knows_txdone = true; cl->rx_callback = imx_scu_rx_callback; + /* Initialize the tx_done completion as "done" */ + cl->tx_done = imx_scu_tx_done; + init_completion(&sc_chan->tx_done); + complete(&sc_chan->tx_done); + sc_chan->sc_ipc = sc_ipc; sc_chan->idx = i % 4; sc_chan->ch = mbox_request_channel_byname(cl, chan_name); diff --git a/drivers/firmware/imx/misc.c b/drivers/firmware/imx/misc.c index 4b56a587dacd4b5b65d6d74e83eaec177ba0b7cb..d073cb3ce69937cc6073c936bfbd442172364866 100644 --- a/drivers/firmware/imx/misc.c +++ b/drivers/firmware/imx/misc.c @@ -16,7 +16,7 @@ struct imx_sc_msg_req_misc_set_ctrl { u32 ctrl; u32 val; u16 resource; -} __packed; +} __packed __aligned(4); struct imx_sc_msg_req_cpu_start { struct imx_sc_rpc_msg hdr; @@ -24,18 +24,18 @@ struct imx_sc_msg_req_cpu_start { u32 address_lo; u16 resource; u8 enable; -} __packed; +} __packed __aligned(4); struct imx_sc_msg_req_misc_get_ctrl { struct imx_sc_rpc_msg hdr; u32 ctrl; u16 resource; -} __packed; +} __packed __aligned(4); struct imx_sc_msg_resp_misc_get_ctrl { struct imx_sc_rpc_msg hdr; u32 val; -} __packed; +} __packed __aligned(4); /* * This function sets a miscellaneous control value. diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c index b556612207e536a85dcdc8464b0256d3ff3f992a..af3ae0087de4e7ac8aa30e91dd00b6460aac265c 100644 --- a/drivers/firmware/imx/scu-pd.c +++ b/drivers/firmware/imx/scu-pd.c @@ -61,7 +61,7 @@ struct imx_sc_msg_req_set_resource_power_mode { struct imx_sc_rpc_msg hdr; u16 resource; u8 mode; -} __packed; +} __packed __aligned(4); #define IMX_SCU_PD_NAME_SIZE 20 struct imx_sc_pm_domain { diff --git a/drivers/fsi/Kconfig b/drivers/fsi/Kconfig index 92ce6d85802cc0602146bd23f6a276245f9d2db1..4cc0e630ab79b0be31ff97962a6a9c45507399f7 100644 --- a/drivers/fsi/Kconfig +++ b/drivers/fsi/Kconfig @@ -55,6 +55,7 @@ config FSI_MASTER_AST_CF config FSI_MASTER_ASPEED tristate "FSI ASPEED master" + depends on HAS_IOMEM help This option enables a FSI master that is present behind an OPB bridge in the AST2600.
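Taken together, the imx-scu changes form a small completion-gated transmit pattern; a condensed sketch using the names from the patch (error handling elided):

	/* probe: open the gate before the first send */
	cl->tx_done = imx_scu_tx_done;		/* calls complete(&sc_chan->tx_done) */
	init_completion(&sc_chan->tx_done);
	complete(&sc_chan->tx_done);

	/* send path: at most one word in flight per channel */
	wait_for_completion(&sc_chan->tx_done);	/* previous word consumed by SCU */
	reinit_completion(&sc_chan->tx_done);	/* closed until tx_done fires */
	ret = mbox_send_message(sc_chan->ch, &data[i]);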
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 31fee5e918b7d14fafde0477231316fee14dbb53..0017367e94eeac7d47022746c577539a63bf3050 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -21,18 +21,21 @@ #include "gpiolib.h" #include "gpiolib-acpi.h" -#define QUIRK_NO_EDGE_EVENTS_ON_BOOT 0x01l -#define QUIRK_NO_WAKEUP 0x02l - static int run_edge_events_on_boot = -1; module_param(run_edge_events_on_boot, int, 0444); MODULE_PARM_DESC(run_edge_events_on_boot, "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto"); -static int honor_wakeup = -1; -module_param(honor_wakeup, int, 0444); -MODULE_PARM_DESC(honor_wakeup, - "Honor the ACPI wake-capable flag: 0=no, 1=yes, -1=auto"); +static char *ignore_wake; +module_param(ignore_wake, charp, 0444); +MODULE_PARM_DESC(ignore_wake, + "controller@pin combos on which to ignore the ACPI wake flag " + "ignore_wake=controller@pin[,controller@pin[,...]]"); + +struct acpi_gpiolib_dmi_quirk { + bool no_edge_events_on_boot; + char *ignore_wake; +}; /** * struct acpi_gpio_event - ACPI GPIO event handler data @@ -202,6 +205,57 @@ static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio) acpi_gpiochip_request_irq(acpi_gpio, event); } +static bool acpi_gpio_in_ignore_list(const char *controller_in, int pin_in) +{ + const char *controller, *pin_str; + int len, pin; + char *endp; + + controller = ignore_wake; + while (controller) { + pin_str = strchr(controller, '@'); + if (!pin_str) + goto err; + + len = pin_str - controller; + if (len == strlen(controller_in) && + strncmp(controller, controller_in, len) == 0) { + pin = simple_strtoul(pin_str + 1, &endp, 10); + if (*endp != 0 && *endp != ',') + goto err; + + if (pin == pin_in) + return true; + } + + controller = strchr(controller, ','); + if (controller) + controller++; + } + + return false; +err: + pr_err_once("Error invalid value for gpiolib_acpi.ignore_wake: %s\n", + ignore_wake); + return false; +} + +static bool acpi_gpio_irq_is_wake(struct device *parent, + struct acpi_resource_gpio *agpio) +{ + int pin = agpio->pin_table[0]; + + if (agpio->wake_capable != ACPI_WAKE_CAPABLE) + return false; + + if (acpi_gpio_in_ignore_list(dev_name(parent), pin)) { + dev_info(parent, "Ignoring wakeup on pin %d\n", pin); + return false; + } + + return true; +} + /* Always returns AE_OK so that we keep looping over the resources */ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares, void *context) @@ -289,7 +343,7 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares, event->handle = evt_handle; event->handler = handler; event->irq = irq; - event->irq_is_wake = honor_wakeup && agpio->wake_capable == ACPI_WAKE_CAPABLE; + event->irq_is_wake = acpi_gpio_irq_is_wake(chip->parent, agpio); event->pin = pin; event->desc = desc; @@ -1328,7 +1382,9 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] = { DMI_MATCH(DMI_SYS_VENDOR, "MINIX"), DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"), }, - .driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .no_edge_events_on_boot = true, + }, }, { /* @@ -1341,16 +1397,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] = { DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"), DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"), }, - .driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .no_edge_events_on_boot = true, + }, }, { /* - * Various HP X2 10 Cherry Trail models use an external - * 
embedded-controller connected via I2C + an ACPI GPIO - * event handler. The embedded controller generates various - * spurious wakeup events when suspended. So disable wakeup - * for its handler (it uses the only ACPI GPIO event handler). - * This breaks wakeup when opening the lid, the user needs + * HP X2 10 models with Cherry Trail SoC + TI PMIC use an + * external embedded-controller connected via I2C + an ACPI GPIO + * event handler on INT33FF:01 pin 0, causing spurious wakeups. + * When suspending by closing the LID, the power to the USB + * keyboard is turned off, causing INT0002 ACPI events to + * trigger once the XHCI controller notices the keyboard is + * gone. So INT0002 events cause spurious wakeups too. Ignoring + * EC wakes breaks wakeup when opening the lid, the user needs * to press the power-button to wakeup the system. The * alternative is suspend simply not working, which is worse. */ @@ -1358,33 +1418,61 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] = { DMI_MATCH(DMI_SYS_VENDOR, "HP"), DMI_MATCH(DMI_PRODUCT_NAME, "HP x2 Detachable 10-p0XX"), }, - .driver_data = (void *)QUIRK_NO_WAKEUP, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .ignore_wake = "INT33FF:01@0,INT0002:00@2", + }, + }, + { + /* + * HP X2 10 models with Bay Trail SoC + AXP288 PMIC use an + * external embedded-controller connected via I2C + an ACPI GPIO + * event handler on INT33FC:02 pin 28, causing spurious wakeups. + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"), + DMI_MATCH(DMI_BOARD_NAME, "815D"), + }, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .ignore_wake = "INT33FC:02@28", + }, + }, + { + /* + * HP X2 10 models with Cherry Trail SoC + AXP288 PMIC use an + * external embedded-controller connected via I2C + an ACPI GPIO + * event handler on INT33FF:01 pin 0, causing spurious wakeups. + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"), + DMI_MATCH(DMI_BOARD_NAME, "813E"), + }, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .ignore_wake = "INT33FF:01@0", + }, }, {} /* Terminating entry */ }; static int acpi_gpio_setup_params(void) { + const struct acpi_gpiolib_dmi_quirk *quirk = NULL; const struct dmi_system_id *id; - long quirks = 0; id = dmi_first_match(gpiolib_acpi_quirks); if (id) - quirks = (long)id->driver_data; + quirk = id->driver_data; if (run_edge_events_on_boot < 0) { - if (quirks & QUIRK_NO_EDGE_EVENTS_ON_BOOT) + if (quirk && quirk->no_edge_events_on_boot) run_edge_events_on_boot = 0; else run_edge_events_on_boot = 1; } - if (honor_wakeup < 0) { - if (quirks & QUIRK_NO_WAKEUP) - honor_wakeup = 0; - else - honor_wakeup = 1; - } + if (ignore_wake == NULL && quirk && quirk->ignore_wake) + ignore_wake = quirk->ignore_wake; return 0; } diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 4d0106ceeba7bb24d3a21177356cacf6a82f69eb..00fb91feba7086e77827e16e84aee8494b62986f 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -2306,9 +2306,16 @@ static void gpiochip_irq_disable(struct irq_data *d) { struct gpio_chip *chip = irq_data_get_irq_chip_data(d); + /* + * Since we override .irq_disable() we need to mimic the + * behaviour of __irq_disable() in irq/chip.c. + * First call .irq_disable() if it exists, else mimic the + * behaviour of mask_irq() which calls .irq_mask() if + * it exists. 
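
For reference, a hedged usage example of the new ignore_wake parameter (format taken from the MODULE_PARM_DESC above, values from the HP x2 quirk entry; exact controller names and pin numbers are board-specific):

    gpiolib_acpi.ignore_wake=INT33FF:01@0,INT0002:00@2

on the kernel command line. Each controller@pin pair names an ACPI GPIO controller and the pin whose wake-capable flag should be ignored, matching what the quirk table now supplies per machine.
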
+ */ if (chip->irq.irq_disable) chip->irq.irq_disable(d); - else + else if (chip->irq.chip->irq_mask) chip->irq.chip->irq_mask(d); gpiochip_disable_irq(chip, d->hwirq); } diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig index 13340f353ea817e36d79bb1213070e7266beb40f..216d932a7831cc7e64d46560fa3b43b44805b56e 100644 --- a/drivers/gpu/drm/amd/acp/Kconfig +++ b/drivers/gpu/drm/amd/acp/Kconfig @@ -1,5 +1,6 @@ # SPDX-License-Identifier: MIT menu "ACP (Audio CoProcessor) Configuration" + depends on DRM_AMDGPU config DRM_AMD_ACP bool "Enable AMD Audio CoProcessor IP support" diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 97dd9596d17a84e0b51b8773f061441516521103..2992a49ad4a57d0c34ef0c46addf54b681f3ad2e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -579,6 +579,7 @@ struct amdgpu_asic_funcs { /* invalidate hdp read cache */ void (*invalidate_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring); + void (*reset_hdp_ras_error_count)(struct amdgpu_device *adev); /* check if the asic needs a full reset of if soft reset will work */ bool (*need_full_reset)(struct amdgpu_device *adev); /* initialize doorbell layout for specific asic*/ @@ -993,6 +994,8 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t acc_flags); void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags); +void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v, + uint32_t acc_flags); void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value); uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index bc2e72a66db9f8fe316c74c1b6d9fc79c3c37264..abfbe89e805ef2f4ef89585b12f41a9066273fd3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -29,6 +29,7 @@ #include #include #include "amdgpu_xgmi.h" +#include static const unsigned int compute_vmid_bitmap = 0xFF00; @@ -224,7 +225,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd) int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size, void **mem_obj, uint64_t *gpu_addr, - void **cpu_ptr, bool mqd_gfx9) + void **cpu_ptr, bool cp_mqd_gfx9) { struct amdgpu_device *adev = (struct amdgpu_device *)kgd; struct amdgpu_bo *bo = NULL; @@ -240,8 +241,8 @@ int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size, bp.type = ttm_bo_type_kernel; bp.resv = NULL; - if (mqd_gfx9) - bp.flags |= AMDGPU_GEM_CREATE_MQD_GFX9; + if (cp_mqd_gfx9) + bp.flags |= AMDGPU_GEM_CREATE_CP_MQD_GFX9; r = amdgpu_bo_create(adev, &bp, &bo); if (r) { @@ -501,10 +502,11 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd, metadata_size, &metadata_flags); if (flags) { *flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ? 
- ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT; + KFD_IOC_ALLOC_MEM_FLAGS_VRAM + : KFD_IOC_ALLOC_MEM_FLAGS_GTT; if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) - *flags |= ALLOC_MEM_FLAGS_PUBLIC; + *flags |= KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC; } out_put: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index b0ad3be0b03f10910559813d06ee24367c011431..13feb313e9b3922e53f764d02a1935aba7208c00 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -242,6 +242,9 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, void amdgpu_amdkfd_gpuvm_init_mem_limits(void); void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo); +int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd, + struct tile_config *config); + /* KGD2KFD callbacks */ int kgd2kfd_init(void); void kgd2kfd_exit(void); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c index 4bcc175a149d070cde9de9d116edb2f2459fb329..6529caca88fe15ba65943413ede0f3b500c29d2f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c @@ -79,7 +79,7 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev, dev_warn(adev->dev, "Invalid sdma engine id (%d), using engine id 0\n", engine_id); - /* fall through */ + fallthrough; case 0: sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL; @@ -319,7 +319,6 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = { .address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset, .get_atc_vmid_pasid_mapping_info = kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, - .get_tile_config = kgd_gfx_v9_get_tile_config, .set_vm_context_page_table_base = kgd_set_vm_context_page_table_base, .get_hive_id = amdgpu_amdkfd_get_hive_id, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c index ca91fffb8a3622fba432648066ac8341e43f93e8..4ec6d0c0320107489a4b7072d68f056b48da915a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c @@ -42,38 +42,6 @@ enum hqd_dequeue_request_type { SAVE_WAVES }; -/* Because of REG_GET_FIELD() being used, we put this function in the - * asic specific file. - */ -static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd, - struct tile_config *config) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - - config->gb_addr_config = adev->gfx.config.gb_addr_config; -#if 0 -/* TODO - confirm REG_GET_FIELD x2, should be OK as is... 
but - * MC_ARB_RAMCFG register doesn't exist on Vega10 - initial amdgpu - * changes commented out related code, doing the same here for now but - * need to sync with Ken et al - */ - config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg, - MC_ARB_RAMCFG, NOOFBANK); - config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg, - MC_ARB_RAMCFG, NOOFRANKS); -#endif - - config->tile_config_ptr = adev->gfx.config.tile_mode_array; - config->num_tile_configs = - ARRAY_SIZE(adev->gfx.config.tile_mode_array); - config->macro_tile_config_ptr = - adev->gfx.config.macrotile_mode_array; - config->num_macro_tile_configs = - ARRAY_SIZE(adev->gfx.config.macrotile_mode_array); - - return 0; -} - static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) { return (struct amdgpu_device *)kgd; @@ -805,7 +773,6 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = { .address_watch_get_offset = kgd_address_watch_get_offset, .get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info, - .get_tile_config = amdgpu_amdkfd_get_tile_config, .set_vm_context_page_table_base = set_vm_context_page_table_base, .get_hive_id = amdgpu_amdkfd_get_hive_id, .get_unique_id = amdgpu_amdkfd_get_unique_id, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index 8f052e98a3c6cfc5c18e14cb0a21c4fcd50293c7..0b7e78748540681628a71d3b2703c1bc075208de 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -84,31 +84,6 @@ union TCP_WATCH_CNTL_BITS { float f32All; }; -/* Because of REG_GET_FIELD() being used, we put this function in the - * asic specific file. - */ -static int get_tile_config(struct kgd_dev *kgd, - struct tile_config *config) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - - config->gb_addr_config = adev->gfx.config.gb_addr_config; - config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg, - MC_ARB_RAMCFG, NOOFBANK); - config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg, - MC_ARB_RAMCFG, NOOFRANKS); - - config->tile_config_ptr = adev->gfx.config.tile_mode_array; - config->num_tile_configs = - ARRAY_SIZE(adev->gfx.config.tile_mode_array); - config->macro_tile_config_ptr = - adev->gfx.config.macrotile_mode_array; - config->num_macro_tile_configs = - ARRAY_SIZE(adev->gfx.config.macrotile_mode_array); - - return 0; -} - static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) { return (struct amdgpu_device *)kgd; @@ -730,7 +705,6 @@ const struct kfd2kgd_calls gfx_v7_kfd2kgd = { .address_watch_get_offset = kgd_address_watch_get_offset, .get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info, .set_scratch_backing_va = set_scratch_backing_va, - .get_tile_config = get_tile_config, .set_vm_context_page_table_base = set_vm_context_page_table_base, .read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index 19a10db93d6883942679863418f2540194d0ee18..ccd635b812b550acc07c14851ade557236bf2c8e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -41,31 +41,6 @@ enum hqd_dequeue_request_type { RESET_WAVES }; -/* Because of REG_GET_FIELD() being used, we put this function in the - * asic specific file. 
- */ -static int get_tile_config(struct kgd_dev *kgd, - struct tile_config *config) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - - config->gb_addr_config = adev->gfx.config.gb_addr_config; - config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg, - MC_ARB_RAMCFG, NOOFBANK); - config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg, - MC_ARB_RAMCFG, NOOFRANKS); - - config->tile_config_ptr = adev->gfx.config.tile_mode_array; - config->num_tile_configs = - ARRAY_SIZE(adev->gfx.config.tile_mode_array); - config->macro_tile_config_ptr = - adev->gfx.config.macrotile_mode_array; - config->num_macro_tile_configs = - ARRAY_SIZE(adev->gfx.config.macrotile_mode_array); - - return 0; -} - static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) { return (struct amdgpu_device *)kgd; @@ -676,6 +651,5 @@ const struct kfd2kgd_calls gfx_v8_kfd2kgd = { .get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info, .set_scratch_backing_va = set_scratch_backing_va, - .get_tile_config = get_tile_config, .set_vm_context_page_table_base = set_vm_context_page_table_base, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 7f91feff7c4fda8d969305660cafbe97340dc23c..df841c2ac5e741a5a4c8aa530f7b7d8c9c8ac8a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -48,28 +48,6 @@ enum hqd_dequeue_request_type { RESET_WAVES }; - -/* Because of REG_GET_FIELD() being used, we put this function in the - * asic specific file. - */ -int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd, - struct tile_config *config) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - - config->gb_addr_config = adev->gfx.config.gb_addr_config; - - config->tile_config_ptr = adev->gfx.config.tile_mode_array; - config->num_tile_configs = - ARRAY_SIZE(adev->gfx.config.tile_mode_array); - config->macro_tile_config_ptr = - adev->gfx.config.macrotile_mode_array; - config->num_macro_tile_configs = - ARRAY_SIZE(adev->gfx.config.macrotile_mode_array); - - return 0; -} - static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) { return (struct amdgpu_device *)kgd; @@ -736,7 +714,6 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = { .address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset, .get_atc_vmid_pasid_mapping_info = kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, - .get_tile_config = kgd_gfx_v9_get_tile_config, .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base, .get_hive_id = amdgpu_amdkfd_get_hive_id, .get_unique_id = amdgpu_amdkfd_get_unique_id, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h index 63d3e6683dfe1d800424990cb50da8ac26c175e8..aedf67d57449caef06227d9d0e70bda29bbbe45d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h @@ -60,5 +60,3 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd, bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, uint8_t vmid, uint16_t *p_pasid); -int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd, - struct tile_config *config); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index e1d1eed7a25fef12bfed061f3f608ca66d134cfb..9dff792c929036701e78af5f19f24463e9f6448b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -29,6 +29,7 @@ #include "amdgpu_vm.h" #include "amdgpu_amdkfd.h" #include "amdgpu_dma_buf.h" +#include /* BO flag to indicate a KFD userptr BO */ #define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63) @@ -400,18 +401,18 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync) static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem) { struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev); - bool coherent = mem->alloc_flags & ALLOC_MEM_FLAGS_COHERENT; + bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT; uint32_t mapping_flags; mapping_flags = AMDGPU_VM_PAGE_READABLE; - if (mem->alloc_flags & ALLOC_MEM_FLAGS_WRITABLE) + if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE) mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE; - if (mem->alloc_flags & ALLOC_MEM_FLAGS_EXECUTABLE) + if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE) mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE; switch (adev->asic_type) { case CHIP_ARCTURUS: - if (mem->alloc_flags & ALLOC_MEM_FLAGS_VRAM) { + if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { if (bo_adev == adev) mapping_flags |= coherent ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW; @@ -1160,24 +1161,24 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( /* * Check on which domain to allocate BO */ - if (flags & ALLOC_MEM_FLAGS_VRAM) { + if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM; alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE; - alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ? + alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : AMDGPU_GEM_CREATE_NO_CPU_ACCESS; - } else if (flags & ALLOC_MEM_FLAGS_GTT) { + } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) { domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT; alloc_flags = 0; - } else if (flags & ALLOC_MEM_FLAGS_USERPTR) { + } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) { domain = AMDGPU_GEM_DOMAIN_GTT; alloc_domain = AMDGPU_GEM_DOMAIN_CPU; alloc_flags = 0; if (!offset || !*offset) return -EINVAL; user_addr = untagged_addr(*offset); - } else if (flags & (ALLOC_MEM_FLAGS_DOORBELL | - ALLOC_MEM_FLAGS_MMIO_REMAP)) { + } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL | + KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) { domain = AMDGPU_GEM_DOMAIN_GTT; alloc_domain = AMDGPU_GEM_DOMAIN_CPU; bo_type = ttm_bo_type_sg; @@ -1198,7 +1199,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( } INIT_LIST_HEAD(&(*mem)->bo_va_list); mutex_init(&(*mem)->lock); - (*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM); + (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM); /* Workaround for AQL queue wraparound bug. Map the same * memory twice. That means we only actually allocate half @@ -1680,10 +1681,12 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, INIT_LIST_HEAD(&(*mem)->bo_va_list); mutex_init(&(*mem)->lock); + (*mem)->alloc_flags = ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ? 
- ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT) | - ALLOC_MEM_FLAGS_WRITABLE | ALLOC_MEM_FLAGS_EXECUTABLE; + KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT) + | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE + | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE; (*mem)->bo = amdgpu_bo_ref(bo); (*mem)->va = va; @@ -2242,3 +2245,25 @@ int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem) kfree(mem); return 0; } + +/* Returns GPU-specific tiling mode information */ +int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd, + struct tile_config *config) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)kgd; + + config->gb_addr_config = adev->gfx.config.gb_addr_config; + config->tile_config_ptr = adev->gfx.config.tile_mode_array; + config->num_tile_configs = + ARRAY_SIZE(adev->gfx.config.tile_mode_array); + config->macro_tile_config_ptr = + adev->gfx.config.macrotile_mode_array; + config->num_macro_tile_configs = + ARRAY_SIZE(adev->gfx.config.macrotile_mode_array); + + /* Those values are not set from GFX9 onwards */ + config->num_banks = adev->gfx.config.num_banks; + config->num_ranks = adev->gfx.config.num_ranks; + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 438d10ae343b54f6c59908323dcafce5d117c53b..af91627b19b0c5dd42a924fc2169136fd5165934 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1208,7 +1208,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, struct amdgpu_fpriv *fpriv = p->filp->driver_priv; struct drm_sched_entity *entity = p->entity; enum drm_sched_priority priority; - struct amdgpu_ring *ring; struct amdgpu_bo_list_entry *e; struct amdgpu_job *job; uint64_t seq; @@ -1261,9 +1260,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, priority = job->base.s_priority; drm_sched_entity_push_job(&job->base, entity); - ring = to_amdgpu_ring(entity->rq->sched); - amdgpu_ring_priority_get(ring, priority); - amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm); ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 94a6c42f29ea59a42f62bc1ef1077e21566df3a1..6ed36a2c5f73f2e89654d34b3cdbd9444c0c0270 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -61,12 +61,24 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp, return -EACCES; } +static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio) +{ + switch (prio) { + case DRM_SCHED_PRIORITY_HIGH_HW: + case DRM_SCHED_PRIORITY_KERNEL: + return AMDGPU_GFX_PIPE_PRIO_HIGH; + default: + return AMDGPU_GFX_PIPE_PRIO_NORMAL; + } +} + static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const u32 ring) { struct amdgpu_device *adev = ctx->adev; struct amdgpu_ctx_entity *entity; struct drm_gpu_scheduler **scheds = NULL, *sched = NULL; unsigned num_scheds = 0; + enum gfx_pipe_priority hw_prio; enum drm_sched_priority priority; int r; @@ -79,46 +91,51 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ? 
ctx->init_priority : ctx->override_priority; switch (hw_ip) { - case AMDGPU_HW_IP_GFX: - sched = &adev->gfx.gfx_ring[0].sched; - scheds = &sched; - num_scheds = 1; - break; - case AMDGPU_HW_IP_COMPUTE: - scheds = adev->gfx.compute_sched; - num_scheds = adev->gfx.num_compute_sched; - break; - case AMDGPU_HW_IP_DMA: - scheds = adev->sdma.sdma_sched; - num_scheds = adev->sdma.num_sdma_sched; - break; - case AMDGPU_HW_IP_UVD: - sched = &adev->uvd.inst[0].ring.sched; - scheds = &sched; - num_scheds = 1; - break; - case AMDGPU_HW_IP_VCE: - sched = &adev->vce.ring[0].sched; - scheds = &sched; - num_scheds = 1; - break; - case AMDGPU_HW_IP_UVD_ENC: - sched = &adev->uvd.inst[0].ring_enc[0].sched; - scheds = &sched; - num_scheds = 1; - break; - case AMDGPU_HW_IP_VCN_DEC: - scheds = adev->vcn.vcn_dec_sched; - num_scheds = adev->vcn.num_vcn_dec_sched; - break; - case AMDGPU_HW_IP_VCN_ENC: - scheds = adev->vcn.vcn_enc_sched; - num_scheds = adev->vcn.num_vcn_enc_sched; - break; - case AMDGPU_HW_IP_VCN_JPEG: - scheds = adev->jpeg.jpeg_sched; - num_scheds = adev->jpeg.num_jpeg_sched; - break; + case AMDGPU_HW_IP_GFX: + sched = &adev->gfx.gfx_ring[0].sched; + scheds = &sched; + num_scheds = 1; + break; + case AMDGPU_HW_IP_COMPUTE: + hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority); + scheds = adev->gfx.compute_prio_sched[hw_prio]; + num_scheds = adev->gfx.num_compute_sched[hw_prio]; + break; + case AMDGPU_HW_IP_DMA: + scheds = adev->sdma.sdma_sched; + num_scheds = adev->sdma.num_sdma_sched; + break; + case AMDGPU_HW_IP_UVD: + sched = &adev->uvd.inst[0].ring.sched; + scheds = &sched; + num_scheds = 1; + break; + case AMDGPU_HW_IP_VCE: + sched = &adev->vce.ring[0].sched; + scheds = &sched; + num_scheds = 1; + break; + case AMDGPU_HW_IP_UVD_ENC: + sched = &adev->uvd.inst[0].ring_enc[0].sched; + scheds = &sched; + num_scheds = 1; + break; + case AMDGPU_HW_IP_VCN_DEC: + sched = drm_sched_pick_best(adev->vcn.vcn_dec_sched, + adev->vcn.num_vcn_dec_sched); + scheds = &sched; + num_scheds = 1; + break; + case AMDGPU_HW_IP_VCN_ENC: + sched = drm_sched_pick_best(adev->vcn.vcn_enc_sched, + adev->vcn.num_vcn_enc_sched); + scheds = &sched; + num_scheds = 1; + break; + case AMDGPU_HW_IP_VCN_JPEG: + scheds = adev->jpeg.jpeg_sched; + num_scheds = adev->jpeg.num_jpeg_sched; + break; } r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds, @@ -502,6 +519,29 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, return fence; } +static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx, + struct amdgpu_ctx_entity *aentity, + int hw_ip, + enum drm_sched_priority priority) +{ + struct amdgpu_device *adev = ctx->adev; + enum gfx_pipe_priority hw_prio; + struct drm_gpu_scheduler **scheds = NULL; + unsigned num_scheds; + + /* set sw priority */ + drm_sched_entity_set_priority(&aentity->entity, priority); + + /* set hw priority */ + if (hw_ip == AMDGPU_HW_IP_COMPUTE) { + hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority); + scheds = adev->gfx.compute_prio_sched[hw_prio]; + num_scheds = adev->gfx.num_compute_sched[hw_prio]; + drm_sched_entity_modify_sched(&aentity->entity, scheds, + num_scheds); + } +} + void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, enum drm_sched_priority priority) { @@ -514,13 +554,11 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, ctx->init_priority : ctx->override_priority; for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) { - struct drm_sched_entity *entity; - if (!ctx->entities[i][j]) continue; 
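
In short, a compute entity now derives a hardware pipe priority from its DRM scheduler priority and picks the matching scheduler set. A condensed sketch of the plumbing above (all names from the hunks, surrounding context elided):

    /* DRM_SCHED_PRIORITY_KERNEL and DRM_SCHED_PRIORITY_HIGH_HW map to
     * the high compute pipe, every other priority to the normal pipe */
    hw_prio    = amdgpu_ctx_sched_prio_to_compute_prio(priority);
    scheds     = adev->gfx.compute_prio_sched[hw_prio];
    num_scheds = adev->gfx.num_compute_sched[hw_prio];
    drm_sched_entity_modify_sched(&aentity->entity, scheds, num_scheds);
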
- entity = &ctx->entities[i][j]->entity; - drm_sched_entity_set_priority(entity, ctx_prio); + amdgpu_ctx_set_entity_priority(ctx, ctx->entities[i][j], + i, ctx_prio); } } } @@ -628,20 +666,53 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr) mutex_destroy(&mgr->lock); } + +static void amdgpu_ctx_init_compute_sched(struct amdgpu_device *adev) +{ + int num_compute_sched_normal = 0; + int num_compute_sched_high = AMDGPU_MAX_COMPUTE_RINGS - 1; + int i; + + /* use one drm sched array, gfx.compute_sched to store both high and + * normal priority drm compute schedulers */ + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + if (!adev->gfx.compute_ring[i].has_high_prio) + adev->gfx.compute_sched[num_compute_sched_normal++] = + &adev->gfx.compute_ring[i].sched; + else + adev->gfx.compute_sched[num_compute_sched_high--] = + &adev->gfx.compute_ring[i].sched; + } + + /* compute ring only has two priority for now */ + i = AMDGPU_GFX_PIPE_PRIO_NORMAL; + adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0]; + adev->gfx.num_compute_sched[i] = num_compute_sched_normal; + + i = AMDGPU_GFX_PIPE_PRIO_HIGH; + if (num_compute_sched_high == (AMDGPU_MAX_COMPUTE_RINGS - 1)) { + /* When compute has no high priority rings then use */ + /* normal priority sched array */ + adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0]; + adev->gfx.num_compute_sched[i] = num_compute_sched_normal; + } else { + adev->gfx.compute_prio_sched[i] = + &adev->gfx.compute_sched[num_compute_sched_high - 1]; + adev->gfx.num_compute_sched[i] = + adev->gfx.num_compute_rings - num_compute_sched_normal; + } +} + void amdgpu_ctx_init_sched(struct amdgpu_device *adev) { int i, j; + amdgpu_ctx_init_compute_sched(adev); for (i = 0; i < adev->gfx.num_gfx_rings; i++) { adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched; adev->gfx.num_gfx_sched++; } - for (i = 0; i < adev->gfx.num_compute_rings; i++) { - adev->gfx.compute_sched[i] = &adev->gfx.compute_ring[i].sched; - adev->gfx.num_compute_sched++; - } - for (i = 0; i < adev->sdma.num_instances; i++) { adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched; adev->sdma.num_sdma_sched++; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 3bb74056b9d20b8ca6f0bb3354b2b2e0fb1d7d0b..c0f9a651dc067bbf46ce689535934740ab7315ee 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -33,6 +33,7 @@ #include "amdgpu.h" #include "amdgpu_pm.h" #include "amdgpu_dm_debugfs.h" +#include "amdgpu_ras.h" /** * amdgpu_debugfs_add_files - Add simple debugfs entries @@ -178,7 +179,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f, } else { r = get_user(value, (uint32_t *)buf); if (!r) - WREG32(*pos >> 2, value); + amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value, 0); } if (r) { result = r; @@ -783,11 +784,11 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, ssize_t result = 0; uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data; - if (size & 3 || *pos & 3) + if (size > 4096 || size & 3 || *pos & 3) return -EINVAL; /* decode offset */ - offset = *pos & GENMASK_ULL(11, 0); + offset = (*pos & GENMASK_ULL(11, 0)) >> 2; se = (*pos & GENMASK_ULL(19, 12)) >> 12; sh = (*pos & GENMASK_ULL(27, 20)) >> 20; cu = (*pos & GENMASK_ULL(35, 28)) >> 28; @@ -825,7 +826,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, while (size) { uint32_t value; - value = data[offset++]; + value = data[result >> 2]; r = 
put_user(value, (uint32_t *)buf); if (r) { result = r; @@ -992,18 +993,6 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) return 0; } -void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) -{ - unsigned i; - - for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) { - if (adev->debugfs_regs[i]) { - debugfs_remove(adev->debugfs_regs[i]); - adev->debugfs_regs[i] = NULL; - } - } -} - static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; @@ -1269,9 +1258,43 @@ failure: return 0; } +static int amdgpu_debugfs_sclk_set(void *data, u64 val) +{ + int ret = 0; + uint32_t max_freq, min_freq; + struct amdgpu_device *adev = (struct amdgpu_device *)data; + + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return -EINVAL; + + ret = pm_runtime_get_sync(adev->ddev->dev); + if (ret < 0) + return ret; + + if (is_support_sw_smu(adev)) { + ret = smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, &min_freq, &max_freq, true); + if (ret || val > max_freq || val < min_freq) + return -EINVAL; + ret = smu_set_soft_freq_range(&adev->smu, SMU_SCLK, (uint32_t)val, (uint32_t)val, true); + } else { + return 0; + } + + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + + if (ret) + return -EINVAL; + + return 0; +} + DEFINE_SIMPLE_ATTRIBUTE(fops_ib_preempt, NULL, amdgpu_debugfs_ib_preempt, "%llu\n"); +DEFINE_SIMPLE_ATTRIBUTE(fops_sclk_set, NULL, + amdgpu_debugfs_sclk_set, "%llu\n"); + int amdgpu_debugfs_init(struct amdgpu_device *adev) { int r, i; @@ -1285,6 +1308,15 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) return -EIO; } + adev->smu.debugfs_sclk = + debugfs_create_file("amdgpu_force_sclk", 0200, + adev->ddev->primary->debugfs_root, adev, + &fops_sclk_set); + if (!(adev->smu.debugfs_sclk)) { + DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n"); + return -EIO; + } + /* Register debugfs entries for amdgpu_ttm */ r = amdgpu_ttm_debugfs_init(adev); if (r) { @@ -1335,35 +1367,19 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) } } + amdgpu_ras_debugfs_create_all(adev); + return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list, ARRAY_SIZE(amdgpu_debugfs_list)); } -void amdgpu_debugfs_fini(struct amdgpu_device *adev) -{ - int i; - - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - struct amdgpu_ring *ring = adev->rings[i]; - - if (!ring) - continue; - - amdgpu_debugfs_ring_fini(ring); - } - amdgpu_ttm_debugfs_fini(adev); - debugfs_remove(adev->debugfs_preempt); -} - #else int amdgpu_debugfs_init(struct amdgpu_device *adev) { return 0; } -void amdgpu_debugfs_fini(struct amdgpu_device *adev) { } int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) { return 0; } -void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { } #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h index b382527e359a78361a11f5d7124d4eb9e3e6a053..de12d11015260eb3c44a9ef1cb95a434a26b5fda 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h @@ -32,7 +32,6 @@ struct amdgpu_debugfs { }; int amdgpu_debugfs_regs_init(struct amdgpu_device *adev); -void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev); int amdgpu_debugfs_init(struct amdgpu_device *adev); void amdgpu_debugfs_fini(struct amdgpu_device *adev); int amdgpu_debugfs_add_files(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 
a3aaf2e3794cebfeb10479e083e5e1415871f3b7..faa3e7102156c755d8d32063d9d59f0b0cceed7b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -306,6 +306,26 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) BUG(); } +void static inline amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags) +{ + trace_amdgpu_mm_wreg(adev->pdev->device, reg, v); + + if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX)) + writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); + else { + unsigned long flags; + + spin_lock_irqsave(&adev->mmio_idx_lock, flags); + writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4)); + writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4)); + spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); + } + + if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) { + udelay(500); + } +} + /** * amdgpu_mm_wreg - write to a memory mapped IO register * @@ -319,8 +339,6 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags) { - trace_amdgpu_mm_wreg(adev->pdev->device, reg, v); - if (adev->asic_type >= CHIP_VEGA10 && reg == 0) { adev->last_mm_index = v; } @@ -328,20 +346,26 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))) return amdgpu_kiq_wreg(adev, reg, v); - if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX)) - writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); - else { - unsigned long flags; + amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags); +} - spin_lock_irqsave(&adev->mmio_idx_lock, flags); - writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4)); - writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4)); - spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); - } +/* + * amdgpu_mm_wreg_mmio_rlc - write register either with mmio or with RLC path if in range + * + * this function is invoked only the debugfs register access + * */ +void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v, + uint32_t acc_flags) +{ + if (amdgpu_sriov_fullaccess(adev) && + adev->gfx.rlc.funcs && + adev->gfx.rlc.funcs->is_rlcg_access_range) { - if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) { - udelay(500); + if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg)) + return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v); } + + amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags); } /** @@ -2718,6 +2742,9 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work) if (adev->asic_reset_res) goto fail; + + if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count) + adev->mmhub.funcs->reset_ras_error_count(adev); } else { task_barrier_full(&hive->tb); @@ -3193,6 +3220,12 @@ void amdgpu_device_fini(struct amdgpu_device *adev) flush_delayed_work(&adev->delayed_init_work); adev->shutdown = true; + /* make sure IB test finished before entering exclusive mode + * to avoid preemption on IB test + * */ + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_request_full_gpu(adev, false); + /* disable all interrupts */ amdgpu_irq_disable_all(adev); if (adev->mode_info.mode_config_initialized){ @@ -3235,7 +3268,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev) adev->rmmio = NULL; 
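
A hedged usage example for the amdgpu_force_sclk debugfs file created above (the dri card index is an assumption; the value is forwarded unchanged to smu_set_soft_freq_range, so it must lie inside the range reported by smu_get_dpm_freq_range — MHz on the SMU11 parts this targets, also an assumption):

    # as root, with debugfs mounted:
    echo 1000 > /sys/kernel/debug/dri/0/amdgpu_force_sclk

Per the hunk, writes return -EINVAL when the value is out of range or when running as an SR-IOV VF without one-VF mode; the file is write-only (mode 0200).
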
amdgpu_device_doorbell_fini(adev); - amdgpu_debugfs_regs_cleanup(adev); device_remove_file(adev->dev, &dev_attr_pcie_replay_count); if (adev->ucode_sysfs_en) amdgpu_ucode_sysfs_fini(adev); @@ -3881,8 +3913,15 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive, } } - if (!r && amdgpu_ras_intr_triggered()) + if (!r && amdgpu_ras_intr_triggered()) { + list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { + if (tmp_adev->mmhub.funcs && + tmp_adev->mmhub.funcs->reset_ras_error_count) + tmp_adev->mmhub.funcs->reset_ras_error_count(tmp_adev); + } + amdgpu_ras_intr_cleared(); + } list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { if (need_full_reset) { @@ -3928,6 +3967,8 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive, if (r) goto out; + amdgpu_fbdev_set_suspend(tmp_adev, 0); + /* must succeed. */ amdgpu_ras_resume(tmp_adev); @@ -4101,6 +4142,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, */ amdgpu_unregister_gpu_instance(tmp_adev); + amdgpu_fbdev_set_suspend(adev, 1); + /* disable ras on ALL IPs */ if (!(in_ras_intr && !use_baco) && amdgpu_device_ip_need_full_reset(tmp_adev)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h index 168579492a5579dd60177b01cda0a321d034b38d..936d85aa0fbc5fd4387793e6d851c9cddd45c028 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h @@ -448,6 +448,8 @@ struct amdgpu_pm { /* powerplay feature */ uint32_t pp_feature; + /* Used for I2C access to various EEPROMs on relevant ASICs */ + struct i2c_adapter smu_i2c; }; #define R600_SSTU_DFLT 0 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index e1c4c2df716b578bbd7faf2ba91227cb00ff48aa..8ea86ffdea0d8dfbcf860c67bc1cfc08d26885a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1121,18 +1121,16 @@ static void amdgpu_pci_remove(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); - struct amdgpu_device *adev = dev->dev_private; #ifdef MODULE if (THIS_MODULE->state != MODULE_STATE_GOING) #endif DRM_ERROR("Hotplug removal is not supported\n"); drm_dev_unplug(dev); - drm_dev_put(dev); - amdgpu_debugfs_fini(adev); amdgpu_driver_unload_kms(dev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); + drm_dev_put(dev); } static void @@ -1301,24 +1299,55 @@ static int amdgpu_pmops_runtime_idle(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_dev->dev_private; - struct drm_crtc *crtc; + /* we don't want the main rpm_idle to call suspend - we want to autosuspend */ + int ret = 1; if (!adev->runpm) { pm_runtime_forbid(dev); return -EBUSY; } - list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) { - if (crtc->enabled) { - DRM_DEBUG_DRIVER("failing to power off - crtc active\n"); - return -EBUSY; + if (amdgpu_device_has_dc_support(adev)) { + struct drm_crtc *crtc; + + drm_modeset_lock_all(drm_dev); + + drm_for_each_crtc(crtc, drm_dev) { + if (crtc->state->active) { + ret = -EBUSY; + break; + } + } + + drm_modeset_unlock_all(drm_dev); + + } else { + struct drm_connector *list_connector; + struct drm_connector_list_iter iter; + + mutex_lock(&drm_dev->mode_config.mutex); + drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL); + + drm_connector_list_iter_begin(drm_dev, &iter); + drm_for_each_connector_iter(list_connector, &iter) { + if (list_connector->dpms == DRM_MODE_DPMS_ON) { + 
ret = -EBUSY; + break; + } } + + drm_connector_list_iter_end(&iter); + + drm_modeset_unlock(&drm_dev->mode_config.connection_mutex); + mutex_unlock(&drm_dev->mode_config.mutex); } + if (ret == -EBUSY) + DRM_DEBUG_DRIVER("failing to power off - crtc active\n"); + pm_runtime_mark_last_busy(dev); pm_runtime_autosuspend(dev); - /* we don't want the main rpm_idle to call suspend - we want to autosuspend */ - return 1; + return ret; } long amdgpu_drm_ioctl(struct file *filp, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 7403588684b3462c41a37e76d98e9c6fc3bd532e..6b9c9193cdfa72df9434fd16ccaf4367cfa56fb9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -192,6 +192,14 @@ static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev) return adev->gfx.mec.num_mec > 1; } +bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev, + int queue) +{ + /* Policy: make queue 0 of each pipe as high priority compute queue */ + return (queue == 0); + +} + void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev) { int i, queue, pipe, mec; @@ -565,7 +573,6 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev) int r; struct ras_fs_if fs_info = { .sysfs_name = "gfx_err_count", - .debugfs_name = "gfx_err_inject", }; struct ras_ih_if ih_info = { .cb = amdgpu_gfx_process_ras_data_cb, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index ca17ffb013012dc2a3e04f343976de0a4ae4784a..5825692d07e42a33a31004bebd6b7ebcb4936b09 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -41,6 +41,15 @@ #define AMDGPU_MAX_GFX_QUEUES KGD_MAX_QUEUES #define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES +enum gfx_pipe_priority { + AMDGPU_GFX_PIPE_PRIO_NORMAL = 1, + AMDGPU_GFX_PIPE_PRIO_HIGH, + AMDGPU_GFX_PIPE_PRIO_MAX +}; + +#define AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM 0 +#define AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM 15 + struct amdgpu_mec { struct amdgpu_bo *hpd_eop_obj; u64 hpd_eop_gpu_addr; @@ -151,6 +160,8 @@ struct amdgpu_gfx_config { unsigned num_gpus; unsigned multi_gpu_tile_size; unsigned mc_arb_ramcfg; + unsigned num_banks; + unsigned num_ranks; unsigned gb_addr_config; unsigned num_rbs; unsigned gs_vgt_table_depth; @@ -204,6 +215,7 @@ struct amdgpu_gfx_funcs { u32 queue, u32 vmid); int (*ras_error_inject)(struct amdgpu_device *adev, void *inject_if); int (*query_ras_error_count) (struct amdgpu_device *adev, void *ras_error_status); + void (*reset_ras_error_count) (struct amdgpu_device *adev); }; struct sq_work { @@ -278,8 +290,9 @@ struct amdgpu_gfx { uint32_t num_gfx_sched; unsigned num_gfx_rings; struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS]; + struct drm_gpu_scheduler **compute_prio_sched[AMDGPU_GFX_PIPE_PRIO_MAX]; struct drm_gpu_scheduler *compute_sched[AMDGPU_MAX_COMPUTE_RINGS]; - uint32_t num_compute_sched; + uint32_t num_compute_sched[AMDGPU_GFX_PIPE_PRIO_MAX]; unsigned num_compute_rings; struct amdgpu_irq_src eop_irq; struct amdgpu_irq_src priv_reg_irq; @@ -361,6 +374,8 @@ void amdgpu_gfx_bit_to_mec_queue(struct amdgpu_device *adev, int bit, int *mec, int *pipe, int *queue); bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int mec, int pipe, int queue); +bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev, + int queue); int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me, int pipe, int queue); void 
amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index d42be880a236bf8b1d0278a5c9586a0fc25ccdd9..4981e443a88473050e27559e4f43acc1c5a13d58 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -117,12 +117,10 @@ void amdgpu_job_free_resources(struct amdgpu_job *job) static void amdgpu_job_free_cb(struct drm_sched_job *s_job) { - struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched); struct amdgpu_job *job = to_amdgpu_job(s_job); drm_sched_job_cleanup(s_job); - amdgpu_ring_priority_put(ring, s_job->s_priority); dma_fence_put(job->fence); amdgpu_sync_free(&job->sync); amdgpu_sync_free(&job->sched_sync); @@ -143,7 +141,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, void *owner, struct dma_fence **f) { enum drm_sched_priority priority; - struct amdgpu_ring *ring; int r; if (!f) @@ -158,9 +155,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, priority = job->base.s_priority; drm_sched_entity_push_job(&job->base, entity); - ring = to_amdgpu_ring(entity->rq->sched); - amdgpu_ring_priority_get(ring, priority); - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 6a1b62bc3dcfc1e270a60279c9ae778f71a3579f..fd1dc3236ecae0a4c169677a39cc07fe43cb8d2a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -88,9 +88,6 @@ void amdgpu_driver_unload_kms(struct drm_device *dev) if (adev->rmmio == NULL) goto done_free; - if (amdgpu_sriov_vf(adev)) - amdgpu_virt_request_full_gpu(adev, false); - if (adev->runpm) { pm_runtime_get_sync(dev->dev); pm_runtime_forbid(dev->dev); @@ -175,6 +172,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) else if (amdgpu_device_supports_baco(dev) && (amdgpu_runtime_pm != 0) && (adev->asic_type >= CHIP_TOPAZ) && + (adev->asic_type != CHIP_VEGA10) && (adev->asic_type != CHIP_VEGA20) && (adev->asic_type != CHIP_ARCTURUS)) /* enable runpm on VI+ */ adev->runpm = true; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c index 676c48c02d771cddd0ff018d6381ee535c5006ce..ead3dc572ec54471a01cfebcf4c06e985d2a2609 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c @@ -32,7 +32,6 @@ int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev) }; struct ras_fs_if fs_info = { .sysfs_name = "mmhub_err_count", - .debugfs_name = "mmhub_err_inject", }; if (!adev->mmhub.ras_if) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h index 1cd78940cf828b88a1a49b8de8ecfffae88b5ae5..e89fb35fec713f62f5156f27541b5b7911130828 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h @@ -26,6 +26,7 @@ struct amdgpu_mmhub_funcs { int (*ras_late_init)(struct amdgpu_device *adev); void (*query_ras_error_count)(struct amdgpu_device *adev, void *ras_error_status); + void (*reset_ras_error_count)(struct amdgpu_device *adev); }; struct amdgpu_mmhub { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c index 7d5c3a9de9eaf627879fb4695c95b6d664aa9596..6201a5f4b4fa6e523459c0611af0304c1bdb96f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c @@ -30,7 +30,6 @@ int 
amdgpu_nbio_ras_late_init(struct amdgpu_device *adev) }; struct ras_fs_if fs_info = { .sysfs_name = "pcie_bif_err_count", - .debugfs_name = "pcie_bif_err_inject", }; if (!adev->nbio.ras_if) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 1791c084787d733d6f23313d0a323461a950a26c..c687f5415b3f1d03610f4904a42748ece428f618 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1319,7 +1319,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo) amdgpu_amdkfd_unreserve_memory_limit(abo); /* We only remove the fence if the resv has individualized. */ - WARN_ON_ONCE(bo->base.resv != &bo->base._resv); + WARN_ON_ONCE(bo->type == ttm_bo_type_kernel + && bo->base.resv != &bo->base._resv); if (bo->base.resv == &bo->base._resv) amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index bc3cf04a1a94a7813c300dc855ce309166b73f6e..f197f1be0969045743a2198d7fb79d70a7d437bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -92,6 +92,9 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) if (adev->powerplay.pp_funcs->enable_bapm) amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power); mutex_unlock(&adev->pm.mutex); + + if (is_support_sw_smu(adev)) + smu_set_ac_dc(&adev->smu); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index d33f7410009467a7e3bd2a510791d66322c9e9ef..be50867ea644fe655913a705aad9cdfbbc8d4763 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -24,6 +24,7 @@ */ #include +#include #include "amdgpu.h" #include "amdgpu_psp.h" @@ -38,6 +39,9 @@ static void psp_set_funcs(struct amdgpu_device *adev); +static int psp_sysfs_init(struct amdgpu_device *adev); +static void psp_sysfs_fini(struct amdgpu_device *adev); + /* * Due to DF Cstate management centralized to PMFW, the firmware * loading sequence will be updated as below: @@ -136,6 +140,13 @@ static int psp_sw_init(void *handle) return ret; } + if (adev->asic_type == CHIP_NAVI10) { + ret= psp_sysfs_init(adev); + if (ret) { + return ret; + } + } + return 0; } @@ -152,6 +163,10 @@ static int psp_sw_fini(void *handle) release_firmware(adev->psp.ta_fw); adev->psp.ta_fw = NULL; } + + if (adev->asic_type == CHIP_NAVI10) + psp_sysfs_fini(adev); + return 0; } @@ -185,6 +200,7 @@ psp_cmd_submit_buf(struct psp_context *psp, int ret; int index; int timeout = 2000; + bool ras_intr = false; mutex_lock(&psp->mutex); @@ -209,7 +225,8 @@ psp_cmd_submit_buf(struct psp_context *psp, * because gpu reset thread triggered and lock resource should * be released for psp resume sequence. */ - if (amdgpu_ras_intr_triggered()) + ras_intr = amdgpu_ras_intr_triggered(); + if (ras_intr) break; msleep(1); amdgpu_asic_invalidate_hdp(psp->adev, NULL); @@ -222,7 +239,7 @@ psp_cmd_submit_buf(struct psp_context *psp, * during psp initialization to avoid breaking hw_init and it doesn't * return -EINVAL. 
*/ - if (psp->cmd_buf_mem->resp.status || !timeout) { + if ((psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) { if (ucode) DRM_WARN("failed to load ucode id (%d) ", ucode->ucode_id); @@ -1816,6 +1833,97 @@ static int psp_set_powergating_state(void *handle, return 0; } +static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + uint32_t fw_ver; + int ret; + + if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) { + DRM_INFO("PSP block is not ready yet."); + return -EBUSY; + } + + mutex_lock(&adev->psp.mutex); + ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver); + mutex_unlock(&adev->psp.mutex); + + if (ret) { + DRM_ERROR("Failed to read USBC PD FW, err = %d", ret); + return ret; + } + + return snprintf(buf, PAGE_SIZE, "%x\n", fw_ver); +} + +static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + void *cpu_addr; + dma_addr_t dma_addr; + int ret; + char fw_name[100]; + const struct firmware *usbc_pd_fw; + + if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) { + DRM_INFO("PSP block is not ready yet."); + return -EBUSY; + } + + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf); + ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev); + if (ret) + goto fail; + + /* We need contiguous physical mem to place the FW for psp to access */ + cpu_addr = dma_alloc_coherent(adev->dev, usbc_pd_fw->size, &dma_addr, GFP_KERNEL); + + ret = dma_mapping_error(adev->dev, dma_addr); + if (ret) + goto rel_buf; + + memcpy_toio(cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size); + + /* + * x86 specific workaround. + * Without it the buffer is invisible in PSP. 
+ * + * TODO Remove once PSP starts snooping CPU cache + */ +#ifdef CONFIG_X86 + clflush_cache_range(cpu_addr, (usbc_pd_fw->size & ~(L1_CACHE_BYTES - 1))); +#endif + + mutex_lock(&adev->psp.mutex); + ret = psp_load_usbc_pd_fw(&adev->psp, dma_addr); + mutex_unlock(&adev->psp.mutex); + +rel_buf: + dma_free_coherent(adev->dev, usbc_pd_fw->size, cpu_addr, dma_addr); + release_firmware(usbc_pd_fw); + +fail: + if (ret) { + DRM_ERROR("Failed to load USBC PD FW, err = %d", ret); + return ret; + } + + return count; +} + +static DEVICE_ATTR(usbc_pd_fw, S_IRUGO | S_IWUSR, + psp_usbc_pd_fw_sysfs_read, + psp_usbc_pd_fw_sysfs_write); + + + const struct amd_ip_funcs psp_ip_funcs = { .name = "psp", .early_init = psp_early_init, @@ -1834,6 +1942,21 @@ const struct amd_ip_funcs psp_ip_funcs = { .set_powergating_state = psp_set_powergating_state, }; +static int psp_sysfs_init(struct amdgpu_device *adev) +{ + int ret = device_create_file(adev->dev, &dev_attr_usbc_pd_fw); + + if (ret) + DRM_ERROR("Failed to create USBC PD FW control file!"); + + return ret; +} + +static void psp_sysfs_fini(struct amdgpu_device *adev) +{ + device_remove_file(adev->dev, &dev_attr_usbc_pd_fw); +} + static const struct amdgpu_psp_funcs psp_funcs = { .check_fw_loading_status = psp_check_fw_loading_status, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 37fa184f27f6f61093c4f3310704d076174daf3e..297435c0c7c1ad630b16eebdc29ccb7aefb04f4d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -114,6 +114,8 @@ struct psp_funcs int (*mem_training)(struct psp_context *psp, uint32_t ops); uint32_t (*ring_get_wptr)(struct psp_context *psp); void (*ring_set_wptr)(struct psp_context *psp, uint32_t value); + int (*load_usbc_pd_fw)(struct psp_context *psp, dma_addr_t dma_addr); + int (*read_usbc_pd_fw)(struct psp_context *psp, uint32_t *fw_ver); }; #define AMDGPU_XGMI_MAX_CONNECTED_NODES 64 @@ -351,6 +353,14 @@ struct amdgpu_psp_funcs { #define psp_ring_get_wptr(psp) (psp)->funcs->ring_get_wptr((psp)) #define psp_ring_set_wptr(psp, value) (psp)->funcs->ring_set_wptr((psp), (value)) +#define psp_load_usbc_pd_fw(psp, dma_addr) \ + ((psp)->funcs->load_usbc_pd_fw ? \ + (psp)->funcs->load_usbc_pd_fw((psp), (dma_addr)) : -EINVAL) + +#define psp_read_usbc_pd_fw(psp, fw_ver) \ + ((psp)->funcs->read_usbc_pd_fw ? 
\ + (psp)->funcs->read_usbc_pd_fw((psp), fw_ver) : -EINVAL) + extern const struct amd_ip_funcs psp_ip_funcs; extern const struct amdgpu_ip_block_version psp_v3_1_ip_block; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 930633a0ed646acf420ad771e2924ac5e28eeb50..3c32a94d24240a9adefd469264c5b014cd3821be 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -281,6 +281,11 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user * struct ras_debug_if data; int ret = 0; + if (amdgpu_ras_intr_triggered()) { + DRM_WARN("RAS WARN: error injection currently inaccessible\n"); + return size; + } + ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data); if (ret) return -EINVAL; @@ -394,6 +399,10 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev, .head = obj->head, }; + if (amdgpu_ras_intr_triggered()) + return snprintf(buf, PAGE_SIZE, + "Query currently inaccessible\n"); + if (amdgpu_ras_error_query(obj->adev, &info)) return -EINVAL; @@ -721,6 +730,9 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev, if (adev->nbio.funcs->query_ras_error_count) adev->nbio.funcs->query_ras_error_count(adev, &err_data); break; + case AMDGPU_RAS_BLOCK__XGMI_WAFL: + amdgpu_xgmi_query_ras_error_count(adev, &err_data); + break; default: break; } @@ -1110,6 +1122,32 @@ void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, &amdgpu_ras_debugfs_ops); } +void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct ras_manager *obj; + struct ras_fs_if fs_info; + + /* + * it won't be called in resume path, no need to check + * suspend and gpu reset status + */ + if (!con) + return; + + amdgpu_ras_debugfs_create_ctrl_node(adev); + + list_for_each_entry(obj, &con->head, node) { + if (amdgpu_ras_is_supported(adev, obj->head.block) && + (obj->attr_inuse == 1)) { + sprintf(fs_info.debugfs_name, "%s_err_inject", + ras_block_str(obj->head.block)); + fs_info.head = obj->head; + amdgpu_ras_debugfs_create(adev, &fs_info); + } + } +} + void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev, struct ras_common_if *head) { @@ -1142,7 +1180,6 @@ static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev) static int amdgpu_ras_fs_init(struct amdgpu_device *adev) { amdgpu_ras_sysfs_create_feature_node(adev); - amdgpu_ras_debugfs_create_ctrl_node(adev); return 0; } @@ -1734,18 +1771,30 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev, *hw_supported = 0; *supported = 0; - if (amdgpu_sriov_vf(adev) || + if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw || (adev->asic_type != CHIP_VEGA20 && adev->asic_type != CHIP_ARCTURUS)) return; - if (adev->is_atom_fw && - (amdgpu_atomfirmware_mem_ecc_supported(adev) || - amdgpu_atomfirmware_sram_ecc_supported(adev))) - *hw_supported = AMDGPU_RAS_BLOCK_MASK; + if (amdgpu_atomfirmware_mem_ecc_supported(adev)) { + DRM_INFO("HBM ECC is active.\n"); + *hw_supported |= (1 << AMDGPU_RAS_BLOCK__UMC | + 1 << AMDGPU_RAS_BLOCK__DF); + } else + DRM_INFO("HBM ECC is not presented.\n"); + + if (amdgpu_atomfirmware_sram_ecc_supported(adev)) { + DRM_INFO("SRAM ECC is active.\n"); + *hw_supported |= ~(1 << AMDGPU_RAS_BLOCK__UMC | + 1 << AMDGPU_RAS_BLOCK__DF); + } else + DRM_INFO("SRAM ECC is not presented.\n"); + + /* hw_supported needs to be aligned with RAS block mask. */ + *hw_supported &= AMDGPU_RAS_BLOCK_MASK; *supported = amdgpu_ras_enable == 0 ? 
- 0 : *hw_supported & amdgpu_ras_mask; + 0 : *hw_supported & amdgpu_ras_mask; } int amdgpu_ras_init(struct amdgpu_device *adev) @@ -1846,8 +1895,6 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev, goto interrupt; } - amdgpu_ras_debugfs_create(adev, fs_info); - r = amdgpu_ras_sysfs_create(adev, fs_info); if (r) goto sysfs; @@ -1856,7 +1903,6 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev, cleanup: amdgpu_ras_sysfs_remove(adev, ras_block); sysfs: - amdgpu_ras_debugfs_remove(adev, ras_block); if (ih_info->cb) amdgpu_ras_interrupt_remove_handler(adev, ih_info); interrupt: @@ -1873,7 +1919,6 @@ void amdgpu_ras_late_fini(struct amdgpu_device *adev, return; amdgpu_ras_sysfs_remove(adev, ras_block); - amdgpu_ras_debugfs_remove(adev, ras_block); if (ih_info->cb) amdgpu_ras_interrupt_remove_handler(adev, ih_info); amdgpu_ras_feature_enable(adev, ras_block, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index a5fe29a9373eab17ad61d77eab525e8c7946e0be..55c3eceb390d4f4656c62d8c0af064564e4c1587 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -592,6 +592,8 @@ int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev, void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, struct ras_fs_if *head); +void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev); + void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev, struct ras_common_if *head); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index ed15b1fa5e9891a969b6e7cb4075ed5c5dd39768..c0096097bbcf47fa6fa33745df66b1671c6989b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -25,7 +25,6 @@ #include "amdgpu.h" #include "amdgpu_ras.h" #include -#include "smu_v11_0_i2c.h" #include "atom.h" #define EEPROM_I2C_TARGET_ADDR_VEGA20 0xA0 @@ -124,6 +123,7 @@ static int __update_table_header(struct amdgpu_ras_eeprom_control *control, unsigned char *buff) { int ret = 0; + struct amdgpu_device *adev = to_amdgpu_device(control); struct i2c_msg msg = { .addr = 0, .flags = 0, @@ -137,7 +137,7 @@ static int __update_table_header(struct amdgpu_ras_eeprom_control *control, msg.addr = control->i2c_address; - ret = i2c_transfer(&control->eeprom_accessor, &msg, 1); + ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1); if (ret < 1) DRM_ERROR("Failed to write EEPROM table header, ret:%d", ret); @@ -251,33 +251,18 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control) .buf = buff, }; + /* Verify i2c adapter is initialized */ + if (!adev->pm.smu_i2c.algo) + return -ENOENT; + if (!__get_eeprom_i2c_addr(adev, &control->i2c_address)) return -EINVAL; mutex_init(&control->tbl_mutex); - switch (adev->asic_type) { - case CHIP_VEGA20: - ret = smu_v11_0_i2c_eeprom_control_init(&control->eeprom_accessor); - break; - - case CHIP_ARCTURUS: - ret = smu_i2c_eeprom_init(&adev->smu, &control->eeprom_accessor); - break; - - default: - return 0; - } - - if (ret) { - DRM_ERROR("Failed to init I2C controller, ret:%d", ret); - return ret; - } - msg.addr = control->i2c_address; - /* Read/Create table header from EEPROM address 0 */ - ret = i2c_transfer(&control->eeprom_accessor, &msg, 1); + ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1); if (ret < 1) { DRM_ERROR("Failed to read EEPROM table header, ret:%d", ret); return ret; @@ -303,23 +288,6 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control) return ret == 1 ? 
0 : -EIO; } -void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control) -{ - struct amdgpu_device *adev = to_amdgpu_device(control); - - switch (adev->asic_type) { - case CHIP_VEGA20: - smu_v11_0_i2c_eeprom_control_fini(&control->eeprom_accessor); - break; - case CHIP_ARCTURUS: - smu_i2c_eeprom_fini(&adev->smu, &control->eeprom_accessor); - break; - - default: - return; - } -} - static void __encode_table_record_to_buff(struct amdgpu_ras_eeprom_control *control, struct eeprom_table_record *record, unsigned char *buff) @@ -476,7 +444,7 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control, control->next_addr += EEPROM_TABLE_RECORD_SIZE; } - ret = i2c_transfer(&control->eeprom_accessor, msgs, num); + ret = i2c_transfer(&adev->pm.smu_i2c, msgs, num); if (ret < 1) { DRM_ERROR("Failed to process EEPROM table records, ret:%d", ret); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h index ca78f812d4369ee7d789d25a850e82550af4ec3f..7e8647a05df7f0aab483d6dbf5f81c87294e07fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h @@ -44,7 +44,6 @@ struct amdgpu_ras_eeprom_table_header { struct amdgpu_ras_eeprom_control { struct amdgpu_ras_eeprom_table_header tbl_hdr; - struct i2c_adapter eeprom_accessor; uint32_t next_addr; unsigned int num_recs; struct mutex tbl_mutex; @@ -79,7 +78,6 @@ struct eeprom_table_record { }__attribute__((__packed__)); int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control); -void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control); int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control); int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 18e11b0fdc3ef561f195ae3fe8c9c31e838f405f..a7e1d0425ed08b74d640a8fd8a9bd4820d7ec30e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -150,76 +150,6 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring) ring->funcs->end_use(ring); } -/** - * amdgpu_ring_priority_put - restore a ring's priority - * - * @ring: amdgpu_ring structure holding the information - * @priority: target priority - * - * Release a request for executing at @priority - */ -void amdgpu_ring_priority_put(struct amdgpu_ring *ring, - enum drm_sched_priority priority) -{ - int i; - - if (!ring->funcs->set_priority) - return; - - if (atomic_dec_return(&ring->num_jobs[priority]) > 0) - return; - - /* no need to restore if the job is already at the lowest priority */ - if (priority == DRM_SCHED_PRIORITY_NORMAL) - return; - - mutex_lock(&ring->priority_mutex); - /* something higher prio is executing, no need to decay */ - if (ring->priority > priority) - goto out_unlock; - - /* decay priority to the next level with a job available */ - for (i = priority; i >= DRM_SCHED_PRIORITY_MIN; i--) { - if (i == DRM_SCHED_PRIORITY_NORMAL - || atomic_read(&ring->num_jobs[i])) { - ring->priority = i; - ring->funcs->set_priority(ring, i); - break; - } - } - -out_unlock: - mutex_unlock(&ring->priority_mutex); -} - -/** - * amdgpu_ring_priority_get - change the ring's priority - * - * @ring: amdgpu_ring structure holding the information - * @priority: target priority - * - * Request a ring's priority to be raised to @priority (refcounted). 
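The EEPROM hunks above drop the driver-owned adapter and route every access through the SMU-managed adev->pm.smu_i2c, so reads and writes become raw i2c_transfer() calls whose return value counts completed messages. A sketch of that transfer shape (eeprom_write_blob is an invented helper; the adapter and address come from the caller):

#include <linux/errno.h>
#include <linux/i2c.h>

static int eeprom_write_blob(struct i2c_adapter *adap, u16 addr,
			     u8 *buf, u16 len)
{
	struct i2c_msg msg = {
		.addr  = addr,	/* 7-bit slave address */
		.flags = 0,	/* 0 = write, I2C_M_RD would read */
		.len   = len,
		.buf   = buf,
	};
	int ret = i2c_transfer(adap, &msg, 1);

	/* i2c_transfer() returns messages completed, or -errno. */
	if (ret < 0)
		return ret;
	return ret == 1 ? 0 : -EIO;
}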
- */ -void amdgpu_ring_priority_get(struct amdgpu_ring *ring, - enum drm_sched_priority priority) -{ - if (!ring->funcs->set_priority) - return; - - if (atomic_inc_return(&ring->num_jobs[priority]) <= 0) - return; - - mutex_lock(&ring->priority_mutex); - if (priority <= ring->priority) - goto out_unlock; - - ring->priority = priority; - ring->funcs->set_priority(ring, priority); - -out_unlock: - mutex_unlock(&ring->priority_mutex); -} - /** * amdgpu_ring_init - init driver ring struct. * @@ -499,13 +429,6 @@ int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, return 0; } -void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring) -{ -#if defined(CONFIG_DEBUG_FS) - debugfs_remove(ring->ent); -#endif -} - /** * amdgpu_ring_test_helper - tests ring and set sched readiness status * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 24caff085d0046e5e9456b85c8333ab0b002d7e6..9a443013d70d5d4a94099206eb84ecb1662f7b6a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -167,9 +167,6 @@ struct amdgpu_ring_funcs { uint32_t reg0, uint32_t reg1, uint32_t ref, uint32_t mask); void (*emit_tmz)(struct amdgpu_ring *ring, bool start); - /* priority functions */ - void (*set_priority) (struct amdgpu_ring *ring, - enum drm_sched_priority priority); /* Try to soft recover the ring to make the fence signal */ void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid); int (*preempt_ib)(struct amdgpu_ring *ring); @@ -222,6 +219,7 @@ struct amdgpu_ring { struct mutex priority_mutex; /* protected by priority_mutex */ int priority; + bool has_high_prio; #if defined(CONFIG_DEBUG_FS) struct dentry *ent; @@ -258,10 +256,6 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count); void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); void amdgpu_ring_commit(struct amdgpu_ring *ring); void amdgpu_ring_undo(struct amdgpu_ring *ring); -void amdgpu_ring_priority_get(struct amdgpu_ring *ring, - enum drm_sched_priority priority); -void amdgpu_ring_priority_put(struct amdgpu_ring *ring, - enum drm_sched_priority priority); int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned ring_size, struct amdgpu_irq_src *irq_src, unsigned irq_type); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h index d3d4707f21688e8a1cba8a6a96390aa4dcfa7b86..60bb3e8b311881885fc4efcedf0462711f0bebbf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h @@ -126,6 +126,9 @@ struct amdgpu_rlc_funcs { void (*stop)(struct amdgpu_device *adev); void (*reset)(struct amdgpu_device *adev); void (*start)(struct amdgpu_device *adev); + void (*update_spm_vmid)(struct amdgpu_device *adev, unsigned vmid); + void (*rlcg_wreg)(struct amdgpu_device *adev, u32 offset, u32 v); + bool (*is_rlcg_access_range)(struct amdgpu_device *adev, uint32_t reg); }; struct amdgpu_rlc { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c index 7854c053e85dc36e81657381c5672f0b43af397d..250a309e4dee78904172fb7b422cad430f08c7e9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c @@ -93,7 +93,6 @@ int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev, struct ras_ih_if *ih_info = (struct ras_ih_if *)ras_ih_info; struct ras_fs_if fs_info = { .sysfs_name = "sdma_err_count", - .debugfs_name = "sdma_err_inject", }; if (!ih_info) 
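The amdgpu_rlc_funcs additions above (update_spm_vmid, rlcg_wreg, is_rlcg_access_range) are optional hooks, which is why the call sites in this series always test the pointer first; generations that never implemented a hook simply leave it NULL. The guard pattern, standalone and with invented names:

#include <stdio.h>

struct rlc_funcs {
	void (*update_spm_vmid)(void *ctx, unsigned int vmid); /* may be NULL */
};

static void do_update(void *ctx, unsigned int vmid)
{
	printf("SPM VMID <- 0x%x\n", vmid);
}

int main(void)
{
	struct rlc_funcs with_hook = { .update_spm_vmid = do_update };
	struct rlc_funcs without_hook = { 0 };

	/* Guard before calling: older IPs simply lack the hook. */
	if (with_hook.update_spm_vmid)
		with_hook.update_spm_vmid(NULL, 0xf);
	if (without_hook.update_spm_vmid)
		without_hook.update_spm_vmid(NULL, 0xf);	/* skipped */
	return 0;
}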
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index 485335267d780de5b082c78a288f41f570212a2b..4b352206354b884f5d6cae71d5a26af1008b5edc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -56,6 +56,7 @@ struct amdgpu_sdma_ras_funcs { void (*ras_fini)(struct amdgpu_device *adev); int (*query_ras_error_count)(struct amdgpu_device *adev, uint32_t instance, void *ras_error_status); + void (*reset_ras_error_count)(struct amdgpu_device *adev); }; struct amdgpu_sdma { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index fe131c21e8a37ac01b6b62e117564a2f76c8a6bd..9f44ba7d9d972b5e2651761f11a65c102fd4abd1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -968,7 +968,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm) /* Map SG to device */ r = -ENOMEM; nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction); - if (nents != ttm->sg->nents) + if (nents == 0) goto release_sg; /* convert SG to linear array of pages and dma addresses */ @@ -1028,7 +1028,7 @@ int amdgpu_ttm_gart_bind(struct amdgpu_device *adev, struct amdgpu_ttm_tt *gtt = (void *)ttm; int r; - if (abo->flags & AMDGPU_GEM_CREATE_MQD_GFX9) { + if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) { uint64_t page_idx = 1; r = amdgpu_gart_bind(adev, gtt->offset, page_idx, @@ -1036,7 +1036,10 @@ int amdgpu_ttm_gart_bind(struct amdgpu_device *adev, if (r) goto gart_bind_fail; - /* Patch mtype of the second part BO */ + /* The memory type of the first page defaults to UC. Now + * modify the memory type to NC from the second page of + * the BO onward. + */ flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK; flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC); @@ -1837,9 +1840,11 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) *The reserved vram for memory training must be pinned to the specified *place on the VRAM, so reserve it early. 
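The amdgpu_ttm_tt_pin_userptr fix above is about the dma_map_sg() contract: an IOMMU may coalesce scatterlist entries, so a return count smaller than the input is normal, and only zero signals failure; comparing against the input count misreported legitimate mappings as errors. A sketch of the corrected check (map_table is an invented helper):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static int map_table(struct device *dev, struct sg_table *sgt)
{
	int nents = dma_map_sg(dev, sgt->sgl, sgt->nents,
			       DMA_BIDIRECTIONAL);

	if (nents == 0)		/* zero entries mapped: failure */
		return -ENOMEM;

	/* nents may be less than sgt->nents after coalescing; that is
	 * fine, just iterate the mapped list with for_each_sg(.., nents).
	 */
	return 0;
}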
*/ - r = amdgpu_ttm_training_reserve_vram_init(adev); - if (r) - return r; + if (!amdgpu_sriov_vf(adev)) { + r = amdgpu_ttm_training_reserve_vram_init(adev); + if (r) + return r; + } /* allocate memory as required for VGA * This is used for VGA emulation and pre-OS scanout buffers to @@ -2565,13 +2570,3 @@ int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) return 0; #endif } - -void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev) -{ -#if defined(CONFIG_DEBUG_FS) - unsigned i; - - for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++) - debugfs_remove(adev->mman.debugfs_entries[i]); -#endif -} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 7551f37294458a6d3459150bb07a0c2c99974a49..bd05bbb4878d96a69ebc4b28d93c6d4040fe5d53 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -134,6 +134,5 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, struct ttm_mem_reg *mem); int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev); -void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index f4d40855147b6e350198436be1ad1cce1892f503..9dd51f0d2c11be70b0c1b9cfd360fefde6a54300 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -28,7 +28,6 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev) int r; struct ras_fs_if fs_info = { .sysfs_name = "umc_err_count", - .debugfs_name = "umc_err_inject", }; struct ras_ih_if ih_info = { .cb = amdgpu_umc_process_ras_data_cb, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index f96464e2c157fa6fe575a6ee175790caa87815dc..a41272fbcba23ab7f87915b71b9d7ddbc4b32c03 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -493,14 +493,9 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout) { - struct amdgpu_device *adev = ring->adev; struct dma_fence *fence; long r; - /* temporarily disable ib test for sriov */ - if (amdgpu_sriov_vf(adev)) - return 0; - r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL); if (r) goto error; @@ -527,6 +522,9 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring) unsigned i; int r; + if (amdgpu_sriov_vf(adev)) + return 0; + r = amdgpu_ring_alloc(ring, 16); if (r) return r; @@ -656,15 +654,10 @@ err: int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) { - struct amdgpu_device *adev = ring->adev; struct dma_fence *fence = NULL; struct amdgpu_bo *bo = NULL; long r; - /* temporarily disable ib test for sriov */ - if (amdgpu_sriov_vf(adev)) - return 0; - r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &bo, NULL, NULL); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index daaf909d009aacd476f4f4b2e2c6a8606a4a9a5d..f0128f745bd2847c2b7aaee89a3ee7a12a4a2271 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -270,6 +270,9 @@ struct amdgpu_virt { #define amdgpu_sriov_runtime(adev) \ ((adev)->virt.caps & AMDGPU_SRIOV_CAPS_RUNTIME) +#define amdgpu_sriov_fullaccess(adev) \ +(amdgpu_sriov_vf((adev)) && !amdgpu_sriov_runtime((adev))) + #define amdgpu_passthrough(adev) \ ((adev)->virt.caps & 
AMDGPU_PASSTHROUGH_MODE) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index f44c26c373a17ff99ada66c8f8bf985142c59f89..6d9252a27916d1ee5f887846e79427cd3ed9b9a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1080,8 +1080,12 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, struct dma_fence *fence = NULL; bool pasid_mapping_needed = false; unsigned patch_offset = 0; + bool update_spm_vmid_needed = (job->vm && (job->vm->reserved_vmid[vmhub] != NULL)); int r; + if (update_spm_vmid_needed && adev->gfx.rlc.funcs->update_spm_vmid) + adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid); + if (amdgpu_vmid_had_gpu_reset(adev, id)) { gds_switch_needed = true; vm_flush_needed = true; @@ -1442,7 +1446,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params, uint64_t incr, entry_end, pe_start; struct amdgpu_bo *pt; - if (flags & AMDGPU_PTE_VALID) { + if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) { /* make sure that the page tables covering the * address range are actually allocated */ @@ -1599,14 +1603,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, goto error_unlock; } - if (flags & AMDGPU_PTE_VALID) { + if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) { struct amdgpu_bo *root = vm->root.base.bo; if (!dma_fence_is_signaled(vm->last_direct)) amdgpu_bo_fence(root, vm->last_direct, true); - - if (!dma_fence_is_signaled(vm->last_delayed)) - amdgpu_bo_fence(root, vm->last_delayed, true); } r = vm->update_funcs->prepare(¶ms, resv, sync_mode); @@ -1714,7 +1715,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, AMDGPU_GPU_PAGES_IN_CPU_PAGE; } - } else if (flags & AMDGPU_PTE_VALID) { + } else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) { addr += bo_adev->vm_manager.vram_base_offset; addr += pfn << PAGE_SHIFT; } @@ -2584,8 +2585,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo) return false; /* Don't evict VM page tables while they are updated */ - if (!dma_fence_is_signaled(bo_base->vm->last_direct) || - !dma_fence_is_signaled(bo_base->vm->last_delayed)) { + if (!dma_fence_is_signaled(bo_base->vm->last_direct)) { amdgpu_vm_eviction_unlock(bo_base->vm); return false; } @@ -2762,11 +2762,7 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) if (timeout <= 0) return timeout; - timeout = dma_fence_wait_timeout(vm->last_direct, true, timeout); - if (timeout <= 0) - return timeout; - - return dma_fence_wait_timeout(vm->last_delayed, true, timeout); + return dma_fence_wait_timeout(vm->last_direct, true, timeout); } /** @@ -2839,7 +2835,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, vm->update_funcs = &amdgpu_vm_sdma_funcs; vm->last_update = NULL; vm->last_direct = dma_fence_get_stub(); - vm->last_delayed = dma_fence_get_stub(); mutex_init(&vm->eviction_lock); vm->evicting = false; @@ -2894,7 +2889,6 @@ error_free_root: error_free_delayed: dma_fence_put(vm->last_direct); - dma_fence_put(vm->last_delayed); drm_sched_entity_destroy(&vm->delayed); error_free_direct: @@ -3097,8 +3091,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) dma_fence_wait(vm->last_direct, false); dma_fence_put(vm->last_direct); - dma_fence_wait(vm->last_delayed, false); - dma_fence_put(vm->last_delayed); list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) { @@ -3209,6 +3201,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, 
void *data, struct drm_file *filp) union drm_amdgpu_vm *args = data; struct amdgpu_device *adev = dev->dev_private; struct amdgpu_fpriv *fpriv = filp->driver_priv; + long timeout = msecs_to_jiffies(2000); int r; switch (args->in.op) { @@ -3220,6 +3213,21 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) return r; break; case AMDGPU_VM_OP_UNRESERVE_VMID: + if (amdgpu_sriov_runtime(adev)) + timeout = 8 * timeout; + + /* Wait vm idle to make sure the vmid set in SPM_VMID is + * not referenced anymore. + */ + r = amdgpu_bo_reserve(fpriv->vm.root.base.bo, true); + if (r) + return r; + + r = amdgpu_vm_wait_idle(&fpriv->vm, timeout); + if (r < 0) + return r; + + amdgpu_bo_unreserve(fpriv->vm.root.base.bo); amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0); break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index d00648ee8d54ae66fc8e8ff3327e3a1a5cafdcd7..06fe30e1492d69e66d15233af1fa168be406c8b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -276,7 +276,6 @@ struct amdgpu_vm { /* Last submission to the scheduler entities */ struct dma_fence *last_direct; - struct dma_fence *last_delayed; unsigned int pasid; /* dedicated to vm */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c index 4cc7881f438c984afde0a19ccdb8ce8e8167ed55..cf96c335b258b479c37e10fadad3739a51862ad2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c @@ -104,12 +104,13 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p, if (r) goto error; - tmp = dma_fence_get(f); - if (p->direct) + if (p->direct) { + tmp = dma_fence_get(f); swap(p->vm->last_direct, tmp); - else - swap(p->vm->last_delayed, tmp); - dma_fence_put(tmp); + dma_fence_put(tmp); + } else { + dma_resv_add_shared_fence(p->vm->root.base.bo->tbo.base.resv, f); + } if (fence && !p->direct) swap(*fence, f); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 7a89c91f7b8062271dcd1f70f320ff0f7a5e6606..95b3327168acb0354b4e03c09cfa3be041fe2f8a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -26,7 +26,12 @@ #include "amdgpu_xgmi.h" #include "amdgpu_smu.h" #include "amdgpu_ras.h" +#include "soc15.h" #include "df/df_3_6_offset.h" +#include "xgmi/xgmi_4_0_0_smn.h" +#include "xgmi/xgmi_4_0_0_sh_mask.h" +#include "wafl/wafl2_4_0_0_smn.h" +#include "wafl/wafl2_4_0_0_sh_mask.h" static DEFINE_MUTEX(xgmi_mutex); @@ -36,6 +41,109 @@ static DEFINE_MUTEX(xgmi_mutex); static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE]; static unsigned hive_count = 0; +static const int xgmi_pcs_err_status_reg_vg20[] = { + smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS, + smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000, +}; + +static const int wafl_pcs_err_status_reg_vg20[] = { + smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, + smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000, +}; + +static const int xgmi_pcs_err_status_reg_arct[] = { + smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS, + smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000, + smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x500000, + smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x600000, + smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x700000, + smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x800000, +}; + +/* same as vg20*/ +static const int wafl_pcs_err_status_reg_arct[] = { + 
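In the amdgpu_vm_sdma_commit hunk above, direct submissions keep replacing one tracked fence while delayed submissions are now published on the root BO's reservation object instead. The get/swap/put sequence that keeps the tracked fence's refcount balanced is worth spelling out; sketched below with an invented helper (locking elided):

#include <linux/dma-fence.h>
#include <linux/kernel.h>

/* Replace *slot with new_f while always holding exactly one
 * reference on whatever the slot currently points at.
 */
static void track_latest_fence(struct dma_fence **slot,
			       struct dma_fence *new_f)
{
	struct dma_fence *tmp = dma_fence_get(new_f);	/* +1 on new */

	swap(*slot, tmp);	/* slot: new fence, tmp: old fence */
	dma_fence_put(tmp);	/* drop the reference on the old one */
}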
smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, + smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000, +}; + +static const struct amdgpu_pcs_ras_field xgmi_pcs_ras_fields[] = { + {"XGMI PCS DataLossErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)}, + {"XGMI PCS TrainingErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TrainingErr)}, + {"XGMI PCS CRCErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, CRCErr)}, + {"XGMI PCS BERExceededErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, BERExceededErr)}, + {"XGMI PCS TxMetaDataErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TxMetaDataErr)}, + {"XGMI PCS ReplayBufParityErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayBufParityErr)}, + {"XGMI PCS DataParityErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataParityErr)}, + {"XGMI PCS ReplayFifoOverflowErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoOverflowErr)}, + {"XGMI PCS ReplayFifoUnderflowErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)}, + {"XGMI PCS ElasticFifoOverflowErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ElasticFifoOverflowErr)}, + {"XGMI PCS DeskewErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DeskewErr)}, + {"XGMI PCS DataStartupLimitErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataStartupLimitErr)}, + {"XGMI PCS FCInitTimeoutErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, FCInitTimeoutErr)}, + {"XGMI PCS RecoveryTimeoutErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryTimeoutErr)}, + {"XGMI PCS ReadySerialTimeoutErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialTimeoutErr)}, + {"XGMI PCS ReadySerialAttemptErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialAttemptErr)}, + {"XGMI PCS RecoveryAttemptErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryAttemptErr)}, + {"XGMI PCS RecoveryRelockAttemptErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)}, +}; + +static const struct amdgpu_pcs_ras_field wafl_pcs_ras_fields[] = { + {"WAFL PCS DataLossErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataLossErr)}, + {"WAFL PCS TrainingErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TrainingErr)}, + {"WAFL PCS CRCErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, CRCErr)}, + {"WAFL PCS BERExceededErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, BERExceededErr)}, + {"WAFL PCS TxMetaDataErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TxMetaDataErr)}, + {"WAFL PCS ReplayBufParityErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayBufParityErr)}, + {"WAFL PCS DataParityErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataParityErr)}, + {"WAFL PCS ReplayFifoOverflowErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoOverflowErr)}, + {"WAFL PCS ReplayFifoUnderflowErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)}, + {"WAFL PCS ElasticFifoOverflowErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ElasticFifoOverflowErr)}, + {"WAFL PCS DeskewErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DeskewErr)}, + {"WAFL PCS DataStartupLimitErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataStartupLimitErr)}, + {"WAFL PCS FCInitTimeoutErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, 
FCInitTimeoutErr)}, + {"WAFL PCS RecoveryTimeoutErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryTimeoutErr)}, + {"WAFL PCS ReadySerialTimeoutErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialTimeoutErr)}, + {"WAFL PCS ReadySerialAttemptErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialAttemptErr)}, + {"WAFL PCS RecoveryAttemptErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryAttemptErr)}, + {"WAFL PCS RecoveryRelockAttemptErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)}, +}; + void *amdgpu_xgmi_hive_try_lock(struct amdgpu_hive_info *hive) { return &hive->device_list; @@ -490,7 +598,6 @@ int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev) }; struct ras_fs_if fs_info = { .sysfs_name = "xgmi_wafl_err_count", - .debugfs_name = "xgmi_wafl_err_inject", }; if (!adev->gmc.xgmi.supported || @@ -560,3 +667,99 @@ uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev, return addr + dram_base_addr; } + +static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev, + uint32_t value, + uint32_t *ue_count, + uint32_t *ce_count, + bool is_xgmi_pcs) +{ + int i; + int ue_cnt; + + if (is_xgmi_pcs) { + /* query xgmi pcs error status, + * only ue is supported */ + for (i = 0; i < ARRAY_SIZE(xgmi_pcs_ras_fields); i ++) { + ue_cnt = (value & + xgmi_pcs_ras_fields[i].pcs_err_mask) >> + xgmi_pcs_ras_fields[i].pcs_err_shift; + if (ue_cnt) { + dev_info(adev->dev, "%s detected\n", + xgmi_pcs_ras_fields[i].err_name); + *ue_count += ue_cnt; + } + } + } else { + /* query wafl pcs error status, + * only ue is supported */ + for (i = 0; i < ARRAY_SIZE(wafl_pcs_ras_fields); i++) { + ue_cnt = (value & + wafl_pcs_ras_fields[i].pcs_err_mask) >> + wafl_pcs_ras_fields[i].pcs_err_shift; + if (ue_cnt) { + dev_info(adev->dev, "%s detected\n", + wafl_pcs_ras_fields[i].err_name); + *ue_count += ue_cnt; + } + } + } + + return 0; +} + +int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + int i; + uint32_t data; + uint32_t ue_cnt = 0, ce_cnt = 0; + + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL)) + return -EINVAL; + + err_data->ue_count = 0; + err_data->ce_count = 0; + + switch (adev->asic_type) { + case CHIP_ARCTURUS: + /* check xgmi pcs error */ + for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++) { + data = RREG32_PCIE(xgmi_pcs_err_status_reg_arct[i]); + if (data) + amdgpu_xgmi_query_pcs_error_status(adev, + data, &ue_cnt, &ce_cnt, true); + } + /* check wafl pcs error */ + for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_arct); i++) { + data = RREG32_PCIE(wafl_pcs_err_status_reg_arct[i]); + if (data) + amdgpu_xgmi_query_pcs_error_status(adev, + data, &ue_cnt, &ce_cnt, false); + } + break; + case CHIP_VEGA20: + default: + /* check xgmi pcs error */ + for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++) { + data = RREG32_PCIE(xgmi_pcs_err_status_reg_vg20[i]); + if (data) + amdgpu_xgmi_query_pcs_error_status(adev, + data, &ue_cnt, &ce_cnt, true); + } + /* check wafl pcs error */ + for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_vg20); i++) { + data = RREG32_PCIE(wafl_pcs_err_status_reg_vg20[i]); + if (data) + amdgpu_xgmi_query_pcs_error_status(adev, + data, &ue_cnt, &ce_cnt, false); + } + break; + } + + err_data->ue_count += ue_cnt; + err_data->ce_count += ce_cnt; + + return 0; +} diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h index 2aa61adee4592cdf92ee1177b0359982e0d3c73d..4a92067fe595985609fc4f5b29a4a06704631728 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h @@ -37,6 +37,12 @@ struct amdgpu_hive_info { struct task_barrier tb; }; +struct amdgpu_pcs_ras_field { + const char *err_name; + uint32_t pcs_err_mask; + uint32_t pcs_err_shift; +}; + struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock); int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev); int amdgpu_xgmi_add_device(struct amdgpu_device *adev); @@ -48,6 +54,8 @@ int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev); void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev); uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev, uint64_t addr); +int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status); static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev, struct amdgpu_device *bo_adev) diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c index dd30f4e61a8cd97c73c06dc756177dbc33a79d52..cae426c7c0863b06c7c50fcbefae95ce37262ba0 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.c +++ b/drivers/gpu/drm/amd/amdgpu/atom.c @@ -744,8 +744,8 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) cjiffies = jiffies; if (time_after(cjiffies, ctx->last_jump_jiffies)) { cjiffies -= ctx->last_jump_jiffies; - if ((jiffies_to_msecs(cjiffies) > 5000)) { - DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n"); + if ((jiffies_to_msecs(cjiffies) > 10000)) { + DRM_ERROR("atombios stuck in loop for more than 10secs aborting\n"); ctx->abort = true; } } else { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 8b17f182d53294d538055c58920821bd0cb2f748..c8f2aa1db13b9a4c7099f8a463777a9cf58c854a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -35,6 +35,8 @@ #include "gc/gc_10_1_0_offset.h" #include "gc/gc_10_1_0_sh_mask.h" +#include "smuio/smuio_11_0_0_offset.h" +#include "smuio/smuio_11_0_0_sh_mask.h" #include "navi10_enum.h" #include "hdp/hdp_5_0_0_offset.h" #include "ivsrcid/gfx/irqsrcs_gfx_10_1.h" @@ -52,7 +54,7 @@ * 1. Primary ring * 2. 
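Looking back at the XGMI/WAFL decoding introduced just above: each PCS status word is picked apart with a table of {name, mask, shift} entries, the layout that amdgpu_pcs_ras_field gives it. A self-contained rendering of that table-driven decode, with made-up field positions:

#include <stdint.h>
#include <stdio.h>

struct field { const char *name; uint32_t mask; uint32_t shift; };

/* Field layout invented for illustration only. */
static const struct field fields[] = {
	{ "DataLossErr", 0x0000000f, 0 },
	{ "TrainingErr", 0x000000f0, 4 },
	{ "CRCErr",      0x00000f00, 8 },
};

int main(void)
{
	uint32_t status = 0x00000321, total = 0;
	unsigned int i;

	for (i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) {
		uint32_t cnt = (status & fields[i].mask) >> fields[i].shift;

		if (cnt) {
			printf("%s detected (%u)\n", fields[i].name, cnt);
			total += cnt;
		}
	}
	printf("total errors: %u\n", total);	/* 1 + 2 + 3 = 6 */
	return 0;
}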
Async ring */ -#define GFX10_NUM_GFX_RINGS 2 +#define GFX10_NUM_GFX_RINGS_NV1X 1 #define GFX10_MEC_HPD_SIZE 2048 #define F32_CE_PROGRAM_RAM_SIZE 65536 @@ -222,6 +224,49 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00800000) }; +static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v) +{ + static void *scratch_reg0; + static void *scratch_reg1; + static void *scratch_reg2; + static void *scratch_reg3; + static void *spare_int; + static uint32_t grbm_cntl; + static uint32_t grbm_idx; + uint32_t i = 0; + uint32_t retries = 50000; + + scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4; + scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4; + scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2)*4; + scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3)*4; + spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4; + + grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL; + grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX; + + if (amdgpu_sriov_runtime(adev)) { + pr_err("shouldn't call rlcg write register during runtime\n"); + return; + } + + writel(v, scratch_reg0); + writel(offset | 0x80000000, scratch_reg1); + writel(1, spare_int); + for (i = 0; i < retries; i++) { + u32 tmp; + + tmp = readl(scratch_reg1); + if (!(tmp & 0x80000000)) + break; + + udelay(10); + } + + if (i >= retries) + pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset); +} + static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] = { /* Pending on emulation bring up */ @@ -500,29 +545,28 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; struct dma_fence *f = NULL; - uint32_t scratch; - uint32_t tmp = 0; + unsigned index; + uint64_t gpu_addr; + uint32_t tmp; long r; - r = amdgpu_gfx_scratch_get(adev, &scratch); - if (r) { - DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r); + r = amdgpu_device_wb_get(adev, &index); + if (r) return r; - } - - WREG32(scratch, 0xCAFEDEAD); + gpu_addr = adev->wb.gpu_addr + (index * 4); + adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(adev, NULL, 256, &ib); - if (r) { - DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); + r = amdgpu_ib_get(adev, NULL, 16, &ib); + if (r) goto err1; - } - ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); - ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START)); - ib.ptr[2] = 0xDEADBEEF; - ib.length_dw = 3; + ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3); + ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM; + ib.ptr[2] = lower_32_bits(gpu_addr); + ib.ptr[3] = upper_32_bits(gpu_addr); + ib.ptr[4] = 0xDEADBEEF; + ib.length_dw = 5; r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); if (r) @@ -530,15 +574,13 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) r = dma_fence_wait_timeout(f, false, timeout); if (r == 0) { - DRM_ERROR("amdgpu: IB test timed out.\n"); r = -ETIMEDOUT; goto err2; } else if (r < 0) { - DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto err2; } - tmp = RREG32(scratch); + tmp = adev->wb.wb[index]; if (tmp == 0xDEADBEEF) r = 0; else @@ -547,8 +589,7 @@ 
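gfx_v10_rlcg_wreg() above is a mailbox handshake with the RLC firmware: the value and the offset-plus-busy-bit land in scratch registers, a spare interrupt rings the doorbell, and the CPU polls until firmware clears the busy bit or the retry budget (50000 iterations of udelay(10), roughly half a second) runs out. The bounded-poll skeleton in isolation; rlcg_style_handshake and BUSY_BIT are illustrative names, not driver symbols:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

#define BUSY_BIT 0x80000000u

static int rlcg_style_handshake(void __iomem *data_reg,
				void __iomem *cmd_reg, u32 value, u32 offset)
{
	u32 i, retries = 50000;

	writel(value, data_reg);
	writel(offset | BUSY_BIT, cmd_reg);	/* firmware clears BUSY */
	/* (the doorbell poke through the spare interrupt is omitted) */

	for (i = 0; i < retries; i++) {
		if (!(readl(cmd_reg) & BUSY_BIT))
			return 0;		/* acknowledged */
		udelay(10);
	}
	return -ETIMEDOUT;			/* budget exhausted */
}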
err2: amdgpu_ib_free(adev, &ib, NULL); dma_fence_put(f); err1: - amdgpu_gfx_scratch_free(adev, scratch); - + amdgpu_device_wb_free(adev, index); return r; } @@ -1016,6 +1057,10 @@ static int gfx_v10_0_rlc_init(struct amdgpu_device *adev) return r; } + /* init spm vmid with 0xf */ + if (adev->gfx.rlc.funcs->update_spm_vmid) + adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf); + return 0; } @@ -1304,7 +1349,7 @@ static int gfx_v10_0_sw_init(void *handle) case CHIP_NAVI14: case CHIP_NAVI12: adev->gfx.me.num_me = 1; - adev->gfx.me.num_pipe_per_me = 2; + adev->gfx.me.num_pipe_per_me = 1; adev->gfx.me.num_queue_per_pipe = 1; adev->gfx.mec.num_mec = 2; adev->gfx.mec.num_pipe_per_mec = 4; @@ -1783,11 +1828,11 @@ static int gfx_v10_0_init_csb(struct amdgpu_device *adev) adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); /* csib */ - WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI, + WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_HI, adev->gfx.rlc.clear_state_gpu_addr >> 32); - WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_LO, + WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_LO, adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc); - WREG32_SOC15(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size); + WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size); return 0; } @@ -1895,6 +1940,11 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev) if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */ gfx_v10_0_rlc_enable_srm(adev); } else { + if (amdgpu_sriov_vf(adev)) { + gfx_v10_0_init_csb(adev); + return 0; + } + adev->gfx.rlc.funcs->stop(adev); /* disable CG */ @@ -2395,7 +2445,7 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) for (i = 0; i < adev->gfx.num_gfx_rings; i++) adev->gfx.gfx_ring[i].sched.ready = false; } - WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp); + WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp); for (i = 0; i < adev->usec_timeout; i++) { if (RREG32_SOC15(GC, 0, mmCP_STAT) == 0) @@ -2710,18 +2760,20 @@ static int gfx_v10_0_cp_gfx_start(struct amdgpu_device *adev) amdgpu_ring_commit(ring); /* submit cs packet to copy state 0 to next available state */ - ring = &adev->gfx.gfx_ring[1]; - r = amdgpu_ring_alloc(ring, 2); - if (r) { - DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); - return r; - } - - amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); - amdgpu_ring_write(ring, 0); + if (adev->gfx.num_gfx_rings > 1) { + /* maximum supported gfx ring is 2 */ + ring = &adev->gfx.gfx_ring[1]; + r = amdgpu_ring_alloc(ring, 2); + if (r) { + DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); + return r; + } - amdgpu_ring_commit(ring); + amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); + amdgpu_ring_write(ring, 0); + amdgpu_ring_commit(ring); + } return 0; } @@ -2818,39 +2870,41 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev) mutex_unlock(&adev->srbm_mutex); /* Init gfx ring 1 for pipe 1 */ - mutex_lock(&adev->srbm_mutex); - gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1); - ring = &adev->gfx.gfx_ring[1]; - rb_bufsz = order_base_2(ring->ring_size / 8); - tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz); - tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2); - WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp); - /* Initialize the ring buffer's write pointers */ - ring->wptr = 0; - WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr)); - WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr)); - /* Set the wb address wether it's enabled or not */ - rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4); - 
WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr)); - WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & - CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK); - wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); - WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, - lower_32_bits(wptr_gpu_addr)); - WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, - upper_32_bits(wptr_gpu_addr)); - - mdelay(1); - WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp); - - rb_addr = ring->gpu_addr >> 8; - WREG32_SOC15(GC, 0, mmCP_RB1_BASE, rb_addr); - WREG32_SOC15(GC, 0, mmCP_RB1_BASE_HI, upper_32_bits(rb_addr)); - WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1); - - gfx_v10_0_cp_gfx_set_doorbell(adev, ring); - mutex_unlock(&adev->srbm_mutex); - + if (adev->gfx.num_gfx_rings > 1) { + mutex_lock(&adev->srbm_mutex); + gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1); + /* maximum supported gfx ring is 2 */ + ring = &adev->gfx.gfx_ring[1]; + rb_bufsz = order_base_2(ring->ring_size / 8); + tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz); + tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2); + WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp); + /* Initialize the ring buffer's write pointers */ + ring->wptr = 0; + WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr)); + /* Set the wb address whether it's enabled or not */ + rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4); + WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr)); + WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & + CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK); + wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); + WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, + lower_32_bits(wptr_gpu_addr)); + WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, + upper_32_bits(wptr_gpu_addr)); + + mdelay(1); + WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp); + + rb_addr = ring->gpu_addr >> 8; + WREG32_SOC15(GC, 0, mmCP_RB1_BASE, rb_addr); + WREG32_SOC15(GC, 0, mmCP_RB1_BASE_HI, upper_32_bits(rb_addr)); + WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1); + + gfx_v10_0_cp_gfx_set_doorbell(adev, ring); + mutex_unlock(&adev->srbm_mutex); + } /* Switch to pipe 0 */ mutex_lock(&adev->srbm_mutex); gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID0); @@ -3207,6 +3261,22 @@ done: return r; } +static void gfx_v10_0_compute_mqd_set_priority(struct amdgpu_ring *ring, struct v10_compute_mqd *mqd) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { + if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) { + mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH; + ring->has_high_prio = true; + mqd->cp_hqd_queue_priority = + AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM; + } else { + ring->has_high_prio = false; + } + } +} + static int gfx_v10_0_compute_mqd_init(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; @@ -3332,6 +3402,9 @@ static int gfx_v10_0_compute_mqd_init(struct amdgpu_ring *ring) tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3); mqd->cp_hqd_ib_control = tmp; + /* set static priority for a compute queue/ring */ + gfx_v10_0_compute_mqd_set_priority(ring, mqd); + /* map_queues packet doesn't need activate the queue, * so only kiq need set this field. 
*/ @@ -3508,6 +3581,7 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring) /* reset ring buffer */ ring->wptr = 0; + atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0); amdgpu_ring_clear_ring(ring); } else { amdgpu_ring_clear_ring(ring); @@ -3920,9 +3994,8 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev) amdgpu_gfx_off_ctrl(adev, false); mutex_lock(&adev->gfx.gpu_clock_mutex); - WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); - clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) | - ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); + clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER) | + ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER) << 32ULL); mutex_unlock(&adev->gfx.gpu_clock_mutex); amdgpu_gfx_off_ctrl(adev, true); return clock; @@ -3961,7 +4034,8 @@ static int gfx_v10_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS; + adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS_NV1X; + adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS; gfx_v10_0_set_kiq_pm4_funcs(adev); @@ -4209,6 +4283,45 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev, return 0; } +static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) +{ + u32 data; + + data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL); + + data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK; + data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT; + + WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data); +} + +static bool gfx_v10_0_check_rlcg_range(struct amdgpu_device *adev, + uint32_t offset, + struct soc15_reg_rlcg *entries, int arr_size) +{ + int i; + uint32_t reg; + + if (!entries) + return false; + + for (i = 0; i < arr_size; i++) { + const struct soc15_reg_rlcg *entry; + + entry = &entries[i]; + reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg; + if (offset == reg) + return true; + } + + return false; +} + +static bool gfx_v10_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset) +{ + return gfx_v10_0_check_rlcg_range(adev, offset, NULL, 0); +} + static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = { .is_rlc_enabled = gfx_v10_0_is_rlc_enabled, .set_safe_mode = gfx_v10_0_set_safe_mode, @@ -4219,7 +4332,10 @@ static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = { .resume = gfx_v10_0_rlc_resume, .stop = gfx_v10_0_rlc_stop, .reset = gfx_v10_0_rlc_reset, - .start = gfx_v10_0_rlc_start + .start = gfx_v10_0_rlc_start, + .update_spm_vmid = gfx_v10_0_update_spm_vmid, + .rlcg_wreg = gfx_v10_rlcg_wreg, + .is_rlcg_access_range = gfx_v10_0_is_rlcg_access_range, }; static int gfx_v10_0_set_powergating_state(void *handle, @@ -4414,7 +4530,7 @@ static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, if (flags & AMDGPU_IB_PREEMPTED) control |= INDIRECT_BUFFER_PRE_RESUME(1); - if (!(ib->flags & AMDGPU_IB_FLAG_CE)) + if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid) gfx_v10_0_ring_emit_de_meta(ring, (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? 
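The update_spm_vmid() implementations added for gfx v7/v8/v9/v10 in this series are one and the same read-modify-write on a single register field: clear the field through its mask, OR in the shifted new value, write the word back. Standalone, with made-up mask and shift values:

#include <stdint.h>
#include <stdio.h>

#define SPM_VMID_MASK  0x000000f0u	/* illustrative field layout */
#define SPM_VMID_SHIFT 4

static uint32_t set_spm_vmid(uint32_t reg, uint32_t vmid)
{
	reg &= ~SPM_VMID_MASK;				 /* clear field */
	reg |= (vmid << SPM_VMID_SHIFT) & SPM_VMID_MASK; /* insert value */
	return reg;
}

int main(void)
{
	printf("0x%08x\n", set_spm_vmid(0xdeadbe0fu, 0xf)); /* 0xdeadbeff */
	return 0;
}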
true : false); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 8f20a5dd44fe71a77f374894c61b99073d248505..733d398c61ccb7b6b94c4b4384f4fd4f6f1d4ddb 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -3346,6 +3346,10 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) return r; } + /* init spm vmid with 0xf */ + if (adev->gfx.rlc.funcs->update_spm_vmid) + adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf); + return 0; } @@ -3570,6 +3574,18 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev) return 0; } +static void gfx_v7_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) +{ + u32 data; + + data = RREG32(mmRLC_SPM_VMID); + + data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK; + data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT; + + WREG32(mmRLC_SPM_VMID, data); +} + static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable) { u32 data, orig, tmp, tmp2; @@ -4221,7 +4237,8 @@ static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = { .resume = gfx_v7_0_rlc_resume, .stop = gfx_v7_0_rlc_stop, .reset = gfx_v7_0_rlc_reset, - .start = gfx_v7_0_rlc_start + .start = gfx_v7_0_rlc_start, + .update_spm_vmid = gfx_v7_0_update_spm_vmid }; static int gfx_v7_0_early_init(void *handle) @@ -4338,6 +4355,11 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev) adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG); mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg; + adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg, + MC_ARB_RAMCFG, NOOFBANK); + adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg, + MC_ARB_RAMCFG, NOOFRANKS); + adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes; adev->gfx.config.mem_max_burst_length_bytes = 256; if (adev->flags & AMD_IS_APU) { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index fa245973de12cbd07579a08d9c243529bf49f43a..fc32586ef80b1a5c91117b5f469a17826f349408 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1318,6 +1318,10 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) return r; } + /* init spm vmid with 0xf */ + if (adev->gfx.rlc.funcs->update_spm_vmid) + adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf); + return 0; } @@ -1820,6 +1824,11 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev) adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG); mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg; + adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg, + MC_ARB_RAMCFG, NOOFBANK); + adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg, + MC_ARB_RAMCFG, NOOFRANKS); + adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes; adev->gfx.config.mem_max_burst_length_bytes = 256; if (adev->flags & AMD_IS_APU) { @@ -4421,6 +4430,22 @@ static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req) return r; } +static void gfx_v8_0_mqd_set_priority(struct amdgpu_ring *ring, struct vi_mqd *mqd) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { + if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) { + mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH; + ring->has_high_prio = true; + mqd->cp_hqd_queue_priority = + AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM; + } else { + ring->has_high_prio = false; + } + } +} + static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring) { struct amdgpu_device 
*adev = ring->adev; @@ -4544,9 +4569,6 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring) /* defaults */ mqd->cp_hqd_eop_rptr = RREG32(mmCP_HQD_EOP_RPTR); mqd->cp_hqd_eop_wptr = RREG32(mmCP_HQD_EOP_WPTR); - mqd->cp_hqd_pipe_priority = RREG32(mmCP_HQD_PIPE_PRIORITY); - mqd->cp_hqd_queue_priority = RREG32(mmCP_HQD_QUEUE_PRIORITY); - mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM); mqd->cp_hqd_ctx_save_base_addr_lo = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_LO); mqd->cp_hqd_ctx_save_base_addr_hi = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_HI); mqd->cp_hqd_cntl_stack_offset = RREG32(mmCP_HQD_CNTL_STACK_OFFSET); @@ -4558,6 +4580,10 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring) mqd->cp_hqd_eop_wptr_mem = RREG32(mmCP_HQD_EOP_WPTR_MEM); mqd->cp_hqd_eop_dones = RREG32(mmCP_HQD_EOP_DONES); + /* set static priority for a queue/ring */ + gfx_v8_0_mqd_set_priority(ring, mqd); + mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM); + /* map_queues packet doesn't need activate the queue, * so only kiq need set this field. */ @@ -5589,6 +5615,18 @@ static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev) } } +static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) +{ + u32 data; + + data = RREG32(mmRLC_SPM_VMID); + + data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK; + data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT; + + WREG32(mmRLC_SPM_VMID, data); +} + static const struct amdgpu_rlc_funcs iceland_rlc_funcs = { .is_rlc_enabled = gfx_v8_0_is_rlc_enabled, .set_safe_mode = gfx_v8_0_set_safe_mode, @@ -5600,7 +5638,8 @@ static const struct amdgpu_rlc_funcs iceland_rlc_funcs = { .resume = gfx_v8_0_rlc_resume, .stop = gfx_v8_0_rlc_stop, .reset = gfx_v8_0_rlc_reset, - .start = gfx_v8_0_rlc_start + .start = gfx_v8_0_rlc_start, + .update_spm_vmid = gfx_v8_0_update_spm_vmid }; static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev, @@ -6094,7 +6133,7 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) { control |= INDIRECT_BUFFER_PRE_ENB(1); - if (!(ib->flags & AMDGPU_IB_FLAG_CE)) + if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid) gfx_v8_0_ring_emit_de_meta(ring); } @@ -6236,104 +6275,6 @@ static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring) WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); } -static void gfx_v8_0_ring_set_pipe_percent(struct amdgpu_ring *ring, - bool acquire) -{ - struct amdgpu_device *adev = ring->adev; - int pipe_num, tmp, reg; - int pipe_percent = acquire ? 
SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1; - - pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe; - - /* first me only has 2 entries, GFX and HP3D */ - if (ring->me > 0) - pipe_num -= 2; - - reg = mmSPI_WCL_PIPE_PERCENT_GFX + pipe_num; - tmp = RREG32(reg); - tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent); - WREG32(reg, tmp); -} - -static void gfx_v8_0_pipe_reserve_resources(struct amdgpu_device *adev, - struct amdgpu_ring *ring, - bool acquire) -{ - int i, pipe; - bool reserve; - struct amdgpu_ring *iring; - - mutex_lock(&adev->gfx.pipe_reserve_mutex); - pipe = amdgpu_gfx_mec_queue_to_bit(adev, ring->me, ring->pipe, 0); - if (acquire) - set_bit(pipe, adev->gfx.pipe_reserve_bitmap); - else - clear_bit(pipe, adev->gfx.pipe_reserve_bitmap); - - if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) { - /* Clear all reservations - everyone reacquires all resources */ - for (i = 0; i < adev->gfx.num_gfx_rings; ++i) - gfx_v8_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i], - true); - - for (i = 0; i < adev->gfx.num_compute_rings; ++i) - gfx_v8_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i], - true); - } else { - /* Lower all pipes without a current reservation */ - for (i = 0; i < adev->gfx.num_gfx_rings; ++i) { - iring = &adev->gfx.gfx_ring[i]; - pipe = amdgpu_gfx_mec_queue_to_bit(adev, - iring->me, - iring->pipe, - 0); - reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap); - gfx_v8_0_ring_set_pipe_percent(iring, reserve); - } - - for (i = 0; i < adev->gfx.num_compute_rings; ++i) { - iring = &adev->gfx.compute_ring[i]; - pipe = amdgpu_gfx_mec_queue_to_bit(adev, - iring->me, - iring->pipe, - 0); - reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap); - gfx_v8_0_ring_set_pipe_percent(iring, reserve); - } - } - - mutex_unlock(&adev->gfx.pipe_reserve_mutex); -} - -static void gfx_v8_0_hqd_set_priority(struct amdgpu_device *adev, - struct amdgpu_ring *ring, - bool acquire) -{ - uint32_t pipe_priority = acquire ? 0x2 : 0x0; - uint32_t queue_priority = acquire ? 
0xf : 0x0; - - mutex_lock(&adev->srbm_mutex); - vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0); - - WREG32(mmCP_HQD_PIPE_PRIORITY, pipe_priority); - WREG32(mmCP_HQD_QUEUE_PRIORITY, queue_priority); - - vi_srbm_select(adev, 0, 0, 0, 0); - mutex_unlock(&adev->srbm_mutex); -} -static void gfx_v8_0_ring_set_priority_compute(struct amdgpu_ring *ring, - enum drm_sched_priority priority) -{ - struct amdgpu_device *adev = ring->adev; - bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW; - - if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE) - return; - - gfx_v8_0_hqd_set_priority(adev, ring, acquire); - gfx_v8_0_pipe_reserve_resources(adev, ring, acquire); -} - static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring, u64 addr, u64 seq, unsigned flags) @@ -6966,7 +6907,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { .test_ib = gfx_v8_0_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, - .set_priority = gfx_v8_0_ring_set_priority_compute, .emit_wreg = gfx_v8_0_ring_emit_wreg, }; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 6a49fe452f295b87a77266a19c8d81cc76ee54fc..37c8231f14073a2c7a8f05d9e24c6dba25f94685 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -697,6 +697,11 @@ static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00), }; +static const struct soc15_reg_rlcg rlcg_access_gc_9_0[] = { + {SOC15_REG_ENTRY(GC, 0, mmGRBM_GFX_INDEX)}, + {SOC15_REG_ENTRY(GC, 0, mmSQ_IND_INDEX)}, +}; + static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] = { mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0, @@ -721,6 +726,59 @@ static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] = mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0, }; +void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v) +{ + static void *scratch_reg0; + static void *scratch_reg1; + static void *scratch_reg2; + static void *scratch_reg3; + static void *spare_int; + static uint32_t grbm_cntl; + static uint32_t grbm_idx; + + scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4; + scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4; + scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2)*4; + scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3)*4; + spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4; + + grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL; + grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX; + + if (amdgpu_sriov_runtime(adev)) { + pr_err("shouldn't call rlcg write register during runtime\n"); + return; + } + + if (offset == grbm_cntl || offset == grbm_idx) { + if (offset == grbm_cntl) + writel(v, scratch_reg2); + else if (offset == grbm_idx) + writel(v, scratch_reg3); + + writel(v, ((void __iomem *)adev->rmmio) + (offset * 4)); + } else { + uint32_t i = 0; + uint32_t retries = 50000; + + writel(v, scratch_reg0); + writel(offset | 0x80000000, scratch_reg1); + writel(1, spare_int); + for (i = 0; i < retries; i++) { + u32 tmp; + + tmp = readl(scratch_reg1); + if (!(tmp & 0x80000000)) + break; + + udelay(10); + 
} + if (i >= retries) + pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset); + } + +} + #define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042 #define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041 #define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042 @@ -738,9 +796,9 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring); static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring); static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status); -static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev); static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev, void *inject_if); +static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev); static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask) @@ -1847,6 +1905,10 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) break; } + /* init spm vmid with 0xf */ + if (adev->gfx.rlc.funcs->update_spm_vmid) + adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf); + return 0; } @@ -1917,7 +1979,7 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev) static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address) { - WREG32_SOC15(GC, 0, mmSQ_IND_INDEX, + WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX, (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) | (address << SQ_IND_INDEX__INDEX__SHIFT) | @@ -1929,7 +1991,7 @@ static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t thread, uint32_t regno, uint32_t num, uint32_t *out) { - WREG32_SOC15(GC, 0, mmSQ_IND_INDEX, + WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX, (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) | (regno << SQ_IND_INDEX__INDEX__SHIFT) | @@ -1993,7 +2055,8 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = { .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs, .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q, .ras_error_inject = &gfx_v9_0_ras_error_inject, - .query_ras_error_count = &gfx_v9_0_query_ras_error_count + .query_ras_error_count = &gfx_v9_0_query_ras_error_count, + .reset_ras_error_count = &gfx_v9_0_reset_ras_error_count, }; static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = { @@ -2004,7 +2067,8 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = { .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs, .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q, .ras_error_inject = &gfx_v9_4_ras_error_inject, - .query_ras_error_count = &gfx_v9_4_query_ras_error_count + .query_ras_error_count = &gfx_v9_4_query_ras_error_count, + .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count, }; static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) @@ -3310,6 +3374,22 @@ static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring) WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp); } +static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { + if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) { + mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH; + ring->has_high_prio = true; + mqd->cp_hqd_queue_priority = + AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM; + } else { + ring->has_high_prio = false; + } + } +} + static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; @@ -3446,6 +3526,10 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring) tmp = REG_SET_FIELD(tmp, 
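The gfx_v9_0_rlcg_wreg() helper added above implements a small mailbox protocol rather than a plain MMIO store: the value is parked in SCRATCH_REG0, the destination offset plus a request flag (bit 31) in SCRATCH_REG1, RLC_SPARE_INT kicks the RLC firmware, and the driver polls until the firmware clears the flag (GRBM_GFX_CNTL and GRBM_GFX_INDEX are special-cased into shadow scratch writes plus a direct write). A condensed, standalone model of that handshake, with the RLC side simulated so the sketch runs; the accessor names below are stand-ins, not amdgpu APIs:

#include <stdint.h>
#include <stdio.h>

static uint32_t scratch_reg0, scratch_reg1, spare_int; /* models of the MMIO scratch regs */
static uint32_t regs[64];                              /* models the register file */

static void rlc_firmware_step(void)
{
	/* What the RLC firmware is assumed to do when SPARE_INT fires:
	 * perform the queued write, then clear the request bit as the ack. */
	if (spare_int && (scratch_reg1 & 0x80000000u)) {
		regs[scratch_reg1 & 0x3fu] = scratch_reg0;
		scratch_reg1 &= ~0x80000000u;
		spare_int = 0;
	}
}

static int rlcg_wreg_model(uint32_t offset, uint32_t v)
{
	scratch_reg0 = v;                     /* value to write */
	scratch_reg1 = offset | 0x80000000u;  /* offset + "request pending" flag */
	spare_int = 1;                        /* interrupt the RLC */

	for (int i = 0; i < 50000; i++) {     /* driver-side poll, as in the patch */
		rlc_firmware_step();          /* normally runs on the RLC, not here */
		if (!(scratch_reg1 & 0x80000000u))
			return 0;             /* firmware acked the write */
	}
	return -1;                            /* mirrors the driver's timeout pr_err path */
}

int main(void)
{
	printf("%d 0x%x\n", rlcg_wreg_model(5, 0xcafe), regs[5]); /* 0 0xcafe */
	return 0;
}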
CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3); mqd->cp_hqd_ib_control = tmp; + /* set static priority for a queue/ring */ + gfx_v9_0_mqd_set_priority(ring, mqd); + mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM); + /* map_queues packet doesn't need activate the queue, * so only kiq need set this field. */ @@ -3664,6 +3748,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring) /* reset ring buffer */ ring->wptr = 0; + atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0); amdgpu_ring_clear_ring(ring); } else { amdgpu_ring_clear_ring(ring); @@ -3963,6 +4048,63 @@ static int gfx_v9_0_soft_reset(void *handle) return 0; } +static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev) +{ + signed long r, cnt = 0; + unsigned long flags; + uint32_t seq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_ring *ring = &kiq->ring; + + BUG_ON(!ring->funcs->emit_rreg); + + spin_lock_irqsave(&kiq->ring_lock, flags); + amdgpu_ring_alloc(ring, 32); + amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); + amdgpu_ring_write(ring, 9 | /* src: register*/ + (5 << 8) | /* dst: memory */ + (1 << 16) | /* count sel */ + (1 << 20)); /* write confirm */ + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + + kiq->reg_val_offs * 4)); + amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + + kiq->reg_val_offs * 4)); + amdgpu_fence_emit_polling(ring, &seq); + amdgpu_ring_commit(ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); + + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + + /* don't wait anymore for gpu reset case because this way may + * block gpu_recover() routine forever, e.g. this virt_kiq_rreg + * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will + * never return if we keep waiting in virt_kiq_rreg, which causes + * gpu_recover() to hang there.
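gfx_v9_0_kiq_read_clock() above queues a COPY_DATA packet that snapshots the 64-bit clock into two writeback dwords, then waits on a polling fence; the retry logic, continued just below, sleeps between attempts but bails out immediately when a GPU reset is in flight or when called from interrupt context, since sleeping there would deadlock recovery. A standalone sketch of that wait discipline; fence_done(), the ctx fields and the sleep interval are illustrative stand-ins, not amdgpu symbols:

#include <stdbool.h>
#include <errno.h>

struct wait_ctx {
	bool in_gpu_reset;            /* set during amdgpu recovery */
	bool in_irq;                  /* true in interrupt context */
	int polls_left;               /* models fence completion progress */
};

static bool fence_done(struct wait_ctx *c)
{
	return --c->polls_left <= 0;  /* stand-in for amdgpu_fence_wait_polling() */
}

static void sleep_ms(unsigned int ms) { (void)ms; /* msleep() stand-in */ }

static int wait_for_kiq_fence(struct wait_ctx *c, unsigned int max_try)
{
	if (fence_done(c))
		return 0;

	/* Never sleep while a reset is running or in IRQ context:
	 * fail fast instead of blocking gpu_recover(). */
	if (c->in_gpu_reset || c->in_irq)
		return -EIO;

	for (unsigned int i = 0; i < max_try; i++) {
		sleep_ms(5);
		if (fence_done(c))
			return 0;
	}
	return -ETIMEDOUT;            /* caller reports "failed to read gpu clock" */
}

int main(void)
{
	struct wait_ctx c = { .polls_left = 3 };
	return wait_for_kiq_fence(&c, 10); /* completes on a retry poll */
}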
+ * + * also don't wait anymore for IRQ context + * */ + if (r < 1 && (adev->in_gpu_reset || in_interrupt())) + goto failed_kiq_read; + + might_sleep(); + while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { + msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + } + + if (cnt > MAX_KIQ_REG_TRY) + goto failed_kiq_read; + + return (uint64_t)adev->wb.wb[kiq->reg_val_offs] | + (uint64_t)adev->wb.wb[kiq->reg_val_offs + 1 ] << 32ULL; + +failed_kiq_read: + pr_err("failed to read gpu clock\n"); + return ~0; +} + static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev) { uint64_t clock; @@ -3970,16 +4112,7 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev) amdgpu_gfx_off_ctrl(adev, false); mutex_lock(&adev->gfx.gpu_clock_mutex); if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) { - uint32_t tmp, lsb, msb, i = 0; - do { - if (i != 0) - udelay(1); - tmp = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB); - lsb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_LSB); - msb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB); - i++; - } while (unlikely(tmp != msb) && (i < adev->usec_timeout)); - clock = (uint64_t)lsb | ((uint64_t)msb << 32ULL); + clock = gfx_v9_0_kiq_read_clock(adev); } else { WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) | @@ -4053,6 +4186,101 @@ static const u32 sgpr_init_compute_shader[] = 0xbe800080, 0xbf810000, }; +static const u32 vgpr_init_compute_shader_arcturus[] = { + 0xd3d94000, 0x18000080, 0xd3d94001, 0x18000080, 0xd3d94002, 0x18000080, + 0xd3d94003, 0x18000080, 0xd3d94004, 0x18000080, 0xd3d94005, 0x18000080, + 0xd3d94006, 0x18000080, 0xd3d94007, 0x18000080, 0xd3d94008, 0x18000080, + 0xd3d94009, 0x18000080, 0xd3d9400a, 0x18000080, 0xd3d9400b, 0x18000080, + 0xd3d9400c, 0x18000080, 0xd3d9400d, 0x18000080, 0xd3d9400e, 0x18000080, + 0xd3d9400f, 0x18000080, 0xd3d94010, 0x18000080, 0xd3d94011, 0x18000080, + 0xd3d94012, 0x18000080, 0xd3d94013, 0x18000080, 0xd3d94014, 0x18000080, + 0xd3d94015, 0x18000080, 0xd3d94016, 0x18000080, 0xd3d94017, 0x18000080, + 0xd3d94018, 0x18000080, 0xd3d94019, 0x18000080, 0xd3d9401a, 0x18000080, + 0xd3d9401b, 0x18000080, 0xd3d9401c, 0x18000080, 0xd3d9401d, 0x18000080, + 0xd3d9401e, 0x18000080, 0xd3d9401f, 0x18000080, 0xd3d94020, 0x18000080, + 0xd3d94021, 0x18000080, 0xd3d94022, 0x18000080, 0xd3d94023, 0x18000080, + 0xd3d94024, 0x18000080, 0xd3d94025, 0x18000080, 0xd3d94026, 0x18000080, + 0xd3d94027, 0x18000080, 0xd3d94028, 0x18000080, 0xd3d94029, 0x18000080, + 0xd3d9402a, 0x18000080, 0xd3d9402b, 0x18000080, 0xd3d9402c, 0x18000080, + 0xd3d9402d, 0x18000080, 0xd3d9402e, 0x18000080, 0xd3d9402f, 0x18000080, + 0xd3d94030, 0x18000080, 0xd3d94031, 0x18000080, 0xd3d94032, 0x18000080, + 0xd3d94033, 0x18000080, 0xd3d94034, 0x18000080, 0xd3d94035, 0x18000080, + 0xd3d94036, 0x18000080, 0xd3d94037, 0x18000080, 0xd3d94038, 0x18000080, + 0xd3d94039, 0x18000080, 0xd3d9403a, 0x18000080, 0xd3d9403b, 0x18000080, + 0xd3d9403c, 0x18000080, 0xd3d9403d, 0x18000080, 0xd3d9403e, 0x18000080, + 0xd3d9403f, 0x18000080, 0xd3d94040, 0x18000080, 0xd3d94041, 0x18000080, + 0xd3d94042, 0x18000080, 0xd3d94043, 0x18000080, 0xd3d94044, 0x18000080, + 0xd3d94045, 0x18000080, 0xd3d94046, 0x18000080, 0xd3d94047, 0x18000080, + 0xd3d94048, 0x18000080, 0xd3d94049, 0x18000080, 0xd3d9404a, 0x18000080, + 0xd3d9404b, 0x18000080, 0xd3d9404c, 0x18000080, 0xd3d9404d, 0x18000080, + 0xd3d9404e, 0x18000080, 0xd3d9404f, 0x18000080, 
0xd3d94050, 0x18000080, + 0xd3d94051, 0x18000080, 0xd3d94052, 0x18000080, 0xd3d94053, 0x18000080, + 0xd3d94054, 0x18000080, 0xd3d94055, 0x18000080, 0xd3d94056, 0x18000080, + 0xd3d94057, 0x18000080, 0xd3d94058, 0x18000080, 0xd3d94059, 0x18000080, + 0xd3d9405a, 0x18000080, 0xd3d9405b, 0x18000080, 0xd3d9405c, 0x18000080, + 0xd3d9405d, 0x18000080, 0xd3d9405e, 0x18000080, 0xd3d9405f, 0x18000080, + 0xd3d94060, 0x18000080, 0xd3d94061, 0x18000080, 0xd3d94062, 0x18000080, + 0xd3d94063, 0x18000080, 0xd3d94064, 0x18000080, 0xd3d94065, 0x18000080, + 0xd3d94066, 0x18000080, 0xd3d94067, 0x18000080, 0xd3d94068, 0x18000080, + 0xd3d94069, 0x18000080, 0xd3d9406a, 0x18000080, 0xd3d9406b, 0x18000080, + 0xd3d9406c, 0x18000080, 0xd3d9406d, 0x18000080, 0xd3d9406e, 0x18000080, + 0xd3d9406f, 0x18000080, 0xd3d94070, 0x18000080, 0xd3d94071, 0x18000080, + 0xd3d94072, 0x18000080, 0xd3d94073, 0x18000080, 0xd3d94074, 0x18000080, + 0xd3d94075, 0x18000080, 0xd3d94076, 0x18000080, 0xd3d94077, 0x18000080, + 0xd3d94078, 0x18000080, 0xd3d94079, 0x18000080, 0xd3d9407a, 0x18000080, + 0xd3d9407b, 0x18000080, 0xd3d9407c, 0x18000080, 0xd3d9407d, 0x18000080, + 0xd3d9407e, 0x18000080, 0xd3d9407f, 0x18000080, 0xd3d94080, 0x18000080, + 0xd3d94081, 0x18000080, 0xd3d94082, 0x18000080, 0xd3d94083, 0x18000080, + 0xd3d94084, 0x18000080, 0xd3d94085, 0x18000080, 0xd3d94086, 0x18000080, + 0xd3d94087, 0x18000080, 0xd3d94088, 0x18000080, 0xd3d94089, 0x18000080, + 0xd3d9408a, 0x18000080, 0xd3d9408b, 0x18000080, 0xd3d9408c, 0x18000080, + 0xd3d9408d, 0x18000080, 0xd3d9408e, 0x18000080, 0xd3d9408f, 0x18000080, + 0xd3d94090, 0x18000080, 0xd3d94091, 0x18000080, 0xd3d94092, 0x18000080, + 0xd3d94093, 0x18000080, 0xd3d94094, 0x18000080, 0xd3d94095, 0x18000080, + 0xd3d94096, 0x18000080, 0xd3d94097, 0x18000080, 0xd3d94098, 0x18000080, + 0xd3d94099, 0x18000080, 0xd3d9409a, 0x18000080, 0xd3d9409b, 0x18000080, + 0xd3d9409c, 0x18000080, 0xd3d9409d, 0x18000080, 0xd3d9409e, 0x18000080, + 0xd3d9409f, 0x18000080, 0xd3d940a0, 0x18000080, 0xd3d940a1, 0x18000080, + 0xd3d940a2, 0x18000080, 0xd3d940a3, 0x18000080, 0xd3d940a4, 0x18000080, + 0xd3d940a5, 0x18000080, 0xd3d940a6, 0x18000080, 0xd3d940a7, 0x18000080, + 0xd3d940a8, 0x18000080, 0xd3d940a9, 0x18000080, 0xd3d940aa, 0x18000080, + 0xd3d940ab, 0x18000080, 0xd3d940ac, 0x18000080, 0xd3d940ad, 0x18000080, + 0xd3d940ae, 0x18000080, 0xd3d940af, 0x18000080, 0xd3d940b0, 0x18000080, + 0xd3d940b1, 0x18000080, 0xd3d940b2, 0x18000080, 0xd3d940b3, 0x18000080, + 0xd3d940b4, 0x18000080, 0xd3d940b5, 0x18000080, 0xd3d940b6, 0x18000080, + 0xd3d940b7, 0x18000080, 0xd3d940b8, 0x18000080, 0xd3d940b9, 0x18000080, + 0xd3d940ba, 0x18000080, 0xd3d940bb, 0x18000080, 0xd3d940bc, 0x18000080, + 0xd3d940bd, 0x18000080, 0xd3d940be, 0x18000080, 0xd3d940bf, 0x18000080, + 0xd3d940c0, 0x18000080, 0xd3d940c1, 0x18000080, 0xd3d940c2, 0x18000080, + 0xd3d940c3, 0x18000080, 0xd3d940c4, 0x18000080, 0xd3d940c5, 0x18000080, + 0xd3d940c6, 0x18000080, 0xd3d940c7, 0x18000080, 0xd3d940c8, 0x18000080, + 0xd3d940c9, 0x18000080, 0xd3d940ca, 0x18000080, 0xd3d940cb, 0x18000080, + 0xd3d940cc, 0x18000080, 0xd3d940cd, 0x18000080, 0xd3d940ce, 0x18000080, + 0xd3d940cf, 0x18000080, 0xd3d940d0, 0x18000080, 0xd3d940d1, 0x18000080, + 0xd3d940d2, 0x18000080, 0xd3d940d3, 0x18000080, 0xd3d940d4, 0x18000080, + 0xd3d940d5, 0x18000080, 0xd3d940d6, 0x18000080, 0xd3d940d7, 0x18000080, + 0xd3d940d8, 0x18000080, 0xd3d940d9, 0x18000080, 0xd3d940da, 0x18000080, + 0xd3d940db, 0x18000080, 0xd3d940dc, 0x18000080, 0xd3d940dd, 0x18000080, + 0xd3d940de, 0x18000080, 0xd3d940df, 0x18000080, 
0xd3d940e0, 0x18000080, + 0xd3d940e1, 0x18000080, 0xd3d940e2, 0x18000080, 0xd3d940e3, 0x18000080, + 0xd3d940e4, 0x18000080, 0xd3d940e5, 0x18000080, 0xd3d940e6, 0x18000080, + 0xd3d940e7, 0x18000080, 0xd3d940e8, 0x18000080, 0xd3d940e9, 0x18000080, + 0xd3d940ea, 0x18000080, 0xd3d940eb, 0x18000080, 0xd3d940ec, 0x18000080, + 0xd3d940ed, 0x18000080, 0xd3d940ee, 0x18000080, 0xd3d940ef, 0x18000080, + 0xd3d940f0, 0x18000080, 0xd3d940f1, 0x18000080, 0xd3d940f2, 0x18000080, + 0xd3d940f3, 0x18000080, 0xd3d940f4, 0x18000080, 0xd3d940f5, 0x18000080, + 0xd3d940f6, 0x18000080, 0xd3d940f7, 0x18000080, 0xd3d940f8, 0x18000080, + 0xd3d940f9, 0x18000080, 0xd3d940fa, 0x18000080, 0xd3d940fb, 0x18000080, + 0xd3d940fc, 0x18000080, 0xd3d940fd, 0x18000080, 0xd3d940fe, 0x18000080, + 0xd3d940ff, 0x18000080, 0xb07c0000, 0xbe8a00ff, 0x000000f8, 0xbf11080a, + 0x7e000280, 0x7e020280, 0x7e040280, 0x7e060280, 0x7e080280, 0x7e0a0280, + 0x7e0c0280, 0x7e0e0280, 0x808a880a, 0xbe80320a, 0xbf84fff5, 0xbf9c0000, + 0xd28c0001, 0x0001007f, 0xd28d0001, 0x0002027e, 0x10020288, 0xb88b0904, + 0xb78b4000, 0xd1196a01, 0x00001701, 0xbe8a0087, 0xbefc00c1, 0xd89c4000, + 0x00020201, 0xd89cc080, 0x00040401, 0x320202ff, 0x00000800, 0x808a810a, + 0xbf84fff8, 0xbf810000, +}; + /* When below register arrays changed, please update gpr_reg_size, and sec_ded_counter_reg_size in function gfx_v9_0_do_edc_gpr_workarounds, to cover all gfx9 ASICs */ @@ -4073,6 +4301,23 @@ static const struct soc15_reg_entry vgpr_init_regs[] = { { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff }, }; +static const struct soc15_reg_entry vgpr_init_regs_arcturus[] = { + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0xbf }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 }, /* 64KB LDS */ + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff }, +}; + static const struct soc15_reg_entry sgpr1_init_regs[] = { { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 }, { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 }, @@ -4141,7 +4386,6 @@ static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = { { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16}, { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2}, { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6}, - { SOC15_REG_ENTRY(HDP, 0, mmHDP_EDC_CNT), 0, 1, 1}, }; static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev) @@ -4204,7 +4448,10 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) adev->gfx.config.max_cu_per_sh * adev->gfx.config.max_sh_per_se; int sgpr_work_group_size = 5; - int gpr_reg_size = compute_dim_x / 16 + 6; + int gpr_reg_size = adev->gfx.config.max_shader_engines + 6; + int vgpr_init_shader_size; + const u32 *vgpr_init_shader_ptr; + const struct 
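The Arcturus VGPR-init shader above is dominated by one repeated pattern: an opcode dword stepping from 0xd3d94000 to 0xd3d940ff, one per destination VGPR, each followed by the same literal 0x18000080; only the short control tail at the end of the table differs. Assuming that pattern holds for all 256 entries, which the listing suggests, the bulk of the table can be regenerated and audited with a trivial host-side loop rather than read by eye:

#include <stdint.h>
#include <stdio.h>

/* Regenerate the 256 repeating (opcode, literal) pairs of
 * vgpr_init_compute_shader_arcturus; the trailing control words of the
 * real shader are not produced here. */
int main(void)
{
	for (uint32_t vgpr = 0; vgpr < 256; vgpr++)
		printf("0x%08x, 0x%08x,%s", 0xd3d94000u + vgpr, 0x18000080u,
		       (vgpr % 3 == 2) ? "\n" : " ");
	putchar('\n');
	return 0;
}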
soc15_reg_entry *vgpr_init_regs_ptr; /* only support when RAS is enabled */ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) @@ -4214,6 +4461,16 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) if (!ring->sched.ready) return 0; + if (adev->asic_type == CHIP_ARCTURUS) { + vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus; + vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus); + vgpr_init_regs_ptr = vgpr_init_regs_arcturus; + } else { + vgpr_init_shader_ptr = vgpr_init_compute_shader; + vgpr_init_shader_size = sizeof(vgpr_init_compute_shader); + vgpr_init_regs_ptr = vgpr_init_regs; + } + total_size = (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* VGPRS */ total_size += @@ -4222,7 +4479,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS2 */ total_size = ALIGN(total_size, 256); vgpr_offset = total_size; - total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256); + total_size += ALIGN(vgpr_init_shader_size, 256); sgpr_offset = total_size; total_size += sizeof(sgpr_init_compute_shader); @@ -4235,8 +4492,8 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) } /* load the compute shaders */ - for (i = 0; i < ARRAY_SIZE(vgpr_init_compute_shader); i++) - ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_compute_shader[i]; + for (i = 0; i < vgpr_init_shader_size/sizeof(u32); i++) + ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_shader_ptr[i]; for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++) ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i]; @@ -4248,9 +4505,9 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) /* write the register state for the compute dispatch */ for (i = 0; i < gpr_reg_size; i++) { ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1); - ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs[i]) + ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs_ptr[i]) - PACKET3_SET_SH_REG_START; - ib.ptr[ib.length_dw++] = vgpr_init_regs[i].reg_value; + ib.ptr[ib.length_dw++] = vgpr_init_regs_ptr[i].reg_value; } /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */ gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8; @@ -4262,7 +4519,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) /* write dispatch packet */ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3); - ib.ptr[ib.length_dw++] = compute_dim_x; /* x */ + ib.ptr[ib.length_dw++] = compute_dim_x * 2; /* x */ ib.ptr[ib.length_dw++] = 1; /* y */ ib.ptr[ib.length_dw++] = 1; /* z */ ib.ptr[ib.length_dw++] = @@ -4342,18 +4599,6 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) goto fail; } - switch (adev->asic_type) - { - case CHIP_VEGA20: - gfx_v9_0_clear_ras_edc_counter(adev); - break; - case CHIP_ARCTURUS: - gfx_v9_4_clear_ras_edc_counter(adev); - break; - default: - break; - } - fail: amdgpu_ib_free(adev, &ib, NULL); dma_fence_put(f); @@ -4401,6 +4646,10 @@ static int gfx_v9_0_ecc_late_init(void *handle) if (r) return r; + if (adev->gfx.funcs && + adev->gfx.funcs->reset_ras_error_count) + adev->gfx.funcs->reset_ras_error_count(adev); + r = amdgpu_gfx_ras_late_init(adev); if (r) return r; @@ -4705,6 +4954,47 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev, return 0; } +static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) +{ + u32 data; + + data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL); + + data &= 
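The reworked EDC routine above fills the indirect buffer with one SET_SH_REG group per entry of vgpr_init_regs_ptr: a type-3 packet header, the register offset made relative to PACKET3_SET_SH_REG_START, then the value. A self-contained sketch of that three-dword emission; the opcode and base constants below are illustrative placeholders for the real soc15 definitions:

#include <stdint.h>

#define PKT_TYPE3	3u
#define OP_SET_SH_REG	0x76u	/* placeholder for PACKET3_SET_SH_REG */
#define SH_REG_START	0x2c00u	/* placeholder for PACKET3_SET_SH_REG_START */

static inline uint32_t packet3(uint32_t op, uint32_t count)
{
	/* type-3 header: [31:30] type, [29:16] dword count, [15:8] opcode */
	return (PKT_TYPE3 << 30) | ((count & 0x3fffu) << 16) | ((op & 0xffu) << 8);
}

/* Append "write value to one SH register" to an IB, returning the new cursor. */
static uint32_t *emit_set_sh_reg(uint32_t *ib, uint32_t reg, uint32_t value)
{
	*ib++ = packet3(OP_SET_SH_REG, 1);	/* one register payload follows */
	*ib++ = reg - SH_REG_START;		/* offsets are relative to the SH base */
	*ib++ = value;
	return ib;
}

int main(void)
{
	uint32_t ib[8];
	uint32_t *end = emit_set_sh_reg(ib, SH_REG_START + 0x10, 0xffffffffu);
	return (int)(end - ib);			/* 3 dwords emitted */
}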
~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK; + data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT; + + WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data); +} + +static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev, + uint32_t offset, + struct soc15_reg_rlcg *entries, int arr_size) +{ + int i; + uint32_t reg; + + if (!entries) + return false; + + for (i = 0; i < arr_size; i++) { + const struct soc15_reg_rlcg *entry; + + entry = &entries[i]; + reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg; + if (offset == reg) + return true; + } + + return false; +} + +static bool gfx_v9_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset) +{ + return gfx_v9_0_check_rlcg_range(adev, offset, + (void *)rlcg_access_gc_9_0, + ARRAY_SIZE(rlcg_access_gc_9_0)); +} + static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = { .is_rlc_enabled = gfx_v9_0_is_rlc_enabled, .set_safe_mode = gfx_v9_0_set_safe_mode, @@ -4716,7 +5006,10 @@ static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = { .resume = gfx_v9_0_rlc_resume, .stop = gfx_v9_0_rlc_stop, .reset = gfx_v9_0_rlc_reset, - .start = gfx_v9_0_rlc_start + .start = gfx_v9_0_rlc_start, + .update_spm_vmid = gfx_v9_0_update_spm_vmid, + .rlcg_wreg = gfx_v9_0_rlcg_wreg, + .is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range, }; static int gfx_v9_0_set_powergating_state(void *handle, @@ -4919,7 +5212,7 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) { control |= INDIRECT_BUFFER_PRE_ENB(1); - if (!(ib->flags & AMDGPU_IB_FLAG_CE)) + if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid) gfx_v9_0_ring_emit_de_meta(ring); } @@ -5044,105 +5337,6 @@ static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring) return wptr; } -static void gfx_v9_0_ring_set_pipe_percent(struct amdgpu_ring *ring, - bool acquire) -{ - struct amdgpu_device *adev = ring->adev; - int pipe_num, tmp, reg; - int pipe_percent = acquire ? 
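gfx_v9_0_is_rlcg_access_range() above turns the rlcg_access_gc_9_0 whitelist into a predicate, so common code can decide per offset whether a write must detour through the RLC. A runnable model of that caller-side decision; the routing wrapper, the placeholder offsets and the print statements are illustrative, not the actual amdgpu entry point:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

struct rlc_funcs {
	bool (*is_rlcg_access_range)(uint32_t offset);
	void (*rlcg_wreg)(uint32_t offset, uint32_t v);
};

static bool in_whitelist(uint32_t offset)
{
	/* stand-in for the rlcg_access_gc_9_0 table lookup */
	return offset == 0x1234 || offset == 0x1235;	/* placeholder offsets */
}

static void rlcg_write(uint32_t offset, uint32_t v)
{
	printf("RLC-mediated write: reg 0x%04x = 0x%08x\n", offset, v);
}

static void mmio_write(uint32_t offset, uint32_t v)
{
	printf("direct MMIO write:  reg 0x%04x = 0x%08x\n", offset, v);
}

/* The decision the new hooks enable: detour through the RLC only for
 * whitelisted offsets when running as an SRIOV VF. */
static void wreg_routed(const struct rlc_funcs *f, bool is_sriov_vf,
			uint32_t offset, uint32_t v)
{
	if (is_sriov_vf && f->is_rlcg_access_range(offset))
		f->rlcg_wreg(offset, v);
	else
		mmio_write(offset, v);
}

int main(void)
{
	struct rlc_funcs f = { in_whitelist, rlcg_write };
	wreg_routed(&f, true, 0x1234, 1);	/* RLC path */
	wreg_routed(&f, true, 0x2000, 2);	/* direct path */
	return 0;
}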
SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1; - - pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe; - - /* first me only has 2 entries, GFX and HP3D */ - if (ring->me > 0) - pipe_num -= 2; - - reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX) + pipe_num; - tmp = RREG32(reg); - tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent); - WREG32(reg, tmp); -} - -static void gfx_v9_0_pipe_reserve_resources(struct amdgpu_device *adev, - struct amdgpu_ring *ring, - bool acquire) -{ - int i, pipe; - bool reserve; - struct amdgpu_ring *iring; - - mutex_lock(&adev->gfx.pipe_reserve_mutex); - pipe = amdgpu_gfx_mec_queue_to_bit(adev, ring->me, ring->pipe, 0); - if (acquire) - set_bit(pipe, adev->gfx.pipe_reserve_bitmap); - else - clear_bit(pipe, adev->gfx.pipe_reserve_bitmap); - - if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) { - /* Clear all reservations - everyone reacquires all resources */ - for (i = 0; i < adev->gfx.num_gfx_rings; ++i) - gfx_v9_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i], - true); - - for (i = 0; i < adev->gfx.num_compute_rings; ++i) - gfx_v9_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i], - true); - } else { - /* Lower all pipes without a current reservation */ - for (i = 0; i < adev->gfx.num_gfx_rings; ++i) { - iring = &adev->gfx.gfx_ring[i]; - pipe = amdgpu_gfx_mec_queue_to_bit(adev, - iring->me, - iring->pipe, - 0); - reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap); - gfx_v9_0_ring_set_pipe_percent(iring, reserve); - } - - for (i = 0; i < adev->gfx.num_compute_rings; ++i) { - iring = &adev->gfx.compute_ring[i]; - pipe = amdgpu_gfx_mec_queue_to_bit(adev, - iring->me, - iring->pipe, - 0); - reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap); - gfx_v9_0_ring_set_pipe_percent(iring, reserve); - } - } - - mutex_unlock(&adev->gfx.pipe_reserve_mutex); -} - -static void gfx_v9_0_hqd_set_priority(struct amdgpu_device *adev, - struct amdgpu_ring *ring, - bool acquire) -{ - uint32_t pipe_priority = acquire ? 0x2 : 0x0; - uint32_t queue_priority = acquire ? 
0xf : 0x0; - - mutex_lock(&adev->srbm_mutex); - soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); - - WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority); - WREG32_SOC15_RLC(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority); - - soc15_grbm_select(adev, 0, 0, 0, 0); - mutex_unlock(&adev->srbm_mutex); -} - -static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring, - enum drm_sched_priority priority) -{ - struct amdgpu_device *adev = ring->adev; - bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW; - - if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE) - return; - - gfx_v9_0_hqd_set_priority(adev, ring, acquire); - gfx_v9_0_pipe_reserve_resources(adev, ring, acquire); -} - static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; @@ -6322,10 +6516,13 @@ static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg, return 0; } -static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev) +static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev) { int i, j, k; + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) + return; + /* read back registers to clear the counters */ mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) { @@ -6513,7 +6710,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = { .test_ib = gfx_v9_0_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, - .set_priority = gfx_v9_0_ring_set_priority_compute, .emit_wreg = gfx_v9_0_ring_emit_wreg, .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c index f099f13d7f1e93d95a0fdcc847cb6b6c0b907270..cceb46faf212abe5f4446742801849bf41505638 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c @@ -893,10 +893,13 @@ int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev, return 0; } -void gfx_v9_4_clear_ras_edc_counter(struct amdgpu_device *adev) +void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev) { int i, j, k; + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) + return; + mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < ARRAY_SIZE(gfx_v9_4_edc_counter_regs); i++) { for (j = 0; j < gfx_v9_4_edc_counter_regs[i].se_num; j++) { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h index 2e3f6f755ad47de7d9799bbe5a77aa7e2ac7f6da..1ffecc5c0f0a9c7818a8f78f65851f5417dfb834 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h @@ -32,4 +32,6 @@ int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev, int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, void *inject_if); +void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev); + #endif /* __GFX_V9_4_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c index e0654a216ab5cb471f253cb00f740524fc66924e..cc866c36793946e5621630e6be0ec2a350f0b056 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c @@ -81,24 +81,31 @@ static void gfxhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev) { uint64_t value; - /* Disable AGP. 
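The renamed gfx_v9_0_reset_ras_error_count() above, like the mmhub and sdma variants later in this patch, leans on one hardware property: the EDC counter registers are clear-on-read, so "reset" is simply reading every counter and discarding the values. The idiom in isolation, as a runnable model (read_reg() stands in for RREG32 against clear-on-read hardware):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint32_t read_reg(uint32_t *bank, size_t idx)
{
	uint32_t v = bank[idx];
	bank[idx] = 0;			/* clear-on-read, like the EDC counters */
	return v;
}

static void reset_error_counters(uint32_t *bank, size_t nregs)
{
	for (size_t i = 0; i < nregs; i++)
		(void)read_reg(bank, i);	/* value intentionally discarded */
}

int main(void)
{
	uint32_t edc[4] = { 3, 0, 7, 1 };
	reset_error_counters(edc, 4);
	printf("%u %u %u %u\n", edc[0], edc[1], edc[2], edc[3]); /* 0 0 0 0 */
	return 0;
}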
*/ - WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0); - WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_TOP, 0); - WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, 0x00FFFFFF); - - /* Program the system aperture low logical page number. */ - WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_LOW_ADDR, - adev->gmc.vram_start >> 18); - WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR, - adev->gmc.vram_end >> 18); - - /* Set default page address. */ - value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start - + adev->vm_manager.vram_base_offset; - WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, - (u32)(value >> 12)); - WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, - (u32)(value >> 44)); + if (!amdgpu_sriov_vf(adev)) { + /* + * the new L1 policy will block SRIOV guest from writing + * these regs, and they will be programmed at host. + * so skip programming these regs. + */ + /* Disable AGP. */ + WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0); + WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_TOP, 0); + WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, 0x00FFFFFF); + + /* Program the system aperture low logical page number. */ + WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_LOW_ADDR, + adev->gmc.vram_start >> 18); + WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR, + adev->gmc.vram_end >> 18); + + /* Set default page address. */ + value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + + adev->vm_manager.vram_base_offset; + WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, + (u32)(value >> 12)); + WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, + (u32)(value >> 44)); + } /* Program "protection fault". */ WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, @@ -260,18 +267,6 @@ static void gfxhub_v2_0_program_invalidation(struct amdgpu_device *adev) int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev) { - if (amdgpu_sriov_vf(adev)) { - /* - * GCMC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are - * VF copy registers so vbios post doesn't program them, for - * SRIOV driver need to program them - */ - WREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE, - adev->gmc.vram_start >> 24); - WREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_TOP, - adev->gmc.vram_end >> 24); - } - - /* GART Enable.
*/ gfxhub_v2_0_init_gart_aperture_regs(adev); gfxhub_v2_0_init_system_aperture_regs(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 10171acbf3e1b37bdc6d0b35a3a205f3424ee734..8606f877478f89b8fc7fbff1cf9bed83311a4197 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -922,32 +922,25 @@ static int gmc_v9_0_late_init(void *handle) if (r) return r; /* Check if ecc is available */ - if (!amdgpu_sriov_vf(adev)) { - switch (adev->asic_type) { - case CHIP_VEGA10: - case CHIP_VEGA20: - case CHIP_ARCTURUS: - r = amdgpu_atomfirmware_mem_ecc_supported(adev); - if (!r) { - DRM_INFO("ECC is not present.\n"); - if (adev->df.funcs->enable_ecc_force_par_wr_rmw) - adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false); - } else { - DRM_INFO("ECC is active.\n"); - } - - r = amdgpu_atomfirmware_sram_ecc_supported(adev); - if (!r) { - DRM_INFO("SRAM ECC is not present.\n"); - } else { - DRM_INFO("SRAM ECC is active.\n"); - } - break; - default: - break; - } + if (!amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_VEGA10)) { + r = amdgpu_atomfirmware_mem_ecc_supported(adev); + if (!r) { + DRM_INFO("ECC is not present.\n"); + if (adev->df.funcs->enable_ecc_force_par_wr_rmw) + adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false); + } else + DRM_INFO("ECC is active.\n"); + + r = amdgpu_atomfirmware_sram_ecc_supported(adev); + if (!r) + DRM_INFO("SRAM ECC is not present.\n"); + else + DRM_INFO("SRAM ECC is active.\n"); } + if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count) + adev->mmhub.funcs->reset_ras_error_count(adev); + r = amdgpu_gmc_ras_late_init(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c index ff2e6e1ccde7ccee5008fe282a4e407a905b4b33..6173951db7b4527875a6cb53b043a124c1ae8227 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c @@ -693,7 +693,7 @@ static int jpeg_v2_0_set_clockgating_state(void *handle, bool enable = (state == AMD_CG_STATE_GATE); if (enable) { - if (jpeg_v2_0_is_idle(handle)) + if (!jpeg_v2_0_is_idle(handle)) return -EBUSY; jpeg_v2_0_enable_clock_gating(adev); } else { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c index c6d046df4b706951455d2095c46f1eb08361f163..c04c2078a7c1f3655762e871c9147db0bafe8b64 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c @@ -477,7 +477,7 @@ static int jpeg_v2_5_set_clockgating_state(void *handle, continue; if (enable) { - if (jpeg_v2_5_is_idle(handle)) + if (!jpeg_v2_5_is_idle(handle)) return -EBUSY; jpeg_v2_5_enable_clock_gating(adev, i); } else { diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 49a3a56ec017d65506a943af107ae1a6f185fc9c..396c2a624de08031f3c9e0d71248e2ca89aeb835 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -747,7 +747,19 @@ static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev, err_data->ue_count += ded_count; } +static void mmhub_v1_0_reset_ras_error_count(struct amdgpu_device *adev) +{ + uint32_t i; + + /* read back edc counter registers to reset the counters to 0 */ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) { + for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_edc_cnt_regs); i++) + RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i])); + } +} + const struct amdgpu_mmhub_funcs 
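The two JPEG hunks above are polarity fixes: clock gating may only be engaged while the block is idle, but the old condition returned -EBUSY exactly when the block was idle and tried to gate it while busy. The corrected guard, reduced to a standalone model:

#include <stdbool.h>
#include <errno.h>
#include <stdio.h>

struct jpeg_block { bool idle; bool gated; };

static int set_clockgating(struct jpeg_block *b, bool enable)
{
	if (enable) {
		if (!b->idle)		/* still busy: refuse to gate, as the fix does */
			return -EBUSY;
		b->gated = true;
	} else {
		b->gated = false;	/* ungating is always safe */
	}
	return 0;
}

int main(void)
{
	struct jpeg_block b = { .idle = false };
	printf("%d\n", set_clockgating(&b, true));	/* -16 (-EBUSY): block busy */
	b.idle = true;
	printf("%d\n", set_clockgating(&b, true));	/* 0: gated while idle */
	return 0;
}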
mmhub_v1_0_funcs = { .ras_late_init = amdgpu_mmhub_ras_late_init, .query_ras_error_count = mmhub_v1_0_query_ras_error_count, + .reset_ras_error_count = mmhub_v1_0_reset_ras_error_count, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c index bde1896805219694c1927cbc9a57b39ad9c1fd8d..fb3f228458e5c282fbdd957937e96ebe526df4f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c @@ -72,11 +72,18 @@ static void mmhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev) WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_TOP, 0); WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BOT, 0x00FFFFFF); - /* Program the system aperture low logical page number. */ - WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR, - adev->gmc.vram_start >> 18); - WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR, - adev->gmc.vram_end >> 18); + if (!amdgpu_sriov_vf(adev)) { + /* + * the new L1 policy will block SRIOV guest from writing + * these regs, and they will be programmed at host. + * so skip programming these regs. + */ + /* Program the system aperture low logical page number. */ + WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR, + adev->gmc.vram_start >> 18); + WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR, + adev->gmc.vram_end >> 18); + } /* Set default page address. */ value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + @@ -247,18 +254,6 @@ static void mmhub_v2_0_program_invalidation(struct amdgpu_device *adev) int mmhub_v2_0_gart_enable(struct amdgpu_device *adev) { - if (amdgpu_sriov_vf(adev)) { - /* - * MMMC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are - * VF copy registers so vbios post doesn't program them, for - * SRIOV driver need to program them - */ - WREG32_SOC15(MMHUB, 0, mmMMMC_VM_FB_LOCATION_BASE, - adev->gmc.vram_start >> 24); - WREG32_SOC15(MMHUB, 0, mmMMMC_VM_FB_LOCATION_TOP, - adev->gmc.vram_end >> 24); - } - - /* GART Enable. */ mmhub_v2_0_init_gart_aperture_regs(adev); mmhub_v2_0_init_system_aperture_regs(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c index a5281df8d84fbd75676420bf6ba16d4ac79af27c..0d413fabd015c9276e5370ca79a2e7079f22459d 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c @@ -1596,7 +1596,19 @@ static void mmhub_v9_4_query_ras_error_count(struct amdgpu_device *adev, err_data->ue_count += ded_count; } +static void mmhub_v9_4_reset_ras_error_count(struct amdgpu_device *adev) +{ + uint32_t i; + + /* read back edc counter registers to reset the counters to 0 */ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) { + for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_edc_cnt_regs); i++) + RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_edc_cnt_regs[i])); + } +} + const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = { .ras_late_init = amdgpu_mmhub_ras_late_init, .query_ras_error_count = mmhub_v9_4_query_ras_error_count, + .reset_ras_error_count = mmhub_v9_4_reset_ras_error_count, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v2_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v2_0.h new file mode 100644 index 0000000000000000000000000000000000000000..1b5086c7d4e6ffd94a2c49d4be1fb83f52321d26 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v2_0.h @@ -0,0 +1,338 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __MMSCH_V2_0_H__ +#define __MMSCH_V2_0_H__ + +// addressBlock: uvd0_mmsch_dec +// base address: 0x1e000 +#define mmMMSCH_UCODE_ADDR 0x0000 +#define mmMMSCH_UCODE_ADDR_BASE_IDX 0 +#define mmMMSCH_UCODE_DATA 0x0001 +#define mmMMSCH_UCODE_DATA_BASE_IDX 0 +#define mmMMSCH_SRAM_ADDR 0x0002 +#define mmMMSCH_SRAM_ADDR_BASE_IDX 0 +#define mmMMSCH_SRAM_DATA 0x0003 +#define mmMMSCH_SRAM_DATA_BASE_IDX 0 +#define mmMMSCH_VF_SRAM_OFFSET 0x0004 +#define mmMMSCH_VF_SRAM_OFFSET_BASE_IDX 0 +#define mmMMSCH_DB_SRAM_OFFSET 0x0005 +#define mmMMSCH_DB_SRAM_OFFSET_BASE_IDX 0 +#define mmMMSCH_CTX_SRAM_OFFSET 0x0006 +#define mmMMSCH_CTX_SRAM_OFFSET_BASE_IDX 0 +#define mmMMSCH_CTL 0x0007 +#define mmMMSCH_CTL_BASE_IDX 0 +#define mmMMSCH_INTR 0x0008 +#define mmMMSCH_INTR_BASE_IDX 0 +#define mmMMSCH_INTR_ACK 0x0009 +#define mmMMSCH_INTR_ACK_BASE_IDX 0 +#define mmMMSCH_INTR_STATUS 0x000a +#define mmMMSCH_INTR_STATUS_BASE_IDX 0 +#define mmMMSCH_VF_VMID 0x000b +#define mmMMSCH_VF_VMID_BASE_IDX 0 +#define mmMMSCH_VF_CTX_ADDR_LO 0x000c +#define mmMMSCH_VF_CTX_ADDR_LO_BASE_IDX 0 +#define mmMMSCH_VF_CTX_ADDR_HI 0x000d +#define mmMMSCH_VF_CTX_ADDR_HI_BASE_IDX 0 +#define mmMMSCH_VF_CTX_SIZE 0x000e +#define mmMMSCH_VF_CTX_SIZE_BASE_IDX 0 +#define mmMMSCH_VF_GPCOM_ADDR_LO 0x000f +#define mmMMSCH_VF_GPCOM_ADDR_LO_BASE_IDX 0 +#define mmMMSCH_VF_GPCOM_ADDR_HI 0x0010 +#define mmMMSCH_VF_GPCOM_ADDR_HI_BASE_IDX 0 +#define mmMMSCH_VF_GPCOM_SIZE 0x0011 +#define mmMMSCH_VF_GPCOM_SIZE_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX_HOST 0x0012 +#define mmMMSCH_VF_MAILBOX_HOST_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX_RESP 0x0013 +#define mmMMSCH_VF_MAILBOX_RESP_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX_0 0x0014 +#define mmMMSCH_VF_MAILBOX_0_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX_0_RESP 0x0015 +#define mmMMSCH_VF_MAILBOX_0_RESP_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX_1 0x0016 +#define mmMMSCH_VF_MAILBOX_1_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX_1_RESP 0x0017 +#define mmMMSCH_VF_MAILBOX_1_RESP_BASE_IDX 0 +#define mmMMSCH_CNTL 0x001c +#define mmMMSCH_CNTL_BASE_IDX 0 +#define mmMMSCH_NONCACHE_OFFSET0 0x001d +#define mmMMSCH_NONCACHE_OFFSET0_BASE_IDX 0 +#define mmMMSCH_NONCACHE_SIZE0 0x001e +#define mmMMSCH_NONCACHE_SIZE0_BASE_IDX 0 +#define mmMMSCH_NONCACHE_OFFSET1 0x001f +#define mmMMSCH_NONCACHE_OFFSET1_BASE_IDX 0 +#define mmMMSCH_NONCACHE_SIZE1 0x0020 +#define mmMMSCH_NONCACHE_SIZE1_BASE_IDX 0 +#define mmMMSCH_PDEBUG_STATUS 0x0021 +#define 
mmMMSCH_PDEBUG_STATUS_BASE_IDX 0 +#define mmMMSCH_PDEBUG_DATA_32UPPERBITS 0x0022 +#define mmMMSCH_PDEBUG_DATA_32UPPERBITS_BASE_IDX 0 +#define mmMMSCH_PDEBUG_DATA_32LOWERBITS 0x0023 +#define mmMMSCH_PDEBUG_DATA_32LOWERBITS_BASE_IDX 0 +#define mmMMSCH_PDEBUG_EPC 0x0024 +#define mmMMSCH_PDEBUG_EPC_BASE_IDX 0 +#define mmMMSCH_PDEBUG_EXCCAUSE 0x0025 +#define mmMMSCH_PDEBUG_EXCCAUSE_BASE_IDX 0 +#define mmMMSCH_PROC_STATE1 0x0026 +#define mmMMSCH_PROC_STATE1_BASE_IDX 0 +#define mmMMSCH_LAST_MC_ADDR 0x0027 +#define mmMMSCH_LAST_MC_ADDR_BASE_IDX 0 +#define mmMMSCH_LAST_MEM_ACCESS_HI 0x0028 +#define mmMMSCH_LAST_MEM_ACCESS_HI_BASE_IDX 0 +#define mmMMSCH_LAST_MEM_ACCESS_LO 0x0029 +#define mmMMSCH_LAST_MEM_ACCESS_LO_BASE_IDX 0 +#define mmMMSCH_IOV_ACTIVE_FCN_ID 0x002a +#define mmMMSCH_IOV_ACTIVE_FCN_ID_BASE_IDX 0 +#define mmMMSCH_SCRATCH_0 0x002b +#define mmMMSCH_SCRATCH_0_BASE_IDX 0 +#define mmMMSCH_SCRATCH_1 0x002c +#define mmMMSCH_SCRATCH_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_SCH_BLOCK_0 0x002d +#define mmMMSCH_GPUIOV_SCH_BLOCK_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_CONTROL_0 0x002e +#define mmMMSCH_GPUIOV_CMD_CONTROL_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_STATUS_0 0x002f +#define mmMMSCH_GPUIOV_CMD_STATUS_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_0 0x0030 +#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCNS_0 0x0031 +#define mmMMSCH_GPUIOV_ACTIVE_FCNS_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_0 0x0032 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW6_0 0x0033 +#define mmMMSCH_GPUIOV_DW6_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW7_0 0x0034 +#define mmMMSCH_GPUIOV_DW7_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW8_0 0x0035 +#define mmMMSCH_GPUIOV_DW8_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_SCH_BLOCK_1 0x0036 +#define mmMMSCH_GPUIOV_SCH_BLOCK_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_CONTROL_1 0x0037 +#define mmMMSCH_GPUIOV_CMD_CONTROL_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_STATUS_1 0x0038 +#define mmMMSCH_GPUIOV_CMD_STATUS_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_1 0x0039 +#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCNS_1 0x003a +#define mmMMSCH_GPUIOV_ACTIVE_FCNS_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_1 0x003b +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW6_1 0x003c +#define mmMMSCH_GPUIOV_DW6_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW7_1 0x003d +#define mmMMSCH_GPUIOV_DW7_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW8_1 0x003e +#define mmMMSCH_GPUIOV_DW8_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CNTXT 0x003f +#define mmMMSCH_GPUIOV_CNTXT_BASE_IDX 0 +#define mmMMSCH_SCRATCH_2 0x0040 +#define mmMMSCH_SCRATCH_2_BASE_IDX 0 +#define mmMMSCH_SCRATCH_3 0x0041 +#define mmMMSCH_SCRATCH_3_BASE_IDX 0 +#define mmMMSCH_SCRATCH_4 0x0042 +#define mmMMSCH_SCRATCH_4_BASE_IDX 0 +#define mmMMSCH_SCRATCH_5 0x0043 +#define mmMMSCH_SCRATCH_5_BASE_IDX 0 +#define mmMMSCH_SCRATCH_6 0x0044 +#define mmMMSCH_SCRATCH_6_BASE_IDX 0 +#define mmMMSCH_SCRATCH_7 0x0045 +#define mmMMSCH_SCRATCH_7_BASE_IDX 0 +#define mmMMSCH_VFID_FIFO_HEAD_0 0x0046 +#define mmMMSCH_VFID_FIFO_HEAD_0_BASE_IDX 0 +#define mmMMSCH_VFID_FIFO_TAIL_0 0x0047 +#define mmMMSCH_VFID_FIFO_TAIL_0_BASE_IDX 0 +#define mmMMSCH_VFID_FIFO_HEAD_1 0x0048 +#define mmMMSCH_VFID_FIFO_HEAD_1_BASE_IDX 0 +#define mmMMSCH_VFID_FIFO_TAIL_1 0x0049 +#define mmMMSCH_VFID_FIFO_TAIL_1_BASE_IDX 0 +#define mmMMSCH_NACK_STATUS 0x004a +#define mmMMSCH_NACK_STATUS_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX0_DATA 0x004b +#define 
mmMMSCH_VF_MAILBOX0_DATA_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX1_DATA 0x004c +#define mmMMSCH_VF_MAILBOX1_DATA_BASE_IDX 0 +#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_0 0x004d +#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_STATUS_IP_0 0x004e +#define mmMMSCH_GPUIOV_CMD_STATUS_IP_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0 0x004f +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_1 0x0050 +#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_STATUS_IP_1 0x0051 +#define mmMMSCH_GPUIOV_CMD_STATUS_IP_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1 0x0052 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CNTXT_IP 0x0053 +#define mmMMSCH_GPUIOV_CNTXT_IP_BASE_IDX 0 +#define mmMMSCH_GPUIOV_SCH_BLOCK_2 0x0054 +#define mmMMSCH_GPUIOV_SCH_BLOCK_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_CONTROL_2 0x0055 +#define mmMMSCH_GPUIOV_CMD_CONTROL_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_STATUS_2 0x0056 +#define mmMMSCH_GPUIOV_CMD_STATUS_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_2 0x0057 +#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCNS_2 0x0058 +#define mmMMSCH_GPUIOV_ACTIVE_FCNS_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_2 0x0059 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW6_2 0x005a +#define mmMMSCH_GPUIOV_DW6_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW7_2 0x005b +#define mmMMSCH_GPUIOV_DW7_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW8_2 0x005c +#define mmMMSCH_GPUIOV_DW8_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_2 0x005d +#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_STATUS_IP_2 0x005e +#define mmMMSCH_GPUIOV_CMD_STATUS_IP_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2 0x005f +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2_BASE_IDX 0 +#define mmMMSCH_VFID_FIFO_HEAD_2 0x0060 +#define mmMMSCH_VFID_FIFO_HEAD_2_BASE_IDX 0 +#define mmMMSCH_VFID_FIFO_TAIL_2 0x0061 +#define mmMMSCH_VFID_FIFO_TAIL_2_BASE_IDX 0 +#define mmMMSCH_VM_BUSY_STATUS_0 0x0062 +#define mmMMSCH_VM_BUSY_STATUS_0_BASE_IDX 0 +#define mmMMSCH_VM_BUSY_STATUS_1 0x0063 +#define mmMMSCH_VM_BUSY_STATUS_1_BASE_IDX 0 +#define mmMMSCH_VM_BUSY_STATUS_2 0x0064 +#define mmMMSCH_VM_BUSY_STATUS_2_BASE_IDX 0 + +#define MMSCH_VERSION_MAJOR 2 +#define MMSCH_VERSION_MINOR 0 +#define MMSCH_VERSION (MMSCH_VERSION_MAJOR << 16 | MMSCH_VERSION_MINOR) + +enum mmsch_v2_0_command_type { + MMSCH_COMMAND__DIRECT_REG_WRITE = 0, + MMSCH_COMMAND__DIRECT_REG_POLLING = 2, + MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE = 3, + MMSCH_COMMAND__INDIRECT_REG_WRITE = 8, + MMSCH_COMMAND__END = 0xf +}; + +struct mmsch_v2_0_init_header { + uint32_t version; + uint32_t header_size; + uint32_t vcn_init_status; + uint32_t vcn_table_offset; + uint32_t vcn_table_size; +}; + +struct mmsch_v2_0_cmd_direct_reg_header { + uint32_t reg_offset : 28; + uint32_t command_type : 4; +}; + +struct mmsch_v2_0_cmd_indirect_reg_header { + uint32_t reg_offset : 20; + uint32_t reg_idx_space : 8; + uint32_t command_type : 4; +}; + +struct mmsch_v2_0_cmd_direct_write { + struct mmsch_v2_0_cmd_direct_reg_header cmd_header; + uint32_t reg_value; +}; + +struct mmsch_v2_0_cmd_direct_read_modify_write { + struct mmsch_v2_0_cmd_direct_reg_header cmd_header; + uint32_t write_data; + uint32_t mask_value; +}; + +struct mmsch_v2_0_cmd_direct_polling { + struct mmsch_v2_0_cmd_direct_reg_header cmd_header; + uint32_t mask_value; + uint32_t wait_value; +}; + +struct 
mmsch_v2_0_cmd_end { + struct mmsch_v2_0_cmd_direct_reg_header cmd_header; +}; + +struct mmsch_v2_0_cmd_indirect_write { + struct mmsch_v2_0_cmd_indirect_reg_header cmd_header; + uint32_t reg_value; +}; + +static inline void mmsch_v2_0_insert_direct_wt(struct mmsch_v2_0_cmd_direct_write *direct_wt, + uint32_t *init_table, + uint32_t reg_offset, + uint32_t value) +{ + direct_wt->cmd_header.reg_offset = reg_offset; + direct_wt->reg_value = value; + memcpy((void *)init_table, direct_wt, sizeof(struct mmsch_v2_0_cmd_direct_write)); +} + +static inline void mmsch_v2_0_insert_direct_rd_mod_wt(struct mmsch_v2_0_cmd_direct_read_modify_write *direct_rd_mod_wt, + uint32_t *init_table, + uint32_t reg_offset, + uint32_t mask, uint32_t data) +{ + direct_rd_mod_wt->cmd_header.reg_offset = reg_offset; + direct_rd_mod_wt->mask_value = mask; + direct_rd_mod_wt->write_data = data; + memcpy((void *)init_table, direct_rd_mod_wt, + sizeof(struct mmsch_v2_0_cmd_direct_read_modify_write)); +} + +static inline void mmsch_v2_0_insert_direct_poll(struct mmsch_v2_0_cmd_direct_polling *direct_poll, + uint32_t *init_table, + uint32_t reg_offset, + uint32_t mask, uint32_t wait) +{ + direct_poll->cmd_header.reg_offset = reg_offset; + direct_poll->mask_value = mask; + direct_poll->wait_value = wait; + memcpy((void *)init_table, direct_poll, sizeof(struct mmsch_v2_0_cmd_direct_polling)); +} + +#define MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(reg, mask, data) { \ + mmsch_v2_0_insert_direct_rd_mod_wt(&direct_rd_mod_wt, \ + init_table, (reg), \ + (mask), (data)); \ + init_table += sizeof(struct mmsch_v2_0_cmd_direct_read_modify_write)/4; \ + table_size += sizeof(struct mmsch_v2_0_cmd_direct_read_modify_write)/4; \ +} + +#define MMSCH_V2_0_INSERT_DIRECT_WT(reg, value) { \ + mmsch_v2_0_insert_direct_wt(&direct_wt, \ + init_table, (reg), \ + (value)); \ + init_table += sizeof(struct mmsch_v2_0_cmd_direct_write)/4; \ + table_size += sizeof(struct mmsch_v2_0_cmd_direct_write)/4; \ +} + +#define MMSCH_V2_0_INSERT_DIRECT_POLL(reg, mask, wait) { \ + mmsch_v2_0_insert_direct_poll(&direct_poll, \ + init_table, (reg), \ + (mask), (wait)); \ + init_table += sizeof(struct mmsch_v2_0_cmd_direct_polling)/4; \ + table_size += sizeof(struct mmsch_v2_0_cmd_direct_polling)/4; \ +} + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c index cf557a428298ba05f825facabafafb0ad648bf92..e08245a446fcecffa350f92fb939a1b5ed76570a 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c @@ -32,6 +32,7 @@ #include "soc15_common.h" #include "navi10_ih.h" +#define MAX_REARM_RETRY 10 static void navi10_ih_set_interrupt_funcs(struct amdgpu_device *adev); @@ -283,6 +284,38 @@ static void navi10_ih_decode_iv(struct amdgpu_device *adev, ih->rptr += 32; } +/** + * navi10_ih_irq_rearm - rearm IRQ if lost + * + * @adev: amdgpu_device pointer + * + */ +static void navi10_ih_irq_rearm(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih) +{ + uint32_t reg_rptr = 0; + uint32_t v = 0; + uint32_t i = 0; + + if (ih == &adev->irq.ih) + reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR); + else if (ih == &adev->irq.ih1) + reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1); + else if (ih == &adev->irq.ih2) + reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2); + else + return; + + /* Rearm IRQ / re-write doorbell if doorbell write is lost */ + for (i = 0; i < MAX_REARM_RETRY; i++) { + v = RREG32_NO_KIQ(reg_rptr); + if ((v < ih->ring_size) && (v != ih->rptr)) + 
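The MMSCH_V2_0_INSERT_* macros above deliberately reach for two variables in the caller's scope, init_table and table_size, advancing both by the command's size in dwords after each memcpy, so the VCN SRIOV setup code is expected to declare them before expanding the macros. A hedged usage sketch against the header above, assuming the table is terminated by an END command as the enum suggests; the register offsets, values and table buffer are placeholders:

#include <string.h>
#include "mmsch_v2_0.h"

/* Sketch: build a tiny MMSCH init table, terminated by an END command.
 * Returns the table length in dwords, as the MMSCH consumes it. */
static uint32_t build_example_table(uint32_t *table)
{
	struct mmsch_v2_0_cmd_direct_write direct_wt = { {0} };
	struct mmsch_v2_0_cmd_end end = { {0} };
	uint32_t *init_table = table;	/* cursor the macros advance */
	uint32_t table_size = 0;	/* dword count the macros grow */

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	MMSCH_V2_0_INSERT_DIRECT_WT(0x0123, 0x00000001);	/* placeholder reg/value */
	MMSCH_V2_0_INSERT_DIRECT_WT(0x0124, 0x0000abcd);	/* placeholder reg/value */

	memcpy(init_table, &end, sizeof(end));			/* terminate the table */
	table_size += sizeof(end) / 4;

	return table_size;
}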
WDOORBELL32(ih->doorbell_index, ih->rptr); + else + break; + } +} + /** * navi10_ih_set_rptr - set the IH ring buffer rptr * @@ -297,6 +330,9 @@ static void navi10_ih_set_rptr(struct amdgpu_device *adev, /* XXX check if swapping is necessary on BE */ *ih->rptr_cpu = ih->rptr; WDOORBELL32(ih->doorbell_index, ih->rptr); + + if (amdgpu_sriov_vf(adev)) + navi10_ih_irq_rearm(adev, ih); } else WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr); } diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 2d1bebdf1603d592b2776cd99fd557305b9d1e75..033cbbca2072d0eb430738dccbafddb5c718d119 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -516,7 +516,8 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) !amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); - amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); + if (!amdgpu_sriov_vf(adev)) + amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); break; default: return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h index 36b65797434e506d335b3af071df0ade99fdbd9d..a44fd6060d5ba1274a8cdb179672852dbbea000b 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h +++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h @@ -31,6 +31,9 @@ #define GFX_CMD_RESERVED_MASK 0x7FF00000 #define GFX_CMD_RESPONSE_MASK 0x80000000 +/* USBC PD FW version retrieval command */ +#define C2PMSG_CMD_GFX_USB_PD_FW_VER 0x2000000 + /* TEE Gfx Command IDs for the register interface. * Command ID must be between 0x00010000 and 0x000F0000. */ diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 8ab3bf3158a9cc35d5744102ea5c08275311646d..0afd610a1263faeb276de8ca284eeb4f1308e497 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -26,6 +26,7 @@ #include "amdgpu.h" #include "amdgpu_psp.h" +#include "amdgpu_ras.h" #include "amdgpu_ucode.h" #include "soc15_common.h" #include "psp_v11_0.h" @@ -65,6 +66,9 @@ MODULE_FIRMWARE("amdgpu/arcturus_ta.bin"); /* memory training timeout define */ #define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000 +/* For large FW files the time to complete can be very long */ +#define USBC_PD_POLLING_LIMIT_S 240 + static int psp_v11_0_init_microcode(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; @@ -865,6 +869,11 @@ static int psp_v11_0_ras_trigger_error(struct psp_context *psp, if (ret) return -EINVAL; + /* If err_event_athub occurs error inject was successful, however + the return status from TA is no longer reliable */ + if (amdgpu_ras_intr_triggered()) + return 0; + return ras_cmd->ras_status; } @@ -1109,6 +1118,82 @@ static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value) WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value); } +static int psp_v11_0_load_usbc_pd_fw(struct psp_context *psp, dma_addr_t dma_addr) +{ + struct amdgpu_device *adev = psp->adev; + uint32_t reg_status; + int ret, i = 0; + + /* Write lower 32-bit address of the PD Controller FW */ + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, lower_32_bits(dma_addr)); + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), + 0x80000000, 0x80000000, false); + if (ret) + return ret; + + /* Fireup interrupt so PSP can pick up the lower address */ + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, 0x800000); + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), + 0x80000000,
0x80000000, false); + if (ret) + return ret; + + reg_status = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35); + + if ((reg_status & 0xFFFF) != 0) { + DRM_ERROR("Lower address load failed - MP0_SMN_C2PMSG_35.Bits [15:0] = %02x...\n", + reg_status & 0xFFFF); + return -EIO; + } + + /* Write upper 32-bit address of the PD Controller FW */ + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, upper_32_bits(dma_addr)); + + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), + 0x80000000, 0x80000000, false); + if (ret) + return ret; + + /* Fireup interrupt so PSP can pick up the upper address */ + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, 0x4000000); + + /* FW load takes very long time */ + do { + msleep(1000); + reg_status = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35); + + if (reg_status & 0x80000000) + goto done; + + } while (++i < USBC_PD_POLLING_LIMIT_S); + + return -ETIME; +done: + + if ((reg_status & 0xFFFF) != 0) { + DRM_ERROR("Upper address load failed - MP0_SMN_C2PMSG_35.Bits [15:0] = x%04x\n", + reg_status & 0xFFFF); + return -EIO; + } + + return 0; +} + +static int psp_v11_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver) +{ + struct amdgpu_device *adev = psp->adev; + int ret; + + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER); + + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), + 0x80000000, 0x80000000, false); + if (!ret) + *fw_ver = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36); + + return ret; +} + static const struct psp_funcs psp_v11_0_funcs = { .init_microcode = psp_v11_0_init_microcode, .bootloader_load_kdb = psp_v11_0_bootloader_load_kdb, @@ -1133,6 +1218,8 @@ static const struct psp_funcs psp_v11_0_funcs = { .mem_training = psp_v11_0_memory_training, .ring_get_wptr = psp_v11_0_ring_get_wptr, .ring_set_wptr = psp_v11_0_ring_set_wptr, + .load_usbc_pd_fw = psp_v11_0_load_usbc_pd_fw, + .read_usbc_pd_fw = psp_v11_0_read_usbc_pd_fw }; void psp_v11_0_set_psp_funcs(struct psp_context *psp) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index e55884d204bd70529afe7e61bedf36691556eb62..9159bd46482b1a0165f7820141e1a0f374bf68dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1801,13 +1801,9 @@ static int sdma_v4_0_late_init(void *handle) struct ras_ih_if ih_info = { .cb = sdma_v4_0_process_ras_data_cb, }; - int i; - /* read back edc counter registers to clear the counters */ - if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) { - for (i = 0; i < adev->sdma.num_instances; i++) - RREG32_SDMA(i, mmSDMA0_EDC_COUNTER); - } + if (adev->sdma.funcs && adev->sdma.funcs->reset_ras_error_count) + adev->sdma.funcs->reset_ras_error_count(adev); if (adev->sdma.funcs && adev->sdma.funcs->ras_late_init) return adev->sdma.funcs->ras_late_init(adev, &ih_info); @@ -2572,10 +2568,22 @@ static int sdma_v4_0_query_ras_error_count(struct amdgpu_device *adev, return 0; }; +static void sdma_v4_0_reset_ras_error_count(struct amdgpu_device *adev) +{ + int i; + + /* read back edc counter registers to clear the counters */ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) { + for (i = 0; i < adev->sdma.num_instances; i++) + RREG32_SDMA(i, mmSDMA0_EDC_COUNTER); + } +} + static const struct amdgpu_sdma_ras_funcs sdma_v4_0_ras_funcs = { .ras_late_init = amdgpu_sdma_ras_late_init, .ras_fini = amdgpu_sdma_ras_fini, .query_ras_error_count = sdma_v4_0_query_ras_error_count, + .reset_ras_error_count = sdma_v4_0_reset_ras_error_count, }; static void 
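psp_v11_0_read_usbc_pd_fw() above follows the usual C2PMSG mailbox shape: post a command in C2PMSG_35, wait for the PSP to raise bit 31, then fetch the payload from C2PMSG_36; the load path does the same dance twice for the two address halves. How common code might drive the two new psp_funcs hooks, as an illustrative sketch; the wrapper below and its call site are assumptions, not part of this hunk:

/* Sketch only: flash the PD controller FW, then read back the version. */
static int usbc_pd_fw_update(struct psp_context *psp, dma_addr_t fw_dma)
{
	uint32_t ver = 0;
	int ret;

	if (!psp->funcs->load_usbc_pd_fw || !psp->funcs->read_usbc_pd_fw)
		return -EOPNOTSUPP;		/* ASIC without PD FW support */

	ret = psp->funcs->load_usbc_pd_fw(psp, fw_dma);
	if (ret)
		return ret;			/* -ETIME after ~240s, or -EIO */

	ret = psp->funcs->read_usbc_pd_fw(psp, &ver);
	if (!ret)
		DRM_INFO("USBC PD FW version: 0x%08x\n", ver);
	return ret;
}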
sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index 4cb4c891120b23fadf37be6502dd4ea2586a0993..0860e85a2d358be2707a69f37207f3d08f27a6ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -3439,7 +3439,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, if (adev->asic_type == CHIP_HAINAN) { if ((adev->pdev->revision == 0x81) || - (adev->pdev->revision == 0x83) || (adev->pdev->revision == 0xC3) || (adev->pdev->device == 0x6664) || (adev->pdev->device == 0x6665) || diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c index c902f26cf50d74be8a55fa3df05f6165363f4f35..9bffbab350413c666095ae163214200b9cad626e 100644 --- a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c +++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c @@ -46,8 +46,7 @@ #define I2C_NO_STOP 1 #define I2C_RESTART 2 -#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control.eeprom_accessor))->adev -#define to_eeprom_control(x) container_of(x, struct amdgpu_ras_eeprom_control, eeprom_accessor) +#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c)) static void smu_v11_0_i2c_set_clock_gating(struct i2c_adapter *control, bool en) { @@ -592,7 +591,8 @@ static uint32_t smu_v11_0_i2c_eeprom_write_data(struct i2c_adapter *control, static void lock_bus(struct i2c_adapter *i2c, unsigned int flags) { - struct amdgpu_ras_eeprom_control *control = to_eeprom_control(i2c); + struct amdgpu_device *adev = to_amdgpu_device(i2c); + struct amdgpu_ras_eeprom_control *control = &adev->psp.ras.ras->eeprom_control; if (!smu_v11_0_i2c_bus_lock(i2c)) { DRM_ERROR("Failed to lock the bus from SMU"); @@ -610,7 +610,8 @@ static int trylock_bus(struct i2c_adapter *i2c, unsigned int flags) static void unlock_bus(struct i2c_adapter *i2c, unsigned int flags) { - struct amdgpu_ras_eeprom_control *control = to_eeprom_control(i2c); + struct amdgpu_device *adev = to_amdgpu_device(i2c); + struct amdgpu_ras_eeprom_control *control = &adev->psp.ras.ras->eeprom_control; if (!smu_v11_0_i2c_bus_unlock(i2c)) { DRM_ERROR("Failed to unlock the bus from SMU"); @@ -630,7 +631,8 @@ static int smu_v11_0_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num) { int i, ret; - struct amdgpu_ras_eeprom_control *control = to_eeprom_control(i2c_adap); + struct amdgpu_device *adev = to_amdgpu_device(i2c_adap); + struct amdgpu_ras_eeprom_control *control = &adev->psp.ras.ras->eeprom_control; if (!control->bus_locked) { DRM_ERROR("I2C bus unlocked, stopping transaction!"); @@ -679,7 +681,7 @@ int smu_v11_0_i2c_eeprom_control_init(struct i2c_adapter *control) control->class = I2C_CLASS_SPD; control->dev.parent = &adev->pdev->dev; control->algo = &smu_v11_0_i2c_eeprom_i2c_algo; - snprintf(control->name, sizeof(control->name), "RAS EEPROM"); + snprintf(control->name, sizeof(control->name), "AMDGPU EEPROM"); control->lock_ops = &smu_v11_0_i2c_i2c_lock_ops; res = i2c_add_adapter(control); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 2b488dfb2f21cfe192b733ab70da9b41d8d05d03..a40499d51c93cd9a1fd3f262402f92c6ae592dc4 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -89,6 +89,13 @@ #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L #define mmHDP_MEM_POWER_CTRL_BASE_IDX 0 + 
+/* for Vega20/arcturus register offset change */ +#define mmROM_INDEX_VG20 0x00e4 +#define mmROM_INDEX_VG20_BASE_IDX 0 +#define mmROM_DATA_VG20 0x00e5 +#define mmROM_DATA_VG20_BASE_IDX 0 + /* * Indirect registers accessor */ @@ -309,6 +316,8 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev, { u32 *dw_ptr; u32 i, length_dw; + uint32_t rom_index_offset; + uint32_t rom_data_offset; if (bios == NULL) return false; @@ -321,11 +330,23 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev, dw_ptr = (u32 *)bios; length_dw = ALIGN(length_bytes, 4) / 4; + switch (adev->asic_type) { + case CHIP_VEGA20: + case CHIP_ARCTURUS: + rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX_VG20); + rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA_VG20); + break; + default: + rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX); + rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA); + break; + } + /* set rom index to 0 */ - WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0); + WREG32(rom_index_offset, 0); /* read out the rom data */ for (i = 0; i < length_dw; i++) - dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA)); + dw_ptr[i] = RREG32(rom_data_offset); return true; } @@ -831,6 +852,15 @@ static bool soc15_need_full_reset(struct amdgpu_device *adev) /* change this when we implement soft reset */ return true; } + +static void vega20_reset_hdp_ras_error_count(struct amdgpu_device *adev) +{ + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP)) + return; + /* read back hdp ras counter to reset it to 0 */ + RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT); +} + static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0, uint64_t *count1) { @@ -998,6 +1028,7 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs = .get_config_memsize = &soc15_get_config_memsize, .flush_hdp = &soc15_flush_hdp, .invalidate_hdp = &soc15_invalidate_hdp, + .reset_hdp_ras_error_count = &vega20_reset_hdp_ras_error_count, .need_full_reset = &soc15_need_full_reset, .init_doorbell_index = &vega20_doorbell_index_init, .get_pcie_usage = &vega20_get_pcie_usage, @@ -1243,6 +1274,10 @@ static int soc15_common_late_init(void *handle) if (amdgpu_sriov_vf(adev)) xgpu_ai_mailbox_get_irq(adev); + if (adev->asic_funcs && + adev->asic_funcs->reset_hdp_ras_error_count) + adev->asic_funcs->reset_hdp_ras_error_count(adev); + if (adev->nbio.funcs->ras_late_init) r = adev->nbio.funcs->ras_late_init(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h index d0fb7a67c1a38d59f3d2622947adaf66452dc215..b03f950c486cb9c5bba06d77c5e64b99157123fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15.h @@ -42,6 +42,13 @@ struct soc15_reg_golden { u32 or_mask; }; +struct soc15_reg_rlcg { + u32 hwip; + u32 instance; + u32 segment; + u32 reg; +}; + struct soc15_reg_entry { uint32_t hwip; uint32_t inst; diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h index 19e870c798967129f317b148719a41bd6a48d8f9..c893c645a4b2d5a22159e17c4e302690c3008e6c 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h @@ -70,10 +70,9 @@ } \ } while (0) -#define AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(a) (amdgpu_sriov_vf((a)) && !amdgpu_sriov_runtime((a))) #define WREG32_RLC(reg, value) \ do { \ - if (AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(adev)) { \ + if (amdgpu_sriov_fullaccess(adev)) { \ uint32_t i = 0; \ uint32_t retries = 50000; \ uint32_t r0 =
adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0; \ @@ -98,7 +97,7 @@ #define WREG32_SOC15_RLC_SHADOW(ip, inst, reg, value) \ do { \ uint32_t target_reg = adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg;\ - if (AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(adev)) { \ + if (amdgpu_sriov_fullaccess(adev)) { \ uint32_t r2 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2; \ uint32_t r3 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3; \ uint32_t grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL; \ diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c index 793bf70e64b1931af561d6d74e1d74925d9cdcf9..14d346321a5f7bfe78c0b4a424cc3113e23e11cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c @@ -186,6 +186,10 @@ static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev, if (rsmu_umc_index_state) umc_v6_1_disable_umc_index_mode(adev); + if ((adev->asic_type == CHIP_ARCTURUS) && + amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) + DRM_WARN("Failed to disable DF-Cstate.\n"); + LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { umc_reg_offset = get_umc_6_reg_offset(adev, umc_inst, @@ -199,6 +203,10 @@ static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev, &(err_data->ue_count)); } + if ((adev->asic_type == CHIP_ARCTURUS) && + amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW)) + DRM_WARN("Failed to enable DF-Cstate\n"); + if (rsmu_umc_index_state) umc_v6_1_enable_umc_index_mode(adev); } @@ -228,7 +236,11 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev, SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0); } - /* skip error address process if -ENOMEM */ + mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4); + + if (mc_umc_status == 0) + return; + if (!err_data->err_addr) { /* clear umc status */ WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL); @@ -236,7 +248,6 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev, } err_rec = &err_data->err_addr[err_data->err_addr_cnt]; - mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4); /* calculate error address if ue/ce error is detected */ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && @@ -288,6 +299,10 @@ static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev, if (rsmu_umc_index_state) umc_v6_1_disable_umc_index_mode(adev); + if ((adev->asic_type == CHIP_ARCTURUS) && + amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) + DRM_WARN("Failed to disable DF-Cstate.\n"); + LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { umc_reg_offset = get_umc_6_reg_offset(adev, umc_inst, @@ -300,6 +315,10 @@ static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev, umc_inst); } + if ((adev->asic_type == CHIP_ARCTURUS) && + amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW)) + DRM_WARN("Failed to enable DF-Cstate\n"); + if (rsmu_umc_index_state) umc_v6_1_enable_umc_index_mode(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 71f61afdc6551d6c04f1d051ff7643fd51d6f2eb..09b0572b838d29d6a758a570555722b03cbba341 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -1352,7 +1352,7 @@ static int vcn_v1_0_set_clockgating_state(void *handle, if (enable) { /* wait for STATUS to clear */ - if (vcn_v1_0_is_idle(handle)) + if (!vcn_v1_0_is_idle(handle)) return -EBUSY;
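/* the block now reports idle, so it is safe to enable SW clock gating */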
vcn_v1_0_enable_clock_gating(adev); } else { diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index c387c81f869583290ff0ca2f5933fb6e2b0edada..ec8091a661df140e8ffcc6d24d1db85225cc4232 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -29,6 +29,7 @@ #include "soc15d.h" #include "amdgpu_pm.h" #include "amdgpu_psp.h" +#include "mmsch_v2_0.h" #include "vcn/vcn_2_0_0_offset.h" #include "vcn/vcn_2_0_0_sh_mask.h" @@ -54,7 +55,7 @@ static int vcn_v2_0_set_powergating_state(void *handle, enum amd_powergating_state state); static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx, struct dpg_pause_state *new_state); - +static int vcn_v2_0_start_sriov(struct amdgpu_device *adev); /** * vcn_v2_0_early_init - set function pointers * @@ -67,7 +68,10 @@ static int vcn_v2_0_early_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; adev->vcn.num_vcn_inst = 1; - adev->vcn.num_enc_rings = 2; + if (amdgpu_sriov_vf(adev)) + adev->vcn.num_enc_rings = 1; + else + adev->vcn.num_enc_rings = 2; vcn_v2_0_set_dec_ring_funcs(adev); vcn_v2_0_set_enc_ring_funcs(adev); @@ -154,7 +158,10 @@ static int vcn_v2_0_sw_init(void *handle) for (i = 0; i < adev->vcn.num_enc_rings; ++i) { ring = &adev->vcn.inst->ring_enc[i]; ring->use_doorbell = true; - ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i; + if (!amdgpu_sriov_vf(adev)) + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i; + else + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i; sprintf(ring->name, "vcn_enc%d", i); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0); if (r) @@ -163,6 +170,10 @@ static int vcn_v2_0_sw_init(void *handle) adev->vcn.pause_dpg_mode = vcn_v2_0_pause_dpg_mode; + r = amdgpu_virt_alloc_mm_table(adev); + if (r) + return r; + return 0; } @@ -178,6 +189,8 @@ static int vcn_v2_0_sw_fini(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + amdgpu_virt_free_mm_table(adev); + r = amdgpu_vcn_suspend(adev); if (r) return r; @@ -203,6 +216,9 @@ static int vcn_v2_0_hw_init(void *handle) adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, ring->doorbell_index, 0); + if (amdgpu_sriov_vf(adev)) + vcn_v2_0_start_sriov(adev); + r = amdgpu_ring_test_helper(ring); if (r) goto done; @@ -304,6 +320,9 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev) uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); uint32_t offset; + if (amdgpu_sriov_vf(adev)) + return; + /* cache window 0: fw */ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, @@ -448,6 +467,9 @@ static void vcn_v2_0_disable_clock_gating(struct amdgpu_device *adev) { uint32_t data; + if (amdgpu_sriov_vf(adev)) + return; + /* UVD disable CGC */ data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL); if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) @@ -606,6 +628,9 @@ static void vcn_v2_0_enable_clock_gating(struct amdgpu_device *adev) { uint32_t data = 0; + if (amdgpu_sriov_vf(adev)) + return; + /* enable UVD CGC */ data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL); if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) @@ -658,6 +683,9 @@ static void vcn_v2_0_disable_static_power_gating(struct amdgpu_device *adev) uint32_t data = 0; int ret; + if (amdgpu_sriov_vf(adev)) + return; + if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT | 1 << 
UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT @@ -705,6 +733,9 @@ static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev) uint32_t data = 0; int ret; + if (amdgpu_sriov_vf(adev)) + return; + if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { /* Before power off, this indicator has to be turned on */ data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS); @@ -1215,9 +1246,12 @@ static int vcn_v2_0_set_clockgating_state(void *handle, struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool enable = (state == AMD_CG_STATE_GATE); + if (amdgpu_sriov_vf(adev)) + return 0; + if (enable) { /* wait for STATUS to clear */ - if (vcn_v2_0_is_idle(handle)) + if (!vcn_v2_0_is_idle(handle)) return -EBUSY; vcn_v2_0_enable_clock_gating(adev); } else { @@ -1631,6 +1665,9 @@ int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring) unsigned i; int r; + if (amdgpu_sriov_vf(adev)) + return 0; + WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 4); if (r) @@ -1667,6 +1704,11 @@ static int vcn_v2_0_set_powergating_state(void *handle, int ret; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (amdgpu_sriov_vf(adev)) { + adev->vcn.cur_state = AMD_PG_STATE_UNGATE; + return 0; + } + if (state == adev->vcn.cur_state) return 0; @@ -1680,6 +1722,215 @@ static int vcn_v2_0_set_powergating_state(void *handle, return ret; } +static int vcn_v2_0_start_mmsch(struct amdgpu_device *adev, + struct amdgpu_mm_table *table) +{ + uint32_t data = 0, loop; + uint64_t addr = table->gpu_addr; + struct mmsch_v2_0_init_header *header; + uint32_t size; + int i; + + header = (struct mmsch_v2_0_init_header *)table->cpu_addr; + size = header->header_size + header->vcn_table_size; + + /* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr + * of memory descriptor location + */ + WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr)); + WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr)); + + /* 2, update vmid of descriptor */ + data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID); + data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK; + /* use domain0 for MM scheduler */ + data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); + WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data); + + /* 3, notify mmsch about the size of this descriptor */ + WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size); + + /* 4, set resp to zero */ + WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0); + + adev->vcn.inst->ring_dec.wptr = 0; + adev->vcn.inst->ring_dec.wptr_old = 0; + vcn_v2_0_dec_ring_set_wptr(&adev->vcn.inst->ring_dec); + + for (i = 0; i < adev->vcn.num_enc_rings; ++i) { + adev->vcn.inst->ring_enc[i].wptr = 0; + adev->vcn.inst->ring_enc[i].wptr_old = 0; + vcn_v2_0_enc_ring_set_wptr(&adev->vcn.inst->ring_enc[i]); + } + + /* 5, kick off the initialization and wait until + * VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero + */ + WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001); + + data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP); + loop = 1000; + while ((data & 0x10000002) != 0x10000002) { + udelay(10); + data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP); + loop--; + if (!loop) + break; + } + + if (!loop) { + DRM_ERROR("failed to init MMSCH, " \ + "mmMMSCH_VF_MAILBOX_RESP = 0x%08x\n", data); + return -EBUSY; + } + + return 0; +} + +static int vcn_v2_0_start_sriov(struct amdgpu_device *adev) +{ + int r; + uint32_t tmp; + struct amdgpu_ring *ring; + uint32_t offset, size; + uint32_t table_size = 0; + struct mmsch_v2_0_cmd_direct_write direct_wt = { {0} }; + struct 
mmsch_v2_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} }; + struct mmsch_v2_0_cmd_direct_polling direct_poll = { {0} }; + struct mmsch_v2_0_cmd_end end = { {0} }; + struct mmsch_v2_0_init_header *header; + uint32_t *init_table = adev->virt.mm_table.cpu_addr; + uint8_t i = 0; + + header = (struct mmsch_v2_0_init_header *)init_table; + direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE; + direct_rd_mod_wt.cmd_header.command_type = + MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE; + direct_poll.cmd_header.command_type = + MMSCH_COMMAND__DIRECT_REG_POLLING; + end.cmd_header.command_type = MMSCH_COMMAND__END; + + if (header->vcn_table_offset == 0 && header->vcn_table_size == 0) { + header->version = MMSCH_VERSION; + header->header_size = sizeof(struct mmsch_v2_0_init_header) >> 2; + + header->vcn_table_offset = header->header_size; + + init_table += header->vcn_table_offset; + + size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); + + MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), + 0xFFFFFFFF, 0x00000004); + + /* mc resume*/ + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + tmp = AMDGPU_UCODE_ID_VCN; + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + adev->firmware.ucode[tmp].tmr_mc_addr_lo); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + adev->firmware.ucode[tmp].tmr_mc_addr_hi); + offset = 0; + } else { + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst->gpu_addr)); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst->gpu_addr)); + offset = size; + } + + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0), + 0); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), + size); + + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst->gpu_addr + offset)); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst->gpu_addr + offset)); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), + 0); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), + AMDGPU_VCN_STACK_SIZE); + + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst->gpu_addr + offset + + AMDGPU_VCN_STACK_SIZE)); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst->gpu_addr + offset + + AMDGPU_VCN_STACK_SIZE)); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), + 0); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2), + AMDGPU_VCN_CONTEXT_SIZE); + + for (r = 0; r < adev->vcn.num_enc_rings; ++r) { + ring = &adev->vcn.inst->ring_enc[r]; + ring->wptr = 0; + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), + lower_32_bits(ring->gpu_addr)); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), + upper_32_bits(ring->gpu_addr)); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), + ring->ring_size / 4); + } + + ring = &adev->vcn.inst->ring_dec; + ring->wptr = 0; + 
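/* program the decode ring base address and control registers via MMSCH direct writes */ +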
MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_RBC_RB_64BIT_BAR_LOW), + lower_32_bits(ring->gpu_addr)); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH), + upper_32_bits(ring->gpu_addr)); + /* force RBC into idle state */ + tmp = order_base_2(ring->ring_size); + tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, tmp); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp); + + /* add end packet */ + tmp = sizeof(struct mmsch_v2_0_cmd_end); + memcpy((void *)init_table, &end, tmp); + table_size += (tmp / 4); + header->vcn_table_size = table_size; + + } + return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table); +} + static const struct amd_ip_funcs vcn_v2_0_ip_funcs = { .name = "vcn_v2_0", .early_init = vcn_v2_0_early_init, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index 2d64ba1adf992a241a1e62548188e71aa328c345..c6363f5ad564026bc1f9f2d844a6bdd07cdc4630 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -74,29 +74,30 @@ static int amdgpu_ih_clientid_vcns[] = { static int vcn_v2_5_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->asic_type == CHIP_ARCTURUS) { - u32 harvest; - int i; - - adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS; - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING); - if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK) - adev->vcn.harvest_config |= 1 << i; - } - - if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 | - AMDGPU_VCN_HARVEST_VCN1)) - /* both instances are harvested, disable the block */ - return -ENOENT; - } else - adev->vcn.num_vcn_inst = 1; if (amdgpu_sriov_vf(adev)) { adev->vcn.num_vcn_inst = 2; adev->vcn.harvest_config = 0; adev->vcn.num_enc_rings = 1; } else { + if (adev->asic_type == CHIP_ARCTURUS) { + u32 harvest; + int i; + + adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS; + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING); + if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK) + adev->vcn.harvest_config |= 1 << i; + } + + if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 | + AMDGPU_VCN_HARVEST_VCN1)) + /* both instances are harvested, disable the block */ + return -ENOENT; + } else + adev->vcn.num_vcn_inst = 1; + adev->vcn.num_enc_rings = 2; } @@ -1672,7 +1673,7 @@ static int vcn_v2_5_set_clockgating_state(void *handle, return 0; if (enable) { - if (vcn_v2_5_is_idle(handle)) + if (!vcn_v2_5_is_idle(handle)) return -EBUSY; vcn_v2_5_enable_clock_gating(adev); } else { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 8d56afd76eb3d5e20c1519fc87bbc8bd5b56af8c..0ec5f25adf56b134461685e7cf8516caa681690f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1169,7 +1169,7 @@ static int kfd_ioctl_get_tile_config(struct file *filep, if (!dev) return -EINVAL; - dev->kfd2kgd->get_tile_config(dev->kgd, &config); + amdgpu_amdkfd_get_tile_config(dev->kgd, &config); args->gb_addr_config = config.gb_addr_config; args->num_banks = config.num_banks; diff --git 
a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 692abfd2088af5332b7d03c2fe9d6e689365a21d..77ea0f0cb163b93d2819368214592b50e7466fe0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1734,7 +1734,7 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm) retval = amdgpu_amdkfd_alloc_gtt_mem(dev->kgd, size, &(mem_obj->gtt_mem), &(mem_obj->gpu_addr), - (void *)&(mem_obj->cpu_ptr), true); + (void *)&(mem_obj->cpu_ptr), false); return retval; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index 1f8365575b12563a5ee039e0724a9cc5955bd402..15476fca8fa636b772e9a944855c557b03f544a1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -187,7 +187,7 @@ static int create_signal_event(struct file *devkfd, if (p->signal_mapped_size && p->signal_event_count == p->signal_mapped_size / 8) { if (!p->signal_event_limit_reached) { - pr_warn("Signal event wasn't created because limit was reached\n"); + pr_debug("Signal event wasn't created because limit was reached\n"); p->signal_event_limit_reached = true; } return -ENOSPC; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c index bb77b8890e77f8ebd71d1e7539405f0c5224518c..78714f9a8b1189c8a2db6037ac933fd353b06ef6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c @@ -316,7 +316,7 @@ static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id) { /* * node id couldn't be 0 - the three MSB bits of - * aperture shoudn't be 0 + * aperture shouldn't be 0 */ pdd->lds_base = MAKE_LDS_APP_BASE_VI(); pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 436b7f518979185392b264c374b9f0c950f973e1..48cda3073b70448dd96851d2a55fa459c65e447d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -87,9 +87,21 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, int retval; struct kfd_mem_obj *mqd_mem_obj = NULL; - /* From V9, for CWSR, the control stack is located on the next page - * boundary after the mqd, we will use the gtt allocation function - * instead of sub-allocation function. + /* For V9 only, due to a HW bug, the control stack of a user mode + * compute queue needs to be allocated just behind the page boundary + * of its regular MQD buffer. So we allocate an enlarged MQD buffer: + * the first page of the buffer serves as the regular MQD buffer + * purpose and the remaining is for control stack. Although the two + * parts are in the same buffer object, they need different memory + * types: MQD part needs UC (uncached) as usual, while control stack + * needs NC (non coherent), which is different from the UC type which + * is used when control stack is allocated in user space. + * + * Because of all those, we use the gtt allocation function instead + * of sub-allocation function for this enlarged MQD buffer. Moreover, + * in order to achieve two memory types in a single buffer object, we + * pass a special bo flag AMDGPU_GEM_CREATE_CP_MQD_GFX9 to instruct + * amdgpu memory functions to do so. 
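+ * + * Illustrative layout of the enlarged buffer (control stack size varies): + * page 0: MQD (UC) | page 1..N: control stack (NC)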
*/ if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) { mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 22abdbc6dfd752a87d7911e2480bedef0d190f3e..fe0cd49d4ea7ce1708dfc00e2388a4bd18dd8c9b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -327,10 +327,10 @@ err_alloc_mem: static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd) { struct qcm_process_device *qpd = &pdd->qpd; - uint32_t flags = ALLOC_MEM_FLAGS_GTT | - ALLOC_MEM_FLAGS_NO_SUBSTITUTE | - ALLOC_MEM_FLAGS_WRITABLE | - ALLOC_MEM_FLAGS_EXECUTABLE; + uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT | + KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE | + KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE | + KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE; void *kaddr; int ret; @@ -641,6 +641,11 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn, /* Indicate to other users that MM is no longer valid */ p->mm = NULL; + /* Signal the eviction fence after user mode queues are + * destroyed. This allows any BOs to be freed without + * triggering pointless evictions or waiting for fences. + */ + dma_fence_signal(p->ef); mutex_unlock(&p->mutex); @@ -692,8 +697,9 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd) { struct kfd_dev *dev = pdd->dev; struct qcm_process_device *qpd = &pdd->qpd; - uint32_t flags = ALLOC_MEM_FLAGS_GTT | - ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_EXECUTABLE; + uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT + | KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE + | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE; void *kaddr; int ret; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 5303877c081a080e94940cdc95372535fd0a6b4f..aa0bfa78a66741be2fbb0dfcb8b598d608119498 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -490,7 +490,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev->node_props.num_sdma_queues_per_engine); sysfs_show_32bit_prop(buffer, "num_cp_queues", dev->node_props.num_cp_queues); - sysfs_show_64bit_prop(buffer, "unique_id", + sysfs_show_64bit_prop(buffer, "unique_id", dev->node_props.unique_id); if (dev->gpu) { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 1c0449adcef68d627dd4187e10c3a166e900b53b..d3674d805a0adc4631222ae721652767a83772dd 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -132,9 +132,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev); /* removes and deallocates the drm structures, created by the above function */ static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm); -static void -amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector); - static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, struct drm_plane *plane, unsigned long possible_crtcs, @@ -410,8 +407,9 @@ static void dm_vupdate_high_irq(void *interrupt_params) if (acrtc) { acrtc_state = to_dm_crtc_state(acrtc->base.state); - DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id, - amdgpu_dm_vrr_active(acrtc_state)); + DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n", + acrtc->crtc_id, + amdgpu_dm_vrr_active(acrtc_state)); /* Core vblank handling is done here after end of front-porch in * 
vrr mode, as vblank timestamping will give valid results @@ -461,8 +459,9 @@ static void dm_crtc_high_irq(void *interrupt_params) if (acrtc) { acrtc_state = to_dm_crtc_state(acrtc->base.state); - DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id, - amdgpu_dm_vrr_active(acrtc_state)); + DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n", + acrtc->crtc_id, + amdgpu_dm_vrr_active(acrtc_state)); /* Core vblank handling at start of front-porch is only possible * in non-vrr mode, as only there vblank timestamping will give @@ -525,8 +524,9 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params) acrtc_state = to_dm_crtc_state(acrtc->base.state); - DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id, - amdgpu_dm_vrr_active(acrtc_state)); + DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id, + amdgpu_dm_vrr_active(acrtc_state), + acrtc_state->active_planes); amdgpu_dm_crtc_handle_crc_irq(&acrtc->base); drm_crtc_handle_vblank(&acrtc->base); @@ -546,7 +546,18 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params) &acrtc_state->vrr_params.adjust); } - if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED) { + /* + * If there aren't any active_planes then DCH HUBP may be clock-gated. + * In that case, pageflip completion interrupts won't fire and pageflip + * completion events won't get delivered. Prevent this by sending + * pending pageflip events from here if a flip is still pending. + * + * If any planes are enabled, use dm_pflip_high_irq() instead, to + * avoid race conditions between flip programming and completion, + * which could cause too early flip completion events. + */ + if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED && + acrtc_state->active_planes == 0) { if (acrtc->event) { drm_crtc_send_vblank_event(&acrtc->base, acrtc->event); acrtc->event = NULL; @@ -902,7 +913,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) init_data.asic_id.chip_family = adev->family; - init_data.asic_id.pci_revision_id = adev->rev_id; + init_data.asic_id.pci_revision_id = adev->pdev->revision; init_data.asic_id.hw_internal_rev = adev->external_rev_id; init_data.asic_id.vram_width = adev->gmc.vram_width; @@ -1435,6 +1446,73 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) drm_kms_helper_hotplug_event(dev); } +static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) +{ + struct smu_context *smu = &adev->smu; + int ret = 0; + + if (!is_support_sw_smu(adev)) + return 0; + + /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends + * on the Windows driver dc implementation. + * For Navi1x, clock settings of dcn watermarks are fixed. The settings + * should be passed to smu during boot up and resume from s3. + * boot up: dc calculates dcn watermark clock settings within dc_create, + * dcn20_resource_construct + * then call pplib functions below to pass the settings to smu: + * smu_set_watermarks_for_clock_ranges + * smu_set_watermarks_table + * navi10_set_watermarks_table + * smu_write_watermarks_table + * + * For Renoir, clock settings of dcn watermark are also fixed values.
+ * dc has implemented a different flow for the Windows driver: + * dc_hardware_init / dc_set_power_state + * dcn10_init_hw + * notify_wm_ranges + * set_wm_ranges + * -- Linux + * smu_set_watermarks_for_clock_ranges + * renoir_set_watermarks_table + * smu_write_watermarks_table + * + * For Linux, + * dc_hardware_init -> amdgpu_dm_init + * dc_set_power_state --> dm_resume + * + * therefore, this function applies to navi10/12/14 but not Renoir. + */ + switch (adev->asic_type) { + case CHIP_NAVI10: + case CHIP_NAVI14: + case CHIP_NAVI12: + break; + default: + return 0; + } + + mutex_lock(&smu->mutex); + + /* pass data to smu controller */ + if ((smu->watermarks_bitmap & WATERMARKS_EXIST) && + !(smu->watermarks_bitmap & WATERMARKS_LOADED)) { + ret = smu_write_watermarks_table(smu); + + if (ret) { + mutex_unlock(&smu->mutex); + DRM_ERROR("Failed to update WMTABLE!\n"); + return ret; + } + smu->watermarks_bitmap |= WATERMARKS_LOADED; + } + + mutex_unlock(&smu->mutex); + + return 0; +} + /** * dm_hw_init() - Initialize DC device * @handle: The base driver device containing the amdgpu_dm device. @@ -1713,6 +1791,8 @@ static int dm_resume(void *handle) amdgpu_dm_irq_resume_late(adev); + amdgpu_dm_smu_write_watermarks_table(adev); + return 0; } @@ -1826,8 +1906,8 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) caps->aux_min_input_signal = min; } -static void -amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) +void amdgpu_dm_update_connector_after_detect( + struct amdgpu_dm_connector *aconnector) { struct drm_connector *connector = &aconnector->base; struct drm_device *dev = connector->dev; @@ -2156,10 +2236,10 @@ static void handle_hpd_rx_irq(void *param) } } #ifdef CONFIG_DRM_AMD_DC_HDCP - if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) { - if (adev->dm.hdcp_workqueue) - hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index); - } + if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) { + if (adev->dm.hdcp_workqueue) + hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index); + } #endif if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) || (dc_link->type == dc_connection_mst_branch)) @@ -2954,6 +3034,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY) dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true; + /* No userspace support.
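+ * Keep triple buffering disabled until userspace can make use of it.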
*/ + dm->dc->debug.disable_tri_buf = true; + return 0; fail: kfree(aencoder); @@ -4245,9 +4328,22 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, struct dmcu *dmcu = core_dc->res_pool->dmcu; stream->psr_version = dmcu->dmcu_version.psr_version; - mod_build_vsc_infopacket(stream, - &stream->vsc_infopacket, - &stream->use_vsc_sdp_for_colorimetry); + + // + // Decide whether the stream supports VSC SDP colorimetry capability + // before building the VSC info packet. + // + stream->use_vsc_sdp_for_colorimetry = false; + if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + stream->use_vsc_sdp_for_colorimetry = + aconnector->dc_sink->is_vsc_sdp_colorimetry_supported; + } else { + if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 && + stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) { + stream->use_vsc_sdp_for_colorimetry = true; + } + } + mod_build_vsc_infopacket(stream, &stream->vsc_infopacket); } } finish: @@ -6469,7 +6565,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, uint32_t target_vblank, last_flip_vblank; bool vrr_active = amdgpu_dm_vrr_active(acrtc_state); bool pflip_present = false; - bool swizzle = true; struct { struct dc_surface_update surface_updates[MAX_SURFACES]; struct dc_plane_info plane_infos[MAX_SURFACES]; @@ -6515,9 +6610,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, dc_plane = dm_new_plane_state->dc_state; - if (dc_plane && !dc_plane->tiling_info.gfx9.swizzle) - swizzle = false; - bundle->surface_updates[planes_count].surface = dc_plane; if (new_pcrtc_state->color_mgmt_changed) { bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction; @@ -6726,8 +6818,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, amdgpu_dm_link_setup_psr(acrtc_state->stream); else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) && acrtc_state->stream->link->psr_feature_enabled && - !acrtc_state->stream->link->psr_allow_active && - swizzle) { + !acrtc_state->stream->link->psr_allow_active) { amdgpu_dm_psr_enable(acrtc_state->stream); } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 2bec5e6a3054455b3b6d74f3d4e72607f20c2dba..5cab3e65d9925e6baf89cdadc19bae3c892e356d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -483,6 +483,9 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc); int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc, struct dc_plane_state *dc_plane_state); +void amdgpu_dm_update_connector_after_detect( + struct amdgpu_dm_connector *aconnector); + extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs; #endif /* __AMDGPU_DM_H__ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index c4fd148bf6e0f1ee39b0434a0656bf0ee1097788..5b70ed3cdb884ff13a42d69501204683ce80f349 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -412,6 +412,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) link->dig_be = config->link_enc_inst; link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1; link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw; + link->dp.mst_supported = config->mst_supported; display->adjust.disable = 1; link->adjust.auth_delay = 2; diff --git
a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 318b474ff20ee33f5090c22643a5fd8d697588f0..c20fb08c450be4489f020a1030d20c9fa3791cae 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -400,8 +400,8 @@ bool dm_helpers_dp_mst_start_top_mgr( struct amdgpu_dm_connector *aconnector = link->priv; if (!aconnector) { - DRM_ERROR("Failed to found connector for link!"); - return false; + DRM_ERROR("Failed to find connector for link!"); + return false; } if (boot) { @@ -423,8 +423,8 @@ void dm_helpers_dp_mst_stop_top_mgr( struct amdgpu_dm_connector *aconnector = link->priv; if (!aconnector) { - DRM_ERROR("Failed to found connector for link!"); - return; + DRM_ERROR("Failed to find connector for link!"); + return; } DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n", @@ -445,7 +445,7 @@ bool dm_helpers_dp_read_dpcd( struct amdgpu_dm_connector *aconnector = link->priv; if (!aconnector) { - DRM_ERROR("Failed to found connector for link!"); + DRM_ERROR("Failed to find connector for link!"); return false; } @@ -463,7 +463,7 @@ bool dm_helpers_dp_write_dpcd( struct amdgpu_dm_connector *aconnector = link->priv; if (!aconnector) { - DRM_ERROR("Failed to found connector for link!"); + DRM_ERROR("Failed to find connector for link!"); return false; } @@ -483,7 +483,7 @@ bool dm_helpers_submit_i2c( bool result; if (!aconnector) { - DRM_ERROR("Failed to found connector for link!"); + DRM_ERROR("Failed to find connector for link!"); return false; } @@ -538,7 +538,7 @@ bool dm_helpers_is_dp_sink_present(struct dc_link *link) struct amdgpu_dm_connector *aconnector = link->priv; if (!aconnector) { - BUG_ON("Failed to found connector for link!"); + BUG_ON("Failed to find connector for link!"); return true; } @@ -580,6 +580,20 @@ enum dc_edid_status dm_helpers_read_local_edid( /* We don't need the original edid anymore */ kfree(edid); + /* connector->display_info will be parsed from EDID and saved + * into drm_connector->display_info from edid by call stack + * below: + * drm_parse_ycbcr420_deep_color_info + * drm_parse_hdmi_forum_vsdb + * drm_parse_cea_ext + * drm_add_display_info + * drm_connector_update_edid_property + * + * drm_connector->display_info will be used by amdgpu_dm funcs, + * like fill_stream_properties_from_drm_display_mode + */ + amdgpu_dm_update_connector_after_detect(aconnector); + edid_status = dm_helpers_parse_edid_caps( ctx, &sink->dc_edid, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 4c5755633bfd7de1db8ccafa76c8f03b1812fd5d..e8208df420d9d786b9f7ae777a5f66d80bc134b9 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -207,7 +207,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc, dsc_caps, NULL, - &dc_sink->sink_dsc_caps.dsc_dec_caps)) + &dc_sink->dsc_caps.dsc_dec_caps)) return false; return true; @@ -262,8 +262,8 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) #if defined(CONFIG_DRM_AMD_DC_DCN) if (!validate_dsc_caps_on_connector(aconnector)) - memset(&aconnector->dc_sink->sink_dsc_caps, - 0, sizeof(aconnector->dc_sink->sink_dsc_caps)); + memset(&aconnector->dc_sink->dsc_caps, + 0, sizeof(aconnector->dc_sink->dsc_caps)); #endif } } @@ 
-451,6 +451,7 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, aconnector->dc_sink); dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; + aconnector->dc_link->cur_link_settings.lane_count = 0; } drm_connector_unregister(connector); @@ -530,7 +531,7 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p memset(¶ms[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg)); if (vars[i].dsc_enabled && dc_dsc_compute_config( params[i].sink->ctx->dc->res_pool->dscs[0], - ¶ms[i].sink->sink_dsc_caps.dsc_dec_caps, + ¶ms[i].sink->dsc_caps.dsc_dec_caps, params[i].sink->ctx->dc->debug.dsc_min_slice_height_override, 0, params[i].timing, @@ -551,7 +552,7 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn) kbps = div_u64((u64)pbn * 994 * 8 * 54, 64); dc_dsc_compute_config( param.sink->ctx->dc->res_pool->dscs[0], - ¶m.sink->sink_dsc_caps.dsc_dec_caps, + ¶m.sink->dsc_caps.dsc_dec_caps, param.sink->ctx->dc->debug.dsc_min_slice_height_override, (int) kbps, param.timing, &dsc_config); @@ -748,14 +749,14 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, params[count].sink = stream->sink; aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; params[count].port = aconnector->port; - params[count].compression_possible = stream->sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported; + params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported; dc_dsc_get_policy_for_timing(params[count].timing, &dsc_policy); if (!dc_dsc_compute_bandwidth_range( stream->sink->ctx->dc->res_pool->dscs[0], stream->sink->ctx->dc->debug.dsc_min_slice_height_override, dsc_policy.min_target_bpp, dsc_policy.max_target_bpp, - &stream->sink->sink_dsc_caps.dsc_dec_caps, + &stream->sink->dsc_caps.dsc_dec_caps, &stream->timing, ¶ms[count].bw_range)) params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing); @@ -837,7 +838,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, if (!aconnector || !aconnector->dc_sink) continue; - if (!aconnector->dc_sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported) + if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported) continue; if (computed_streams[i]) diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 2f1c9584ac32d5c03cd999a50fdf69cc52dffc4b..37fa7b48250ef97a85205b28d6e4a8c4d7a045fa 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -267,7 +267,7 @@ static struct atom_display_object_path_v2 *get_bios_object( && id.enum_id == obj_id.enum_id) return &bp->object_info_tbl.v1_4->display_path[i]; } - /* fall through */ + fallthrough; case OBJECT_TYPE_CONNECTOR: case OBJECT_TYPE_GENERIC: /* Both Generic and Connector Object ID @@ -280,7 +280,7 @@ static struct atom_display_object_path_v2 *get_bios_object( && id.enum_id == obj_id.enum_id) return &bp->object_info_tbl.v1_4->display_path[i]; } - /* fall through */ + fallthrough; default: return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c index 5d081c42e81b0112fa3c3790464882a236d86124..2c6db379afae1797dfffcf62e56b6de8bca6770c 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c @@ -3265,33 +3265,33 @@ bool bw_calcs(struct dc_context *ctx, 
bw_fixed_to_int(bw_mul(data-> stutter_exit_watermark[9], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[0].b_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[4], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[1].b_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[5], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[2].b_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[6], bw_int_to_fixed(1000))); - if (ctx->dc->caps.max_slave_planes) { - calcs_output->stutter_entry_wm_ns[3].b_mark = + calcs_output->stutter_entry_wm_ns[0].b_mark = bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[0], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[4].b_mark = + stutter_entry_watermark[4], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[1].b_mark = bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[1], bw_int_to_fixed(1000))); - } else { - calcs_output->stutter_entry_wm_ns[3].b_mark = + stutter_entry_watermark[5], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[2].b_mark = bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[7], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[4].b_mark = + stutter_entry_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->stutter_entry_wm_ns[3].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[0], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[4].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->stutter_entry_wm_ns[3].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[7], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[4].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->stutter_entry_wm_ns[5].b_mark = bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[8], bw_int_to_fixed(1000))); - } - calcs_output->stutter_entry_wm_ns[5].b_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[9], bw_int_to_fixed(1000))); + stutter_entry_watermark[9], bw_int_to_fixed(1000))); calcs_output->urgent_wm_ns[0].b_mark = bw_fixed_to_int(bw_mul(data-> diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c index f0f07b1601520ded4bab842a6e9be8b4836d77f3..3960a8db94cbe9a937c72d08bfac8ee92ec6f003 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c @@ -712,6 +712,11 @@ unsigned int get_highest_allowed_voltage_level(uint32_t hw_internal_rev, uint32_ case PRID_DALI_DF: case PRID_DALI_E3: case PRID_DALI_E4: + case PRID_POLLOCK_94: + case PRID_POLLOCK_95: + case PRID_POLLOCK_E9: + case PRID_POLLOCK_EA: + case PRID_POLLOCK_EB: return 0; default: break; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 68a1120ff674b81f62817a1cc4b0ef745df0f32e..55d09adbf0d99f534fb1e2913ba457dcc759b9d3 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -115,12 +115,11 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr, dpp_inst = i; dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz; - prev_dppclk_khz = 
clk_mgr->base.ctx->dc->current_state->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz; + prev_dppclk_khz = clk_mgr->dccg->pipe_dppclk_khz[i]; - if ((prev_dppclk_khz > dppclk_khz && safe_to_lower) || prev_dppclk_khz < dppclk_khz) { + if (safe_to_lower || prev_dppclk_khz < dppclk_khz) clk_mgr->dccg->funcs->update_dpp_dto( clk_mgr->dccg, dpp_inst, dppclk_khz); - } } } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index 64cbd5462c79bc7f89e58f179ef14bbfd866f949..ab267ddd4abe3387bbef68a3bdd6f486d426f20e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -46,6 +46,7 @@ /* Constants */ #define LPDDR_MEM_RETRAIN_LATENCY 4.977 /* Number obtained from LPDDR4 Training Counter Requirement doc */ +#define SMU_VER_55_51_0 0x373300 /* SMU Version that is able to set DISPCLK below 100MHz */ /* Macros */ @@ -720,6 +721,13 @@ void rn_clk_mgr_construct( } else { struct clk_log_info log_info = {0}; + clk_mgr->smu_ver = rn_vbios_smu_get_smu_version(clk_mgr); + + /* SMU Version 55.51.0 and up no longer have an issue + * that needs to limit minimum dispclk */ + if (clk_mgr->smu_ver >= SMU_VER_55_51_0) + debug->min_disp_clk_khz = 0; + /* TODO: Check we get what we expect during bringup */ clk_mgr->base.dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 6dece1ee30bf9c48f0a013a2cc31774ed01b61e1..2ffb22177df9d31286029769b3a57d0df70921fe 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1365,7 +1365,7 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc) int i; struct dc_state *context = dc->current_state; - if ((!dc->clk_optimized_required && !dc->wm_optimized_required) || dc->optimize_seamless_boot_streams > 0) + if ((!dc->optimized_required) || dc->optimize_seamless_boot_streams > 0) return true; post_surface_trace(dc); @@ -1378,6 +1378,10 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc) } dc->hwss.optimize_bandwidth(dc, context); + + dc->optimized_required = false; + dc->wm_optimized_required = false; + return true; } @@ -1824,11 +1828,12 @@ enum surface_update_type dc_check_update_surfaces_for_stream( // If there's an available clock comparator, we use that. if (dc->clk_mgr->funcs->are_clock_states_equal) { if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk)) - dc->clk_optimized_required = true; + dc->optimized_required = true; // Else we fall back to mem compare.
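// The memcmp below spans every dc_clocks field up to prev_p_state_change_support.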
} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) { - dc->clk_optimized_required = true; - } + dc->optimized_required = true; + } else if (dc->wm_optimized_required) + dc->optimized_required = true; } return type; @@ -1867,6 +1872,8 @@ static void copy_surface_update_to_plane( surface->time.index++; if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX) surface->time.index = 0; + + surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips; } if (srf_update->scaling_info) { @@ -2198,7 +2205,7 @@ static void commit_planes_for_stream(struct dc *dc, dc->optimize_seamless_boot_streams--; if (dc->optimize_seamless_boot_streams == 0) - dc->clk_optimized_required = true; + dc->optimized_required = true; } } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 02e1ad3182035f60822b5313e6e2302a8e29b6a5..67cfff1586e9fe6e91cd880e4d6a5942cf2ed8c1 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -585,14 +585,14 @@ static void read_current_link_settings_on_detect(struct dc_link *link) LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; } -static bool detect_dp( - struct dc_link *link, - struct display_sink_capability *sink_caps, - bool *converter_disable_audio, - struct audio_support *audio_support, - enum dc_detect_reason reason) +static bool detect_dp(struct dc_link *link, + struct display_sink_capability *sink_caps, + bool *converter_disable_audio, + struct audio_support *audio_support, + enum dc_detect_reason reason) { bool boot = false; + sink_caps->signal = link_detect_sink(link, reason); sink_caps->transaction_type = get_ddc_transaction_type(sink_caps->signal); @@ -609,9 +609,8 @@ static bool detect_dp( sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST; link->type = dc_connection_mst_branch; - dal_ddc_service_set_transaction_type( - link->ddc, - sink_caps->transaction_type); + dal_ddc_service_set_transaction_type(link->ddc, + sink_caps->transaction_type); /* * This call will initiate MST topology discovery. 
Which @@ -640,13 +639,10 @@ static bool detect_dp( if (reason == DETECT_REASON_BOOT) boot = true; - dm_helpers_dp_update_branch_info( - link->ctx, - link); + dm_helpers_dp_update_branch_info(link->ctx, link); - if (!dm_helpers_dp_mst_start_top_mgr( - link->ctx, - link, boot)) { + if (!dm_helpers_dp_mst_start_top_mgr(link->ctx, + link, boot)) { /* MST not supported */ link->type = dc_connection_single; sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT; @@ -654,7 +650,7 @@ static bool detect_dp( } if (link->type != dc_connection_mst_branch && - is_dp_active_dongle(link)) { + is_dp_active_dongle(link)) { /* DP active dongles */ link->type = dc_connection_active_dongle; if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) { @@ -665,14 +661,15 @@ static bool detect_dp( return true; } - if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER) + if (link->dpcd_caps.dongle_type != + DISPLAY_DONGLE_DP_HDMI_CONVERTER) *converter_disable_audio = true; } } else { /* DP passive dongles */ sink_caps->signal = dp_passive_dongle_detection(link->ddc, - sink_caps, - audio_support); + sink_caps, + audio_support); } return true; @@ -973,6 +970,9 @@ static bool dc_link_detect_helper(struct dc_link *link, break; } + if (link->local_sink->edid_caps.panel_patch.disable_fec) + link->ctx->dc->debug.disable_fec = true; + // Check if edid is the same if ((prev_sink != NULL) && ((edid_status == EDID_THE_SAME) || (edid_status == EDID_OK))) same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid); @@ -1498,9 +1498,8 @@ static void enable_stream_features(struct pipe_ctx *pipe_ctx) } } -static enum dc_status enable_link_dp( - struct dc_state *state, - struct pipe_ctx *pipe_ctx) +static enum dc_status enable_link_dp(struct dc_state *state, + struct pipe_ctx *pipe_ctx) { struct dc_stream_state *stream = pipe_ctx->stream; enum dc_status status; @@ -1532,7 +1531,8 @@ static enum dc_status enable_link_dp( pipe_ctx->stream_res.pix_clk_params.requested_sym_clk = link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ; if (state->clk_mgr && !apply_seamless_boot_optimization) - state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false); + state->clk_mgr->funcs->update_clocks(state->clk_mgr, + state, false); // during mode switch we do DP_SET_POWER off then on, and OUI is lost dpcd_set_source_specific_data(link); @@ -1540,21 +1540,20 @@ static enum dc_status enable_link_dp( skip_video_pattern = true; if (link_settings.link_rate == LINK_RATE_LOW) - skip_video_pattern = false; - - if (perform_link_training_with_retries( - &link_settings, - skip_video_pattern, - LINK_TRAINING_ATTEMPTS, - pipe_ctx, - pipe_ctx->stream->signal)) { + skip_video_pattern = false; + + if (perform_link_training_with_retries(&link_settings, + skip_video_pattern, + LINK_TRAINING_ATTEMPTS, + pipe_ctx, + pipe_ctx->stream->signal)) { link->cur_link_settings = link_settings; status = DC_OK; - } - else + } else { status = DC_FAIL_DP_LINK_TRAINING; + } - if (link->preferred_training_settings.fec_enable != NULL) + if (link->preferred_training_settings.fec_enable) fec_enable = *link->preferred_training_settings.fec_enable; else fec_enable = true; @@ -1766,8 +1765,7 @@ static void write_i2c_retimer_setting( slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A * needs to be set to 1 on every 0xA-0xC write. 
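* (hence the repeated writes of offset 0x0A after each 0x0B/0x0C update below)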
@@ -1785,8 +1783,7 @@ static void write_i2c_retimer_setting( pipe_ctx->stream->link->ddc, slave_address, &offset, 1, &value, 1); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } buffer[0] = offset; @@ -1798,8 +1795,7 @@ static void write_i2c_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } } } @@ -1819,8 +1815,7 @@ static void write_i2c_retimer_setting( slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A * needs to be set to 1 on every 0xA-0xC write. @@ -1838,8 +1833,7 @@ static void write_i2c_retimer_setting( pipe_ctx->stream->link->ddc, slave_address, &offset, 1, &value, 1); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } buffer[0] = offset; @@ -1851,8 +1845,7 @@ static void write_i2c_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } } } @@ -1870,8 +1863,7 @@ static void write_i2c_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x00 to 0x23 */ buffer[0] = 0x00; @@ -1882,8 +1874,7 @@ static void write_i2c_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0xff to 0x00 */ buffer[0] = 0xff; @@ -1894,10 +1885,14 @@ static void write_i2c_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } + + return; + +i2c_write_fail: + DC_LOG_DEBUG("Set retimer failed"); } static void write_i2c_default_retimer_setting( @@ -1922,8 +1917,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x0A to 0x17 */ buffer[0] = 0x0A; @@ -1934,8 +1928,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x0B to 0xDA or 0xD8 */ buffer[0] = 0x0B; @@ -1946,8 +1939,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x0A to 0x17 */ buffer[0] = 0x0A; @@ -1958,8 +1950,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x0C to 0x1D or 0x91 */ buffer[0] = 0x0C; @@ 
-1970,8 +1961,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x0A to 0x17 */ buffer[0] = 0x0A; @@ -1982,8 +1972,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; if (is_vga_mode) { @@ -1998,8 +1987,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x00 to 0x23 */ buffer[0] = 0x00; @@ -2010,8 +1998,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0xff to 0x00 */ buffer[0] = 0xff; @@ -2022,9 +2009,13 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val= 0x%x, i2c_success = %d end here\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } + + return; + +i2c_write_fail: + DC_LOG_DEBUG("Set default retimer failed"); } static void write_i2c_redriver_setting( @@ -2053,8 +2044,7 @@ static void write_i2c_redriver_setting( slave_address, buffer[3], buffer[4], buffer[5], buffer[6], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + DC_LOG_DEBUG("Set redriver failed"); } static void disable_link(struct dc_link *link, enum signal_type signal) @@ -2960,6 +2950,8 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) config.link_enc_inst = pipe_ctx->stream->link->link_enc_hw_inst; config.dpms_off = dpms_off; config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context; + config.mst_supported = (pipe_ctx->stream->signal == + SIGNAL_TYPE_DISPLAY_PORT_MST); cp_psp->funcs.update_stream_config(cp_psp->handle, &config); } } @@ -3410,7 +3402,7 @@ uint32_t dc_link_bandwidth_kbps( link_bw_kbps *= 8; /* 8 bits per byte*/ link_bw_kbps *= link_setting->lane_count; - if (link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) { + if (dc_link_is_fec_supported(link) && !link->dc->debug.disable_fec) { /* Account for FEC overhead. * We have to do it based on caps, * and not based on FEC being set ready, @@ -3454,3 +3446,11 @@ void dc_link_overwrite_extended_receiver_cap( dp_overwrite_extended_receiver_cap(link); } +bool dc_link_is_fec_supported(const struct dc_link *link) +{ + return (dc_is_dp_signal(link->connector_signal) && + link->link_enc->features.fec_supported && + link->dpcd_caps.fec_cap.bits.FEC_CAPABLE && + !IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment)); +} + diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 93127bc90f3cd28e041d85dd2dacef5b61026508..7cbb1efb4f68eb245c33eb7dd525256a6ea0b9ed 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -1446,11 +1446,15 @@ enum link_training_result dc_link_dp_perform_link_training( &link->preferred_training_settings, <_settings); - /* 1. 
set link rate, lane count and spread. */ + /* Configure lttpr mode */ + if (!link->is_lttpr_mode_transparent) + configure_lttpr_mode(link); + if (link->ctx->dc->work_arounds.lt_early_cr_pattern) start_clock_recovery_pattern_early(link, <_settings, DPRX); - else - dpcd_set_link_settings(link, <_settings); + + /* 1. set link rate, lane count and spread. */ + dpcd_set_link_settings(link, <_settings); if (link->preferred_training_settings.fec_enable != NULL) fec_enable = *link->preferred_training_settings.fec_enable; @@ -1460,8 +1464,6 @@ enum link_training_result dc_link_dp_perform_link_training( dp_set_fec_ready(link, fec_enable); if (!link->is_lttpr_mode_transparent) { - /* Configure lttpr mode */ - configure_lttpr_mode(link); /* 2. perform link training (set link training done * to false is done as well) @@ -1669,11 +1671,11 @@ enum link_training_result dc_link_dp_sync_lt_attempt( dp_set_panel_mode(link, panel_mode); /* Attempt to train with given link training settings */ - /* Set link rate, lane count and spread. */ if (link->ctx->dc->work_arounds.lt_early_cr_pattern) start_clock_recovery_pattern_early(link, <_settings, DPRX); - else - dpcd_set_link_settings(link, <_settings); + + /* Set link rate, lane count and spread. */ + dpcd_set_link_settings(link, <_settings); /* 2. perform link training (set link training done * to false is done as well) @@ -2672,9 +2674,12 @@ static void dp_test_send_link_test_pattern(struct dc_link *link) break; } - test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ? - DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 : - DP_TEST_PATTERN_COLOR_SPACE_YCBCR601; + if (dpcd_test_params.bits.CLR_FORMAT == 0) + test_pattern_color_space = DP_TEST_PATTERN_COLOR_SPACE_RGB; + else + test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ? 
+ DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 : + DP_TEST_PATTERN_COLOR_SPACE_YCBCR601; dc_link_dp_set_test_pattern( link, @@ -3436,6 +3441,17 @@ static bool retrieve_link_cap(struct dc_link *link) sink_id.ieee_device_id, sizeof(sink_id.ieee_device_id)); + /* Quirk Apple MBP 2017 15" Retina panel: Wrong DP_MAX_LINK_RATE */ + { + uint8_t str_mbp_2017[] = { 101, 68, 21, 101, 98, 97 }; + + if ((link->dpcd_caps.sink_dev_id == 0x0010fa) && + !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2017, + sizeof(str_mbp_2017))) { + link->reported_link_cap.link_rate = 0x0c; + } + } + core_link_read_dpcd( link, DP_SINK_HW_REVISION_START, @@ -3720,7 +3736,8 @@ static void set_crtc_test_pattern(struct dc_link *link, struct pipe_ctx *odm_pipe; enum controller_dp_color_space controller_color_space; int opp_cnt = 1; - uint16_t count = 0; + int offset = 0; + int dpg_width = width; switch (test_pattern_color_space) { case DP_TEST_PATTERN_COLOR_SPACE_RGB: @@ -3742,33 +3759,30 @@ static void set_crtc_test_pattern(struct dc_link *link, for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) opp_cnt++; + dpg_width = width / opp_cnt; + offset = dpg_width; - width /= opp_cnt; + opp->funcs->opp_set_disp_pattern_generator(opp, + controller_test_pattern, + controller_color_space, + color_depth, + NULL, + dpg_width, + height, + 0); for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp; - odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, ¶ms); odm_opp->funcs->opp_set_disp_pattern_generator(odm_opp, controller_test_pattern, controller_color_space, color_depth, NULL, - width, - height); - } - opp->funcs->opp_set_disp_pattern_generator(opp, - controller_test_pattern, - controller_color_space, - color_depth, - NULL, - width, - height); - /* wait for dpg to blank pixel data with test pattern */ - for (count = 0; count < 1000; count++) { - if (opp->funcs->dpg_is_blanked(opp)) - break; - udelay(100); + dpg_width, + height, + offset); + offset += offset; } } } @@ -3786,11 +3800,12 @@ static void set_crtc_test_pattern(struct dc_link *link, else if (opp->funcs->opp_set_disp_pattern_generator) { struct pipe_ctx *odm_pipe; int opp_cnt = 1; + int dpg_width = width; for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) opp_cnt++; - width /= opp_cnt; + dpg_width = width / opp_cnt; for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp; @@ -3800,16 +3815,18 @@ static void set_crtc_test_pattern(struct dc_link *link, CONTROLLER_DP_COLOR_SPACE_UDEFINED, color_depth, NULL, - width, - height); + dpg_width, + height, + 0); } opp->funcs->opp_set_disp_pattern_generator(opp, CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, CONTROLLER_DP_COLOR_SPACE_UDEFINED, color_depth, NULL, - width, - height); + dpg_width, + height, + 0); } } break; @@ -3987,6 +4004,11 @@ bool dc_link_dp_set_test_pattern( default: break; } + + if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable) + pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable( + pipe_ctx->stream_res.tg); + pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg); /* update MSA to requested color space */ pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(pipe_ctx->stream_res.stream_enc, &pipe_ctx->stream->timing, @@ -3994,9 +4016,27 @@ bool dc_link_dp_set_test_pattern( pipe_ctx->stream->use_vsc_sdp_for_colorimetry, 
link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP); + if (pipe_ctx->stream->use_vsc_sdp_for_colorimetry) { + if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) + pipe_ctx->stream->vsc_infopacket.sb[17] |= (1 << 7); // sb17 bit 7 Dynamic Range: 0 = VESA range, 1 = CTA range + else + pipe_ctx->stream->vsc_infopacket.sb[17] &= ~(1 << 7); + resource_build_info_frame(pipe_ctx); + link->dc->hwss.update_info_frame(pipe_ctx); + } + /* CRTC Patterns */ set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space); - + pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg); + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, + CRTC_STATE_VACTIVE); + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, + CRTC_STATE_VBLANK); + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, + CRTC_STATE_VACTIVE); + if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable) + pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable( + pipe_ctx->stream_res.tg); /* Set Test Pattern state */ link->test_pattern_enabled = true; } @@ -4126,8 +4166,7 @@ void dp_set_fec_ready(struct dc_link *link, bool ready) struct link_encoder *link_enc = link->link_enc; uint8_t fec_config = 0; - if (link->dc->debug.disable_fec || - IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment)) + if (!dc_link_is_fec_supported(link) || link->dc->debug.disable_fec) return; if (link_enc->funcs->fec_set_ready && @@ -4162,8 +4201,7 @@ void dp_set_fec_enable(struct dc_link *link, bool enable) { struct link_encoder *link_enc = link->link_enc; - if (link->dc->debug.disable_fec || - IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment)) + if (!dc_link_is_fec_supported(link) || link->dc->debug.disable_fec) return; if (link_enc->funcs->fec_set_enable && diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index 58634f191a55d4406f5ddc41b18b9fbd7ec06b6e..51e0ee6e769507f04e7f8e54c734963a0cef6626 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -431,6 +431,7 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; dsc_cfg.color_depth = stream->timing.display_color_depth; + dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false; dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0); dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; @@ -535,6 +536,7 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable) dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; dsc_cfg.color_depth = stream->timing.display_color_depth; + dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? 
true : false; dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; DC_LOG_DSC(" "); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 572ce384253581f0ac060facc10cdefed383bf44..75c7ce4c75811730a21f25c795ccad5144b7777b 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -893,6 +893,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx) int vpc_div = (data->format == PIXEL_FORMAT_420BPP8 || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1; bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir; + int odm_idx = 0; /* * Need to calculate the scan direction for viewport to make adjustments @@ -924,11 +925,13 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx) * stream->dst.width / stream->src.width - src.x * plane_state->dst_rect.width / src.width * stream->dst.width / stream->src.width); - /*modified recout_skip_h calculation due to odm having no recout offset caused by split*/ + /*modified recout_skip_h calculation due to odm having no recout offset*/ while (odm_pipe) { - recout_skip_h += odm_pipe->plane_res.scl_data.recout.width + odm_pipe->plane_res.scl_data.recout.x; + odm_idx++; odm_pipe = odm_pipe->prev_odm_pipe; } + if (odm_idx) + recout_skip_h += odm_idx * data->recout.width; recout_skip_v = data->recout.y - (stream->dst.y + (plane_state->dst_rect.y - stream->src.y) * stream->dst.height / stream->src.height - @@ -2171,10 +2174,10 @@ enum dc_status dc_validate_global_state( if (pipe_ctx->stream != stream) continue; - if (dc->res_pool->funcs->get_default_swizzle_mode && + if (dc->res_pool->funcs->patch_unknown_plane_state && pipe_ctx->plane_state && pipe_ctx->plane_state->tiling_info.gfx9.swizzle == DC_SW_UNKNOWN) { - result = dc->res_pool->funcs->get_default_swizzle_mode(pipe_ctx->plane_state); + result = dc->res_pool->funcs->patch_unknown_plane_state(pipe_ctx->plane_state); if (result != DC_OK) return result; } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index bc1220dce3b17c8585ef239d90ad77bcd3c37ac5..d3ceb39e428efa0069166179bb9d31941e4c35cd 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -39,7 +39,7 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.2.74" +#define DC_VER "3.2.76" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -230,6 +230,7 @@ struct dc_config { bool forced_clocks; bool disable_extended_timeout_support; // Used to disable extended timeout and lttpr feature as well bool multi_mon_pp_mclk_switch; + bool psr_on_dmub; }; enum visual_confirm { @@ -389,6 +390,7 @@ struct dc_debug_options { int always_scale; bool disable_pplib_clock_request; bool disable_clock_gate; + bool disable_mem_low_power; bool disable_dmcu; bool disable_psr; bool force_abm_enable; @@ -410,7 +412,6 @@ struct dc_debug_options { bool dmub_offload_enabled; bool dmcub_emulation; bool dmub_command_table; /* for testing only */ - bool psr_on_dmub; struct dc_bw_validation_profile bw_val_profile; bool disable_fec; bool disable_48mhz_pwrdwn; @@ -520,7 +521,7 @@ struct dc { struct dce_hwseq *hwseq; /* Require to optimize clocks and bandwidth for added/removed planes */ - bool clk_optimized_required; + bool optimized_required; bool wm_optimized_required; /* Require to maintain clocks and bandwidth for UEFI enabled HW */ @@ -871,6 +872,7 @@ struct dc_flip_addrs { unsigned int flip_timestamp_in_us; bool flip_immediate; /* TODO: 
add flip duration for FreeSync */ + bool triplebuffer_flips; }; bool dc_post_update_surfaces_to_stream( @@ -1024,6 +1026,11 @@ struct dc_sink_dsc_caps { struct dsc_dec_dpcd_caps dsc_dec_caps; }; +struct dc_sink_fec_caps { + bool is_rx_fec_supported; + bool is_topology_fec_supported; +}; + /* * The sink structure contains EDID and other display device properties */ @@ -1037,7 +1044,10 @@ struct dc_sink { struct stereo_3d_features features_3d[TIMING_3D_FORMAT_MAX]; bool converter_disable_audio; - struct dc_sink_dsc_caps sink_dsc_caps; + struct dc_sink_dsc_caps dsc_caps; + struct dc_sink_fec_caps fec_caps; + + bool is_vsc_sdp_colorimetry_supported; /* private to DC core */ struct dc_link *link; diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index c45c7680fa58186a6abbc04effd8195702b67572..00ff5e98278c2f731957a08fcf0c4bb32ca5ed2e 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -333,4 +333,7 @@ bool dc_submit_i2c_oem( uint32_t dc_bandwidth_in_kbps_from_timing( const struct dc_crtc_timing *timing); + +bool dc_link_is_fec_supported(const struct dc_link *link); + #endif /* DC_LINK_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 299f6e00f576b438c6072407a66380845e10d3dd..0d210104ba0a10bf66cabd196b0e305dbf4c2e18 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -231,6 +231,7 @@ struct dc_panel_patch { unsigned int extra_t7_ms; unsigned int skip_scdc_overwrite; unsigned int delay_ignore_msa; + unsigned int disable_fec; }; struct dc_edid_caps { diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c index 68c4049cbc2adaedd986f91e8951b8ad491208f3..743042d5905a690bab22f106f32651891b19aafe 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c @@ -645,7 +645,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, case AUX_TRANSACTION_REPLY_AUX_DEFER: case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER: retry_on_defer = true; - /* fall through */ + fallthrough; case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK: if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) { goto fail; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c index 066188ba79498abfc0d523f8b76a26337da4ba1d..24adec407972ae5a90ea6211c5ec146a9becb560 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c @@ -267,6 +267,9 @@ static void set_speed( uint32_t xtal_ref_div = 0; uint32_t prescale = 0; + if (speed == 0) + return; + REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div); if (xtal_ref_div == 0) @@ -274,17 +277,15 @@ static void set_speed( prescale = ((dce_i2c_hw->reference_frequency * 2) / xtal_ref_div) / speed; - if (speed) { - if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL) - REG_UPDATE_N(SPEED, 3, - FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale, - FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2, - FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 
2:1); - else - REG_UPDATE_N(SPEED, 2, - FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale, - FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2); - } + if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL) + REG_UPDATE_N(SPEED, 3, + FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale, + FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2, + FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 2:1); + else + REG_UPDATE_N(SPEED, 2, + FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale, + FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2); } static bool setup_engine( diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c index 8aa937f496c4ffb5e81b222b98c9ce97793eda02..51481e922eb9ca2a143b8f582ea8453dd92780f5 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c @@ -479,7 +479,7 @@ static void program_grph_pixel_format( case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: sign = 1; floating = 1; - /* fall through */ + fallthrough; case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: /* shouldn't this get float too? */ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: grph_depth = 3; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c index 2c932c29f1f9ede11ccc915d6a7139a1abccdd00..bc109d4fc6e6bfcbed02821c0d2b93b4e5aaee77 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c @@ -134,17 +134,15 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub, int i = 0; for (i = 0; i < MAX_PIPES; i++) { - if (res_ctx && - res_ctx->pipe_ctx[i].stream && - res_ctx->pipe_ctx[i].stream->link && - res_ctx->pipe_ctx[i].stream->link == link && - res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) { + if (res_ctx->pipe_ctx[i].stream && + res_ctx->pipe_ctx[i].stream->link == link && + res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) { pipe_ctx = &res_ctx->pipe_ctx[i]; break; } } - if (!pipe_ctx || !&pipe_ctx->plane_res || !&pipe_ctx->stream_res) + if (!pipe_ctx) return false; // First, set the psr version @@ -235,6 +233,6 @@ struct dmub_psr *dmub_psr_create(struct dc_context *ctx) */ void dmub_psr_destroy(struct dmub_psr **dmub) { - kfree(dmub); + kfree(*dmub); *dmub = NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c index e441c149ff400d474d3412b0440ffb89584b86eb..deccab0228d2b648ee5512de90f697df799f2c50 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c @@ -830,8 +830,8 @@ static void hubbub1_det_request_size( hubbub1_get_blk256_size(&blk256_width, &blk256_height, bpe); - swath_bytes_horz_wc = height * blk256_height * bpe; - swath_bytes_vert_wc = width * blk256_width * bpe; + swath_bytes_horz_wc = width * blk256_height * bpe; + swath_bytes_vert_wc = height * blk256_width * bpe; *req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ? 
false : /* full 256B request */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 385250e1e3fde9080b0e46a919a7bf8971896606..9cc3314966bd158d596d04273867768ea414eb07 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1048,7 +1048,7 @@ void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx) if (opp != NULL) opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true; - dc->clk_optimized_required = true; + dc->optimized_required = true; if (hubp->funcs->hubp_disconnect) hubp->funcs->hubp_disconnect(hubp); @@ -1099,7 +1099,7 @@ void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) false); hubp->power_gated = true; - dc->clk_optimized_required = false; /* We're powering off, no need to optimize */ + dc->optimized_required = false; /* We're powering off, no need to optimize */ hws->funcs.plane_atomic_power_down(dc, pipe_ctx->plane_res.dpp, @@ -1356,6 +1356,9 @@ void dcn10_init_hw(struct dc *dc) */ if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) { hws->funcs.init_pipes(dc, dc->current_state); + if (dc->res_pool->hubbub->funcs->allow_self_refresh_control) + dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, + !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter); } for (i = 0; i < res_pool->audio_count; i++) { @@ -2717,30 +2720,20 @@ void dcn10_optimize_bandwidth( hws->funcs.verify_allow_pstate_change_high(dc); if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - if (context->stream_count == 0) { + if (context->stream_count == 0) context->bw_ctx.bw.dcn.clk.phyclk_khz = 0; - dc->clk_mgr->funcs->update_clocks( - dc->clk_mgr, - context, - true); - } else if (dc->clk_optimized_required || IS_DIAG_DC(dc->ctx->dce_environment)) { - dc->clk_mgr->funcs->update_clocks( - dc->clk_mgr, - context, - true); - } - } - - if (dc->wm_optimized_required || IS_DIAG_DC(dc->ctx->dce_environment)) { - hubbub->funcs->program_watermarks(hubbub, - &context->bw_ctx.bw.dcn.watermarks, - dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, + dc->clk_mgr->funcs->update_clocks( + dc->clk_mgr, + context, true); } - dc->clk_optimized_required = false; - dc->wm_optimized_required = false; + hubbub->funcs->program_watermarks(hubbub, + &context->bw_ctx.bw.dcn.watermarks, + dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, + true); + dcn10_stereo_hw_frame_pack_wa(dc, context); if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h index eb13589b9a81b6579e164c781310ec169ab780f8..762109174fb879650a32df771596410d43d2f564 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h @@ -62,11 +62,11 @@ SRI(DP_DPHY_FAST_TRAINING, DP, id), \ SRI(DP_SEC_CNTL1, DP, id), \ SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \ - SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \ SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id) #define LE_DCN10_REG_LIST(id)\ + SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \ LE_DCN_COMMON_REG_LIST(id) struct dcn10_link_enc_aux_registers { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index a9a43b397db999b8a8252a295588ba7bb604b7a8..63acb8ff7462803690a54e09fcda6ca65a0bda7d 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -299,7 +299,6 @@ void optc1_set_vtg_params(struct timing_generator *optc, uint32_t asic_blank_end; uint32_t v_init; uint32_t v_fp2 = 0; - int32_t vertical_line_start; struct optc *optc1 = DCN10TG_FROM_TG(optc); @@ -316,9 +315,8 @@ void optc1_set_vtg_params(struct timing_generator *optc, patched_crtc_timing.v_border_top; /* if VSTARTUP is before VSYNC, FP2 is the offset, otherwise 0 */ - vertical_line_start = asic_blank_end - optc1->vstartup_start + 1; - if (vertical_line_start < 0) - v_fp2 = -vertical_line_start; + if (optc1->vstartup_start > asic_blank_end) + v_fp2 = optc1->vstartup_start - asic_blank_end; /* Interlace */ if (REG(OTG_INTERLACE_CONTROL)) { @@ -1195,7 +1193,7 @@ static void optc1_enable_stereo(struct timing_generator *optc, REG_UPDATE_3(OTG_STEREO_CONTROL, OTG_STEREO_EN, stereo_en, OTG_STEREO_SYNC_OUTPUT_LINE_NUM, 0, - OTG_STEREO_SYNC_OUTPUT_POLARITY, 0); + OTG_STEREO_SYNC_OUTPUT_POLARITY, flags->RIGHT_EYE_POLARITY == 0 ? 0 : 1); if (flags->PROGRAM_POLARITY) REG_UPDATE(OTG_STEREO_CONTROL, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 3b71898e859e598caee54fbe4657b9d7826479a6..261bdc3a8218c1e8b03cd8f3fd930561dcb849b7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -570,7 +570,7 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults_drv = { .sanity_checks = true, - .disable_dmcu = true, + .disable_dmcu = false, .force_abm_enable = false, .timing_trace = false, .clock_trace = true, @@ -598,7 +598,7 @@ static const struct dc_debug_options debug_defaults_drv = { }; static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = true, + .disable_dmcu = false, .force_abm_enable = false, .timing_trace = true, .clock_trace = true, @@ -1233,7 +1233,7 @@ static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *cont return DC_OK; } -static enum dc_status dcn10_get_default_swizzle_mode(struct dc_plane_state *plane_state) +static enum dc_status dcn10_patch_unknown_plane_state(struct dc_plane_state *plane_state) { enum dc_status result = DC_OK; @@ -1295,7 +1295,7 @@ static const struct resource_funcs dcn10_res_pool_funcs = { .validate_plane = dcn10_validate_plane, .validate_global = dcn10_validate_global, .add_stream_to_ctx = dcn10_add_stream_to_ctx, - .get_default_swizzle_mode = dcn10_get_default_swizzle_mode, + .patch_unknown_plane_state = dcn10_patch_unknown_plane_state, .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c index 50bffbfdd3947b8fecca4c522e033cea299859ac..62cc2651e00c1a1fea1876b09780053d453520a2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c @@ -70,6 +70,8 @@ void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk) REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_ENABLE[dpp_inst], 0); } + + dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk; } void dccg2_get_dccg_ref_freq(struct dccg *dccg, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c index 13e057d7ee93da28efd01a55cbcea1873801e09b..42bba7c9548b0a0d2ae0814394876a85ec0fbd2d 
100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c @@ -369,84 +369,6 @@ void dpp2_set_cursor_attributes( } } -#define IDENTITY_RATIO(ratio) (dc_fixpt_u3d19(ratio) == (1 << 19)) - -bool dpp2_get_optimal_number_of_taps( - struct dpp *dpp, - struct scaler_data *scl_data, - const struct scaling_taps *in_taps) -{ - /* Some ASICs does not support FP16 scaling, so we reject modes require this*/ - if (scl_data->viewport.width != scl_data->h_active && - scl_data->viewport.height != scl_data->v_active && - dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT && - scl_data->format == PIXEL_FORMAT_FP16) - return false; - - if (scl_data->viewport.width > scl_data->h_active && - dpp->ctx->dc->debug.max_downscale_src_width != 0 && - scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width) - return false; - - /* TODO: add lb check */ - - /* No support for programming ratio of 8, drop to 7.99999.. */ - if (scl_data->ratios.horz.value == (8ll << 32)) - scl_data->ratios.horz.value--; - if (scl_data->ratios.vert.value == (8ll << 32)) - scl_data->ratios.vert.value--; - if (scl_data->ratios.horz_c.value == (8ll << 32)) - scl_data->ratios.horz_c.value--; - if (scl_data->ratios.vert_c.value == (8ll << 32)) - scl_data->ratios.vert_c.value--; - - /* Set default taps if none are provided */ - if (in_taps->h_taps == 0) { - if (dc_fixpt_ceil(scl_data->ratios.horz) > 4) - scl_data->taps.h_taps = 8; - else - scl_data->taps.h_taps = 4; - } else - scl_data->taps.h_taps = in_taps->h_taps; - if (in_taps->v_taps == 0) { - if (dc_fixpt_ceil(scl_data->ratios.vert) > 4) - scl_data->taps.v_taps = 8; - else - scl_data->taps.v_taps = 4; - } else - scl_data->taps.v_taps = in_taps->v_taps; - if (in_taps->v_taps_c == 0) { - if (dc_fixpt_ceil(scl_data->ratios.vert_c) > 4) - scl_data->taps.v_taps_c = 4; - else - scl_data->taps.v_taps_c = 2; - } else - scl_data->taps.v_taps_c = in_taps->v_taps_c; - if (in_taps->h_taps_c == 0) { - if (dc_fixpt_ceil(scl_data->ratios.horz_c) > 4) - scl_data->taps.h_taps_c = 4; - else - scl_data->taps.h_taps_c = 2; - } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1) - /* Only 1 and even h_taps_c are supported by hw */ - scl_data->taps.h_taps_c = in_taps->h_taps_c - 1; - else - scl_data->taps.h_taps_c = in_taps->h_taps_c; - - if (!dpp->ctx->dc->debug.always_scale) { - if (IDENTITY_RATIO(scl_data->ratios.horz)) - scl_data->taps.h_taps = 1; - if (IDENTITY_RATIO(scl_data->ratios.vert)) - scl_data->taps.v_taps = 1; - if (IDENTITY_RATIO(scl_data->ratios.horz_c)) - scl_data->taps.h_taps_c = 1; - if (IDENTITY_RATIO(scl_data->ratios.vert_c)) - scl_data->taps.v_taps_c = 1; - } - - return true; -} - void oppn20_dummy_program_regamma_pwl( struct dpp *dpp, const struct pwl_params *params, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c index 6bdfee20b6a79a17d1c068a0a67a6887c40a94db..1b1ae9ce279998f0931caa3b05f3d71b46f27da4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c @@ -369,6 +369,7 @@ static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_ dsc_reg_vals->pps.block_pred_enable = dsc_cfg->dc_dsc_cfg.block_pred_enable; dsc_reg_vals->pps.line_buf_depth = dsc_cfg->dc_dsc_cfg.linebuf_depth; dsc_reg_vals->alternate_ich_encoding_en = dsc_reg_vals->pps.dsc_version_minor == 1 ? 
0 : 1; + dsc_reg_vals->ich_reset_at_eol = (dsc_cfg->is_odm || dsc_reg_vals->num_slices_h > 1) ? 0xF : 0; // TODO: in addition to validating slice height (pic height must be divisible by slice height), // see what happens when the same condition doesn't apply for slice_width/pic_width. @@ -531,7 +532,6 @@ static void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, cons reg_vals->pps.rc_buf_thresh[i] = reg_vals->pps.rc_buf_thresh[i] >> 6; reg_vals->rc_buffer_model_size = dsc_params->rc_buffer_model_size; - reg_vals->ich_reset_at_eol = reg_vals->num_slices_h == 1 ? 0 : 0xf; } static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 97c0c8ced8e593b40984639a742cfa08ba1d8c2b..233318260da428051d2898c24cc270789de29ba5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -307,7 +307,8 @@ void dcn20_init_blank( COLOR_DEPTH_UNDEFINED, &black_color, otg_active_width, - otg_active_height); + otg_active_height, + 0); if (num_opps == 2) { bottom_opp->funcs->opp_set_disp_pattern_generator( @@ -317,7 +318,8 @@ void dcn20_init_blank( COLOR_DEPTH_UNDEFINED, &black_color, otg_active_width, - otg_active_height); + otg_active_height, + 0); } hws->funcs.wait_for_blank_complete(opp); @@ -645,6 +647,9 @@ enum dc_status dcn20_enable_stream_timing( return DC_ERROR_UNEXPECTED; } + if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal))) + dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx); + pipe_ctx->stream_res.tg->funcs->program_timing( pipe_ctx->stream_res.tg, &stream->timing, @@ -974,7 +979,8 @@ void dcn20_blank_pixel_data( stream->timing.display_color_depth, &black_color, width, - height); + height, + 0); for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { odm_pipe->stream_res.opp->funcs->opp_set_disp_pattern_generator( @@ -985,7 +991,8 @@ void dcn20_blank_pixel_data( stream->timing.display_color_depth, &black_color, width, - height); + height, + 0); } if (!blank) @@ -1656,22 +1663,16 @@ void dcn20_optimize_bandwidth( { struct hubbub *hubbub = dc->res_pool->hubbub; - if (dc->wm_optimized_required || IS_DIAG_DC(dc->ctx->dce_environment)) { - /* program dchubbub watermarks */ - hubbub->funcs->program_watermarks(hubbub, - &context->bw_ctx.bw.dcn.watermarks, - dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, - true); - dc->wm_optimized_required = false; - } + /* program dchubbub watermarks */ + hubbub->funcs->program_watermarks(hubbub, + &context->bw_ctx.bw.dcn.watermarks, + dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, + true); - if (dc->clk_optimized_required || IS_DIAG_DC(dc->ctx->dce_environment)) { - dc->clk_mgr->funcs->update_clocks( - dc->clk_mgr, - context, - true); - dc->wm_optimized_required = false; - } + dc->clk_mgr->funcs->update_clocks( + dc->clk_mgr, + context, + true); } bool dcn20_update_bandwidth( diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c index 6c4f90f586563fb6064c932f33dcefbed56eebfc..1e73357eda340acace5542a64e6045d6fb65b791 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c @@ -111,7 +111,6 @@ static const struct hwseq_private_funcs dcn20_private_funcs = { .enable_power_gating_plane = dcn20_enable_power_gating_plane, 
.dpp_pg_control = dcn20_dpp_pg_control, .hubp_pg_control = dcn20_hubp_pg_control, - .dsc_pg_control = NULL, .update_odm = dcn20_update_odm, .dsc_pg_control = dcn20_dsc_pg_control, .get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c index 023cc71fad0f55f02232b88a5cd0c3875993c398..138321e151eb11f6fe93bc6e6c4c42b4f30bdcb8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c @@ -45,7 +45,8 @@ void opp2_set_disp_pattern_generator( enum dc_color_depth color_depth, const struct tg_color *solid_color, int width, - int height) + int height, + int offset) { struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp); enum test_pattern_color_format bit_depth; @@ -92,6 +93,11 @@ void opp2_set_disp_pattern_generator( DPG_ACTIVE_WIDTH, width, DPG_ACTIVE_HEIGHT, height); + /* set DPG offset */ + REG_SET_2(DPG_OFFSET_SEGMENT, 0, + DPG_X_OFFSET, offset, + DPG_SEGMENT_WIDTH, 0); + switch (test_pattern) { case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES: case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA: diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h index 4093bec172c181479a31aaf276349ae2975f3bf0..64c5b429c79a168eecdf52b319bbd755eb9deec1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h @@ -36,6 +36,7 @@ #define OPP_DPG_REG_LIST(id) \ SRI(DPG_CONTROL, DPG, id), \ SRI(DPG_DIMENSIONS, DPG, id), \ + SRI(DPG_OFFSET_SEGMENT, DPG, id), \ SRI(DPG_COLOUR_B_CB, DPG, id), \ SRI(DPG_COLOUR_G_Y, DPG, id), \ SRI(DPG_COLOUR_R_CR, DPG, id), \ @@ -53,6 +54,7 @@ uint32_t FMT_422_CONTROL; \ uint32_t DPG_CONTROL; \ uint32_t DPG_DIMENSIONS; \ + uint32_t DPG_OFFSET_SEGMENT; \ uint32_t DPG_COLOUR_B_CB; \ uint32_t DPG_COLOUR_G_Y; \ uint32_t DPG_COLOUR_R_CR; \ @@ -68,6 +70,8 @@ OPP_SF(DPG0_DPG_CONTROL, DPG_HRES, mask_sh), \ OPP_SF(DPG0_DPG_DIMENSIONS, DPG_ACTIVE_WIDTH, mask_sh), \ OPP_SF(DPG0_DPG_DIMENSIONS, DPG_ACTIVE_HEIGHT, mask_sh), \ + OPP_SF(DPG0_DPG_OFFSET_SEGMENT, DPG_X_OFFSET, mask_sh), \ + OPP_SF(DPG0_DPG_OFFSET_SEGMENT, DPG_SEGMENT_WIDTH, mask_sh), \ OPP_SF(DPG0_DPG_COLOUR_R_CR, DPG_COLOUR0_R_CR, mask_sh), \ OPP_SF(DPG0_DPG_COLOUR_R_CR, DPG_COLOUR1_R_CR, mask_sh), \ OPP_SF(DPG0_DPG_COLOUR_B_CB, DPG_COLOUR0_B_CB, mask_sh), \ @@ -97,6 +101,8 @@ type DPG_HRES; \ type DPG_ACTIVE_WIDTH; \ type DPG_ACTIVE_HEIGHT; \ + type DPG_X_OFFSET; \ + type DPG_SEGMENT_WIDTH; \ type DPG_COLOUR0_R_CR; \ type DPG_COLOUR1_R_CR; \ type DPG_COLOUR0_B_CB; \ @@ -144,7 +150,8 @@ void opp2_set_disp_pattern_generator( enum dc_color_depth color_depth, const struct tg_color *solid_color, int width, - int height); + int height, + int offset); bool opp2_dpg_is_blanked(struct output_pixel_processor *opp); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 78971b6b195c3022ed804974455d16d684257ae2..a67395208991f83e01be1f219f3714a4215b3ecf 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -153,6 +153,7 @@ struct _vcs_dpi_ip_params_st dcn2_0_ip = { .xfc_supported = true, .xfc_fill_bw_overhead_percent = 10.0, .xfc_fill_constant_bytes = 0, + .number_of_cursors = 1, }; struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = { @@ -220,7 +221,8 @@ struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = { .xfc_supported 
= true, .xfc_fill_bw_overhead_percent = 10.0, .xfc_fill_constant_bytes = 0, - .ptoi_supported = 0 + .ptoi_supported = 0, + .number_of_cursors = 1, }; struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = { @@ -335,6 +337,117 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = { .use_urgent_burst_bw = 0 }; +struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = { + .clock_limits = { + { + .state = 0, + .dcfclk_mhz = 560.0, + .fabricclk_mhz = 560.0, + .dispclk_mhz = 513.0, + .dppclk_mhz = 513.0, + .phyclk_mhz = 540.0, + .socclk_mhz = 560.0, + .dscclk_mhz = 171.0, + .dram_speed_mts = 8960.0, + }, + { + .state = 1, + .dcfclk_mhz = 694.0, + .fabricclk_mhz = 694.0, + .dispclk_mhz = 642.0, + .dppclk_mhz = 642.0, + .phyclk_mhz = 600.0, + .socclk_mhz = 694.0, + .dscclk_mhz = 214.0, + .dram_speed_mts = 11104.0, + }, + { + .state = 2, + .dcfclk_mhz = 875.0, + .fabricclk_mhz = 875.0, + .dispclk_mhz = 734.0, + .dppclk_mhz = 734.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 875.0, + .dscclk_mhz = 245.0, + .dram_speed_mts = 14000.0, + }, + { + .state = 3, + .dcfclk_mhz = 1000.0, + .fabricclk_mhz = 1000.0, + .dispclk_mhz = 1100.0, + .dppclk_mhz = 1100.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1000.0, + .dscclk_mhz = 367.0, + .dram_speed_mts = 16000.0, + }, + { + .state = 4, + .dcfclk_mhz = 1200.0, + .fabricclk_mhz = 1200.0, + .dispclk_mhz = 1284.0, + .dppclk_mhz = 1284.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1200.0, + .dscclk_mhz = 428.0, + .dram_speed_mts = 16000.0, + }, + /*Extra state, no dispclk ramping*/ + { + .state = 5, + .dcfclk_mhz = 1200.0, + .fabricclk_mhz = 1200.0, + .dispclk_mhz = 1284.0, + .dppclk_mhz = 1284.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1200.0, + .dscclk_mhz = 428.0, + .dram_speed_mts = 16000.0, + }, + }, + .num_states = 5, + .sr_exit_time_us = 8.6, + .sr_enter_plus_exit_time_us = 10.9, + .urgent_latency_us = 4.0, + .urgent_latency_pixel_data_only_us = 4.0, + .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, + .urgent_latency_vm_data_only_us = 4.0, + .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, + .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, + .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0, + .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0, + .max_avg_sdp_bw_use_normal_percent = 40.0, + .max_avg_dram_bw_use_normal_percent = 40.0, + .writeback_latency_us = 12.0, + .ideal_dram_bw_after_urgent_percent = 40.0, + .max_request_size_bytes = 256, + .dram_channel_width_bytes = 2, + .fabric_datapath_to_dcn_data_return_bytes = 64, + .dcn_downspread_percent = 0.5, + .downspread_percent = 0.38, + .dram_page_open_time_ns = 50.0, + .dram_rw_turnaround_time_ns = 17.5, + .dram_return_buffer_per_channel_bytes = 8192, + .round_trip_ping_latency_dcfclk_cycles = 131, + .urgent_out_of_order_return_per_channel_bytes = 256, + .channel_interleave_bytes = 256, + .num_banks = 8, + .num_chans = 8, + .vmm_page_size_bytes = 4096, + .dram_clock_change_latency_us = 404.0, + .dummy_pstate_latency_us = 5.0, + .writeback_dram_clock_change_latency_us = 23.0, + .return_bus_width_bytes = 64, + .dispclk_dppclk_vco_speed_mhz = 3850, + .xfc_bus_transport_time_us = 20, + .xfc_xbuf_latency_tolerance_us = 4, + .use_urgent_burst_bw = 0 +}; + struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 }; #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL @@ -928,7 +1041,7 @@ static const struct resource_caps res_cap_nv14 = { }; static const struct dc_debug_options 
debug_defaults_drv = { - .disable_dmcu = true, + .disable_dmcu = false, .force_abm_enable = false, .timing_trace = false, .clock_trace = true, @@ -947,7 +1060,7 @@ static const struct dc_debug_options debug_defaults_drv = { }; static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = true, + .disable_dmcu = false, .force_abm_enable = false, .timing_trace = true, .clock_trace = true, @@ -1143,6 +1256,7 @@ static const struct encoder_feature_support link_enc_feature = { .max_hdmi_pixel_clock = 600000, .hdmi_ycbcr420_supported = true, .dp_ycbcr420_supported = true, + .fec_supported = true, .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_HBR3_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true, @@ -1557,7 +1671,7 @@ static void acquire_dsc(struct resource_context *res_ctx, } } -static void release_dsc(struct resource_context *res_ctx, +void dcn20_release_dsc(struct resource_context *res_ctx, const struct resource_pool *pool, struct display_stream_compressor **dsc) { @@ -1617,7 +1731,7 @@ static enum dc_status remove_dsc_from_stream_resource(struct dc *dc, pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream_res.dsc) - release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc); + dcn20_release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc); } } @@ -2041,14 +2155,17 @@ int dcn20_populate_dml_pipes_from_context( /* todo: default max for now, until there is logic reflecting this in dc*/ pipes[pipe_cnt].dout.output_bpc = 12; /* - * Use max cursor settings for calculations to minimize + * For graphic plane, cursor number is 1, nv12 is 0 * bw calculations due to cursor on/off */ - pipes[pipe_cnt].pipe.src.num_cursors = 2; + if (res_ctx->pipe_ctx[i].plane_state && + res_ctx->pipe_ctx[i].plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) + pipes[pipe_cnt].pipe.src.num_cursors = 0; + else + pipes[pipe_cnt].pipe.src.num_cursors = dc->dml.ip.number_of_cursors; + pipes[pipe_cnt].pipe.src.cur0_src_width = 256; pipes[pipe_cnt].pipe.src.cur0_bpp = dm_cur_32bit; - pipes[pipe_cnt].pipe.src.cur1_src_width = 256; - pipes[pipe_cnt].pipe.src.cur1_bpp = dm_cur_32bit; if (!res_ctx->pipe_ctx[i].plane_state) { pipes[pipe_cnt].pipe.src.is_hsplit = pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled; @@ -2298,6 +2415,7 @@ bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx) + stream->timing.v_border_bottom; dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; dsc_cfg.color_depth = stream->timing.display_color_depth; + dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? 
true : false; dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; @@ -2384,7 +2502,7 @@ struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc, return secondary_pipe; } -void dcn20_merge_pipes_for_validate( +static void dcn20_merge_pipes_for_validate( struct dc *dc, struct dc_state *context) { @@ -2409,7 +2527,7 @@ void dcn20_merge_pipes_for_validate( odm_pipe->prev_odm_pipe = NULL; odm_pipe->next_odm_pipe = NULL; if (odm_pipe->stream_res.dsc) - release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc); + dcn20_release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc); /* Clear plane_res and stream_res */ memset(&odm_pipe->plane_res, 0, sizeof(odm_pipe->plane_res)); memset(&odm_pipe->stream_res, 0, sizeof(odm_pipe->stream_res)); @@ -2447,41 +2565,29 @@ int dcn20_validate_apply_pipe_split_flags( struct dc *dc, struct dc_state *context, int vlevel, - bool *split) + bool *split, + bool *merge) { int i, pipe_idx, vlevel_split; + int plane_count = 0; bool force_split = false; - bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC; + bool avoid_split = dc->debug.pipe_split_policy == MPC_SPLIT_AVOID; - /* Single display loop, exits if there is more than one display */ + if (context->stream_count > 1) { + if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP) + avoid_split = true; + } else if (dc->debug.force_single_disp_pipe_split) + force_split = true; + + /* TODO: fix dc bugs and remove this split threshold thing */ for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - bool exit_loop = false; - - if (!pipe->stream || pipe->top_pipe) - continue; - if (dc->debug.force_single_disp_pipe_split) { - if (!force_split) - force_split = true; - else { - force_split = false; - exit_loop = true; - } - } - if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP) { - if (avoid_split) - avoid_split = false; - else { - avoid_split = true; - exit_loop = true; - } - } - if (exit_loop) - break; + if (pipe->stream && !pipe->prev_odm_pipe && + (!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state)) + ++plane_count; } - /* TODO: fix dc bugs and remove this split threshold thing */ - if (context->stream_count > dc->res_pool->pipe_count / 2) + if (plane_count > dc->res_pool->pipe_count / 2) avoid_split = true; /* Avoid split loop looks for lowest voltage level that allows most unsplit pipes possible */ @@ -2504,11 +2610,12 @@ int dcn20_validate_apply_pipe_split_flags( /* Split loop sets which pipe should be split based on dml outputs and dc flags */ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + int pipe_plane = context->bw_ctx.dml.vba.pipe_plane[pipe_idx]; if (!context->res_ctx.pipe_ctx[i].stream) continue; - if (force_split || context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] > 1) + if (force_split || context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_plane] > 1) split[i] = true; if ((pipe->stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE || @@ -2521,10 +2628,44 @@ int dcn20_validate_apply_pipe_split_flags( split[i] = true; if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) { split[i] = true; - context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = dm_odm_combine_mode_2to1; + context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1; } - 
context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] = - context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx]; + context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] = + context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_plane]; + + if (pipe->prev_odm_pipe && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] != dm_odm_combine_mode_disabled) { + /*Already split odm pipe tree, don't try to split again*/ + split[i] = false; + split[pipe->prev_odm_pipe->pipe_idx] = false; + } else if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state + && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) { + /*Already split mpc tree, don't try to split again, assumes only 2x mpc combine*/ + split[i] = false; + split[pipe->top_pipe->pipe_idx] = false; + } else if (pipe->prev_odm_pipe || (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state)) { + if (split[i] == false) { + /*Exiting mpc/odm combine*/ + merge[i] = true; + if (pipe->prev_odm_pipe) { + ASSERT(0); /*should not actually happen yet*/ + merge[pipe->prev_odm_pipe->pipe_idx] = true; + } else + merge[pipe->top_pipe->pipe_idx] = true; + } else { + /*Transition from mpc combine to odm combine or vice versa*/ + ASSERT(0); /*should not actually happen yet*/ + split[i] = true; + merge[i] = true; + if (pipe->prev_odm_pipe) { + split[pipe->prev_odm_pipe->pipe_idx] = true; + merge[pipe->prev_odm_pipe->pipe_idx] = true; + } else { + split[pipe->top_pipe->pipe_idx] = true; + merge[pipe->top_pipe->pipe_idx] = true; + } + } + } + /* Adjust dppclk when split is forced, do not bother with dispclk */ if (split[i] && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1) context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2; @@ -2566,7 +2707,7 @@ bool dcn20_fast_validate_bw( if (vlevel > context->bw_ctx.dml.soc.num_states) goto validate_fail; - vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split); + vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, NULL); /*initialize pipe_just_split_from to invalid idx*/ for (i = 0; i < MAX_PIPES; i++) @@ -3026,7 +3167,7 @@ static struct dc_cap_funcs cap_funcs = { }; -enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state) +enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state) { enum dc_status result = DC_OK; @@ -3052,7 +3193,7 @@ static struct resource_funcs dcn20_res_pool_funcs = { .add_stream_to_ctx = dcn20_add_stream_to_ctx, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, .populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context, - .get_default_swizzle_mode = dcn20_get_default_swizzle_mode, + .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .set_mcif_arb_params = dcn20_set_mcif_arb_params, .populate_dml_pipes = dcn20_populate_dml_pipes_from_context, .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link @@ -3290,6 +3431,9 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb( uint32_t hw_internal_rev) { + if (ASICREV_IS_NAVI14_M(hw_internal_rev)) + return &dcn2_0_nv14_soc; + if (ASICREV_IS_NAVI12_P(hw_internal_rev)) return &dcn2_0_nv12_soc; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h index 
f5893840b79be1089de686411808cc71733e7b8a..9d5bff9455fd0f7f6425de0fbc9ea9b50b1e7ed1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h @@ -119,14 +119,15 @@ void dcn20_set_mcif_arb_params( display_e2e_pipe_params_st *pipes, int pipe_cnt); bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate); -void dcn20_merge_pipes_for_validate( - struct dc *dc, - struct dc_state *context); int dcn20_validate_apply_pipe_split_flags( struct dc *dc, struct dc_state *context, int vlevel, - bool *split); + bool *split, + bool *merge); +void dcn20_release_dsc(struct resource_context *res_ctx, + const struct resource_pool *pool, + struct display_stream_compressor **dsc); bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx); void dcn20_split_stream_for_mpc( struct resource_context *res_ctx, @@ -159,7 +160,7 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream); enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc, struct dc_state *dc_ctx, struct dc_stream_state *dc_stream); enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream); -enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state); +enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state); void dcn20_patch_bounding_box( struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c index 081ad8e43d581519a0be5ed86aafd67c40eb6ad0..ada65b1a7eb19c5453008685343386285546baec 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c @@ -112,3 +112,25 @@ void dcn21_optimize_pwr_state( true); } +/* If the user hotplugs an HDMI monitor while the display is off, + * the OS will do a mode set (with output timing) but keep the output off. + * In this case DAL will ask vbios to power up the PLL in the PHY. + * If the user then unplugs the monitor (while the display is still off) or + * the system attempts to enter modern standby (where we disable the PLL), + * the PHY will hang on the next mode set attempt. + * If a PLL enable is followed by a PLL disable (without executing lane enable/disable), + * RDPCS_PHY_DP_MPLLB_STATE remains 1, + * which indicates that the PLL disable attempt did not actually go through. + * As a workaround, insert a PHY lane enable/disable before the PLL disable.
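+ * The function below implements this: it briefly clears stream->dpms_off, calls + * core_link_enable_stream() and then core_link_disable_stream() to cycle the + * PHY lanes, and finally restores dpms_off.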
+ */ +void dcn21_PLAT_58856_wa(struct dc_state *context, struct pipe_ctx *pipe_ctx) +{ + if (!pipe_ctx->stream->dpms_off) + return; + + pipe_ctx->stream->dpms_off = false; + core_link_enable_stream(context, pipe_ctx); + core_link_disable_stream(pipe_ctx); + pipe_ctx->stream->dpms_off = true; +} + diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h index 18273609612316308f41c31116b8a95e2e698908..26bf24d3b59f20c1cdc80f04f6005f9be64573d0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h @@ -44,4 +44,7 @@ void dcn21_optimize_pwr_state( const struct dc *dc, struct dc_state *context); +void dcn21_PLAT_58856_wa(struct dc_state *context, + struct pipe_ctx *pipe_ctx); + #endif /* __DC_HWSS_DCN21_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c index f9a7e43d66b94536bca5da414fa679cbf87f8261..b9ff9767e08fd4680f5f8fb512b81dc5fb7936d8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c @@ -119,7 +119,6 @@ static const struct hwseq_private_funcs dcn21_private_funcs = { .enable_power_gating_plane = dcn20_enable_power_gating_plane, .dpp_pg_control = dcn20_dpp_pg_control, .hubp_pg_control = dcn20_hubp_pg_control, - .dsc_pg_control = NULL, .update_odm = dcn20_update_odm, .dsc_pg_control = dcn20_dsc_pg_control, .get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color, @@ -131,6 +130,7 @@ static const struct hwseq_private_funcs dcn21_private_funcs = { .dccg_init = dcn20_dccg_init, .set_blend_lut = dcn20_set_blend_lut, .set_shaper_3dlut = dcn20_set_shaper_3dlut, + .PLAT_58856_wa = dcn21_PLAT_58856_wa, }; void dcn21_hw_sequencer_construct(struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index dce4966eca2032b95f40d694655339b03427a362..51b5910cd05fab3e5efd7c8ddbbc1e88e4684382 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -156,7 +156,8 @@ struct _vcs_dpi_ip_params_st dcn2_1_ip = { .xfc_supported = false, .xfc_fill_bw_overhead_percent = 10.0, .xfc_fill_constant_bytes = 0, - .ptoi_supported = 0 + .ptoi_supported = 0, + .number_of_cursors = 1, }; struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = { @@ -854,7 +855,7 @@ static const struct dc_plane_cap plane_cap = { }; static const struct dc_debug_options debug_defaults_drv = { - .disable_dmcu = true, + .disable_dmcu = false, .force_abm_enable = false, .timing_trace = false, .clock_trace = true, @@ -875,7 +876,7 @@ static const struct dc_debug_options debug_defaults_drv = { }; static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = true, + .disable_dmcu = false, .force_abm_enable = false, .timing_trace = true, .clock_trace = true, @@ -1589,6 +1590,7 @@ static const struct encoder_feature_support link_enc_feature = { .max_hdmi_pixel_clock = 600000, .hdmi_ycbcr420_supported = true, .dp_ycbcr420_supported = true, + .fec_supported = true, .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_HBR3_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true, @@ -1729,6 +1731,19 @@ static int dcn21_populate_dml_pipes_from_context( return pipe_cnt; } +enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_state) +{ + enum dc_status result = DC_OK; + + if 
(plane_state->ctx->dc->debug.disable_dcc == DCC_ENABLE) { + plane_state->dcc.enable = 1; + /* align to our worst case block width */ + plane_state->dcc.meta_pitch = ((plane_state->src_rect.width + 1023) / 1024) * 1024; + } + result = dcn20_patch_unknown_plane_state(plane_state); + return result; +} + static struct resource_funcs dcn21_res_pool_funcs = { .destroy = dcn21_destroy_resource_pool, .link_enc_create = dcn21_link_encoder_create, @@ -1738,7 +1753,7 @@ static struct resource_funcs dcn21_res_pool_funcs = { .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, .populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context, - .get_default_swizzle_mode = dcn20_get_default_swizzle_mode, + .patch_unknown_plane_state = dcn21_patch_unknown_plane_state, .set_mcif_arb_params = dcn20_set_mcif_arb_params, .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, .update_bw_bounding_box = update_bw_bounding_box @@ -1785,6 +1800,7 @@ static bool dcn21_resource_construct( dc->caps.force_dp_tps4_for_cp2520 = true; dc->caps.extended_aux_timeout_support = true; dc->caps.dmcub_support = true; + dc->caps.is_apu = true; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; @@ -1848,7 +1864,7 @@ static bool dcn21_resource_construct( goto create_fail; } - if (dc->debug.psr_on_dmub) { + if (dc->debug.disable_dmcu) { pool->base.psr = dmub_psr_create(ctx); if (pool->base.psr == NULL) { diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h index 626d22d437f467f9f84d77ffe28aad63d5b1a1fc..968c46dfb50621a53acdecb75950cc0586efa9a9 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h +++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h @@ -32,6 +32,7 @@ struct cp_psp_stream_config { uint8_t otg_inst; uint8_t link_enc_inst; uint8_t stream_enc_inst; + uint8_t mst_supported; void *dm_stream_ctx; bool dpms_off; }; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index a56b611db15ed8617a49953ed3d56de0d4a3815e..dfd3be452766bd822ba2954393d4b97f67e38382 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h @@ -69,6 +69,7 @@ struct _vcs_dpi_voltage_scaling_st { struct _vcs_dpi_soc_bounding_box_st { struct _vcs_dpi_voltage_scaling_st clock_limits[MAX_CLOCK_LIMIT_STATES]; + unsigned int num_states; double sr_exit_time_us; double sr_enter_plus_exit_time_us; double urgent_latency_us; @@ -111,7 +112,6 @@ struct _vcs_dpi_soc_bounding_box_st { double xfc_bus_transport_time_us; double xfc_xbuf_latency_tolerance_us; int use_urgent_burst_bw; - unsigned int num_states; double min_dcfclk; bool do_urgent_latency_adjustment; double urgent_latency_adjustment_fabric_clock_component_us; @@ -204,6 +204,7 @@ struct _vcs_dpi_ip_params_st { unsigned int LineBufferFixedBpp; unsigned int can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one; unsigned int bug_forcing_LC_req_same_size_fixed; + unsigned int number_of_cursors; }; struct _vcs_dpi_display_xfc_params_st { diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index f285b76888fbae2a3ab674a3c1ac1df770356422..d523fc9547e705d66bf5322384f3a91e1eb78045 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ 
-124,7 +124,7 @@ struct resource_funcs { struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *stream); - enum dc_status (*get_default_swizzle_mode)( + enum dc_status (*patch_unknown_plane_state)( struct dc_plane_state *plane_state); struct stream_encoder *(*find_first_free_match_stream_enc_for_link)( diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index 05ee5295d2c15935f2cef0f9f3cb292c98db2445..336c80a18175754114dda81996ed80c861f126c0 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -27,11 +27,12 @@ #define __DAL_DCCG_H__ #include "dc_types.h" +#include "hw_shared.h" struct dccg { struct dc_context *ctx; const struct dccg_funcs *funcs; - + int pipe_dppclk_khz[MAX_PIPES]; int ref_dppclk; }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h index c59740084ebc48881b6769c47f82313e86bf5d37..7c2a3328b208418306a4c07ee9dab2688cffab84 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h @@ -39,6 +39,7 @@ struct dsc_config { uint32_t pic_height; enum dc_pixel_encoding pixel_encoding; enum dc_color_depth color_depth; /* Bits per component */ + bool is_odm; struct dc_dsc_config dc_dsc_cfg; }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h index fb748f082c5601431f57005ae33f0fd060c20c07..c2b392a533b1ce079cde9d7de086373cacedd3f4 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h @@ -68,6 +68,7 @@ struct encoder_feature_support { unsigned int max_hdmi_pixel_clock; bool hdmi_ycbcr420_supported; bool dp_ycbcr420_supported; + bool fec_supported; }; union dpcd_psr_configuration { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h index 7575564b2265af7f50ab1c473e361a2eaff9cb55..2717352eb697169a362df164bc91e1c2ea676886 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h @@ -310,7 +310,8 @@ struct opp_funcs { enum dc_color_depth color_depth, const struct tg_color *solid_color, int width, - int height); + int height, + int offset); bool (*dpg_is_blanked)( struct output_pixel_processor *opp); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h index b1d736cbcd5acc7918f2445574398a57c014b9de..52a26e6be066b4ff978237d0feb76fb1c33e8e12 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h @@ -145,6 +145,8 @@ struct hwseq_private_funcs { const struct dc_plane_state *plane_state); bool (*set_shaper_3dlut)(struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); + void (*PLAT_58856_wa)(struct dc_state *context, + struct pipe_ctx *pipe_ctx); }; struct dce_hwseq { diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 0cb8967f0c45862c801dc32009c8342e0153fd78..10b5fa9d25884bdc0e00b7b7a42660048966f3f2 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -50,6 +50,7 @@ enum dmub_cmd_type { DMUB_CMD__REG_REG_WAIT = 4, DMUB_CMD__PLAT_54186_WA = 5, DMUB_CMD__PSR = 64, + DMUB_CMD__ABM = 66, DMUB_CMD__VBIOS = 128, }; @@ -256,6 +257,52 @@ struct 
dmub_rb_cmd_psr_set_version { struct dmub_cmd_psr_set_version_data psr_set_version_data; }; +struct dmub_cmd_abm_set_pipe_data { + uint32_t ramping_boundary; + uint32_t otg_inst; +}; + +struct dmub_rb_cmd_abm_set_pipe { + struct dmub_cmd_header header; + struct dmub_cmd_abm_set_pipe_data abm_set_pipe_data; +}; + +struct dmub_cmd_abm_set_backlight_data { + uint32_t frame_ramp; +}; + +struct dmub_rb_cmd_abm_set_backlight { + struct dmub_cmd_header header; + struct dmub_cmd_abm_set_backlight_data abm_set_backlight_data; +}; + +struct dmub_cmd_abm_set_level_data { + uint32_t level; +}; + +struct dmub_rb_cmd_abm_set_level { + struct dmub_cmd_header header; + struct dmub_cmd_abm_set_level_data abm_set_level_data; +}; + +struct dmub_cmd_abm_set_ambient_level_data { + uint32_t ambient_lux; +}; + +struct dmub_rb_cmd_abm_set_ambient_level { + struct dmub_cmd_header header; + struct dmub_cmd_abm_set_ambient_level_data abm_set_ambient_level_data; +}; + +struct dmub_cmd_abm_set_pwm_frac_data { + uint32_t fractional_pwm; +}; + +struct dmub_rb_cmd_abm_set_pwm_frac { + struct dmub_cmd_header header; + struct dmub_cmd_abm_set_pwm_frac_data abm_set_pwm_frac_data; +}; + union dmub_rb_cmd { struct dmub_rb_cmd_read_modify_write read_modify_write; struct dmub_rb_cmd_reg_field_update_sequence reg_field_update_seq; @@ -272,6 +319,11 @@ union dmub_rb_cmd { struct dmub_rb_cmd_psr_enable psr_enable; struct dmub_rb_cmd_psr_set_level psr_set_level; struct dmub_rb_cmd_PLAT_54186_wa PLAT_54186_wa; + struct dmub_rb_cmd_abm_set_pipe abm_set_pipe; + struct dmub_rb_cmd_abm_set_backlight abm_set_backlight; + struct dmub_rb_cmd_abm_set_level abm_set_level; + struct dmub_rb_cmd_abm_set_ambient_level abm_set_ambient_level; + struct dmub_rb_cmd_abm_set_pwm_frac abm_set_pwm_frac; }; #pragma pack(pop) diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h index ce793f47f234c173e224f8561e97fc356c2cbb52..d37535d219285c9e032f76c630089caaa2837148 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h @@ -45,4 +45,13 @@ enum psr_version { PSR_VERSION_2_1 = 0x21, // PSR Version 2, includes Y-coordinate support for SU }; +enum dmub_cmd_abm_type { + DMUB_CMD__ABM_INIT_CONFIG = 0, + DMUB_CMD__ABM_SET_PIPE = 1, + DMUB_CMD__ABM_SET_BACKLIGHT = 2, + DMUB_CMD__ABM_SET_LEVEL = 3, + DMUB_CMD__ABM_SET_AMBIENT_LEVEL = 4, + DMUB_CMD__ABM_SET_PWM_FRAC = 5, +}; + #endif /* _DMUB_CMD_DAL_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h index e619fa9cf53ac0e6a76301448beb41723a91406d..c2671f2616c840dbbead13833126aa6d77f989d7 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h @@ -104,7 +104,7 @@ enum dmub_window_id { DMUB_WINDOW_4_MAILBOX, DMUB_WINDOW_5_TRACEBUFF, DMUB_WINDOW_6_FW_STATE, - DMUB_WINDOW_7_RESERVED, + DMUB_WINDOW_7_SCRATCH_MEM, DMUB_WINDOW_TOTAL, }; @@ -316,6 +316,7 @@ struct dmub_srv { enum dmub_asic asic; void *user_ctx; bool is_virtual; + struct dmub_fb scratch_mem_fb; volatile const struct dmub_fw_state *fw_state; /* private: internal use only */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index 45be185ef31298ff8bffd89f73257c058306ad1a..ce32cc7933c40777f185b08063b4b47cd5a892b1 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -52,8 +52,11 @@ /* 
Default tracebuffer size if meta is absent. */ #define DMUB_TRACE_BUFFER_SIZE (1024) +/* Default scratch mem size. */ +#define DMUB_SCRATCH_MEM_SIZE (256) + /* Number of windows in use. */ -#define DMUB_NUM_WINDOWS (DMUB_WINDOW_6_FW_STATE + 1) +#define DMUB_NUM_WINDOWS (DMUB_WINDOW_TOTAL) /* Base addresses. */ #define DMUB_CW0_BASE (0x60000000) @@ -211,9 +214,11 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub, struct dmub_region *mail = &out->regions[DMUB_WINDOW_4_MAILBOX]; struct dmub_region *trace_buff = &out->regions[DMUB_WINDOW_5_TRACEBUFF]; struct dmub_region *fw_state = &out->regions[DMUB_WINDOW_6_FW_STATE]; + struct dmub_region *scratch_mem = &out->regions[DMUB_WINDOW_7_SCRATCH_MEM]; const struct dmub_fw_meta_info *fw_info; uint32_t fw_state_size = DMUB_FW_STATE_SIZE; uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE; + uint32_t scratch_mem_size = DMUB_SCRATCH_MEM_SIZE; if (!dmub->sw_init) return DMUB_STATUS_INVALID; @@ -256,7 +261,10 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub, fw_state->base = dmub_align(trace_buff->top, 256); fw_state->top = fw_state->base + dmub_align(fw_state_size, 64); - out->fb_size = dmub_align(fw_state->top, 4096); + scratch_mem->base = dmub_align(fw_state->top, 256); + scratch_mem->top = scratch_mem->base + dmub_align(scratch_mem_size, 64); + + out->fb_size = dmub_align(scratch_mem->top, 4096); return DMUB_STATUS_OK; } @@ -334,6 +342,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, struct dmub_fb *mail_fb = params->fb[DMUB_WINDOW_4_MAILBOX]; struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF]; struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE]; + struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM]; struct dmub_rb_init_params rb_params; struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6; @@ -370,7 +379,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, dmub->hw_funcs.reset(dmub); if (inst_fb && data_fb && bios_fb && mail_fb && tracebuff_fb && - fw_state_fb) { + fw_state_fb && scratch_mem_fb) { cw2.offset.quad_part = data_fb->gpu_addr; cw2.region.base = DMUB_CW0_BASE + inst_fb->size; cw2.region.top = cw2.region.base + data_fb->size; @@ -396,6 +405,8 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, dmub->fw_state = fw_state_fb->cpu_addr; + dmub->scratch_mem_fb = *scratch_mem_fb; + if (dmub->hw_funcs.setup_windows) dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6); diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h index ea7015f869c97ae4ccafce2c54f49b5baba57c90..8a87d0ed90ae99b701207ae01380d57b3c82d690 100644 --- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h +++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h @@ -134,11 +134,6 @@ #define PICASSO_A0 0x41 /* DCN1_01 */ #define RAVEN2_A0 0x81 -#define RAVEN2_15D8_REV_94 0x94 -#define RAVEN2_15D8_REV_95 0x95 -#define RAVEN2_15D8_REV_E9 0xE9 -#define RAVEN2_15D8_REV_EA 0xEA -#define RAVEN2_15D8_REV_EB 0xEB #define RAVEN1_F0 0xF0 #define RAVEN_UNKNOWN 0xFF #ifndef ASICREV_IS_RAVEN @@ -149,16 +144,17 @@ #define PRID_DALI_E3 0xE3 #define PRID_DALI_E4 0xE4 +#define PRID_POLLOCK_94 0x94 +#define PRID_POLLOCK_95 0x95 +#define PRID_POLLOCK_E9 0xE9 +#define PRID_POLLOCK_EA 0xEA +#define PRID_POLLOCK_EB 0xEB + #define ASICREV_IS_PICASSO(eChipRev) ((eChipRev >= PICASSO_A0) && (eChipRev < RAVEN2_A0)) #ifndef ASICREV_IS_RAVEN2 #define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < RENOIR_A0)) #endif #define 
ASICREV_IS_RV1_F0(eChipRev) ((eChipRev >= RAVEN1_F0) && (eChipRev < RAVEN_UNKNOWN)) -#define ASICREV_IS_POLLOCK(eChipRev) (eChipRev == RAVEN2_15D8_REV_94 \ - || eChipRev == RAVEN2_15D8_REV_95 \ - || eChipRev == RAVEN2_15D8_REV_E9 \ - || eChipRev == RAVEN2_15D8_REV_EA \ - || eChipRev == RAVEN2_15D8_REV_EB) #define FAMILY_RV 142 /* DCN 1*/ diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h index 89a7092670193ac3b68b1cad97e5c55250f77c88..d66f9d8eefb4946d0aff846e1b09b5d6a7a3378b 100644 --- a/drivers/gpu/drm/amd/display/include/logger_types.h +++ b/drivers/gpu/drm/amd/display/include/logger_types.h @@ -124,36 +124,37 @@ enum dc_log_type { #define DC_MIN_LOG_MASK ((1 << LOG_ERROR) | \ (1 << LOG_DETECTION_EDID_PARSER)) -#define DC_DEFAULT_LOG_MASK ((1 << LOG_ERROR) | \ - (1 << LOG_WARNING) | \ - (1 << LOG_EVENT_MODE_SET) | \ - (1 << LOG_EVENT_DETECTION) | \ - (1 << LOG_EVENT_LINK_TRAINING) | \ - (1 << LOG_EVENT_LINK_LOSS) | \ - (1 << LOG_EVENT_UNDERFLOW) | \ - (1 << LOG_RESOURCE) | \ - (1 << LOG_FEATURE_OVERRIDE) | \ - (1 << LOG_DETECTION_EDID_PARSER) | \ - (1 << LOG_DC) | \ - (1 << LOG_HW_HOTPLUG) | \ - (1 << LOG_HW_SET_MODE) | \ - (1 << LOG_HW_RESUME_S3) | \ - (1 << LOG_HW_HPD_IRQ) | \ - (1 << LOG_SYNC) | \ - (1 << LOG_BANDWIDTH_VALIDATION) | \ - (1 << LOG_MST) | \ - (1 << LOG_DETECTION_DP_CAPS) | \ - (1 << LOG_BACKLIGHT)) | \ - (1 << LOG_I2C_AUX) | \ - (1 << LOG_IF_TRACE) | \ - (1 << LOG_DTN) /* | \ - (1 << LOG_DEBUG) | \ - (1 << LOG_BIOS) | \ - (1 << LOG_SURFACE) | \ - (1 << LOG_SCALER) | \ - (1 << LOG_DML) | \ - (1 << LOG_HW_LINK_TRAINING) | \ - (1 << LOG_HW_AUDIO)| \ - (1 << LOG_BANDWIDTH_CALCS)*/ +#define DC_DEFAULT_LOG_MASK ((1ULL << LOG_ERROR) | \ + (1ULL << LOG_WARNING) | \ + (1ULL << LOG_EVENT_MODE_SET) | \ + (1ULL << LOG_EVENT_DETECTION) | \ + (1ULL << LOG_EVENT_LINK_TRAINING) | \ + (1ULL << LOG_EVENT_LINK_LOSS) | \ + (1ULL << LOG_EVENT_UNDERFLOW) | \ + (1ULL << LOG_RESOURCE) | \ + (1ULL << LOG_FEATURE_OVERRIDE) | \ + (1ULL << LOG_DETECTION_EDID_PARSER) | \ + (1ULL << LOG_DC) | \ + (1ULL << LOG_HW_HOTPLUG) | \ + (1ULL << LOG_HW_SET_MODE) | \ + (1ULL << LOG_HW_RESUME_S3) | \ + (1ULL << LOG_HW_HPD_IRQ) | \ + (1ULL << LOG_SYNC) | \ + (1ULL << LOG_BANDWIDTH_VALIDATION) | \ + (1ULL << LOG_MST) | \ + (1ULL << LOG_DETECTION_DP_CAPS) | \ + (1ULL << LOG_BACKLIGHT)) | \ + (1ULL << LOG_I2C_AUX) | \ + (1ULL << LOG_IF_TRACE) | \ + (1ULL << LOG_HDMI_FRL) | \ + (1ULL << LOG_DTN) /* | \ + (1ULL << LOG_DEBUG) | \ + (1ULL << LOG_BIOS) | \ + (1ULL << LOG_SURFACE) | \ + (1ULL << LOG_SCALER) | \ + (1ULL << LOG_DML) | \ + (1ULL << LOG_HW_LINK_TRAINING) | \ + (1ULL << LOG_HW_AUDIO)| \ + (1ULL << LOG_BANDWIDTH_CALCS)*/ #endif /* __DAL_LOGGER_TYPES_H__ */ diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c index 83eaec4c6ad71fbd2999add781db63d9c5e3fd49..e9fbd94f8635e7654c7a701f604c8ceecdfd5c15 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c @@ -52,8 +52,8 @@ static uint8_t is_cp_desired_hdcp1(struct mod_hdcp *hdcp) * hdcp is not desired */ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { - if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE && - !hdcp->connection.displays[i].adjust.disable) { + if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_INACTIVE && + !hdcp->displays[i].adjust.disable) { is_auth_needed = 1; break; } @@ -73,8 +73,8 @@ static uint8_t is_cp_desired_hdcp2(struct mod_hdcp *hdcp) * hdcp is not 
desired */ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { - if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE && - !hdcp->connection.displays[i].adjust.disable) { + if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_INACTIVE && + !hdcp->displays[i].adjust.disable) { is_auth_needed = 1; break; } @@ -114,6 +114,9 @@ static enum mod_hdcp_status execution(struct mod_hdcp *hdcp, } else if (is_in_hdcp2_dp_states(hdcp)) { status = mod_hdcp_hdcp2_dp_execution(hdcp, event_ctx, &input->hdcp2); + } else { + event_ctx->unexpected_event = 1; + goto out; } out: return status; @@ -325,7 +328,8 @@ enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp, /* add display to connection */ hdcp->connection.link = *link; *display_container = *display; - status = mod_hdcp_add_display_to_topology(hdcp, display->index); + status = mod_hdcp_add_display_to_topology(hdcp, display_container); + if (status != MOD_HDCP_STATUS_SUCCESS) goto out; @@ -371,10 +375,10 @@ enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp, status = mod_hdcp_remove_display_from_topology(hdcp, index); if (status != MOD_HDCP_STATUS_SUCCESS) goto out; - display->state = MOD_HDCP_DISPLAY_INACTIVE; + memset(display, 0, sizeof(struct mod_hdcp_display)); - /* request authentication for remaining displays*/ - if (get_active_display_count(hdcp) > 0) + /* request authentication when connection is not reset */ + if (current_state(hdcp) != HDCP_UNINITIALIZED) callback_in_ms(hdcp->connection.link.adjust.auth_delay * 1000, output); out: @@ -481,10 +485,8 @@ enum mod_hdcp_operation_mode mod_hdcp_signal_type_to_operation_mode( break; case SIGNAL_TYPE_EDP: case SIGNAL_TYPE_DISPLAY_PORT: - mode = MOD_HDCP_MODE_DP; - break; case SIGNAL_TYPE_DISPLAY_PORT_MST: - mode = MOD_HDCP_MODE_DP_MST; + mode = MOD_HDCP_MODE_DP; break; default: break; diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h index b09d2f5502b38af1a30ac5c18c1aa2a87c0093e3..60ff1a0028ac3754816998afdb6edbba4bd58717 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h @@ -165,7 +165,6 @@ struct mod_hdcp_auth_counters { /* contains values per connection */ struct mod_hdcp_connection { struct mod_hdcp_link link; - struct mod_hdcp_display displays[MAX_NUM_OF_DISPLAYS]; uint8_t is_repeater; uint8_t is_km_stored; uint8_t is_hdcp1_revoked; @@ -201,6 +200,8 @@ struct mod_hdcp { struct mod_hdcp_config config; /* per connection */ struct mod_hdcp_connection connection; + /* per displays */ + struct mod_hdcp_display displays[MAX_NUM_OF_DISPLAYS]; /* per authentication attempt */ struct mod_hdcp_authentication auth; /* per state in an authentication */ @@ -327,7 +328,7 @@ void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size, /* psp functions */ enum mod_hdcp_status mod_hdcp_add_display_to_topology( - struct mod_hdcp *hdcp, uint8_t index); + struct mod_hdcp *hdcp, struct mod_hdcp_display *display); enum mod_hdcp_status mod_hdcp_remove_display_from_topology( struct mod_hdcp *hdcp, uint8_t index); enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp); @@ -391,13 +392,13 @@ enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp); /* hdcp version helpers */ static inline uint8_t is_dp_hdcp(struct mod_hdcp *hdcp) { - return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP || - hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST); + return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP); } static inline uint8_t 
is_dp_mst_hdcp(struct mod_hdcp *hdcp) { - return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST); + return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP && + hdcp->connection.link.dp.mst_supported); } static inline uint8_t is_hdmi_dvi_sl_hdcp(struct mod_hdcp *hdcp) @@ -502,11 +503,6 @@ static inline uint8_t is_display_active(struct mod_hdcp_display *display) return display->state >= MOD_HDCP_DISPLAY_ACTIVE; } -static inline uint8_t is_display_added(struct mod_hdcp_display *display) -{ - return display->state >= MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED; -} - static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *display) { return display->state >= MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; @@ -514,35 +510,24 @@ static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *dis static inline uint8_t get_active_display_count(struct mod_hdcp *hdcp) { - uint8_t added_count = 0; - uint8_t i; - - for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) - if (is_display_active(&hdcp->connection.displays[i])) - added_count++; - return added_count; -} - -static inline uint8_t get_added_display_count(struct mod_hdcp *hdcp) -{ - uint8_t added_count = 0; + uint8_t active_count = 0; uint8_t i; for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) - if (is_display_added(&hdcp->connection.displays[i])) - added_count++; - return added_count; + if (is_display_active(&hdcp->displays[i])) + active_count++; + return active_count; } -static inline struct mod_hdcp_display *get_first_added_display( +static inline struct mod_hdcp_display *get_first_active_display( struct mod_hdcp *hdcp) { uint8_t i; struct mod_hdcp_display *display = NULL; for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) - if (is_display_added(&hdcp->connection.displays[i])) { - display = &hdcp->connection.displays[i]; + if (is_display_active(&hdcp->displays[i])) { + display = &hdcp->displays[i]; break; } return display; @@ -555,9 +540,9 @@ static inline struct mod_hdcp_display *get_active_display_at_index( struct mod_hdcp_display *display = NULL; for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) - if (hdcp->connection.displays[i].index == index && - is_display_active(&hdcp->connection.displays[i])) { - display = &hdcp->connection.displays[i]; + if (hdcp->displays[i].index == index && + is_display_active(&hdcp->displays[i])) { + display = &hdcp->displays[i]; break; } return display; @@ -570,8 +555,8 @@ static inline struct mod_hdcp_display *get_empty_display_container( struct mod_hdcp_display *display = NULL; for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) - if (!is_display_active(&hdcp->connection.displays[i])) { - display = &hdcp->connection.displays[i]; + if (!is_display_active(&hdcp->displays[i])) { + display = &hdcp->displays[i]; break; } return display; diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c index 37c8c05497d66487c697712a58bf708deb001e47..f244b72e74e06969a47abab0096948795fa3bc38 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c @@ -129,7 +129,7 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp) static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp) { /* device count must be greater than or equal to tracked hdcp displays */ - return (get_device_count(hdcp) < get_added_display_count(hdcp)) ? + return (get_device_count(hdcp) < get_active_display_count(hdcp)) ? 
MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE : MOD_HDCP_STATUS_SUCCESS; } diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c index 5bc6706d2af75a8735c3192c4c991003c2dfb131..f3711914364e5958811d846ab880c4ddaf465b57 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c @@ -230,6 +230,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp, (!conn->is_repeater && is_dp_mst_hdcp(hdcp) && input->stream_encryption_dp != PASS)) { fail_and_restart_in_ms(0, &status, output); break; + } else if (conn->hdcp1_retry_count < conn->link.adjust.hdcp1.min_auth_retries_wa) { + fail_and_restart_in_ms(0, &status, output); + break; } if (conn->is_repeater) { set_watchdog_in_ms(hdcp, 5000, output); diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c index 491c00f48026e285d6c9a89c68fb17756ed51657..549c113abcf7fa438de7fb19d70b8e06e07231f6 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c @@ -208,7 +208,7 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp) static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp) { /* device count must be greater than or equal to tracked hdcp displays */ - return (get_device_count(hdcp) < get_added_display_count(hdcp)) ? + return (get_device_count(hdcp) < get_active_display_count(hdcp)) ? MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE : MOD_HDCP_STATUS_SUCCESS; } diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c index ff9d54812e62934053a4d0738e00948b5bdfa19e..bb5130f4228dba70b0574b2e45a1569483cbe0d6 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c @@ -65,6 +65,7 @@ enum mod_hdcp_ddc_message_id { MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME, MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST, + MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2, MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK, MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY, @@ -101,6 +102,7 @@ static const uint8_t hdcp_i2c_offsets[] = { [MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x80, [MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x60, [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x80, + [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2] = 0x80, [MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x60, [MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x60, [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x80, @@ -135,6 +137,7 @@ static const uint32_t hdcp_dpcd_addrs[] = { [MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x692f8, [MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x69318, [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x69330, + [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2] = 0x69340, [MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x693e0, [MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x693f0, [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x69473, @@ -405,7 +408,7 @@ enum mod_hdcp_status mod_hdcp_read_ake_cert(struct mod_hdcp *hdcp) enum 
mod_hdcp_status status; if (is_dp_hdcp(hdcp)) { - hdcp->auth.msg.hdcp2.ake_cert[0] = 3; + hdcp->auth.msg.hdcp2.ake_cert[0] = HDCP_2_2_AKE_SEND_CERT; status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT, hdcp->auth.msg.hdcp2.ake_cert+1, sizeof(hdcp->auth.msg.hdcp2.ake_cert)-1); @@ -423,7 +426,7 @@ enum mod_hdcp_status mod_hdcp_read_h_prime(struct mod_hdcp *hdcp) enum mod_hdcp_status status; if (is_dp_hdcp(hdcp)) { - hdcp->auth.msg.hdcp2.ake_h_prime[0] = 7; + hdcp->auth.msg.hdcp2.ake_h_prime[0] = HDCP_2_2_AKE_SEND_HPRIME; status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME, hdcp->auth.msg.hdcp2.ake_h_prime+1, sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)-1); @@ -441,7 +444,7 @@ enum mod_hdcp_status mod_hdcp_read_pairing_info(struct mod_hdcp *hdcp) enum mod_hdcp_status status; if (is_dp_hdcp(hdcp)) { - hdcp->auth.msg.hdcp2.ake_pairing_info[0] = 8; + hdcp->auth.msg.hdcp2.ake_pairing_info[0] = HDCP_2_2_AKE_SEND_PAIRING_INFO; status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO, hdcp->auth.msg.hdcp2.ake_pairing_info+1, sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)-1); @@ -459,7 +462,7 @@ enum mod_hdcp_status mod_hdcp_read_l_prime(struct mod_hdcp *hdcp) enum mod_hdcp_status status; if (is_dp_hdcp(hdcp)) { - hdcp->auth.msg.hdcp2.lc_l_prime[0] = 10; + hdcp->auth.msg.hdcp2.lc_l_prime[0] = HDCP_2_2_LC_SEND_LPRIME; status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME, hdcp->auth.msg.hdcp2.lc_l_prime+1, sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)-1); @@ -474,14 +477,27 @@ enum mod_hdcp_status mod_hdcp_read_l_prime(struct mod_hdcp *hdcp) enum mod_hdcp_status mod_hdcp_read_rx_id_list(struct mod_hdcp *hdcp) { - enum mod_hdcp_status status; + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; if (is_dp_hdcp(hdcp)) { - hdcp->auth.msg.hdcp2.rx_id_list[0] = 12; - status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST, - hdcp->auth.msg.hdcp2.rx_id_list+1, - sizeof(hdcp->auth.msg.hdcp2.rx_id_list)-1); + uint32_t device_count = 0; + uint32_t rx_id_list_size = 0; + uint32_t bytes_read = 0; + hdcp->auth.msg.hdcp2.rx_id_list[0] = HDCP_2_2_REP_SEND_RECVID_LIST; + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST, + hdcp->auth.msg.hdcp2.rx_id_list+1, + HDCP_MAX_AUX_TRANSACTION_SIZE); + if (status == MOD_HDCP_STATUS_SUCCESS) { + bytes_read = HDCP_MAX_AUX_TRANSACTION_SIZE; + device_count = HDCP_2_2_DEV_COUNT_LO(hdcp->auth.msg.hdcp2.rx_id_list[2]) + + (HDCP_2_2_DEV_COUNT_HI(hdcp->auth.msg.hdcp2.rx_id_list[1]) << 4); + rx_id_list_size = MIN((21 + 5 * device_count), + (sizeof(hdcp->auth.msg.hdcp2.rx_id_list) - 1)); + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2, + hdcp->auth.msg.hdcp2.rx_id_list + 1 + bytes_read, + (rx_id_list_size - 1) / HDCP_MAX_AUX_TRANSACTION_SIZE * HDCP_MAX_AUX_TRANSACTION_SIZE); + } } else { status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST, hdcp->auth.msg.hdcp2.rx_id_list, @@ -495,7 +511,7 @@ enum mod_hdcp_status mod_hdcp_read_stream_ready(struct mod_hdcp *hdcp) enum mod_hdcp_status status; if (is_dp_hdcp(hdcp)) { - hdcp->auth.msg.hdcp2.repeater_auth_stream_ready[0] = 17; + hdcp->auth.msg.hdcp2.repeater_auth_stream_ready[0] = HDCP_2_2_REP_STREAM_READY; status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY, hdcp->auth.msg.hdcp2.repeater_auth_stream_ready+1, sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)-1); diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h 
b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h index 6e844825ad23d0d0e8ffabaf62c1740fa97b23fa..d3192b9d0c3d8fb209a938bc1e0c795f8e13d944 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h @@ -37,10 +37,11 @@ /* default logs */ #define HDCP_ERROR_TRACE(hdcp, status) \ HDCP_LOG_ERR(hdcp, \ - "[Link %d] WARNING %s IN STATE %s", \ + "[Link %d] WARNING %s IN STATE %s STAY COUNT %d", \ hdcp->config.index, \ mod_hdcp_status_to_str(status), \ - mod_hdcp_state_id_to_str(hdcp->state.id)) + mod_hdcp_state_id_to_str(hdcp->state.id), \ + hdcp->state.stay_count) #define HDCP_HDCP1_ENABLED_TRACE(hdcp, displayIndex) \ HDCP_LOG_VER(hdcp, \ "[Link %d] HDCP 1.4 enabled on display %d", \ @@ -111,6 +112,9 @@ sizeof(hdcp->auth.msg.hdcp1.bksv)); \ HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps, \ sizeof(hdcp->auth.msg.hdcp1.bcaps)); \ + HDCP_DDC_READ_TRACE(hdcp, "BSTATUS", \ + (uint8_t *)&hdcp->auth.msg.hdcp1.bstatus, \ + sizeof(hdcp->auth.msg.hdcp1.bstatus)); \ HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an, \ sizeof(hdcp->auth.msg.hdcp1.an)); \ HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv, \ diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c index d9cb2383d6de7cdce593753cdc6742ac08bd483a..836e47954938ce492523197c9695f6f3bcbf8c18 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c @@ -54,7 +54,7 @@ enum mod_hdcp_status mod_hdcp_remove_display_from_topology( dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf; - if (!display || !is_display_added(display)) + if (!display || !is_display_active(display)) return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND; memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory)); @@ -76,22 +76,18 @@ enum mod_hdcp_status mod_hdcp_remove_display_from_topology( } enum mod_hdcp_status mod_hdcp_add_display_to_topology( - struct mod_hdcp *hdcp, uint8_t index) + struct mod_hdcp *hdcp, struct mod_hdcp_display *display) { struct psp_context *psp = hdcp->config.psp.handle; struct ta_dtm_shared_memory *dtm_cmd; - struct mod_hdcp_display *display = - get_active_display_at_index(hdcp, index); struct mod_hdcp_link *link = &hdcp->connection.link; if (!psp->dtm_context.dtm_initialized) { DRM_ERROR("Failed to add display topology, DTM TA is not initialized."); + display->state = MOD_HDCP_DISPLAY_INACTIVE; return MOD_HDCP_STATUS_FAILURE; } - if (!display || is_display_added(display)) - return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE; - dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf; memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory)); @@ -113,20 +109,21 @@ enum mod_hdcp_status mod_hdcp_add_display_to_topology( psp_dtm_invoke(psp, dtm_cmd->cmd_id); - if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) + if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) { + display->state = MOD_HDCP_DISPLAY_INACTIVE; return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE; + } - display->state = MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED; HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index); - - return MOD_HDCP_STATUS_SUCCESS; + + return MOD_HDCP_STATUS_SUCCESS; } enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp) { struct psp_context *psp = hdcp->config.psp.handle; - struct mod_hdcp_display *display = get_first_added_display(hdcp); + struct mod_hdcp_display *display = get_first_active_display(hdcp); struct 
ta_hdcp_shared_memory *hdcp_cmd; if (!psp->hdcp_context.hdcp_initialized) { @@ -177,11 +174,11 @@ enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp) HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp); for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) if (is_display_encryption_enabled( - &hdcp->connection.displays[i])) { - hdcp->connection.displays[i].state = - MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED; + &hdcp->displays[i])) { + hdcp->displays[i].state = + MOD_HDCP_DISPLAY_ACTIVE; HDCP_HDCP1_DISABLED_TRACE(hdcp, - hdcp->connection.displays[i].index); + hdcp->displays[i].index); } return MOD_HDCP_STATUS_SUCCESS; @@ -231,7 +228,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp) { struct psp_context *psp = hdcp->config.psp.handle; struct ta_hdcp_shared_memory *hdcp_cmd; - struct mod_hdcp_display *display = get_first_added_display(hdcp); + struct mod_hdcp_display *display = get_first_active_display(hdcp); hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); @@ -301,14 +298,13 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { - if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED || - hdcp->connection.displays[i].adjust.disable) + if (hdcp->displays[i].adjust.disable) continue; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.session_handle = hdcp->auth.id; - hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.display_handle = hdcp->connection.displays[i].index; + hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.display_handle = hdcp->displays[i].index; hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_ENABLE_DP_STREAM_ENCRYPTION; psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); @@ -316,8 +312,8 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) return MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE; - hdcp->connection.displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; - HDCP_HDCP1_ENABLED_TRACE(hdcp, hdcp->connection.displays[i].index); + hdcp->displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; + HDCP_HDCP1_ENABLED_TRACE(hdcp, hdcp->displays[i].index); } return MOD_HDCP_STATUS_SUCCESS; @@ -364,7 +360,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp) { struct psp_context *psp = hdcp->config.psp.handle; struct ta_hdcp_shared_memory *hdcp_cmd; - struct mod_hdcp_display *display = get_first_added_display(hdcp); + struct mod_hdcp_display *display = get_first_active_display(hdcp); if (!psp->hdcp_context.hdcp_initialized) { DRM_ERROR("Failed to create hdcp session, HDCP TA is not initialized"); @@ -421,11 +417,11 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp) HDCP_TOP_HDCP2_DESTROY_SESSION_TRACE(hdcp); for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) if (is_display_encryption_enabled( - &hdcp->connection.displays[i])) { - hdcp->connection.displays[i].state = - MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED; + &hdcp->displays[i])) { + hdcp->displays[i].state = + MOD_HDCP_DISPLAY_ACTIVE; HDCP_HDCP2_DISABLED_TRACE(hdcp, - hdcp->connection.displays[i].index); + hdcp->displays[i].index); } return MOD_HDCP_STATUS_SUCCESS; @@ -662,7 +658,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp) { struct psp_context *psp = hdcp->config.psp.handle; struct ta_hdcp_shared_memory *hdcp_cmd; 
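+	/* With the ACTIVE_AND_ADDED display state removed, an active display + * is already in the DTM topology, so encryption is enabled against the + * first active display. */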
- struct mod_hdcp_display *display = get_first_added_display(hdcp); + struct mod_hdcp_display *display = get_first_active_display(hdcp); hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); @@ -747,10 +743,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { - if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED || - hdcp->connection.displays[i].adjust.disable) + if (hdcp->displays[i].adjust.disable) continue; - hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->connection.displays[i].index; + hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->displays[i].index; hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.session_handle = hdcp->auth.id; hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_ENABLE_DP_STREAM_ENCRYPTION; @@ -759,8 +754,8 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) break; - hdcp->connection.displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; - HDCP_HDCP2_ENABLED_TRACE(hdcp, hdcp->connection.displays[i].index); + hdcp->displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; + HDCP_HDCP2_ENABLED_TRACE(hdcp, hdcp->displays[i].index); } return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) ? MOD_HDCP_STATUS_SUCCESS diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h index 891bca555e173b78b3ef5e769cac2c073d8a5630..eae9309cfb24561b7a1981baf465e1f5307e3d20 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h @@ -102,6 +102,7 @@ enum mod_hdcp_status { struct mod_hdcp_displayport { uint8_t rev; uint8_t assr_supported; + uint8_t mst_supported; }; struct mod_hdcp_hdmi { @@ -110,14 +111,12 @@ struct mod_hdcp_hdmi { enum mod_hdcp_operation_mode { MOD_HDCP_MODE_OFF, MOD_HDCP_MODE_DEFAULT, - MOD_HDCP_MODE_DP, - MOD_HDCP_MODE_DP_MST + MOD_HDCP_MODE_DP }; enum mod_hdcp_display_state { MOD_HDCP_DISPLAY_INACTIVE = 0, MOD_HDCP_DISPLAY_ACTIVE, - MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED, MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED }; @@ -157,7 +156,8 @@ struct mod_hdcp_display_adjustment { struct mod_hdcp_link_adjustment_hdcp1 { uint8_t disable : 1; uint8_t postpone_encryption : 1; - uint8_t reserved : 6; + uint8_t min_auth_retries_wa : 1; + uint8_t reserved : 5; }; enum mod_hdcp_force_hdcp_type { diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h index 42cbeffac6402bd099c5ee7f08bb7f1930320be4..13c57ff2abdce9c3aeb235fac4d2f845b20b00d9 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h @@ -34,8 +34,7 @@ struct dc_info_packet; struct mod_vrr_params; void mod_build_vsc_infopacket(const struct dc_stream_state *stream, - struct dc_info_packet *info_packet, - bool *use_vsc_sdp_for_colorimetry); + struct dc_info_packet *info_packet); void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream, struct dc_info_packet *info_packet, int ALLMEnabled, int ALLMValue); diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c index 6a8a056424b85c83dfffcc18d1396bc412fb2c5f..cff3ab15fc0cc5ced207161b57090af2a213bc67 100644 --- 
a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c +++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c @@ -130,8 +130,7 @@ enum ColorimetryYCCDP { }; void mod_build_vsc_infopacket(const struct dc_stream_state *stream, - struct dc_info_packet *info_packet, - bool *use_vsc_sdp_for_colorimetry) + struct dc_info_packet *info_packet) { unsigned int vsc_packet_revision = vsc_packet_undefined; unsigned int i; @@ -139,11 +138,6 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream, unsigned int colorimetryFormat = 0; bool stereo3dSupport = false; - /* Initialize first, later if infopacket is valid determine if VSC SDP - * should be used to signal colorimetry format and pixel encoding. - */ - *use_vsc_sdp_for_colorimetry = false; - if (stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE && stream->view_format != VIEW_3D_FORMAT_NONE) { vsc_packet_revision = vsc_packet_rev1; stereo3dSupport = true; @@ -153,9 +147,8 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream, if (stream->psr_version != 0) vsc_packet_revision = vsc_packet_rev2; - /* Update to revision 5 for extended colorimetry support for DPCD 1.4+ */ - if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 && - stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) + /* Update to revision 5 for extended colorimetry support */ + if (stream->use_vsc_sdp_for_colorimetry) vsc_packet_revision = vsc_packet_rev5; /* VSC packet not needed based on the features @@ -269,13 +262,6 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream, info_packet->valid = true; - /* If we are using VSC SDP revision 05h, use this to signal for - * colorimetry format and pixel encoding. HW should later be - * programmed to set MSA MISC1 bit 6 to indicate ignore - * colorimetry format and pixel encoding in the MSA. - */ - *use_vsc_sdp_for_colorimetry = true; - /* Set VSC SDP fields for pixel encoding and colorimetry format from DP 1.3 specs * Data Bytes DB 18~16 * Bits 3:0 (Colorimetry Format) | Bits 7:4 (Pixel Encoding) diff --git a/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_sh_mask.h new file mode 100644 index 0000000000000000000000000000000000000000..82b6cc25205e34a67905a65e36c9bf880f3cfc6e --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_sh_mask.h @@ -0,0 +1,69 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _wafl2_4_0_0_SH_MASK_HEADER +#define _wafl2_4_0_0_SH_MASK_HEADER + +//PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataLossErr__SHIFT 0x0 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__TrainingErr__SHIFT 0x1 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__CRCErr__SHIFT 0x5 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__BERExceededErr__SHIFT 0x6 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__TxMetaDataErr__SHIFT 0x7 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayBufParityErr__SHIFT 0x8 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataParityErr__SHIFT 0x9 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayFifoOverflowErr__SHIFT 0xa +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayFifoUnderflowErr__SHIFT 0xb +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ElasticFifoOverflowErr__SHIFT 0xc +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DeskewErr__SHIFT 0xd +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataStartupLimitErr__SHIFT 0xf +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__FCInitTimeoutErr__SHIFT 0x10 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryTimeoutErr__SHIFT 0x11 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReadySerialTimeoutErr__SHIFT 0x12 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReadySerialAttemptErr__SHIFT 0x13 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryAttemptErr__SHIFT 0x14 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryRelockAttemptErr__SHIFT 0x15 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ClearBERAccum__SHIFT 0x17 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__BERAccumulator__SHIFT 0x18 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataLossErr_MASK 0x00000001L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__TrainingErr_MASK 0x00000002L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__CRCErr_MASK 0x00000020L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__BERExceededErr_MASK 0x00000040L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__TxMetaDataErr_MASK 0x00000080L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayBufParityErr_MASK 0x00000100L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataParityErr_MASK 0x00000200L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayFifoOverflowErr_MASK 0x00000400L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayFifoUnderflowErr_MASK 0x00000800L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ElasticFifoOverflowErr_MASK 0x00001000L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DeskewErr_MASK 0x00002000L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataStartupLimitErr_MASK 0x00008000L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__FCInitTimeoutErr_MASK 0x00010000L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryTimeoutErr_MASK 0x00020000L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReadySerialTimeoutErr_MASK 0x00040000L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReadySerialAttemptErr_MASK 0x00080000L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryAttemptErr_MASK 0x00100000L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryRelockAttemptErr_MASK 0x00200000L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ClearBERAccum_MASK 0x00800000L +#define 
PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__BERAccumulator_MASK 0xFF000000L + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_smn.h b/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_smn.h new file mode 100644 index 0000000000000000000000000000000000000000..4a51a90c611a8107b38d4a01d722614d9f340ce6 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_smn.h @@ -0,0 +1,29 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _wafl2_4_0_0_SMN_HEADER +#define _wafl2_4_0_0_SMN_HEADER + +#define smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS 0x11cf0210 + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_sh_mask.h new file mode 100644 index 0000000000000000000000000000000000000000..f37712f05b0362a4d12140221a7bf9c9bc622676 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_sh_mask.h @@ -0,0 +1,69 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef _xgmi_4_0_0_SH_MASK_HEADER +#define _xgmi_4_0_0_SH_MASK_HEADER + +//PCS_GOPX16_PCS_ERROR_STATUS +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataLossErr__SHIFT 0x0 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__TrainingErr__SHIFT 0x1 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__CRCErr__SHIFT 0x5 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERExceededErr__SHIFT 0x6 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__TxMetaDataErr__SHIFT 0x7 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayBufParityErr__SHIFT 0x8 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataParityErr__SHIFT 0x9 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayFifoOverflowErr__SHIFT 0xa +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayFifoUnderflowErr__SHIFT 0xb +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ElasticFifoOverflowErr__SHIFT 0xc +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DeskewErr__SHIFT 0xd +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataStartupLimitErr__SHIFT 0xf +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__FCInitTimeoutErr__SHIFT 0x10 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryTimeoutErr__SHIFT 0x11 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReadySerialTimeoutErr__SHIFT 0x12 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReadySerialAttemptErr__SHIFT 0x13 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryAttemptErr__SHIFT 0x14 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryRelockAttemptErr__SHIFT 0x15 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ClearBERAccum__SHIFT 0x17 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERAccumulator__SHIFT 0x18 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataLossErr_MASK 0x00000001L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__TrainingErr_MASK 0x00000002L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__CRCErr_MASK 0x00000020L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERExceededErr_MASK 0x00000040L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__TxMetaDataErr_MASK 0x00000080L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayBufParityErr_MASK 0x00000100L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataParityErr_MASK 0x00000200L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayFifoOverflowErr_MASK 0x00000400L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayFifoUnderflowErr_MASK 0x00000800L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ElasticFifoOverflowErr_MASK 0x00001000L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DeskewErr_MASK 0x00002000L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataStartupLimitErr_MASK 0x00008000L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__FCInitTimeoutErr_MASK 0x00010000L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryTimeoutErr_MASK 0x00020000L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReadySerialTimeoutErr_MASK 0x00040000L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReadySerialAttemptErr_MASK 0x00080000L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryAttemptErr_MASK 0x00100000L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryRelockAttemptErr_MASK 0x00200000L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ClearBERAccum_MASK 0x00800000L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERAccumulator_MASK 0xFF000000L + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_smn.h b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_smn.h new file mode 100644 index 0000000000000000000000000000000000000000..6ccbac4ce87efc707ff19ed26aba1bac2ed56b49 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_smn.h @@ -0,0 +1,29 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _xgmi_4_0_0_SMN_HEADER +#define _xgmi_4_0_0_SMN_HEADER + +#define smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS 0x11af0210 + +#endif diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index abc0eb4ac49360abd4b94f82cd0787976d7cf9c0..a3c238c39ef57233d76465419da5876ba6e886db 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -167,27 +167,6 @@ struct tile_config { #define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096 -/* - * Allocation flag domains - * NOTE: This must match the corresponding definitions in kfd_ioctl.h. - */ -#define ALLOC_MEM_FLAGS_VRAM (1 << 0) -#define ALLOC_MEM_FLAGS_GTT (1 << 1) -#define ALLOC_MEM_FLAGS_USERPTR (1 << 2) -#define ALLOC_MEM_FLAGS_DOORBELL (1 << 3) -#define ALLOC_MEM_FLAGS_MMIO_REMAP (1 << 4) - -/* - * Allocation flags attributes/access options. - * NOTE: This must match the corresponding definitions in kfd_ioctl.h. - */ -#define ALLOC_MEM_FLAGS_WRITABLE (1 << 31) -#define ALLOC_MEM_FLAGS_EXECUTABLE (1 << 30) -#define ALLOC_MEM_FLAGS_PUBLIC (1 << 29) -#define ALLOC_MEM_FLAGS_NO_SUBSTITUTE (1 << 28) /* TODO */ -#define ALLOC_MEM_FLAGS_AQL_QUEUE_MEM (1 << 27) -#define ALLOC_MEM_FLAGS_COHERENT (1 << 26) /* For GFXv9 or later */ - /** * struct kfd2kgd_calls * @@ -223,8 +202,6 @@ struct tile_config { * @set_scratch_backing_va: Sets VA for scratch backing memory of a VMID. 
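The ALLOC_MEM_FLAGS_* block removed above duplicated bit definitions that, per its own NOTE, had to be kept in sync with kfd_ioctl.h by hand; dropping the copy removes that hazard. Where such mirroring cannot be avoided, a compile-time assertion is the usual guard. A hypothetical sketch of that idiom (the VRAM flag value matches the uapi header, the guard itself is illustrative):

/* uapi side, as in include/uapi/linux/kfd_ioctl.h */
#define KFD_IOC_ALLOC_MEM_FLAGS_VRAM	(1 << 0)
/* internal mirror that must never drift */
#define ALLOC_MEM_FLAGS_VRAM		(1 << 0)

/* fails the build, rather than misbehaving at runtime, on any mismatch */
_Static_assert(ALLOC_MEM_FLAGS_VRAM == KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
	       "internal allocation flags must mirror the uapi values");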
* Only used for no cp scheduling mode * - * @get_tile_config: Returns GPU-specific tiling mode information - * * @set_vm_context_page_table_base: Program page table base for a VMID * * @invalidate_tlbs: Invalidate TLBs for a specific PASID @@ -310,8 +287,6 @@ struct kfd2kgd_calls { void (*set_scratch_backing_va)(struct kgd_dev *kgd, uint64_t va, uint32_t vmid); - int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config); - void (*set_vm_context_page_table_base)(struct kgd_dev *kgd, uint32_t vmid, uint64_t page_table_base); uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd); diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 9f2428fd98f657124bd46195c043ee49278523a6..e8b27fab6aa1d3eb66aa470942910c807e1fa22f 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -23,15 +23,12 @@ #include #include -#include "pp_debug.h" #include "amdgpu.h" #include "amdgpu_smu.h" #include "smu_internal.h" -#include "soc15_common.h" #include "smu_v11_0.h" #include "smu_v12_0.h" #include "atom.h" -#include "amd_pcie.h" #include "vega20_ppt.h" #include "arcturus_ppt.h" #include "navi10_ppt.h" @@ -121,20 +118,20 @@ static int smu_feature_update_enable_state(struct smu_context *smu, if (enabled) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow, - feature_low); + feature_low, NULL); if (ret) return ret; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh, - feature_high); + feature_high, NULL); if (ret) return ret; } else { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow, - feature_low); + feature_low, NULL); if (ret) return ret; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh, - feature_high); + feature_high, NULL); if (ret) return ret; } @@ -195,21 +192,13 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t return -EINVAL; if (if_version) { - ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion); - if (ret) - return ret; - - ret = smu_read_smc_arg(smu, if_version); + ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version); if (ret) return ret; } if (smu_version) { - ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion); - if (ret) - return ret; - - ret = smu_read_smc_arg(smu, smu_version); + ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version); if (ret) return ret; } @@ -218,17 +207,19 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t } int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, uint32_t max) + uint32_t min, uint32_t max, bool lock_needed) { int ret = 0; - if (min <= 0 && max <= 0) - return -EINVAL; - if (!smu_clk_dpm_is_enabled(smu, clk_type)) return 0; + if (lock_needed) + mutex_lock(&smu->mutex); ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max); + if (lock_needed) + mutex_unlock(&smu->mutex); + return ret; } @@ -251,7 +242,7 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, if (max > 0) { param = (uint32_t)((clk_id << 16) | (max & 0xffff)); ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq, - param); + param, NULL); if (ret) return ret; } @@ -259,7 +250,7 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, if (min > 0) { param = (uint32_t)((clk_id << 16) | (min & 0xffff)); ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq, - param); + param, NULL); if (ret) 
return ret; } @@ -335,12 +326,8 @@ int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_typ param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff)); - ret = smu_send_smc_msg_with_param(smu,SMU_MSG_GetDpmFreqByIndex, - param); - if (ret) - return ret; - - ret = smu_read_smc_arg(smu, &param); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex, + param, &param); if (ret) return ret; @@ -542,7 +529,8 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int ret = smu_send_smc_msg_with_param(smu, drv2smu ? SMU_MSG_TransferTableDram2Smu : SMU_MSG_TransferTableSmu2Dram, - table_id | ((argument & 0xFFFF) << 16)); + table_id | ((argument & 0xFFFF) << 16), + NULL); if (ret) return ret; @@ -900,6 +888,7 @@ static int smu_sw_init(void *handle) mutex_init(&smu->sensor_lock); mutex_init(&smu->metrics_lock); + mutex_init(&smu->message_lock); smu->watermarks_bitmap = 0; smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; @@ -943,6 +932,13 @@ static int smu_sw_init(void *handle) return ret; } + if (adev->smu.ppt_funcs->i2c_eeprom_init) { + ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c); + + if (ret) + return ret; + } + return 0; } @@ -952,6 +948,9 @@ static int smu_sw_fini(void *handle) struct smu_context *smu = &adev->smu; int ret; + if (adev->smu.ppt_funcs->i2c_eeprom_fini) + smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c); + kfree(smu->irq_source); smu->irq_source = NULL; @@ -1155,6 +1154,21 @@ static int smu_smc_table_hw_init(struct smu_context *smu, } } } + + if (smu->ppt_funcs->set_power_source) { + /* + * For Navi1X, manually switch it to AC mode as PMFW + * may boot it with DC mode. + */ + if (adev->pm.ac_power) + ret = smu_set_power_source(smu, SMU_POWER_SOURCE_AC); + else + ret = smu_set_power_source(smu, SMU_POWER_SOURCE_DC); + if (ret) { + pr_err("Failed to switch to %s mode!\n", adev->pm.ac_power ? "AC" : "DC"); + return ret; + } + } } if (adev->asic_type != CHIP_ARCTURUS) { ret = smu_notify_display_change(smu); @@ -1471,21 +1485,26 @@ static int smu_disable_dpm(struct smu_context *smu) } /* - * For baco on Arcturus, this operation - * (disable all smu feature) will be handled by SMU FW. + * Disable all enabled SMU features. + * This should be handled in SMU FW; as a backup, + * the driver can issue calls to SMU FW until the + * sequence in SMU FW is operational. */ - if (adev->asic_type == CHIP_ARCTURUS) { - if (use_baco && (smu_version > 0x360e00)) - return 0; - } - - /* Disable all enabled SMU features */ ret = smu_system_features_control(smu, false); if (ret) { pr_err("Failed to disable smu features.\n"); return ret; } + /* + * Arcturus does not have BACO bit in disable feature mask. + * Enablement of BACO bit on Arcturus should be skipped.
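The hunks above show the conversion this patch applies throughout the powerplay code: the old two-step sequence of sending a message and then calling smu_read_smc_arg() becomes a single smu_send_smc_msg_with_param() whose new last argument receives the firmware's reply, with NULL meaning no reply is wanted. A compilable sketch of the calling convention, with a stubbed transport standing in for the real SMU mailbox (the stub and message numbers are assumptions, not driver code):

#include <stdint.h>

/* stub transport: pretend the firmware echoes something back */
static int send_msg_with_param(int msg, uint32_t param, uint32_t *read_arg)
{
	(void)msg;
	if (read_arg)
		*read_arg = param + 1;	/* fake response */
	return 0;
}

static int get_dpm_freq_by_index(uint32_t clk_id, uint32_t level, uint32_t *freq)
{
	uint32_t param = ((clk_id & 0xffff) << 16) | (level & 0xffff);

	/* reply wanted: hand over the out-pointer instead of reading later */
	return send_msg_with_param(0x1 /* stand-in for GetDpmFreqByIndex */, param, freq);
}

static int set_hard_min(uint32_t clk_id, uint32_t min)
{
	/* fire-and-forget: NULL says no reply is expected */
	return send_msg_with_param(0x2 /* stand-in for SetHardMinByFreq */,
				   (clk_id << 16) | (min & 0xffff), NULL);
}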
+ */ + if (adev->asic_type == CHIP_ARCTURUS) { + if (use_baco && (smu_version > 0x360e00)) + return 0; + } + /* For baco, need to leave BACO feature enabled */ if (use_baco) { /* @@ -1992,7 +2011,7 @@ int smu_set_mp1_state(struct smu_context *smu, return 0; } - ret = smu_send_smc_msg(smu, msg); + ret = smu_send_smc_msg(smu, msg, NULL); if (ret) pr_err("[PrepareMp1] Failed!\n"); @@ -2056,8 +2075,11 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu, smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) && smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { smu_set_watermarks_table(smu, table, clock_ranges); - smu->watermarks_bitmap |= WATERMARKS_EXIST; - smu->watermarks_bitmap &= ~WATERMARKS_LOADED; + + if (!(smu->watermarks_bitmap & WATERMARKS_EXIST)) { + smu->watermarks_bitmap |= WATERMARKS_EXIST; + smu->watermarks_bitmap &= ~WATERMARKS_LOADED; + } } mutex_unlock(&smu->mutex); @@ -2065,6 +2087,29 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu, return 0; } +int smu_set_ac_dc(struct smu_context *smu) +{ + int ret = 0; + + /* controlled by firmware */ + if (smu->dc_controlled_by_gpio) + return 0; + + mutex_lock(&smu->mutex); + if (smu->ppt_funcs->set_power_source) { + if (smu->adev->pm.ac_power) + ret = smu_set_power_source(smu, SMU_POWER_SOURCE_AC); + else + ret = smu_set_power_source(smu, SMU_POWER_SOURCE_DC); + if (ret) + pr_err("Failed to switch to %s mode!\n", + smu->adev->pm.ac_power ? "AC" : "DC"); + } + mutex_unlock(&smu->mutex); + + return ret; +} + const struct amd_ip_funcs smu_ip_funcs = { .name = "smu", .early_init = smu_early_init, @@ -2667,12 +2712,3 @@ uint32_t smu_get_pptable_power_limit(struct smu_context *smu) return ret; } - -int smu_send_smc_msg(struct smu_context *smu, - enum smu_message_type msg) -{ - int ret; - - ret = smu_send_smc_msg_with_param(smu, msg, 0); - return ret; -} diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index d3c4e7a8c1b19f03d9b5fc285bd664091f8ca931..c6d3bef15320a57eeef096fa5a19451da9600629 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -21,7 +21,6 @@ * */ -#include "pp_debug.h" #include #include "amdgpu.h" #include "amdgpu_smu.h" @@ -42,7 +41,7 @@ #include #include "amdgpu_ras.h" -#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control.eeprom_accessor))->adev +#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c)) #define CTF_OFFSET_EDGE 5 #define CTF_OFFSET_HOTSPOT 5 @@ -374,13 +373,13 @@ arcturus_set_single_dpm_table(struct smu_context *smu, ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex, - (clk_id << 16 | 0xFF)); + (clk_id << 16 | 0xFF), + &num_of_levels); if (ret) { pr_err("[%s] failed to get dpm levels!\n", __func__); return ret; } - smu_read_smc_arg(smu, &num_of_levels); if (!num_of_levels) { pr_err("[%s] number of clk levels is invalid!\n", __func__); return -EINVAL; @@ -390,12 +389,12 @@ arcturus_set_single_dpm_table(struct smu_context *smu, for (i = 0; i < num_of_levels; i++) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex, - (clk_id << 16 | i)); + (clk_id << 16 | i), + &clk); if (ret) { pr_err("[%s] failed to get dpm freq by index!\n", __func__); return ret; } - smu_read_smc_arg(smu, &clk); if (!clk) { pr_err("[%s] clk value is invalid!\n", __func__); return -EINVAL; @@ -553,13 +552,13 @@ static int arcturus_run_btc(struct smu_context *smu) { int ret = 0; - ret = smu_send_smc_msg(smu, 
SMU_MSG_RunAfllBtc); + ret = smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc, NULL); if (ret) { pr_err("RunAfllBtc failed!\n"); return ret; } - return smu_send_smc_msg(smu, SMU_MSG_RunDcBtc); + return smu_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL); } static int arcturus_populate_umd_state_clk(struct smu_context *smu) @@ -744,7 +743,8 @@ static int arcturus_upload_dpm_level(struct smu_context *smu, bool max, single_dpm_table->dpm_state.soft_min_level; ret = smu_send_smc_msg_with_param(smu, (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq), - (PPCLK_GFXCLK << 16) | (freq & 0xffff)); + (PPCLK_GFXCLK << 16) | (freq & 0xffff), + NULL); if (ret) { pr_err("Failed to set soft %s gfxclk !\n", max ? "max" : "min"); @@ -759,7 +759,8 @@ static int arcturus_upload_dpm_level(struct smu_context *smu, bool max, single_dpm_table->dpm_state.soft_min_level; ret = smu_send_smc_msg_with_param(smu, (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq), - (PPCLK_UCLK << 16) | (freq & 0xffff)); + (PPCLK_UCLK << 16) | (freq & 0xffff), + NULL); if (ret) { pr_err("Failed to set soft %s memclk !\n", max ? "max" : "min"); @@ -774,7 +775,8 @@ static int arcturus_upload_dpm_level(struct smu_context *smu, bool max, single_dpm_table->dpm_state.soft_min_level; ret = smu_send_smc_msg_with_param(smu, (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq), - (PPCLK_SOCCLK << 16) | (freq & 0xffff)); + (PPCLK_SOCCLK << 16) | (freq & 0xffff), + NULL); if (ret) { pr_err("Failed to set soft %s socclk !\n", max ? "max" : "min"); @@ -1289,12 +1291,11 @@ static int arcturus_get_power_limit(struct smu_context *smu, return -EINVAL; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit, - power_src << 16); + power_src << 16, &asic_default_power_limit); if (ret) { pr_err("[%s] get PPT limit failed!", __func__); return ret; } - smu_read_smc_arg(smu, &asic_default_power_limit); } else { /* the last hope to figure out the ppt limit */ if (!pptable) { @@ -1498,7 +1499,8 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu, ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type); + 1 << workload_type, + NULL); if (ret) { pr_err("Fail to set workload type %d\n", workload_type); return ret; @@ -2188,7 +2190,7 @@ static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control) control->class = I2C_CLASS_SPD; control->dev.parent = &adev->pdev->dev; control->algo = &arcturus_i2c_eeprom_i2c_algo; - snprintf(control->name, sizeof(control->name), "RAS EEPROM"); + snprintf(control->name, sizeof(control->name), "AMDGPU EEPROM"); res = i2c_add_adapter(control); if (res) @@ -2233,7 +2235,7 @@ static int arcturus_set_df_cstate(struct smu_context *smu, return -EINVAL; } - return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state); + return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL); } static const struct pptable_funcs arcturus_ppt_funcs = { @@ -2299,7 +2301,6 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, .system_features_control = smu_v11_0_system_features_control, .send_smc_msg_with_param = smu_v11_0_send_msg_with_param, - .read_smc_arg = smu_v11_0_read_arg, .init_display_count = smu_v11_0_init_display_count, .set_allowed_mask = smu_v11_0_set_allowed_mask, .get_enabled_mask = smu_v11_0_get_enabled_mask, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 
bf04cfefb2839afeec239c25de6bf85981f42a38..7740488999df78b06326715cc24d580d35286721 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -1250,7 +1250,7 @@ static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources) switch (sources) { default: pr_err("Unknown throttling event sources."); - /* fall through */ + fallthrough; case 0: protection = false; /* src is unused */ @@ -3698,12 +3698,12 @@ static int smu7_request_link_speed_change_before_state_change( data->force_pcie_gen = PP_PCIEGen2; if (current_link_speed == PP_PCIEGen2) break; - /* fall through */ + fallthrough; case PP_PCIEGen2: if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false)) break; + fallthrough; #endif - /* fall through */ default: data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr); break; diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index 97b6714e83e6729902bab6e23de04b2400f0928b..ae2c318dd6fac4d8b6fb5de077d6f5d7b33778dd 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -362,6 +362,7 @@ struct smu_context struct mutex mutex; struct mutex sensor_lock; struct mutex metrics_lock; + struct mutex message_lock; uint64_t pool_size; struct smu_table_context smu_table; @@ -371,6 +372,9 @@ struct smu_context struct amd_pp_display_configuration *display_config; struct smu_baco_context smu_baco; void *od_settings; +#if defined(CONFIG_DEBUG_FS) + struct dentry *debugfs_sclk; +#endif uint32_t pstate_sclk; uint32_t pstate_mclk; @@ -404,6 +408,7 @@ struct smu_context uint32_t smc_if_version; bool uploading_custom_pp_table; + bool dc_controlled_by_gpio; }; struct i2c_adapter; @@ -514,8 +519,7 @@ struct pptable_funcs { int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu); int (*system_features_control)(struct smu_context *smu, bool en); int (*send_smc_msg_with_param)(struct smu_context *smu, - enum smu_message_type msg, uint32_t param); - int (*read_smc_arg)(struct smu_context *smu, uint32_t *arg); + enum smu_message_type msg, uint32_t param, uint32_t *read_arg); int (*init_display_count)(struct smu_context *smu, uint32_t count); int (*set_allowed_mask)(struct smu_context *smu); int (*get_enabled_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num); @@ -567,6 +571,7 @@ struct pptable_funcs { int (*override_pcie_parameters)(struct smu_context *smu); uint32_t (*get_pptable_power_limit)(struct smu_context *smu); int (*disable_umc_cdr_12gbps_workaround)(struct smu_context *smu); + int (*set_power_source)(struct smu_context *smu, enum smu_power_src_type power_src); }; int smu_load_microcode(struct smu_context *smu); @@ -707,7 +712,7 @@ int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type, int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max, bool lock_needed); int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, uint32_t max); + uint32_t min, uint32_t max, bool lock_needed); int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max); int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type, @@ -715,6 +720,7 @@ int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type, enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu); 
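The smu7_hwmgr.c hunks above swap /* fall through */ comments for the kernel's fallthrough pseudo-keyword, which the compiler can actually verify. Outside the kernel tree the keyword can be approximated as below (the kernel's real definition lives in include/linux/compiler_attributes.h); the switch mirrors the shape of the throttling-source code in that file:

#if defined(__has_attribute)
# if __has_attribute(__fallthrough__)
#  define fallthrough __attribute__((__fallthrough__))
# endif
#endif
#ifndef fallthrough
# define fallthrough do {} while (0)	/* no-op fallback for old compilers */
#endif

static int pick_protection(int sources)
{
	int protection;

	switch (sources) {
	default:
		/* unknown source: report it in real code, then treat as none */
		fallthrough;
	case 0:
		protection = 0;
		break;
	case 1:
		protection = 1;
		break;
	}
	return protection;
}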
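The dc_controlled_by_gpio flag and set_power_source hook added to struct smu_context above back the AC/DC handling introduced in amdgpu_smu.c earlier in this patch: when the hardware owns the power-source pin nothing needs to be sent, otherwise the driver tells PMFW which source is active. A minimal sketch of that decision with a stubbed notifier (illustrative, not the driver code):

#include <stdbool.h>
#include <stdio.h>

enum power_src { POWER_SOURCE_AC, POWER_SOURCE_DC };

/* stub for the SMU_MSG_NotifyPowerSource message */
static int notify_power_source(enum power_src src)
{
	printf("PMFW notified: %s\n", src == POWER_SOURCE_AC ? "AC" : "DC");
	return 0;
}

static int sync_power_source(bool dc_controlled_by_gpio, bool ac_power)
{
	if (dc_controlled_by_gpio)	/* firmware switches on its own */
		return 0;

	return notify_power_source(ac_power ? POWER_SOURCE_AC : POWER_SOURCE_DC);
}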
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level); int smu_set_display_count(struct smu_context *smu, uint32_t count); +int smu_set_ac_dc(struct smu_context *smu); bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type); const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type); const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature); diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h index acccdf621b4efba4ed2f378dcabe252169e358dd..674e426ed59bbe1d056b2371fbf5777492604f27 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h @@ -29,6 +29,7 @@ #define SMU11_DRIVER_IF_VERSION_VG20 0x13 #define SMU11_DRIVER_IF_VERSION_ARCT 0x12 #define SMU11_DRIVER_IF_VERSION_NV10 0x35 +#define SMU11_DRIVER_IF_VERSION_NV12 0x33 #define SMU11_DRIVER_IF_VERSION_NV14 0x36 /* MP Apertures */ @@ -182,9 +183,8 @@ int smu_v11_0_system_features_control(struct smu_context *smu, int smu_v11_0_send_msg_with_param(struct smu_context *smu, enum smu_message_type msg, - uint32_t param); - -int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg); + uint32_t param, + uint32_t *read_arg); int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count); @@ -267,4 +267,7 @@ uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu); int smu_v11_0_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level); +int smu_v11_0_set_power_source(struct smu_context *smu, + enum smu_power_src_type power_src); + #endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h index d79e54b5ebf6065a1d37ebd84dcd6f32c51c0765..7fbebc1979cf15ed884e5b920109e66756e3c253 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h @@ -40,14 +40,13 @@ struct smu_12_0_cmn2aisc_mapping { int smu_v12_0_send_msg_without_waiting(struct smu_context *smu, uint16_t msg); -int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg); - int smu_v12_0_wait_for_response(struct smu_context *smu); int smu_v12_0_send_msg_with_param(struct smu_context *smu, enum smu_message_type msg, - uint32_t param); + uint32_t param, + uint32_t *read_arg); int smu_v12_0_check_fw_status(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 0d73a49166af3f12c6491d2982a81689716077da..9c60b38ab53a88afd64efb0877248c6e15488f31 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -21,7 +21,6 @@ * */ -#include "pp_debug.h" #include #include #include "amdgpu.h" @@ -31,7 +30,6 @@ #include "amdgpu_atomfirmware.h" #include "smu_v11_0.h" #include "smu11_driver_if_navi10.h" -#include "soc15_common.h" #include "atom.h" #include "navi10_ppt.h" #include "smu_v11_0_pptable.h" @@ -349,7 +347,6 @@ navi10_get_allowed_feature_mask(struct smu_context *smu, | FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT) | FEATURE_MASK(FEATURE_FW_DSTATE_BIT) | FEATURE_MASK(FEATURE_BACO_BIT) - | FEATURE_MASK(FEATURE_ACDC_BIT) | FEATURE_MASK(FEATURE_GFX_SS_BIT) | FEATURE_MASK(FEATURE_APCC_DFLL_BIT) | FEATURE_MASK(FEATURE_FW_CTF_BIT) @@ -393,6 +390,9 @@ navi10_get_allowed_feature_mask(struct smu_context *smu, if (smu->adev->pg_flags & AMD_PG_SUPPORT_JPEG) *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_JPEG_PG_BIT); + if 
(smu->dc_controlled_by_gpio) + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ACDC_BIT); + /* disable DPM UCLK and DS SOCCLK on navi10 A0 secure board */ if (is_asic_secure(smu)) { /* only for navi10 A0 */ @@ -527,6 +527,9 @@ static int navi10_store_powerplay_table(struct smu_context *smu) table_context->thermal_controller_type = powerplay_table->thermal_controller_type; + if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_HARDWAREDC) + smu->dc_controlled_by_gpio = true; + mutex_lock(&smu_baco->mutex); if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO || powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO) @@ -661,14 +664,14 @@ static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable) if (enable) { /* vcn dpm on is a prerequisite for vcn power gate messages */ if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1, NULL); if (ret) return ret; } power_gate->vcn_gated = false; } else { if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { - ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn); + ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL); if (ret) return ret; } @@ -686,14 +689,14 @@ static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable) if (enable) { if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) { - ret = smu_send_smc_msg(smu, SMU_MSG_PowerUpJpeg); + ret = smu_send_smc_msg(smu, SMU_MSG_PowerUpJpeg, NULL); if (ret) return ret; } power_gate->jpeg_gated = false; } else { if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) { - ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownJpeg); + ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownJpeg, NULL); if (ret) return ret; } @@ -970,7 +973,7 @@ static int navi10_force_clk_levels(struct smu_context *smu, if (ret) return size; - ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq); + ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false); if (ret) return size; break; @@ -1042,7 +1045,7 @@ static int navi10_pre_display_config_changed(struct smu_context *smu) int ret = 0; uint32_t max_freq = 0; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL); if (ret) return ret; @@ -1062,20 +1065,12 @@ static int navi10_display_config_changed(struct smu_context *smu) { int ret = 0; - if ((smu->watermarks_bitmap & WATERMARKS_EXIST) && - !(smu->watermarks_bitmap & WATERMARKS_LOADED)) { - ret = smu_write_watermarks_table(smu); - if (ret) - return ret; - - smu->watermarks_bitmap |= WATERMARKS_LOADED; - } - if ((smu->watermarks_bitmap & WATERMARKS_EXIST) && smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) && smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, - smu->display_config->num_display); + smu->display_config->num_display, + NULL); if (ret) return ret; } @@ -1102,7 +1097,7 @@ static int navi10_force_dpm_limit_value(struct smu_context *smu, bool highest) return ret; force_freq = highest ? 
max_freq : min_freq; - ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq); + ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq, false); if (ret) return ret; } @@ -1128,7 +1123,7 @@ static int navi10_unforce_dpm_levels(struct smu_context *smu) if (ret) return ret; - ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq); + ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false); if (ret) return ret; } @@ -1400,7 +1395,7 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u if (workload_type < 0) return -EINVAL; smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type); + 1 << workload_type, NULL); return ret; } @@ -1465,7 +1460,8 @@ static int navi10_notify_smc_display_config(struct smu_context *smu) if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetMinDeepSleepDcefclk, - min_clocks.dcef_clock_in_sr/100); + min_clocks.dcef_clock_in_sr/100, + NULL); if (ret) { pr_err("Attempt to set divider for DCEFCLK Failed!"); return ret; @@ -1493,6 +1489,7 @@ static int navi10_set_watermarks_table(struct smu_context *smu, *clock_ranges) { int i; + int ret = 0; Watermarks_t *table = watermarks; if (!table || !clock_ranges) @@ -1544,6 +1541,18 @@ static int navi10_set_watermarks_table(struct smu_context *smu, clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id; } + smu->watermarks_bitmap |= WATERMARKS_EXIST; + + /* pass data to smu controller */ + if (!(smu->watermarks_bitmap & WATERMARKS_LOADED)) { + ret = smu_write_watermarks_table(smu); + if (ret) { + pr_err("Failed to update WMTABLE!"); + return ret; + } + smu->watermarks_bitmap |= WATERMARKS_LOADED; + } + return 0; } @@ -1674,10 +1683,10 @@ static int navi10_set_standard_performance_level(struct smu_context *smu) return navi10_set_performance_level(smu, AMD_DPM_FORCED_LEVEL_AUTO); } - ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq); + ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false); if (ret) return ret; - ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq); + ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false); if (ret) return ret; @@ -1742,10 +1751,10 @@ static int navi10_set_peak_performance_level(struct smu_context *smu) if (ret) return ret; - ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq); + ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false); if (ret) return ret; - ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq); + ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false); if (ret) return ret; @@ -1855,12 +1864,11 @@ static int navi10_get_power_limit(struct smu_context *smu, return -EINVAL; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit, - power_src << 16); + power_src << 16, &asic_default_power_limit); if (ret) { pr_err("[%s] get PPT limit failed!", __func__); return ret; } - smu_read_smc_arg(smu, &asic_default_power_limit); } else { /* the last hope to figure out the ppt limit */ if (!pptable) { @@ -1900,7 +1908,8 @@ static int navi10_update_pcie_parameters(struct smu_context *smu, pptable->PcieLaneCount[i] : pcie_width_cap); ret = smu_send_smc_msg_with_param(smu, SMU_MSG_OverridePcieParameters, - smu_pcie_arg); + smu_pcie_arg, + NULL); if (ret) return ret; @@ -1946,13 +1955,13 @@ static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu, ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_GetVoltageByDpm, - param); + param, + &value); if (ret) { pr_err("[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!"); return ret; } - smu_read_smc_arg(smu, &value); *voltage = (uint16_t)value; return 0; @@ -2209,7 +2218,7 @@ static int navi10_run_btc(struct smu_context *smu) { int ret = 0; - ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc); + ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc, NULL); if (ret) pr_err("RunBtc failed!\n"); @@ -2221,9 +2230,9 @@ static int navi10_dummy_pstate_control(struct smu_context *smu, bool enable) int result = 0; if (!enable) - result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE); + result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE, NULL); else - result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE); + result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE, NULL); return result; } @@ -2332,7 +2341,6 @@ static const struct pptable_funcs navi10_ppt_funcs = { .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, .system_features_control = smu_v11_0_system_features_control, .send_smc_msg_with_param = smu_v11_0_send_msg_with_param, - .read_smc_arg = smu_v11_0_read_arg, .init_display_count = smu_v11_0_init_display_count, .set_allowed_mask = smu_v11_0_set_allowed_mask, .get_enabled_mask = smu_v11_0_get_enabled_mask, @@ -2366,6 +2374,7 @@ static const struct pptable_funcs navi10_ppt_funcs = { .get_pptable_power_limit = navi10_get_pptable_power_limit, .run_btc = navi10_run_btc, .disable_umc_cdr_12gbps_workaround = navi10_disable_umc_cdr_12gbps_workaround, + .set_power_source = smu_v11_0_set_power_source, }; void navi10_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c index 810994017f499ed368c3e64d41a60475c73d81b0..7bf52ecba01d3e0255490fdeae73c7fd7d1e56ad 100644 --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c @@ -24,7 +24,6 @@ #include "amdgpu.h" #include "amdgpu_smu.h" #include "smu_internal.h" -#include "soc15_common.h" #include "smu_v12_0_ppsmc.h" #include "smu12_driver_if.h" #include "smu_v12_0.h" @@ -111,8 +110,8 @@ static struct smu_12_0_cmn2aisc_mapping renoir_clk_map[SMU_CLK_COUNT] = { CLK_MAP(GFXCLK, CLOCK_GFXCLK), CLK_MAP(SCLK, CLOCK_GFXCLK), CLK_MAP(SOCCLK, CLOCK_SOCCLK), - CLK_MAP(UCLK, CLOCK_UMCCLK), - CLK_MAP(MCLK, CLOCK_UMCCLK), + CLK_MAP(UCLK, CLOCK_FCLK), + CLK_MAP(MCLK, CLOCK_FCLK), }; static struct smu_12_0_cmn2aisc_mapping renoir_table_map[SMU_TABLE_COUNT] = { @@ -280,7 +279,7 @@ static int renoir_print_clk_levels(struct smu_context *smu, break; case SMU_MCLK: count = NUM_MEMCLK_DPM_LEVELS; - cur_value = metrics.ClockFrequency[CLOCK_UMCCLK]; + cur_value = metrics.ClockFrequency[CLOCK_FCLK]; break; case SMU_DCEFCLK: count = NUM_DCFCLK_DPM_LEVELS; @@ -342,14 +341,14 @@ static int renoir_dpm_set_uvd_enable(struct smu_context *smu, bool enable) if (enable) { /* vcn dpm on is a prerequisite for vcn power gate messages */ if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL); if (ret) return ret; } power_gate->vcn_gated = false; } else { if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { - ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn); + ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL); if (ret) return ret; } @@ -367,14 +366,14 @@ static int 
renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable) if (enable) { if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL); if (ret) return ret; } power_gate->jpeg_gated = false; } else { if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL); if (ret) return ret; } @@ -423,7 +422,7 @@ static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest) return ret; force_freq = highest ? max_freq : min_freq; - ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq); + ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq, false); if (ret) return ret; } @@ -456,7 +455,7 @@ static int renoir_unforce_dpm_levels(struct smu_context *smu) { if (ret) return ret; - ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq); + ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false); if (ret) return ret; } @@ -622,22 +621,24 @@ static int renoir_force_clk_levels(struct smu_context *smu, return ret; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, soft_max_level == 0 ? min_freq : - soft_max_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : max_freq); + soft_max_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : max_freq, + NULL); if (ret) return ret; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, soft_min_level == 2 ? max_freq : - soft_min_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : min_freq); + soft_min_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : min_freq, + NULL); if (ret) return ret; break; case SMU_SOCCLK: GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq); GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq); - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq, NULL); if (ret) return ret; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq, NULL); if (ret) return ret; break; @@ -645,10 +646,10 @@ static int renoir_force_clk_levels(struct smu_context *smu, case SMU_FCLK: GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq); GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq); - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq, NULL); if (ret) return ret; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq, NULL); if (ret) return ret; break; @@ -681,7 +682,8 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u } ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type); + 1 << workload_type, + NULL); if (ret) { pr_err_once("Fail to set workload type %d\n", workload_type); return ret; @@ -701,7 +703,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu) if (ret) return ret; - ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq); + ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false); if (ret) return ret; @@ -709,7 +711,7 @@ static int 
renoir_set_peak_clock_by_device(struct smu_context *smu) if (ret) return ret; - ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq); + ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false); if (ret) return ret; @@ -810,9 +812,10 @@ static int renoir_set_watermarks_table( clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id; } + smu->watermarks_bitmap |= WATERMARKS_EXIST; + /* pass data to smu controller */ - if ((smu->watermarks_bitmap & WATERMARKS_EXIST) && - !(smu->watermarks_bitmap & WATERMARKS_LOADED)) { + if (!(smu->watermarks_bitmap & WATERMARKS_LOADED)) { ret = smu_write_watermarks_table(smu); if (ret) { pr_err("Failed to update WMTABLE!"); @@ -913,7 +916,6 @@ static const struct pptable_funcs renoir_ppt_funcs = { .powergate_vcn = smu_v12_0_powergate_vcn, .powergate_jpeg = smu_v12_0_powergate_jpeg, .send_smc_msg_with_param = smu_v12_0_send_msg_with_param, - .read_smc_arg = smu_v12_0_read_arg, .set_gfx_cgpg = smu_v12_0_set_gfx_cgpg, .gfx_off_control = smu_v12_0_gfx_off_control, .init_smc_tables = smu_v12_0_init_smc_tables, diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h index 7bd200ffcda8b873fcd2a76d23440abca4ae0b49..40c35bcc5a0a2c9a358e6773375e156c7a5bc01c 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_internal.h +++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h @@ -79,12 +79,13 @@ #define smu_set_default_od_settings(smu, initialize) \ ((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0) -int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg); +#define smu_send_smc_msg_with_param(smu, msg, param, read_arg) \ + ((smu)->ppt_funcs->send_smc_msg_with_param? (smu)->ppt_funcs->send_smc_msg_with_param((smu), (msg), (param), (read_arg)) : 0) + +static inline int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg, uint32_t *read_arg) { + return smu_send_smc_msg_with_param(smu, msg, 0, read_arg); +} -#define smu_send_smc_msg_with_param(smu, msg, param) \ - ((smu)->ppt_funcs->send_smc_msg_with_param? (smu)->ppt_funcs->send_smc_msg_with_param((smu), (msg), (param)) : 0) -#define smu_read_smc_arg(smu, arg) \ - ((smu)->ppt_funcs->read_smc_arg? (smu)->ppt_funcs->read_smc_arg((smu), (arg)) : 0) #define smu_alloc_dpm_context(smu) \ ((smu)->ppt_funcs->alloc_dpm_context ? (smu)->ppt_funcs->alloc_dpm_context((smu)) : 0) #define smu_init_display_count(smu, count) \ @@ -210,4 +211,7 @@ int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg); #define smu_disable_umc_cdr_12gbps_workaround(smu) \ ((smu)->ppt_funcs->disable_umc_cdr_12gbps_workaround ? (smu)->ppt_funcs->disable_umc_cdr_12gbps_workaround((smu)) : 0) +#define smu_set_power_source(smu, power_src) \ + ((smu)->ppt_funcs->set_power_source ? 
(smu)->ppt_funcs->set_power_source((smu), (power_src)) : 0) + #endif diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index c9e5ce135fd42bd417fa64b1fe43bed8fee6cf45..d19e1d0d56c0a3055ee0eefc8f36e6e4198ed752 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -26,7 +26,6 @@ #define SMU_11_0_PARTIAL_PPTABLE -#include "pp_debug.h" #include "amdgpu.h" #include "amdgpu_smu.h" #include "smu_internal.h" @@ -64,7 +63,7 @@ static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu, return 0; } -int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg) +static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg) { struct amdgpu_device *adev = smu->adev; @@ -92,7 +91,8 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu) int smu_v11_0_send_msg_with_param(struct smu_context *smu, enum smu_message_type msg, - uint32_t param) + uint32_t param, + uint32_t *read_arg) { struct amdgpu_device *adev = smu->adev; int ret = 0, index = 0; @@ -101,11 +101,12 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu, if (index < 0) return index; + mutex_lock(&smu->message_lock); ret = smu_v11_0_wait_for_response(smu); if (ret) { pr_err("Msg issuing pre-check failed and " "SMU may be not in the right state!\n"); - return ret; + goto out; } WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); @@ -115,10 +116,21 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu, smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index); ret = smu_v11_0_wait_for_response(smu); - if (ret) + if (ret) { pr_err("failed send message: %10s (%d) \tparam: 0x%08x response %#x\n", smu_get_message_name(smu, msg), index, param, ret); - + goto out; + } + if (read_arg) { + ret = smu_v11_0_read_arg(smu, read_arg); + if (ret) { + pr_err("failed to read message arg: %10s (%d) \tparam: 0x%08x response %#x\n", + smu_get_message_name(smu, msg), index, param, ret); + goto out; + } + } +out: + mutex_unlock(&smu->message_lock); return ret; } @@ -262,6 +274,9 @@ int smu_v11_0_check_fw_version(struct smu_context *smu) case CHIP_NAVI10: smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV10; break; + case CHIP_NAVI12: + smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV12; + break; case CHIP_NAVI14: smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV14; break; @@ -671,12 +686,14 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu) ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSystemVirtualDramAddrHigh, - address_high); + address_high, + NULL); if (ret) return ret; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSystemVirtualDramAddrLow, - address_low); + address_low, + NULL); if (ret) return ret; @@ -685,15 +702,15 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu) address_low = (uint32_t)lower_32_bits(address); ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh, - address_high); + address_high, NULL); if (ret) return ret; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow, - address_low); + address_low, NULL); if (ret) return ret; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize, - (uint32_t)memory_pool->size); + (uint32_t)memory_pool->size, NULL); if (ret) return ret; @@ -757,7 +774,7 @@ int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk) int ret; ret = smu_send_smc_msg_with_param(smu, - SMU_MSG_SetMinDeepSleepDcefclk, clk); + SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL); if (ret) pr_err("SMU11 attempt to set 
divider for DCEFCLK Failed!"); @@ -784,11 +801,13 @@ int smu_v11_0_set_driver_table_location(struct smu_context *smu) if (driver_table->mc_address) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh, - upper_32_bits(driver_table->mc_address)); + upper_32_bits(driver_table->mc_address), + NULL); if (!ret) ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow, - lower_32_bits(driver_table->mc_address)); + lower_32_bits(driver_table->mc_address), + NULL); } return ret; @@ -802,11 +821,13 @@ int smu_v11_0_set_tool_table_location(struct smu_context *smu) if (tool_table->mc_address) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetToolsDramAddrHigh, - upper_32_bits(tool_table->mc_address)); + upper_32_bits(tool_table->mc_address), + NULL); if (!ret) ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetToolsDramAddrLow, - lower_32_bits(tool_table->mc_address)); + lower_32_bits(tool_table->mc_address), + NULL); } return ret; @@ -819,7 +840,7 @@ int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count) if (!smu->pm_enabled) return ret; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL); return ret; } @@ -837,12 +858,12 @@ int smu_v11_0_set_allowed_mask(struct smu_context *smu) bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64); ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh, - feature_mask[1]); + feature_mask[1], NULL); if (ret) goto failed; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow, - feature_mask[0]); + feature_mask[0], NULL); if (ret) goto failed; @@ -862,17 +883,11 @@ int smu_v11_0_get_enabled_mask(struct smu_context *smu, return -EINVAL; if (bitmap_empty(feature->enabled, feature->feature_num)) { - ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh); - if (ret) - return ret; - ret = smu_read_smc_arg(smu, &feature_mask_high); + ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high); if (ret) return ret; - ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow); - if (ret) - return ret; - ret = smu_read_smc_arg(smu, &feature_mask_low); + ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low); if (ret) return ret; @@ -894,7 +909,7 @@ int smu_v11_0_system_features_control(struct smu_context *smu, int ret = 0; ret = smu_send_smc_msg(smu, (en ? 
SMU_MSG_EnableAllSmuFeatures : - SMU_MSG_DisableAllSmuFeatures)); + SMU_MSG_DisableAllSmuFeatures), NULL); if (ret) return ret; @@ -923,7 +938,7 @@ int smu_v11_0_notify_display_change(struct smu_context *smu) return ret; if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) && smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM) - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL); return ret; } @@ -947,30 +962,24 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock, return -EINVAL; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq, - clk_id << 16); + clk_id << 16, clock); if (ret) { pr_err("[GetMaxSustainableClock] Failed to get max DC clock from SMC!"); return ret; } - ret = smu_read_smc_arg(smu, clock); - if (ret) - return ret; - if (*clock != 0) return 0; /* if DC limit is zero, return AC limit */ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, - clk_id << 16); + clk_id << 16, clock); if (ret) { pr_err("[GetMaxSustainableClock] failed to get max AC clock from SMC!"); return ret; } - ret = smu_read_smc_arg(smu, clock); - - return ret; + return 0; } int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu) @@ -1106,7 +1115,7 @@ int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n) return -EOPNOTSUPP; } - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL); if (ret) { pr_err("[%s] Set power limit Failed!\n", __func__); return ret; @@ -1136,11 +1145,7 @@ int smu_v11_0_get_current_clk_freq(struct smu_context *smu, ret = smu_get_current_clk_freq_by_table(smu, clk_id, &freq); else { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmClockFreq, - (asic_clk_id << 16)); - if (ret) - return ret; - - ret = smu_read_smc_arg(smu, &freq); + (asic_clk_id << 16), &freq); if (ret) return ret; } @@ -1375,9 +1380,9 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable) if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) return 0; if (enable) - ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff); + ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL); else - ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff); + ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL); break; default: break; @@ -1515,10 +1520,18 @@ int smu_v11_0_set_xgmi_pstate(struct smu_context *smu, int ret = 0; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetXgmiMode, - pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3); + pstate ? 
XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3, + NULL); return ret; } +static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context *smu) +{ + return smu_send_smc_msg(smu, + SMU_MSG_ReenableAcDcInterrupt, + NULL); +} + #define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */ #define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */ @@ -1552,6 +1565,9 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev, break; } + } else if (client_id == SOC15_IH_CLIENTID_MP1) { + if (src_id == 0xfe) + smu_v11_0_ack_ac_dc_interrupt(&adev->smu); } return 0; @@ -1591,6 +1607,12 @@ int smu_v11_0_register_irq_handler(struct smu_context *smu) if (ret) return ret; + ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1, + 0xfe, + irq_src); + if (ret) + return ret; + return ret; } @@ -1628,14 +1650,14 @@ int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu) { int ret = 0; - ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME); + ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL); return ret; } static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v11_0_baco_seq baco_seq) { - return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq); + return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL); } bool smu_v11_0_baco_is_support(struct smu_context *smu) @@ -1704,12 +1726,12 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) data |= 0x80000000; WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data); - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0, NULL); } else { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1, NULL); } } else { - ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco); + ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL); if (ret) goto out; @@ -1777,19 +1799,13 @@ int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c param = (clk_id & 0xffff) << 16; if (max) { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param); - if (ret) - goto failed; - ret = smu_read_smc_arg(smu, max); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param, max); if (ret) goto failed; } if (min) { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param); - if (ret) - goto failed; - ret = smu_read_smc_arg(smu, min); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min); if (ret) goto failed; } @@ -1811,7 +1827,7 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_ if (max > 0) { param = (uint32_t)((clk_id << 16) | (max & 0xffff)); ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq, - param); + param, NULL); if (ret) return ret; } @@ -1819,7 +1835,7 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_ if (min > 0) { param = (uint32_t)((clk_id << 16) | (min & 0xffff)); ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq, - param); + param, NULL); if (ret) return ret; } @@ -1939,3 +1955,18 @@ int smu_v11_0_set_performance_level(struct smu_context *smu, return ret; } +int smu_v11_0_set_power_source(struct smu_context *smu, + enum smu_power_src_type power_src) +{ + int pwr_source; + + pwr_source = smu_power_get_index(smu, (uint32_t)power_src); + if (pwr_source < 0) + return -EINVAL; + + return smu_send_smc_msg_with_param(smu, + SMU_MSG_NotifyPowerSource, + 
pwr_source, + NULL); +} + diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c index 870e6db2907eb67e43d75ee26850c173d2985960..169ebdad87b87be62a8d58d754bee8a29565b7ee 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c @@ -20,7 +20,6 @@ * OTHER DEALINGS IN THE SOFTWARE. */ -#include "pp_debug.h" #include #include "amdgpu.h" #include "amdgpu_smu.h" @@ -50,7 +49,7 @@ int smu_v12_0_send_msg_without_waiting(struct smu_context *smu, return 0; } -int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg) +static int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg) { struct amdgpu_device *adev = smu->adev; @@ -78,7 +77,8 @@ int smu_v12_0_wait_for_response(struct smu_context *smu) int smu_v12_0_send_msg_with_param(struct smu_context *smu, enum smu_message_type msg, - uint32_t param) + uint32_t param, + uint32_t *read_arg) { struct amdgpu_device *adev = smu->adev; int ret = 0, index = 0; @@ -87,11 +87,12 @@ smu_v12_0_send_msg_with_param(struct smu_context *smu, if (index < 0) return index; + mutex_lock(&smu->message_lock); ret = smu_v12_0_wait_for_response(smu); if (ret) { pr_err("Msg issuing pre-check failed and " "SMU may be not in the right state!\n"); - return ret; + goto out; } WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); @@ -101,10 +102,21 @@ smu_v12_0_send_msg_with_param(struct smu_context *smu, smu_v12_0_send_msg_without_waiting(smu, (uint16_t)index); ret = smu_v12_0_wait_for_response(smu); - if (ret) + if (ret) { pr_err("Failed to send message 0x%x, response 0x%x param 0x%x\n", index, ret, param); - + goto out; + } + if (read_arg) { + ret = smu_v12_0_read_arg(smu, read_arg); + if (ret) { + pr_err("Failed to read message arg 0x%x, response 0x%x param 0x%x\n", + index, ret, param); + goto out; + } + } +out: + mutex_unlock(&smu->message_lock); return ret; } @@ -163,9 +175,9 @@ int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate) return 0; if (gate) - return smu_send_smc_msg(smu, SMU_MSG_PowerDownSdma); + return smu_send_smc_msg(smu, SMU_MSG_PowerDownSdma, NULL); else - return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma); + return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma, NULL); } int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate) @@ -174,9 +186,9 @@ int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate) return 0; if (gate) - return smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn); + return smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL); else - return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn); + return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn, NULL); } int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate) @@ -185,9 +197,9 @@ int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate) return 0; if (gate) - return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0); + return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL); else - return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0); + return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL); } int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable) @@ -196,7 +208,9 @@ int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable) return 0; return smu_v12_0_send_msg_with_param(smu, - SMU_MSG_SetGfxCGPG, enable ? 1 : 0); + SMU_MSG_SetGfxCGPG, + enable ? 
1 : 0, + NULL); } int smu_v12_0_read_sensor(struct smu_context *smu, @@ -262,10 +276,10 @@ int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable) int ret = 0, timeout = 500; if (enable) { - ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff); + ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL); } else { - ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff); + ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL); /* confirm gfx is back to "on" state, timeout is 0.5 second */ while (!(smu_v12_0_get_gfxoff_status(smu) == 2)) { @@ -331,17 +345,11 @@ int smu_v12_0_get_enabled_mask(struct smu_context *smu, if (!feature_mask || num < 2) return -EINVAL; - ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh); - if (ret) - return ret; - ret = smu_read_smc_arg(smu, &feature_mask_high); + ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high); if (ret) return ret; - ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow); - if (ret) - return ret; - ret = smu_read_smc_arg(smu, &feature_mask_low); + ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low); if (ret) return ret; @@ -388,14 +396,11 @@ int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c switch (clk_type) { case SMU_GFXCLK: case SMU_SCLK: - ret = smu_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency); + ret = smu_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency, max); if (ret) { pr_err("Attempt to get max GX frequency from SMC Failed !\n"); goto failed; } - ret = smu_read_smc_arg(smu, max); - if (ret) - goto failed; break; case SMU_UCLK: case SMU_FCLK: @@ -419,14 +424,11 @@ int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c switch (clk_type) { case SMU_GFXCLK: case SMU_SCLK: - ret = smu_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency); + ret = smu_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency, min); if (ret) { pr_err("Attempt to get min GX frequency from SMC Failed !\n"); goto failed; } - ret = smu_read_smc_arg(smu, min); - if (ret) - goto failed; break; case SMU_UCLK: case SMU_FCLK: @@ -450,7 +452,7 @@ failed: } int smu_v12_0_mode2_reset(struct smu_context *smu){ - return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2); + return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2, NULL); } int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, @@ -458,45 +460,42 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_ { int ret = 0; - if (max < min) - return -EINVAL; - switch (clk_type) { case SMU_GFXCLK: case SMU_SCLK: - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min, NULL); if (ret) return ret; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max, NULL); if (ret) return ret; break; case SMU_FCLK: case SMU_MCLK: - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min, NULL); if (ret) return ret; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max, NULL); if (ret) return ret; break; case SMU_SOCCLK: - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min); + ret = 
smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min, NULL); if (ret) return ret; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max, NULL); if (ret) return ret; break; case SMU_VCLK: - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min, NULL); if (ret) return ret; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max, NULL); if (ret) return ret; break; @@ -515,11 +514,13 @@ int smu_v12_0_set_driver_table_location(struct smu_context *smu) if (driver_table->mc_address) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh, - upper_32_bits(driver_table->mc_address)); + upper_32_bits(driver_table->mc_address), + NULL); if (!ret) ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow, - lower_32_bits(driver_table->mc_address)); + lower_32_bits(driver_table->mc_address), + NULL); } return ret; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c index 49e5ef3e3876cb53de5d3b2990a2470f62abbe8f..16aa171971d3aaa6ad81af88d27f53c385a3fdb7 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c @@ -33,6 +33,8 @@ #include "smu7_smumgr.h" #include "vega20_hwmgr.h" +#include "smu_v11_0_i2c.h" + /* MP Apertures */ #define MP0_Public 0x03800000 #define MP0_SRAM 0x03900000 @@ -406,6 +408,7 @@ static int vega20_smu_init(struct pp_hwmgr *hwmgr) struct vega20_smumgr *priv; unsigned long tools_size = 0x19000; int ret = 0; + struct amdgpu_device *adev = hwmgr->adev; struct cgs_firmware_info info = {0}; @@ -505,6 +508,10 @@ static int vega20_smu_init(struct pp_hwmgr *hwmgr) priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].version = 0x01; priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffInt_t); + ret = smu_v11_0_i2c_eeprom_control_init(&adev->pm.smu_i2c); + if (ret) + goto err4; + return 0; err4: @@ -537,6 +544,9 @@ static int vega20_smu_fini(struct pp_hwmgr *hwmgr) { struct vega20_smumgr *priv = (struct vega20_smumgr *)(hwmgr->smu_backend); + struct amdgpu_device *adev = hwmgr->adev; + + smu_v11_0_i2c_eeprom_control_fini(&adev->pm.smu_i2c); if (priv) { amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle, @@ -560,6 +570,7 @@ static int vega20_smu_fini(struct pp_hwmgr *hwmgr) kfree(hwmgr->smu_backend); hwmgr->smu_backend = NULL; } + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c index 4ad8d6c14ee5e6a323dec8d129d0ae6dad4af184..49ff3756bd9fc1c6186b54a826e161bfce18598b 100644 --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c @@ -21,7 +21,6 @@ * */ -#include "pp_debug.h" #include #include "amdgpu.h" #include "amdgpu_smu.h" @@ -587,7 +586,7 @@ static int vega20_check_powerplay_table(struct smu_context *smu) static int vega20_run_btc_afll(struct smu_context *smu) { - return smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc); + return smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc, NULL); } #define FEATURE_MASK(feature) (1ULL << feature) @@ -670,13 +669,13 @@ vega20_set_single_dpm_table(struct smu_context *smu, ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex, - (clk_id << 16 | 0xFF)); + (clk_id << 16 
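The set_driver_table_location hunk above shows the usual idiom for handing a 64-bit DMA address to firmware through a 32-bit argument register: two messages carrying the high and low halves, split with the stock kernel helpers. A condensed sketch of that shape:

#include <linux/kernel.h>	/* upper_32_bits() / lower_32_bits() */

static int smu_set_table_addr(struct smu_context *smu, u64 mc_address)
{
	int ret;

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
					  upper_32_bits(mc_address), NULL);
	if (ret)
		return ret;
	return smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
					   lower_32_bits(mc_address), NULL);
}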
| 0xFF), + &num_of_levels); if (ret) { pr_err("[GetNumOfDpmLevel] failed to get dpm levels!"); return ret; } - smu_read_smc_arg(smu, &num_of_levels); if (!num_of_levels) { pr_err("[GetNumOfDpmLevel] number of clk levels is invalid!"); return -EINVAL; @@ -687,12 +686,12 @@ vega20_set_single_dpm_table(struct smu_context *smu, for (i = 0; i < num_of_levels; i++) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex, - (clk_id << 16 | i)); + (clk_id << 16 | i), + &clk); if (ret) { pr_err("[GetDpmFreqByIndex] failed to get dpm freq by index!"); return ret; } - smu_read_smc_arg(smu, &clk); if (!clk) { pr_err("[GetDpmFreqByIndex] clk value is invalid!"); return -EINVAL; @@ -1200,7 +1199,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max, single_dpm_table->dpm_state.soft_min_level; ret = smu_send_smc_msg_with_param(smu, (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq), - (PPCLK_GFXCLK << 16) | (freq & 0xffff)); + (PPCLK_GFXCLK << 16) | (freq & 0xffff), + NULL); if (ret) { pr_err("Failed to set soft %s gfxclk !\n", max ? "max" : "min"); @@ -1215,7 +1215,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max, single_dpm_table->dpm_state.soft_min_level; ret = smu_send_smc_msg_with_param(smu, (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq), - (PPCLK_UCLK << 16) | (freq & 0xffff)); + (PPCLK_UCLK << 16) | (freq & 0xffff), + NULL); if (ret) { pr_err("Failed to set soft %s memclk !\n", max ? "max" : "min"); @@ -1230,7 +1231,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max, single_dpm_table->dpm_state.soft_min_level; ret = smu_send_smc_msg_with_param(smu, (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq), - (PPCLK_SOCCLK << 16) | (freq & 0xffff)); + (PPCLK_SOCCLK << 16) | (freq & 0xffff), + NULL); if (ret) { pr_err("Failed to set soft %s socclk !\n", max ? "max" : "min"); @@ -1245,7 +1247,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max, single_dpm_table->dpm_state.soft_min_level; ret = smu_send_smc_msg_with_param(smu, (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq), - (PPCLK_FCLK << 16) | (freq & 0xffff)); + (PPCLK_FCLK << 16) | (freq & 0xffff), + NULL); if (ret) { pr_err("Failed to set soft %s fclk !\n", max ? 
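The vega20_set_single_dpm_table hunk above relies on GetDpmFreqByIndex's packed argument: the clock ID sits in bits 31:16, the level index in the low bits, and index 0xFF asks for the number of levels. A standalone sketch of the enumeration; DPM_ARG() and dump_dpm_levels() are illustrative helpers, not part of the driver:

#define DPM_ARG(clk_id, level)	(((clk_id) << 16) | ((level) & 0xFFFF))

static int dump_dpm_levels(struct smu_context *smu, int clk_id)
{
	uint32_t num_of_levels, clk, i;
	int ret;

	/* index 0xFF: firmware replies with the level count */
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
					  DPM_ARG(clk_id, 0xFF),
					  &num_of_levels);
	if (ret)
		return ret;

	for (i = 0; i < num_of_levels; i++) {
		ret = smu_send_smc_msg_with_param(smu,
						  SMU_MSG_GetDpmFreqByIndex,
						  DPM_ARG(clk_id, i), &clk);
		if (ret)
			return ret;
		pr_info("level %u: %u\n", i, clk);
	}
	return 0;
}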
"max" : "min"); @@ -1260,7 +1263,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max, if (!max) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq, - (PPCLK_DCEFCLK << 16) | (freq & 0xffff)); + (PPCLK_DCEFCLK << 16) | (freq & 0xffff), + NULL); if (ret) { pr_err("Failed to set hard min dcefclk !\n"); return ret; @@ -1421,7 +1425,9 @@ static int vega20_force_clk_levels(struct smu_context *smu, } ret = smu_send_smc_msg_with_param(smu, - SMU_MSG_SetMinLinkDpmByIndex, soft_min_level); + SMU_MSG_SetMinLinkDpmByIndex, + soft_min_level, + NULL); if (ret) pr_err("Failed to set min link dpm level!\n"); @@ -1477,13 +1483,13 @@ static int vega20_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu, ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetAVFSVoltageByDpm, - ((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq)); + ((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq), + voltage); if (ret) { pr_err("[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!"); return ret; } - smu_read_smc_arg(smu, voltage); *voltage = *voltage / VOLTAGE_SCALE; return 0; @@ -1956,8 +1962,10 @@ static int vega20_set_power_profile_mode(struct smu_context *smu, long *input, u workload_type = smu_workload_get_type(smu, smu->power_profile_mode); if (workload_type < 0) return -EINVAL; - smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type); + smu_send_smc_msg_with_param(smu, + SMU_MSG_SetWorkloadMask, + 1 << workload_type, + NULL); return ret; } @@ -2029,7 +2037,8 @@ vega20_set_uclk_to_highest_dpm_level(struct smu_context *smu, dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq, - (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level); + (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level, + NULL); if (ret) { pr_err("[%s] Set hard min uclk failed!", __func__); return ret; @@ -2047,7 +2056,7 @@ static int vega20_pre_display_config_changed(struct smu_context *smu) if (!smu->smu_dpm.dpm_context) return -EINVAL; - smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0); + smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL); ret = vega20_set_uclk_to_highest_dpm_level(smu, &dpm_table->mem_table); if (ret) @@ -2074,7 +2083,8 @@ static int vega20_display_config_changed(struct smu_context *smu) smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, - smu->display_config->num_display); + smu->display_config->num_display, + NULL); } return ret; @@ -2247,7 +2257,8 @@ vega20_notify_smc_display_config(struct smu_context *smu) if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetMinDeepSleepDcefclk, - min_clocks.dcef_clock_in_sr/100); + min_clocks.dcef_clock_in_sr/100, + NULL); if (ret) { pr_err("Attempt to set divider for DCEFCLK Failed!"); return ret; @@ -2262,7 +2273,8 @@ vega20_notify_smc_display_config(struct smu_context *smu) memtable->dpm_state.hard_min_level = min_clocks.memory_clock/100; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq, - (PPCLK_UCLK << 16) | memtable->dpm_state.hard_min_level); + (PPCLK_UCLK << 16) | memtable->dpm_state.hard_min_level, + NULL); if (ret) { pr_err("[%s] Set hard min uclk failed!", __func__); return ret; @@ -2853,8 +2865,10 @@ static int vega20_set_thermal_fan_table(struct smu_context *smu) struct smu_table_context *table_context = 
&smu->smu_table; PPTable_t *pptable = table_context->driver_pptable; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetFanTemperatureTarget, - (uint32_t)pptable->FanTargetTemperature); + ret = smu_send_smc_msg_with_param(smu, + SMU_MSG_SetFanTemperatureTarget, + (uint32_t)pptable->FanTargetTemperature, + NULL); return ret; } @@ -2864,15 +2878,13 @@ static int vega20_get_fan_speed_rpm(struct smu_context *smu, { int ret; - ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm); + ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm, speed); if (ret) { pr_err("Attempt to get current RPM from SMC Failed!\n"); return ret; } - smu_read_smc_arg(smu, speed); - return 0; } @@ -3137,7 +3149,7 @@ static int vega20_set_df_cstate(struct smu_context *smu, return -EINVAL; } - return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state); + return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL); } static int vega20_update_pcie_parameters(struct smu_context *smu, @@ -3155,7 +3167,8 @@ static int vega20_update_pcie_parameters(struct smu_context *smu, pptable->PcieLaneCount[i] : pcie_width_cap); ret = smu_send_smc_msg_with_param(smu, SMU_MSG_OverridePcieParameters, - smu_pcie_arg); + smu_pcie_arg, + NULL); } return ret; @@ -3229,7 +3242,6 @@ static const struct pptable_funcs vega20_ppt_funcs = { .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, .system_features_control = smu_v11_0_system_features_control, .send_smc_msg_with_param = smu_v11_0_send_msg_with_param, - .read_smc_arg = smu_v11_0_read_arg, .init_display_count = smu_v11_0_init_display_count, .set_allowed_mask = smu_v11_0_set_allowed_mask, .get_enabled_mask = smu_v11_0_get_enabled_mask, diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c index ea5cd1e1730494e60bca57643f65dc23bcfb7f00..e7933930a65770c3d63d9d72e90b86798f25a8b0 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c @@ -146,14 +146,14 @@ static const struct of_device_id komeda_of_match[] = { MODULE_DEVICE_TABLE(of, komeda_of_match); -static int komeda_rt_pm_suspend(struct device *dev) +static int __maybe_unused komeda_rt_pm_suspend(struct device *dev) { struct komeda_drv *mdrv = dev_get_drvdata(dev); return komeda_dev_suspend(mdrv->mdev); } -static int komeda_rt_pm_resume(struct device *dev) +static int __maybe_unused komeda_rt_pm_resume(struct device *dev) { struct komeda_drv *mdrv = dev_get_drvdata(dev); diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c index 952199cc0462e7cdefdeb3c9cf9e0677d360a18a..dce4672e3fc8d15529fde53e3e66984cfc6797d6 100644 --- a/drivers/gpu/drm/bochs/bochs_hw.c +++ b/drivers/gpu/drm/bochs/bochs_hw.c @@ -157,10 +157,8 @@ int bochs_hw_init(struct drm_device *dev) size = min(size, mem); } - if (pci_request_region(pdev, 0, "bochs-drm") != 0) { - DRM_ERROR("Cannot request framebuffer\n"); - return -EBUSY; - } + if (pci_request_region(pdev, 0, "bochs-drm") != 0) + DRM_WARN("Cannot request framebuffer, boot fb still active?\n"); bochs->fb_map = ioremap(addr, size); if (bochs->fb_map == NULL) { diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c index 3ec34cf6fdadcc3e9e6211fff9aaa275801a8b0b..2bc6e4f85171697a9fb7ff29831eb5316bba05af 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c +++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c @@ -210,8 +210,7 @@ static int 
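On the komeda hunk above: the runtime-PM callbacks are only referenced through SET_RUNTIME_PM_OPS(), which expands to nothing when CONFIG_PM is off, so without the annotation the functions trigger -Wunused-function in such builds. __maybe_unused is the standard fix, avoiding #ifdef clutter; a minimal sketch with hypothetical foo_* names:

#include <linux/pm.h>

static int __maybe_unused foo_rt_suspend(struct device *dev)
{
	return 0;	/* put the hardware into a low-power state here */
}

static int __maybe_unused foo_rt_resume(struct device *dev)
{
	return 0;	/* power the hardware back up here */
}

static const struct dev_pm_ops foo_pm_ops = {
	/* compiles out entirely without CONFIG_PM */
	SET_RUNTIME_PM_OPS(foo_rt_suspend, foo_rt_resume, NULL)
};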
anx6345_dp_link_training(struct anx6345 *anx6345) if (err) return err; - dpcd[0] = drm_dp_max_link_rate(anx6345->dpcd); - dpcd[0] = drm_dp_link_rate_to_bw_code(dpcd[0]); + dpcd[0] = dp_bw; err = regmap_write(anx6345->map[I2C_IDX_DPTX], SP_DP_MAIN_LINK_BW_SET_REG, dpcd[0]); if (err) diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index f85c15ad8486e61486139b659355ad3f863ba74b..383b1073d7de43cf1afe41d8d611b79ea5be3591 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -1624,28 +1624,34 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode) frame.colorspace = HDMI_COLORSPACE_RGB; /* Set up colorimetry */ - switch (hdmi->hdmi_data.enc_out_encoding) { - case V4L2_YCBCR_ENC_601: - if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV601) - frame.colorimetry = HDMI_COLORIMETRY_EXTENDED; - else + if (!hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) { + switch (hdmi->hdmi_data.enc_out_encoding) { + case V4L2_YCBCR_ENC_601: + if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV601) + frame.colorimetry = HDMI_COLORIMETRY_EXTENDED; + else + frame.colorimetry = HDMI_COLORIMETRY_ITU_601; + frame.extended_colorimetry = + HDMI_EXTENDED_COLORIMETRY_XV_YCC_601; + break; + case V4L2_YCBCR_ENC_709: + if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV709) + frame.colorimetry = HDMI_COLORIMETRY_EXTENDED; + else + frame.colorimetry = HDMI_COLORIMETRY_ITU_709; + frame.extended_colorimetry = + HDMI_EXTENDED_COLORIMETRY_XV_YCC_709; + break; + default: /* Carries no data */ frame.colorimetry = HDMI_COLORIMETRY_ITU_601; + frame.extended_colorimetry = + HDMI_EXTENDED_COLORIMETRY_XV_YCC_601; + break; + } + } else { + frame.colorimetry = HDMI_COLORIMETRY_NONE; frame.extended_colorimetry = - HDMI_EXTENDED_COLORIMETRY_XV_YCC_601; - break; - case V4L2_YCBCR_ENC_709: - if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV709) - frame.colorimetry = HDMI_COLORIMETRY_EXTENDED; - else - frame.colorimetry = HDMI_COLORIMETRY_ITU_709; - frame.extended_colorimetry = - HDMI_EXTENDED_COLORIMETRY_XV_YCC_709; - break; - default: /* Carries no data */ - frame.colorimetry = HDMI_COLORIMETRY_ITU_601; - frame.extended_colorimetry = - HDMI_EXTENDED_COLORIMETRY_XV_YCC_601; - break; + HDMI_EXTENDED_COLORIMETRY_XV_YCC_601; } frame.scan_mode = HDMI_SCAN_MODE_NONE; diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 402a690d1fe5653d40c38f849d54a8141393a6e1..e4c0ea03ae3a42bb52c90e402196f5e59ac28fe0 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -298,7 +298,7 @@ static inline int tc_poll_timeout(struct tc_data *tc, unsigned int addr, static int tc_aux_wait_busy(struct tc_data *tc) { - return tc_poll_timeout(tc, DP0_AUXSTATUS, AUX_BUSY, 0, 1000, 100000); + return tc_poll_timeout(tc, DP0_AUXSTATUS, AUX_BUSY, 0, 100, 100000); } static int tc_aux_write_data(struct tc_data *tc, const void *data, @@ -641,7 +641,7 @@ static int tc_aux_link_setup(struct tc_data *tc) if (ret) goto err; - ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1, 1000); + ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 100, 100000); if (ret == -ETIMEDOUT) { dev_err(tc->dev, "Timeout waiting for PHY to become ready"); return ret; @@ -877,7 +877,7 @@ static int tc_wait_link_training(struct tc_data *tc) int ret; ret = tc_poll_timeout(tc, DP0_LTSTAT, LT_LOOPDONE, - LT_LOOPDONE, 1, 1000); + LT_LOOPDONE, 500, 100000); if (ret) { 
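On the tc358767 hunks above: going by the helper's argument order, tc_poll_timeout()'s last two parameters are the per-iteration sleep and the total budget in microseconds, passed through to regmap_read_poll_timeout(). The change trades a 1 µs busy-poll for a 100-500 µs sleep with a correspondingly larger overall timeout, which is far kinder to the I2C/SPI bus behind the regmap. A sketch of the underlying call (DP_PHY_CTRL and PHY_RDY are the driver's own register defines, assumed here):

#include <linux/regmap.h>

static int wait_phy_ready(struct regmap *map)
{
	unsigned int val;

	/* poll DP_PHY_CTRL every 500 us, give up after 100 ms total */
	return regmap_read_poll_timeout(map, DP_PHY_CTRL, val,
					val & PHY_RDY, 500, 100000);
}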
dev_err(tc->dev, "Link training timeout waiting for LT_LOOPDONE!\n"); return ret; @@ -950,7 +950,7 @@ static int tc_main_link_enable(struct tc_data *tc) dp_phy_ctrl &= ~(DP_PHY_RST | PHY_M1_RST | PHY_M0_RST); ret = regmap_write(tc->regmap, DP_PHY_CTRL, dp_phy_ctrl); - ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1, 1000); + ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 500, 100000); if (ret) { dev_err(dev, "timeout waiting for phy become ready"); return ret; diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c index 40c4d4a5517b53e47f0f485c6deb3c64015c6968..e3eb6364c0f707d9f6c256c5d4526df581769097 100644 --- a/drivers/gpu/drm/bridge/ti-tfp410.c +++ b/drivers/gpu/drm/bridge/ti-tfp410.c @@ -150,7 +150,8 @@ static int tfp410_attach(struct drm_bridge *bridge, dvi->next_bridge->type, dvi->next_bridge->ddc); if (ret) { - dev_err(dvi->dev, "drm_connector_init() failed: %d\n", ret); + dev_err(dvi->dev, "drm_connector_init_with_ddc() failed: %d\n", + ret); return ret; } diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c index 841794e19eb6c08272ef1fc7d515a91f17e250db..7443114bd713c27611d40fc50912e457aef6d321 100644 --- a/drivers/gpu/drm/drm_client_modeset.c +++ b/drivers/gpu/drm/drm_client_modeset.c @@ -951,7 +951,8 @@ bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation) * depending on the hardware this may require the framebuffer * to be in a specific tiling format. */ - if ((*rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_180 || + if (((*rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_0 && + (*rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_180) || !plane->rotation_property) return false; diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index a4b98f8055f426aa77465bf5888dbd2c7d493352..c6fbe6e6bc9dcf7807d5422e621d3adbd1da5641 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -1280,6 +1280,85 @@ drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch) #undef DEVICE_ID_ANY #undef DEVICE_ID +struct edid_quirk { + u8 mfg_id[2]; + u8 prod_id[2]; + u32 quirks; +}; + +#define MFG(first, second) { (first), (second) } +#define PROD_ID(first, second) { (first), (second) } + +/* + * Some devices have unreliable OUIDs where they don't set the device ID + * correctly, and as a result we need to use the EDID for finding additional + * DP quirks in such cases. + */ +static const struct edid_quirk edid_quirk_list[] = { + /* Optional 4K AMOLED panel in the ThinkPad X1 Extreme 2nd Generation + * only supports DPCD backlight controls + */ + { MFG(0x4c, 0x83), PROD_ID(0x41, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) }, + /* + * Some Dell CML 2020 systems have panels support both AUX and PWM + * backlight control, and some only support AUX backlight control. All + * said panels start up in AUX mode by default, and we don't have any + * support for disabling HDR mode on these panels which would be + * required to switch to PWM backlight control mode (plus, I'm not + * even sure we want PWM backlight controls over DPCD backlight + * controls anyway...). Until we have a better way of detecting these, + * force DPCD backlight mode on all of them. 
+ */ + { MFG(0x06, 0xaf), PROD_ID(0x9b, 0x32), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) }, + { MFG(0x06, 0xaf), PROD_ID(0xeb, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) }, + { MFG(0x4d, 0x10), PROD_ID(0xc7, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) }, + { MFG(0x4d, 0x10), PROD_ID(0xe6, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) }, +}; + +#undef MFG +#undef PROD_ID + +/** + * drm_dp_get_edid_quirks() - Check the EDID of a DP device to find additional + * DP-specific quirks + * @edid: The EDID to check + * + * While OUIDs are meant to be used to recognize a DisplayPort device, a lot + * of manufacturers don't seem to like following standards and neglect to fill + * the dev-ID in, making it impossible to only use OUIDs for determining + * quirks in some cases. This function can be used to check the EDID and look + * up any additional DP quirks. The bits returned by this function correspond + * to the quirk bits in &drm_dp_quirk. + * + * Returns: a bitmask of quirks, if any. The driver can check this using + * drm_dp_has_quirk(). + */ +u32 drm_dp_get_edid_quirks(const struct edid *edid) +{ + const struct edid_quirk *quirk; + u32 quirks = 0; + int i; + + if (!edid) + return 0; + + for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) { + quirk = &edid_quirk_list[i]; + if (memcmp(quirk->mfg_id, edid->mfg_id, + sizeof(edid->mfg_id)) == 0 && + memcmp(quirk->prod_id, edid->prod_code, + sizeof(edid->prod_code)) == 0) + quirks |= quirk->quirks; + } + + DRM_DEBUG_KMS("DP sink: EDID mfg %*phD prod-ID %*phD quirks: 0x%04x\n", + (int)sizeof(edid->mfg_id), edid->mfg_id, + (int)sizeof(edid->prod_code), edid->prod_code, quirks); + + return quirks; +} +EXPORT_SYMBOL(drm_dp_get_edid_quirks); + /** * drm_dp_read_desc - read sink/branch descriptor from DPCD * @aux: DisplayPort AUX channel diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 4fbec48075d398fae17a7d834ad8bcf245482db9..70c4b7afed12494951da8fdaa0132733e87ad8bf 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -1937,7 +1937,7 @@ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port, return parent_lct + 1; } -static bool drm_dp_mst_is_dp_mst_end_device(u8 pdt, bool mcs) +static bool drm_dp_mst_is_end_device(u8 pdt, bool mcs) { switch (pdt) { case DP_PEER_DEVICE_DP_LEGACY_CONV: @@ -1967,13 +1967,13 @@ drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt, /* Teardown the old pdt, if there is one */ if (port->pdt != DP_PEER_DEVICE_NONE) { - if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) { + if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) { /* * If the new PDT would also have an i2c bus, * don't bother with reregistering it */ if (new_pdt != DP_PEER_DEVICE_NONE && - drm_dp_mst_is_dp_mst_end_device(new_pdt, new_mcs)) { + drm_dp_mst_is_end_device(new_pdt, new_mcs)) { port->pdt = new_pdt; port->mcs = new_mcs; return 0; @@ -1993,7 +1993,7 @@ drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt, port->mcs = new_mcs; if (port->pdt != DP_PEER_DEVICE_NONE) { - if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) { + if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) { /* add i2c over sideband */ ret = drm_dp_mst_register_i2c_bus(&port->aux); } else { @@ -2172,7 +2172,7 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb, } if (port->pdt != DP_PEER_DEVICE_NONE && - drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) { + drm_dp_mst_is_end_device(port->pdt, port->mcs)) { port->cached_edid = drm_get_edid(port->connector, 
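With drm_dp_get_edid_quirks() added above, drm_dp_has_quirk() gains a second parameter for the EDID-derived bits (visible further down, where the MST code passes 0). A sketch of how a panel driver might consume the combined quirk sources; force_dpcd is an illustrative local:

	/* merge OUI-based and EDID-based quirks before testing a bit */
	u32 edid_quirks = drm_dp_get_edid_quirks(edid);
	bool force_dpcd = drm_dp_has_quirk(&desc, edid_quirks,
					   DP_QUIRK_FORCE_DPCD_BACKLIGHT);

	if (force_dpcd)
		; /* drive brightness via DPCD, not the eDP PWM pin */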
&port->aux.ddc); drm_connector_set_tile_property(port->connector); @@ -2302,14 +2302,18 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, mutex_unlock(&mgr->lock); } - if (old_ddps != port->ddps) { - if (port->ddps) { - if (!port->input) { - drm_dp_send_enum_path_resources(mgr, mstb, - port); - } + /* + * Reprobe PBN caps on both hotplug, and when re-probing the link + * for our parent mstb + */ + if (old_ddps != port->ddps || !created) { + if (port->ddps && !port->input) { + ret = drm_dp_send_enum_path_resources(mgr, mstb, + port); + if (ret == 1) + changed = true; } else { - port->available_pbn = 0; + port->full_pbn = 0; } } @@ -2401,11 +2405,10 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, port->ddps = conn_stat->displayport_device_plug_status; if (old_ddps != port->ddps) { - if (port->ddps) { - dowork = true; - } else { - port->available_pbn = 0; - } + if (port->ddps && !port->input) + drm_dp_send_enum_path_resources(mgr, mstb, port); + else + port->full_pbn = 0; } new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type; @@ -2556,13 +2559,6 @@ static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mg if (port->input || !port->ddps) continue; - if (!port->available_pbn) { - drm_modeset_lock(&mgr->base.lock, NULL); - drm_dp_send_enum_path_resources(mgr, mstb, port); - drm_modeset_unlock(&mgr->base.lock); - changed = true; - } - if (port->mstb) mstb_child = drm_dp_mst_topology_get_mstb_validated( mgr, port->mstb); @@ -2996,6 +2992,7 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); if (ret > 0) { + ret = 0; path_res = &txmsg->reply.u.path_resources; if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { @@ -3008,14 +3005,22 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, path_res->port_number, path_res->full_payload_bw_number, path_res->avail_payload_bw_number); - port->available_pbn = - path_res->avail_payload_bw_number; + + /* + * If something changed, make sure we send a + * hotplug + */ + if (port->full_pbn != path_res->full_payload_bw_number || + port->fec_capable != path_res->fec_capable) + ret = 1; + + port->full_pbn = path_res->full_payload_bw_number; port->fec_capable = path_res->fec_capable; } } kfree(txmsg); - return 0; + return ret; } static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb) @@ -3589,13 +3594,9 @@ drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb) /* The link address will need to be re-sent on resume */ mstb->link_address_sent = false; - list_for_each_entry(port, &mstb->ports, next) { - /* The PBN for each port will also need to be re-probed */ - port->available_pbn = 0; - + list_for_each_entry(port, &mstb->ports, next) if (port->mstb) drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb); - } } /** @@ -4858,41 +4859,102 @@ static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port, return false; } -static inline -int drm_dp_mst_atomic_check_bw_limit(struct drm_dp_mst_branch *branch, - struct drm_dp_mst_topology_state *mst_state) +static int +drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port, + struct drm_dp_mst_topology_state *state); + +static int +drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb, + struct drm_dp_mst_topology_state *state) { - struct drm_dp_mst_port *port; struct drm_dp_vcpi_allocation *vcpi; - int pbn_limit = 0, pbn_used = 0; + struct drm_dp_mst_port 
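The MST hunks above rename available_pbn to full_pbn (the full payload bandwidth of the link to that port) and re-probe it on every hotplug rather than lazily in the link-address work. To make that re-probe useful, drm_dp_send_enum_path_resources() now returns 1 instead of 0 when the cached caps actually moved, giving callers a hotplug trigger:

	ret = drm_dp_send_enum_path_resources(mgr, mstb, port);
	if (ret < 0)
		goto out;		/* sideband failure */
	if (ret == 1)
		changed = true;		/* full_pbn/fec_capable changed:
					 * send a hotplug to userspace */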
*port; + int pbn_used = 0, ret; + bool found = false; - list_for_each_entry(port, &branch->ports, next) { - if (port->mstb) - if (drm_dp_mst_atomic_check_bw_limit(port->mstb, mst_state)) - return -ENOSPC; + /* Check that we have at least one port in our state that's downstream + * of this branch, otherwise we can skip this branch + */ + list_for_each_entry(vcpi, &state->vcpis, next) { + if (!vcpi->pbn || + !drm_dp_mst_port_downstream_of_branch(vcpi->port, mstb)) + continue; - if (port->available_pbn > 0) - pbn_limit = port->available_pbn; + found = true; + break; } - DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch has %d PBN available\n", - branch, pbn_limit); + if (!found) + return 0; - list_for_each_entry(vcpi, &mst_state->vcpis, next) { - if (!vcpi->pbn) - continue; + if (mstb->port_parent) + DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]\n", + mstb->port_parent->parent, mstb->port_parent, + mstb); + else + DRM_DEBUG_ATOMIC("[MSTB:%p] Checking bandwidth limits\n", + mstb); + + list_for_each_entry(port, &mstb->ports, next) { + ret = drm_dp_mst_atomic_check_port_bw_limit(port, state); + if (ret < 0) + return ret; + + pbn_used += ret; + } + + return pbn_used; +} + +static int +drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port, + struct drm_dp_mst_topology_state *state) +{ + struct drm_dp_vcpi_allocation *vcpi; + int pbn_used = 0; + + if (port->pdt == DP_PEER_DEVICE_NONE) + return 0; + + if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) { + bool found = false; + + list_for_each_entry(vcpi, &state->vcpis, next) { + if (vcpi->port != port) + continue; + if (!vcpi->pbn) + return 0; + + found = true; + break; + } + if (!found) + return 0; - if (drm_dp_mst_port_downstream_of_branch(vcpi->port, branch)) - pbn_used += vcpi->pbn; + /* This should never happen, as it means we tried to + * set a mode before querying the full_pbn + */ + if (WARN_ON(!port->full_pbn)) + return -EINVAL; + + pbn_used = vcpi->pbn; + } else { + pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb, + state); + if (pbn_used <= 0) + return pbn_used; } - DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch used %d PBN\n", - branch, pbn_used); - if (pbn_used > pbn_limit) { - DRM_DEBUG_ATOMIC("[MST BRANCH:%p] No available bandwidth\n", - branch); + if (pbn_used > port->full_pbn) { + DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n", + port->parent, port, pbn_used, + port->full_pbn); return -ENOSPC; } - return 0; + + DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n", + port->parent, port, pbn_used, port->full_pbn); + + return pbn_used; } static inline int @@ -5090,9 +5152,15 @@ int drm_dp_mst_atomic_check(struct drm_atomic_state *state) ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state); if (ret) break; - ret = drm_dp_mst_atomic_check_bw_limit(mgr->mst_primary, mst_state); - if (ret) + + mutex_lock(&mgr->lock); + ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary, + mst_state); + mutex_unlock(&mgr->lock); + if (ret < 0) break; + else + ret = 0; } return ret; @@ -5490,7 +5558,8 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port) if (drm_dp_read_desc(port->mgr->aux, &desc, true)) return NULL; - if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) && + if (drm_dp_has_quirk(&desc, 0, + DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) && port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 && port->parent == port->mgr->mst_primary) { u8 downstreamport; diff --git a/drivers/gpu/drm/drm_file.c 
b/drivers/gpu/drm/drm_file.c index c4c704e019618adb81e1f7af185436c369721c21..eb009d3ab48fa318ddd51dae56784d288ccef17d 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -48,6 +48,11 @@ #include "drm_internal.h" #include "drm_legacy.h" +#if defined(CONFIG_MMU) && defined(CONFIG_TRANSPARENT_HUGEPAGE) +#include +#include +#endif + /* from BKL pushdown */ DEFINE_MUTEX(drm_global_mutex); @@ -872,3 +877,139 @@ struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags) return file; } EXPORT_SYMBOL_FOR_TESTS_ONLY(mock_drm_getfile); + +#ifdef CONFIG_MMU +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +/* + * drm_addr_inflate() attempts to construct an aligned area by inflating + * the area size and skipping the unaligned start of the area. + * adapted from shmem_get_unmapped_area() + */ +static unsigned long drm_addr_inflate(unsigned long addr, + unsigned long len, + unsigned long pgoff, + unsigned long flags, + unsigned long huge_size) +{ + unsigned long offset, inflated_len; + unsigned long inflated_addr; + unsigned long inflated_offset; + + offset = (pgoff << PAGE_SHIFT) & (huge_size - 1); + if (offset && offset + len < 2 * huge_size) + return addr; + if ((addr & (huge_size - 1)) == offset) + return addr; + + inflated_len = len + huge_size - PAGE_SIZE; + if (inflated_len > TASK_SIZE) + return addr; + if (inflated_len < len) + return addr; + + inflated_addr = current->mm->get_unmapped_area(NULL, 0, inflated_len, + 0, flags); + if (IS_ERR_VALUE(inflated_addr)) + return addr; + if (inflated_addr & ~PAGE_MASK) + return addr; + + inflated_offset = inflated_addr & (huge_size - 1); + inflated_addr += offset - inflated_offset; + if (inflated_offset > offset) + inflated_addr += huge_size; + + if (inflated_addr > TASK_SIZE - len) + return addr; + + return inflated_addr; +} + +/** + * drm_get_unmapped_area() - Get an unused user-space virtual memory area + * suitable for huge page table entries. + * @file: The struct file representing the address space being mmap()'d. + * @uaddr: Start address suggested by user-space. + * @len: Length of the area. + * @pgoff: The page offset into the address space. + * @flags: mmap flags + * @mgr: The address space manager used by the drm driver. This argument can + * probably be removed at some point when all drivers use the same + * address space manager. + * + * This function attempts to find an unused user-space virtual memory area + * that can accommodate the size we want to map, and that is properly + * aligned to facilitate huge page table entries matching actual + * huge pages or huge page aligned memory in buffer objects. Buffer objects + * are assumed to start at huge page boundary pfns (io memory) or be + * populated by huge pages aligned to the start of the buffer object + * (system- or coherent memory). Adapted from shmem_get_unmapped_area. + * + * Return: aligned user-space address. + */ +unsigned long drm_get_unmapped_area(struct file *file, + unsigned long uaddr, unsigned long len, + unsigned long pgoff, unsigned long flags, + struct drm_vma_offset_manager *mgr) +{ + unsigned long addr; + unsigned long inflated_addr; + struct drm_vma_offset_node *node; + + if (len > TASK_SIZE) + return -ENOMEM; + + /* + * @pgoff is the file page-offset the huge page boundaries of + * which typically aligns to physical address huge page boundaries. + * That's not true for DRM, however, where physical address huge + * page boundaries instead are aligned with the offset from + * buffer object start. 
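The core of drm_addr_inflate() above: request an area inflated by (huge page size - PAGE_SIZE) extra bytes, then slide to the first address inside it whose offset within a huge-page block matches the mapping's file-offset colouring, so PMD entries can be used. A simplified standalone illustration of that arithmetic for a 2 MiB boundary (the kernel version additionally validates TASK_SIZE bounds and falls back to the unaligned address on any failure):

#include <stdint.h>

#define HUGE_SZ		(2UL << 20)	/* one PMD: 2 MiB */

static uintptr_t pick_aligned(uintptr_t inflated_start, uintptr_t offset)
{
	uintptr_t inflated_offset = inflated_start & (HUGE_SZ - 1);
	uintptr_t addr = inflated_start + offset - inflated_offset;

	/* keep (addr % HUGE_SZ) == offset; move up a block if we slid back */
	if (inflated_offset > offset)
		addr += HUGE_SZ;
	return addr;
}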
So adjust @pgoff to be the offset from + * buffer object start. + */ + drm_vma_offset_lock_lookup(mgr); + node = drm_vma_offset_lookup_locked(mgr, pgoff, 1); + if (node) + pgoff -= node->vm_node.start; + drm_vma_offset_unlock_lookup(mgr); + + addr = current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags); + if (IS_ERR_VALUE(addr)) + return addr; + if (addr & ~PAGE_MASK) + return addr; + if (addr > TASK_SIZE - len) + return addr; + + if (len < HPAGE_PMD_SIZE) + return addr; + if (flags & MAP_FIXED) + return addr; + /* + * Our priority is to support MAP_SHARED mapped hugely; + * and support MAP_PRIVATE mapped hugely too, until it is COWed. + * But if caller specified an address hint, respect that as before. + */ + if (uaddr) + return addr; + + inflated_addr = drm_addr_inflate(addr, len, pgoff, flags, + HPAGE_PMD_SIZE); + + if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) && + len >= HPAGE_PUD_SIZE) + inflated_addr = drm_addr_inflate(inflated_addr, len, pgoff, + flags, HPAGE_PUD_SIZE); + return inflated_addr; +} +#else /* CONFIG_TRANSPARENT_HUGEPAGE */ +unsigned long drm_get_unmapped_area(struct file *file, + unsigned long uaddr, unsigned long len, + unsigned long pgoff, unsigned long flags, + struct drm_vma_offset_manager *mgr) +{ + return current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags); +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +EXPORT_SYMBOL_GPL(drm_get_unmapped_area); +#endif /* CONFIG_MMU */ diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index a421a2eed48ad1b3bcf3cc4523280c49c98a5791..df31e5782eed1b372cc6076da54250c9eace0159 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -254,11 +254,16 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem) if (ret) goto err_zero_use; - if (obj->import_attach) + if (obj->import_attach) { shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf); - else + } else { + pgprot_t prot = PAGE_KERNEL; + + if (!shmem->map_cached) + prot = pgprot_writecombine(prot); shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT, - VM_MAP, pgprot_writecombine(PAGE_KERNEL)); + VM_MAP, prot); + } if (!shmem->vaddr) { DRM_DEBUG_KMS("Failed to vmap pages\n"); @@ -540,8 +545,9 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) } vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND; - vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); - vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + if (!shmem->map_cached) + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); vma->vm_ops = &drm_gem_shmem_vm_ops; return 0; diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c index b481cafdde280bbaddf0e9168c448fafba95d095..825abe38201acfe6aecd742f89b2b5b3e9165df0 100644 --- a/drivers/gpu/drm/drm_lease.c +++ b/drivers/gpu/drm/drm_lease.c @@ -542,10 +542,12 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev, } DRM_DEBUG_LEASE("Creating lease\n"); + /* lessee will take the ownership of leases */ lessee = drm_lease_create(lessor, &leases); if (IS_ERR(lessee)) { ret = PTR_ERR(lessee); + idr_destroy(&leases); goto out_leases; } @@ -580,7 +582,6 @@ out_lessee: out_leases: put_unused_fd(fd); - idr_destroy(&leases); DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl failed: %d\n", ret); return ret; diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 
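On the drm_lease hunk above: once drm_lease_create() succeeds, the lessee master owns the entries in the leases idr, so post-create error paths that fell through to the shared out_leases exit were destroying an idr that was no longer theirs. The fix makes ownership explicit, destroying only in the create-failure branch:

	lessee = drm_lease_create(lessor, &leases);
	if (IS_ERR(lessee)) {
		ret = PTR_ERR(lessee);
		idr_destroy(&leases);	/* create refused it: still ours */
		goto out_leases;
	}
	/* success: the lessee took ownership, no idr_destroy() past here */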
10336b144c722b5aa03bca91d3d28685c009bf72..d4d64518e11b8fc06f45eddf2673a8c6ba0bfe34 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -1698,6 +1698,13 @@ static int drm_mode_parse_cmdline_options(const char *str, if (rotation && freestanding) return -EINVAL; + if (!(rotation & DRM_MODE_ROTATE_MASK)) + rotation |= DRM_MODE_ROTATE_0; + + /* Make sure there is exactly one rotation defined */ + if (!is_power_of_2(rotation & DRM_MODE_ROTATE_MASK)) + return -EINVAL; + mode->rotation_reflection = rotation; return 0; diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 86d9b0e45c8c6325e391be65da146b63357c78fe..1de2cde2277ca62a17f67dfcf487dfb7c6133fb3 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -967,7 +967,7 @@ int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, index = 0; for_each_sg(sgt->sgl, sg, sgt->nents, count) { - len = sg->length; + len = sg_dma_len(sg); page = sg_page(sg); addr = sg_dma_address(sg); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c index 32d9fac587f9a2157d28ab3cfaee92c4505e4c68..76d38561c91031c06bd4a39fd13782695813b417 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c @@ -12,6 +12,7 @@ #include "common.xml.h" #include "state.xml.h" +#include "state_blt.xml.h" #include "state_hi.xml.h" #include "state_3d.xml.h" #include "cmdstream.xml.h" @@ -233,6 +234,8 @@ void etnaviv_buffer_end(struct etnaviv_gpu *gpu) struct etnaviv_cmdbuf *buffer = &gpu->buffer; unsigned int waitlink_offset = buffer->user_size - 16; u32 link_target, flush = 0; + bool has_blt = !!(gpu->identity.minor_features5 & + chipMinorFeatures5_BLT_ENGINE); lockdep_assert_held(&gpu->lock); @@ -248,16 +251,38 @@ void etnaviv_buffer_end(struct etnaviv_gpu *gpu) if (flush) { unsigned int dwords = 7; + if (has_blt) + dwords += 10; + link_target = etnaviv_buffer_reserve(gpu, buffer, dwords); CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE); CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE); + if (has_blt) { + CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1); + CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT); + CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT); + CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0); + } CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush); - if (gpu->exec_state == ETNA_PIPE_3D) - CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE, - VIVS_TS_FLUSH_CACHE_FLUSH); + if (gpu->exec_state == ETNA_PIPE_3D) { + if (has_blt) { + CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1); + CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1); + CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0); + } else { + CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE, + VIVS_TS_FLUSH_CACHE_FLUSH); + } + } CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE); CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE); + if (has_blt) { + CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1); + CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT); + CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT); + CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0); + } CMD_END(buffer); etnaviv_buffer_replace_wait(buffer, waitlink_offset, @@ -323,6 +348,8 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state, bool switch_mmu_context = gpu->mmu_context != mmu_context; unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq); bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq; + bool has_blt = 
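The one-liner drm_prime fix above is worth a note: after dma_map_sg(), an IOMMU may coalesce entries, so the DMA view of a scatterlist entry (sg_dma_address()/sg_dma_len()) can differ from the CPU view (sg_page()/sg->length). Code walking DMA addresses must pair them with the DMA length; a minimal sketch of the correct pairing:

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

static void program_dma(struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		dma_addr_t addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);	/* not sg->length */

		/* hand [addr, addr + len) to the device here */
		(void)addr;
	}
}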
!!(gpu->identity.minor_features5 & + chipMinorFeatures5_BLT_ENGINE); lockdep_assert_held(&gpu->lock); @@ -433,6 +460,15 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state, * 2 semaphore stall + 1 event + 1 wait + 1 link. */ return_dwords = 7; + + /* + * When the BLT engine is present we need 6 more dwords in the return + * target: 3 enable/flush/disable + 4 enable/semaphore stall/disable, + * but we don't need the normal TS flush state. + */ + if (has_blt) + return_dwords += 6; + return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords); CMD_LINK(cmdbuf, return_dwords, return_target); @@ -447,11 +483,25 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state, CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR); - CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE, - VIVS_TS_FLUSH_CACHE_FLUSH); + if (has_blt) { + CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1); + CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1); + CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0); + } else { + CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE, + VIVS_TS_FLUSH_CACHE_FLUSH); + } } CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE); CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE); + + if (has_blt) { + CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1); + CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT); + CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT); + CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0); + } + CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) | VIVS_GL_EVENT_FROM_PE); CMD_WAIT(buffer); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index 6b43c1c94e8f4c318b153dab1231be1c8dcaf379..a8685b2e1803cf91318fd2b50960915710426353 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -551,6 +551,7 @@ static int etnaviv_bind(struct device *dev) mutex_init(&priv->gem_lock); INIT_LIST_HEAD(&priv->gem_list); priv->num_gpus = 0; + priv->shm_gfp_mask = GFP_HIGHUSER | __GFP_RETRY_MAYFAIL | __GFP_NOWARN; priv->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(drm->dev); if (IS_ERR(priv->cmdbuf_suballoc)) { diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h index efc656efeb0fd1a844796ace71d8a8ce100a3780..4d8dc9236e5fd6bdf9dd361343ce7a2b74ecbd6a 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h @@ -35,6 +35,7 @@ struct etnaviv_drm_private { int num_gpus; struct device_dma_parameters dma_parms; struct etnaviv_gpu *gpu[ETNA_MAX_PIPES]; + gfp_t shm_gfp_mask; struct etnaviv_cmdbuf_suballoc *cmdbuf_suballoc; struct etnaviv_iommu_global *mmu_global; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 6adea180d62995717406da88f6bd3694be53d74a..dc9ef302f517607f7d0b7ef98595c8bfa393dbfd 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -602,6 +602,7 @@ static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags, int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file, u32 size, u32 flags, u32 *handle) { + struct etnaviv_drm_private *priv = dev->dev_private; struct drm_gem_object *obj = NULL; int ret; @@ -624,8 +625,7 @@ int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file, * above new_inode() why this is required _and_ expected if you're * going to pin these pages. 
*/ - mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER | - __GFP_RETRY_MAYFAIL | __GFP_NOWARN); + mapping_set_gfp_mask(obj->filp->f_mapping, priv->shm_gfp_mask); etnaviv_gem_obj_add(dev, obj); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h index 6b68fe16041b2307a284ec2f78d54049e4c898be..98e60df882b688bf16edfff1930f26553e353748 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h @@ -105,7 +105,7 @@ struct etnaviv_gem_submit { unsigned int nr_pmrs; struct etnaviv_perfmon_request *pmrs; unsigned int nr_bos; - struct etnaviv_gem_submit_bo bos[0]; + struct etnaviv_gem_submit_bo bos[]; /* No new members here, the previous one is variable-length! */ }; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index 799ec20b267dc991f71571596acab277d7307d0c..a31eeff2b297a5cb56bb4161a7c572c8948f7c2c 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -333,9 +333,13 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu) gpu->identity.revision = etnaviv_field(chipIdentity, VIVS_HI_CHIP_IDENTITY_REVISION); } else { + u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE); gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL); gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV); + gpu->identity.product_id = gpu_read(gpu, VIVS_HI_CHIP_PRODUCT_ID); + gpu->identity.customer_id = gpu_read(gpu, VIVS_HI_CHIP_CUSTOMER_ID); + gpu->identity.eco_id = gpu_read(gpu, VIVS_HI_CHIP_ECO_ID); /* * !!!! HACK ALERT !!!! @@ -350,7 +354,6 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu) /* Another special case */ if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) { - u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE); u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME); if (chipDate == 0x20080814 && chipTime == 0x12051100) { @@ -373,6 +376,12 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu) gpu->identity.model = chipModel_GC3000; gpu->identity.revision &= 0xffff; } + + if (etnaviv_is_model_rev(gpu, GC1000, 0x5037) && (chipDate == 0x20120617)) + gpu->identity.eco_id = 1; + + if (etnaviv_is_model_rev(gpu, GC320, 0x5303) && (chipDate == 0x20140511)) + gpu->identity.eco_id = 1; } dev_info(gpu->dev, "model: GC%x, revision: %x\n", @@ -506,7 +515,7 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu) /* read idle register. */ idle = gpu_read(gpu, VIVS_HI_IDLE_STATE); - /* try reseting again if FE it not idle */ + /* try resetting again if FE is not idle */ if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) { dev_dbg(gpu->dev, "FE is not idle\n"); continue; @@ -772,6 +781,14 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) gpu->identity.features &= ~chipFeatures_FAST_CLEAR; } + /* + * If the GPU is part of a system with DMA addressing limitations, + * request pages for our SHM backend buffers from the DMA32 zone to + * hopefully avoid performance killing SWIOTLB bounce buffering. 
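The etnaviv hunks above centralize the shmem GFP policy in the drm private so a device-wide constraint decided once at init applies to every later buffer allocation; the GPU-init hunk just below uses that to add GFP_DMA32 when the device cannot address all of RAM, avoiding SWIOTLB bouncing. The three stages in one sketch:

	/* at bind time: one default policy for all buffers */
	priv->shm_gfp_mask = GFP_HIGHUSER | __GFP_RETRY_MAYFAIL |
			     __GFP_NOWARN;

	/* at GPU init: tighten it for DMA-limited devices */
	if (dma_addressing_limited(gpu->dev))
		priv->shm_gfp_mask |= GFP_DMA32;  /* pages below 4 GiB */

	/* at BO creation: apply to the shmem mapping backing the object */
	mapping_set_gfp_mask(obj->filp->f_mapping, priv->shm_gfp_mask);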
+ */ + if (dma_addressing_limited(gpu->dev)) + priv->shm_gfp_mask |= GFP_DMA32; + /* Create buffer: */ ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &gpu->buffer, PAGE_SIZE); @@ -851,6 +868,13 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m) verify_dma(gpu, &debug); + seq_puts(m, "\tidentity\n"); + seq_printf(m, "\t model: 0x%x\n", gpu->identity.model); + seq_printf(m, "\t revision: 0x%x\n", gpu->identity.revision); + seq_printf(m, "\t product_id: 0x%x\n", gpu->identity.product_id); + seq_printf(m, "\t customer_id: 0x%x\n", gpu->identity.customer_id); + seq_printf(m, "\t eco_id: 0x%x\n", gpu->identity.eco_id); + seq_puts(m, "\tfeatures\n"); seq_printf(m, "\t major_features: 0x%08x\n", gpu->identity.features); @@ -930,6 +954,20 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m) seq_puts(m, "\t FP is not idle\n"); if ((idle & VIVS_HI_IDLE_STATE_TS) == 0) seq_puts(m, "\t TS is not idle\n"); + if ((idle & VIVS_HI_IDLE_STATE_BL) == 0) + seq_puts(m, "\t BL is not idle\n"); + if ((idle & VIVS_HI_IDLE_STATE_ASYNCFE) == 0) + seq_puts(m, "\t ASYNCFE is not idle\n"); + if ((idle & VIVS_HI_IDLE_STATE_MC) == 0) + seq_puts(m, "\t MC is not idle\n"); + if ((idle & VIVS_HI_IDLE_STATE_PPA) == 0) + seq_puts(m, "\t PPA is not idle\n"); + if ((idle & VIVS_HI_IDLE_STATE_WD) == 0) + seq_puts(m, "\t WD is not idle\n"); + if ((idle & VIVS_HI_IDLE_STATE_NN) == 0) + seq_puts(m, "\t NN is not idle\n"); + if ((idle & VIVS_HI_IDLE_STATE_TP) == 0) + seq_puts(m, "\t TP is not idle\n"); if (idle & VIVS_HI_IDLE_STATE_AXI_LP) seq_puts(m, "\t AXI low power mode\n"); @@ -1805,11 +1843,15 @@ static int etnaviv_gpu_rpm_suspend(struct device *dev) if (atomic_read(&gpu->sched.hw_rq_count)) return -EBUSY; - /* Check whether the hardware (except FE) is idle */ - mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE; + /* Check whether the hardware (except FE and MC) is idle */ + mask = gpu->idle_mask & ~(VIVS_HI_IDLE_STATE_FE | + VIVS_HI_IDLE_STATE_MC); idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask; - if (idle != mask) + if (idle != mask) { + dev_warn_ratelimited(dev, "GPU not yet idle, mask: 0x%08x\n", + idle); return -EBUSY; + } return etnaviv_gpu_hw_suspend(gpu); } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h index 97bb48042b4d64ed85287c7dee87af05aa574099..8ea48697d13218da8f2cfdc2e657932365f48391 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h @@ -15,11 +15,11 @@ struct etnaviv_gem_submit; struct etnaviv_vram_mapping; struct etnaviv_chip_identity { - /* Chip model. */ u32 model; - - /* Revision value.*/ u32 revision; + u32 product_id; + u32 customer_id; + u32 eco_id; /* Supported feature fields. 
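On the runtime-suspend hunk above: etnaviv parks the front end in a WAIT/LINK loop (see the waitlink handling in etnaviv_buffer.c earlier), so FE never reports idle and was already masked out; this patch additionally ignores MC and adds a ratelimited warning so a stuck unit is visible instead of silently blocking suspend:

	/* neither FE (spinning in its WAIT/LINK loop) nor MC may veto
	 * runtime suspend; everything else must report idle */
	mask = gpu->idle_mask & ~(VIVS_HI_IDLE_STATE_FE |
				  VIVS_HI_IDLE_STATE_MC);
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
	if (idle != mask) {
		dev_warn_ratelimited(dev, "GPU not yet idle, mask: 0x%08x\n",
				     idle);
		return -EBUSY;
	}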
*/ u32 features; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c index 39b463db76c9de7c080be0cbada1b5c75838f950..167971a09be79ce0973fb8e50314c5a061a5d39c 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c @@ -6,9 +6,43 @@ #include "etnaviv_gpu.h" static const struct etnaviv_chip_identity etnaviv_chip_identities[] = { + { + .model = 0x400, + .revision = 0x4652, + .product_id = 0x70001, + .customer_id = 0x100, + .eco_id = 0, + .stream_count = 4, + .register_max = 64, + .thread_count = 128, + .shader_core_count = 1, + .vertex_cache_size = 8, + .vertex_output_buffer_size = 1024, + .pixel_pipes = 1, + .instruction_count = 256, + .num_constants = 320, + .buffer_size = 0, + .varyings_count = 8, + .features = 0xa0e9e004, + .minor_features0 = 0xe1299fff, + .minor_features1 = 0xbe13b219, + .minor_features2 = 0xce110010, + .minor_features3 = 0x8000001, + .minor_features4 = 0x20102, + .minor_features5 = 0x120000, + .minor_features6 = 0x0, + .minor_features7 = 0x0, + .minor_features8 = 0x0, + .minor_features9 = 0x0, + .minor_features10 = 0x0, + .minor_features11 = 0x0, + }, { .model = 0x7000, .revision = 0x6214, + .product_id = ~0U, + .customer_id = ~0U, + .eco_id = ~0U, .stream_count = 16, .register_max = 64, .thread_count = 1024, @@ -43,7 +77,13 @@ bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu) for (i = 0; i < ARRAY_SIZE(etnaviv_chip_identities); i++) { if (etnaviv_chip_identities[i].model == ident->model && - etnaviv_chip_identities[i].revision == ident->revision) { + etnaviv_chip_identities[i].revision == ident->revision && + (etnaviv_chip_identities[i].product_id == ident->product_id || + etnaviv_chip_identities[i].product_id == ~0U) && + (etnaviv_chip_identities[i].customer_id == ident->customer_id || + etnaviv_chip_identities[i].customer_id == ~0U) && + (etnaviv_chip_identities[i].eco_id == ident->eco_id || + etnaviv_chip_identities[i].eco_id == ~0U)) { memcpy(ident, &etnaviv_chip_identities[i], sizeof(*ident)); return true; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c index 8adbf2861bff4fe805d8bac3c9ef7108e54e7bac..e6795bafcbb9768e84035be679103f4a9709948f 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c @@ -32,6 +32,7 @@ struct etnaviv_pm_domain { }; struct etnaviv_pm_domain_meta { + unsigned int feature; const struct etnaviv_pm_domain *domains; u32 nr_domains; }; @@ -410,36 +411,78 @@ static const struct etnaviv_pm_domain doms_vg[] = { static const struct etnaviv_pm_domain_meta doms_meta[] = { { + .feature = chipFeatures_PIPE_3D, .nr_domains = ARRAY_SIZE(doms_3d), .domains = &doms_3d[0] }, { + .feature = chipFeatures_PIPE_2D, .nr_domains = ARRAY_SIZE(doms_2d), .domains = &doms_2d[0] }, { + .feature = chipFeatures_PIPE_VG, .nr_domains = ARRAY_SIZE(doms_vg), .domains = &doms_vg[0] } }; +static unsigned int num_pm_domains(const struct etnaviv_gpu *gpu) +{ + unsigned int num = 0, i; + + for (i = 0; i < ARRAY_SIZE(doms_meta); i++) { + const struct etnaviv_pm_domain_meta *meta = &doms_meta[i]; + + if (gpu->identity.features & meta->feature) + num += meta->nr_domains; + } + + return num; +} + +static const struct etnaviv_pm_domain *pm_domain(const struct etnaviv_gpu *gpu, + unsigned int index) +{ + const struct etnaviv_pm_domain *domain = NULL; + unsigned int offset = 0, i; + + for (i = 0; i < ARRAY_SIZE(doms_meta); i++) { + const struct etnaviv_pm_domain_meta *meta = &doms_meta[i]; + + if 
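In the hwdb hunk below, ~0U acts as a wildcard so pre-existing entries that never filled in the new product/customer/eco IDs keep matching any hardware value. The match predicate, extracted into a helper for clarity (ident_matches() is illustrative, not the driver's actual shape):

static bool ident_matches(const struct etnaviv_chip_identity *e,
			  const struct etnaviv_chip_identity *id)
{
	return e->model == id->model &&
	       e->revision == id->revision &&
	       (e->product_id == id->product_id || e->product_id == ~0U) &&
	       (e->customer_id == id->customer_id || e->customer_id == ~0U) &&
	       (e->eco_id == id->eco_id || e->eco_id == ~0U);
}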
(!(gpu->identity.features & meta->feature)) + continue; + + if (meta->nr_domains < (index - offset)) { + offset += meta->nr_domains; + continue; + } + + domain = meta->domains + (index - offset); + } + + return domain; +} + int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu, struct drm_etnaviv_pm_domain *domain) { - const struct etnaviv_pm_domain_meta *meta = &doms_meta[domain->pipe]; + const unsigned int nr_domains = num_pm_domains(gpu); const struct etnaviv_pm_domain *dom; - if (domain->iter >= meta->nr_domains) + if (domain->iter >= nr_domains) return -EINVAL; - dom = meta->domains + domain->iter; + dom = pm_domain(gpu, domain->iter); + if (!dom) + return -EINVAL; domain->id = domain->iter; domain->nr_signals = dom->nr_signals; strncpy(domain->name, dom->name, sizeof(domain->name)); domain->iter++; - if (domain->iter == meta->nr_domains) + if (domain->iter == nr_domains) domain->iter = 0xff; return 0; @@ -448,14 +491,16 @@ int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu, int etnaviv_pm_query_sig(struct etnaviv_gpu *gpu, struct drm_etnaviv_pm_signal *signal) { - const struct etnaviv_pm_domain_meta *meta = &doms_meta[signal->pipe]; + const unsigned int nr_domains = num_pm_domains(gpu); const struct etnaviv_pm_domain *dom; const struct etnaviv_pm_signal *sig; - if (signal->domain >= meta->nr_domains) + if (signal->domain >= nr_domains) return -EINVAL; - dom = meta->domains + signal->domain; + dom = pm_domain(gpu, signal->domain); + if (!dom) + return -EINVAL; if (signal->iter >= dom->nr_signals) return -EINVAL; diff --git a/drivers/gpu/drm/etnaviv/state_blt.xml.h b/drivers/gpu/drm/etnaviv/state_blt.xml.h index daae55995def0114ecd419351e63abb49474ab96..0e8bcf9dcc93b8a94fdb6189b413a4638762abcc 100644 --- a/drivers/gpu/drm/etnaviv/state_blt.xml.h +++ b/drivers/gpu/drm/etnaviv/state_blt.xml.h @@ -46,6 +46,8 @@ DEALINGS IN THE SOFTWARE. 
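The perfmon rework above stops indexing doms_meta by the UAPI pipe number and instead exposes one flat, feature-filtered domain space (terminated by the 0xff iterator sentinel). One way to write the flat-index lookup, restructured to return as soon as the index lands inside a pipe's range (same intent as the hunk, different control flow):

static const struct etnaviv_pm_domain *
lookup_domain(const struct etnaviv_gpu *gpu, unsigned int index)
{
	unsigned int offset = 0, i;

	for (i = 0; i < ARRAY_SIZE(doms_meta); i++) {
		const struct etnaviv_pm_domain_meta *meta = &doms_meta[i];

		if (!(gpu->identity.features & meta->feature))
			continue;	/* pipe not present on this GPU */
		if (index - offset < meta->nr_domains)
			return meta->domains + (index - offset);
		offset += meta->nr_domains;
	}
	return NULL;
}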
/* This is a cut-down version of the state_blt.xml.h file */ +#define VIVS_BLT_SET_COMMAND 0x000140ac + #define VIVS_BLT_ENABLE 0x000140b8 #define VIVS_BLT_ENABLE_ENABLE 0x00000001 diff --git a/drivers/gpu/drm/etnaviv/state_hi.xml.h b/drivers/gpu/drm/etnaviv/state_hi.xml.h index 41d8da2b6f4fc048919515be517076adcbb8f7e0..deaaa99fa65486d1405b12158945a56651997e90 100644 --- a/drivers/gpu/drm/etnaviv/state_hi.xml.h +++ b/drivers/gpu/drm/etnaviv/state_hi.xml.h @@ -8,17 +8,17 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng git clone git://0x04.net/rules-ng-ng The rules-ng-ng source files this header was generated from are: -- state.xml ( 26087 bytes, from 2017-12-18 16:51:59) -- common.xml ( 35468 bytes, from 2018-01-22 13:48:54) -- common_3d.xml ( 14615 bytes, from 2017-12-18 16:51:59) -- state_hi.xml ( 30232 bytes, from 2018-02-15 15:48:01) -- copyright.xml ( 1597 bytes, from 2016-12-08 16:37:56) -- state_2d.xml ( 51552 bytes, from 2016-12-08 16:37:56) -- state_3d.xml ( 79992 bytes, from 2017-12-18 16:51:59) -- state_blt.xml ( 13405 bytes, from 2017-12-18 16:51:59) -- state_vg.xml ( 5975 bytes, from 2016-12-08 16:37:56) - -Copyright (C) 2012-2018 by the following authors: +- state.xml ( 26666 bytes, from 2019-12-20 21:20:35) +- common.xml ( 35468 bytes, from 2018-02-10 13:09:26) +- common_3d.xml ( 15058 bytes, from 2019-12-28 20:02:03) +- state_hi.xml ( 30552 bytes, from 2019-12-28 20:02:48) +- copyright.xml ( 1597 bytes, from 2018-02-10 13:09:26) +- state_2d.xml ( 51552 bytes, from 2018-02-10 13:09:26) +- state_3d.xml ( 83098 bytes, from 2019-12-28 20:02:03) +- state_blt.xml ( 14252 bytes, from 2019-10-20 19:59:15) +- state_vg.xml ( 5975 bytes, from 2018-02-10 13:09:26) + +Copyright (C) 2012-2019 by the following authors: - Wladimir J. van der Laan - Christian Gmeiner - Lucas Stach @@ -48,6 +48,9 @@ DEALINGS IN THE SOFTWARE. #define MMU_EXCEPTION_SLAVE_NOT_PRESENT 0x00000001 #define MMU_EXCEPTION_PAGE_NOT_PRESENT 0x00000002 #define MMU_EXCEPTION_WRITE_VIOLATION 0x00000003 +#define MMU_EXCEPTION_OUT_OF_BOUND 0x00000004 +#define MMU_EXCEPTION_READ_SECURITY_VIOLATION 0x00000005 +#define MMU_EXCEPTION_WRITE_SECURITY_VIOLATION 0x00000006 #define VIVS_HI 0x00000000 #define VIVS_HI_CLOCK_CONTROL 0x00000000 @@ -81,6 +84,13 @@ DEALINGS IN THE SOFTWARE. #define VIVS_HI_IDLE_STATE_IM 0x00000200 #define VIVS_HI_IDLE_STATE_FP 0x00000400 #define VIVS_HI_IDLE_STATE_TS 0x00000800 +#define VIVS_HI_IDLE_STATE_BL 0x00001000 +#define VIVS_HI_IDLE_STATE_ASYNCFE 0x00002000 +#define VIVS_HI_IDLE_STATE_MC 0x00004000 +#define VIVS_HI_IDLE_STATE_PPA 0x00008000 +#define VIVS_HI_IDLE_STATE_WD 0x00010000 +#define VIVS_HI_IDLE_STATE_NN 0x00020000 +#define VIVS_HI_IDLE_STATE_TP 0x00040000 #define VIVS_HI_IDLE_STATE_AXI_LP 0x80000000 #define VIVS_HI_AXI_CONFIG 0x00000008 @@ -140,6 +150,8 @@ DEALINGS IN THE SOFTWARE. #define VIVS_HI_CHIP_TIME 0x0000002c +#define VIVS_HI_CHIP_CUSTOMER_ID 0x00000030 + #define VIVS_HI_CHIP_MINOR_FEATURE_0 0x00000034 #define VIVS_HI_CACHE_CONTROL 0x00000038 @@ -237,6 +249,8 @@ DEALINGS IN THE SOFTWARE. 
#define VIVS_HI_BLT_INTR 0x000000d4 +#define VIVS_HI_CHIP_ECO_ID 0x000000e8 + #define VIVS_HI_AUXBIT 0x000000ec #define VIVS_PM 0x00000000 diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 8428ae12dfa5e8c4de938ac46c0689053d3609a5..1f79bc2a881e1f3889e5161ef14fbba33d1764be 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -55,6 +55,7 @@ static const char * const decon_clks_name[] = { struct decon_context { struct device *dev; struct drm_device *drm_dev; + void *dma_priv; struct exynos_drm_crtc *crtc; struct exynos_drm_plane planes[WINDOWS_NR]; struct exynos_drm_plane_config configs[WINDOWS_NR]; @@ -644,7 +645,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data) decon_clear_channels(ctx->crtc); - return exynos_drm_register_dma(drm_dev, dev); + return exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv); } static void decon_unbind(struct device *dev, struct device *master, void *data) @@ -654,7 +655,7 @@ static void decon_unbind(struct device *dev, struct device *master, void *data) decon_atomic_disable(ctx->crtc); /* detach this sub driver from iommu mapping if supported. */ - exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev); + exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv); } static const struct component_ops decon_component_ops = { diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index ff59c641fa809224b96972928c3b6ae6a5fb3c21..f2d87a7445c7334d2b8eed104096daa276e399aa 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -40,6 +40,7 @@ struct decon_context { struct device *dev; struct drm_device *drm_dev; + void *dma_priv; struct exynos_drm_crtc *crtc; struct exynos_drm_plane planes[WINDOWS_NR]; struct exynos_drm_plane_config configs[WINDOWS_NR]; @@ -127,19 +128,19 @@ static int decon_ctx_initialize(struct decon_context *ctx, decon_clear_channels(ctx->crtc); - return exynos_drm_register_dma(drm_dev, ctx->dev); + return exynos_drm_register_dma(drm_dev, ctx->dev, &ctx->dma_priv); } static void decon_ctx_remove(struct decon_context *ctx) { /* detach this sub driver from iommu mapping if supported. */ - exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev); + exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv); } static u32 decon_calc_clkdiv(struct decon_context *ctx, const struct drm_display_mode *mode) { - unsigned long ideal_clk = mode->htotal * mode->vtotal * mode->vrefresh; + unsigned long ideal_clk = mode->clock; u32 clkdiv; /* Find the clock divider value that gets us closest to ideal_clk */ diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c index 9ebc02768847eafaf73e58e1668f032bb693db45..619f81435c1b2273abe2d61e7ee6d58dc0ce73a5 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dma.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c @@ -58,7 +58,7 @@ static inline void clear_dma_max_seg_size(struct device *dev) * mapping. 
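The exynos7 decon change above takes the ideal pixel clock straight from mode->clock instead of re-deriving it from htotal * vtotal * vrefresh. A quick numeric sketch of why the two agree for exact timings, plus a rounded-up divider pick; the source-clock value and the divider policy here are assumptions for illustration only:

#include <stdio.h>

#define DEMO_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* illustrative 1080p60-ish timings */
	unsigned long htotal = 2200, vtotal = 1125, vrefresh = 60;
	unsigned long clock_khz = 148500;	/* what mode->clock carries */
	unsigned long src_khz = 300000;		/* hypothetical source clock */

	/* the product the old code rebuilt, expressed in kHz */
	printf("htotal*vtotal*vrefresh = %lu kHz\n",
	       htotal * vtotal * vrefresh / 1000);

	/* smallest divider bringing the source down to at most the ideal */
	printf("clkdiv = %lu\n", DEMO_DIV_ROUND_UP(src_khz, clock_khz));
	return 0;
}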
*/ static int drm_iommu_attach_device(struct drm_device *drm_dev, - struct device *subdrv_dev) + struct device *subdrv_dev, void **dma_priv) { struct exynos_drm_private *priv = drm_dev->dev_private; int ret; @@ -74,7 +74,14 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev, return ret; if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) { - if (to_dma_iommu_mapping(subdrv_dev)) + /* + * Keep the original DMA mapping of the sub-device and + * restore it on Exynos DRM detach, otherwise the DMA + * framework considers it as IOMMU-less during the next + * probe (in case of deferred probe or modular build) + */ + *dma_priv = to_dma_iommu_mapping(subdrv_dev); + if (*dma_priv) arm_iommu_detach_device(subdrv_dev); ret = arm_iommu_attach_device(subdrv_dev, priv->mapping); @@ -98,19 +105,21 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev, * mapping */ static void drm_iommu_detach_device(struct drm_device *drm_dev, - struct device *subdrv_dev) + struct device *subdrv_dev, void **dma_priv) { struct exynos_drm_private *priv = drm_dev->dev_private; - if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) + if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) { arm_iommu_detach_device(subdrv_dev); - else if (IS_ENABLED(CONFIG_IOMMU_DMA)) + arm_iommu_attach_device(subdrv_dev, *dma_priv); + } else if (IS_ENABLED(CONFIG_IOMMU_DMA)) iommu_detach_device(priv->mapping, subdrv_dev); clear_dma_max_seg_size(subdrv_dev); } -int exynos_drm_register_dma(struct drm_device *drm, struct device *dev) +int exynos_drm_register_dma(struct drm_device *drm, struct device *dev, + void **dma_priv) { struct exynos_drm_private *priv = drm->dev_private; @@ -137,13 +146,14 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev) priv->mapping = mapping; } - return drm_iommu_attach_device(drm, dev); + return drm_iommu_attach_device(drm, dev, dma_priv); } -void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev) +void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev, + void **dma_priv) { if (IS_ENABLED(CONFIG_EXYNOS_IOMMU)) - drm_iommu_detach_device(drm, dev); + drm_iommu_detach_device(drm, dev, dma_priv); } void exynos_drm_cleanup_dma(struct drm_device *drm) diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index ba0f868b2477f667168bc5ff0406d8f84ca64a44..57defeb445223b533df7e38b144182b733a78c11 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -270,7 +270,7 @@ static int exynos_drm_bind(struct device *dev) struct drm_encoder *encoder; struct drm_device *drm; unsigned int clone_mask; - int cnt, ret; + int ret; drm = drm_dev_alloc(&exynos_drm_driver, dev); if (IS_ERR(drm)) @@ -293,10 +293,9 @@ static int exynos_drm_bind(struct device *dev) exynos_drm_mode_config_init(drm); /* setup possible_clones. */ - cnt = 0; clone_mask = 0; list_for_each_entry(encoder, &drm->mode_config.encoder_list, head) - clone_mask |= (1 << (cnt++)); + clone_mask |= drm_encoder_mask(encoder); list_for_each_entry(encoder, &drm->mode_config.encoder_list, head) encoder->possible_clones = clone_mask; diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index d4d21d8cfb9060169ab86bda8ef49b879eaa5b4a..6ae9056e7a18f7a73a9d7096c98e6b61061534b6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -223,8 +223,10 @@ static inline bool is_drm_iommu_supported(struct drm_device *drm_dev) return priv->mapping ? 
true : false; } -int exynos_drm_register_dma(struct drm_device *drm, struct device *dev); -void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev); +int exynos_drm_register_dma(struct drm_device *drm, struct device *dev, + void **dma_priv); +void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev, + void **dma_priv); void exynos_drm_cleanup_dma(struct drm_device *drm); #ifdef CONFIG_DRM_EXYNOS_DPI diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 38e43d957ae010f1b722d93936f48c6fea164f43..e080aa92338c060252b985484067405e23e2c7d6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -1772,8 +1772,9 @@ static int exynos_dsi_probe(struct platform_device *pdev) ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies), dsi->supplies); if (ret) { - dev_info(dev, "failed to get regulators: %d\n", ret); - return -EPROBE_DEFER; + if (ret != -EPROBE_DEFER) + dev_info(dev, "failed to get regulators: %d\n", ret); + return ret; } dsi->clks = devm_kcalloc(dev, @@ -1786,9 +1787,10 @@ static int exynos_dsi_probe(struct platform_device *pdev) dsi->clks[i] = devm_clk_get(dev, clk_names[i]); if (IS_ERR(dsi->clks[i])) { if (strcmp(clk_names[i], "sclk_mipi") == 0) { - strcpy(clk_names[i], OLD_SCLK_MIPI_CLK_NAME); - i--; - continue; + dsi->clks[i] = devm_clk_get(dev, + OLD_SCLK_MIPI_CLK_NAME); + if (!IS_ERR(dsi->clks[i])) + continue; } dev_info(dev, "failed to get the clock: %s\n", diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 8ea2e1d77802a40670b23a9c5d300e6e75cac036..29ab8be8604c9ba0881d818eab5c055be699ecb2 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -97,6 +97,7 @@ struct fimc_scaler { struct fimc_context { struct exynos_drm_ipp ipp; struct drm_device *drm_dev; + void *dma_priv; struct device *dev; struct exynos_drm_ipp_task *task; struct exynos_drm_ipp_formats *formats; @@ -1133,7 +1134,7 @@ static int fimc_bind(struct device *dev, struct device *master, void *data) ctx->drm_dev = drm_dev; ipp->drm_dev = drm_dev; - exynos_drm_register_dma(drm_dev, dev); + exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv); exynos_drm_ipp_register(dev, ipp, &ipp_funcs, DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE | @@ -1153,7 +1154,7 @@ static void fimc_unbind(struct device *dev, struct device *master, struct exynos_drm_ipp *ipp = &ctx->ipp; exynos_drm_ipp_unregister(dev, ipp); - exynos_drm_unregister_dma(drm_dev, dev); + exynos_drm_unregister_dma(drm_dev, dev, &ctx->dma_priv); } static const struct component_ops fimc_component_ops = { diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 21aec38702fc2b731a1ae4a982ed15fced156175..bb67cad8371f03da95d878e334e32b1cff2b10e9 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -167,6 +167,7 @@ static struct fimd_driver_data exynos5420_fimd_driver_data = { struct fimd_context { struct device *dev; struct drm_device *drm_dev; + void *dma_priv; struct exynos_drm_crtc *crtc; struct exynos_drm_plane planes[WINDOWS_NR]; struct exynos_drm_plane_config configs[WINDOWS_NR]; @@ -1090,7 +1091,7 @@ static int fimd_bind(struct device *dev, struct device *master, void *data) if (is_drm_iommu_supported(drm_dev)) fimd_clear_channels(ctx->crtc); - return exynos_drm_register_dma(drm_dev, dev); + return exynos_drm_register_dma(drm_dev, dev, 
&ctx->dma_priv); } static void fimd_unbind(struct device *dev, struct device *master, @@ -1100,7 +1101,7 @@ static void fimd_unbind(struct device *dev, struct device *master, fimd_atomic_disable(ctx->crtc); - exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev); + exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv); if (ctx->encoder) exynos_dpi_remove(ctx->encoder); diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 2a3382d43bc9020722617f421f9d3e7c6ae6bed1..fcee33a43aca3ee45a2c9d2584b551dd507c1cd1 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -232,6 +232,7 @@ struct g2d_runqueue_node { struct g2d_data { struct device *dev; + void *dma_priv; struct clk *gate_clk; void __iomem *regs; int irq; @@ -1409,7 +1410,7 @@ static int g2d_bind(struct device *dev, struct device *master, void *data) return ret; } - ret = exynos_drm_register_dma(drm_dev, dev); + ret = exynos_drm_register_dma(drm_dev, dev, &g2d->dma_priv); if (ret < 0) { dev_err(dev, "failed to enable iommu.\n"); g2d_fini_cmdlist(g2d); @@ -1434,7 +1435,7 @@ static void g2d_unbind(struct device *dev, struct device *master, void *data) priv->g2d_dev = NULL; cancel_work_sync(&g2d->runqueue_work); - exynos_drm_unregister_dma(g2d->drm_dev, dev); + exynos_drm_unregister_dma(g2d->drm_dev, dev, &g2d->dma_priv); } static const struct component_ops g2d_component_ops = { diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 88b6fcaa20be0976d4ff3f3009a75d548a6398f7..45e9aee8366a8f5bfc14fd60c6301424a5603f17 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -97,6 +97,7 @@ struct gsc_scaler { struct gsc_context { struct exynos_drm_ipp ipp; struct drm_device *drm_dev; + void *dma_priv; struct device *dev; struct exynos_drm_ipp_task *task; struct exynos_drm_ipp_formats *formats; @@ -1169,7 +1170,7 @@ static int gsc_bind(struct device *dev, struct device *master, void *data) ctx->drm_dev = drm_dev; ctx->drm_dev = drm_dev; - exynos_drm_register_dma(drm_dev, dev); + exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv); exynos_drm_ipp_register(dev, ipp, &ipp_funcs, DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE | @@ -1189,7 +1190,7 @@ static void gsc_unbind(struct device *dev, struct device *master, struct exynos_drm_ipp *ipp = &ctx->ipp; exynos_drm_ipp_unregister(dev, ipp); - exynos_drm_unregister_dma(drm_dev, dev); + exynos_drm_unregister_dma(drm_dev, dev, &ctx->dma_priv); } static const struct component_ops gsc_component_ops = { diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index b98482990d1ade89faab2ec41a187ecc269264f8..dafa87b82052967ca27292c261008125a8e4d58d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c @@ -56,6 +56,7 @@ struct rot_variant { struct rot_context { struct exynos_drm_ipp ipp; struct drm_device *drm_dev; + void *dma_priv; struct device *dev; void __iomem *regs; struct clk *clock; @@ -243,7 +244,7 @@ static int rotator_bind(struct device *dev, struct device *master, void *data) rot->drm_dev = drm_dev; ipp->drm_dev = drm_dev; - exynos_drm_register_dma(drm_dev, dev); + exynos_drm_register_dma(drm_dev, dev, &rot->dma_priv); exynos_drm_ipp_register(dev, ipp, &ipp_funcs, DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE, @@ -261,7 +262,7 @@ static void rotator_unbind(struct device *dev, struct device *master, 
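Each sub-driver above grows the same dma_priv slot for the reason given in drm_iommu_attach_device(): the device's original ARM IOMMU mapping has to survive an attach/detach cycle so that a deferred or modular re-probe still sees it. A toy model of that save/restore contract, with demo_* names and string-valued mappings standing in for the real handles:

#include <stdio.h>

/* toy stand-ins for ARM DMA-IOMMU mapping handles */
static const char *current_mapping = "default-dma-mapping";

struct demo_subdrv {
	const char *dma_priv;	/* original mapping, opaque to the caller */
};

static void demo_attach(struct demo_subdrv *s, const char *drm_mapping)
{
	/* stash whatever mapping the device came with before replacing it */
	s->dma_priv = current_mapping;
	current_mapping = drm_mapping;
}

static void demo_detach(struct demo_subdrv *s)
{
	/* restore the original mapping so a later re-probe sees it again */
	current_mapping = s->dma_priv;
}

int main(void)
{
	struct demo_subdrv s = { 0 };

	demo_attach(&s, "exynos-drm-mapping");
	printf("attached: %s\n", current_mapping);
	demo_detach(&s);
	printf("restored: %s\n", current_mapping);
	return 0;
}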
struct exynos_drm_ipp *ipp = &rot->ipp; exynos_drm_ipp_unregister(dev, ipp); - exynos_drm_unregister_dma(rot->drm_dev, rot->dev); + exynos_drm_unregister_dma(rot->drm_dev, rot->dev, &rot->dma_priv); } static const struct component_ops rotator_component_ops = { diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c index 497973e9b2c55a7c827254ffe96f45041ba59e94..93c43c8d914ee78a1b275b700c6486026958ce84 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c +++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c @@ -39,6 +39,7 @@ struct scaler_data { struct scaler_context { struct exynos_drm_ipp ipp; struct drm_device *drm_dev; + void *dma_priv; struct device *dev; void __iomem *regs; struct clk *clock[SCALER_MAX_CLK]; @@ -450,7 +451,7 @@ static int scaler_bind(struct device *dev, struct device *master, void *data) scaler->drm_dev = drm_dev; ipp->drm_dev = drm_dev; - exynos_drm_register_dma(drm_dev, dev); + exynos_drm_register_dma(drm_dev, dev, &scaler->dma_priv); exynos_drm_ipp_register(dev, ipp, &ipp_funcs, DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE | @@ -470,7 +471,8 @@ static void scaler_unbind(struct device *dev, struct device *master, struct exynos_drm_ipp *ipp = &scaler->ipp; exynos_drm_ipp_unregister(dev, ipp); - exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev); + exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev, + &scaler->dma_priv); } static const struct component_ops scaler_component_ops = { diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 3e5f1a77286d9e783a81fc3b71f0bdd385392e6f..1a7c828fc41d8d723fe444a5b9bd35de3361b513 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -1805,18 +1805,10 @@ static int hdmi_resources_init(struct hdmi_context *hdata) hdata->reg_hdmi_en = devm_regulator_get_optional(dev, "hdmi-en"); - if (PTR_ERR(hdata->reg_hdmi_en) != -ENODEV) { + if (PTR_ERR(hdata->reg_hdmi_en) != -ENODEV) if (IS_ERR(hdata->reg_hdmi_en)) return PTR_ERR(hdata->reg_hdmi_en); - ret = regulator_enable(hdata->reg_hdmi_en); - if (ret) { - DRM_DEV_ERROR(dev, - "failed to enable hdmi-en regulator\n"); - return ret; - } - } - return hdmi_bridge_init(hdata); } @@ -2023,6 +2015,15 @@ static int hdmi_probe(struct platform_device *pdev) } } + if (!IS_ERR(hdata->reg_hdmi_en)) { + ret = regulator_enable(hdata->reg_hdmi_en); + if (ret) { + DRM_DEV_ERROR(dev, + "failed to enable hdmi-en regulator\n"); + goto err_hdmiphy; + } + } + pm_runtime_enable(dev); audio_infoframe = &hdata->audio.infoframe; @@ -2047,7 +2048,8 @@ err_unregister_audio: err_rpm_disable: pm_runtime_disable(dev); - + if (!IS_ERR(hdata->reg_hdmi_en)) + regulator_disable(hdata->reg_hdmi_en); err_hdmiphy: if (hdata->hdmiphy_port) put_device(&hdata->hdmiphy_port->dev); diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 38ae9c32feef594f398ab246582067604b9c29f7..21b726baedeaa625a975d45a37fe3ef834baa154 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -94,6 +94,7 @@ struct mixer_context { struct platform_device *pdev; struct device *dev; struct drm_device *drm_dev; + void *dma_priv; struct exynos_drm_crtc *crtc; struct exynos_drm_plane planes[MIXER_WIN_NR]; unsigned long flags; @@ -894,12 +895,14 @@ static int mixer_initialize(struct mixer_context *mixer_ctx, } } - return exynos_drm_register_dma(drm_dev, mixer_ctx->dev); + return exynos_drm_register_dma(drm_dev, mixer_ctx->dev, + 
&mixer_ctx->dma_priv); } static void mixer_ctx_remove(struct mixer_context *mixer_ctx) { - exynos_drm_unregister_dma(mixer_ctx->drm_dev, mixer_ctx->dev); + exynos_drm_unregister_dma(mixer_ctx->drm_dev, mixer_ctx->dev, + &mixer_ctx->dma_priv); } static int mixer_enable_vblank(struct exynos_drm_crtc *crtc) diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h b/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h index 0da86020041094acd261e7949f68a7c9707909d0..e2ac09894a6d7c15be9730aa1ec481f895a07bbc 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h @@ -83,7 +83,6 @@ #define VSIZE_OFST 20 #define LDI_INT_EN 0x741C #define FRAME_END_INT_EN_OFST 1 -#define UNDERFLOW_INT_EN_OFST 2 #define LDI_CTRL 0x7420 #define BPP_OFST 3 #define DATA_GATE_EN BIT(2) diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c index 73cd28a6ea078f95608bfb623bc5cd81a191946a..86000127d4eec54465ee2e4db8403c6f52cac117 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c @@ -46,7 +46,6 @@ struct ade_hw_ctx { struct clk *media_noc_clk; struct clk *ade_pix_clk; struct reset_control *reset; - struct work_struct display_reset_wq; bool power_on; int irq; @@ -136,7 +135,6 @@ static void ade_init(struct ade_hw_ctx *ctx) */ ade_update_bits(base + ADE_CTRL, FRM_END_START_OFST, FRM_END_START_MASK, REG_EFFECTIVE_IN_ADEEN_FRMEND); - ade_update_bits(base + LDI_INT_EN, UNDERFLOW_INT_EN_OFST, MASK(1), 1); } static bool ade_crtc_mode_fixup(struct drm_crtc *crtc, @@ -304,17 +302,6 @@ static void ade_crtc_disable_vblank(struct drm_crtc *crtc) MASK(1), 0); } -static void drm_underflow_wq(struct work_struct *work) -{ - struct ade_hw_ctx *ctx = container_of(work, struct ade_hw_ctx, - display_reset_wq); - struct drm_device *drm_dev = ctx->crtc->dev; - struct drm_atomic_state *state; - - state = drm_atomic_helper_suspend(drm_dev); - drm_atomic_helper_resume(drm_dev, state); -} - static irqreturn_t ade_irq_handler(int irq, void *data) { struct ade_hw_ctx *ctx = data; @@ -331,12 +318,6 @@ static irqreturn_t ade_irq_handler(int irq, void *data) MASK(1), 1); drm_crtc_handle_vblank(crtc); } - if (status & BIT(UNDERFLOW_INT_EN_OFST)) { - ade_update_bits(base + LDI_INT_CLR, UNDERFLOW_INT_EN_OFST, - MASK(1), 1); - DRM_ERROR("LDI underflow!"); - schedule_work(&ctx->display_reset_wq); - } return IRQ_HANDLED; } @@ -919,7 +900,6 @@ static void *ade_hw_ctx_alloc(struct platform_device *pdev, if (ret) return ERR_PTR(-EIO); - INIT_WORK(&ctx->display_reset_wq, drm_underflow_wq); ctx->crtc = crtc; return ctx; diff --git a/drivers/gpu/drm/i915/Kconfig.profile b/drivers/gpu/drm/i915/Kconfig.profile index c280b6ae38ebe9dc6659c257ced8c2cef9bab644..0bfd276c19fe60be6f0b75c533656ad7fa3dcbe2 100644 --- a/drivers/gpu/drm/i915/Kconfig.profile +++ b/drivers/gpu/drm/i915/Kconfig.profile @@ -20,6 +20,9 @@ config DRM_I915_HEARTBEAT_INTERVAL check the health of the GPU and undertake regular house-keeping of internal driver state. + This is adjustable via + /sys/class/drm/card?/engine/*/heartbeat_interval_ms + May be 0 to disable heartbeats and therefore disable automatic GPU hang detection. @@ -33,11 +36,18 @@ config DRM_I915_PREEMPT_TIMEOUT expires, the HW will be reset to allow the more important context to execute. + This is adjustable via + /sys/class/drm/card?/engine/*/preempt_timeout_ms + May be 0 to disable the timeout. 
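The help texts above all point at per-engine sysfs controls. A userspace sketch of reading one of them; the card and engine directory names (card0, rcs0) are examples only, as the actual names vary by platform:

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/class/drm/card0/engine/rcs0/preempt_timeout_ms";
	FILE *f = fopen(path, "r");
	unsigned int ms;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%u", &ms) == 1)
		printf("preempt timeout: %u ms (0 = disabled)\n", ms);
	fclose(f);
	return 0;
}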
-config DRM_I915_SPIN_REQUEST - int "Busywait for request completion (us)" - default 5 # microseconds + The compiled in default may get overridden at driver probe time on + certain platforms and certain engines which will be reflected in the + sysfs control. + +config DRM_I915_MAX_REQUEST_BUSYWAIT + int "Busywait for request completion limit (ns)" + default 8000 # nanoseconds help Before sleeping waiting for a request (GPU operation) to complete, we may spend some time polling for its completion. As the IRQ may @@ -45,6 +55,9 @@ config DRM_I915_SPIN_REQUEST check if the request will complete in the time it would have taken us to enable the interrupt. + This is adjustable via + /sys/class/drm/card?/engine/*/max_busywait_duration_ns + May be 0 to disable the initial spin. In practice, we estimate the cost of enabling the interrupt (if currently disabled) to be a few microseconds. @@ -60,6 +73,9 @@ config DRM_I915_STOP_TIMEOUT that the reset itself may take longer and so be more disruptive to interactive or low latency workloads. + This is adjustable via + /sys/class/drm/card?/engine/*/stop_timeout_ms + config DRM_I915_TIMESLICE_DURATION int "Scheduling quantum for userspace batches (ms, jiffy granularity)" default 1 # milliseconds @@ -73,4 +89,7 @@ config DRM_I915_TIMESLICE_DURATION is scheduled for execution for the timeslice duration, before switching to the next context. + This is adjustable via + /sys/class/drm/card?/engine/*/timeslice_duration_ms + May be 0 to disable timeslicing. diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index bc28c31c4f78faa4cfdf68bee6fc06288aaf0149..9f887a86e555d26954e2d663437be6c01a88f755 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -47,6 +47,7 @@ i915-y += i915_drv.o \ i915_sysfs.o \ i915_utils.o \ intel_device_info.o \ + intel_dram.o \ intel_memory_region.o \ intel_pch.o \ intel_pm.o \ @@ -79,9 +80,11 @@ gt-y += \ gt/debugfs_gt.o \ gt/debugfs_gt_pm.o \ gt/gen6_ppgtt.o \ + gt/gen7_renderclear.o \ gt/gen8_ppgtt.o \ gt/intel_breadcrumbs.o \ gt/intel_context.o \ + gt/intel_context_param.o \ gt/intel_context_sseu.o \ gt/intel_engine_cs.o \ gt/intel_engine_heartbeat.o \ @@ -107,7 +110,8 @@ gt-y += \ gt/intel_rps.o \ gt/intel_sseu.o \ gt/intel_timeline.o \ - gt/intel_workarounds.o + gt/intel_workarounds.o \ + gt/sysfs_engines.o # autogenerated null render state gt-y += \ gt/gen6_renderstate.o \ diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index d842e280699d7e5a8742d3a8dccd76449ee14234..17cee6f80d8be37d339b7f3555616365fa481ac5 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -599,13 +599,13 @@ static void gen11_dsi_gate_clocks(struct intel_encoder *encoder) u32 tmp; enum phy phy; - mutex_lock(&dev_priv->dpll_lock); + mutex_lock(&dev_priv->dpll.lock); tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); for_each_dsi_phy(phy, intel_dsi->phys) tmp |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp); - mutex_unlock(&dev_priv->dpll_lock); + mutex_unlock(&dev_priv->dpll.lock); } static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder) @@ -615,13 +615,13 @@ static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder) u32 tmp; enum phy phy; - mutex_lock(&dev_priv->dpll_lock); + mutex_lock(&dev_priv->dpll.lock); tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); for_each_dsi_phy(phy, intel_dsi->phys) tmp &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); intel_de_write(dev_priv, 
ICL_DPCLKA_CFGCR0, tmp); - mutex_unlock(&dev_priv->dpll_lock); + mutex_unlock(&dev_priv->dpll.lock); } static void gen11_dsi_map_pll(struct intel_encoder *encoder, @@ -633,7 +633,7 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder, enum phy phy; u32 val; - mutex_lock(&dev_priv->dpll_lock); + mutex_lock(&dev_priv->dpll.lock); val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); for_each_dsi_phy(phy, intel_dsi->phys) { @@ -652,7 +652,7 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder, intel_de_posting_read(dev_priv, ICL_DPCLKA_CFGCR0); - mutex_unlock(&dev_priv->dpll_lock); + mutex_unlock(&dev_priv->dpll.lock); } static void @@ -1350,15 +1350,15 @@ static void gen11_dsi_get_timings(struct intel_encoder *encoder, static void gen11_dsi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); intel_dsc_get_config(encoder, pipe_config); /* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */ - pipe_config->port_clock = - cnl_calc_wrpll_link(dev_priv, &pipe_config->dpll_hw_state); + pipe_config->port_clock = intel_dpll_get_freq(i915, + pipe_config->shared_dpll); pipe_config->hw.adjusted_mode.crtc_clock = intel_dsi->pclk; if (intel_dsi->dual_link) diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index c86d7a35c8169feec4bd0fd82f6f7f113b1436ae..457b258683d300f5e4a8060c33837cc52f7b1d09 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -133,15 +133,37 @@ intel_plane_destroy_state(struct drm_plane *plane, kfree(plane_state); } +unsigned int intel_plane_pixel_rate(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + unsigned int src_w, src_h, dst_w, dst_h; + unsigned int pixel_rate = crtc_state->pixel_rate; + + src_w = drm_rect_width(&plane_state->uapi.src) >> 16; + src_h = drm_rect_height(&plane_state->uapi.src) >> 16; + dst_w = drm_rect_width(&plane_state->uapi.dst); + dst_h = drm_rect_height(&plane_state->uapi.dst); + + /* Downscaling limits the maximum pixel rate */ + dst_w = min(src_w, dst_w); + dst_h = min(src_h, dst_h); + + return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_rate, src_w * src_h), + dst_w * dst_h); +} + unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int cpp; + unsigned int pixel_rate; if (!plane_state->uapi.visible) return 0; + pixel_rate = intel_plane_pixel_rate(crtc_state, plane_state); + cpp = fb->format->cpp[0]; /* @@ -153,7 +175,7 @@ unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state, if (fb->format->is_yuv && fb->format->num_planes > 1) cpp *= 4; - return cpp * crtc_state->pixel_rate; + return pixel_rate * cpp; } int intel_plane_calc_min_cdclk(struct intel_atomic_state *state, diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h index 2bcf15e34728bbe81a3608deb21103fa4f756877..a6bbf42bae1f13c7d401ca385dee6df5175ed227 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h @@ -18,6 +18,9 @@ struct intel_plane_state; extern 
const struct drm_plane_helper_funcs intel_plane_helper_funcs; +unsigned int intel_plane_pixel_rate(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); + unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state, diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index 30fb7c887ff006dcb0e3378443a08cd13a0e6bec..62f234f641de60f6caa98cd1ac1fc299adb74102 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -149,6 +149,10 @@ static const struct { { 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 }, { 148352, AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 }, { 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 }, + { 296703, AUD_CONFIG_PIXEL_CLOCK_HDMI_296703 }, + { 297000, AUD_CONFIG_PIXEL_CLOCK_HDMI_297000 }, + { 593407, AUD_CONFIG_PIXEL_CLOCK_HDMI_593407 }, + { 594000, AUD_CONFIG_PIXEL_CLOCK_HDMI_594000 }, }; /* HDMI N/CTS table */ @@ -234,6 +238,7 @@ static const struct hdmi_aud_ncts hdmi_aud_ncts_36bpp[] = { /* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */ static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_state) { + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; int i; @@ -243,6 +248,9 @@ static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_sta break; } + if (INTEL_GEN(dev_priv) < 12 && adjusted_mode->crtc_clock > 148500) + i = ARRAY_SIZE(hdmi_audio_clock); + if (i == ARRAY_SIZE(hdmi_audio_clock)) { DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", adjusted_mode->crtc_clock); @@ -844,7 +852,7 @@ static void glk_force_audio_cdclk(struct drm_i915_private *dev_priv, struct intel_crtc *crtc; int ret; - crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A); + crtc = intel_get_first_crtc(dev_priv); if (!crtc) return; diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 2049cf5b54f38d05d794459d5346fa53ec9284a5..8391246472024ccb517a01f6e7541e5a176dea49 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -26,7 +26,6 @@ */ #include -#include #include "display/intel_display.h" #include "display/intel_display_types.h" diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h index c17199caeff8295b3ebf52a6ef4b85f22fd9645b..e29e79faa01b9d3e97627cfeb74cba7ab0b226f6 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.h +++ b/drivers/gpu/drm/i915/display/intel_bios.h @@ -32,8 +32,6 @@ #include -#include - struct drm_i915_private; struct intel_crtc_state; struct intel_encoder; diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 0741d643455ba29b97b28db94c6dc2fa113f14fb..979a0241fdcbf3c0176833a3587f58dc549d9351 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -1868,6 +1868,8 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { + struct intel_encoder *encoder; + if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config)) return; @@ -1876,8 +1878,28 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv, 
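intel_plane_pixel_rate() above scales the CRTC pixel rate by the plane's downscale ratio, since a downscaling plane fetches more source pixels per displayed pixel. The same arithmetic as a standalone function, with the 64-bit widening and rounded-up division spelled out:

#include <stdint.h>
#include <stdio.h>

static unsigned int demo_plane_pixel_rate(unsigned int crtc_pixel_rate,
					  unsigned int src_w, unsigned int src_h,
					  unsigned int dst_w, unsigned int dst_h)
{
	uint64_t num, den;

	/* upscaling does not raise the rate; clamp to the source size */
	if (dst_w > src_w)
		dst_w = src_w;
	if (dst_h > src_h)
		dst_h = src_h;

	/* 64-bit product and rounded-up division, as in the patch */
	num = (uint64_t)crtc_pixel_rate * src_w * src_h;
	den = (uint64_t)dst_w * dst_h;
	return (unsigned int)((num + den - 1) / den);
}

int main(void)
{
	/* a 2x2 downscale quadruples the effective pixel rate */
	printf("%u\n", demo_plane_pixel_rate(100000, 1920, 1080, 960, 540));
	return 0;
}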
intel_dump_cdclk_config(cdclk_config, "Changing CDCLK to"); + /* + * Lock aux/gmbus while we change cdclk in case those + * functions use cdclk. Not all platforms/ports do, + * but we'll lock them all for simplicity. + */ + mutex_lock(&dev_priv->gmbus_mutex); + for_each_intel_dp(&dev_priv->drm, encoder) { + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + mutex_lock_nest_lock(&intel_dp->aux.hw_mutex, + &dev_priv->gmbus_mutex); + } + dev_priv->display.set_cdclk(dev_priv, cdclk_config, pipe); + for_each_intel_dp(&dev_priv->drm, encoder) { + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + mutex_unlock(&intel_dp->aux.hw_mutex); + } + mutex_unlock(&dev_priv->gmbus_mutex); + if (drm_WARN(&dev_priv->drm, intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config), "cdclk state doesn't match!\n")) { diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index 36dd52d2a9ee099ecff295f110ff586bdf74c94e..c1cce93a1c257482c1a0cbe7c9b86b4b9425ea17 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -348,48 +348,56 @@ static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state) crtc_state->csc_mode); } -/* - * Set up the pipe CSC unit on CherryView. - */ -static void cherryview_load_csc_matrix(const struct intel_crtc_state *crtc_state) +static void chv_load_cgm_csc(struct intel_crtc *crtc, + const struct drm_property_blob *blob) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct drm_color_ctm *ctm = blob->data; enum pipe pipe = crtc->pipe; + u16 coeffs[9]; + int i; - if (crtc_state->hw.ctm) { - const struct drm_color_ctm *ctm = crtc_state->hw.ctm->data; - u16 coeffs[9] = {}; - int i; - - for (i = 0; i < ARRAY_SIZE(coeffs); i++) { - u64 abs_coeff = - ((1ULL << 63) - 1) & ctm->matrix[i]; - - /* Round coefficient. */ - abs_coeff += 1 << (32 - 13); - /* Clamp to hardware limits. */ - abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_8_0 - 1); - - /* Write coefficients in S3.12 format. */ - if (ctm->matrix[i] & (1ULL << 63)) - coeffs[i] = 1 << 15; - coeffs[i] |= ((abs_coeff >> 32) & 7) << 12; - coeffs[i] |= (abs_coeff >> 20) & 0xfff; - } + for (i = 0; i < ARRAY_SIZE(coeffs); i++) { + u64 abs_coeff = ((1ULL << 63) - 1) & ctm->matrix[i]; + + /* Round coefficient. */ + abs_coeff += 1 << (32 - 13); + /* Clamp to hardware limits. */ + abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_8_0 - 1); - intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF01(pipe), - coeffs[1] << 16 | coeffs[0]); - intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF23(pipe), - coeffs[3] << 16 | coeffs[2]); - intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF45(pipe), - coeffs[5] << 16 | coeffs[4]); - intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF67(pipe), - coeffs[7] << 16 | coeffs[6]); - intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF8(pipe), coeffs[8]); + coeffs[i] = 0; + + /* Write coefficients in S3.12 format. 
*/ + if (ctm->matrix[i] & (1ULL << 63)) + coeffs[i] |= 1 << 15; + + coeffs[i] |= ((abs_coeff >> 32) & 7) << 12; + coeffs[i] |= (abs_coeff >> 20) & 0xfff; } - intel_de_write(dev_priv, CGM_PIPE_MODE(pipe), crtc_state->cgm_mode); + intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF01(pipe), + coeffs[1] << 16 | coeffs[0]); + intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF23(pipe), + coeffs[3] << 16 | coeffs[2]); + intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF45(pipe), + coeffs[5] << 16 | coeffs[4]); + intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF67(pipe), + coeffs[7] << 16 | coeffs[6]); + intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF8(pipe), + coeffs[8]); +} + +/* convert hw value with given bit_precision to lut property val */ +static u32 intel_color_lut_pack(u32 val, int bit_precision) +{ + u32 max = 0xffff >> (16 - bit_precision); + + val = clamp_val(val, 0, max); + + if (bit_precision < 16) + val <<= 16 - bit_precision; + + return val; } static u32 i9xx_lut_8(const struct drm_color_lut *color) @@ -399,6 +407,13 @@ static u32 i9xx_lut_8(const struct drm_color_lut *color) drm_color_lut_extract(color->blue, 8); } +static void i9xx_lut_8_pack(struct drm_color_lut *entry, u32 val) +{ + entry->red = intel_color_lut_pack(REG_FIELD_GET(LGC_PALETTE_RED_MASK, val), 8); + entry->green = intel_color_lut_pack(REG_FIELD_GET(LGC_PALETTE_GREEN_MASK, val), 8); + entry->blue = intel_color_lut_pack(REG_FIELD_GET(LGC_PALETTE_BLUE_MASK, val), 8); +} + /* i965+ "10.6" bit interpolated format "even DW" (low 8 bits) */ static u32 i965_lut_10p6_ldw(const struct drm_color_lut *color) { @@ -415,48 +430,34 @@ static u32 i965_lut_10p6_udw(const struct drm_color_lut *color) (color->blue >> 8); } -static u32 ilk_lut_10(const struct drm_color_lut *color) +static void i965_lut_10p6_pack(struct drm_color_lut *entry, u32 ldw, u32 udw) { - return drm_color_lut_extract(color->red, 10) << 20 | - drm_color_lut_extract(color->green, 10) << 10 | - drm_color_lut_extract(color->blue, 10); + entry->red = REG_FIELD_GET(PALETTE_RED_MASK, udw) << 8 | + REG_FIELD_GET(PALETTE_RED_MASK, ldw); + entry->green = REG_FIELD_GET(PALETTE_GREEN_MASK, udw) << 8 | + REG_FIELD_GET(PALETTE_GREEN_MASK, ldw); + entry->blue = REG_FIELD_GET(PALETTE_BLUE_MASK, udw) << 8 | + REG_FIELD_GET(PALETTE_BLUE_MASK, ldw); } -/* Loads the legacy palette/gamma unit for the CRTC. 
*/ -static void i9xx_load_luts_internal(const struct intel_crtc_state *crtc_state, - const struct drm_property_blob *blob) +static u16 i965_lut_11p6_max_pack(u32 val) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = crtc->pipe; - int i; - - if (HAS_GMCH(dev_priv)) { - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) - assert_dsi_pll_enabled(dev_priv); - else - assert_pll_enabled(dev_priv, pipe); - } - - if (blob) { - const struct drm_color_lut *lut = blob->data; - - for (i = 0; i < 256; i++) { - u32 word = i9xx_lut_8(&lut[i]); + /* PIPEGCMAX is 11.6, clamp to 10.6 */ + return clamp_val(val, 0, 0xffff); +} - if (HAS_GMCH(dev_priv)) - intel_de_write(dev_priv, PALETTE(pipe, i), - word); - else - intel_de_write(dev_priv, LGC_PALETTE(pipe, i), - word); - } - } +static u32 ilk_lut_10(const struct drm_color_lut *color) +{ + return drm_color_lut_extract(color->red, 10) << 20 | + drm_color_lut_extract(color->green, 10) << 10 | + drm_color_lut_extract(color->blue, 10); } -static void i9xx_load_luts(const struct intel_crtc_state *crtc_state) +static void ilk_lut_10_pack(struct drm_color_lut *entry, u32 val) { - i9xx_load_luts_internal(crtc_state, crtc_state->hw.gamma_lut); + entry->red = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_RED_MASK, val), 10); + entry->green = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_GREEN_MASK, val), 10); + entry->blue = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_BLUE_MASK, val), 10); } static void i9xx_color_commit(const struct intel_crtc_state *crtc_state) @@ -525,6 +526,35 @@ static void skl_color_commit(const struct intel_crtc_state *crtc_state) ilk_load_csc_matrix(crtc_state); } +static void i9xx_load_lut_8(struct intel_crtc *crtc, + const struct drm_property_blob *blob) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct drm_color_lut *lut; + enum pipe pipe = crtc->pipe; + int i; + + if (!blob) + return; + + lut = blob->data; + + for (i = 0; i < 256; i++) + intel_de_write(dev_priv, PALETTE(pipe, i), + i9xx_lut_8(&lut[i])); +} + +static void i9xx_load_luts(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; + + assert_pll_enabled(dev_priv, crtc->pipe); + + i9xx_load_lut_8(crtc, gamma_lut); +} + static void i965_load_lut_10p6(struct intel_crtc *crtc, const struct drm_property_blob *blob) { @@ -548,14 +578,38 @@ static void i965_load_lut_10p6(struct intel_crtc *crtc, static void i965_load_luts(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) + assert_dsi_pll_enabled(dev_priv); + else + assert_pll_enabled(dev_priv, crtc->pipe); + if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) - i9xx_load_luts(crtc_state); + i9xx_load_lut_8(crtc, gamma_lut); else i965_load_lut_10p6(crtc, gamma_lut); } +static void ilk_load_lut_8(struct intel_crtc *crtc, + const struct drm_property_blob *blob) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct drm_color_lut *lut; + enum pipe pipe = crtc->pipe; + int i; + + if (!blob) + return; + + lut = blob->data; + + for (i = 0; i < 
256; i++) + intel_de_write(dev_priv, LGC_PALETTE(pipe, i), + i9xx_lut_8(&lut[i])); +} + static void ilk_load_lut_10(struct intel_crtc *crtc, const struct drm_property_blob *blob) { @@ -566,7 +620,7 @@ static void ilk_load_lut_10(struct intel_crtc *crtc, for (i = 0; i < lut_size; i++) intel_de_write(dev_priv, PREC_PALETTE(pipe, i), - ilk_lut_10(&lut[i])); + ilk_lut_10(&lut[i])); } static void ilk_load_luts(const struct intel_crtc_state *crtc_state) @@ -575,7 +629,7 @@ static void ilk_load_luts(const struct intel_crtc_state *crtc_state) const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) - i9xx_load_luts(crtc_state); + ilk_load_lut_8(crtc, gamma_lut); else ilk_load_lut_10(crtc, gamma_lut); } @@ -685,7 +739,7 @@ static void ivb_load_luts(const struct intel_crtc_state *crtc_state) const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut; if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) { - i9xx_load_luts(crtc_state); + ilk_load_lut_8(crtc, gamma_lut); } else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) { ivb_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE | PAL_PREC_INDEX_VALUE(0)); @@ -708,7 +762,7 @@ static void bdw_load_luts(const struct intel_crtc_state *crtc_state) const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut; if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) { - i9xx_load_luts(crtc_state); + ilk_load_lut_8(crtc, gamma_lut); } else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) { bdw_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE | PAL_PREC_INDEX_VALUE(0)); @@ -729,9 +783,8 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state) struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size; + int i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size; const struct drm_color_lut *lut = crtc_state->hw.degamma_lut->data; - u32 i; /* * When setting the auto-increment bit, the hardware seems to @@ -770,8 +823,7 @@ static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_stat struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size; - u32 i; + int i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size; /* * When setting the auto-increment bit, the hardware seems to @@ -812,7 +864,7 @@ static void glk_load_luts(const struct intel_crtc_state *crtc_state) glk_load_degamma_lut_linear(crtc_state); if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) { - i9xx_load_luts(crtc_state); + ilk_load_lut_8(crtc, gamma_lut); } else { bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0)); ivb_load_lut_ext_max(crtc); @@ -856,7 +908,7 @@ icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state) const struct drm_color_lut *lut = blob->data; struct intel_dsb *dsb = intel_dsb_get(crtc); enum pipe pipe = crtc->pipe; - u32 i; + int i; /* * Program Super Fine segment (let's call it seg1)... @@ -889,7 +941,7 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state) const struct drm_color_lut *entry; struct intel_dsb *dsb = intel_dsb_get(crtc); enum pipe pipe = crtc->pipe; - u32 i; + int i; /* * Program Fine segment (let's call it seg2)... 
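intel_color_lut_pack() above is the readback counterpart of drm_color_lut_extract(): an n-bit hardware value is stretched back onto the 16-bit LUT property scale. A round-trip sketch; the extract side is a simplified rendering of the DRM helper, not a verbatim copy:

#include <stdio.h>

/* 16-bit property value -> n-bit hardware value, with rounding */
static unsigned int demo_lut_extract(unsigned int val, int bits)
{
	unsigned int max = 0xffff >> (16 - bits);

	if (bits < 16) {
		val += 1u << (16 - bits - 1);
		val >>= 16 - bits;
	}
	return val > max ? max : val;
}

/* n-bit hardware value -> 16-bit property value, as in the patch */
static unsigned int demo_lut_pack(unsigned int val, int bits)
{
	unsigned int max = 0xffff >> (16 - bits);

	if (val > max)
		val = max;
	if (bits < 16)
		val <<= 16 - bits;
	return val;
}

int main(void)
{
	unsigned int in = 0xabcd;
	unsigned int hw = demo_lut_extract(in, 8);

	/* the round trip keeps only the top 8 bits of precision */
	printf("0x%04x -> 0x%02x -> 0x%04x\n", in, hw, demo_lut_pack(hw, 8));
	return 0;
}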
@@ -948,7 +1000,7 @@ static void icl_load_luts(const struct intel_crtc_state *crtc_state) switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) { case GAMMA_MODE_MODE_8BIT: - i9xx_load_luts(crtc_state); + ilk_load_lut_8(crtc, gamma_lut); break; case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED: icl_program_gamma_superfine_segment(crtc_state); @@ -974,6 +1026,13 @@ static u32 chv_cgm_degamma_udw(const struct drm_color_lut *color) return drm_color_lut_extract(color->red, 14); } +static void chv_cgm_gamma_pack(struct drm_color_lut *entry, u32 ldw, u32 udw) +{ + entry->green = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_GREEN_MASK, ldw), 10); + entry->blue = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_BLUE_MASK, ldw), 10); + entry->red = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_RED_MASK, udw), 10); +} + static void chv_load_cgm_degamma(struct intel_crtc *crtc, const struct drm_property_blob *blob) { @@ -1020,21 +1079,24 @@ static void chv_load_cgm_gamma(struct intel_crtc *crtc, static void chv_load_luts(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut; + const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; + const struct drm_property_blob *ctm = crtc_state->hw.ctm; - cherryview_load_csc_matrix(crtc_state); + if (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC) + chv_load_cgm_csc(crtc, ctm); - if (crtc_state_is_legacy_gamma(crtc_state)) { - i9xx_load_luts(crtc_state); - return; - } - - if (degamma_lut) + if (crtc_state->cgm_mode & CGM_PIPE_MODE_DEGAMMA) chv_load_cgm_degamma(crtc, degamma_lut); - if (gamma_lut) + if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA) chv_load_cgm_gamma(crtc, gamma_lut); + else + i965_load_luts(crtc_state); + + intel_de_write(dev_priv, CGM_PIPE_MODE(crtc->pipe), + crtc_state->cgm_mode); } void intel_color_load_luts(const struct intel_crtc_state *crtc_state) @@ -1660,28 +1722,13 @@ bool intel_color_lut_equal(struct drm_property_blob *blob1, return true; } -/* convert hw value with given bit_precision to lut property val */ -static u32 intel_color_lut_pack(u32 val, u32 bit_precision) +static struct drm_property_blob *i9xx_read_lut_8(struct intel_crtc *crtc) { - u32 max = 0xffff >> (16 - bit_precision); - - val = clamp_val(val, 0, max); - - if (bit_precision < 16) - val <<= 16 - bit_precision; - - return val; -} - -static struct drm_property_blob * -i9xx_read_lut_8(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; - struct drm_color_lut *blob_data; - u32 i, val; + struct drm_color_lut *lut; + int i; blob = drm_property_create_blob(&dev_priv->drm, sizeof(struct drm_color_lut) * LEGACY_LUT_LENGTH, @@ -1689,20 +1736,12 @@ i9xx_read_lut_8(const struct intel_crtc_state *crtc_state) if (IS_ERR(blob)) return NULL; - blob_data = blob->data; + lut = blob->data; for (i = 0; i < LEGACY_LUT_LENGTH; i++) { - if (HAS_GMCH(dev_priv)) - val = intel_de_read(dev_priv, PALETTE(pipe, i)); - else - val = intel_de_read(dev_priv, LGC_PALETTE(pipe, i)); - - blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET( - LGC_PALETTE_RED_MASK, val), 8); - blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET( - LGC_PALETTE_GREEN_MASK, 
val), 8); - blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET( - LGC_PALETTE_BLUE_MASK, val), 8); + u32 val = intel_de_read(dev_priv, PALETTE(pipe, i)); + + i9xx_lut_8_pack(&lut[i], val); } return blob; @@ -1710,22 +1749,21 @@ i9xx_read_lut_8(const struct intel_crtc_state *crtc_state) static void i9xx_read_luts(struct intel_crtc_state *crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + if (!crtc_state->gamma_enable) return; - crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state); + crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc); } -static struct drm_property_blob * -i965_read_lut_10p6(const struct intel_crtc_state *crtc_state) +static struct drm_property_blob *i965_read_lut_10p6(struct intel_crtc *crtc) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size; + int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size; enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; - struct drm_color_lut *blob_data; - u32 i, val1, val2; + struct drm_color_lut *lut; blob = drm_property_create_blob(&dev_priv->drm, sizeof(struct drm_color_lut) * lut_size, @@ -1733,51 +1771,42 @@ i965_read_lut_10p6(const struct intel_crtc_state *crtc_state) if (IS_ERR(blob)) return NULL; - blob_data = blob->data; + lut = blob->data; for (i = 0; i < lut_size - 1; i++) { - val1 = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 0)); - val2 = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 1)); - - blob_data[i].red = REG_FIELD_GET(PALETTE_RED_MASK, val2) << 8 | - REG_FIELD_GET(PALETTE_RED_MASK, val1); - blob_data[i].green = REG_FIELD_GET(PALETTE_GREEN_MASK, val2) << 8 | - REG_FIELD_GET(PALETTE_GREEN_MASK, val1); - blob_data[i].blue = REG_FIELD_GET(PALETTE_BLUE_MASK, val2) << 8 | - REG_FIELD_GET(PALETTE_BLUE_MASK, val1); + u32 ldw = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 0)); + u32 udw = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 1)); + + i965_lut_10p6_pack(&lut[i], ldw, udw); } - blob_data[i].red = REG_FIELD_GET(PIPEGCMAX_RGB_MASK, - intel_de_read(dev_priv, PIPEGCMAX(pipe, 0))); - blob_data[i].green = REG_FIELD_GET(PIPEGCMAX_RGB_MASK, - intel_de_read(dev_priv, PIPEGCMAX(pipe, 1))); - blob_data[i].blue = REG_FIELD_GET(PIPEGCMAX_RGB_MASK, - intel_de_read(dev_priv, PIPEGCMAX(pipe, 2))); + lut[i].red = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 0))); + lut[i].green = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 1))); + lut[i].blue = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 2))); return blob; } static void i965_read_luts(struct intel_crtc_state *crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + if (!crtc_state->gamma_enable) return; if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) - crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state); + crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc); else - crtc_state->hw.gamma_lut = i965_read_lut_10p6(crtc_state); + crtc_state->hw.gamma_lut = i965_read_lut_10p6(crtc); } -static struct drm_property_blob * -chv_read_cgm_lut(const struct intel_crtc_state *crtc_state) +static struct drm_property_blob *chv_read_cgm_gamma(struct intel_crtc *crtc) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size; + int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size; enum pipe pipe = 
crtc->pipe; struct drm_property_blob *blob; - struct drm_color_lut *blob_data; - u32 i, val; + struct drm_color_lut *lut; blob = drm_property_create_blob(&dev_priv->drm, sizeof(struct drm_color_lut) * lut_size, @@ -1785,18 +1814,13 @@ chv_read_cgm_lut(const struct intel_crtc_state *crtc_state) if (IS_ERR(blob)) return NULL; - blob_data = blob->data; + lut = blob->data; for (i = 0; i < lut_size; i++) { - val = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0)); - blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET( - CGM_PIPE_GAMMA_GREEN_MASK, val), 10); - blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET( - CGM_PIPE_GAMMA_BLUE_MASK, val), 10); - - val = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1)); - blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET( - CGM_PIPE_GAMMA_RED_MASK, val), 10); + u32 ldw = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0)); + u32 udw = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1)); + + chv_cgm_gamma_pack(&lut[i], ldw, udw); } return blob; @@ -1804,22 +1828,46 @@ chv_read_cgm_lut(const struct intel_crtc_state *crtc_state) static void chv_read_luts(struct intel_crtc_state *crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA) - crtc_state->hw.gamma_lut = chv_read_cgm_lut(crtc_state); + crtc_state->hw.gamma_lut = chv_read_cgm_gamma(crtc); else i965_read_luts(crtc_state); } -static struct drm_property_blob * -ilk_read_lut_10(const struct intel_crtc_state *crtc_state) +static struct drm_property_blob *ilk_read_lut_8(struct intel_crtc *crtc) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size; enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; - struct drm_color_lut *blob_data; - u32 i, val; + struct drm_color_lut *lut; + int i; + + blob = drm_property_create_blob(&dev_priv->drm, + sizeof(struct drm_color_lut) * LEGACY_LUT_LENGTH, + NULL); + if (IS_ERR(blob)) + return NULL; + + lut = blob->data; + + for (i = 0; i < LEGACY_LUT_LENGTH; i++) { + u32 val = intel_de_read(dev_priv, LGC_PALETTE(pipe, i)); + + i9xx_lut_8_pack(&lut[i], val); + } + + return blob; +} + +static struct drm_property_blob *ilk_read_lut_10(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size; + enum pipe pipe = crtc->pipe; + struct drm_property_blob *blob; + struct drm_color_lut *lut; blob = drm_property_create_blob(&dev_priv->drm, sizeof(struct drm_color_lut) * lut_size, @@ -1827,17 +1875,12 @@ ilk_read_lut_10(const struct intel_crtc_state *crtc_state) if (IS_ERR(blob)) return NULL; - blob_data = blob->data; + lut = blob->data; for (i = 0; i < lut_size; i++) { - val = intel_de_read(dev_priv, PREC_PALETTE(pipe, i)); - - blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET( - PREC_PALETTE_RED_MASK, val), 10); - blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET( - PREC_PALETTE_GREEN_MASK, val), 10); - blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET( - PREC_PALETTE_BLUE_MASK, val), 10); + u32 val = intel_de_read(dev_priv, PREC_PALETTE(pipe, i)); + + ilk_lut_10_pack(&lut[i], val); } return blob; @@ -1845,6 +1888,8 @@ ilk_read_lut_10(const struct intel_crtc_state *crtc_state) static void ilk_read_luts(struct intel_crtc_state *crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + if (!crtc_state->gamma_enable) return; @@ 
-1852,21 +1897,19 @@ static void ilk_read_luts(struct intel_crtc_state *crtc_state) return; if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) - crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state); + crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc); else - crtc_state->hw.gamma_lut = ilk_read_lut_10(crtc_state); + crtc_state->hw.gamma_lut = ilk_read_lut_10(crtc); } -static struct drm_property_blob * -glk_read_lut_10(const struct intel_crtc_state *crtc_state, u32 prec_index) +static struct drm_property_blob *glk_read_lut_10(struct intel_crtc *crtc, + u32 prec_index) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - int hw_lut_size = ivb_lut_10_size(prec_index); + int i, hw_lut_size = ivb_lut_10_size(prec_index); enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; - struct drm_color_lut *blob_data; - u32 i, val; + struct drm_color_lut *lut; blob = drm_property_create_blob(&dev_priv->drm, sizeof(struct drm_color_lut) * hw_lut_size, @@ -1874,20 +1917,15 @@ glk_read_lut_10(const struct intel_crtc_state *crtc_state, u32 prec_index) if (IS_ERR(blob)) return NULL; - blob_data = blob->data; + lut = blob->data; intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), prec_index | PAL_PREC_AUTO_INCREMENT); for (i = 0; i < hw_lut_size; i++) { - val = intel_de_read(dev_priv, PREC_PAL_DATA(pipe)); - - blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET( - PREC_PAL_DATA_RED_MASK, val), 10); - blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET( - PREC_PAL_DATA_GREEN_MASK, val), 10); - blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET( - PREC_PAL_DATA_BLUE_MASK, val), 10); + u32 val = intel_de_read(dev_priv, PREC_PAL_DATA(pipe)); + + ilk_lut_10_pack(&lut[i], val); } intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0); @@ -1897,13 +1935,15 @@ glk_read_lut_10(const struct intel_crtc_state *crtc_state, u32 prec_index) static void glk_read_luts(struct intel_crtc_state *crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + if (!crtc_state->gamma_enable) return; if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) - crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state); + crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc); else - crtc_state->hw.gamma_lut = glk_read_lut_10(crtc_state, PAL_PREC_INDEX_VALUE(0)); + crtc_state->hw.gamma_lut = glk_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0)); } void intel_color_init(struct intel_crtc *crtc) diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index f49c98f6cb7e3e63ed66868eeacb71d5e5bf3698..78f9b6cde810148dbef367eff5693eb7646531d7 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -32,7 +32,6 @@ #include #include #include -#include #include "i915_drv.h" #include "intel_connector.h" diff --git a/drivers/gpu/drm/i915/display/intel_csr.c b/drivers/gpu/drm/i915/display/intel_csr.c index 57320c12839fca51330724215c7a0cf79a55cfeb..3112572cfb7dbc3875c12305f7d1cb5458280043 100644 --- a/drivers/gpu/drm/i915/display/intel_csr.c +++ b/drivers/gpu/drm/i915/display/intel_csr.c @@ -40,8 +40,8 @@ #define GEN12_CSR_MAX_FW_SIZE ICL_CSR_MAX_FW_SIZE -#define TGL_CSR_PATH "i915/tgl_dmc_ver2_04.bin" -#define TGL_CSR_VERSION_REQUIRED CSR_VERSION(2, 4) +#define TGL_CSR_PATH "i915/tgl_dmc_ver2_06.bin" +#define TGL_CSR_VERSION_REQUIRED CSR_VERSION(2, 6) #define TGL_CSR_MAX_FW_SIZE 0x6000 MODULE_FIRMWARE(TGL_CSR_PATH); diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c 
b/drivers/gpu/drm/i915/display/intel_ddi.c index 9f7d1d7189ae916a4b4cebc8020732027b8824c1..73d0f4648c06a7f473743bed78715f75caad96d8 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -1325,164 +1325,6 @@ intel_ddi_get_crtc_encoder(struct intel_crtc *crtc) return ret; } -static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, - i915_reg_t reg) -{ - int refclk; - int n, p, r; - u32 wrpll; - - wrpll = intel_de_read(dev_priv, reg); - switch (wrpll & WRPLL_REF_MASK) { - case WRPLL_REF_SPECIAL_HSW: - /* - * muxed-SSC for BDW. - * non-SSC for non-ULT HSW. Check FUSE_STRAP3 - * for the non-SSC reference frequency. - */ - if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) { - if (intel_de_read(dev_priv, FUSE_STRAP3) & HSW_REF_CLK_SELECT) - refclk = 24; - else - refclk = 135; - break; - } - /* fall through */ - case WRPLL_REF_PCH_SSC: - /* - * We could calculate spread here, but our checking - * code only cares about 5% accuracy, and spread is a max of - * 0.5% downspread. - */ - refclk = 135; - break; - case WRPLL_REF_LCPLL: - refclk = 2700; - break; - default: - MISSING_CASE(wrpll); - return 0; - } - - r = wrpll & WRPLL_DIVIDER_REF_MASK; - p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT; - n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT; - - /* Convert to KHz, p & r have a fixed point portion */ - return (refclk * n * 100) / (p * r); -} - -static int skl_calc_wrpll_link(const struct intel_dpll_hw_state *pll_state) -{ - u32 p0, p1, p2, dco_freq; - - p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK; - p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK; - - if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1)) - p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8; - else - p1 = 1; - - - switch (p0) { - case DPLL_CFGCR2_PDIV_1: - p0 = 1; - break; - case DPLL_CFGCR2_PDIV_2: - p0 = 2; - break; - case DPLL_CFGCR2_PDIV_3: - p0 = 3; - break; - case DPLL_CFGCR2_PDIV_7: - p0 = 7; - break; - } - - switch (p2) { - case DPLL_CFGCR2_KDIV_5: - p2 = 5; - break; - case DPLL_CFGCR2_KDIV_2: - p2 = 2; - break; - case DPLL_CFGCR2_KDIV_3: - p2 = 3; - break; - case DPLL_CFGCR2_KDIV_1: - p2 = 1; - break; - } - - dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) - * 24 * 1000; - - dco_freq += (((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) - * 24 * 1000) / 0x8000; - - if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0)) - return 0; - - return dco_freq / (p0 * p1 * p2 * 5); -} - -int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv, - struct intel_dpll_hw_state *pll_state) -{ - u32 p0, p1, p2, dco_freq, ref_clock; - - p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK; - p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK; - - if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1)) - p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >> - DPLL_CFGCR1_QDIV_RATIO_SHIFT; - else - p1 = 1; - - - switch (p0) { - case DPLL_CFGCR1_PDIV_2: - p0 = 2; - break; - case DPLL_CFGCR1_PDIV_3: - p0 = 3; - break; - case DPLL_CFGCR1_PDIV_5: - p0 = 5; - break; - case DPLL_CFGCR1_PDIV_7: - p0 = 7; - break; - } - - switch (p2) { - case DPLL_CFGCR1_KDIV_1: - p2 = 1; - break; - case DPLL_CFGCR1_KDIV_2: - p2 = 2; - break; - case DPLL_CFGCR1_KDIV_3: - p2 = 3; - break; - } - - ref_clock = cnl_hdmi_pll_ref_clock(dev_priv); - - dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) - * ref_clock; - - dco_freq += (((pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >> - DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000; - - if 
(drm_WARN_ON(&dev_priv->drm, p0 == 0 || p1 == 0 || p2 == 0)) - return 0; - - return dco_freq / (p0 * p1 * p2 * 5); -} - static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv, enum port port) { @@ -1505,77 +1347,6 @@ static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv, } } -static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv, - const struct intel_dpll_hw_state *pll_state) -{ - u32 m1, m2_int, m2_frac, div1, div2, ref_clock; - u64 tmp; - - ref_clock = dev_priv->cdclk.hw.ref; - - if (INTEL_GEN(dev_priv) >= 12) { - m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK; - m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT; - m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK; - - if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) { - m2_frac = pll_state->mg_pll_bias & - DKL_PLL_BIAS_FBDIV_FRAC_MASK; - m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT; - } else { - m2_frac = 0; - } - } else { - m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK; - m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK; - - if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) { - m2_frac = pll_state->mg_pll_div0 & - MG_PLL_DIV0_FBDIV_FRAC_MASK; - m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT; - } else { - m2_frac = 0; - } - } - - switch (pll_state->mg_clktop2_hsclkctl & - MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) { - case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2: - div1 = 2; - break; - case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3: - div1 = 3; - break; - case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5: - div1 = 5; - break; - case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7: - div1 = 7; - break; - default: - MISSING_CASE(pll_state->mg_clktop2_hsclkctl); - return 0; - } - - div2 = (pll_state->mg_clktop2_hsclkctl & - MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >> - MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT; - - /* div2 value of 0 is same as 1 means no div */ - if (div2 == 0) - div2 = 1; - - /* - * Adjust the original formula to delay the division by 2^22 in order to - * minimize possible rounding errors. 
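The rounding comment above deserves unpacking: m2_frac carries 22 fractional bits, so shifting it down before the multiplies would discard the fraction entirely, while multiplying by m1 and ref_clock first keeps the intermediate in u64 and loses at most one LSB at the final shift. A freestanding illustration with hypothetical operand values:

u64 m1 = 2, m2_frac = 0x155555, ref_clock = 38400;	/* 0x155555 / 2^22 ~ 0.333 */

u64 scale_first = (m1 * m2_frac * ref_clock) >> 22;	/* 25599, ~2 * 0.333 * 38400 */
u64 shift_first = m1 * (m2_frac >> 22) * ref_clock;	/* 0: the fraction is lost */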
- */ - tmp = (u64)m1 * m2_int * ref_clock + - (((u64)m1 * m2_frac * ref_clock) >> 22); - tmp = div_u64(tmp, 5 * div1 * div2); - - return tmp; -} - static void ddi_dotclock_get(struct intel_crtc_state *pipe_config) { int dotclock; @@ -1601,215 +1372,22 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config) pipe_config->hw.adjusted_mode.crtc_clock = dotclock; } -static void icl_ddi_clock_get(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state; - enum port port = encoder->port; - enum phy phy = intel_port_to_phy(dev_priv, port); - int link_clock; - - if (intel_phy_is_combo(dev_priv, phy)) { - link_clock = cnl_calc_wrpll_link(dev_priv, pll_state); - } else { - enum intel_dpll_id pll_id = intel_get_shared_dpll_id(dev_priv, - pipe_config->shared_dpll); - - if (pll_id == DPLL_ID_ICL_TBTPLL) - link_clock = icl_calc_tbt_pll_link(dev_priv, port); - else - link_clock = icl_calc_mg_pll_link(dev_priv, pll_state); - } - - pipe_config->port_clock = link_clock; - - ddi_dotclock_get(pipe_config); -} - -static void cnl_ddi_clock_get(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state; - int link_clock; - - if (pll_state->cfgcr0 & DPLL_CFGCR0_HDMI_MODE) { - link_clock = cnl_calc_wrpll_link(dev_priv, pll_state); - } else { - link_clock = pll_state->cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK; - - switch (link_clock) { - case DPLL_CFGCR0_LINK_RATE_810: - link_clock = 81000; - break; - case DPLL_CFGCR0_LINK_RATE_1080: - link_clock = 108000; - break; - case DPLL_CFGCR0_LINK_RATE_1350: - link_clock = 135000; - break; - case DPLL_CFGCR0_LINK_RATE_1620: - link_clock = 162000; - break; - case DPLL_CFGCR0_LINK_RATE_2160: - link_clock = 216000; - break; - case DPLL_CFGCR0_LINK_RATE_2700: - link_clock = 270000; - break; - case DPLL_CFGCR0_LINK_RATE_3240: - link_clock = 324000; - break; - case DPLL_CFGCR0_LINK_RATE_4050: - link_clock = 405000; - break; - default: - drm_WARN(&dev_priv->drm, 1, "Unsupported link rate\n"); - break; - } - link_clock *= 2; - } - - pipe_config->port_clock = link_clock; - - ddi_dotclock_get(pipe_config); -} - -static void skl_ddi_clock_get(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config) -{ - struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state; - int link_clock; - - /* - * ctrl1 register is already shifted for each pll, just use 0 to get - * the internal shift for each field - */ - if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0)) { - link_clock = skl_calc_wrpll_link(pll_state); - } else { - link_clock = pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0); - link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(0); - - switch (link_clock) { - case DPLL_CTRL1_LINK_RATE_810: - link_clock = 81000; - break; - case DPLL_CTRL1_LINK_RATE_1080: - link_clock = 108000; - break; - case DPLL_CTRL1_LINK_RATE_1350: - link_clock = 135000; - break; - case DPLL_CTRL1_LINK_RATE_1620: - link_clock = 162000; - break; - case DPLL_CTRL1_LINK_RATE_2160: - link_clock = 216000; - break; - case DPLL_CTRL1_LINK_RATE_2700: - link_clock = 270000; - break; - default: - drm_WARN(encoder->base.dev, 1, - "Unsupported link rate\n"); - break; - } - link_clock *= 2; - } - - pipe_config->port_clock = link_clock; - - ddi_dotclock_get(pipe_config); -} - -static void hsw_ddi_clock_get(struct 
intel_encoder *encoder, - struct intel_crtc_state *pipe_config) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - int link_clock = 0; - u32 val, pll; - - val = hsw_pll_to_ddi_pll_sel(pipe_config->shared_dpll); - switch (val & PORT_CLK_SEL_MASK) { - case PORT_CLK_SEL_LCPLL_810: - link_clock = 81000; - break; - case PORT_CLK_SEL_LCPLL_1350: - link_clock = 135000; - break; - case PORT_CLK_SEL_LCPLL_2700: - link_clock = 270000; - break; - case PORT_CLK_SEL_WRPLL1: - link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(0)); - break; - case PORT_CLK_SEL_WRPLL2: - link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(1)); - break; - case PORT_CLK_SEL_SPLL: - pll = intel_de_read(dev_priv, SPLL_CTL) & SPLL_FREQ_MASK; - if (pll == SPLL_FREQ_810MHz) - link_clock = 81000; - else if (pll == SPLL_FREQ_1350MHz) - link_clock = 135000; - else if (pll == SPLL_FREQ_2700MHz) - link_clock = 270000; - else { - drm_WARN(&dev_priv->drm, 1, "bad spll freq\n"); - return; - } - break; - default: - drm_WARN(&dev_priv->drm, 1, "bad port clock sel\n"); - return; - } - - pipe_config->port_clock = link_clock * 2; - - ddi_dotclock_get(pipe_config); -} - -static int bxt_calc_pll_link(const struct intel_dpll_hw_state *pll_state) -{ - struct dpll clock; - - clock.m1 = 2; - clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22; - if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE) - clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK; - clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT; - clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT; - clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT; - - return chv_calc_dpll_params(100000, &clock); -} - -static void bxt_ddi_clock_get(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config) -{ - pipe_config->port_clock = - bxt_calc_pll_link(&pipe_config->dpll_hw_state); - - ddi_dotclock_get(pipe_config); -} - static void intel_ddi_clock_get(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); - if (INTEL_GEN(dev_priv) >= 11) - icl_ddi_clock_get(encoder, pipe_config); - else if (IS_CANNONLAKE(dev_priv)) - cnl_ddi_clock_get(encoder, pipe_config); - else if (IS_GEN9_LP(dev_priv)) - bxt_ddi_clock_get(encoder, pipe_config); - else if (IS_GEN9_BC(dev_priv)) - skl_ddi_clock_get(encoder, pipe_config); - else if (INTEL_GEN(dev_priv) <= 8) - hsw_ddi_clock_get(encoder, pipe_config); + if (intel_phy_is_tc(dev_priv, phy) && + intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll) == + DPLL_ID_ICL_TBTPLL) + pipe_config->port_clock = icl_calc_tbt_pll_link(dev_priv, + encoder->port); + else + pipe_config->port_clock = + intel_dpll_get_freq(dev_priv, pipe_config->shared_dpll); + + ddi_dotclock_get(pipe_config); } void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state, @@ -3049,7 +2627,7 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder, enum phy phy = intel_port_to_phy(dev_priv, encoder->port); u32 val; - mutex_lock(&dev_priv->dpll_lock); + mutex_lock(&dev_priv->dpll.lock); val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); drm_WARN_ON(&dev_priv->drm, @@ -3075,7 +2653,7 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder, val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, phy); intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val); - mutex_unlock(&dev_priv->dpll_lock); + mutex_unlock(&dev_priv->dpll.lock); } static void 
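The rewritten intel_ddi_clock_get() above is the payoff of this refactor: all the per-platform *_ddi_clock_get() variants just deleted collapse into a single intel_dpll_get_freq() call, with the TBT PLL kept as the lone special case because its frequency depends on the port rather than on readable PLL state. intel_dpll_get_freq() itself is introduced outside this section; given the .get_freq hooks added to intel_shared_dpll_funcs later in the patch, it is plausibly a plain vtable dispatch, sketched here rather than quoted:

int intel_dpll_get_freq(struct drm_i915_private *i915,
			const struct intel_shared_dpll *pll)
{
	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
		return 0;

	return pll->info->funcs->get_freq(i915, pll);
}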
icl_unmap_plls_to_ports(struct intel_encoder *encoder) @@ -3084,13 +2662,13 @@ static void icl_unmap_plls_to_ports(struct intel_encoder *encoder) enum phy phy = intel_port_to_phy(dev_priv, encoder->port); u32 val; - mutex_lock(&dev_priv->dpll_lock); + mutex_lock(&dev_priv->dpll.lock); val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy); intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val); - mutex_unlock(&dev_priv->dpll_lock); + mutex_unlock(&dev_priv->dpll.lock); } static void icl_sanitize_port_clk_off(struct drm_i915_private *dev_priv, @@ -3189,7 +2767,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder, if (drm_WARN_ON(&dev_priv->drm, !pll)) return; - mutex_lock(&dev_priv->dpll_lock); + mutex_lock(&dev_priv->dpll.lock); if (INTEL_GEN(dev_priv) >= 11) { if (!intel_phy_is_combo(dev_priv, phy)) @@ -3233,7 +2811,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder, hsw_pll_to_ddi_pll_sel(pll)); } - mutex_unlock(&dev_priv->dpll_lock); + mutex_unlock(&dev_priv->dpll.lock); } static void intel_ddi_clk_disable(struct intel_encoder *encoder) @@ -3987,8 +3565,9 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder, if (!intel_hdmi_handle_sink_scrambling(encoder, connector, crtc_state->hdmi_high_tmds_clock_ratio, crtc_state->hdmi_scrambling)) - DRM_ERROR("[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n", - connector->base.id, connector->name); + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Failed to configure sink " + "scrambling/TMDS bit clock ratio\n", + connector->base.id, connector->name); /* Display WA #1143: skl,kbl,cfl */ if (IS_GEN9_BC(dev_priv)) { diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h index 167c6579d972e472a28462f03edabdd4a6b11aee..55fd72b901fe4c79ae760461449e412477784e27 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.h +++ b/drivers/gpu/drm/i915/display/intel_ddi.h @@ -6,8 +6,6 @@ #ifndef __INTEL_DDI_H__ #define __INTEL_DDI_H__ -#include - #include "intel_display.h" struct drm_connector_state; @@ -47,7 +45,5 @@ u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder, int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder, bool enable); void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder); -int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv, - struct intel_dpll_hw_state *state); #endif /* __INTEL_DDI_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 3031e64ee518c37355efb31c5089f12a5aac6b66..346846609f45c9aa522be701d3aaddfb3484d10d 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -41,7 +41,6 @@ #include #include #include -#include #include "display/intel_crt.h" #include "display/intel_ddi.h" @@ -2720,9 +2719,10 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, /* * We assume the primary plane for pipe A has - * the highest stride limits of them all. + * the highest stride limits of them all, + * if in case pipe A is disabled, use the first pipe from pipe_mask. 
*/ - crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A); + crtc = intel_get_first_crtc(dev_priv); if (!crtc) return 0; @@ -9542,7 +9542,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) } /* Check if any DPLLs are using the SSC source */ - for (i = 0; i < dev_priv->num_shared_dpll; i++) { + for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) { u32 temp = intel_de_read(dev_priv, PCH_DPLL(i)); if (!(temp & DPLL_VCO_ENABLE)) @@ -10129,6 +10129,9 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state) BIT(PLANE_CURSOR))) == 0) val |= PIPEMISC_HDR_MODE_PRECISION; + if (INTEL_GEN(dev_priv) >= 12) + val |= PIPEMISC_PIXEL_ROUNDING_TRUNC; + intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val); } @@ -14299,11 +14302,11 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv, if (new_crtc_state->hw.active) I915_STATE_WARN(!(pll->active_mask & crtc_mask), "pll active mismatch (expected pipe %c in active mask 0x%02x)\n", - pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask); + pipe_name(crtc->pipe), pll->active_mask); else I915_STATE_WARN(pll->active_mask & crtc_mask, "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n", - pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask); + pipe_name(crtc->pipe), pll->active_mask); I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask), "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n", @@ -14332,10 +14335,10 @@ verify_shared_dpll_state(struct intel_crtc *crtc, I915_STATE_WARN(pll->active_mask & crtc_mask, "pll active mismatch (didn't expect pipe %c in active mask)\n", - pipe_name(drm_crtc_index(&crtc->base))); + pipe_name(crtc->pipe)); I915_STATE_WARN(pll->state.crtc_mask & crtc_mask, "pll enabled crtcs mismatch (found %x in enabled mask)\n", - pipe_name(drm_crtc_index(&crtc->base))); + pipe_name(crtc->pipe)); } } @@ -14359,8 +14362,10 @@ verify_disabled_dpll_state(struct drm_i915_private *dev_priv) { int i; - for (i = 0; i < dev_priv->num_shared_dpll; i++) - verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL); + for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) + verify_single_dpll_state(dev_priv, + &dev_priv->dpll.shared_dplls[i], + NULL, NULL); } static void @@ -14743,8 +14748,8 @@ static int intel_atomic_check(struct drm_device *dev, /* Catch I915_MODE_FLAG_INHERITED */ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - if (new_crtc_state->hw.mode.private_flags != - old_crtc_state->hw.mode.private_flags) + if (new_crtc_state->uapi.mode.private_flags != + old_crtc_state->uapi.mode.private_flags) new_crtc_state->uapi.mode_changed = true; } @@ -15318,7 +15323,6 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) struct intel_crtc *crtc; struct intel_crtc_state *old_crtc_state, *new_crtc_state; struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; - const u8 num_pipes = INTEL_NUM_PIPES(dev_priv); u8 update_pipes = 0, modeset_pipes = 0; int i; @@ -15355,7 +15359,7 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) continue; if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, - entries, num_pipes, pipe)) + entries, I915_MAX_PIPES, pipe)) continue; entries[pipe] = new_crtc_state->wm.skl.ddb; @@ -15393,7 +15397,7 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) continue; drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, - entries, num_pipes, pipe)); + entries, I915_MAX_PIPES, pipe)); entries[pipe] = 
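Two changes above share one motivation: pipe_mask may have holes, so pipe A is not guaranteed to exist and the highest valid pipe index can exceed the number of pipes. intel_plane_fb_max_stride() therefore falls back to intel_get_first_crtc() instead of hardcoding PIPE_A, and the skl DDB overlap checks size and walk entries[] by I915_MAX_PIPES rather than INTEL_NUM_PIPES(), since entries[] is indexed by enum pipe, not by a dense counter. With a hypothetical fuse-out:

/*
 * pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), pipe A fused off:
 * INTEL_NUM_PIPES() == 2, but the valid indices are 1 and 2, so an
 * entries[2] array walked as 0..1 would both under-allocate and skip
 * PIPE_C; entries[I915_MAX_PIPES] walked 0..I915_MAX_PIPES-1 cannot.
 */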
new_crtc_state->wm.skl.ddb; modeset_pipes &= ~BIT(pipe); @@ -15428,7 +15432,7 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) continue; drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, - entries, num_pipes, pipe)); + entries, I915_MAX_PIPES, pipe)); entries[pipe] = new_crtc_state->wm.skl.ddb; modeset_pipes &= ~BIT(pipe); @@ -16320,7 +16324,6 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) struct intel_plane *plane; const struct drm_plane_funcs *plane_funcs; unsigned int supported_rotations; - unsigned int possible_crtcs; const u32 *formats; int num_formats; int ret, zpos; @@ -16401,18 +16404,16 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) plane->get_hw_state = i9xx_plane_get_hw_state; plane->check_plane = i9xx_plane_check; - possible_crtcs = BIT(pipe); - if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, - possible_crtcs, plane_funcs, + 0, plane_funcs, formats, num_formats, i9xx_format_modifiers, DRM_PLANE_TYPE_PRIMARY, "primary %c", pipe_name(pipe)); else ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, - possible_crtcs, plane_funcs, + 0, plane_funcs, formats, num_formats, i9xx_format_modifiers, DRM_PLANE_TYPE_PRIMARY, @@ -16454,7 +16455,6 @@ static struct intel_plane * intel_cursor_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) { - unsigned int possible_crtcs; struct intel_plane *cursor; int ret, zpos; @@ -16487,10 +16487,8 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv, if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv)) cursor->cursor.size = ~0; - possible_crtcs = BIT(pipe); - ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base, - possible_crtcs, &intel_cursor_plane_funcs, + 0, &intel_cursor_plane_funcs, intel_cursor_formats, ARRAY_SIZE(intel_cursor_formats), cursor_format_modifiers, @@ -16619,6 +16617,18 @@ static void intel_crtc_free(struct intel_crtc *crtc) kfree(crtc); } +static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv) +{ + struct intel_plane *plane; + + for_each_intel_plane(&dev_priv->drm, plane) { + struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, + plane->pipe); + + plane->base.possible_crtcs = drm_crtc_mask(&crtc->base); + } +} + static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) { struct intel_plane *primary, *cursor; @@ -16697,6 +16707,8 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) intel_color_init(crtc); + intel_crtc_crc_init(crtc); + drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe); return 0; @@ -17785,11 +17797,9 @@ static void plane_config_fini(struct intel_initial_plane_config *plane_config) i915_vma_put(plane_config->vma); } -int intel_modeset_init(struct drm_i915_private *i915) +/* part #1: call before irq install */ +int intel_modeset_init_noirq(struct drm_i915_private *i915) { - struct drm_device *dev = &i915->drm; - enum pipe pipe; - struct intel_crtc *crtc; int ret; i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0); @@ -17814,6 +17824,17 @@ int intel_modeset_init(struct drm_i915_private *i915) intel_fbc_init(i915); + return 0; +} + +/* part #2: call after irq install */ +int intel_modeset_init(struct drm_i915_private *i915) +{ + struct drm_device *dev = &i915->drm; + enum pipe pipe; + struct intel_crtc *crtc; + int ret; + intel_init_pm(i915); 
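intel_modeset_init() is split above so that setup with no interrupt dependency (the modeset workqueue, FBC, and friends) runs before the IRQ install and the vblank-dependent remainder after it. The actual call sites live in i915_drv.c, outside this section; a plausible probe-path ordering under that split, with intel_irq_install() as the assumed in-between step:

ret = intel_modeset_init_noirq(i915);	/* part #1: no IRQs needed */
if (ret)
	goto err;

ret = intel_irq_install(i915);
if (ret)
	goto err;

ret = intel_modeset_init(i915);		/* part #2: may rely on IRQs */
if (ret)
	goto err;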
intel_panel_sanitize_ssc(i915); @@ -17834,6 +17855,7 @@ int intel_modeset_init(struct drm_i915_private *i915) } } + intel_plane_possible_crtcs_init(i915); intel_shared_dpll_init(dev); intel_update_fdi_pll_freq(i915); @@ -18311,7 +18333,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) struct intel_connector *connector; struct drm_connector_list_iter conn_iter; u8 active_pipes = 0; - int i; for_each_intel_crtc(dev, crtc) { struct intel_crtc_state *crtc_state = @@ -18340,33 +18361,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) readout_plane_state(dev_priv); - for (i = 0; i < dev_priv->num_shared_dpll; i++) { - struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; - - pll->on = pll->info->funcs->get_hw_state(dev_priv, pll, - &pll->state.hw_state); - - if (IS_ELKHARTLAKE(dev_priv) && pll->on && - pll->info->id == DPLL_ID_EHL_DPLL4) { - pll->wakeref = intel_display_power_get(dev_priv, - POWER_DOMAIN_DPLL_DC_OFF); - } - - pll->state.crtc_mask = 0; - for_each_intel_crtc(dev, crtc) { - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - - if (crtc_state->hw.active && - crtc_state->shared_dpll == pll) - pll->state.crtc_mask |= 1 << crtc->pipe; - } - pll->active_mask = pll->state.crtc_mask; - - drm_dbg_kms(&dev_priv->drm, - "%s hw state readout: crtc_mask 0x%08x, on %i\n", - pll->info->name, pll->state.crtc_mask, pll->on); - } + intel_dpll_readout_hw_state(dev_priv); for_each_intel_encoder(dev, encoder) { pipe = 0; @@ -18623,7 +18618,6 @@ intel_modeset_setup_hw_state(struct drm_device *dev, struct intel_encoder *encoder; struct intel_crtc *crtc; intel_wakeref_t wakeref; - int i; wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); @@ -18676,19 +18670,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev, intel_modeset_update_connector_atomic_state(dev); - for (i = 0; i < dev_priv->num_shared_dpll; i++) { - struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; - - if (!pll->on || pll->active_mask) - continue; - - drm_dbg_kms(&dev_priv->drm, - "%s enabled but not in use, disabling\n", - pll->info->name); - - pll->info->funcs->disable(dev_priv, pll); - pll->on = false; - } + intel_dpll_sanitize_state(dev_priv); if (IS_G4X(dev_priv)) { g4x_wm_get_hw_state(dev_priv); @@ -18820,6 +18802,15 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915) #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) +static bool +has_transcoder(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder) +{ + if (cpu_transcoder == TRANSCODER_EDP) + return HAS_TRANSCODER_EDP(dev_priv); + else + return INTEL_INFO(dev_priv)->pipe_mask & BIT(cpu_transcoder); +} + struct intel_display_error_state { u32 power_well_driver; @@ -18928,7 +18919,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv) for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) { enum transcoder cpu_transcoder = transcoders[i]; - if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder]) + if (!has_transcoder(dev_priv, cpu_transcoder)) continue; error->transcoder[i].available = true; diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index f92efbbec838272775edbd254bd8a0cff5f25582..adb1225a34809cee9e67b84e76d093a6232a0113 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -26,7 +26,6 @@ #define _INTEL_DISPLAY_H_ #include -#include enum link_m_n_set; struct dpll; @@ -40,6 +39,7 @@ struct drm_framebuffer; struct 
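The DPLL readout loop deleted above does not vanish: intel_dpll_readout_hw_state() and intel_dpll_sanitize_state() move that logic into intel_dpll_mgr.c next to the rest of the shared-DPLL code. Their bodies are not in this section; the plausible shape, with readout_dpll_hw_state() as an assumed per-PLL helper absorbing the deleted loop body:

void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
		readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]);
}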
drm_i915_error_state_buf; struct drm_i915_gem_object; struct drm_i915_private; +struct drm_mode_fb_cmd2; struct drm_modeset_acquire_ctx; struct drm_plane; struct drm_plane_state; @@ -47,6 +47,7 @@ struct i915_ggtt_view; struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; +struct intel_crtc_state; struct intel_digital_port; struct intel_dp; struct intel_encoder; @@ -55,7 +56,6 @@ struct intel_plane; struct intel_plane_state; struct intel_remapped_info; struct intel_rotation_info; -struct intel_crtc_state; enum i915_gpio { GPIOA, @@ -313,10 +313,11 @@ enum phy_fia { }; #define for_each_pipe(__dev_priv, __p) \ - for ((__p) = 0; (__p) < INTEL_NUM_PIPES(__dev_priv); (__p)++) + for ((__p) = 0; (__p) < I915_MAX_PIPES; (__p)++) \ + for_each_if(INTEL_INFO(__dev_priv)->pipe_mask & BIT(__p)) #define for_each_pipe_masked(__dev_priv, __p, __mask) \ - for ((__p) = 0; (__p) < INTEL_NUM_PIPES(__dev_priv); (__p)++) \ + for_each_pipe(__dev_priv, __p) \ for_each_if((__mask) & BIT(__p)) #define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \ @@ -614,6 +615,7 @@ intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info, /* modesetting */ void intel_modeset_init_hw(struct drm_i915_private *i915); +int intel_modeset_init_noirq(struct drm_i915_private *i915); int intel_modeset_init(struct drm_i915_private *i915); void intel_modeset_driver_remove(struct drm_i915_private *i915); void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index 46954cc7b6c014133022e721adf2ec82315fd1a4..1e6eb7f2f72dbf7afa3d0be9a466c38b8cce0225 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -920,8 +920,13 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused) int i; drm_modeset_lock_all(dev); - for (i = 0; i < dev_priv->num_shared_dpll; i++) { - struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; + + seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n", + dev_priv->dpll.ref_clks.nssc, + dev_priv->dpll.ref_clks.ssc); + + for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) { + struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i]; seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name, pll->info->id); diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 6e25a13171615f35e9e4392d916a0b726dad029b..246e406bb385edbcf0d44d1c970c037136eff2b5 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -939,11 +939,17 @@ unlock: static void assert_can_enable_dc5(struct drm_i915_private *dev_priv) { - bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv, - SKL_DISP_PW_2); + enum i915_power_well_id high_pg; - drm_WARN_ONCE(&dev_priv->drm, pg2_enabled, - "PG2 not disabled to enable DC5.\n"); + /* Power wells at this level and above must be disabled for DC5 entry */ + if (INTEL_GEN(dev_priv) >= 12) + high_pg = TGL_DISP_PW_3; + else + high_pg = SKL_DISP_PW_2; + + drm_WARN_ONCE(&dev_priv->drm, + intel_display_power_well_is_enabled(dev_priv, high_pg), + "Power wells above platform's DC5 limit still enabled.\n"); drm_WARN_ONCE(&dev_priv->drm, (intel_de_read(dev_priv, DC_STATE_EN) & @@ -2740,7 +2746,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, BIT_ULL(POWER_DOMAIN_INIT)) #define 
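The reworked for_each_pipe() above walks the full 0..I915_MAX_PIPES-1 range and filters through pipe_mask, so fused-off pipes are skipped without assuming the surviving pipes are contiguous, and for_each_pipe_masked() becomes a simple refinement of it. Callers are unchanged; with a hypothetical pipe_mask of BIT(PIPE_A) | BIT(PIPE_C):

enum pipe pipe;

for_each_pipe(dev_priv, pipe)
	drm_dbg_kms(&dev_priv->drm, "pipe %c present\n", pipe_name(pipe));

/* logs pipes A and C; pipe B never enters the loop body */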
TGL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ - TGL_PW_2_POWER_DOMAINS | \ + TGL_PW_3_POWER_DOMAINS | \ BIT_ULL(POWER_DOMAIN_MODESET) | \ BIT_ULL(POWER_DOMAIN_AUX_A) | \ BIT_ULL(POWER_DOMAIN_AUX_B) | \ @@ -3936,7 +3942,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .name = "power well 3", .domains = TGL_PW_3_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, + .id = TGL_DISP_PW_3, { .hsw.regs = &hsw_power_well_regs, .hsw.idx = ICL_PW_CTL_IDX_PW_3, diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index 601e000ffd0dc630357fab73d85b1aa621bf47e3..da64a5edae7ad14c75f3519bc14771ea8bf8f141 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -100,6 +100,7 @@ enum i915_power_well_id { SKL_DISP_PW_MISC_IO, SKL_DISP_PW_1, SKL_DISP_PW_2, + TGL_DISP_PW_3, SKL_DISP_DC_OFF, }; diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 0d8a643054648663256eb71ed82e90f1c5e2f03a..5e00e611f077f80a29fe048551f7b732f6b78a7c 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -39,7 +39,6 @@ #include #include #include -#include #include #include @@ -642,6 +641,14 @@ struct intel_crtc_scaler_state { /* Flag to use the scanline counter instead of the pixel counter */ #define I915_MODE_FLAG_USE_SCANLINE_COUNTER (1<<2) +struct intel_wm_level { + bool enable; + u32 pri_val; + u32 spr_val; + u32 cur_val; + u32 fbc_val; +}; + struct intel_pipe_wm { struct intel_wm_level wm[5]; bool fbc_wm_enabled; @@ -650,6 +657,14 @@ struct intel_pipe_wm { bool sprites_scaled; }; +struct skl_wm_level { + u16 min_ddb_alloc; + u16 plane_res_b; + u8 plane_res_l; + bool plane_en; + bool ignore_lines; +}; + struct skl_plane_wm { struct skl_wm_level wm[8]; struct skl_wm_level uv_wm[8]; @@ -1046,6 +1061,32 @@ struct intel_crtc_state { enum transcoder mst_master_transcoder; }; +enum intel_pipe_crc_source { + INTEL_PIPE_CRC_SOURCE_NONE, + INTEL_PIPE_CRC_SOURCE_PLANE1, + INTEL_PIPE_CRC_SOURCE_PLANE2, + INTEL_PIPE_CRC_SOURCE_PLANE3, + INTEL_PIPE_CRC_SOURCE_PLANE4, + INTEL_PIPE_CRC_SOURCE_PLANE5, + INTEL_PIPE_CRC_SOURCE_PLANE6, + INTEL_PIPE_CRC_SOURCE_PLANE7, + INTEL_PIPE_CRC_SOURCE_PIPE, + /* TV/DP on pre-gen5/vlv can't use the pipe source. 
*/ + INTEL_PIPE_CRC_SOURCE_TV, + INTEL_PIPE_CRC_SOURCE_DP_B, + INTEL_PIPE_CRC_SOURCE_DP_C, + INTEL_PIPE_CRC_SOURCE_DP_D, + INTEL_PIPE_CRC_SOURCE_AUTO, + INTEL_PIPE_CRC_SOURCE_MAX, +}; + +#define INTEL_PIPE_CRC_ENTRIES_NR 128 +struct intel_pipe_crc { + spinlock_t lock; + int skipped; + enum intel_pipe_crc_source source; +}; + struct intel_crtc { struct drm_crtc base; enum pipe pipe; @@ -1089,6 +1130,10 @@ struct intel_crtc { /* per pipe DSB related info */ struct intel_dsb dsb; + +#ifdef CONFIG_DEBUG_FS + struct intel_pipe_crc pipe_crc; +#endif }; struct intel_plane { @@ -1235,6 +1280,7 @@ struct intel_dp { int max_link_rate; /* sink or branch descriptor */ struct drm_dp_desc desc; + u32 edid_quirks; struct drm_dp_aux aux; u32 aux_busy_last_status; u8 train_set[4]; @@ -1406,9 +1452,18 @@ vlv_pipe_to_channel(enum pipe pipe) } } +static inline struct intel_crtc * +intel_get_first_crtc(struct drm_i915_private *dev_priv) +{ + return to_intel_crtc(drm_crtc_from_index(&dev_priv->drm, 0)); +} + static inline struct intel_crtc * intel_get_crtc_for_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) { + /* pipe_to_crtc_mapping may have hole on any of 3 display pipe system */ + drm_WARN_ON(&dev_priv->drm, + !(INTEL_INFO(dev_priv)->pipe_mask & BIT(pipe))); return dev_priv->pipe_to_crtc_mapping[pipe]; } @@ -1598,11 +1653,15 @@ intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state) (1 << INTEL_OUTPUT_DP_MST) | (1 << INTEL_OUTPUT_EDP)); } + static inline void intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe) { - drm_wait_one_vblank(&dev_priv->drm, pipe); + struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + + drm_crtc_wait_one_vblank(&crtc->base); } + static inline void intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, enum pipe pipe) { diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 0a5a9197f8f5d094082a11ea03b1416c00f9efed..0a417cd2af2bc02dd8f992b28b6b810c6f3e2232 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -40,7 +40,6 @@ #include #include #include -#include #include "i915_debugfs.h" #include "i915_drv.h" @@ -2399,7 +2398,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, struct intel_connector *intel_connector = intel_dp->attached_connector; struct intel_digital_connector_state *intel_conn_state = to_intel_digital_connector_state(conn_state); - bool constant_n = drm_dp_has_quirk(&intel_dp->desc, + bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0, DP_DPCD_QUIRK_CONSTANT_N); int ret = 0, output_bpp; @@ -4515,7 +4514,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) * it don't care about read it here and in intel_edp_init_dpcd(). 
*/ if (!intel_dp_is_edp(intel_dp) && - !drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_SINK_COUNT)) { + !drm_dp_has_quirk(&intel_dp->desc, 0, + DP_DPCD_QUIRK_NO_SINK_COUNT)) { u8 count; ssize_t r; @@ -5682,6 +5682,7 @@ intel_dp_set_edid(struct intel_dp *intel_dp) intel_dp->has_audio = drm_detect_monitor_audio(edid); drm_dp_cec_set_edid(&intel_dp->aux, edid); + intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid); } static void @@ -5694,6 +5695,7 @@ intel_dp_unset_edid(struct intel_dp *intel_dp) intel_connector->detect_edid = NULL; intel_dp->has_audio = false; + intel_dp->edid_quirks = 0; } static int @@ -6449,6 +6451,7 @@ static int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port, bool is_repeater, u8 content_type) { + int ret; struct hdcp2_dp_errata_stream_type stream_type_msg; if (is_repeater) @@ -6464,8 +6467,11 @@ int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port, stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE; stream_type_msg.stream_type = content_type; - return intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg, + ret = intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg, sizeof(stream_type_msg)); + + return ret < 0 ? ret : 0; + } static @@ -7562,8 +7568,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, edid = drm_get_edid(connector, &intel_dp->aux.ddc); if (edid) { if (drm_add_edid_modes(connector, edid)) { - drm_connector_update_edid_property(connector, - edid); + drm_connector_update_edid_property(connector, edid); + intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid); } else { kfree(edid); edid = ERR_PTR(-EINVAL); @@ -7609,9 +7615,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, intel_panel_setup_backlight(connector, pipe); if (fixed_mode) { - /* We do not know the orientation, but their might be a quirk */ drm_connector_set_panel_orientation_with_quirk(connector, - DRM_MODE_PANEL_ORIENTATION_UNKNOWN, + dev_priv->vbt.orientation, fixed_mode->hdisplay, fixed_mode->vdisplay); } diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index 3da1660547888b2095444b1009de21304dacc2ee..0c7be8ed1423add57f03a1b034d94d15995f8d27 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -8,8 +8,6 @@ #include -#include - #include "i915_reg.h" enum pipe; diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c index 48276237b362347f8f6bb413ac83fc6454bc0ae6..3e706bb850a8714a91ca8af57ec56a9aeff31190 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c @@ -328,15 +328,31 @@ intel_dp_aux_display_control_capable(struct intel_connector *connector) int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector) { struct intel_panel *panel = &intel_connector->panel; - struct drm_i915_private *dev_priv = to_i915(intel_connector->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(intel_connector->encoder); + struct drm_device *dev = intel_connector->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); if (i915_modparams.enable_dpcd_backlight == 0 || - (i915_modparams.enable_dpcd_backlight == -1 && - dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE)) + !intel_dp_aux_display_control_capable(intel_connector)) return -ENODEV; - if (!intel_dp_aux_display_control_capable(intel_connector)) + /* + * There are a lot of 
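drm_dp_has_quirk() takes an extra edid_quirks argument in this kernel, so the hunks above cache drm_dp_get_edid_quirks() in the new intel_dp->edid_quirks field whenever an EDID is read and clear it when the EDID goes away. The resulting pattern, pieced together from the diff (use_dpcd_backlight is a hypothetical flag for illustration):

/* detect: remember panel-identity quirks alongside the EDID */
intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);

/* query: descriptor-based and EDID-based quirks checked together */
if (drm_dp_has_quirk(&intel_dp->desc, intel_dp->edid_quirks,
		     DP_QUIRK_FORCE_DPCD_BACKLIGHT))
	use_dpcd_backlight = true;

/* disconnect: drop the cached quirks with the EDID */
intel_dp->edid_quirks = 0;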
machines that don't advertise the backlight + * control interface to use properly in their VBIOS, :\ + */ + if (dev_priv->vbt.backlight.type != + INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE && + !drm_dp_has_quirk(&intel_dp->desc, intel_dp->edid_quirks, + DP_QUIRK_FORCE_DPCD_BACKLIGHT)) { + DRM_DEV_INFO(dev->dev, + "Panel advertises DPCD backlight support, but " + "VBT disagrees. If your backlight controls " + "don't work try booting with " + "i915.enable_dpcd_backlight=1. If your machine " + "needs this, please file a _new_ bug report on " + "drm/i915, see " FDO_BUG_URL " for details.\n"); return -ENODEV; + } panel->backlight.setup = intel_dp_aux_setup_backlight; panel->backlight.enable = intel_dp_aux_enable_backlight; diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 306c1549a680e2d4fa5bedf187da13ed6935b141..44f3fd251ca1f1ccc4bd9600c7ba50a26b0efd67 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -50,7 +50,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; void *port = connector->port; - bool constant_n = drm_dp_has_quirk(&intel_dp->desc, + bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0, DP_DPCD_QUIRK_CONSTANT_N); int bpp, slots = -EINVAL; @@ -548,12 +548,41 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector) return ret; } +static int +intel_dp_mst_connector_late_register(struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + int ret; + + ret = drm_dp_mst_connector_late_register(connector, + intel_connector->port); + if (ret < 0) + return ret; + + ret = intel_connector_register(connector); + if (ret < 0) + drm_dp_mst_connector_early_unregister(connector, + intel_connector->port); + + return ret; +} + +static void +intel_dp_mst_connector_early_unregister(struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + + intel_connector_unregister(connector); + drm_dp_mst_connector_early_unregister(connector, + intel_connector->port); +} + static const struct drm_connector_funcs intel_dp_mst_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .atomic_get_property = intel_digital_connector_atomic_get_property, .atomic_set_property = intel_digital_connector_atomic_set_property, - .late_register = intel_connector_register, - .early_unregister = intel_connector_unregister, + .late_register = intel_dp_mst_connector_late_register, + .early_unregister = intel_dp_mst_connector_early_unregister, .destroy = intel_connector_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = intel_digital_connector_duplicate_state, diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index e5bfe5245276522ed802c0d466433eb94325c699..2d47f1f756a2ec294c2c939b20305d255931f6a3 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -45,6 +45,22 @@ * commit phase. 
*/ +struct intel_dpll_mgr { + const struct dpll_info *dpll_info; + + bool (*get_dplls)(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder); + void (*put_dplls)(struct intel_atomic_state *state, + struct intel_crtc *crtc); + void (*update_active_dpll)(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder); + void (*update_ref_clks)(struct drm_i915_private *i915); + void (*dump_hw_state)(struct drm_i915_private *dev_priv, + const struct intel_dpll_hw_state *hw_state); +}; + static void intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll_state *shared_dpll) @@ -52,8 +68,8 @@ intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv, enum intel_dpll_id i; /* Copy shared dpll state */ - for (i = 0; i < dev_priv->num_shared_dpll; i++) { - struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; + for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) { + struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i]; shared_dpll[i] = pll->state; } @@ -88,7 +104,7 @@ struct intel_shared_dpll * intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv, enum intel_dpll_id id) { - return &dev_priv->shared_dplls[id]; + return &dev_priv->dpll.shared_dplls[id]; } /** @@ -103,11 +119,14 @@ enum intel_dpll_id intel_get_shared_dpll_id(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { - if (drm_WARN_ON(&dev_priv->drm, pll < dev_priv->shared_dplls || - pll > &dev_priv->shared_dplls[dev_priv->num_shared_dpll])) + long pll_idx = pll - dev_priv->dpll.shared_dplls; + + if (drm_WARN_ON(&dev_priv->drm, + pll_idx < 0 || + pll_idx >= dev_priv->dpll.num_shared_dpll)) return -1; - return (enum intel_dpll_id) (pll - dev_priv->shared_dplls); + return pll_idx; } /* For ILK+ */ @@ -144,7 +163,7 @@ void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state) if (drm_WARN_ON(&dev_priv->drm, pll == NULL)) return; - mutex_lock(&dev_priv->dpll_lock); + mutex_lock(&dev_priv->dpll.lock); drm_WARN_ON(&dev_priv->drm, !pll->state.crtc_mask); if (!pll->active_mask) { drm_dbg(&dev_priv->drm, "setting up %s\n", pll->info->name); @@ -153,7 +172,7 @@ void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state) pll->info->funcs->prepare(dev_priv, pll); } - mutex_unlock(&dev_priv->dpll_lock); + mutex_unlock(&dev_priv->dpll.lock); } /** @@ -173,7 +192,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state) if (drm_WARN_ON(&dev_priv->drm, pll == NULL)) return; - mutex_lock(&dev_priv->dpll_lock); + mutex_lock(&dev_priv->dpll.lock); old_mask = pll->active_mask; if (drm_WARN_ON(&dev_priv->drm, !(pll->state.crtc_mask & crtc_mask)) || @@ -199,7 +218,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state) pll->on = true; out: - mutex_unlock(&dev_priv->dpll_lock); + mutex_unlock(&dev_priv->dpll.lock); } /** @@ -222,7 +241,7 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state) if (pll == NULL) return; - mutex_lock(&dev_priv->dpll_lock); + mutex_lock(&dev_priv->dpll.lock); if (drm_WARN_ON(&dev_priv->drm, !(pll->active_mask & crtc_mask))) goto out; @@ -243,7 +262,7 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state) pll->on = false; out: - mutex_unlock(&dev_priv->dpll_lock); + mutex_unlock(&dev_priv->dpll.lock); } static struct intel_shared_dpll * @@ -262,7 +281,7 @@ intel_find_shared_dpll(struct intel_atomic_state *state, drm_WARN_ON(&dev_priv->drm, dpll_mask & 
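struct intel_dpll_mgr is only being moved to the top of the file here (the old definition is deleted further down); the functional change is the new update_ref_clks() hook, which lets each platform publish its SSC and non-SSC reference clocks into i915->dpll.ref_clks instead of hardcoding them at every use. How a manager is selected is outside this section; intel_shared_dpll_init() plausibly picks one per platform along these lines (ordering and platform coverage are guesses, not the actual code):

const struct intel_dpll_mgr *dpll_mgr = NULL;

if (IS_GEN9_BC(dev_priv))
	dpll_mgr = &skl_pll_mgr;
else if (IS_GEN9_LP(dev_priv))
	dpll_mgr = &bxt_pll_mgr;
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
	dpll_mgr = &hsw_pll_mgr;
else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
	dpll_mgr = &pch_pll_mgr;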
~(BIT(I915_NUM_PLLS) - 1)); for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) { - pll = &dev_priv->shared_dplls[i]; + pll = &dev_priv->dpll.shared_dplls[i]; /* Only want to check enabled timings first */ if (shared_dpll[i].crtc_mask == 0) { @@ -362,9 +381,9 @@ void intel_shared_dpll_swap_state(struct intel_atomic_state *state) if (!state->dpll_set) return; - for (i = 0; i < dev_priv->num_shared_dpll; i++) { + for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) { struct intel_shared_dpll *pll = - &dev_priv->shared_dplls[i]; + &dev_priv->dpll.shared_dplls[i]; swap(pll->state, shared_dpll[i]); } @@ -462,7 +481,7 @@ static bool ibx_get_dpll(struct intel_atomic_state *state, if (HAS_PCH_IBX(dev_priv)) { /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */ i = (enum intel_dpll_id) crtc->pipe; - pll = &dev_priv->shared_dplls[i]; + pll = &dev_priv->dpll.shared_dplls[i]; drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n", @@ -506,6 +525,19 @@ static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = { .get_hw_state = ibx_pch_dpll_get_hw_state, }; +static const struct dpll_info pch_plls[] = { + { "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 }, + { "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 }, + { }, +}; + +static const struct intel_dpll_mgr pch_pll_mgr = { + .dpll_info = pch_plls, + .get_dplls = ibx_get_dpll, + .put_dplls = intel_put_dpll, + .dump_hw_state = ibx_dump_hw_state, +}; + static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { @@ -818,8 +850,8 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */, } static struct intel_shared_dpll * -hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state, - struct intel_crtc *crtc) +hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -846,8 +878,47 @@ hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state, return pll; } +static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv, + const struct intel_shared_dpll *pll) +{ + int refclk; + int n, p, r; + u32 wrpll = pll->state.hw_state.wrpll; + + switch (wrpll & WRPLL_REF_MASK) { + case WRPLL_REF_SPECIAL_HSW: + /* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */ + if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) { + refclk = dev_priv->dpll.ref_clks.nssc; + break; + } + /* fall through */ + case WRPLL_REF_PCH_SSC: + /* + * We could calculate spread here, but our checking + * code only cares about 5% accuracy, and spread is a max of + * 0.5% downspread. 
+ */ + refclk = dev_priv->dpll.ref_clks.ssc; + break; + case WRPLL_REF_LCPLL: + refclk = 2700000; + break; + default: + MISSING_CASE(wrpll); + return 0; + } + + r = wrpll & WRPLL_DIVIDER_REF_MASK; + p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT; + n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT; + + /* Convert to KHz, p & r have a fixed point portion */ + return (refclk * n / 10) / (p * r) * 2; +} + static struct intel_shared_dpll * -hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state) +hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); struct intel_shared_dpll *pll; @@ -878,6 +949,69 @@ hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state) return pll; } +static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915, + const struct intel_shared_dpll *pll) +{ + int link_clock = 0; + + switch (pll->info->id) { + case DPLL_ID_LCPLL_810: + link_clock = 81000; + break; + case DPLL_ID_LCPLL_1350: + link_clock = 135000; + break; + case DPLL_ID_LCPLL_2700: + link_clock = 270000; + break; + default: + drm_WARN(&i915->drm, 1, "bad port clock sel\n"); + break; + } + + return link_clock * 2; +} + +static struct intel_shared_dpll * +hsw_ddi_spll_get_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + if (WARN_ON(crtc_state->port_clock / 2 != 135000)) + return NULL; + + crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | + SPLL_REF_MUXED_SSC; + + return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state, + BIT(DPLL_ID_SPLL)); +} + +static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915, + const struct intel_shared_dpll *pll) +{ + int link_clock = 0; + + switch (pll->state.hw_state.spll & SPLL_FREQ_MASK) { + case SPLL_FREQ_810MHz: + link_clock = 81000; + break; + case SPLL_FREQ_1350MHz: + link_clock = 135000; + break; + case SPLL_FREQ_2700MHz: + link_clock = 270000; + break; + default: + drm_WARN(&i915->drm, 1, "bad spll freq\n"); + break; + } + + return link_clock * 2; +} + static bool hsw_get_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) @@ -889,23 +1023,14 @@ static bool hsw_get_dpll(struct intel_atomic_state *state, memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { - pll = hsw_ddi_hdmi_get_dpll(state, crtc); - } else if (intel_crtc_has_dp_encoder(crtc_state)) { - pll = hsw_ddi_dp_get_dpll(crtc_state); - } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) { - if (WARN_ON(crtc_state->port_clock / 2 != 135000)) - return false; - - crtc_state->dpll_hw_state.spll = - SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC; - - pll = intel_find_shared_dpll(state, crtc, - &crtc_state->dpll_hw_state, - BIT(DPLL_ID_SPLL)); - } else { + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + pll = hsw_ddi_wrpll_get_dpll(state, crtc); + else if (intel_crtc_has_dp_encoder(crtc_state)) + pll = hsw_ddi_lcpll_get_dpll(crtc_state); + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) + pll = hsw_ddi_spll_get_dpll(state, crtc); + else return false; - } if (!pll) return false; @@ -918,6 +1043,16 @@ static bool hsw_get_dpll(struct intel_atomic_state *state, return true; } +static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915) +{ + i915->dpll.ref_clks.ssc = 135000; + /* Non-SSC 
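hsw_ddi_wrpll_get_freq() above reads differently from the deleted hsw_ddi_calc_wrpll_link() but computes the same port clock. The old path used MHz-scale constants (24, 135, 2700) and returned a link clock that hsw_ddi_clock_get() then doubled; the new hook reads the kHz-scale dpll.ref_clks values (24000, 135000, 2700000) and folds the doubling in. The algebra checks out:

	old:  port_clock = (refclk_mhz * n * 100) / (p * r) * 2    /* *2 by the caller */
	new:  port_clock = (refclk_khz * n / 10) / (p * r) * 2

	substituting refclk_khz = 1000 * refclk_mhz:
	      (1000 * refclk_mhz * n / 10) / (p * r) * 2
	    = (refclk_mhz * n * 100) / (p * r) * 2               /* identical */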
is only used on non-ULT HSW. */ + if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT) + i915->dpll.ref_clks.nssc = 24000; + else + i915->dpll.ref_clks.nssc = 135000; +} + static void hsw_dump_hw_state(struct drm_i915_private *dev_priv, const struct intel_dpll_hw_state *hw_state) { @@ -929,12 +1064,14 @@ static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = { .enable = hsw_ddi_wrpll_enable, .disable = hsw_ddi_wrpll_disable, .get_hw_state = hsw_ddi_wrpll_get_hw_state, + .get_freq = hsw_ddi_wrpll_get_freq, }; static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = { .enable = hsw_ddi_spll_enable, .disable = hsw_ddi_spll_disable, .get_hw_state = hsw_ddi_spll_get_hw_state, + .get_freq = hsw_ddi_spll_get_freq, }; static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv, @@ -958,6 +1095,25 @@ static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = { .enable = hsw_ddi_lcpll_enable, .disable = hsw_ddi_lcpll_disable, .get_hw_state = hsw_ddi_lcpll_get_hw_state, + .get_freq = hsw_ddi_lcpll_get_freq, +}; + +static const struct dpll_info hsw_plls[] = { + { "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 }, + { "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 }, + { "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 }, + { "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON }, + { "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON }, + { "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON }, + { }, +}; + +static const struct intel_dpll_mgr hsw_pll_mgr = { + .dpll_info = hsw_plls, + .get_dplls = hsw_get_dpll, + .put_dplls = intel_put_dpll, + .update_ref_clks = hsw_update_dpll_ref_clks, + .dump_hw_state = hsw_dump_hw_state, }; struct skl_dpll_regs { @@ -1230,6 +1386,7 @@ struct skl_wrpll_params { static void skl_wrpll_params_populate(struct skl_wrpll_params *params, u64 afe_clock, + int ref_clock, u64 central_freq, u32 p0, u32 p1, u32 p2) { @@ -1289,14 +1446,15 @@ static void skl_wrpll_params_populate(struct skl_wrpll_params *params, * Intermediate values are in Hz. 
* Divide by MHz to match bsepc */ - params->dco_integer = div_u64(dco_freq, 24 * MHz(1)); + params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1)); params->dco_fraction = - div_u64((div_u64(dco_freq, 24) - + div_u64((div_u64(dco_freq, ref_clock / KHz(1)) - params->dco_integer * MHz(1)) * 0x8000, MHz(1)); } static bool skl_ddi_calculate_wrpll(int clock /* in Hz */, + int ref_clock, struct skl_wrpll_params *wrpll_params) { u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */ @@ -1362,14 +1520,15 @@ skip_remaining_dividers: */ p0 = p1 = p2 = 0; skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2); - skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq, - p0, p1, p2); + skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock, + ctx.central_freq, p0, p1, p2); return true; } static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) { + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); u32 ctrl1, cfgcr1, cfgcr2; struct skl_wrpll_params wrpll_params = { 0, }; @@ -1382,6 +1541,7 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) ctrl1 |= DPLL_CTRL1_HDMI_MODE(0); if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000, + i915->dpll.ref_clks.nssc, &wrpll_params)) return false; @@ -1404,6 +1564,64 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) return true; } +static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915, + const struct intel_shared_dpll *pll) +{ + const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state; + int ref_clock = i915->dpll.ref_clks.nssc; + u32 p0, p1, p2, dco_freq; + + p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK; + p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK; + + if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1)) + p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8; + else + p1 = 1; + + + switch (p0) { + case DPLL_CFGCR2_PDIV_1: + p0 = 1; + break; + case DPLL_CFGCR2_PDIV_2: + p0 = 2; + break; + case DPLL_CFGCR2_PDIV_3: + p0 = 3; + break; + case DPLL_CFGCR2_PDIV_7: + p0 = 7; + break; + } + + switch (p2) { + case DPLL_CFGCR2_KDIV_5: + p2 = 5; + break; + case DPLL_CFGCR2_KDIV_2: + p2 = 2; + break; + case DPLL_CFGCR2_KDIV_3: + p2 = 3; + break; + case DPLL_CFGCR2_KDIV_1: + p2 = 1; + break; + } + + dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) * + ref_clock; + + dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) * + ref_clock / 0x8000; + + if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0)) + return 0; + + return dco_freq / (p0 * p1 * p2 * 5); +} + static bool skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) { @@ -1444,6 +1662,40 @@ skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) return true; } +static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915, + const struct intel_shared_dpll *pll) +{ + int link_clock = 0; + + switch ((pll->state.hw_state.ctrl1 & + DPLL_CTRL1_LINK_RATE_MASK(0)) >> + DPLL_CTRL1_LINK_RATE_SHIFT(0)) { + case DPLL_CTRL1_LINK_RATE_810: + link_clock = 81000; + break; + case DPLL_CTRL1_LINK_RATE_1080: + link_clock = 108000; + break; + case DPLL_CTRL1_LINK_RATE_1350: + link_clock = 135000; + break; + case DPLL_CTRL1_LINK_RATE_1620: + link_clock = 162000; + break; + case DPLL_CTRL1_LINK_RATE_2160: + link_clock = 216000; + break; + case DPLL_CTRL1_LINK_RATE_2700: + link_clock = 270000; + break; + default: + drm_WARN(&i915->drm, 1, "Unsupported link rate\n"); + break; + } + + return link_clock * 2; +} + static bool skl_get_dpll(struct 
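skl_wrpll_params_populate() and the new skl_ddi_wrpll_get_freq() above are inverses, both now parameterized by ref_clock (in kHz) instead of the former hard-coded 24 MHz. A worked round trip with illustrative numbers (ref_clock = 24000, target DCO = 8.1 GHz, p0 * p1 * p2 = 6):

	forward (populate):
	  dco_integer  = 8,100,000,000 Hz / (24000 * 1000)        = 337
	  remainder    = 8,100,000,000 Hz - 337 * 24 MHz          = 12 MHz
	  dco_fraction = 12 MHz * 0x8000 / 24 MHz                 = 0x4000   /* 0.5 */

	reverse (get_freq):
	  dco_freq = 337 * 24000 + (0x4000 * 24000 >> 15)         = 8,100,000 kHz
	  link     = 8,100,000 / (6 * 5)                          = 270,000 kHz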
intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) @@ -1493,6 +1745,25 @@ static bool skl_get_dpll(struct intel_atomic_state *state, return true; } +static int skl_ddi_pll_get_freq(struct drm_i915_private *i915, + const struct intel_shared_dpll *pll) +{ + /* + * ctrl1 register is already shifted for each pll, just use 0 to get + * the internal shift for each field + */ + if (pll->state.hw_state.ctrl1 & DPLL_CTRL1_HDMI_MODE(0)) + return skl_ddi_wrpll_get_freq(i915, pll); + else + return skl_ddi_lcpll_get_freq(i915, pll); +} + +static void skl_update_dpll_ref_clks(struct drm_i915_private *i915) +{ + /* No SSC ref */ + i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref; +} + static void skl_dump_hw_state(struct drm_i915_private *dev_priv, const struct intel_dpll_hw_state *hw_state) { @@ -1507,12 +1778,30 @@ static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = { .enable = skl_ddi_pll_enable, .disable = skl_ddi_pll_disable, .get_hw_state = skl_ddi_pll_get_hw_state, + .get_freq = skl_ddi_pll_get_freq, }; static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = { .enable = skl_ddi_dpll0_enable, .disable = skl_ddi_dpll0_disable, .get_hw_state = skl_ddi_dpll0_get_hw_state, + .get_freq = skl_ddi_pll_get_freq, +}; + +static const struct dpll_info skl_plls[] = { + { "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON }, + { "DPLL 1", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 }, + { "DPLL 2", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 }, + { "DPLL 3", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL3, 0 }, + { }, +}; + +static const struct intel_dpll_mgr skl_pll_mgr = { + .dpll_info = skl_plls, + .get_dplls = skl_get_dpll, + .put_dplls = intel_put_dpll, + .update_ref_clks = skl_update_dpll_ref_clks, + .dump_hw_state = skl_dump_hw_state, }; static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, @@ -1903,6 +2192,23 @@ bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state) return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div); } +static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915, + const struct intel_shared_dpll *pll) +{ + const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state; + struct dpll clock; + + clock.m1 = 2; + clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22; + if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE) + clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK; + clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT; + clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT; + clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT; + + return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock); +} + static bool bxt_get_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) @@ -1936,6 +2242,13 @@ static bool bxt_get_dpll(struct intel_atomic_state *state, return true; } +static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915) +{ + i915->dpll.ref_clks.ssc = 100000; + i915->dpll.ref_clks.nssc = 100000; + /* DSI non-SSC ref 19.2MHz */ +} + static void bxt_dump_hw_state(struct drm_i915_private *dev_priv, const struct intel_dpll_hw_state *hw_state) { @@ -1959,66 +2272,7 @@ static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = { .enable = bxt_ddi_pll_enable, .disable = bxt_ddi_pll_disable, .get_hw_state = bxt_ddi_pll_get_hw_state, -}; - -struct intel_dpll_mgr { - const struct dpll_info *dpll_info; - - bool (*get_dplls)(struct intel_atomic_state *state, - struct intel_crtc *crtc, - struct 
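/*
 * The BXT readback above rebuilds a classic m/n/p divider tree where
 * m2 carries a 22-bit fraction (note the << 22 when assembling it).
 * This is a simplified model of the math chv_calc_dpll_params()
 * performs on that value; the divider numbers below are made up for
 * illustration, not taken from any BXT PLL state.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t port_clock_khz(uint32_t ref_khz, uint32_t m1, uint64_t m2_q22,
			       uint32_t n, uint32_t p1, uint32_t p2)
{
	/* vco = ref * m1 * m2 / n, with m2 in 22.22 fixed point */
	uint64_t vco_khz = ((uint64_t)ref_khz * m1 * m2_q22) / ((uint64_t)n << 22);

	return vco_khz / (p1 * p2);
}

int main(void)
{
	uint64_t m2 = 32ull << 22; /* m2 = 32.0 */

	printf("%u kHz\n", port_clock_khz(100000, 2, m2, 1, 4, 5));
	return 0;
}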
intel_encoder *encoder); - void (*put_dplls)(struct intel_atomic_state *state, - struct intel_crtc *crtc); - void (*update_active_dpll)(struct intel_atomic_state *state, - struct intel_crtc *crtc, - struct intel_encoder *encoder); - void (*dump_hw_state)(struct drm_i915_private *dev_priv, - const struct intel_dpll_hw_state *hw_state); -}; - -static const struct dpll_info pch_plls[] = { - { "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 }, - { "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 }, - { }, -}; - -static const struct intel_dpll_mgr pch_pll_mgr = { - .dpll_info = pch_plls, - .get_dplls = ibx_get_dpll, - .put_dplls = intel_put_dpll, - .dump_hw_state = ibx_dump_hw_state, -}; - -static const struct dpll_info hsw_plls[] = { - { "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 }, - { "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 }, - { "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 }, - { "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON }, - { "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON }, - { "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON }, - { }, -}; - -static const struct intel_dpll_mgr hsw_pll_mgr = { - .dpll_info = hsw_plls, - .get_dplls = hsw_get_dpll, - .put_dplls = intel_put_dpll, - .dump_hw_state = hsw_dump_hw_state, -}; - -static const struct dpll_info skl_plls[] = { - { "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON }, - { "DPLL 1", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 }, - { "DPLL 2", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 }, - { "DPLL 3", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL3, 0 }, - { }, -}; - -static const struct intel_dpll_mgr skl_pll_mgr = { - .dpll_info = skl_plls, - .get_dplls = skl_get_dpll, - .put_dplls = intel_put_dpll, - .dump_hw_state = skl_dump_hw_state, + .get_freq = bxt_ddi_pll_get_freq, }; static const struct dpll_info bxt_plls[] = { @@ -2032,6 +2286,7 @@ static const struct intel_dpll_mgr bxt_pll_mgr = { .dpll_info = bxt_plls, .get_dplls = bxt_get_dpll, .put_dplls = intel_put_dpll, + .update_ref_clks = bxt_update_dpll_ref_clks, .dump_hw_state = bxt_dump_hw_state, }; @@ -2275,27 +2530,12 @@ static void cnl_wrpll_params_populate(struct skl_wrpll_params *params, params->dco_fraction = dco & 0x7fff; } -int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv) -{ - int ref_clock = dev_priv->cdclk.hw.ref; - - /* - * For ICL+, the spec states: if reference frequency is 38.4, - * use 19.2 because the DPLL automatically divides that by 2. 
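/*
 * The intel_dpll_mgr vtable being moved above follows a common kernel
 * pattern: a per-platform ops table in which newer hooks (here,
 * update_ref_clks and get_freq) remain optional and must be
 * NULL-checked before dispatch. A minimal sketch with illustrative
 * names, not the driver's actual types:
 */
#include <stdio.h>

struct pll_mgr {
	const char *name;
	void (*update_ref_clks)(int *nssc_khz); /* optional hook */
};

static void skl_like_update(int *nssc_khz) { *nssc_khz = 24000; }

static const struct pll_mgr skl_like = { "skl", skl_like_update };
static const struct pll_mgr pch_like = { "pch", NULL };

static void readout(const struct pll_mgr *mgr, int *nssc_khz)
{
	if (mgr->update_ref_clks) /* older platforms leave this NULL */
		mgr->update_ref_clks(nssc_khz);
	printf("%s: nssc = %d kHz\n", mgr->name, *nssc_khz);
}

int main(void)
{
	int nssc = 0;

	readout(&skl_like, &nssc);
	readout(&pch_like, &nssc);
	return 0;
}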
- */ - if (INTEL_GEN(dev_priv) >= 11 && ref_clock == 38400) - ref_clock = 19200; - - return ref_clock; -} - static bool -cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state, - struct skl_wrpll_params *wrpll_params) +__cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state, + struct skl_wrpll_params *wrpll_params, + int ref_clock) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); u32 afe_clock = crtc_state->port_clock * 5; - u32 ref_clock; u32 dco_min = 7998000; u32 dco_max = 10000000; u32 dco_mid = (dco_min + dco_max) / 2; @@ -2327,15 +2567,22 @@ cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state, return false; cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv); - - ref_clock = cnl_hdmi_pll_ref_clock(dev_priv); - cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock, pdiv, qdiv, kdiv); return true; } +static bool +cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state, + struct skl_wrpll_params *wrpll_params) +{ + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + + return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params, + i915->dpll.ref_clks.nssc); +} + static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) { u32 cfgcr0, cfgcr1; @@ -2363,6 +2610,68 @@ static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) return true; } +static int __cnl_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv, + const struct intel_shared_dpll *pll, + int ref_clock) +{ + const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state; + u32 p0, p1, p2, dco_freq; + + p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK; + p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK; + + if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1)) + p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >> + DPLL_CFGCR1_QDIV_RATIO_SHIFT; + else + p1 = 1; + + + switch (p0) { + case DPLL_CFGCR1_PDIV_2: + p0 = 2; + break; + case DPLL_CFGCR1_PDIV_3: + p0 = 3; + break; + case DPLL_CFGCR1_PDIV_5: + p0 = 5; + break; + case DPLL_CFGCR1_PDIV_7: + p0 = 7; + break; + } + + switch (p2) { + case DPLL_CFGCR1_KDIV_1: + p2 = 1; + break; + case DPLL_CFGCR1_KDIV_2: + p2 = 2; + break; + case DPLL_CFGCR1_KDIV_3: + p2 = 3; + break; + } + + dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) * + ref_clock; + + dco_freq += (((pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >> + DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000; + + if (drm_WARN_ON(&dev_priv->drm, p0 == 0 || p1 == 0 || p2 == 0)) + return 0; + + return dco_freq / (p0 * p1 * p2 * 5); +} + +static int cnl_ddi_wrpll_get_freq(struct drm_i915_private *i915, + const struct intel_shared_dpll *pll) +{ + return __cnl_ddi_wrpll_get_freq(i915, pll, i915->dpll.ref_clks.nssc); +} + static bool cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) { @@ -2408,6 +2717,44 @@ cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) return true; } +static int cnl_ddi_lcpll_get_freq(struct drm_i915_private *i915, + const struct intel_shared_dpll *pll) +{ + int link_clock = 0; + + switch (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK) { + case DPLL_CFGCR0_LINK_RATE_810: + link_clock = 81000; + break; + case DPLL_CFGCR0_LINK_RATE_1080: + link_clock = 108000; + break; + case DPLL_CFGCR0_LINK_RATE_1350: + link_clock = 135000; + break; + case DPLL_CFGCR0_LINK_RATE_1620: + link_clock = 162000; + break; + case DPLL_CFGCR0_LINK_RATE_2160: + link_clock = 216000; + break; + case DPLL_CFGCR0_LINK_RATE_2700: + link_clock = 270000; + break; + case 
DPLL_CFGCR0_LINK_RATE_3240: + link_clock = 324000; + break; + case DPLL_CFGCR0_LINK_RATE_4050: + link_clock = 405000; + break; + default: + drm_WARN(&i915->drm, 1, "Unsupported link rate\n"); + break; + } + + return link_clock * 2; +} + static bool cnl_get_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) @@ -2457,6 +2804,21 @@ static bool cnl_get_dpll(struct intel_atomic_state *state, return true; } +static int cnl_ddi_pll_get_freq(struct drm_i915_private *i915, + const struct intel_shared_dpll *pll) +{ + if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) + return cnl_ddi_wrpll_get_freq(i915, pll); + else + return cnl_ddi_lcpll_get_freq(i915, pll); +} + +static void cnl_update_dpll_ref_clks(struct drm_i915_private *i915) +{ + /* No SSC reference */ + i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref; +} + static void cnl_dump_hw_state(struct drm_i915_private *dev_priv, const struct intel_dpll_hw_state *hw_state) { @@ -2470,6 +2832,7 @@ static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = { .enable = cnl_ddi_pll_enable, .disable = cnl_ddi_pll_disable, .get_hw_state = cnl_ddi_pll_get_hw_state, + .get_freq = cnl_ddi_pll_get_freq, }; static const struct dpll_info cnl_plls[] = { @@ -2483,6 +2846,7 @@ static const struct intel_dpll_mgr cnl_pll_mgr = { .dpll_info = cnl_plls, .get_dplls = cnl_get_dpll, .put_dplls = intel_put_dpll, + .update_ref_clks = cnl_update_dpll_ref_clks, .dump_hw_state = cnl_dump_hw_state, }; @@ -2578,7 +2942,7 @@ static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state, { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); const struct icl_combo_pll_params *params = - dev_priv->cdclk.hw.ref == 24000 ? + dev_priv->dpll.ref_clks.nssc == 24000 ? icl_dp_combo_pll_24MHz_values : icl_dp_combo_pll_19_2MHz_values; int clock = crtc_state->port_clock; @@ -2601,9 +2965,9 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state, struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); if (INTEL_GEN(dev_priv) >= 12) { - switch (dev_priv->cdclk.hw.ref) { + switch (dev_priv->dpll.ref_clks.nssc) { default: - MISSING_CASE(dev_priv->cdclk.hw.ref); + MISSING_CASE(dev_priv->dpll.ref_clks.nssc); /* fall-through */ case 19200: case 38400: @@ -2614,9 +2978,9 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state, break; } } else { - switch (dev_priv->cdclk.hw.ref) { + switch (dev_priv->dpll.ref_clks.nssc) { default: - MISSING_CASE(dev_priv->cdclk.hw.ref); + MISSING_CASE(dev_priv->dpll.ref_clks.nssc); /* fall-through */ case 19200: case 38400: @@ -2631,6 +2995,49 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state, return true; } +static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915, + const struct intel_shared_dpll *pll) +{ + /* + * The PLL outputs multiple frequencies at the same time, selection is + * made at DDI clock mux level. + */ + drm_WARN_ON(&i915->drm, 1); + + return 0; +} + +static int icl_wrpll_ref_clock(struct drm_i915_private *i915) +{ + int ref_clock = i915->dpll.ref_clks.nssc; + + /* + * For ICL+, the spec states: if reference frequency is 38.4, + * use 19.2 because the DPLL automatically divides that by 2. 
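/*
 * The LINK_RATE switches above map a register field to a link clock
 * in kHz and return twice that as the port clock. The same mapping as
 * a lookup table; the index encoding here is a positional placeholder,
 * not the real DPLL_CFGCR0 bit values.
 */
#include <stdio.h>

static const int link_rate_khz[] = {
	81000, 108000, 135000, 162000, 216000, 270000, 324000, 405000,
};

static int port_clock_khz(unsigned int rate)
{
	if (rate >= sizeof(link_rate_khz) / sizeof(link_rate_khz[0]))
		return 0; /* the driver drm_WARN()s on unsupported rates */

	return link_rate_khz[rate] * 2;
}

int main(void)
{
	printf("%d kHz\n", port_clock_khz(5)); /* 270000 * 2 = 540000 */
	return 0;
}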
+ */ + if (ref_clock == 38400) + ref_clock = 19200; + + return ref_clock; +} + +static bool +icl_calc_wrpll(struct intel_crtc_state *crtc_state, + struct skl_wrpll_params *wrpll_params) +{ + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + + return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params, + icl_wrpll_ref_clock(i915)); +} + +static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915, + const struct intel_shared_dpll *pll) +{ + return __cnl_ddi_wrpll_get_freq(i915, pll, + icl_wrpll_ref_clock(i915)); +} + static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder, struct intel_dpll_hw_state *pll_state) @@ -2645,7 +3052,7 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state, ret = icl_calc_tbt_pll(crtc_state, &pll_params); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) || intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) - ret = cnl_ddi_calculate_wrpll(crtc_state, &pll_params); + ret = icl_calc_wrpll(crtc_state, &pll_params); else ret = icl_calc_dp_combo_pll(crtc_state, &pll_params); @@ -2768,7 +3175,7 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, struct intel_dpll_hw_state *pll_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - int refclk_khz = dev_priv->cdclk.hw.ref; + int refclk_khz = dev_priv->dpll.ref_clks.nssc; int clock = crtc_state->port_clock; u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac; u32 iref_ndiv, iref_trim, iref_pulse_w; @@ -2969,6 +3376,78 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, return true; } +static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv, + const struct intel_shared_dpll *pll) +{ + const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state; + u32 m1, m2_int, m2_frac, div1, div2, ref_clock; + u64 tmp; + + ref_clock = dev_priv->dpll.ref_clks.nssc; + + if (INTEL_GEN(dev_priv) >= 12) { + m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK; + m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT; + m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK; + + if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) { + m2_frac = pll_state->mg_pll_bias & + DKL_PLL_BIAS_FBDIV_FRAC_MASK; + m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT; + } else { + m2_frac = 0; + } + } else { + m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK; + m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK; + + if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) { + m2_frac = pll_state->mg_pll_div0 & + MG_PLL_DIV0_FBDIV_FRAC_MASK; + m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT; + } else { + m2_frac = 0; + } + } + + switch (pll_state->mg_clktop2_hsclkctl & + MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) { + case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2: + div1 = 2; + break; + case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3: + div1 = 3; + break; + case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5: + div1 = 5; + break; + case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7: + div1 = 7; + break; + default: + MISSING_CASE(pll_state->mg_clktop2_hsclkctl); + return 0; + } + + div2 = (pll_state->mg_clktop2_hsclkctl & + MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >> + MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT; + + /* div2 value of 0 is same as 1 means no div */ + if (div2 == 0) + div2 = 1; + + /* + * Adjust the original formula to delay the division by 2^22 in order to + * minimize possible rounding errors. 
+ */ + tmp = (u64)m1 * m2_int * ref_clock + + (((u64)m1 * m2_frac * ref_clock) >> 22); + tmp = div_u64(tmp, 5 * div1 * div2); + + return tmp; +} + /** * icl_set_active_port_dpll - select the active port DPLL for a given CRTC * @crtc_state: state for the CRTC to select the DPLL for @@ -3201,7 +3680,7 @@ static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv, hw_state->mg_pll_tdc_coldst_bias = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port)); - if (dev_priv->cdclk.hw.ref == 38400) { + if (dev_priv->dpll.ref_clks.nssc == 38400) { hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART; hw_state->mg_pll_bias_mask = 0; } else { @@ -3682,6 +4161,12 @@ static void mg_pll_disable(struct drm_i915_private *dev_priv, icl_pll_disable(dev_priv, pll, enable_reg); } +static void icl_update_dpll_ref_clks(struct drm_i915_private *i915) +{ + /* No SSC ref */ + i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref; +} + static void icl_dump_hw_state(struct drm_i915_private *dev_priv, const struct intel_dpll_hw_state *hw_state) { @@ -3709,18 +4194,21 @@ static const struct intel_shared_dpll_funcs combo_pll_funcs = { .enable = combo_pll_enable, .disable = combo_pll_disable, .get_hw_state = combo_pll_get_hw_state, + .get_freq = icl_ddi_combo_pll_get_freq, }; static const struct intel_shared_dpll_funcs tbt_pll_funcs = { .enable = tbt_pll_enable, .disable = tbt_pll_disable, .get_hw_state = tbt_pll_get_hw_state, + .get_freq = icl_ddi_tbt_pll_get_freq, }; static const struct intel_shared_dpll_funcs mg_pll_funcs = { .enable = mg_pll_enable, .disable = mg_pll_disable, .get_hw_state = mg_pll_get_hw_state, + .get_freq = icl_ddi_mg_pll_get_freq, }; static const struct dpll_info icl_plls[] = { @@ -3739,6 +4227,7 @@ static const struct intel_dpll_mgr icl_pll_mgr = { .get_dplls = icl_get_dplls, .put_dplls = icl_put_dplls, .update_active_dpll = icl_update_active_dpll, + .update_ref_clks = icl_update_dpll_ref_clks, .dump_hw_state = icl_dump_hw_state, }; @@ -3753,6 +4242,7 @@ static const struct intel_dpll_mgr ehl_pll_mgr = { .dpll_info = ehl_plls, .get_dplls = icl_get_dplls, .put_dplls = icl_put_dplls, + .update_ref_clks = icl_update_dpll_ref_clks, .dump_hw_state = icl_dump_hw_state, }; @@ -3760,6 +4250,7 @@ static const struct intel_shared_dpll_funcs dkl_pll_funcs = { .enable = mg_pll_enable, .disable = mg_pll_disable, .get_hw_state = dkl_pll_get_hw_state, + .get_freq = icl_ddi_mg_pll_get_freq, }; static const struct dpll_info tgl_plls[] = { @@ -3780,6 +4271,7 @@ static const struct intel_dpll_mgr tgl_pll_mgr = { .get_dplls = icl_get_dplls, .put_dplls = icl_put_dplls, .update_active_dpll = icl_update_active_dpll, + .update_ref_clks = icl_update_dpll_ref_clks, .dump_hw_state = icl_dump_hw_state, }; @@ -3814,7 +4306,7 @@ void intel_shared_dpll_init(struct drm_device *dev) dpll_mgr = &pch_pll_mgr; if (!dpll_mgr) { - dev_priv->num_shared_dpll = 0; + dev_priv->dpll.num_shared_dpll = 0; return; } @@ -3822,14 +4314,14 @@ void intel_shared_dpll_init(struct drm_device *dev) for (i = 0; dpll_info[i].name; i++) { drm_WARN_ON(dev, i != dpll_info[i].id); - dev_priv->shared_dplls[i].info = &dpll_info[i]; + dev_priv->dpll.shared_dplls[i].info = &dpll_info[i]; } - dev_priv->dpll_mgr = dpll_mgr; - dev_priv->num_shared_dpll = i; - mutex_init(&dev_priv->dpll_lock); + dev_priv->dpll.mgr = dpll_mgr; + dev_priv->dpll.num_shared_dpll = i; + mutex_init(&dev_priv->dpll.lock); - BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS); + BUG_ON(dev_priv->dpll.num_shared_dpll > I915_NUM_PLLS); } /** @@ -3856,7 +4348,7 @@ bool 
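/*
 * Why the MG/DKL math above multiplies before shifting right by 22:
 * the fractional feedback value only has meaning once scaled by m1 and
 * the reference clock, so shifting it first discards the whole
 * fractional contribution. A small demonstration with made-up divider
 * values, not real PLL state:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t m1 = 2, m2_int = 100, m2_frac = 1ull << 21; /* frac = 0.5 */
	uint64_t ref = 38400, div = 5 * 2 * 1; /* 5 * div1 * div2 */

	/* shifting early: the fraction truncates to zero */
	uint64_t early = (m1 * m2_int * ref + m1 * (m2_frac >> 22) * ref) / div;

	/* the driver's order: scale first, shift last */
	uint64_t late = (m1 * m2_int * ref +
			 ((m1 * m2_frac * ref) >> 22)) / div;

	printf("early=%llu late=%llu kHz\n",
	       (unsigned long long)early, (unsigned long long)late);
	return 0;
}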
intel_reserve_shared_dplls(struct intel_atomic_state *state, struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr; + const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr; if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr)) return false; @@ -3879,7 +4371,7 @@ void intel_release_shared_dplls(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr; + const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr; /* * FIXME: this function is called for every platform having a @@ -3908,7 +4400,7 @@ void intel_update_active_dpll(struct intel_atomic_state *state, struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr; + const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr; if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr)) return; @@ -3916,6 +4408,84 @@ void intel_update_active_dpll(struct intel_atomic_state *state, dpll_mgr->update_active_dpll(state, crtc, encoder); } +/** + * intel_dpll_get_freq - calculate the DPLL's output frequency + * @i915: i915 device + * @pll: DPLL for which to calculate the output frequency + * + * Return the output frequency corresponding to @pll's current state. + */ +int intel_dpll_get_freq(struct drm_i915_private *i915, + const struct intel_shared_dpll *pll) +{ + if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq)) + return 0; + + return pll->info->funcs->get_freq(i915, pll); +} + +static void readout_dpll_hw_state(struct drm_i915_private *i915, + struct intel_shared_dpll *pll) +{ + struct intel_crtc *crtc; + + pll->on = pll->info->funcs->get_hw_state(i915, pll, + &pll->state.hw_state); + + if (IS_ELKHARTLAKE(i915) && pll->on && + pll->info->id == DPLL_ID_EHL_DPLL4) { + pll->wakeref = intel_display_power_get(i915, + POWER_DOMAIN_DPLL_DC_OFF); + } + + pll->state.crtc_mask = 0; + for_each_intel_crtc(&i915->drm, crtc) { + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + + if (crtc_state->hw.active && crtc_state->shared_dpll == pll) + pll->state.crtc_mask |= 1 << crtc->pipe; + } + pll->active_mask = pll->state.crtc_mask; + + drm_dbg_kms(&i915->drm, + "%s hw state readout: crtc_mask 0x%08x, on %i\n", + pll->info->name, pll->state.crtc_mask, pll->on); +} + +void intel_dpll_readout_hw_state(struct drm_i915_private *i915) +{ + int i; + + if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks) + i915->dpll.mgr->update_ref_clks(i915); + + for (i = 0; i < i915->dpll.num_shared_dpll; i++) + readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]); +} + +static void sanitize_dpll_state(struct drm_i915_private *i915, + struct intel_shared_dpll *pll) +{ + if (!pll->on || pll->active_mask) + return; + + drm_dbg_kms(&i915->drm, + "%s enabled but not in use, disabling\n", + pll->info->name); + + pll->info->funcs->disable(i915, pll); + pll->on = false; +} + +void intel_dpll_sanitize_state(struct drm_i915_private *i915) +{ + int i; + + for (i = 0; i < i915->dpll.num_shared_dpll; i++) + sanitize_dpll_state(i915, &i915->dpll.shared_dplls[i]); +} + /** * intel_shared_dpll_dump_hw_state - write hw_state to dmesg * @dev_priv: i915 drm device @@ -3926,8 +4496,8 @@ void intel_update_active_dpll(struct intel_atomic_state *state, void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv, const struct intel_dpll_hw_state *hw_state) 
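/*
 * A compact model of the readout/sanitize split introduced above:
 * readout records which CRTCs reference each PLL, and sanitize then
 * disables any PLL that is on but unreferenced (stale firmware state).
 * The struct is a simplified stand-in for the driver's shared-DPLL
 * bookkeeping.
 */
#include <stdbool.h>
#include <stdio.h>

struct pll {
	const char *name;
	bool on;
	unsigned int active_mask; /* one bit per CRTC using the PLL */
};

static void sanitize(struct pll *pll)
{
	if (!pll->on || pll->active_mask)
		return;

	printf("%s enabled but not in use, disabling\n", pll->name);
	pll->on = false;
}

int main(void)
{
	struct pll plls[] = {
		{ "DPLL 0", true, 1u << 0 }, /* in use: left alone */
		{ "DPLL 1", true, 0 },       /* stale: gets disabled */
	};

	for (unsigned int i = 0; i < 2; i++)
		sanitize(&plls[i]);
	return 0;
}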
{ - if (dev_priv->dpll_mgr) { - dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state); + if (dev_priv->dpll.mgr) { + dev_priv->dpll.mgr->dump_hw_state(dev_priv, hw_state); } else { /* fallback for platforms that don't use the shared dpll * infrastructure diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h index 2a104c64291df45f1abc848e260a1fdbfe021c15..5d9a2bc371e7cdcec0f1608856382e819264b54d 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h @@ -278,6 +278,15 @@ struct intel_shared_dpll_funcs { bool (*get_hw_state)(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state); + + /** + * @get_freq: + * + * Hook for calculating the pll's output frequency based on its + * current state. + */ + int (*get_freq)(struct drm_i915_private *i915, + const struct intel_shared_dpll *pll); }; /** @@ -372,15 +381,18 @@ void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state, void intel_update_active_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder); +int intel_dpll_get_freq(struct drm_i915_private *i915, + const struct intel_shared_dpll *pll); void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state); void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state); void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state); void intel_shared_dpll_swap_state(struct intel_atomic_state *state); void intel_shared_dpll_init(struct drm_device *dev); +void intel_dpll_readout_hw_state(struct drm_i915_private *dev_priv); +void intel_dpll_sanitize_state(struct drm_i915_private *dev_priv); void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv, const struct intel_dpll_hw_state *hw_state); -int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv); enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port); bool intel_dpll_is_combophy(enum intel_dpll_id id); diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c index 76ae01277fd63cc9c1fd6c740818be2d3744da8a..d7a6bf2277df4e56599d0b1420a1a593c0559e63 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.c +++ b/drivers/gpu/drm/i915/display/intel_dsb.c @@ -52,7 +52,7 @@ static inline bool intel_dsb_enable_engine(struct intel_dsb *dsb) dsb_ctrl = intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id)); if (DSB_STATUS & dsb_ctrl) { - DRM_DEBUG_KMS("DSB engine is busy.\n"); + drm_dbg_kms(&dev_priv->drm, "DSB engine is busy.\n"); return false; } @@ -72,7 +72,7 @@ static inline bool intel_dsb_disable_engine(struct intel_dsb *dsb) dsb_ctrl = intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id)); if (DSB_STATUS & dsb_ctrl) { - DRM_DEBUG_KMS("DSB engine is busy.\n"); + drm_dbg_kms(&dev_priv->drm, "DSB engine is busy.\n"); return false; } @@ -115,20 +115,20 @@ intel_dsb_get(struct intel_crtc *crtc) obj = i915_gem_object_create_internal(i915, DSB_BUF_SIZE); if (IS_ERR(obj)) { - DRM_ERROR("Gem object creation failed\n"); + drm_err(&i915->drm, "Gem object creation failed\n"); goto out; } vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0); if (IS_ERR(vma)) { - DRM_ERROR("Vma creation failed\n"); + drm_err(&i915->drm, "Vma creation failed\n"); i915_gem_object_put(obj); goto out; } buf = i915_gem_object_pin_map(vma->obj, I915_MAP_WC); if (IS_ERR(buf)) { - DRM_ERROR("Command buffer creation failed\n"); + drm_err(&i915->drm, "Command buffer creation failed\n"); goto out; } @@ -203,7 +203,7 
@@ void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg, } if (drm_WARN_ON(&dev_priv->drm, dsb->free_pos >= DSB_BUF_SIZE)) { - DRM_DEBUG_KMS("DSB buffer overflow\n"); + drm_dbg_kms(&dev_priv->drm, "DSB buffer overflow\n"); return; } @@ -277,7 +277,7 @@ void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val) } if (drm_WARN_ON(&dev_priv->drm, dsb->free_pos >= DSB_BUF_SIZE)) { - DRM_DEBUG_KMS("DSB buffer overflow\n"); + drm_dbg_kms(&dev_priv->drm, "DSB buffer overflow\n"); return; } @@ -310,7 +310,8 @@ void intel_dsb_commit(struct intel_dsb *dsb) goto reset; if (is_dsb_busy(dsb)) { - DRM_ERROR("HEAD_PTR write failed - dsb engine is busy.\n"); + drm_err(&dev_priv->drm, + "HEAD_PTR write failed - dsb engine is busy.\n"); goto reset; } intel_de_write(dev_priv, DSB_HEAD(pipe, dsb->id), @@ -322,15 +323,18 @@ void intel_dsb_commit(struct intel_dsb *dsb) (tail - dsb->free_pos * 4)); if (is_dsb_busy(dsb)) { - DRM_ERROR("TAIL_PTR write failed - dsb engine is busy.\n"); + drm_err(&dev_priv->drm, + "TAIL_PTR write failed - dsb engine is busy.\n"); goto reset; } - DRM_DEBUG_KMS("DSB execution started - head 0x%x, tail 0x%x\n", - i915_ggtt_offset(dsb->vma), tail); + drm_dbg_kms(&dev_priv->drm, + "DSB execution started - head 0x%x, tail 0x%x\n", + i915_ggtt_offset(dsb->vma), tail); intel_de_write(dev_priv, DSB_TAIL(pipe, dsb->id), i915_ggtt_offset(dsb->vma) + tail); if (wait_for(!is_dsb_busy(dsb), 1)) { - DRM_ERROR("Timed out waiting for DSB workload completion.\n"); + drm_err(&dev_priv->drm, + "Timed out waiting for DSB workload completion.\n"); goto reset; } diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c index 694498f4b7195885d3815ea85d4d01daa4c32d46..574dcfec9577e81389ec28c4b9942a41f40f6b90 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c @@ -36,7 +36,6 @@ #include #include -#include #include
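/*
 * The intel_dsb_commit() flow in the hunk above is a head/tail
 * doorbell protocol: bail out while the engine is busy, publish the
 * head and tail offsets, then poll for completion. A sketch of that
 * sequence against plain memory instead of GGTT-backed registers; the
 * struct and busy flag are illustrative, not the DSB_CTRL layout.
 */
#include <stdbool.h>
#include <stdio.h>

struct dsb_model {
	unsigned int head, tail;
	bool busy;
};

static bool dsb_commit(struct dsb_model *dsb, unsigned int tail)
{
	if (dsb->busy) {
		fprintf(stderr, "HEAD_PTR write failed - engine busy\n");
		return false;
	}

	dsb->head = 0;
	dsb->tail = tail; /* the tail write is the doorbell */
	dsb->busy = true; /* hardware starts fetching the buffer */

	/* the real code wait_for()s !is_dsb_busy() with a timeout */
	dsb->busy = false;
	return true;
}

int main(void)
{
	struct dsb_model dsb = { 0 };

	printf("committed: %d\n", dsb_commit(&dsb, 64));
	return 0;
}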